subprocess.PIPE

Here are examples of the Python API `subprocess.PIPE`, taken from open source projects. By voting up, you can indicate which examples are most useful and appropriate.

200 Examples — page 7

Example 1

Project: amico
Source File: ip2asn.py
View license
def ip2asn(dump_id):
    # Connect to database
    try:
        conn = psycopg2.connect("dbname=%s host=%s user=%s password=%s"
            % (db_name, db_host, db_user, db_password))
    except:
        print "Unable to connect to database: " + db_name

    # Use Autocommit mode for database connection
    conn.set_isolation_level(0)
    cursor = conn.cursor()

    # Setup SOCKS proxy
    if socks_proxy_host:
        socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5,
                        socks_proxy_host, socks_proxy_port)
        socket.socket = socks.socksocket
    ###

    # Database query to get the relevant recent record
    cursor.execute("""
        SELECT server, timestamp FROM pe_dumps WHERE dump_id = %s
            """, (dump_id,))
    row = cursor.fetchone()
    server_ip = row[0]
    # Exit if an AS containing this IP has been logged with in the last 1 month
    cursor.execute("""
        SELECT * FROM bgp2asn WHERE log_date > (current_date - interval '1 month')
        AND bgp_prefix >> %s """, (server_ip,))
    if cursor.rowcount > 0:
        return

    # Query whois.cymru.com
    #cmd = subprocess.Popen(['whois','-h','whois.cymru.com','-v',
    #       server_ip], stdout = subprocess.PIPE)
    #as_info = cmd.stdout
    #for line in as_info:
    #   if(server_ip in line):
    #       output = line.split('|')
    #       break
    #words=[]
    #for word in output:
    #   words.append(word.strip())

    # Query asn.cymru.com using dig
    # A sample output is:
    #     "701 1239 3549 3561 7132 | 216.90.108.0/24 | US | arin | 1998-09-25"
    print "making call"
    cmd = subprocess.Popen(['dig', '+short', util.reverse_ip(server_ip) +
                    '.origin.asn.cymru.com', 'TXT'], stdout=subprocess.PIPE)
    time.sleep(CYMRU_TIMEOUT)
    if cmd.poll() is None:
        cmd.kill()
        return
    as_info = cmd.stdout.readline()
    as_info = as_info.strip().strip('"')
    output = as_info.split('|')
    words = []
    for answer in output:
        if answer:
            words.append(answer.split()[0].strip())
        else:
            words.append(None)

    #print words
    as_number = words[0]
    bgp_prefix = words[1]
    country_code = words[2]
    date_allocated = words[4]

    # Sample output:
    #     "23028 | US | arin | 2002-01-04 | TEAMCYMRU - SAUNET"
    cmd = subprocess.Popen(['dig', '+short', 'AS' + as_number + '.asn.cymru.com',
                            'TXT'], stdout=subprocess.PIPE)
    time.sleep(CYMRU_TIMEOUT)
    if cmd.poll() is None:
        cmd.kill()
        print ("ip2asn.py: Couldn't finish the call to cymru for {0}. Aborting..."
                .format((server_ip,)))
        return
    as_info = cmd.stdout.readline()
    as_info = as_info.strip().strip('"')
    output = as_info.split('|')
    words = []
    for word in output:
        words.append(word.strip())
    print words
    as_name = words[4]

    # Store the record in the database
    cursor.execute("""
        INSERT INTO
            bgp2asn
            (bgp_prefix, as_number, as_name, country_code,
             date_allocated, log_date)
        VALUES (%s,%s,%s,%s,%s,current_date)"""
        , (bgp_prefix, as_number, as_name, country_code,
          date_allocated))

    cursor.close()
    conn.close()

Example 2

Project: penelope
Source File: format_kobo.py
View license
def write(dictionary, args, output_file_path):
    """Write ``dictionary`` as a Kobo dictionary zip at ``output_file_path``.

    Groups entries by headword prefix into gzipped .html files, builds the
    MARISA ``words`` index (via the marisa_trie module or the external
    marisa-build binary), and zips everything together.

    Returns [output_file_path] on success, None on failure.
    """
    # result to be returned
    result = None

    # get absolute path
    output_file_path_absolute = os.path.abspath(output_file_path)

    # create tmp directory and work inside it
    cwd = os.getcwd()
    tmp_path = create_temp_directory()
    print_debug("Working in temp dir '%s'" % (tmp_path), args.debug)
    os.chdir(tmp_path)

    # sort by headword
    dictionary.sort(by_headword=True)

    # group by prefix; entries whose headword has no regular prefix go into
    # a special group keyed by a run of "1" characters
    files_to_compress = []
    prefix_length = int(args.group_by_prefix_length)
    special_group, group_keys, group_dict = dictionary.group(
        prefix_function=get_prefix_kobo,
        prefix_length=prefix_length,
        merge_min_size=int(args.group_by_prefix_merge_min_size),
        merge_across_first=args.group_by_prefix_merge_across_first
    )
    if special_group is not None:
        special_group_key = u"1" * prefix_length
        group_dict[special_group_key] = special_group
        group_keys = [special_group_key] + group_keys

    # write one gzipped html file per group (Kobo expects .html extension
    # on the gzip-compressed payload)
    for key in group_keys:
        # write html file
        file_html_path = key + u".html"
        file_html_obj = io.open(file_html_path, "wb")
        file_html_obj.write(u"<?xml version=\"1.0\" encoding=\"utf-8\"?><html>".encode("utf-8"))
        for entry in group_dict[key]:
            headword = entry.headword
            definition = entry.definition
            file_html_obj.write((u"<w><a name=\"%s\"/><div><b>%s</b><br/>%s</div></w>" % (headword, headword, definition)).encode("utf-8"))
        file_html_obj.write((u"</html>").encode("utf-8"))
        file_html_obj.close()

        # compress in gz format
        file_html_obj = io.open(file_html_path, "rb")
        file_gz_path = file_html_path + u".gz"
        file_gz_obj = gzip.open(file_gz_path, "wb")
        file_gz_obj.writelines(file_html_obj)
        file_gz_obj.close()
        file_html_obj.close()

        # delete .html file
        delete_file(None, file_html_path)
        # rename .html.gz file into .html
        rename_file(file_gz_path, file_html_path)
        files_to_compress.append(file_html_path)

    # write words index: prefer the Python module, fall back to the
    # external marisa-build binary
    file_words_path = WORDS_FILE_NAME
    keys = sorted(dictionary.entries_index.keys())
    try:
        import marisa_trie
        trie = marisa_trie.Trie(keys)
        trie.save(file_words_path)
        result = [file_words_path]
    except ImportError:
        # call MARISA with subprocess
        print_info("  MARISA cannot be imported as Python module. You might want to install it with:")
        print_info("  $ [sudo] pip install marisa_trie")
        marisa_build_path = MARISA_BUILD
        if args.marisa_bin_path is None:
            print_info("  Running '%s' from $PATH" % MARISA_BUILD)
        else:
            marisa_build_path = os.path.join(args.marisa_bin_path, MARISA_BUILD)
            print_info("  Running '%s' from '%s'" % (MARISA_BUILD, args.marisa_bin_path))
        # feed the sorted keys to marisa-build on stdin, one per line
        query = (u"\n".join([x for x in keys]) + u"\n").encode("utf-8")

        try:
            proc = subprocess.Popen(
                [marisa_build_path, "-l", "-o", file_words_path],
                stdout=subprocess.PIPE,
                stdin=subprocess.PIPE,
                stderr=subprocess.PIPE
            )
            # output is not used; previously it was pointlessly decoded,
            # which could raise UnicodeDecodeError on non-UTF-8 stdout
            proc.communicate(input=query)
            result = [file_words_path]
        except OSError:
            print_error("  Unable to run '%s' as '%s'" % (MARISA_BUILD, marisa_build_path))
            print_error("  Please make sure '%s':" % MARISA_BUILD)
            print_error("    1. is available on your $PATH or")
            print_error("    2. specify its path with --marisa-bin-path or")
            print_error("    3. install the marisa_trie Python module")
            result = None

    if result is not None:
        # add file_words_path to files to compress
        files_to_compress.append(file_words_path)
        # create output zip file
        try:
            print_debug("Writing to file '%s'..." % (output_file_path_absolute), args.debug)
            file_zip_obj = zipfile.ZipFile(output_file_path_absolute, "w", zipfile.ZIP_DEFLATED)
            for file_to_compress in files_to_compress:
                file_to_compress = os.path.basename(file_to_compress)
                file_zip_obj.write(file_to_compress)
            file_zip_obj.close()
            result = [output_file_path]
            print_debug("Writing to file '%s'... success" % (output_file_path_absolute), args.debug)
        except Exception:
            # narrowed from a bare except so KeyboardInterrupt/SystemExit
            # are no longer swallowed
            print_error("Writing to file '%s'... failure" % (output_file_path_absolute))

    # delete tmp directory (unless --keep was given)
    os.chdir(cwd)
    if args.keep:
        print_info("Not deleting temp dir '%s'" % (tmp_path))
    else:
        delete_directory(tmp_path)
        print_debug("Deleted temp dir '%s'" % (tmp_path), args.debug)

    return result

Example 3

Project: penelope
Source File: format_stardict.py
View license
def write(dictionary, args, output_file_path):
    """Write ``dictionary`` in StarDict format, zipped at ``output_file_path``.

    Produces .ifo, .idx, .dict (optionally dictzip-compressed to .dict.dz)
    and, when the dictionary has synonyms, a .syn file, then bundles the
    pieces into a zip archive.

    Returns [output_file_path] on success, None on failure.
    """
    # result to be returned
    result = None

    # get absolute path
    output_file_path_absolute = os.path.abspath(output_file_path)

    # create tmp directory and work inside it
    cwd = os.getcwd()
    tmp_path = create_temp_directory()
    print_debug("Working in temp dir '%s'" % (tmp_path), args.debug)
    os.chdir(tmp_path)

    # get the basename and compute output file paths
    base = os.path.basename(output_file_path)
    if base.endswith(".zip"):
        base = base[:-4]
    ifo_file_path = base + ".ifo"
    idx_file_path = base + ".idx"
    dict_file_path = base + ".dict"
    dict_dz_file_path = base + ".dict.dz"
    syn_file_path = base + ".syn"

    # TODO by spec, the index should be sorted
    # TODO using the comparator stardict_strcmp() defined in the spec
    # TODO (it calls g_ascii_strcasecmp() and/or strcmp() ),
    # TODO or with a user-defined collation function
    #
    # NOTE: for UTF-8 encoded dictionaries, Python's builtin lower() and
    # sort() by headword should be equivalent to g_ascii_strcasecmp()
    # (which compares ignoring the case of ASCII characters only), and fast
    dictionary.sort(by_headword=True, ignore_case=True)

    # write .idx and .dict files; each .idx record is:
    #   headword bytes, NUL, big-endian int32 offset, big-endian int32 size
    print_debug("Writing .idx and .dict files...", args.debug)
    idx_file_obj = io.open(idx_file_path, "wb")
    dict_file_obj = io.open(dict_file_path, "wb")
    current_offset = 0
    current_idx_size = 0
    for entry_index in dictionary.entries_index_sorted:
        entry = dictionary.entries[entry_index]
        headword_bytes = entry.headword.encode("utf-8")
        definition_bytes = entry.definition.encode("utf-8")
        definition_size = len(definition_bytes)
        # write .idx
        idx_file_obj.write(headword_bytes)
        idx_file_obj.write(b"\0")
        idx_file_obj.write(struct.pack('>i', current_offset))
        idx_file_obj.write(struct.pack('>i', definition_size))
        current_idx_size += (len(headword_bytes) + 1 + 4 + 4)
        # write .dict
        dict_file_obj.write(definition_bytes)
        current_offset += definition_size
    idx_file_obj.close()
    dict_file_obj.close()
    print_debug("Writing .idx and .dict files... done", args.debug)

    # list files to compress
    files_to_compress = []
    files_to_compress.append(ifo_file_path)
    files_to_compress.append(idx_file_path)

    # write .syn file; each record is: synonym bytes, NUL, int32 entry index
    dict_syns_len = 0
    if dictionary.has_synonyms:
        if args.ignore_synonyms:
            print_debug("Dictionary has synonyms, but ignoring them", args.debug)
        else:
            print_debug("Dictionary has synonyms, writing .syn file...", args.debug)
            syn_file_obj = io.open(syn_file_path, "wb")
            dict_syns = dictionary.get_synonyms()
            dict_syns_len = len(dict_syns)
            for pair in dict_syns:
                synonym_bytes = pair[0].encode("utf-8")
                index = pair[1]
                syn_file_obj.write(synonym_bytes)
                syn_file_obj.write(b"\0")
                syn_file_obj.write(struct.pack('>i', index))
            syn_file_obj.close()
            files_to_compress.append(syn_file_path)
            print_debug("Dictionary has synonyms, writing .syn file... done", args.debug)

    # compress .dict file with the external dictzip binary, unless disabled
    if args.sd_no_dictzip:
        print_debug("Not compressing .dict file with dictzip", args.debug)
        files_to_compress.append(dict_file_path)
        result = [dict_file_path]
    else:
        try:
            print_debug("Compressing .dict file with dictzip...", args.debug)
            dictzip_path = DICTZIP
            if args.dictzip_path is None:
                print_info("  Running '%s' from $PATH" % DICTZIP)
            else:
                dictzip_path = args.dictzip_path
                print_info("  Running '%s' from '%s'" % (DICTZIP, dictzip_path))
            proc = subprocess.Popen(
                [dictzip_path, "-k", dict_file_path],
                stdout=subprocess.PIPE,
                stdin=subprocess.PIPE,
                stderr=subprocess.PIPE
            )
            proc.communicate()
            result = [dict_dz_file_path]
            files_to_compress.append(dict_dz_file_path)
            print_debug("Compressing .dict file with dictzip... done", args.debug)
        except OSError:
            print_error("  Unable to run '%s' as '%s'" % (DICTZIP, dictzip_path))
            print_error("  Please make sure '%s':" % DICTZIP)
            print_error("    1. is available on your $PATH or")
            print_error("    2. specify its path with --dictzip-path or")
            print_error("    3. specify --no-dictzip to avoid compressing the .dict file")
            result = None

    if result is not None:
        # create ifo file (StarDict 2.4.2 metadata)
        ifo_file_obj = io.open(ifo_file_path, "wb")
        ifo_file_obj.write((u"StarDict's dict ifo file\n").encode("utf-8"))
        ifo_file_obj.write((u"version=2.4.2\n").encode("utf-8"))
        ifo_file_obj.write((u"wordcount=%d\n" % (len(dictionary))).encode("utf-8"))
        ifo_file_obj.write((u"idxfilesize=%d\n" % (current_idx_size)).encode("utf-8"))
        ifo_file_obj.write((u"bookname=%s\n" % (args.title)).encode("utf-8"))
        ifo_file_obj.write((u"date=%s\n" % (args.year)).encode("utf-8"))
        ifo_file_obj.write((u"sametypesequence=m\n").encode("utf-8"))
        ifo_file_obj.write((u"description=%s\n" % (args.description)).encode("utf-8"))
        ifo_file_obj.write((u"author=%s\n" % (args.author)).encode("utf-8"))
        ifo_file_obj.write((u"email=%s\n" % (args.email)).encode("utf-8"))
        ifo_file_obj.write((u"website=%s\n" % (args.website)).encode("utf-8"))
        if dict_syns_len > 0:
            ifo_file_obj.write((u"synwordcount=%d\n" % (dict_syns_len)).encode("utf-8"))
        ifo_file_obj.close()

        # create output zip file
        try:
            print_debug("Writing to file '%s'..." % (output_file_path_absolute), args.debug)
            file_zip_obj = zipfile.ZipFile(output_file_path_absolute, "w", zipfile.ZIP_DEFLATED)
            for file_to_compress in files_to_compress:
                file_to_compress = os.path.basename(file_to_compress)
                file_zip_obj.write(file_to_compress)
                print_debug("Written %s" % (file_to_compress), args.debug)
            file_zip_obj.close()
            result = [output_file_path]
            print_debug("Writing to file '%s'... success" % (output_file_path_absolute), args.debug)
        except Exception:
            # narrowed from a bare except so KeyboardInterrupt/SystemExit
            # are no longer swallowed
            print_error("Writing to file '%s'... failure" % (output_file_path_absolute))

    # delete tmp directory (unless --keep was given)
    os.chdir(cwd)
    if args.keep:
        print_info("Not deleting temp dir '%s'" % (tmp_path))
    else:
        delete_directory(tmp_path)
        print_debug("Deleted temp dir '%s'" % (tmp_path), args.debug)

    return result

Example 4

Project: dgroc
Source File: dgroc.py
View license
def generate_new_srpm(config, project, first=True):
    ''' For a given project in the configuration file generate a new srpm
    if it is possible.

    Clones/pulls the project's SCM checkout, and when the latest commit
    differs from the one recorded in the config, archives the sources,
    updates the spec file, copies patches and runs `rpmbuild -bs`.

    Returns the path of the built SRPM, or None when nothing was done
    (unchanged commit, pull failure, or rpmbuild failure).
    '''
    if not config.has_option(project, 'scm') or config.get(project, 'scm') == 'git':
        reader = GitReader
    elif config.get(project, 'scm') == 'hg':
        reader = MercurialReader
    else:
        raise DgrocException(
            'Project "%s" tries to use unknown "scm" option'
            % project)
    reader.init()
    LOG.debug('Generating new source rpm for project: %s', project)
    if not config.has_option(project, '%s_folder' % reader.short):
        raise DgrocException(
            'Project "%s" does not specify a "%s_folder" option'
            % (project, reader.short))

    if not config.has_option(project, '%s_url' % reader.short) and not os.path.exists(
            config.get(project, '%s_folder' % reader.short)):
        raise DgrocException(
            'Project "%s" does not specify a "%s_url" option and its '
            '"%s_folder" option does not exists' % (project, reader.short, reader.short))

    if not config.has_option(project, 'spec_file'):
        raise DgrocException(
            'Project "%s" does not specify a "spec_file" option'
            % project)

    # git clone if needed
    git_folder = config.get(project, '%s_folder' % reader.short)
    if '~' in git_folder:
        git_folder = os.path.expanduser(git_folder)

    if not os.path.exists(git_folder):
        git_url = config.get(project, '%s_url' % reader.short)
        LOG.info('Cloning %s', git_url)
        reader.clone(git_url, git_folder)

    # git pull; on failure, re-clone once and retry
    cwd = os.getcwd()
    os.chdir(git_folder)
    pull = reader.pull()
    out = pull.communicate()
    os.chdir(cwd)
    if pull.returncode:
        LOG.info('Strange result of the %s pull:\n%s', reader.short, out[0])
        if first:
            LOG.info('Gonna try to re-clone the project')
            shutil.rmtree(git_folder)
            generate_new_srpm(config, project, first=False)
        return

    # Retrieve last commit
    commit_hash = reader.commit_hash(git_folder)
    LOG.info('Last commit: %s', commit_hash)

    # Check if commit changed since the hash recorded in the config.
    # BUGFIX: the option name was previously built with the quotes
    # misplaced ('%s_hash  % reader.short'), so the literal string was used
    # as the option key instead of e.g. 'git_hash'.
    hash_option = '%s_hash' % reader.short
    changed = False
    if not config.has_option(project, hash_option):
        # first time we see this project: record the hash and build
        config.set(project, hash_option, commit_hash)
        changed = True
    elif config.get(project, hash_option) != commit_hash:
        changed = True

    if not changed:
        return

    # Build sources archive from the checkout
    cwd = os.getcwd()
    os.chdir(git_folder)
    archive_name = "%s-%s.tar" % (project, commit_hash)
    cmd = reader.archive_cmd(project, archive_name)
    LOG.debug('Command to generate archive: %s', ' '.join(cmd))
    pull = subprocess.Popen(
        cmd,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE)
    out = pull.communicate()
    os.chdir(cwd)

    # Update spec file
    spec_file = config.get(project, 'spec_file')
    if '~' in spec_file:
        spec_file = os.path.expanduser(spec_file)

    update_spec(
        spec_file,
        commit_hash,
        archive_name,
        config.get('main', 'username'),
        config.get('main', 'email'),
        reader)

    # Copy patches into the rpm SOURCES directory
    if config.has_option(project, 'patch_files'):
        LOG.info('Copying patches')
        candidates = config.get(project, 'patch_files').split(',')
        candidates = [candidate.strip() for candidate in candidates]
        for candidate in candidates:
            LOG.debug('Expanding path: %s', candidate)
            candidate = os.path.expanduser(candidate)
            patches = glob.glob(candidate)
            if not patches:
                LOG.info('Could not expand path: `%s`', candidate)
            for patch in patches:
                filename = os.path.basename(patch)
                dest = os.path.join(get_rpm_sourcedir(), filename)
                LOG.debug('Copying from %s, to %s', patch, dest)
                shutil.copy(
                    patch,
                    dest
                )

    # Generate SRPM; force LANG=C so the 'Wrote:' line can be parsed
    env = os.environ
    env['LANG'] = 'C'
    build = subprocess.Popen(
        ["rpmbuild", "-bs", spec_file],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        env=env)
    out = build.communicate()
    os.chdir(cwd)
    if build.returncode:
        LOG.info(
            'Strange result of the rpmbuild -bs:\n  stdout:%s\n  stderr:%s',
            out[0],
            out[1]
        )
        return
    srpm = out[0].split('Wrote:')[1].strip()
    LOG.info('SRPM built: %s', srpm)

    return srpm

Example 5

Project: pyqtgraph
Source File: processes.py
View license
    def __init__(self, name=None, target=None, executable=None, copySysPath=True, debug=False, timeout=20, wrapStdout=None):
        """
        ==============  =============================================================
        **Arguments:**
        name            Optional name for this process used when printing messages
                        from the remote process.
        target          Optional function to call after starting remote process.
                        By default, this is startEventLoop(), which causes the remote
                        process to process requests from the parent process until it
                        is asked to quit. If you wish to specify a different target,
                        it must be picklable (bound methods are not).
        copySysPath     If True, copy the contents of sys.path to the remote process
        debug           If True, print detailed information about communication
                        with the child process.
        wrapStdout      If True (default on windows) then stdout and stderr from the
                        child process will be caught by the parent process and
                        forwarded to its stdout/stderr. This provides a workaround
                        for a python bug: http://bugs.python.org/issue3905
                        but has the side effect that child output is significantly
                        delayed relative to the parent output.
        ==============  =============================================================
        """
        # Resolve defaults: child runs startEventLoop with this interpreter
        # unless the caller overrides.
        if target is None:
            target = startEventLoop
        if name is None:
            name = str(self)
        if executable is None:
            executable = sys.executable
        self.debug = 7 if debug is True else False  # 7 causes printing in white
        
        ## random authentication key
        authkey = os.urandom(20)

        ## Windows seems to have a hard time with hmac 
        if sys.platform.startswith('win'):
            authkey = None

        #print "key:", ' '.join([str(ord(x)) for x in authkey])
        ## Listen for connection from remote process (and find free port number)
        # Port 0 lets the OS pick a free port; the chosen one is read back
        # from l.address so it can be sent to the child.
        l = multiprocessing.connection.Listener(('localhost', 0), authkey=authkey)
        port = l.address[1]

        ## start remote process, instruct it to run target function
        sysPath = sys.path if copySysPath else None
        bootstrap = os.path.abspath(os.path.join(os.path.dirname(__file__), 'bootstrap.py'))
        self.debugMsg('Starting child process (%s %s)' % (executable, bootstrap))

        # Decide on printing color for this process
        if debug:
            procDebug = (Process._process_count%6) + 1  # pick a color for this process to print in
            Process._process_count += 1
        else:
            procDebug = False
        
        if wrapStdout is None:
            wrapStdout = sys.platform.startswith('win')

        if wrapStdout:
            ## note: we need all three streams to have their own PIPE due to this bug:
            ## http://bugs.python.org/issue3905
            stdout = subprocess.PIPE
            stderr = subprocess.PIPE
            self.proc = subprocess.Popen((executable, bootstrap), stdin=subprocess.PIPE, stdout=stdout, stderr=stderr)
            ## to circumvent the bug and still make the output visible, we use 
            ## background threads to pass data from pipes to stdout/stderr
            self._stdoutForwarder = FileForwarder(self.proc.stdout, "stdout", procDebug)
            self._stderrForwarder = FileForwarder(self.proc.stderr, "stderr", procDebug)
        else:
            self.proc = subprocess.Popen((executable, bootstrap), stdin=subprocess.PIPE)

        targetStr = pickle.dumps(target)  ## double-pickle target so that child has a chance to 
                                          ## set its sys.path properly before unpickling the target
        pid = os.getpid() # we must send pid to child because windows does not have getppid
        
        ## Send everything the remote process needs to start correctly,
        ## handed to the child over its stdin as a single pickled dict.
        data = dict(
            name=name+'_child', 
            port=port, 
            authkey=authkey, 
            ppid=pid, 
            targetStr=targetStr, 
            path=sysPath, 
            pyside=USE_PYSIDE,
            debug=procDebug
            )
        pickle.dump(data, self.proc.stdin)
        self.proc.stdin.close()
        
        ## open connection for remote process
        self.debugMsg('Listening for child process on port %d, authkey=%s..' % (port, repr(authkey)))
        while True:
            try:
                conn = l.accept()
                break
            except IOError as err:
                if err.errno == 4:  # interrupted; try again
                    continue
                else:
                    raise

        RemoteEventHandler.__init__(self, conn, name+'_parent', pid=self.proc.pid, debug=self.debug)
        self.debugMsg('Connected to child process.')
        
        atexit.register(self.join)
Example 6

Project: upstream_sync
Source File: upstream_sync.py
View license
def main():
    """main subroutine"""

    parser = OptionParser()
    parser.add_option("-v", "--verbose", help="Be verbose", action="count")
    parser.add_option("-l", "--list", help="list repos", action="store_true")
    parser.add_option("-c", "--command", help="print the sync command", action="store_true")
    parser.add_option("-r", "--repos", help="syncs specific repo(s) (comma seperated list)", dest="rfilter")
    parser.add_option("--root", help="run script as root", action="store_true")
    try:
        (options, args) = parser.parse_args()
    except OptionError:
        parser.print_help()
        return 1

    global verbose
    if options.verbose:
        logging.basicConfig(level=logging.DEBUG)
        verbose = True
    else:
        logging.basicConfig(level=logging.WARNING)
        verbose = False

    show_command = options.command
    run_as_root = options.root

    # get repos from config
    if options.rfilter:
        repos = config_repos(rfilter=options.rfilter.split(','))
    else:
        repos = config_repos()

    # list repos
    if options.list:
        list_repos(repos)
        sys.exit(0)

    if not run_as_root:
        if os.geteuid() == 0:
            print("It is strongly advised not to run this script as root!")
            print("Running as root will most likely mess up filesystem permissions.")
            print("If you are sure you want to run as root, pass --root")
            sys.exit(2)

    if not show_command:
        make_dir(confd_dir)

    for repo in repos:
        # set variables based on values in config
        url = repo['url']
        name = repo['name']
        path = os.path.join(mirror_dir, repo['path'])  # absolute path of repository

        # create repo directory
        if not show_command:
            make_dir(path, 0775)

        createrepo = False
        if repo['createrepo'].lower() == "true":
            createrepo = True

        # Generate the sync and createrepo commands to be used based on repository type
        createrepo_exec = ['createrepo']
        createrepo_opts = ['--pretty', '--database', '--update', '--cachedir', os.path.join(path, '.cache'), path]
        if not options.verbose:
            createrepo_opts.append('-q')

        if re.match('^(http|https)://', url):
            sync_cmd = sync_cmd_reposync(repo)
        elif re.match('^rhns:///', url):
            sync_cmd = sync_cmd_rhnget(repo)
        elif re.match('^you://', url):
            sync_cmd = sync_cmd_you(repo)
        elif re.match('^rsync://', url):
            sync_cmd = sync_cmd_rsync(repo)
        else:
            logging.warn('url type unknown - %s' % url)
            continue

        if not sync_cmd:
            continue

        # if option -c is passed print commands and continue to loop item
        if show_command:
            print('%s:' % name)
            print('  '+' '.join(sync_cmd))
            if createrepo:
                print('  '+' '.join(createrepo_exec + createrepo_opts))
            continue

        # preform sync - rhnget/rsync
        logging.info('syncing %s' % name)
        if options.verbose:
            stdout_pipe = sys.stdout
            stderr_pipe = sys.stderr
        else:
            stdout_pipe = subprocess.PIPE
            stderr_pipe = subprocess.STDOUT

        p1 = subprocess.Popen(sync_cmd, stdout=stdout_pipe, stderr=stderr_pipe, stdin=subprocess.PIPE)
        p1_rc = p1.wait()
        stdout, _ = p1.communicate()

        # display output if the sync fails
        if p1_rc > 0:
            if not options.verbose:
                logging.warn(stdout)
            logging.warn('sync failed: %s' % name)
            continue  # no need to run createrepo if sync failed

        # run createrepo to generate package metadata
        if createrepo:
            logging.info('generating package metadata: {0}'.format(name))

            # if comps.xml exists, use it to generate group data
            comps_file = os.path.join(path, 'comps.xml')
            if os.path.isfile(comps_file):
                createrepo_opts = ['-g', comps_file] + createrepo_opts

            createrepo_cmd = createrepo_exec + createrepo_opts

            p2 = subprocess.Popen(createrepo_cmd, stdout=stdout_pipe, stderr=stderr_pipe, stdin=subprocess.PIPE)
            p2_rc = p2.wait()
            stdout, _ = p2.communicate()

            if p2_rc > 0:
                if not options.verbose:
                    logging.warn(stdout)
                logging.warn('createrepo failed: %s' % name)

Example 7

Project: Diamond
Source File: ipvs.py
View license
    def collect(self):
        """Collect IPVS statistics by parsing ``ipvsadm`` output.

        Runs ``self.statcommand`` (rate/stat output) and ``self.concommand``
        (connection output), parses their tables and publishes per-backend
        metrics plus per-virtual-server ``total`` aggregates.

        Returns False (without publishing) if the configured binaries are
        missing or not executable.

        NOTE: Python 2 style code (``dict.iteritems``, the ``string``
        module, ``filter`` returning a list) -- not Python 3 safe.
        """
        # Sanity-check the configured binaries before shelling out.
        if not os.access(self.config['bin'], os.X_OK):
            self.log.error("%s does not exist, or is not executable",
                           self.config['bin'])
            return False

        if ((str_to_bool(self.config['use_sudo']) and
             not os.access(self.config['sudo_cmd'], os.X_OK))):
            self.log.error("%s does not exist, or is not executable",
                           self.config['sudo_cmd'])
            return False

        # Probe run: discard output, only look at the exit status.
        # NOTE(review): stderr is piped but never read before wait(); a
        # very chatty ipvsadm could in theory block on a full pipe buffer.
        p = subprocess.Popen(self.statcommand, stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        p.wait()

        # Exit code 255 is taken to mean this ipvsadm does not support
        # '--exact'; drop the flag and fall back to human-suffixed numbers
        # (K/M/G), which are decoded below.
        if p.returncode == 255:
            self.statcommand = filter(
                lambda a: a != '--exact', self.statcommand)

        # Real run: capture stdout, strip the trailing newline.
        p = subprocess.Popen(self.statcommand,
                             stdout=subprocess.PIPE).communicate()[0][:-1]

        # Column positions of the traffic counters in `ipvsadm --stats`
        # style output.
        columns = {
            'conns': 2,
            'inpkts': 3,
            'outpkts': 4,
            'inbytes': 5,
            'outbytes': 6,
        }

        external = ""
        backend = ""
        for i, line in enumerate(p.split("\n")):
            # First three lines are headers.
            if i < 3:
                continue
            row = line.split()

            # "TCP <vip:port>" / "UDP <vip:port>" starts a virtual server
            # section; "-> <rip:port>" lines are its backends.
            if row[0] == "TCP" or row[0] == "UDP":
                external = row[0] + "_" + string.replace(row[1], ".", "_")
                backend = "total"
            elif row[0] == "->":
                backend = string.replace(row[1], ".", "_")
            else:
                continue

            for metric, column in columns.iteritems():
                metric_name = ".".join([external, backend, metric])
                # metric_value = int(row[column])
                value = row[column]
                # Decode K/M/G suffixes emitted when '--exact' is absent.
                if value.endswith('K'):
                    metric_value = int(value[0:len(value) - 1]) * 1024
                elif value.endswith('M'):
                    metric_value = (int(value[0:len(value) - 1]) * 1024 * 1024)
                elif value.endswith('G'):
                    metric_value = (
                        int(value[0:len(value) - 1]) * 1024 * 1024 * 1024)
                else:
                    metric_value = float(value)

                self.publish(metric_name, metric_value)

        # Second pass: connection counts (active/inactive) per backend,
        # with totals accumulated per virtual server.
        p = subprocess.Popen(self.concommand,
                             stdout=subprocess.PIPE).communicate()[0][:-1]

        columns = {
            'active': 4,
            'inactive': 5,
        }

        external = ""
        backend = ""
        total = {}
        for i, line in enumerate(p.split("\n")):
            if i < 3:
                continue
            row = line.split()

            if row[0] == "TCP" or row[0] == "UDP":
                # Flush the totals of the previous virtual server before
                # starting a new section.
                if total:
                    for metric, value in total.iteritems():
                        self.publish(
                            ".".join([external, "total", metric]), value)

                for k in columns.keys():
                    total[k] = 0.0

                external = row[0] + "_" + string.replace(row[1], ".", "_")
                continue
            elif row[0] == "->":
                backend = string.replace(row[1], ".", "_")
            else:
                continue

            for metric, column in columns.iteritems():
                metric_name = ".".join([external, backend, metric])
                # metric_value = int(row[column])
                value = row[column]
                if value.endswith('K'):
                    metric_value = int(value[0:len(value) - 1]) * 1024
                elif value.endswith('M'):
                    metric_value = int(value[0:len(value) - 1]) * 1024 * 1024
                elif value.endswith('G'):
                    metric_value = (
                        int(value[0:len(value) - 1]) * 1024 * 1024 * 1024)
                else:
                    metric_value = float(value)

                total[metric] += metric_value
                self.publish(metric_name, metric_value)

        # Flush totals of the last virtual server section.
        if total:
            for metric, value in total.iteritems():
                self.publish(".".join([external, "total", metric]), value)

Example 8

Project: qubes-core-admin
Source File: vm_qrexec_gui.py
View license
    @unittest.skipUnless(spawn.find_executable('xdotool'),
                         "xdotool not installed")
    def test_300_bug_1028_gui_memory_pinning(self):
        """
        Regression test for bug 1028 (per the method name).

        If VM window composition buffers are relocated in memory, GUI will
        still use old pointers and will display old pages.

        Strategy: fragment all free guest memory with an mlock()-ing helper
        program, open a terminal window (whose composition buffer then lands
        in fragmented memory), free the memory and trigger compaction, then
        compare a screenshot of the window taken from dom0 against one taken
        inside the VM -- they must match.
        :return:
        """
        self.testvm1.memory = 800
        self.testvm1.maxmem = 800
        # exclude from memory balancing
        self.testvm1.services['meminfo-writer'] = False
        self.testvm1.start()
        # and allow large map count
        self.testvm1.run("echo 256000 > /proc/sys/vm/max_map_count",
            user="root", wait=True)
        # Helper C program: mmaps N pages, then (Stage2) mlocks every other
        # page and (Stage3) munmaps the unlocked ones, leaving free memory
        # maximally fragmented.  Stages are driven via stdin/stdout.
        allocator_c = (
            "#include <sys/mman.h>\n"
            "#include <stdlib.h>\n"
            "#include <stdio.h>\n"
            "\n"
            "int main(int argc, char **argv) {\n"
            "	int total_pages;\n"
            "	char *addr, *iter;\n"
            "\n"
            "	total_pages = atoi(argv[1]);\n"
            "	addr = mmap(NULL, total_pages * 0x1000, PROT_READ | "
            "PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE | MAP_POPULATE, -1, 0);\n"
            "	if (addr == MAP_FAILED) {\n"
            "		perror(\"mmap\");\n"
            "		exit(1);\n"
            "	}\n"
            "	printf(\"Stage1\\n\");\n"
            "   fflush(stdout);\n"
            "	getchar();\n"
            "	for (iter = addr; iter < addr + total_pages*0x1000; iter += "
            "0x2000) {\n"
            "		if (mlock(iter, 0x1000) == -1) {\n"
            "			perror(\"mlock\");\n"
            "           fprintf(stderr, \"%d of %d\\n\", (iter-addr)/0x1000, "
            "total_pages);\n"
            "			exit(1);\n"
            "		}\n"
            "	}\n"
            "	printf(\"Stage2\\n\");\n"
            "   fflush(stdout);\n"
            "	for (iter = addr+0x1000; iter < addr + total_pages*0x1000; "
            "iter += 0x2000) {\n"
            "		if (munmap(iter, 0x1000) == -1) {\n"
            "			perror(\"munmap\");\n"
            "			exit(1);\n"
            "		}\n"
            "	}\n"
            "	printf(\"Stage3\\n\");\n"
            "   fflush(stdout);\n"
            "   fclose(stdout);\n"
            "	getchar();\n"
            "\n"
            "	return 0;\n"
            "}\n")

        # Ship the helper source into the VM and compile it there.
        p = self.testvm1.run("cat > allocator.c", passio_popen=True)
        p.communicate(allocator_c)
        p = self.testvm1.run("gcc allocator.c -o allocator",
            passio_popen=True, passio_stderr=True)
        (stdout, stderr) = p.communicate()
        if p.returncode != 0:
            self.skipTest("allocator compile failed: {}".format(stderr))

        # drop caches to have even more memory pressure
        self.testvm1.run("echo 3 > /proc/sys/vm/drop_caches",
            user="root", wait=True)

        # now fragment all free memory
        p = self.testvm1.run("grep ^MemFree: /proc/meminfo|awk '{print $2}'",
            passio_popen=True)
        memory_pages = int(p.communicate()[0].strip())
        memory_pages /= 4 # 4k pages
        alloc1 = self.testvm1.run(
            "ulimit -l unlimited; exec /home/user/allocator {}".format(
                memory_pages),
            user="root", passio_popen=True, passio_stderr=True)
        # wait for memory being allocated; can't use just .read(), because EOF
        # passing is unreliable while the process is still running
        alloc1.stdin.write("\n")
        alloc1.stdin.flush()
        alloc_out = alloc1.stdout.read(len("Stage1\nStage2\nStage3\n"))

        if "Stage3" not in alloc_out:
            # read stderr only in case of failed assert, but still have nice
            # failure message (don't use self.fail() directly)
            self.assertIn("Stage3", alloc_out, alloc1.stderr.read())

        # now, launch some window - it should get fragmented composition buffer
        # it is important to have some changing content there, to generate
        # content update events (aka damage notify)
        proc = self.testvm1.run("gnome-terminal --full-screen -e top",
            passio_popen=True)

        # help xdotool a little...
        time.sleep(2)
        # get window ID
        search = subprocess.Popen(['xdotool', 'search', '--sync',
            '--onlyvisible', '--class', self.testvm1.name + ':.*erminal'],
            stdout=subprocess.PIPE)
        winid = search.communicate()[0].strip()
        # Map the dom0 window id to the corresponding in-VM window id via
        # the _QUBES_VMWINDOWID property set by the GUI daemon.
        xprop = subprocess.Popen(['xprop', '-notype', '-id', winid,
            '_QUBES_VMWINDOWID'], stdout=subprocess.PIPE)
        vm_winid = xprop.stdout.read().strip().split(' ')[4]

        # now free the fragmented memory and trigger compaction
        alloc1.stdin.write("\n")
        alloc1.wait()
        self.testvm1.run("echo 1 > /proc/sys/vm/compact_memory", user="root")

        # now window may be already "broken"; to be sure, allocate (=zero)
        # some memory
        alloc2 = self.testvm1.run(
            "ulimit -l unlimited; /home/user/allocator {}".format(memory_pages),
            user="root", passio_popen=True, passio_stderr=True)
        alloc2.stdout.read(len("Stage1\n"))

        # wait for damage notify - top updates every 3 sec by default
        time.sleep(6)

        # now take screenshot of the window, from dom0 and VM
        # choose pnm format, as it doesn't have any useless metadata - easy
        # to compare
        p = self.testvm1.run("import -window {} pnm:-".format(vm_winid),
            passio_popen=True, passio_stderr=True)
        (vm_image, stderr) = p.communicate()
        if p.returncode != 0:
            raise Exception("Failed to get VM window image: {}".format(
                stderr))

        p = subprocess.Popen(["import", "-window", winid, "pnm:-"],
            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        (dom0_image, stderr) = p.communicate()
        if p.returncode != 0:
            raise Exception("Failed to get dom0 window image: {}".format(
                stderr))

        # Byte-identical screenshots prove the GUI daemon is still showing
        # the live composition buffer after compaction.
        if vm_image != dom0_image:
            self.fail("Dom0 window doesn't match VM window content")

Example 9

Project: aeneas
Source File: ffprobewrapper.py
View license
    def read_properties(self, audio_file_path):
        """
        Read the properties of an audio file
        and return them as a dictionary.

        Example: ::

            d["index"]=0
            d["codec_name"]=mp3
            d["codec_long_name"]=MP3 (MPEG audio layer 3)
            d["profile"]=unknown
            d["codec_type"]=audio
            d["codec_time_base"]=1/44100
            d["codec_tag_string"]=[0][0][0][0]
            d["codec_tag"]=0x0000
            d["sample_fmt"]=s16p
            d["sample_rate"]=44100
            d["channels"]=1
            d["channel_layout"]=mono
            d["bits_per_sample"]=0
            d["id"]=N/A
            d["r_frame_rate"]=0/0
            d["avg_frame_rate"]=0/0
            d["time_base"]=1/14112000
            d["start_pts"]=0
            d["start_time"]=0.000000
            d["duration_ts"]=1545083190
            d["duration"]=109.487188
            d["bit_rate"]=128000
            d["max_bit_rate"]=N/A
            d["bits_per_raw_sample"]=N/A
            d["nb_frames"]=N/A
            d["nb_read_frames"]=N/A
            d["nb_read_packets"]=N/A
            d["DISPOSITION:default"]=0
            d["DISPOSITION:dub"]=0
            d["DISPOSITION:original"]=0
            d["DISPOSITION:comment"]=0
            d["DISPOSITION:lyrics"]=0
            d["DISPOSITION:karaoke"]=0
            d["DISPOSITION:forced"]=0
            d["DISPOSITION:hearing_impaired"]=0
            d["DISPOSITION:visual_impaired"]=0
            d["DISPOSITION:clean_effects"]=0
            d["DISPOSITION:attached_pic"]=0

        :param string audio_file_path: the path of the audio file to analyze
        :rtype: dict
        :raises: TypeError: if ``audio_file_path`` is None
        :raises: OSError: if the file at ``audio_file_path`` cannot be read
        :raises: FFPROBEParsingError: if the call to ``ffprobe`` does not produce any output
        :raises: FFPROBEPathError: if the path to the ``ffprobe`` executable cannot be called
        :raises: FFPROBEUnsupportedFormatError: if the file has a format not supported by ``ffprobe``
        """

        # test if we can read the file at audio_file_path
        if audio_file_path is None:
            self.log_exc(u"The audio file path is None", None, True, TypeError)
        if not gf.file_can_be_read(audio_file_path):
            self.log_exc(u"Input file '%s' cannot be read" % (audio_file_path), None, True, OSError)

        # call ffprobe
        arguments = [self.rconf[RuntimeConfiguration.FFPROBE_PATH]]
        arguments.extend(self.FFPROBE_PARAMETERS)
        arguments.append(audio_file_path)
        self.log([u"Calling with arguments '%s'", arguments])
        try:
            proc = subprocess.Popen(
                arguments,
                stdout=subprocess.PIPE,
                stdin=subprocess.PIPE,
                stderr=subprocess.PIPE
            )
            (stdoutdata, stderrdata) = proc.communicate()
            proc.stdout.close()
            proc.stdin.close()
            proc.stderr.close()
        except OSError as exc:
            self.log_exc(u"Unable to call the '%s' ffprobe executable" % (self.rconf[RuntimeConfiguration.FFPROBE_PATH]), exc, True, FFPROBEPathError)
        self.log(u"Call completed")

        # check there is some output
        # NOTE(review): this checks the length of stderrdata, not stdoutdata.
        # ffprobe writes its banner (and the "Duration:" line parsed in the
        # fallback below) to stderr, so an empty stderr does indicate a failed
        # call -- but confirm this is intended and not a typo for
        # len(stdoutdata).
        if (stdoutdata is None) or (len(stderrdata) == 0):
            self.log_exc(u"ffprobe produced no output", None, True, FFPROBEParsingError)

        # decode stdoutdata and stderrdata to Unicode string
        try:
            stdoutdata = gf.safe_unicode(stdoutdata)
            stderrdata = gf.safe_unicode(stderrdata)
        except UnicodeDecodeError as exc:
            self.log_exc(u"Unable to decode ffprobe out/err", exc, True, FFPROBEParsingError)

        # dictionary for the results
        results = {
            self.STDOUT_CHANNELS: None,
            self.STDOUT_CODEC_NAME: None,
            self.STDOUT_DURATION: None,
            self.STDOUT_SAMPLE_RATE: None
        }

        # scan the first audio stream the ffprobe stdout output
        # TODO more robust parsing
        # TODO deal with multiple audio streams
        for line in stdoutdata.splitlines():
            if line == self.STDOUT_END_STREAM:
                self.log(u"Reached end of the stream")
                break
            elif len(line.split("=")) == 2:
                key, value = line.split("=")
                results[key] = value
                self.log([u"Found property '%s'='%s'", key, value])

        try:
            self.log([u"Duration found in stdout: '%s'", results[self.STDOUT_DURATION]])
            results[self.STDOUT_DURATION] = TimeValue(results[self.STDOUT_DURATION])
            self.log(u"Valid duration")
        except Exception:
            # was a bare ``except:``, which would also swallow
            # KeyboardInterrupt/SystemExit; narrowed to Exception so the
            # fallback only handles genuine conversion failures
            self.log_warn(u"Invalid duration")
            results[self.STDOUT_DURATION] = None
            # try scanning ffprobe stderr output
            for line in stderrdata.splitlines():
                match = self.STDERR_DURATION_REGEX.search(line)
                if match is not None:
                    self.log([u"Found matching line '%s'", line])
                    results[self.STDOUT_DURATION] = gf.time_from_hhmmssmmm(line)
                    self.log([u"Extracted duration '%.3f'", results[self.STDOUT_DURATION]])
                    break

        if results[self.STDOUT_DURATION] is None:
            self.log_exc(u"No duration found in stdout or stderr. Unsupported audio file format?", None, True, FFPROBEUnsupportedFormatError)

        # return dictionary
        self.log(u"Returning dict")
        return results

Example 10

Project: flask-funnel
Source File: manager.py
View license
@manager.command
def bundle_assets():
    """Compress and minify assets.

    Reads ``CSS_BUNDLES``/``JS_BUNDLES`` from the app config, preprocesses
    each listed file (fetching remote files into ``external/``), concatenates
    each bundle into ``<name>-all.<ext>`` and minifies it into
    ``<name>-min.<ext>`` under ``BUNDLES_DIR``, then cleans up temporaries.
    """
    YUI_COMPRESSOR_BIN = current_app.config.get('YUI_COMPRESSOR_BIN')

    path_to_jar = YUI_COMPRESSOR_BIN

    # Temporary files created while rewriting relative url() paths in CSS;
    # removed in the cleanup phase at the end.
    tmp_files = []

    def get_path(item):
        """Get the static path of an item"""
        return os.path.join(current_app.static_folder, item)

    def fix_urls(filename, compressed_file):
        """Fix relative paths in URLs for bundles"""
        print("Fixing URL's in %s" % filename)

        def fix_urls_regex(url, relpath):
            """Callback to fix relative path"""
            url = url.group(1).strip('"\'')
            # Absolute/data/attr() URLs need no rewriting.
            if url.startswith(('data:', 'http:', 'https:', 'attr(')):
                return url
            else:
                url = os.path.relpath(url, relpath)
                return 'url(%s)' % url

        css_content = ''
        with open(get_path(filename), 'r') as css_in:
            css_content = css_in.read()

        relpath = os.path.relpath(os.path.dirname(compressed_file),
                                  get_path(os.path.dirname(filename)))

        parse = lambda url: fix_urls_regex(url, relpath)

        # Raw string: the backslashes are regex escapes, not string escapes.
        css_parsed = re.sub(r'url\(([^)]*?)\)', parse, css_content)

        out_file = get_path(os.path.join(current_app.config.get('BUNDLES_DIR'),
                                         'tmp', '%s.tmp' % filename))

        if not os.path.exists(os.path.dirname(out_file)):
            os.makedirs(os.path.dirname(out_file))

        with open(out_file, 'w') as css_out:
            css_out.write(css_parsed)

        return os.path.relpath(out_file, get_path('.'))

    def preprocess_file(filename, compressed_file):
        """Preprocess the file"""
        # Protocol-relative and absolute URLs are fetched into external/.
        if filename.startswith('//'):
            url = 'http:%s' % filename
        elif filename.startswith(('http:', 'https:')):
            url = filename
        else:
            url = None

        if url:
            ext_media_path = get_path('external')

            if not os.path.exists(ext_media_path):
                os.makedirs(ext_media_path)

            filename = os.path.basename(url)
            if filename.endswith(('.js', '.css', '.less')):
                fp = get_path(filename.lstrip('/'))
                file_path = os.path.join(ext_media_path, fp)

                try:
                    req = urlopen(url)
                    print(' - Fetching %s ...' % url)
                except HTTPError as e:
                    print(' - HTTP Error %s for %s, %s' % (url, filename,
                                                           str(e.code)))
                    return None
                except URLError as e:
                    print(' - Invalid URL %s for %s, %s' % (url, filename,
                                                            str(e.reason)))
                    return None

                with open(file_path, 'w+') as fp:
                    try:
                        shutil.copyfileobj(req, fp)
                    except shutil.Error:
                        print(' - Could not copy file %s' % filename)
                filename = os.path.join('external', filename)
            else:
                print(' - Not a valid remote file %s' % filename)
                return None

        filename = preprocess(filename.lstrip('/'))

        # Local CSS needs its relative url() references rewritten so they
        # stay valid from the compressed file's location.
        if url is None and filename.endswith('.css'):
            filename = fix_urls(filename, compressed_file)
            tmp_files.append(filename)

        return get_path(filename.lstrip('/'))

    def minify(ftype, file_in, file_out):
        """Minify the file"""
        # Invoke the minifier with an argument list (shell=False) so paths
        # containing spaces or shell metacharacters cannot be interpreted
        # by a shell.
        if ftype == 'js' and 'UGLIFY_BIN' in current_app.config:
            o = {'method': 'UglifyJS',
                 'bin': current_app.config.get('UGLIFY_BIN')}
            subprocess.call([o['bin'], '-o', file_out, file_in],
                            stdout=subprocess.PIPE)
        elif ftype == 'css' and 'CLEANCSS_BIN' in current_app.config:
            o = {'method': 'clean-css',
                 'bin': current_app.config.get('CLEANCSS_BIN')}
            subprocess.call([o['bin'], '-o', file_out, file_in],
                            stdout=subprocess.PIPE)
        else:
            o = {'method': 'YUI Compressor',
                 'bin': current_app.config.get('JAVA_BIN')}
            subprocess.call([o['bin'], '-jar', path_to_jar, file_in,
                             '-o', file_out],
                            stdout=subprocess.PIPE)

        print("Minifying %s (using %s)" % (file_in, o['method']))

    # Assemble bundles and process
    bundles = {
        'css': current_app.config.get('CSS_BUNDLES'),
        'js': current_app.config.get('JS_BUNDLES'),
    }

    for ftype, bundle in bundles.items():
        for name, files in bundle.items():
            concatenated_file = get_path(os.path.join(
                current_app.config.get('BUNDLES_DIR'), ftype,
                '%s-all.%s' % (name, ftype,)))
            compressed_file = get_path(os.path.join(
                current_app.config.get('BUNDLES_DIR'), ftype,
                '%s-min.%s' % (name, ftype,)))

            if not os.path.exists(os.path.dirname(concatenated_file)):
                os.makedirs(os.path.dirname(concatenated_file))

            all_files = []
            for fn in files:
                processed = preprocess_file(fn, compressed_file)
                print('Processed: %s' % processed)
                if processed is not None:
                    all_files.append(processed)

            # Concatenate
            if not all_files:
                # report the bundle *name*, not the whole bundles dict
                print("Warning: '%s' is an empty bundle." % name)

            # Concatenate in Python instead of "cat %s > %s" through a
            # shell, which would break on (or allow injection via) paths
            # containing spaces or metacharacters.
            with open(concatenated_file, 'wb') as out_fp:
                for part in all_files:
                    with open(part, 'rb') as part_fp:
                        shutil.copyfileobj(part_fp, out_fp)

            # Minify
            minify(ftype, concatenated_file, compressed_file)

            # Post process
            postprocess(compressed_file, fix_path=False)

            # Remove concatenated file
            print('Remove concatenated file')
            os.remove(concatenated_file)

    # Cleanup
    print('Clean up temporary files')
    for file in tmp_files:
        try:
            os.remove(get_path(file))
            os.rmdir(os.path.dirname(get_path(file)))
        except OSError:
            pass

    try:
        os.rmdir(get_path(os.path.join(current_app.config.get('BUNDLES_DIR'),
                                       'tmp')))
    except OSError:
        pass

Example 11

Project: rockstor-core
Source File: receiver.py
View license
    def run(self):
        """Receive a btrfs send stream from a remote sender via the broker.

        Connects a zmq DEALER socket to the local replication broker,
        prepares the destination share/snapshot bookkeeping, spawns
        ``btrfs receive`` and pumps stream chunks from the socket into its
        stdin until the sender signals completion or a terminal error.
        Every phase updates ``self.msg`` first so the exit handler can
        report where a failure happened.
        """
        logger.debug('Id: %s. Starting a new Receiver for meta: %s' % (self.identity, self.meta))
        self.msg = ('Top level exception in receiver')
        latest_snap = None
        with self._clean_exit_handler():
            self.law = APIWrapper()
            self.poll = zmq.Poller()
            self.dealer = self.ctx.socket(zmq.DEALER)
            self.dealer.setsockopt_string(zmq.IDENTITY, u'%s' % self.identity)
            # Cap queued messages so a stalled btrfs-receive applies
            # backpressure instead of buffering the stream in memory.
            self.dealer.set_hwm(10)
            self.dealer.connect('ipc://%s' % settings.REPLICATION.get('ipc_socket'))
            self.poll.register(self.dealer, zmq.POLLIN)

            self.ack = True
            self.msg = ('Failed to get the sender ip for appliance: %s' % self.sender_id)
            self.sender_ip = Appliance.objects.get(uuid=self.sender_id).ip

            # Full send: create the destination share and its replica
            # metadata; incremental send: look up the existing metadata and
            # the latest snapshot to hand back to the sender as diff base.
            if (not self.incremental):
                self.msg = ('Failed to verify/create share: %s.' % self.sname)
                self.create_share(self.sname, self.dest_pool)

                self.msg = ('Failed to create the replica metadata object '
                            'for share: %s.' % self.sname)
                data = {'share': self.sname,
                        'appliance': self.sender_ip,
                        'src_share': self.src_share, }
                self.rid = self.create_rshare(data)
            else:
                self.msg = ('Failed to retreive the replica metadata object for '
                            'share: %s.' % self.sname)
                rso = ReplicaShare.objects.get(share=self.sname)
                self.rid = rso.id
                #Find and send the current snapshot to the sender. This will
                #be used as the start by btrfs-send diff.
                self.msg = ('Failed to verify latest replication snapshot on the system.')
                latest_snap = self._latest_snap(rso)

            self.msg = ('Failed to create receive trail for rid: %d' % self.rid)
            data = {'snap_name': self.snap_name, }
            self.rtid = self.create_receive_trail(self.rid, data)

            #delete the share, move the oldest snap to share
            self.msg = ('Failed to promote the oldest Snapshot to Share.')
            oldest_snap = get_oldest_snap(self.snap_dir, self.num_retain_snaps, regex='_replication_')
            if (oldest_snap is not None):
                snap_path = ('%s/%s' % (self.snap_dir, oldest_snap))
                share_path = ('%s%s/%s' %
                              (settings.MNT_PT, self.dest_pool,
                               self.sname))
                pool = Pool.objects.get(name=self.dest_pool)
                remove_share(pool, self.sname, '-1/-1')
                set_property(snap_path, 'ro', 'false',
                             mount=False)
                run_command(['/usr/bin/rm', '-rf', share_path],
                            throw=False)
                shutil.move(snap_path, share_path)
                self.delete_snapshot(self.sname, oldest_snap)

            self.msg = ('Failed to prune old Snapshots')
            self._delete_old_snaps(self.sname, self.snap_dir, self.num_retain_snaps + 1)

            self.msg = ('Failed to validate the source share(%s) on sender(uuid: %s '
                        ') Did the ip of the sender change?' %
                        (self.src_share, self.sender_id))
            self.validate_src_share(self.sender_id, self.src_share)

            sub_vol = ('%s%s/%s' % (settings.MNT_PT, self.dest_pool, self.sname))
            if (not is_subvol(sub_vol)):
                self.msg = ('Failed to create parent subvolume %s' % sub_vol)
                run_command([BTRFS, 'subvolume', 'create', sub_vol])

            self.msg = ('Failed to create snapshot directory: %s' % self.snap_dir)
            run_command(['/usr/bin/mkdir', '-p', self.snap_dir])
            snap_fp = ('%s/%s' % (self.snap_dir, self.snap_name))

            #If the snapshot already exists, presumably from the previous attempt and
            #the sender tries to send the same, reply back with snap_exists and do not
            #start the btrfs-receive
            if (is_subvol(snap_fp)):
                logger.debug('Id: %s. Snapshot to be sent(%s) already exists. Not '
                             'starting a new receive process' % (self.identity, snap_fp))
                self._send_recv('snap-exists')
                self._sys_exit(0)

            cmd = [BTRFS, 'receive', self.snap_dir]
            self.msg = ('Failed to start the low level btrfs receive command(%s)'
                        '. Aborting.' % cmd)
            self.rp = subprocess.Popen(cmd, shell=False, stdin=subprocess.PIPE,
                                       stdout=subprocess.PIPE,
                                       stderr=subprocess.PIPE)

            self.msg = ('Failed to send receiver-ready')
            rcommand, rmsg = self._send_recv('receiver-ready', latest_snap or '')
            if (rcommand is None):
                logger.error('Id: %s. No response from the broker for '
                             'receiver-ready command. Aborting.' % self.identity)
                self._sys_exit(3)

            term_commands = ('btrfs-send-init-error', 'btrfs-send-unexpected-termination-error',
                             'btrfs-send-nonzero-termination-error',)
            num_tries = 10
            poll_interval = 6000 # 6 seconds
            num_msgs = 0
            t0 = time.time()
            # Main pump loop: each POLLIN delivers one (command, payload)
            # pair; payloads are fed to btrfs-receive's stdin until the
            # sender signals the end of the stream or an error.
            while (True):
                socks = dict(self.poll.poll(poll_interval))
                if (socks.get(self.dealer) == zmq.POLLIN):
                    #reset to wait upto 60(poll_interval x num_tries milliseconds) for every message
                    num_tries = 10
                    command, message = self.dealer.recv_multipart()
                    if (command == 'btrfs-send-stream-finished'):
                        #this command concludes fsdata transfer. After this, btrfs-recev
                        #process should be terminated(.communicate).
                        if (self.rp.poll() is None):
                            self.msg = ('Failed to terminate btrfs-recv command')
                            out, err = self.rp.communicate()
                            out = out.split('\n')
                            err = err.split('\n')
                            logger.debug('Id: %s. Terminated btrfs-recv. cmd = %s '
                                         'out = %s err: %s rc: %s' %
                                         (self.identity, cmd, out, err, self.rp.returncode))
                        if (self.rp.returncode != 0):
                            self.msg = ('btrfs-recv exited with unexpected exitcode(%s). ' % self.rp.returncode)
                            raise Exception(self.msg)
                        self._send_recv('btrfs-recv-finished')
                        self.refresh_share_state()
                        self.refresh_snapshot_state()

                        self.msg = ('Failed to update receive trail for rtid: %d' % self.rtid)
                        self.update_receive_trail(self.rtid, {'status': 'succeeded',})
                        dsize, drate = self.size_report(self.total_bytes_received, t0)
                        logger.debug('Id: %s. Receive complete. Total data '
                                     'transferred: %s. Rate: %s/sec.' %
                                     (self.identity, dsize, drate))
                        self._sys_exit(0)

                    if (command in term_commands):
                        self.msg = ('Terminal command(%s) received from the sender. Aborting.' % command)
                        raise Exception(self.msg)

                    if (self.rp.poll() is None):
                        # btrfs-receive still alive: feed it the chunk and
                        # request the next one from the sender.
                        self.rp.stdin.write(message)
                        self.rp.stdin.flush()
                        #@todo: implement advanced credit request system.
                        self.dealer.send_multipart([b'send-more', ''])
                        num_msgs += 1
                        self.total_bytes_received += len(message)
                        if (num_msgs == 1000):
                            num_msgs = 0
                            dsize, drate = self.size_report(self.total_bytes_received, t0)
                            logger.debug('Id: %s. Receiver alive. Data '
                                         'transferred: %s. Rate: %s/sec.' %
                                         (self.identity, dsize, drate))
                    else:
                        # btrfs-receive died mid-stream: record the failure
                        # on the receive trail and abort.
                        out, err = self.rp.communicate()
                        out = out.split('\n')
                        err = err.split('\n')
                        logger.error('Id: %s. btrfs-recv died unexpectedly. cmd: %s out: %s. err: %s' %
                                     (self.identity, cmd, out, err))
                        msg = ('Low level system error from btrfs receive '
                               'command. cmd: %s out: %s err: %s for rtid: %s'
                               % (cmd, out, err, self.rtid))
                        data = {'status': 'failed',
                                'error': msg, }
                        self.msg = ('Failed to update receive trail for rtid: %d.' % self.rtid)
                        self.update_receive_trail(self.rtid, data)
                        self.msg = msg
                        raise Exception(self.msg)
                else:
                    # No traffic within poll_interval: count down and give
                    # up after num_tries consecutive silent intervals.
                    num_tries -= 1
                    msg = ('No response received from the broker. '
                           'remaining tries: %d' % num_tries)
                    logger.error('Id: %s. %s' % (self.identity, msg))
                    if (num_tries == 0):
                        self.msg = ('%s. Terminating the receiver.' % msg)
                        raise Exception(self.msg)

Example 12

Project: scalarizr
Source File: __init__.py
View license
    def _src_generator(self):
        '''
        Compress, split, yield out.

        Upload direction (``self._up`` is true): for each entry in
        ``self.src`` (a readable stream, a directory to be streamed via
        tar or a custom streamer, or a plain file) build an optional
        streamer/compressor subprocess pipeline, split the resulting
        stream into chunks under the tranzit volume mountpoint and yield
        each chunk's local filename; unless the upload is multipart, the
        manifest file is written and yielded last.

        Download direction: yield the manifest path first, block until it
        has been transferred, then yield the remote path of every chunk
        listed in the manifest while a background restorer thread
        consumes the downloaded chunks.
        '''
        if self._up:
            # Tranzit volume size is chunk for each worker
            # and Ext filesystem overhead

            # if the upload is multiparted, the manifest won't be used
            self.manifest = Manifest()
            # supposedly, manifest's destination path; assumes that dst
            # generator yields self.dst.next()+transfer_id
            self.manifest.cloudfs_path = os.path.join(self.dst.next(),
                                                      self.transfer_id, self.manifest_path)
            self.manifest["description"] = self.description
            if self.tags:
                self.manifest["tags"] = self.tags

            # Free tranzit-volume space as soon as each chunk is uploaded.
            def delete_uploaded_chunk(src, dst, retry, chunk_num):
                os.remove(src)
            self._transfer.on(transfer_complete=delete_uploaded_chunk)

            for src in self.src:
                LOG.debug('src: %s, type: %s', src, type(src))
                # Per-source manifest entry; "chunks" is filled by _split below.
                fileinfo = {
                        "name": '',
                        "streamer": None,
                        "compressor": None,
                        "chunks": [],
                }
                self.manifest["files"].append(fileinfo)  # moved here from the bottom
                # prefix becomes the chunk-filename prefix passed to _split.
                prefix = self._tranzit_vol.mpoint
                stream = None
                # cmd: last subprocess in the pipeline (waited on after split);
                # tar: streamer process, gzip: compressor process.
                cmd = tar = gzip = None

                if hasattr(src, 'read'):
                    # Case 1: src is already a readable stream.
                    stream = src
                    if hasattr(stream, 'name'):
                        # os.pipe stream has name '<fdopen>'
                        name = os.path.basename(stream.name).strip('<>')  # ? can stream name end with '/'
                    else:
                        name = 'stream-%s' % hash(stream)
                    fileinfo["name"] = name
                    prefix = os.path.join(prefix, name) + '.'
                elif self.streamer and isinstance(src, basestring) and os.path.isdir(src):
                    # Case 2: src is a directory — stream it with tar or a
                    # custom streamer object exposing popen().
                    name = os.path.basename(src.rstrip('/'))
                    fileinfo["name"] = name

                    if self.streamer == "tar":
                        fileinfo["streamer"] = "tar"
                        prefix = os.path.join(prefix, name) + '.tar.'

                        if src.endswith('/'):  # tar dir content
                            tar_cmdargs = ['/bin/tar', 'cp', '-C', src, '.']
                        else:
                            parent, target = os.path.split(src)
                            tar_cmdargs = ['/bin/tar', 'cp', '-C', parent, target]

                        LOG.debug("LargeTransfer src_generator TAR POPEN")
                        tar = cmd = subprocess.Popen(
                                                        tar_cmdargs,
                                                        stdout=subprocess.PIPE,
                                                        stderr=subprocess.PIPE,
                                                        close_fds=True)
                        LOG.debug("LargeTransfer src_generator AFTER TAR")
                    elif hasattr(self.streamer, "popen"):
                        fileinfo["streamer"] = str(self.streamer)
                        prefix = os.path.join(prefix, name) + '.'

                        LOG.debug("LargeTransfer src_generator custom streamer POPEN")
                        # TODO: self.streamer.args += src
                        tar = cmd = self.streamer.popen(stdin=None)
                        LOG.debug("LargeTransfer src_generator after custom streamer POPEN")
                    stream = tar.stdout
                elif isinstance(src, basestring) and os.path.isfile(src):
                    # Case 3: src is a plain file path.
                    name = os.path.basename(src)
                    fileinfo["name"] = name
                    prefix = os.path.join(prefix, name) + '.'

                    stream = open(src)
                else:
                    raise ValueError('Unsupported src: %s' % src)

                # Optional compression stage, chained onto whatever stream
                # the cases above produced.
                if self.compressor == "gzip":
                    fileinfo["compressor"] = "gzip"
                    prefix += 'gz.'
                    LOG.debug("LargeTransfer src_generator GZIP POPEN")
                    gzip = cmd = subprocess.Popen(
                                            [self._gzip_bin(), '-5'],
                                            stdin=stream,
                                            stdout=subprocess.PIPE,
                                            stderr=subprocess.PIPE,
                                            close_fds=True)
                    LOG.debug("LargeTransfer src_generator AFTER GZIP")
                    if tar:
                        # Allow tar to receive SIGPIPE if gzip exits.
                        tar.stdout.close()
                    stream = gzip.stdout
                # custom compressor
                elif hasattr(self.compressor, "popen"):
                    fileinfo["compressor"] = str(self.compressor)
                    LOG.debug("LargeTransfer src_generator custom compressor POPEN")
                    cmd = self.compressor.popen(stdin=stream)
                    LOG.debug("LargeTransfer src_generator after custom compressor POPEN")
                    if tar:
                        tar.stdout.close()
                    stream = cmd.stdout

                # Split the final stream into chunks and hand each chunk
                # to the transfer; chunk metadata goes into the manifest.
                for filename, md5sum, size in self._split(stream, prefix):
                    fileinfo["chunks"].append((os.path.basename(filename), md5sum, size))
                    LOG.debug("LargeTransfer src_generator yield %s", filename)
                    yield filename
                if cmd:
                    # Reap the pipeline process; communicate() only after the
                    # stream is fully consumed by _split above.
                    out, err = cmd.communicate()
                    if err:
                        LOG.debug("LargeTransfer src_generator cmd pipe stderr: %s", err)

            # send manifest to file transfer
            if not self.multipart:
                LOG.debug("Manifest: %s", self.manifest.data)
                manifest_f = os.path.join(self._tranzit_vol.mpoint, self.manifest_path)
                self.manifest.write(manifest_f)
                LOG.debug("LargeTransfer yield %s", manifest_f)
                yield manifest_f

        elif not self._up:
            # Download direction.
            def on_transfer_error(*args):
                LOG.debug("transfer_error event, shutting down")
                self.kill()
            self._transfer.on(transfer_error=on_transfer_error)

            # The first yielded object will be the manifest, so
            # catch_manifest is a listener that's supposed to trigger only
            # once and unsubscribe itself.
            def wait_manifest(src, dst, retry, chunk_num):
                self._transfer.un('transfer_complete', wait_manifest)
                self._manifest_ready.set()
            self._transfer.on(transfer_complete=wait_manifest)


            manifest_path = self.src
            yield manifest_path

            # ? except EventInterrupt: save exc and return
            self._manifest_ready.wait()

            # we should have the manifest on the tmpfs by now
            manifest_local = os.path.join(self._tranzit_vol.mpoint,
                                          os.path.basename(manifest_path))
            manifest = Manifest(manifest_local)
            os.remove(manifest_local)
            remote_path = os.path.dirname(manifest_path)

            # add ready and done events to each chunk without breaking the
            # chunk order
            with self._chunks_events_access:
                if not self._killed:
                    self.files = copy(manifest["files"])
                    for file_ in self.files:
                        file_["chunks"] = OrderedDict([(
                                chunk[0], {
                                        "md5sum": chunk[1],
                                        "size": chunk[2] if len(chunk) > 2 else None,
                                        "downloaded": InterruptibleEvent(),
                                        "processed": InterruptibleEvent()
                                }
                        ) for chunk in file_["chunks"]])
                        # chunk is [basename, md5sum, size]

            # launch restorer
            if self._restorer is None:
                LOG.debug("STARTING RESTORER")
                self._restorer = threading.Thread(target=self._dl_restorer)
                self._restorer.start()

            # Back-pressure: block the transfer until the restorer has
            # processed the chunk, then delete the local copy.
            def wait_chunk(src, dst, retry, chunk_num):
                chunk_name = os.path.basename(src)
                for file_ in self.files:
                    if chunk_name in file_["chunks"]:
                        chunk = file_["chunks"][chunk_name]
                chunk["downloaded"].set()
                chunk["processed"].wait()
                os.remove(os.path.join(dst, chunk_name))
            self._transfer.on(transfer_complete=wait_chunk)

            for file_ in self.files:
                for chunk in file_["chunks"]:
                    yield os.path.join(remote_path, chunk)

Example 13

View license
def check_repo(
        limit, pylint='pylint', pylintrc=None, pylint_params='',
        suppress_report=False, always_show_violations=False, ignored_files=None):
    """ Main function doing the checks

    :type limit: float
    :param limit: Minimum score to pass the commit
    :type pylint: str
    :param pylint: Path to pylint executable
    :type pylintrc: str
    :param pylintrc: Path to pylintrc file
    :type pylint_params: str
    :param pylint_params: Custom pylint parameters to add to the pylint command
    :type suppress_report: bool
    :param suppress_report: Suppress report if score is below limit
    :type always_show_violations: bool
    :param always_show_violations: Show violations in case of pass as well
    :type ignored_files: list
    :param ignored_files: List of files to exclude from the validation
    :rtype: bool
    :return: True if every checked file passed (or was ignored), else False
    """
    # Lists are mutable and should not be assigned in function arguments
    if ignored_files is None:
        ignored_files = []

    # List of checked files and their results
    python_files = []

    # Set the exit code; flips to False as soon as one file fails
    all_files_passed = True

    if pylintrc is None:
        # If no config is found, use the old default '.pylintrc'
        pylintrc = pylint_config.find_pylintrc() or '.pylintrc'

    # Stash any unstaged changes while we look at the tree
    with _stash_unstaged():
        # Find Python files
        for filename in _get_list_of_committed_files():
            try:
                if _is_python_file(filename) and \
                        not _is_ignored(filename, ignored_files):
                    python_files.append((filename, None))
            except IOError:
                print('File not found (probably deleted): {}\t\tSKIPPED'.format(
                    filename))

        # Don't do anything if there are no Python files
        if not python_files:
            sys.exit(0)

        # Load any pre-commit-hooks options from a .pylintrc file (if there is one)
        if os.path.exists(pylintrc):
            # NOTE: SafeConfigParser is a deprecated alias of ConfigParser
            # and was removed in Python 3.12; ConfigParser behaves the same
            # for this read-only usage.
            conf = configparser.ConfigParser()
            conf.read(pylintrc)
            if conf.has_option('pre-commit-hook', 'command'):
                pylint = conf.get('pre-commit-hook', 'command')
            if conf.has_option('pre-commit-hook', 'params'):
                pylint_params += ' ' + conf.get('pre-commit-hook', 'params')
            if conf.has_option('pre-commit-hook', 'limit'):
                limit = float(conf.get('pre-commit-hook', 'limit'))

        # Pylint Python files
        for i, (python_file, score) in enumerate(python_files, start=1):
            # Allow __init__.py files to be completely empty
            if os.path.basename(python_file) == '__init__.py':
                if os.stat(python_file).st_size == 0:
                    print(
                        'Skipping pylint on {} (empty __init__.py)..'
                        '\tSKIPPED'.format(python_file))
                    continue

            # Start pylinting
            sys.stdout.write("Running pylint on {} (file {}/{})..\t".format(
                python_file, i, len(python_files)))
            sys.stdout.flush()
            try:
                # Build the pylint command; an explicit --rcfile in the
                # custom params takes precedence over our pylintrc.
                command = [pylint]
                if pylint_params:
                    command += pylint_params.split()
                    if '--rcfile' not in pylint_params:
                        command.append('--rcfile={}'.format(pylintrc))
                else:
                    command.append('--rcfile={}'.format(pylintrc))

                command.append(python_file)
                proc = subprocess.Popen(
                    command,
                    stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE)

                out, _ = proc.communicate()
            except OSError:
                print("\nAn error occurred. Is pylint installed?")
                sys.exit(1)

            # Verify the score
            # (assumes _parse_score returns a float even on failure — TODO confirm)
            score = _parse_score(out)
            ignored = _check_ignore(out)
            if ignored or score >= float(limit):
                status = 'PASSED'
            else:
                status = 'FAILED'
                all_files_passed = False

            # Add some output.
            # BUG FIX: the old '{:.2}'.format(decimal.Decimal(score)) printed
            # two *significant* digits, so a perfect 10.0 was shown as '1E+1'
            # and 9.37 as '9.4'. '{:.2f}' always prints two decimal places.
            print('{:.2f}/10.00\t{}{}'.format(
                score,
                status,
                ignored and '\tIGNORED' or ''))

            status_check_list = ['FAILED']

            if always_show_violations:
                status_check_list.append('PASSED')

            if status in status_check_list:
                if suppress_report:
                    # Re-run without the report section to show only violations.
                    command.append('--reports=n')
                    proc = subprocess.Popen(
                        command,
                        stdout=subprocess.PIPE,
                        stderr=subprocess.PIPE)
                    out, _ = proc.communicate()

                print(_futurize_str(out))

    return all_files_passed

Example 14

Project: Breach-Miner
Source File: breachminer.py
View license
def invokeBM(EmailList):
    os.system('clear')
    print banner
    print ("|n")
    choice = raw_input("\033[92m [*] Do you want to go for a detailed analysis \033[93m[Y/N] : ")
    flag = 'false'
    count = 1
    banner_html = create_html()
    html_file = 'Files/Results.html'
    print ("\n  [*] "+"\033[92m"+"I am mining ... Sit back and relax !!!")
    try:
        with open(html_file, 'w') as res:
            res.write(banner_html)
            with open(EmailList) as f:
                for email in f:
                    Url1 = urllib.quote(email, safe='')
                    Url = BaseUrl+Url1
                    Url = Url[:-3]
                    headers = None
                    r = requests.get(Url, headers = headers)
                    try:
                        JsonData =  (r.json())
                    except ValueError:
                        print "\n \033[31m [*] No data found for " + email
                        
                    if (r.status_code == 200):
                        print ('\n')
                        print ("\033[94m *************************************************************************************")
                        print '  \033[93m  [*] Located email account in leaked data dumps for : \033[93m'+email
                        print ("\033[94m *************************************************************************************")
                        print ('\n')
                        for item in JsonData:
                            source = item.get('Source')
                            did = item.get('Id')
                            title = item.get('Title')
                            if title is None:
                                title = "None"
                                
                            if choice.lower() == 'n':
                                print ('\n')
                                print "\033[92m Title of the dump : "+title
                                print "\033[92m Source of the dump : "+source
                                print "\033[92m Breach data can be found at : "+source+"/"+did
                                print ('\n')
                                
                            if choice.lower() == 'y':
                                if source == 'Pastebin':
                                    puid = did
                                    headers = None
                                    purl = 'http://pastebin.com/raw.php?i='+puid
                                    purl1 = 'http://pastebin.com/'+puid
                                    r1 = requests.get(purl, headers = headers)
                                    if r1.status_code != 302:
                                        if r1.status_code != 404:
                                            print '\n'
                                            print "\033[94m"+"=============================================================================================================="
                                            print "\033[98m [*]   Got It !!! Dump found at 033[31m "+purl+' for email account \033[93m'+email
                                            print "\033[94m"+"=============================================================================================================="
                                            CurrPath =  os.getcwd()+'/tmp.txt'
                                            grab = str('wget '+purl+' -O  '+CurrPath+' > /dev/null 2>&1')
                                            os.system(grab)
                                            #CredMiner(CurrPath, email)
                                            print '\033[92m'
                                            os.system('cat '+CurrPath+' | grep -B 1 -A 1 '+email)
                                            p = subprocess.Popen('cat '+CurrPath+' | grep -B 1 -A 1 '+email, stdout=subprocess.PIPE, shell=True)
                                            (output, err) = p.communicate()
                                            p1 = subprocess.Popen('cat '+CurrPath+' | grep '+email, stdout=subprocess.PIPE, shell=True)
                                            (output1, err1) = p1.communicate()
                                            #print output
                                            res.write('<div style="color: #1aff1a;"">')
                                            res.write('<h4>Data for email account : %s </h4>'%email)
                                            print '\033[31m'
                                            res.write('<p> [*] The dump may be found at %s.\033[92m <br> [*] Details : <br> %s </p>'%(purl1, output))
                                            res.write('<p> [*] More Accurate Details : ')
                                            res.write('<p> [*] The dump may be found at %s.\033[92m <br> [*] Details : <br> %s </p>'%(purl1, output1))
                                            res.write('</div><br>')
                                            if os.path.exists(CurrPath):
                                                #os.system('mv '+CurrPath+' tmp.txt.bkp')
                                                os.system('rm '+CurrPath)
                                            
                                        else:
                                            print "\n \033[31m [*] Sorry !!! The pastebin dumb seems to be missing at "+source+"/"+did+"  :( "
                                            if (count == '1') or (flag != 'true'):
                                                s = raw_input('\033[92m Do you want to search archives for the missing data A(All)/Y(Only This)/N(No) : ')
                                                count = 0
                                            if s.lower() == 'a':
                                                flag = 'true'
                                            if (s.lower() == 'y') or (flag == 'true'):
                                                cache_search(purl1, email) 
                                                                                  
                                
                                if source == 'Pastie':
                                    puid = did
                                    headers = None
                                    purl = 'http://pastie.org/pastes/' + puid + '/text'
                                    purl1 = 'http://pastie.org/pastes/'+puid
                                    r1 = requests.get(purl, headers = headers)
                                    if r1.status_code != 302:
                                        if r1.status_code != 404:
                                            print '\n'
                                            print "\033[94m"+"=============================================================================================================="
                                            print "\033[98m [*]   Got It !!! Dump found at 033[31m "+purl+' for email account \033[93m'+email
                                            print "\033[94m"+"=============================================================================================================="
                                            CurrPath =  os.getcwd()+'/tmp.txt'
                                            grab = str('wget '+purl+' -O  '+CurrPath+' > /dev/null 2>&1')
                                            os.system(grab)
                                            #CredMiner(CurrPath, email)
                                            print '\033[92m'
                                            os.system('cat '+CurrPath+' | grep -B 1 -A 1 '+email)
                                            p = subprocess.Popen('cat '+CurrPath+' | grep -B 1 -A 1 '+email, stdout=subprocess.PIPE, shell=True)
                                            (output, err) = p.communicate()
                                            p1 = subprocess.Popen('cat '+CurrPath+' | grep '+email, stdout=subprocess.PIPE, shell=True)
                                            (output1, err1) = p1.communicate()
                                            #print output
                                            res.write('<div style="color: #1aff1a;"">')
                                            res.write('<h4>Data for email account : %s </h4>'%email)
                                            print '\033[31m'
                                            res.write('<p> [*] The dump may be found at %s.\033[92m <br> [*] Details : <br> %s </p>'%(purl1, output))
                                            res.write('<p> [*] More Accurate Details : ')
                                            res.write('<p> [*] The dump may be found at %s.\033[92m <br> [*] Details : <br> %s </p>'%(purl1, output1))
                                            res.write('</div><br>')
                                            if os.path.exists(CurrPath):
                                                #os.system('mv '+CurrPath+' tmp.txt.bkp')
                                                os.system('rm '+CurrPath)
                                                
                                        else:
                                            print "\n \033[31m [*] Sorry !!! The pastie dumb seems to be missing at "+source+"/"+did+"  :( "
                                            if (count == '1') or (flag != 'true'):
                                                s = raw_input('\033[92m Do you want to search archives for the missing data A(All)/Y(Only This)/N(No) : ')
                                                count = 0
                                            if s.lower() == 'a':
                                                flag = 'true'
                                            if (s.lower() == 'y') or (flag == 'true'):
                                                cache_search(purl1, email) 
                                            
                                            
                                if source == 'Slexy':
                                    puid = did
                                    headers = {'Referer': 'http://slexy.org/view/' + puid}
                                    purl = 'http://slexy.org/raw/' + puid
                                    purl1 = 'http://slexy.org/view/'+puid
                                    r1 = requests.get(purl, headers = headers)
                                    if r1.status_code != 302:
                                        if r1.status_code != 404:
                                            print '\n'
                                            print "\033[94m"+"=============================================================================================================="
                                            print "\033[98m [*]   Got It !!! Dump found at 033[31m"+purl+' for email account \033[93m'+email
                                            print "\033[94m"+"=============================================================================================================="
                                            CurrPath =  os.getcwd()+'/tmp.txt'
                                            grab = str('wget '+purl+' -O  '+CurrPath+' > /dev/null 2>&1')
                                            os.system(grab)
                                            #CredMiner(CurrPath, email)
                                            print '\033[92m'
                                            os.system('cat '+CurrPath+' | grep -B 1 -A 1 '+email)
                                            p = subprocess.Popen('cat '+CurrPath+' | grep -B 1 -A 1 '+email, stdout=subprocess.PIPE, shell=True)
                                            (output, err) = p.communicate()
                                            p1 = subprocess.Popen('cat '+CurrPath+' | grep '+email, stdout=subprocess.PIPE, shell=True)
                                            (output1, err1) = p1.communicate()
                                            #print output
                                            res.write('<div style="color: #1aff1a;"">')
                                            res.write('<h4>Data for email account : %s </h4>'%email)
                                            print '\033[31m'
                                            res.write('<p> [*] The dump may be found at %s.\033[92m <br> [*] Details : <br> %s </p>'%(purl1, output))
                                            res.write('<p> [*] More Accurate Details : ')
                                            res.write('<p> [*] The dump may be found at %s.\033[92m <br> [*] Details : <br> %s </p>'%(purl1, output1))
                                            res.write('</div><br>')
                                            if os.path.exists(CurrPath):
                                                #os.system('mv '+CurrPath+' tmp.txt.bkp')
                                                os.system('rm '+CurrPath)
                                            
                                        else:
                                            print "\n \033[31m [*] Sorry !!! The slexy dumb seems to be missing at "+source+"/"+did+"  :( "
                                            if (count == '1') or (flag != 'true'):
                                                s = raw_input('\033[92m Do you want to search archives for the missing data A(All)/Y(Only This)/N(No) : ')
                                                count = 0
                                            if s.lower() == 'a':
                                                flag = 'true'
                                            if (s.lower() == 'y') or (flag == 'true'):
                                                cache_search(purl1, email) 
            f.close()
        res.close()
    except:
        print 'Something went wrong.. May be I donot have that much skills :('

Example 15

Project: sd-agent
Source File: sd_cpu_stats.py
View license
    def check(self, instance):
        #self.log.debug('hello')
        ##self.gauge('serverdensity.disk.free', 1)
        #self.gauge('serverdensity.disk.free', 1, device_name="/")
        #self.gauge('serverdensity.disk.free', 2, device_name="/var")
        #self.gauge('serverdensity.disk.free', 3, device_name="/home")
        #self.log.debug('hello2')

        def get_value(legend, data, name, filter_value=None):
            "Using the legend and a metric name, get the value or None from the data line"
            if name in legend:
                value = data[legend.index(name)]
                if filter_value is not None:
                    if value > filter_value:
                        return None
                return value

            else:
                # FIXME return a float or False, would trigger type error if not python
                self.log.debug("Cannot extract cpu value %s from %s (%s)" % (name, data, legend))
                return 0.0

        self.log.debug('getCPUStats: start')

        cpu_stats = {}

        if sys.platform == 'linux2':
            self.log.debug('getCPUStats: linux2')

            headerRegexp = re.compile(r'.*?([%][a-zA-Z0-9]+)[\s+]?')
            itemRegexp = re.compile(r'.*?\s+(\d+)[\s+]?')
            valueRegexp = re.compile(r'\d+\.\d+')
            proc = None
            try:
                proc = subprocess.Popen(['mpstat', '-P', 'ALL', '1', '1'], stdout=subprocess.PIPE, close_fds=True)
                stats = proc.communicate()[0]

                if int(pythonVersion[1]) >= 6:
                    try:
                        proc.kill()
                    except Exception:
                        self.log.debug('Process already terminated')

                stats = stats.split('\n')
                header = stats[2]
                headerNames = re.findall(headerRegexp, header)
                device = None

                for statsIndex in range(3, len(stats)):
                    row = stats[statsIndex]

                    if not row:  # skip the averages
                        break

                    deviceMatch = re.match(itemRegexp, row)

                    if string.find(row, 'all') is not -1:
                        device = 'ALL'
                    elif deviceMatch is not None:
                        device = 'CPU%s' % deviceMatch.groups()[0]

                    values = re.findall(valueRegexp, row.replace(',', '.'))

                    cpu_stats[device] = {}
                    for headerIndex in range(0, len(headerNames)):
                        headerName = headerNames[headerIndex]
                        cpu_stats[device][headerName] = values[headerIndex]
                        key = headerName.replace('%', '')
                        self.gauge('serverdensity.cpu.{0}'.format(key), float(values[headerIndex]), device_name=device)

            except OSError:
                # we dont have it installed return nothing
                return False

            except Exception:
                import traceback
                self.log.error("getCPUStats: exception = %s", traceback.format_exc())

                if int(pythonVersion[1]) >= 6:
                    try:
                        if proc is not None:
                            proc.kill()
                    except UnboundLocalError:
                        self.log.debug('Process already terminated')
                    except Exception:
                        self.log.debug('Process already terminated')

                return False

        elif sys.platform == 'darwin':
            self.log.debug('getCPUStats: darwin')

            try:
                proc = subprocess.Popen(['sar', '-u', '1', '2'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
                stats = proc.communicate()[0]

                itemRegexp = re.compile(r'\s+(\d+)[\s+]?')
                titleRegexp = re.compile(r'.*?([%][a-zA-Z0-9]+)[\s+]?')
                titles = []
                values = []
                for line in stats.split('\n'):
                    # top line with the titles in
                    if '%' in line:
                        titles = re.findall(titleRegexp, line)
                    if line and line.startswith('Average:'):
                        values = re.findall(itemRegexp, line)

                if values and titles:
                    cpu_stats['ALL'] = dict(zip(titles, values))
                    for headerIndex in range(0, len(titles)):
                        key = titles[headerIndex].replace('%', '')
                        self.gauge('serverdensity.cpu.{0}'.format(key), float(values[headerIndex]), device_name='ALL')

            except Exception:
                import traceback
                self.log.error('getCPUStats: exception = %s', traceback.format_exc())
                return False

        elif sys.platform.startswith("freebsd"):
            # generate 3 seconds of data
            # tty            ada0              cd0            pass0             cpu
            # tin  tout  KB/t tps  MB/s   KB/t tps  MB/s   KB/t tps  MB/s  us ni sy in id
            # 0    69 26.71   0  0.01   0.00   0  0.00   0.00   0  0.00   2  0  0  1 97
            # 0    78  0.00   0  0.00   0.00   0  0.00   0.00   0  0.00   0  0  0  0 100
            iostats, _, _ = get_subprocess_output(['iostat', '-w', '3', '-c', '2'], self.log)
            lines = [l for l in iostats.splitlines() if len(l) > 0]
            legend = [l for l in lines if "us" in l]
            if len(legend) == 1:
                headers = legend[0].split()
                data = lines[-1].split()
                cpu_user = get_value(headers, data, "us")
                cpu_nice = get_value(headers, data, "ni")
                cpu_sys = get_value(headers, data, "sy")
                cpu_intr = get_value(headers, data, "in")
                cpu_idle = get_value(headers, data, "id")
                self.gauge('serverdensity.cpu.usr', float(cpu_user), device_name='ALL')
                self.gauge('serverdensity.cpu.nice', float(cpu_nice), device_name='ALL')
                self.gauge('serverdensity.cpu.sys', float(cpu_sys), device_name='ALL')
                self.gauge('serverdensity.cpu.irq', float(cpu_intr), device_name='ALL')
                self.gauge('serverdensity.cpu.idle', float(cpu_idle), device_name='ALL')
                cpu_stats['ALL'] = {
                    'usr': cpu_user,
                    'nice': cpu_nice,
                    'sys': cpu_sys,
                    'irq': cpu_intr,
                    'idle': cpu_idle,
                }

            else:
                self.logger.warn("Expected to get at least 4 lines of data from iostat instead of just " + str(iostats[:max(80, len(iostats))]))
                return False

        else:
            self.log.debug('getCPUStats: unsupported platform')
            return False

        self.log.debug('getCPUStats: completed, returning')
        return {'cpuStats': cpu_stats}

Example 16

Project: polytester
Source File: runner.py
View license
    def run_tests(self):
        """Run every configured test command as a subprocess and report results.

        Each entry in ``self.tests`` is launched with ``subprocess.Popen``
        (``shell=True``, stdout/stderr piped).  Per-test state is collected in
        ``self.results`` (a ``Bunch`` of output, return code, parser, test
        object, pass flag) and live processes are tracked in
        ``self.processes``.  After all processes finish, each test's parser
        decides pass/fail and a colored summary is printed.  Exits with status
        1 on failure unless ``self.autoreload`` is set, in which case this
        method blocks forever (until Ctrl-C) so a file watcher can re-trigger.
        """
        try:
            puts()
            puts("Running tests...")
            self.results = {}
            self.processes = {}

            # Verbose mode: run the tests one at a time, echoing their output
            # to the console as it is produced.
            if self.verbose:
                with indent(2):
                    for t in self.tests:
                        p = subprocess.Popen(t.command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                        self.processes[t.short_name] = p
                        self.results[t.short_name] = Bunch(
                            output=u"",
                            return_code=None,
                            parser=t.parser,
                            test_obj=t,
                            passed=None,
                        )
                        # Stream stdout while the process is alive.
                        while p.poll() is None:
                            line = self.non_blocking_read(p.stdout)
                            if line:
                                self.results[t.short_name].output += "\n%s" % line.decode("utf-8")
                                puts(line.decode("utf-8"))
                            time.sleep(0.5)

                        if p.returncode is not None:
                            # Drain whatever is left in the pipes; communicate()
                            # can raise ValueError if the streams were already
                            # closed by the non-blocking reads above.
                            try:
                                out, err = p.communicate()
                            except ValueError:
                                out = None
                                err = None

                            if out:
                                self.results[t.short_name].output += "\n%s" % out.decode("utf-8")
                                puts(out.decode("utf-8"))
                            if err:
                                self.results[t.short_name].output += "\n%s" % err.decode("utf-8")
                                puts(err.decode("utf-8"))
                            self.results[t.short_name].return_code = p.returncode
                            if t.short_name in self.processes:
                                del self.processes[t.short_name]
            else:
                # Non-verbose mode: start every test process up front, then
                # poll them all, accumulating output silently.
                for t in self.tests:
                    self.processes[t.short_name] = subprocess.Popen(
                        t.command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE
                    )
                    self.results[t.short_name] = Bunch(
                        output=u"",
                        return_code=None,
                        parser=t.parser,
                        test_obj=t,
                        passed=None,
                    )
                while len(self.processes.items()) > 0:
                    for name, p in list(self.processes.items()):
                        while p.poll() is None:
                            line = self.non_blocking_read(p.stdout)
                            if line:
                                self.results[name].output += "\n%s" % line.decode("utf-8")
                            time.sleep(0.5)

                        if p.returncode is not None:
                            out, err = p.communicate()
                            if out:
                                self.results[name].output += "\n%s" % out.decode("utf-8")
                            if err:
                                self.results[name].output += "\n%s" % err.decode("utf-8")
                            self.results[name].return_code = p.returncode
                            if name in self.processes:
                                del self.processes[name]

            # Let each test's parser judge its (ANSI-stripped) output and
            # print a per-test pass/fail line.
            all_passed = True
            with indent(2):
                for t in self.tests:
                    name = t.short_name
                    r = self.results[name]
                    r.cleaned_output = self.strip_ansi_escape_codes(r.output)

                    r.passed = r.parser.tests_passed(r)
                    pass_string = ""
                    if not r.passed:
                        all_passed = False
                        # NOTE(review): the bare except below silently ignores
                        # any error raised by the parser's counting helpers —
                        # the summary then just omits the counts.
                        try:
                            if hasattr(r.parser, "num_failed"):
                                pass_string = " %s" % r.parser.num_failed(r)
                            else:
                                pass_string = " some"

                            if hasattr(r.parser, "num_total"):
                                pass_string += " of %s" % r.parser.num_total(r)
                            else:
                                pass_string += ""
                        except:
                            pass
                        puts(colored.red("✘ %s:%s tests failed." % (name, pass_string)))

                        with indent(2):
                            puts(u"%s" % r.output)
                    else:
                        try:
                            if hasattr(r.parser, "num_passed"):
                                pass_string = " %s" % r.parser.num_passed(r)
                            else:
                                pass_string = ""
                        except:
                            pass
                        puts(colored.green("✔" + " %s:%s tests passed." % (name, pass_string)))
            if all_passed:
                puts()
                puts(colored.green("✔ All tests passed."))
                puts()
                # In autoreload mode, block forever; a watcher thread is
                # presumably responsible for re-running — TODO confirm.
                if self.autoreload:
                    while True:
                        time.sleep(1)
            else:
                self._fail("✘ Tests failed.")
                puts()
                if not self.autoreload:
                    sys.exit(1)
                else:
                    while True:
                        time.sleep(1)
        except KeyboardInterrupt:
            self.handle_keyboard_exception()

Example 17

Project: spiderfoot
Source File: process.py
View license
def launch_tor(tor_cmd = 'tor', args = None, torrc_path = None, completion_percent = 100, init_msg_handler = None, timeout = DEFAULT_INIT_TIMEOUT, take_ownership = False, stdin = None):
  """
  Initializes a tor process. This blocks until initialization completes or we
  error out.

  If tor's data directory is missing or stale then bootstrapping will include
  making several requests to the directory authorities which can take a little
  while. Usually this is done in 50 seconds or so, but occasionally calls seem
  to get stuck, taking well over the default timeout.

  **To work this must log at NOTICE runlevel to stdout.** It does this by
  default, but if you have a 'Log' entry in your torrc then you'll also need
  'Log NOTICE stdout'.

  Note: The timeout argument does not work on Windows, and relies on the global
  state of the signal module.

  :param str tor_cmd: command for starting tor
  :param list args: additional arguments for tor
  :param str torrc_path: location of the torrc for us to use
  :param int completion_percent: percent of bootstrap completion at which
    this'll return
  :param functor init_msg_handler: optional functor that will be provided with
    tor's initialization stdout as we get it
  :param int timeout: time after which the attempt to start tor is aborted, no
    timeouts are applied if **None**
  :param bool take_ownership: asserts ownership over the tor process so it
    aborts if this python process terminates or a :class:`~stem.control.Controller`
    we establish to it disconnects
  :param str stdin: content to provide on stdin

  :returns: **subprocess.Popen** instance for the tor subprocess

  :raises: **OSError** if we either fail to create the tor process or reached a
    timeout without success
  """

  # signal.alarm() isn't available on Windows, so the timeout can't be applied
  if stem.util.system.is_windows():
    timeout = None

  # sanity check that we got a tor binary

  if os.path.sep in tor_cmd:
    # got a path (either relative or absolute), check what it leads to

    if os.path.isdir(tor_cmd):
      raise OSError("'%s' is a directory, not the tor executable" % tor_cmd)
    elif not os.path.isfile(tor_cmd):
      raise OSError("'%s' doesn't exist" % tor_cmd)
  elif not stem.util.system.is_available(tor_cmd):
    raise OSError("'%s' isn't available on your system. Maybe it's not in your PATH?" % tor_cmd)

  # double check that we have a torrc to work with
  if torrc_path not in (None, NO_TORRC) and not os.path.exists(torrc_path):
    raise OSError("torrc doesn't exist (%s)" % torrc_path)

  # starts a tor subprocess, raising an OSError if it fails
  runtime_args, temp_file = [tor_cmd], None

  if args:
    runtime_args += args

  if torrc_path:
    if torrc_path == NO_TORRC:
      # tor requires *some* torrc; hand it an empty temporary file
      temp_file = tempfile.mkstemp(prefix = 'empty-torrc-', text = True)[1]
      runtime_args += ['-f', temp_file]
    else:
      runtime_args += ['-f', torrc_path]

  if take_ownership:
    runtime_args += ['__OwningControllerProcess', str(os.getpid())]

  tor_process = subprocess.Popen(runtime_args, stdout = subprocess.PIPE, stdin = subprocess.PIPE, stderr = subprocess.PIPE)

  if stdin:
    tor_process.stdin.write(stem.util.str_tools._to_bytes(stdin))
    tor_process.stdin.close()

  if timeout:
    def timeout_handler(signum, frame):
      # terminates the uninitialized tor process and raise on timeout

      tor_process.kill()
      raise OSError('reached a %i second timeout without success' % timeout)

    signal.signal(signal.SIGALRM, timeout_handler)
    signal.alarm(timeout)

  # raw strings so '\[' isn't treated as an (invalid) string escape
  bootstrap_line = re.compile(r'Bootstrapped ([0-9]+)%: ')
  problem_line = re.compile(r'\[(warn|err)\] (.*)$')
  last_problem = 'Timed out'

  try:
    while True:
      # Tor's stdout will be read as ASCII bytes. This is fine for python 2, but
      # in python 3 that means it'll mismatch with other operations (for instance
      # the bootstrap_line.search() call later will fail).
      #
      # It seems like python 2.x is perfectly happy for this to be unicode, so
      # normalizing to that.

      init_line = tor_process.stdout.readline().decode('utf-8', 'replace').strip()

      # this will provide empty results if the process is terminated

      if not init_line:
        tor_process.kill()  # ... but best make sure
        raise OSError('Process terminated: %s' % last_problem)

      # provide the caller with the initialization message if they want it

      if init_msg_handler:
        init_msg_handler(init_line)

      # return the process if we're done with bootstrapping

      bootstrap_match = bootstrap_line.search(init_line)
      problem_match = problem_line.search(init_line)

      if bootstrap_match and int(bootstrap_match.group(1)) >= completion_percent:
        return tor_process
      elif problem_match:
        runlevel, msg = problem_match.groups()

        if 'see warnings above' not in msg:
          if ': ' in msg:
            msg = msg.split(': ')[-1].strip()

          last_problem = msg
  finally:
    if timeout:
      signal.alarm(0)  # stop alarm

    tor_process.stdout.close()
    tor_process.stderr.close()

    if temp_file:
      # best-effort cleanup of the empty torrc; only removal errors expected
      try:
        os.remove(temp_file)
      except OSError:
        pass

Example 18

Project: qgisSpaceSyntaxToolkit
Source File: processes.py
View license
    def __init__(self, name=None, target=None, executable=None, copySysPath=True, debug=False, timeout=20, wrapStdout=None):
        """
        ==============  =============================================================
        **Arguments:**
        name            Optional name for this process used when printing messages
                        from the remote process.
        target          Optional function to call after starting remote process.
                        By default, this is startEventLoop(), which causes the remote
                        process to process requests from the parent process until it
                        is asked to quit. If you wish to specify a different target,
                        it must be picklable (bound methods are not).
        copySysPath     If True, copy the contents of sys.path to the remote process
        debug           If True, print detailed information about communication
                        with the child process.
        wrapStdout      If True (default on windows) then stdout and stderr from the
                        child process will be caught by the parent process and
                        forwarded to its stdout/stderr. This provides a workaround
                        for a python bug: http://bugs.python.org/issue3905
                        but has the side effect that child output is significantly
                        delayed relative to the parent output.
        ==============  =============================================================
        """
        # NOTE(review): the `timeout` parameter is accepted but never used in
        # this constructor — confirm whether it is consumed elsewhere.
        if target is None:
            target = startEventLoop
        if name is None:
            name = str(self)
        if executable is None:
            executable = sys.executable
        self.debug = 7 if debug is True else False  # 7 causes printing in white
        
        ## random authentication key
        authkey = os.urandom(20)

        ## Windows seems to have a hard time with hmac 
        if sys.platform.startswith('win'):
            authkey = None

        #print "key:", ' '.join([str(ord(x)) for x in authkey])
        ## Listen for connection from remote process (and find free port number)
        l = multiprocessing.connection.Listener(('localhost', 0), authkey=authkey)
        port = l.address[1]

        ## start remote process, instruct it to run target function
        sysPath = sys.path if copySysPath else None
        bootstrap = os.path.abspath(os.path.join(os.path.dirname(__file__), 'bootstrap.py'))
        self.debugMsg('Starting child process (%s %s)' % (executable, bootstrap))

        # Decide on printing color for this process
        if debug:
            procDebug = (Process._process_count%6) + 1  # pick a color for this process to print in
            Process._process_count += 1
        else:
            procDebug = False
        
        if wrapStdout is None:
            wrapStdout = sys.platform.startswith('win')

        if wrapStdout:
            ## note: we need all three streams to have their own PIPE due to this bug:
            ## http://bugs.python.org/issue3905
            stdout = subprocess.PIPE
            stderr = subprocess.PIPE
            self.proc = subprocess.Popen((executable, bootstrap), stdin=subprocess.PIPE, stdout=stdout, stderr=stderr)
            ## to circumvent the bug and still make the output visible, we use 
            ## background threads to pass data from pipes to stdout/stderr
            self._stdoutForwarder = FileForwarder(self.proc.stdout, "stdout", procDebug)
            self._stderrForwarder = FileForwarder(self.proc.stderr, "stderr", procDebug)
        else:
            self.proc = subprocess.Popen((executable, bootstrap), stdin=subprocess.PIPE)

        targetStr = pickle.dumps(target)  ## double-pickle target so that child has a chance to 
                                          ## set its sys.path properly before unpickling the target
        pid = os.getpid() # we must send pid to child because windows does not have getppid
        
        ## Send everything the remote process needs to start correctly
        ## (handshake payload is pickled onto the child's stdin, then stdin is
        ## closed so the child knows the payload is complete)
        data = dict(
            name=name+'_child', 
            port=port, 
            authkey=authkey, 
            ppid=pid, 
            targetStr=targetStr, 
            path=sysPath, 
            pyside=USE_PYSIDE,
            debug=procDebug
            )
        pickle.dump(data, self.proc.stdin)
        self.proc.stdin.close()
        
        ## open connection for remote process
        self.debugMsg('Listening for child process on port %d, authkey=%s..' % (port, repr(authkey)))
        while True:
            try:
                conn = l.accept()
                break
            except IOError as err:
                if err.errno == 4:  # interrupted; try again
                    continue
                else:
                    raise
        # Hand the established connection to the RemoteEventHandler base class,
        # which drives request/response traffic with the child from here on.
        RemoteEventHandler.__init__(self, conn, name+'_parent', pid=self.proc.pid, debug=self.debug)
        self.debugMsg('Connected to child process.')
        
        atexit.register(self.join)

Example 19

Project: onigiri
Source File: onigiri.py
View license
    def acquire_ram(self, victim, alternative):
        """Acquire a RAM image from the victim machine.

        Scans the victim's F-Response targets for a physical-memory target
        (name ending in ':pmem').  When `alternative` is true the image is
        pulled with PsExec + DumpIt over the network (crashdump '.dmp');
        otherwise the F-Response disk is logged in and imaged with FTK Imager
        (raw '.dd4.001').  Exits the process on unrecoverable failure.

        NOTE(review): this method uses Python 2 syntax (`print` statements,
        `long`) and Windows-only APIs (`WindowsError`, win32com) — it cannot
        run under Python 3 as written.
        """
        targets = victim.Targets
        # physical-memory targets are named '...:pmem'
        pm = re.compile(r'.*:pmem$')
        self.logger.debug('Issue Discovery Request...')
    	for target in targets:
            if pm.search(target.TargetName):
                self.logger.info('Physical Memory found: {0} (DiskType={1})'.format(target.TargetName, target.DiskType))

                dest_path = self.out_path + "\\" + victim.MachineNameOrIP
                img_path = dest_path + "\\pmem"
                # skip targets already imaged when the --skip option is set
                if self.skip and (os.path.exists(img_path + '.dd4.001') or os.path.exists(img_path + '.dmp')):
                    self.logger.info('the RAM image already exists, so skip the acquisition ({0})'.format(img_path))
                    continue
                if not os.path.exists(dest_path):
                    os.mkdir(dest_path)

                if alternative:
                    self.logger.info('acquiring mapped physical memory using PsExec&DumpIt...')
                    # start a local DumpIt listener that receives the image over the network
                    #cmd_listen = [self.dumpit_path, '/l', '/f', img_path + '.dmp.lznt1']
                    cmd_listen = [self.dumpit_path, '/l', '/f', img_path + '.dmp']
                    self.logger.debug('DumpIt Listener cmdline: {}'.format(' '.join(cmd_listen)))
                    proc_listen = subprocess.Popen(cmd_listen, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                    # up to 3 attempts to push DumpIt to the victim via PsExec
                    for i in range(3):
                        self.logger.info('trying... {0}'.format(i+1))
                        dest_host = socket.gethostbyname(socket.gethostname())
                        cmd_psexec = [self.psexec_path, r'\\' + victim.MachineNameOrIP, '-accepteula', '-c', '-f', '-u', self.domain + '\\' + self.user,
                                #'-p', self.password, '-r', 'onigiri', self.dumpit_path, '/t', dest_host, '/a', '/d', '/lznt1'] # /lznt1 through network NOT work
                                '-p', self.password, '-r', 'onigiri', self.dumpit_path, '/t', dest_host, '/a', '/d']
                        self.logger.debug('PsExec cmdline: {}'.format(' '.join(cmd_psexec)))
                        proc_psexec = subprocess.Popen(cmd_psexec, stdout=subprocess.PIPE, stderr=subprocess.PIPE)

                        #stdout_data, stderr_data = proc_psexec.communicate()
                        '''  # for Python 3.3
                        while 1:
                            try:
                                outs, errs = proc_psexec.communicate(timeout=5)
                                break
                            except subprocess.TimeoutExpired:
                                size = os.path.getsize(img_path + '.dmp')
                                sys.stdout.write('\r...{:8d}MB'.format(long(size / (1024 * 1024))))
                                continue
                        '''
                        sleep(2)
                        # poll PsExec, showing the growing image size as progress
                        while proc_psexec.poll() is None:
                            sleep(0.1)
                            #size = os.path.getsize(img_path + '.dmp.lznt1')
                            try:
                                size = os.path.getsize(img_path + '.dmp')
                                sys.stdout.write('\r...{:8d}MB'.format(long(size / (1024 * 1024))))
                            except WindowsError:
                                self.logger.debug('WindowsError: os.path.getsize for {}'.format(img_path + '.dmp'))
                                sleep(1)

                        print '\r\t\t ...Done.'

                        if proc_psexec.returncode == 0:
                            break
                        else:
                            # NOTE(review): BUG — stderr_data is never assigned
                            # before this point (the communicate() call above is
                            # commented out), so a PsExec failure raises
                            # NameError here instead of logging. Confirm and fix.
                            self.logger.error(stderr_data)
                            self.logger.error('PsExec&DumpIt failed.')
                    self.logger.debug('PsExec returncode={0}'.format(proc_psexec.returncode))
                    if proc_psexec.returncode != 0:
                        proc_listen.terminate()
                        self.logger.critical('RAM acquisition failed (PsExec&DumpIt).')
                        self.logger.error("check with the cmdline: {0}".format(' '.join(cmd_psexec)))
                        sys.exit(1)
                    else:
                        stdout_data, stderr_data = proc_listen.communicate()
                    self.logger.debug('DumpIt Listener returncode={0}'.format(proc_listen.returncode))
                    if proc_listen.returncode != 0:
                        self.logger.error(stderr_data)
                        self.logger.critical('RAM acquisition failed (DumpIt Listener).')
                        self.logger.error("check with the cmdline: {0}".format(' '.join(cmd_listen)))
                        sys.exit(1)
                    #self.logger.info('RAM crashdump image saved (lznt1 compressed): {0}'.format(img_path + '.dmp.lznt1'))
                    self.logger.info('RAM crashdump image saved: {0}'.format(img_path + '.dmp'))

                    '''
                    self.logger.info('decompressing...')
                    cmd_decomp = [self.dumpit_path, '/unpack', img_path + '.dmp.lznt1',  img_path + '.dmp']
                    self.logger.debug('DumpIt unpack cmdline: {}'.format(' '.join(cmd_decomp)))
                    proc_decomp = subprocess.Popen(cmd_decomp, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                    stdout_data, stderr_data = proc_decomp.communicate()
                    if proc_decomp.returncode != 0:
                        self.logger.critical('DumpIt decompression failed.')
                        self.logger.error("check with the cmdline: {0}".format(' '.join(cmd_decomp)))
                        sys.exit(1)
                    self.logger.info('Decompressed RAM crashdump image saved: {0}'.format(img_path + '.dmp'))
                    '''

                else:
                    # F-Response path: map the remote pmem as a physical drive,
                    # then image it with FTK Imager.
                    try:
                        self.logger.debug('Login to F-Response Disk...')
                        target.Login()
                    except win32com.client.pywintypes.com_error:
                        self.logger.critical('Login to F-Response Disk failed. Aborted in the previous acquisition? Please check the status on GUI console and logout the pmem manually.')
                        sys.exit(1)
                    #login_check = target.PhysicalDiskMapping
                    #device = target.PhysicalDiskName
                    if target.PhysicalDiskMapping == -1:
                        self.logger.critical('PhysicalDiskMapping failed due to timing issue. Simply try again.')
                        sys.exit(1)
                    device = r'\\.\PhysicalDrive' + str(target.PhysicalDiskMapping)
                    self.logger.info('acquiring mapped physical memory using F-Response&FTKImager ({0})...'.format(device))
                    cmd = [self.ftk_path, device, dest_path + "\\pmem"]

                    self.logger.debug('FTKImager cmdline: {}'.format(' '.join(cmd)))
                    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=-1)
                    # FTK writes progress to stderr with '\r'; re-echo it inline
                    with io.open(proc.stderr.fileno(), closefd=False) as stream: # iter(proc.stdout.readline) doesn't work for '\r'?
                        for line in stream:
                            if line.find('MB') != -1 or line.find('complete') != -1:
                                sys.stdout.write('\r' + line.rstrip('\n'))
                    print ''
                    proc.wait()
                    self.logger.debug('Remove F-Response Disk...')
                    target.Logout()
                    self.logger.debug('returncode={0}'.format(proc.returncode))
                    if proc.returncode != 0:
                        self.logger.critical('RAM acquisition failed (F-Response&FTKImager).')
                        self.logger.error("check with the cmdline: {0}".format(' '.join(cmd)))
                        sys.exit(1)
                    self.logger.info('RAM raw image saved: {0}'.format(img_path + '.dd4.001'))
Example 20

Project: sublimetext-Pandoc
Source File: Pandoc.py
View license
    def run(self, edit, transformation):
        """Transform the current view's contents through pandoc.

        Builds a pandoc command line from `transformation` (input format
        chosen by scope score, plus its configured 'pandoc-arguments'),
        feeds the buffer to pandoc on stdin, and either opens the written
        output file or replaces/creates a buffer with the result.

        :param edit: the Sublime Text edit token for buffer replacement
        :param transformation: dict describing the transformation (keys:
            'scope', 'pandoc-arguments', optional 'out-ext', 'new-buffer',
            'syntax_file')
        """
        # string to work with
        region = sublime.Region(0, self.view.size())
        contents = self.view.substr(region)

        # pandoc executable
        binary_name = 'pandoc.exe' if sublime.platform() == 'windows' else 'pandoc'
        pandoc = _find_binary(binary_name, _s('pandoc-path'))
        if pandoc is None:
            return
        cmd = [pandoc]

        # from format: pick the scope with the highest score for this view
        score = 0
        for scope, c_iformat in transformation['scope'].items():
            c_score = self.view.score_selector(0, scope)
            if c_score <= score:
                continue
            score = c_score
            iformat = c_iformat
        cmd.extend(['-f', iformat])

        # configured parameters
        args = Args(transformation['pandoc-arguments'])
        # Use pandoc output format name as file extension unless a specific
        # 'out-ext' is provided by the transformation.
        try:
            argsext = transformation['out-ext']
        except KeyError:
            argsext = None
        # output format
        oformat = args.get(short=['t', 'w'], long=['to', 'write'])
        oext = argsext

        # pandoc doesn't actually take 'pdf' as an output format
        # see https://github.com/jgm/pandoc/issues/571
        if oformat == 'pdf':
            args = args.remove(
                short=['t', 'w'], long=['to', 'write'], values=['pdf'])

        # if write to file, add -o if necessary, set file path to output_path
        if oformat is not None and oformat in _s('pandoc-format-file'):
            output_path = args.get(short=['o'], long=['output'])
            if output_path is None:
                # note the file extension matches the pandoc format name
                output_path = tempfile.NamedTemporaryFile().name
                # If a specific output format not specified in transformation, default to pandoc format name
                if oext is None:
                    output_path += "." + oformat
                else:
                    output_path += "." + oext
                args.extend(['-o', output_path])

        cmd.extend(args)

        # run pandoc with the buffer contents on stdin, from the file's
        # directory (so relative resources resolve)
        current_file_path = self.view.file_name()
        if current_file_path:
            working_dir = os.path.dirname(current_file_path)
        else:
            working_dir = None
        process = subprocess.Popen(
            cmd, shell=False, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
            stderr=subprocess.PIPE, cwd=working_dir)
        result, error = process.communicate(contents.encode('utf-8'))

        # handle pandoc errors
        if error:
            sublime.error_message('\n\n'.join([
                'Error when running:',
                ' '.join(cmd),
                error.decode('utf-8').strip()]))
            return

        # write pandoc command to console
        print(' '.join(cmd))

        # if write to file, open with the platform's default handler
        if oformat is not None and oformat in _s('pandoc-format-file'):
            try:
                if sublime.platform() == 'osx':
                    subprocess.call(["open", output_path])
                elif sublime.platform() == 'windows':
                    os.startfile(output_path)
                elif os.name == 'posix':
                    subprocess.call(('xdg-open', output_path))
            except Exception:
                # opening failed; at least tell the user where the file went
                sublime.message_dialog('Wrote to file ' + output_path)
            return

        # write to buffer (new or current, per the transformation config)
        if result:
            if transformation['new-buffer']:
                w = self.view.window()
                w.new_file()
                view = w.active_view()
                region = sublime.Region(0, view.size())
            else:
                view = self.view
            view.replace(edit, region, result.decode('utf8').replace('\r\n','\n'))
            view.set_syntax_file(transformation['syntax_file'])

Example 21

Project: WIPSTER
Source File: handler.py
View license
def get_thug(uri, ua, ticket):
    """Analyze *uri* with the Thug low-interaction honeyclient.

    Parameters:
        uri: URL to analyze; an "http://" scheme is prepended for validation
            when missing. Invalid URIs short-circuit without running Thug.
        ua: human-readable user-agent label (a key of the mapping below);
            unknown labels fall back to 'winxpie60'.
        ticket: ticket identifier, forwarded to get_sample() for any files
            Thug downloads.

    Returns:
        dict with keys 'thug_res' (HTML-escaped Thug stderr output),
        'html', 'js_didier' and 'js' (populated by get_html()/get_js()).
    """
    # TODO: these paths should be pulled in from the 'settings' file.
    thug_loc = "/opt/remnux-thug/src/thug.py"
    es_loc = "/opt/remnux-didier/extractscripts.py"

    results = {'thug_res': '',
               'html': '',
               'js_didier': '',
               'js': ''}
    # Thug's per-URL log directory is keyed on the MD5 of the raw URI string.
    md5 = hashlib.md5(uri).hexdigest()

    # Validate the URI up front; refuse to shell out on anything malformed.
    validate = URLValidator()
    if uri.startswith('http'):
        uri_test = uri
    else:
        uri_test = "http://" + uri

    try:
        validate(uri_test)
    except ValidationError:  # bound exception was unused; version-neutral form
        results['thug_res'] = "Invalid URI. Thug was not run."
        return results

    # Map the human-readable user-agent label to Thug's personality name.
    ua_list = {'Internet Explorer 6.0 (Windows XP)': 'winxpie60',
               'Internet Explorer 6.1 (Windows XP)':'winxpie61',
               'Internet Explorer 7.0 (Windows XP)':'winxpie70',
               'Internet Explorer 8.0 (Windows XP)':'winxpie80',
               'Chrome 20.0.1132.47 (Windows XP)':'winxpchrome20',
               'Firefox 12.0 (Windows XP)':'winxpfirefox12',
               'Safari 5.1.7 (Windows XP)':'winxpsafari5',
               'Internet Explorer 6.0 (Windows 2000)':'win2kie60',
               'Internet Explorer 8.0 (Windows 2000)':'win2kie80',
               'Internet Explorer 8.0 (Windows 7)':'win7ie80',
               'Internet Explorer 9.0 (Windows 7)':'win7ie90',
               'Chrome 20.0.1132.47 (Windows 7)':'win7chrome20',
               'Chrome 40.0.2214.91 (Windows 7)':'win7chrome40',
               'Firefox 3.6.13 (Windows 7)':'win7firefox3',
               'Safari 5.1.7 (Windows 7)':'win7safari5',
               'Safari 5.1.1 (MacOS X 10.7.2)':'osx10safari5',
               'Chrome 19.0.1084.54 (MacOS X 10.7.4)':'osx10chrome19',
               'Chrome 26.0.1410.19 (Linux)':'linuxchrome26',
               'Chrome 30.0.1599.15 (Linux)':'linuxchrome30',
               'Firefox 19.0 (Linux)':'linuxfirefox19',
               'Chrome 18.0.1025.166 (Samsung Galaxy S II, Android 4.0.3)':'galaxy2chrome18',
               'Chrome 25.0.1364.123 (Samsung Galaxy S II, Android 4.0.3)':'galaxy2chrome25',
               'Chrome 29.0.1547.59 (Samsung Galaxy S II, Android 4.1.2)':'galaxy2chrome29',
               'Chrome 18.0.1025.133 (Google Nexus, Android 4.0.4)':'nexuschrome18',
               'Safari 7.0 (iPad, iOS 7.0.4)':'ipadsafari7',
               'Safari 8.0 (iPad, iOS 8.0.2)':'ipadsafari8',
               'Chrome 33.0.1750.21 (iPad, iOS 7.1)':'ipadchrome33',
               'Chrome 35.0.1916.41 (iPad, iOS 7.1.1)':'ipadchrome35',
               'Chrome 37.0.2062.52 (iPad, iOS 7.1.2)':'ipadchrome37',
               'Chrome 38.0.2125.59 (iPad, iOS 8.0.2)':'ipadchrome38',
               'Chrome 39.0.2171.45 (iPad, iOS 8.1.1)':'ipadchrome39'}

    ua = ua_list.get(ua, 'winxpie60')

    # Run Thug with the selected user agent, a 5s cap on setTimeout/setInterval
    # delays and a 5 minute overall timeout; logs are written to
    # ./uanalysis/static/urls/<md5>/
    # NOTE(security): uri is interpolated into a shell command line. The
    # URLValidator check above narrows the input, but an argv list with
    # shell=False would be safer — confirm before hardening, since the
    # invocation currently relies on shell interpretation.
    cmd = [thug_loc+" -u "+ua+" -w 5000 -T 300 -F -n ./uanalysis/static/urls/"+md5+" \""+uri+"\""]
    run = subprocess.Popen(cmd,
                           stdout=subprocess.PIPE,
                           stdin=subprocess.PIPE,
                           stderr=subprocess.PIPE,
                           shell=True)

    # Thug writes its report to stderr; escape it for safe display.
    thug_res = cgi.escape(run.communicate()[1])
    results['thug_res'] = thug_res

    # (content-type-dir, md5) tuples for every file Thug logged.
    thug_files = findall(r"\(Content-type: (.*), MD5: (.*)\)", thug_res)

    # (url, md5) pairs matching the logged MD5s, deduplicated in order.
    md5_url_pre = findall(r"URL: (.*)\s{1}\(Content-type:.*MD5: (.*)\)", thug_res)
    md5_url_pair = []
    for pair in md5_url_pre:
        if pair not in md5_url_pair:
            md5_url_pair.append(pair)

    # Relative "<content-type-dir>/<filename>" paths to the files Thug
    # created, deduplicated in order.
    file_list = []
    for subdir, filename in thug_files:
        combined = str(subdir) + "/" + str(filename)
        if combined not in file_list:
            file_list.append(combined)

    for thug_file in file_list:  # for each file created by Thug...
        # ...pull the HTML out of it...
        results = get_html(thug_file, md5, md5_url_pair, results)
        # ...save any downloaded samples under this ticket...
        get_sample(thug_file, md5, ticket)
        # ...and run Didier Stevens' extractscripts.py against it.
        run_extractscripts(thug_file, md5, es_loc)
        # Extracted scripts now live in ./uanalysis/static/urls/<md5>_files/

    base_dir = "./uanalysis/static/urls/" + md5 + "/"

    # Post-process any JavaScript that was produced.
    results = get_js(base_dir, results)

    return results

Example 22

Project: gconv_experiments
Source File: train.py
View license
def train(
    modelfn, trainfn, valfn,
    epochs, batchsize,
    opt, opt_kwargs,
    net_kwargs,
    transformations,
    val_freq,
    save_freq,
    seed,
    gpu,
    silent=False, logme=None):
    """Train a model on .npz data, validating and checkpointing periodically.

    Loads *trainfn* / *valfn* from the directory named by the DATADIR
    environment variable, builds the model described by *modelfn* /
    *net_kwargs* with optimizer *opt* / *opt_kwargs*, and runs *epochs*
    epochs of mini-batch training (batch size *batchsize*, per-batch input
    *transformations*). Every *val_freq* epochs the model is fine-tuned and
    validated; every *save_freq* epochs (if > 0) a checkpoint is saved.
    Logs, checkpoints, the final model and a results pickle are written to
    a freshly created result directory.

    Returns (mean_error, model, resdict): the error of the most recent
    validation pass, the trained model, and the per-epoch
    loss/error/timing curves.

    Raises RuntimeError if DATADIR is not set.
    """

    # Set the seed
    np.random.seed(seed)

    # Load and pre-process the data
    try:
        datadir = os.environ['DATADIR']
    except KeyError:
        raise RuntimeError('Please set DATADIR environment variable (e.g. in ~/.bashrc) '
                           'to a folder containing the required datasets.')

    train_set = np.load(os.path.join(datadir, trainfn))
    val_set = np.load(os.path.join(datadir, valfn))
    train_data = train_set['data']
    train_labels = train_set['labels']
    val_data = val_set['data']
    val_labels = val_set['labels']
    train_data, val_data, train_labels, val_labels = preprocess_mnist_data(
        train_data, val_data, train_labels, val_labels)

    # create result dir
    log_fn, result_dir = create_result_dir(modelfn, logme)

    # create model and optimizer
    model, optimizer = get_model_and_optimizer(result_dir, modelfn, opt, opt_kwargs, net_kwargs, gpu)

    # get the last commit, so the run can be tied to an exact code version
    subp = subprocess.Popen(['git', 'rev-parse', 'HEAD'],
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    out, err = subp.communicate()
    commit = out.strip()
    if err.strip():
        logging.error('Subprocess returned %s' % err.strip())
    logging.info('Commit: ' + commit)

    # Get number of parameters
    # if not silent:
    #     print 'Parameter name, shape, size:'
    #     for p in model.params():
    #         print p.name, p.data.shape, p.data.size
    num_params = sum([p.data.size for p in model.params()])
    logging.info('Number of parameters:' + str(num_params))
    if not silent:
        print 'Number of parameters:' + str(num_params)

    n_train = train_data.shape[0]
    n_val = val_data.shape[0]

    logging.info('start training...')

    # Per-epoch curves, accumulated for the results pickle.
    train_epochs = []
    train_errors = []
    train_losses = []
    train_times = []
    val_epochs = []
    val_errors = []
    val_losses = []
    val_times = []

    begin_time = time.time()

    # Baseline validation pass before any training (logged as epoch 0).
    sum_loss, sum_accuracy = validate(val_data, val_labels, model, batchsize, silent, gpu)
    val_times.append(time.time() - begin_time)
    val_epochs.append(0)
    val_errors.append(1. - sum_accuracy / n_val)
    val_losses.append(sum_loss / n_val)
    msg = 'epoch:{:02d}\ttest mean loss={}, error={}'.format(
        0, sum_loss / n_val, 1. - sum_accuracy / n_val)
    logging.info(msg)
    if not silent:
        print '\n%s' % msg

    # learning loop
    for epoch in range(1, epochs + 1):

        sum_loss, sum_accuracy = train_epoch(
            train_data, train_labels, model, optimizer,
            batchsize, transformations, silent, gpu)
        train_times.append(time.time() - begin_time)
        train_epochs.append(epoch)
        train_errors.append(1. - sum_accuracy / n_train)
        train_losses.append(sum_loss / n_train)
        msg = 'epoch:{:02d}\ttrain mean loss={}, error={}'.format(
            epoch, sum_loss / n_train, 1. - sum_accuracy / n_train)
        logging.info(msg)
        if not silent:
            print '\n%s' % msg

        # Every val_freq epochs: one fine-tuning pass, then validation.
        if epoch % val_freq == 0:
            print 'FINETUNING'
            model.start_finetuning()
            sum_loss, sum_accuracy = train_epoch(
                    train_data, train_labels, model, optimizer,
                    batchsize, transformations, silent, gpu, finetune=True)
            msg = 'epoch:{:02d}\tfinetune mean loss={}, error={}'.format(
                epoch, sum_loss / n_train, 1. - sum_accuracy / n_train)
            logging.info(msg)
            if not silent:
                print '\n%s' % msg

            sum_loss, sum_accuracy = validate(val_data, val_labels, model, batchsize, silent, gpu)
            val_times.append(time.time() - begin_time)
            val_epochs.append(epoch)
            val_errors.append(1. - sum_accuracy / n_val)
            val_losses.append(sum_loss / n_val)
            msg = 'epoch:{:02d}\ttest mean loss={}, error={}'.format(
                epoch, sum_loss / n_val, 1. - sum_accuracy / n_val)
            logging.info(msg)
            if not silent:
                print '\n%s' % msg

            # Error of this validation pass; becomes the returned mean_error.
            # NOTE(review): mean_error is only assigned inside this branch,
            # so epochs < val_freq would raise UnboundLocalError at the
            # return below — confirm whether that configuration is possible.
            mean_error = 1.0 - sum_accuracy / n_val

        if save_freq > 0 and epoch % save_freq == 0:
            print 'Saving model...'
            serializers.save_hdf5(os.path.join(result_dir, 'epoch.' + str(epoch) + '.model'), model)

    print 'Saving model...'
    serializers.save_hdf5(os.path.join(result_dir, 'final.model'), model)

    resdict = {
        'train_times': train_times, 'train_epochs': train_epochs,
        'train_errors': train_errors, 'train_losses': train_losses,
        'val_times': val_times, 'val_epochs': val_epochs,
        'val_errors': val_errors, 'val_losses': val_losses
    }

    print 'Saving results...'
    with open(os.path.join(result_dir, 'results.pickle'), 'wb') as handle:
        pickle.dump(resdict, handle)

    return mean_error, model, resdict

Example 23

Project: reprozip
Source File: functional.py
View license
@in_temp_dir
def functional_tests(raise_warnings, interactive, run_vagrant, run_docker):
    rpz_python = [os.environ.get('REPROZIP_PYTHON', sys.executable)]
    rpuz_python = [os.environ.get('REPROUNZIP_PYTHON', sys.executable)]

    # Can't match on the SignalWarning category here because of a Python bug
    # http://bugs.python.org/issue22543
    if raise_warnings:
        rpz_python.extend(['-W', 'error:signal'])
        rpuz_python.extend(['-W', 'error:signal'])

    if 'COVER' in os.environ:
        rpz_python.extend(['-m'] + os.environ['COVER'].split(' '))
        rpuz_python.extend(['-m'] + os.environ['COVER'].split(' '))

    reprozip_main = tests.parent / 'reprozip/reprozip/main.py'
    reprounzip_main = tests.parent / 'reprounzip/reprounzip/main.py'

    verbose = ['-v'] * 3
    rpz = rpz_python + [reprozip_main.absolute().path] + verbose
    rpuz = rpuz_python + [reprounzip_main.absolute().path] + verbose

    print("Command lines are:\n%r\n%r" % (rpz, rpuz))

    # ########################################
    # testrun /bin/echo
    #

    output = check_output(rpz + ['testrun', '/bin/echo', 'outputhere'])
    assert any(b' 1 | /bin/echo outputhere ' in l
               for l in output.splitlines())

    output = check_output(rpz + ['testrun', '-a', '/fake/path/echo',
                                 '/bin/echo', 'outputhere'])
    assert any(b' 1 | (/bin/echo) /fake/path/echo outputhere ' in l
               for l in output.splitlines())

    # ########################################
    # testrun multiple commands
    #

    check_call(rpz + ['testrun', 'bash', '-c',
                      'cat ../../../../../etc/passwd;'
                      'cd /var/lib;'
                      'cat ../../etc/group'])
    check_call(rpz + ['trace', '--overwrite',
                      'bash', '-c', 'cat /etc/passwd;echo'])
    check_call(rpz + ['trace', '--continue',
                      'sh', '-c', 'cat /etc/group;/usr/bin/id'])
    check_call(rpz + ['pack'])
    check_call(rpuz + ['graph', 'graph.dot'])
    check_call(rpuz + ['graph', 'graph2.dot', 'experiment.rpz'])

    sudo = ['sudo', '-E']  # -E to keep REPROZIP_USAGE_STATS

    # ########################################
    # 'simple' program: trace, pack, info, unpack
    #

    def check_simple(args, stream, infile=1):
        """Run *args* and assert the 'simple' program produced its known output.

        *stream* selects which stream check_output captures ('out' or 'err',
        per the call sites); *infile* selects which input file's expected
        values to check (1 = simple_input.txt, otherwise simple_input2.txt).
        """
        lines = check_output(args, stream).splitlines()
        try:
            anchor = lines.index(b"Read 6 bytes")
        except ValueError:
            # Dump the captured output before re-raising so the failure
            # shows what the program actually printed.
            stderr.write("output = %r\n" % lines)
            raise
        if infile == 1:
            want_ab, want_result = b"a = 29, b = 13", b"result = 42"
        else:  # infile == 2
            want_ab, want_result = b"a = 25, b = 11", b"result = 36"
        assert lines[anchor + 1] == want_ab
        assert lines[anchor + 2] == want_result

    # Build
    build('simple', ['simple.c'])
    # Trace
    check_call(rpz + ['trace', '--overwrite', '-d', 'rpz-simple',
                      './simple',
                      (tests / 'simple_input.txt').path,
                      'simple_output.txt'])
    orig_output_location = Path('simple_output.txt').absolute()
    assert orig_output_location.is_file()
    with orig_output_location.open(encoding='utf-8') as fp:
        assert fp.read().strip() == '42'
    orig_output_location.remove()
    # Read config
    with Path('rpz-simple/config.yml').open(encoding='utf-8') as fp:
        conf = yaml.safe_load(fp)
    other_files = set(Path(f).absolute() for f in conf['other_files'])
    expected = [Path('simple'), (tests / 'simple_input.txt')]
    assert other_files.issuperset([f.resolve() for f in expected])
    # Check input and output files
    inputs_outputs = conf['inputs_outputs']
    # Exactly one input: "arg1", "...simple_input.txt"
    # Output: 'arg2', "...simple_output.txt"
    # There might be more output files: the C coverage files
    found = 0
    for fdict in inputs_outputs:
        if Path(fdict['path']).name == b'simple_input.txt':
            assert fdict['name'] == 'arg1'
            assert fdict['read_by_runs'] == [0]
            assert not fdict.get('written_by_runs')
            found |= 0x01
        elif Path(fdict['path']).name == b'simple_output.txt':
            assert fdict['name'] == 'arg2'
            assert not fdict.get('read_by_runs')
            assert fdict['written_by_runs'] == [0]
            found |= 0x02
        else:
            # No other inputs
            assert not fdict.get('read_by_runs')
    assert found == 0x03
    # Pack
    check_call(rpz + ['pack', '-d', 'rpz-simple', 'simple.rpz'])
    Path('simple').rename('simple.orig')
    # Info
    check_call(rpuz + ['info', 'simple.rpz'])
    # Show files
    check_call(rpuz + ['showfiles', 'simple.rpz'])
    # Lists packages
    check_call(rpuz + ['installpkgs', '--summary', 'simple.rpz'])
    # Unpack directory
    check_call(rpuz + ['directory', 'setup', 'simple.rpz', 'simpledir'])
    # Run directory
    check_simple(rpuz + ['directory', 'run', 'simpledir'], 'err')
    output_in_dir = join_root(Path('simpledir/root'), orig_output_location)
    with output_in_dir.open(encoding='utf-8') as fp:
        assert fp.read().strip() == '42'
    # Delete with wrong command (should fail)
    p = subprocess.Popen(rpuz + ['chroot', 'destroy', 'simpledir'],
                         stderr=subprocess.PIPE)
    out, err = p.communicate()
    assert p.poll() != 0
    err = err.splitlines()
    assert b"Wrong unpacker used" in err[0]
    assert err[1].startswith(b"usage: ")
    # Delete directory
    check_call(rpuz + ['directory', 'destroy', 'simpledir'])
    # Unpack chroot
    check_call(sudo + rpuz + ['chroot', 'setup', '--bind-magic-dirs',
                              'simple.rpz', 'simplechroot'])
    try:
        output_in_chroot = join_root(Path('simplechroot/root'),
                                     orig_output_location)
        # Run chroot
        check_simple(sudo + rpuz + ['chroot', 'run', 'simplechroot'], 'err')
        with output_in_chroot.open(encoding='utf-8') as fp:
            assert fp.read().strip() == '42'
        # Get output file
        check_call(sudo + rpuz + ['chroot', 'download', 'simplechroot',
                                  'arg2:output1.txt'])
        with Path('output1.txt').open(encoding='utf-8') as fp:
            assert fp.read().strip() == '42'
        # Get random file
        check_call(sudo + rpuz + ['chroot', 'download', 'simplechroot',
                                  '%s:binc.bin' % (Path.cwd() / 'simple')])
        assert same_files('simple.orig', 'binc.bin')
        # Replace input file
        check_call(sudo + rpuz + ['chroot', 'upload', 'simplechroot',
                                  '%s:arg1' % (tests / 'simple_input2.txt')])
        check_call(sudo + rpuz + ['chroot', 'upload', 'simplechroot'])
        # Run again
        check_simple(sudo + rpuz + ['chroot', 'run', 'simplechroot'], 'err', 2)
        with output_in_chroot.open(encoding='utf-8') as fp:
            assert fp.read().strip() == '36'
        # Reset input file
        check_call(sudo + rpuz + ['chroot', 'upload', 'simplechroot', ':arg1'])
        # Run again
        check_simple(sudo + rpuz + ['chroot', 'run', 'simplechroot'], 'err')
        with output_in_chroot.open(encoding='utf-8') as fp:
            assert fp.read().strip() == '42'
        # Replace input file via path
        check_call(sudo + rpuz + ['chroot', 'upload', 'simplechroot',
                                  '%s:%s' % (tests / 'simple_input2.txt',
                                             tests / 'simple_input.txt')])
        check_call(sudo + rpuz + ['chroot', 'upload', 'simplechroot'])
        # Run again
        check_simple(sudo + rpuz + ['chroot', 'run', 'simplechroot'], 'err', 2)
        # Delete with wrong command (should fail)
        p = subprocess.Popen(rpuz + ['directory', 'destroy', 'simplechroot'],
                             stderr=subprocess.PIPE)
        out, err = p.communicate()
        assert p.poll() != 0
        err = err.splitlines()
        assert b"Wrong unpacker used" in err[0]
        assert err[1].startswith(b"usage:")
    finally:
        # Delete chroot
        check_call(sudo + rpuz + ['chroot', 'destroy', 'simplechroot'])

    # Use reprounzip-vistrails with chroot
    check_call(sudo + rpuz + ['chroot', 'setup', '--bind-magic-dirs',
                              'simple.rpz', 'simplechroot_vt'])
    try:
        output_in_chroot = join_root(Path('simplechroot_vt/root'),
                                     orig_output_location)
        # Run using reprounzip-vistrails
        check_simple(
            sudo + rpuz_python +
            ['-m', 'reprounzip.plugins.vistrails', '1',
             'chroot', 'simplechroot_vt', '0',
             '--input-file', 'arg1:%s' % (tests / 'simple_input2.txt'),
             '--output-file', 'arg2:output_vt.txt'],
            'err', 2)
        with output_in_chroot.open(encoding='utf-8') as fp:
            assert fp.read().strip() == '36'
    finally:
        # Delete chroot
        check_call(sudo + rpuz + ['chroot', 'destroy', 'simplechroot_vt'])

    if not (tests / 'vagrant').exists():
        check_call(['sudo', 'sh', '-c',
                    'mkdir %(d)s; chmod 777 %(d)s' % {'d': tests / 'vagrant'}])

    # Unpack Vagrant-chroot
    check_call(rpuz + ['vagrant', 'setup/create', '--memory', '512',
                       '--use-chroot', 'simple.rpz',
                       (tests / 'vagrant/simplevagrantchroot').path])
    print("\nVagrant project set up in simplevagrantchroot")
    try:
        if run_vagrant:
            check_simple(rpuz + ['vagrant', 'run', '--no-stdin',
                                 (tests / 'vagrant/simplevagrantchroot').path],
                         'out')
            # Get output file
            check_call(rpuz + ['vagrant', 'download',
                               (tests / 'vagrant/simplevagrantchroot').path,
                               'arg2:voutput1.txt'])
            with Path('voutput1.txt').open(encoding='utf-8') as fp:
                assert fp.read().strip() == '42'
            # Get random file
            check_call(rpuz + ['vagrant', 'download',
                               (tests / 'vagrant/simplevagrantchroot').path,
                               '%s:binvc.bin' % (Path.cwd() / 'simple')])
            assert same_files('simple.orig', 'binvc.bin')
            # Replace input file
            check_call(rpuz + ['vagrant', 'upload',
                               (tests / 'vagrant/simplevagrantchroot').path,
                               '%s:arg1' % (tests / 'simple_input2.txt')])
            check_call(rpuz + ['vagrant', 'upload',
                               (tests / 'vagrant/simplevagrantchroot').path])
            # Run again
            check_simple(rpuz + ['vagrant', 'run', '--no-stdin',
                                 (tests / 'vagrant/simplevagrantchroot').path],
                         'out', 2)
            # Get output file
            check_call(rpuz + ['vagrant', 'download',
                               (tests / 'vagrant/simplevagrantchroot').path,
                               'arg2:voutput2.txt'])
            with Path('voutput2.txt').open(encoding='utf-8') as fp:
                assert fp.read().strip() == '36'
            # Reset input file
            check_call(rpuz + ['vagrant', 'upload',
                               (tests / 'vagrant/simplevagrantchroot').path,
                               ':arg1'])
            # Run again
            check_simple(rpuz + ['vagrant', 'run', '--no-stdin',
                                 (tests / 'vagrant/simplevagrantchroot').path],
                         'out')
            # Get output file
            check_call(rpuz + ['vagrant', 'download',
                               (tests / 'vagrant/simplevagrantchroot').path,
                               'arg2:voutput1.txt'])
            with Path('voutput1.txt').open(encoding='utf-8') as fp:
                assert fp.read().strip() == '42'
            # Replace input file via path
            check_call(rpuz + ['vagrant', 'upload',
                               (tests / 'vagrant/simplevagrantchroot').path,
                               '%s:%s' % (tests / 'simple_input2.txt',
                                          tests / 'simple_input.txt')])
            # Run again
            check_simple(rpuz + ['vagrant', 'run', '--no-stdin',
                                 (tests / 'vagrant/simplevagrantchroot').path],
                         'out', 2)
            # Destroy
            check_call(rpuz + ['vagrant', 'destroy',
                               (tests / 'vagrant/simplevagrantchroot').path])
        elif interactive:
            print("Test and press enter")
            sys.stdin.readline()
    finally:
        if (tests / 'vagrant/simplevagrantchroot').exists():
            (tests / 'vagrant/simplevagrantchroot').rmtree()
    # Unpack Vagrant without chroot
    check_call(rpuz + ['vagrant', 'setup/create', '--dont-use-chroot',
                       'simple.rpz',
                       (tests / 'vagrant/simplevagrant').path])
    print("\nVagrant project set up in simplevagrant")
    try:
        if run_vagrant:
            check_simple(rpuz + ['vagrant', 'run', '--no-stdin',
                                 (tests / 'vagrant/simplevagrant').path],
                         'out')
            # Get output file
            check_call(rpuz + ['vagrant', 'download',
                               (tests / 'vagrant/simplevagrant').path,
                               'arg2:woutput1.txt'])
            with Path('woutput1.txt').open(encoding='utf-8') as fp:
                assert fp.read().strip() == '42'
            # Get random file
            check_call(rpuz + ['vagrant', 'download',
                               (tests / 'vagrant/simplevagrant').path,
                               '%s:binvs.bin' % (Path.cwd() / 'simple')])
            assert same_files('simple.orig', 'binvs.bin')
            # Replace input file
            check_call(rpuz + ['vagrant', 'upload',
                               (tests / 'vagrant/simplevagrant').path,
                               '%s:arg1' % (tests / 'simple_input2.txt')])
            check_call(rpuz + ['vagrant', 'upload',
                               (tests / 'vagrant/simplevagrant').path])
            # Run again
            check_simple(rpuz + ['vagrant', 'run', '--no-stdin',
                                 (tests / 'vagrant/simplevagrant').path],
                         'out', 2)
            # Get output file
            check_call(rpuz + ['vagrant', 'download',
                               (tests / 'vagrant/simplevagrant').path,
                               'arg2:woutput2.txt'])
            with Path('woutput2.txt').open(encoding='utf-8') as fp:
                assert fp.read().strip() == '36'
            # Reset input file
            check_call(rpuz + ['vagrant', 'upload',
                               (tests / 'vagrant/simplevagrant').path,
                               ':arg1'])
            # Run again
            check_simple(rpuz + ['vagrant', 'run', '--no-stdin',
                                 (tests / 'vagrant/simplevagrant').path],
                         'out')
            # Get output file
            check_call(rpuz + ['vagrant', 'download',
                               (tests / 'vagrant/simplevagrant').path,
                               'arg2:voutput1.txt'])
            with Path('voutput1.txt').open(encoding='utf-8') as fp:
                assert fp.read().strip() == '42'
            # Destroy
            check_call(rpuz + ['vagrant', 'destroy',
                               (tests / 'vagrant/simplevagrant').path])
        elif interactive:
            print("Test and press enter")
            sys.stdin.readline()
    finally:
        if (tests / 'vagrant/simplevagrant').exists():
            (tests / 'vagrant/simplevagrant').rmtree()

    # Unpack Docker
    check_call(rpuz + ['docker', 'setup/create', 'simple.rpz', 'simpledocker'])
    print("\nDocker project set up in simpledocker")
    try:
        if run_docker:
            check_call(rpuz + ['docker', 'setup/build', 'simpledocker'])
            check_simple(rpuz + ['docker', 'run', 'simpledocker'], 'out')
            # Get output file
            check_call(rpuz + ['docker', 'download', 'simpledocker',
                               'arg2:doutput1.txt'])
            with Path('doutput1.txt').open(encoding='utf-8') as fp:
                assert fp.read().strip() == '42'
            # Get random file
            check_call(rpuz + ['docker', 'download', 'simpledocker',
                               '%s:bind.bin' % (Path.cwd() / 'simple')])
            assert same_files('simple.orig', 'bind.bin')
            # Replace input file
            check_call(rpuz + ['docker', 'upload', 'simpledocker',
                               '%s:arg1' % (tests / 'simple_input2.txt')])
            check_call(rpuz + ['docker', 'upload', 'simpledocker'])
            check_call(rpuz + ['showfiles', 'simpledocker'])
            # Run again
            check_simple(rpuz + ['docker', 'run', 'simpledocker'], 'out', 2)
            # Get output file
            check_call(rpuz + ['docker', 'download', 'simpledocker',
                               'arg2:doutput2.txt'])
            with Path('doutput2.txt').open(encoding='utf-8') as fp:
                assert fp.read().strip() == '36'
            # Reset input file
            check_call(rpuz + ['docker', 'upload', 'simpledocker',
                               ':arg1'])
            # Run again
            check_simple(rpuz + ['docker', 'run', 'simpledocker'], 'out')
            # Get output file
            check_call(rpuz + ['docker', 'download', 'simpledocker',
                               'arg2:doutput1.txt'])
            with Path('doutput1.txt').open(encoding='utf-8') as fp:
                assert fp.read().strip() == '42'
            # Replace input file via path
            check_call(rpuz + ['docker', 'upload', 'simpledocker',
                               '%s:%s' % (tests / 'simple_input2.txt',
                                          tests / 'simple_input.txt')])
            # Run again
            check_simple(rpuz + ['docker', 'run', 'simpledocker'], 'out', 2)
            # Destroy
            check_call(rpuz + ['docker', 'destroy', 'simpledocker'])
        elif interactive:
            print("Test and press enter")
            sys.stdin.readline()
    finally:
        if Path('simpledocker').exists():
            Path('simpledocker').rmtree()

    # ########################################
    # 'threads' program: testrun
    #

    # Build
    build('threads', ['threads.c'], ['-lpthread'])
    # Trace
    output = check_output(rpz + ['testrun', './threads'], 'err')
    assert any(b'successfully exec\'d /bin/./echo' in l
               for l in output.splitlines())

    # ########################################
    # 'threads2' program: testrun
    #

    # Build
    build('threads2', ['threads2.c'], ['-lpthread'])
    # Trace
    output = check_output(rpz + ['testrun', './threads2'], 'err')
    assert any(b'successfully exec\'d /bin/echo' in l
               for l in output.splitlines())

    # ########################################
    # 'segv' program: testrun
    #

    # Build
    build('segv', ['segv.c'])
    # Trace
    check_call(rpz + ['testrun', './segv'])

    # ########################################
    # 'exec_echo' program: trace, pack, run --cmdline
    #

    # Build
    build('exec_echo', ['exec_echo.c'])
    # Trace
    check_call(rpz + ['trace', '--overwrite',
                      './exec_echo', 'originalexecechooutput'])
    # Pack
    check_call(rpz + ['pack', 'exec_echo.rpz'])
    # Unpack chroot
    check_call(sudo + rpuz + ['chroot', 'setup',
                              'exec_echo.rpz', 'echochroot'])
    try:
        # Run original command-line
        output = check_output(sudo + rpuz + ['chroot', 'run',
                                             'echochroot'])
        assert output == b'originalexecechooutput\n'
        # Prints out command-line
        output = check_output(sudo + rpuz + ['chroot', 'run',
                                             'echochroot', '--cmdline'])
        assert any(b'./exec_echo originalexecechooutput' == s.strip()
                   for s in output.split(b'\n'))
        # Run with different command-line
        output = check_output(sudo + rpuz + [
            'chroot', 'run', 'echochroot',
            '--cmdline', './exec_echo', 'changedexecechooutput'])
        assert output == b'changedexecechooutput\n'
    finally:
        check_call(sudo + rpuz + ['chroot', 'destroy', 'echochroot'])

    # ########################################
    # 'exec_echo' program: testrun
    # This is built with -m32 so that we transition:
    #   python (x64) -> exec_echo (i386) -> echo (x64)
    #

    if sys.maxsize > 2 ** 32:
        # Build
        build('exec_echo32', ['exec_echo.c'], ['-m32'])
        # Trace
        check_call(rpz + ['testrun', './exec_echo32 42'])
    else:
        print("Can't try exec_echo transitions: not running on 64bits")

    # ########################################
    # Tracing non-existing program
    #

    check_call(rpz + ['testrun', './doesntexist'])

    # ########################################
    # 'connect' program: testrun
    #

    # Build
    build('connect', ['connect.c'])
    # Trace
    err = check_output(rpz + ['testrun', './connect'], 'err')
    err = err.split(b'\n')
    assert not any(b'program exited with non-zero code' in l for l in err)
    assert any(re.search(br'process connected to [0-9.]+:80', l)
               for l in err)

    # ########################################
    # 'vfork' program: testrun
    #

    # Build
    build('vfork', ['vfork.c'])
    # Trace
    err = check_output(rpz + ['testrun', './vfork'], 'err')
    err = err.split(b'\n')
    assert not any(b'program exited with non-zero code' in l for l in err)

    # ########################################
    # 'rename' program: trace
    #

    # Build
    build('rename', ['rename.c'])
    # Trace
    check_call(rpz + ['trace', '--overwrite', '-d', 'rename-trace',
                      './rename'])
    with Path('rename-trace/config.yml').open(encoding='utf-8') as fp:
        config = yaml.safe_load(fp)
    # Check that written files were logged
    database = Path.cwd() / 'rename-trace/trace.sqlite3'
    if PY3:
        # On PY3, connect() only accepts unicode
        conn = sqlite3.connect(str(database))
    else:
        conn = sqlite3.connect(database.path)
    conn.row_factory = sqlite3.Row
    rows = conn.execute(
        '''
        SELECT name FROM opened_files
        ''')
    files = set(Path(r[0]) for r in rows)
    for n in ('dir1/file', 'dir2/file', 'dir2/brokensymlink', 'dir2/symlink'):
        if (Path.cwd() / n) not in files:
            raise AssertionError("Missing file: %s" % (Path.cwd() / n))
    conn.close()
    # Check that created files won't be packed
    for f in config.get('other_files'):
        if 'dir2' in Path(f).parent.components:
            raise AssertionError("Created file shouldn't be packed: %s" %
                                 Path(f))

    # ########################################
    # Test shebang corner-cases
    #

    Path('a').symlink('b')
    with Path('b').open('w') as fp:
        fp.write('#!%s 0\nsome content\n' % (Path.cwd() / 'c'))
    Path('b').chmod(0o744)
    Path('c').symlink('d')
    with Path('d').open('w') as fp:
        fp.write('#!e')
    Path('d').chmod(0o744)
    with Path('e').open('w') as fp:
        fp.write('#!/bin/echo')
    Path('e').chmod(0o744)

    # Trace
    out = check_output(rpz + ['trace', '--overwrite', '-d', 'shebang-trace',
                              '--dont-identify-packages', './a', '1', '2'])
    out = out.splitlines()[0]
    assert out == ('e %s 0 ./a 1 2' % (Path.cwd() / 'c')).encode('ascii')

    # Check config
    with Path('shebang-trace/config.yml').open(encoding='utf-8') as fp:
        config = yaml.safe_load(fp)
    other_files = set(Path(f) for f in config['other_files']
                      if f.startswith('%s/' % Path.cwd()))

    # Check database
    database = Path.cwd() / 'shebang-trace/trace.sqlite3'
    if PY3:
        # On PY3, connect() only accepts unicode
        conn = sqlite3.connect(str(database))
    else:
        conn = sqlite3.connect(database.path)
    conn.row_factory = sqlite3.Row
    rows = conn.execute(
        '''
        SELECT name FROM opened_files
        ''')
    opened = [Path(r[0]) for r in rows
              if r[0].startswith('%s/' % Path.cwd())]
    rows = conn.execute(
        '''
        SELECT name, argv FROM executed_files
        ''')
    executed = [(Path(r[0]), r[1]) for r in rows
                if Path(r[0]).lies_under(Path.cwd())]

    print("other_files: %r" % sorted(other_files))
    print("opened: %r" % opened)
    print("executed: %r" % executed)

    assert other_files == set(Path.cwd() / p
                              for p in ('a', 'b', 'c', 'd', 'e'))
    assert opened == [Path.cwd() / 'c', Path.cwd() / 'e']
    assert executed == [(Path.cwd() / 'a', './a\x001\x002\x00')]

    # ########################################
    # Test old packages
    #

    old_packages = [
        ('simple-0.4.0.rpz',
         'https://drive.google.com/uc?export=download&id=0B3ucPz7GSthBVG4xZW1V'
         'eDhXNTQ'),
        ('simple-0.6.0.rpz',
         'https://drive.google.com/uc?export=download&id=0B3ucPz7GSthBbl9SUjhr'
         'cUdtbGs'),
        ('simple-0.7.1.rpz',
         'https://drive.google.com/uc?export=download&id=0B3ucPz7GSthBRGp2Vm5V'
         'QVpWOGs'),
    ]
    for name, url in old_packages:
        print("Testing old package %s" % name)
        f = Path(name)
        if not f.exists():
            download_file(url, f)
        # Info
        check_call(rpuz + ['info', name])
        # Show files
        check_call(rpuz + ['showfiles', name])
        # Lists packages
        check_call(rpuz + ['installpkgs', '--summary', name])
        # Unpack directory
        check_call(rpuz + ['directory', 'setup', name, 'simpledir'])
        # Run directory
        check_simple(rpuz + ['directory', 'run', 'simpledir'], 'err')
        output_in_dir = Path('simpledir/root/tmp')
        output_in_dir = output_in_dir.listdir('reprozip_*')[0]
        output_in_dir = output_in_dir / 'simple_output.txt'
        with output_in_dir.open(encoding='utf-8') as fp:
            assert fp.read().strip() == '42'
        # Delete with wrong command (should fail)
        p = subprocess.Popen(rpuz + ['chroot', 'destroy', 'simpledir'],
                             stderr=subprocess.PIPE)
        out, err = p.communicate()
        assert p.poll() != 0
        err = err.splitlines()
        assert b"Wrong unpacker used" in err[0]
        assert err[1].startswith(b"usage: ")
        # Delete directory
        check_call(rpuz + ['directory', 'destroy', 'simpledir'])

    # ########################################
    # Copies back coverage report
    #

    coverage = Path('.coverage')
    if coverage.exists():
        coverage.copyfile(tests.parent / '.coverage.runpy')

Example 24

Project: blendercam
Source File: __init__.py
View license
	def execute(self, context):
		"""Export the active object to STL and launch the Cura slicer on it.

		Registers a modal-handler via Print3d.handle_add, exports the active
		object to an .stl file next to the .blend file, then starts Cura as a
		background subprocess and spawns a daemon thread that reads its output.
		Returns {'FINISHED'} immediately; slicing continues asynchronously.
		NOTE(review): only the 'CURA' slicer branch is implemented here.
		"""
		Print3d.handle_add(self,context)
		
		
		s=bpy.context.scene
		settings=s.print3d_settings
		ob=bpy.context.active_object
		
		
		'''
		#this was first try - using the slicer directly. 
		if settings.slicer=='CURA':
			fpath=bpy.data.filepath+'_'+ob.name+'.stl'
			gcodepath=bpy.data.filepath+'_'+ob.name+'.gcode'
			enginepath=settings.filepath_engine

			#Export stl, with a scale correcting blenders and Cura size interpretation in stl:
			bpy.ops.export_mesh.stl(check_existing=False, filepath=fpath, filter_glob="*.stl", ascii=False, use_mesh_modifiers=True, axis_forward='Y', axis_up='Z', global_scale=1000)
			
			#this is Cura help line:
			#CuraEngine [-h] [-v] [-m 3x3matrix] [-s <settingkey>=<value>] -o <output.gcode> <model.stl>
			
			#we build the command line here:
			commands=[enginepath]
			
			#add the properties, here add whatever you want exported from cura props, so far it doesn't work. Going with .ini files will be probably better in future:
			unit=1000000#conversion between blender mm unit(0.001 of basic unit) and slicer unit (0.001 mm)
			
			
			
			
			for name in settings.propnames:
				#print(s)
				commands.append('-s')
				commands.append(name+'='+str(eval('settings.'+name)))
				#commands.extend([key,str(propsdict[key])])
				
			commands.extend(['-o', gcodepath,fpath])
			
			print(commands)
			#run cura in background:
			proc = subprocess.Popen(commands,bufsize=1, stdout=subprocess.PIPE,stdin=subprocess.PIPE)
			
			s=''
			for command in commands:
				s+=(command)+' '
			print(s)
			print('gcode file exported:')
			print(gcodepath)
		'''
		#second try - use cura command line options, with .ini files.
		if settings.slicer=='CURA':
			
			# Derive output paths from the .blend file path ([:-6] strips the
			# '.blend' suffix) plus the active object's name.
			opath=bpy.data.filepath[:-6]
			fpath=opath+'_'+ob.name+'.stl'
			gcodepath=opath+'_'+ob.name+'.gcode'
			enginepath=settings.dirpath_engine
			inipath=settings.preset
			tweakCuraPreferences(enginepath,settings.printer)
			#return {'FINISHED'}
			#Export stl, with a scale correcting blenders and Cura size interpretation in stl:
			bpy.ops.export_mesh.stl(check_existing=False, filepath=fpath, filter_glob="*.stl", ascii=False, use_mesh_modifiers=True, axis_forward='Y', axis_up='Z', global_scale=1000)
			
			#this is Cura help line:
			#CuraEngine [-h] [-v] [-m 3x3matrix] [-s <settingkey>=<value>] -o <output.gcode> <model.stl>
			
			#we build the command line here:
			#commands=[enginepath+'python\python.exe,']#,'-m', 'Cura.cura', '%*']
			# Cura is launched from its own install directory so its bundled
			# python and relative resources resolve; this changes the process
			# CWD globally for Blender as a side effect.
			os.chdir(settings.dirpath_engine)
			#print('\n\n\n')
		
			#print(os.listdir())
			commands=['python\\python.exe','-m', 'Cura.cura','-i',inipath, '-s', fpath]
			#commands=[enginepath+'cura.bat', '-s', fpath]
			
			#commands.extend()#'-o', gcodepath,
			
			#print(commands)
			#print('\n\n\n')
			
			# Build a printable command-line string (debug aid; currently unused).
			s=''
			for command in commands:
				s+=(command)+' '
			#print(s)
			
			
			#run cura in background:
			#proc = subprocess.call(commands,bufsize=1, stdout=subprocess.PIPE,stdin=subprocess.PIPE)
			#print(proc)
			# Launch Cura non-blocking with piped stdio; a daemon thread
			# (threadread_print3d) drains stdout so the pipe cannot fill up
			# and stall the child process.
			proc= subprocess.Popen(commands,bufsize=1, stdout=subprocess.PIPE,stdin=subprocess.PIPE)#,env={"PATH": enginepath})
			#print(proc)
			tcom=threadComPrint3d(ob,proc)
			readthread=threading.Thread(target=threadread_print3d, args = ([tcom]), daemon=True)
			readthread.start()
			#self.__class__.print3d_processes=[]
			# Keep a registry of running slicer processes on the operator class
			# so later invocations/handlers can find them; the attribute is
			# created lazily on first use.
			if not hasattr(bpy.ops.object.print3d.__class__,'print3d_processes'):
				bpy.ops.object.print3d.__class__.print3d_processes=[]
			bpy.ops.object.print3d.__class__.print3d_processes.append([readthread,tcom])
			
			#print('gcode file exported:')
			#print(gcodepath)
			
		return {'FINISHED'}

Example 25

Project: VisTrails
Source File: init.py
View license
def _add_tool(path):
    """Load a CLTools JSON description from *path* and register it as a module.

    Parses the ``.clt`` JSON file, builds a ``compute`` method that runs the
    described command-line tool via ``subprocess``, creates a new VisTrails
    module class wrapping it, and registers input/output ports derived from
    the tool's args/stdin/stdout/stderr description.  Returns ``None``; on
    parse failure the error is logged and the tool is skipped.
    NOTE(review): relies on module-level names (SUFFIX, cl_tools, CLTools,
    new_module, configuration, DEFAULTFILESUFFIX, _eintr_retry_call) defined
    elsewhere in this file.  Python 2 only (``unicode``, ``except X, e``).
    """
    # first create classes
    tool_name = os.path.basename(path)
    if isinstance(tool_name, unicode):
        tool_name = tool_name.encode('utf-8')
    if not tool_name.endswith(SUFFIX): # pragma: no cover
        return
    (tool_name, _) = os.path.splitext(tool_name)

    if tool_name in cl_tools: # pragma: no cover
        debug.critical("Package CLTools already added: '%s'" % tool_name)
    try:
        conf = json.load(open(path))
    except ValueError as exc: # pragma: no cover
        debug.critical("Package CLTools could not parse '%s'" % path, exc)
        return

    def compute(self):
        """ 1. read inputs
            2. call with inputs
            3. set outputs
        """
        # add all arguments as an unordered list
        args = [self.conf['command']]
        # 'std_using_files' routes stdin/stdout/stderr through temp files
        # (passing file descriptors to the child) instead of PIPE buffers.
        file_std = 'options' in self.conf and 'std_using_files' in self.conf['options']
        fail_with_cmd = 'options' in self.conf and 'fail_with_cmd' in self.conf['options']
        setOutput = [] # (name, File) - set File contents as output for name
        open_files = []
        stdin = None
        kwargs = {}
        # Translate each declared argument into command-line tokens.
        for type, name, klass, options in self.conf['args']:
            type = type.lower()
            klass = klass.lower()
            if "constant" == type:
                flag = 'flag' in options and options['flag']
                if flag:
                    args.append(flag)
                if name:
                    # if flag==name we assume user tried to name a constant
                    if not name == flag:
                        args.append('%s%s' % (options.get('prefix', ''), name))
            elif "input" == type:
                # handle multiple inputs
                values = self.force_get_input_list(name)
                if values and 'list' == klass:
                    values = values[0]
                    klass = options['type'].lower() \
                      if 'type' in options else 'string'
                for value in values:
                    if 'flag' == klass:
                        if not value:
                            continue
                        if 'flag' in options and options['flag']:
                            value = options['flag']
                        else:
                            # use name as flag
                            value = name
                    elif klass in ('file', 'directory', 'path'):
                        value = value.name
                    # check for flag and append file name
                    if not 'flag' == klass and 'flag' in options:
                        args.append(options['flag'])
                    value = '%s%s' % (options.get('prefix', ''),
                                      value)
                    args.append(value)
            elif "output" == type:
                # output must be a filename but we may convert the result to a string
                # create new file
                file = self.interpreter.filePool.create_file(
                        suffix=options.get('suffix', DEFAULTFILESUFFIX))
                fname = file.name
                if 'prefix' in options:
                    fname = options['prefix'] + fname
                if 'flag' in options:
                    args.append(options['flag'])
                args.append(fname)
                if "file" == klass:
                    self.set_output(name, file)
                elif "string" == klass:
                    setOutput.append((name, file))
                else:
                    raise ValueError
            elif "inputoutput" == type:
                # handle single file that is both input and output
                value = self.get_input(name)

                # create copy of infile to operate on
                outfile = self.interpreter.filePool.create_file(
                        suffix=options.get('suffix', DEFAULTFILESUFFIX))
                try:
                    shutil.copyfile(value.name, outfile.name)
                except IOError, e: # pragma: no cover
                    raise ModuleError(self,
                                      "Error copying file '%s': %s" %
                                      (value.name, debug.format_exception(e)))
                value = '%s%s' % (options.get('prefix', ''), outfile.name)
                # check for flag and append file name
                if 'flag' in options:
                    args.append(options['flag'])
                args.append(value)
                self.set_output(name, outfile)
        # Wire up stdin: either a real file descriptor (file_std) or an
        # in-memory string passed to communicate() through a PIPE.
        if "stdin" in self.conf:
            name, type, options = self.conf["stdin"]
            type = type.lower()
            if self.has_input(name):
                value = self.get_input(name)
                if "file" == type:
                    if file_std:
                        f = open(value.name, 'rb')
                    else:
                        f = open(value.name, 'rb')
                        stdin = f.read()
                        f.close()
                elif "string" == type:
                    if file_std:
                        file = self.interpreter.filePool.create_file()
                        f = open(file.name, 'wb')
                        f.write(value)
                        f.close()
                        f = open(file.name, 'rb')
                    else:
                        stdin = value
                else: # pragma: no cover
                    raise ValueError
                if file_std:
                    open_files.append(f)
                    kwargs['stdin'] = f.fileno()
                else:
                    kwargs['stdin'] = subprocess.PIPE
        if "stdout" in self.conf:
            if file_std:
                name, type, options = self.conf["stdout"]
                type = type.lower()
                file = self.interpreter.filePool.create_file(
                        suffix=DEFAULTFILESUFFIX)
                if "file" == type:
                    self.set_output(name, file)
                elif "string" == type:
                    setOutput.append((name, file))
                else: # pragma: no cover
                    raise ValueError
                f = open(file.name, 'wb')
                open_files.append(f)
                kwargs['stdout'] = f.fileno()
            else:
                kwargs['stdout'] = subprocess.PIPE
        if "stderr" in self.conf:
            if file_std:
                name, type, options = self.conf["stderr"]
                type = type.lower()
                file = self.interpreter.filePool.create_file(
                        suffix=DEFAULTFILESUFFIX)
                if "file" == type:
                    self.set_output(name, file)
                elif "string" == type:
                    setOutput.append((name, file))
                else: # pragma: no cover
                    raise ValueError
                f = open(file.name, 'wb')
                open_files.append(f)
                kwargs['stderr'] = f.fileno()
            else:
                kwargs['stderr'] = subprocess.PIPE

        # With fail_with_cmd the tool's exit code is checked against 0;
        # otherwise an optional expected 'return_code' from the config is used.
        if fail_with_cmd:
            return_code = 0
        else:
            return_code = self.conf.get('return_code', None)

        env = {}
        # 0. add defaults
        # 1. add from configuration
        # 2. add from module env
        # 3. add from env port
        if configuration.check('env'):
            try:
                for var in configuration.env.split(";"):
                    key, value = var.split('=')
                    key = key.strip()
                    value = value.strip()
                    if key:
                        env[key] = value
            except Exception, e: # pragma: no cover
                raise ModuleError(self,
                                  "Error parsing configuration env: %s" % (
                                  debug.format_exception(e)))

        if 'options' in self.conf and 'env' in self.conf['options']:
            try:
                for var in self.conf['options']['env'].split(";"):
                    key, value = var.split('=')
                    key = key.strip()
                    value = value.strip()
                    if key:
                        env[key] = value
            except Exception, e: # pragma: no cover
                raise ModuleError(self,
                                  "Error parsing module env: %s" % (
                                  debug.format_exception(e)))
            
        if 'options' in self.conf and 'env_port' in self.conf['options']:
            for e in self.force_get_input_list('env'):
                try:
                    for var in e.split(';'):
                        if not var:
                            continue
                        key, value = var.split('=')
                        key = key.strip()
                        value = value.strip()
                        if key:
                            env[key] = value
                except Exception, e: # pragma: no cover
                    raise ModuleError(self,
                                      "Error parsing env port: %s" % (
                                      debug.format_exception(e)))

        if env:
            # Overlay collected vars on a copy of the current environment.
            kwargs['env'] = dict(os.environ)
            kwargs['env'].update(env)
            # write to execution provenance
            env = ';'.join(['%s=%s'%(k,v) for k,v in env.iteritems()])
            self.annotate({'execution_env': env})

        if 'dir' in self.conf:
            kwargs['cwd'] = self.conf['dir']

        process = subprocess.Popen(args, **kwargs)
        if file_std:
            process.wait()
        else:
            #if stdin:
            #    print "stdin:", len(stdin), stdin[:30]
            # _eintr_retry_call retries communicate() if interrupted by EINTR.
            stdout, stderr = _eintr_retry_call(process.communicate, stdin)
            #stdout, stderr = process.communicate(stdin)
            #if stdout:
            #    print "stdout:", len(stdout), stdout[:30]
            #if stderr:
            #    print "stderr:", len(stderr), stderr[:30]

        if return_code is not None:
            if process.returncode != return_code:
                raise ModuleError(self, "Command returned %d (!= %d)" % (
                                  process.returncode, return_code))
        self.set_output('return_code', process.returncode)

        for f in open_files:
            f.close()

        # Deferred string outputs: read back the temp files written above.
        for name, file in setOutput:
            f = open(file.name, 'rb')
            self.set_output(name, f.read())
            f.close()

        if not file_std:
            if "stdout" in self.conf:
                name, type, options = self.conf["stdout"]
                type = type.lower()
                if "file" == type:
                    file = self.interpreter.filePool.create_file(
                            suffix=DEFAULTFILESUFFIX)
                    f = open(file.name, 'wb')
                    f.write(stdout)
                    f.close()
                    self.set_output(name, file)
                elif "string" == type:
                    self.set_output(name, stdout)
                else: # pragma: no cover
                    raise ValueError
            if "stderr" in self.conf:
                name, type, options = self.conf["stderr"]
                type = type.lower()
                if "file" == type:
                    file = self.interpreter.filePool.create_file(
                            suffix=DEFAULTFILESUFFIX)
                    f = open(file.name, 'wb')
                    f.write(stderr)
                    f.close()
                    self.set_output(name, file)
                elif "string" == type:
                    self.set_output(name, stderr)
                else: # pragma: no cover
                    raise ValueError


    # create docstring
    d = """This module is a wrapper for the command line tool '%s'""" % \
        conf['command']
    # create module
    M = new_module(CLTools, tool_name, {"compute": compute,
                                        "conf": conf,
                                        "tool_name": tool_name,
                                        "__doc__": d})
    reg = vistrails.core.modules.module_registry.get_module_registry()
    reg.add_module(M, package=identifiers.identifier,
                   package_version=identifiers.version)

    def to_vt_type(s):
        # add recognized types here - default is String
        return '(basic:%s)' % \
          {'file':'File', 'path':'Path', 'directory': 'Directory',
           'flag':'Boolean', 'list':'List',
           'float':'Float','integer':'Integer'
          }.get(s.lower(), 'String')
    # add module ports
    if 'stdin' in conf:
        name, type, options = conf['stdin']
        optional = 'required' not in options
        reg.add_input_port(M, name, to_vt_type(type), optional=optional)
    if 'stdout' in conf:
        name, type, options = conf['stdout']
        optional = 'required' not in options
        reg.add_output_port(M, name, to_vt_type(type), optional=optional)
    if 'stderr' in conf:
        name, type, options = conf['stderr']
        optional = 'required' not in options
        reg.add_output_port(M, name, to_vt_type(type), optional=optional)
    if 'options' in conf and 'env_port' in conf['options']:
        reg.add_input_port(M, 'env', to_vt_type('string'))
    for type, name, klass, options in conf['args']:
        optional = 'required' not in options
        if 'input' == type.lower():
            reg.add_input_port(M, name, to_vt_type(klass), optional=optional)
        elif 'output' == type.lower():
            reg.add_output_port(M, name, to_vt_type(klass), optional=optional)
        elif 'inputoutput' == type.lower():
            reg.add_input_port(M, name, to_vt_type('file'), optional=optional)
            reg.add_output_port(M, name, to_vt_type('file'), optional=optional)
    reg.add_output_port(M, 'return_code', to_vt_type('integer'))
    cl_tools[tool_name] = M

Example 26

Project: pywikibot-core
Source File: utils.py
View license
def execute(command, data_in=None, timeout=0, error=None):
    """
    Execute a command and capture outputs.

    On Python 2.6 it adds an option to ignore the deprecation warning from
    the cryptography package after the first entry of the command parameter.

    @param command: executable to run and arguments to use
    @type command: list of unicode
    @param data_in: text written to the child's stdin (encoded with
        config.console_encoding); stdin is only piped when this is not None
    @param timeout: seconds to poll (1s steps) before killing the child;
        0 means no timeout unless C{error} is given
    @param error: substring of a stderr line that, once seen, stops the
        wait early (the child is then killed if still running)
    @return: dict with keys 'exit_code', 'stdout' and 'stderr'
        (both streams decoded with config.console_encoding)
    """
    if PYTHON_VERSION < (2, 7):
        command.insert(
            1, '-W ignore:{0}:DeprecationWarning'.format(PYTHON_26_CRYPTO_WARN))

    # Any environment variables added on Windows must be of type
    # str() on Python 2.
    env = os.environ.copy()

    # Python issue 6906
    if PYTHON_VERSION < (2, 6, 6):
        for var in ('TK_LIBRARY', 'TCL_LIBRARY', 'TIX_LIBRARY'):
            if var in env:
                env[var] = env[var].encode('mbcs')

    # Prevent output by test package; e.g. 'max_retries reduced from x to y'
    env[str('PYWIKIBOT_TEST_QUIET')] = str('1')

    # sys.path may have been modified by the test runner to load dependencies.
    pythonpath = os.pathsep.join(sys.path)
    if OSWIN32 and PY2:
        pythonpath = str(pythonpath)
    env[str('PYTHONPATH')] = pythonpath
    env[str('PYTHONIOENCODING')] = str(config.console_encoding)

    # LC_ALL is used by i18n.input as an alternative for userinterface_lang
    # A complete locale string needs to be created, so the country code
    # is guessed, however it is discarded when loading config.
    if config.userinterface_lang:
        current_locale = locale.getdefaultlocale()[0]
        if current_locale in [None, 'C']:
            current_locale = 'en'
        else:
            current_locale = current_locale.split('.')[0]
        locale_prefix = str(config.userinterface_lang + '_')

        if not current_locale.startswith(locale_prefix):
            locale_code = generate_locale(
                config.userinterface_lang,
                encoding=config.console_encoding)

            env[str('LC_ALL')] = str(locale_code)

            if OSWIN32:
                # This is not multiprocessing safe, as it affects all processes
                win32_set_global_locale(locale_code)
        else:
            # current_locale doubles as the "restore this locale later" flag;
            # None means nothing to restore at the end.
            current_locale = None
    else:
        current_locale = None

    # Set EDITOR to an executable that ignores all arguments and does nothing.
    env[str('EDITOR')] = str('call' if OSWIN32 else 'true')

    options = {
        'stdout': subprocess.PIPE,
        'stderr': subprocess.PIPE
    }
    if data_in is not None:
        options['stdin'] = subprocess.PIPE

    try:
        p = subprocess.Popen(command, env=env, **options)
    except TypeError as e:
        # Generate a more informative error
        # (on Windows/PY2, Popen rejects unicode in the environment dict)
        if OSWIN32 and PY2:
            unicode_env = [(k, v) for k, v in os.environ.items()
                           if not isinstance(k, str) or
                           not isinstance(v, str)]
            if unicode_env:
                raise TypeError(
                    '%s: unicode in os.environ: %r' % (e, unicode_env))

            child_unicode_env = [(k, v) for k, v in env.items()
                                 if not isinstance(k, str) or
                                 not isinstance(v, str)]
            if child_unicode_env:
                raise TypeError(
                    '%s: unicode in child env: %r' % (e, child_unicode_env))
        raise

    if data_in is not None:
        p.stdin.write(data_in.encode(config.console_encoding))
        p.stdin.flush()  # _communicate() otherwise has a broken pipe

    stderr_lines = b''
    waited = 0
    while (error or (waited < timeout)) and p.poll() is None:
        # In order to kill 'shell' and others early, read only a single
        # line per second, and kill the process as soon as the expected
        # output has been seen.
        # Additional lines will be collected later with p.communicate()
        if error:
            line = p.stderr.readline()
            stderr_lines += line
            if error in line.decode(config.console_encoding):
                break
        time.sleep(1)
        waited += 1

    if (timeout or error) and p.poll() is None:
        p.kill()

    if p.poll() is not None:
        # Child already exited: drain remaining stderr before communicate()
        # so the lines read manually above and the rest are kept in order.
        stderr_lines += p.stderr.read()

    data_out = p.communicate()

    if OSWIN32 and current_locale:
        win32_set_global_locale(current_locale)

    return {'exit_code': p.returncode,
            'stdout': data_out[0].decode(config.console_encoding),
            'stderr': (stderr_lines + data_out[1]).decode(config.console_encoding)}

Example 27

Project: Rubustat
Source File: rubustat_daemon.py
View license
    def run(self):
        lastLog = datetime.datetime.now()
        lastMail = datetime.datetime.now()
        self.configureGPIO()
        while True:

            #change cwd to wherever rubustat_daemon is
            abspath = os.path.abspath(__file__)
            dname = os.path.dirname(abspath)
            os.chdir(dname)

            indoorTemp = float(getIndoorTemp())
            hvacState = int(self.getHVACState())

            file = open("status", "r")
            targetTemp = float(file.readline())
            mode = file.readline()
            file.close()

            now = datetime.datetime.now()
            logElapsed = now - lastLog
            mailElapsed = now - lastMail

            ### check if we need to send error mail
            #cooling 
            #it's 78, we want it to be 72, and the error threshold is 5 = this triggers
            if mailEnabled == True and (mailElapsed > datetime.timedelta(minutes=20)) and (indoorTemp - float(targetTemp) ) > errorThreshold:
                self.sendErrorMail()
                lastMail = datetime.datetime.now()
                if DEBUG == 1:
                    log = open("logs/debug_" + datetime.datetime.now().strftime('%Y%m%d') + ".log", "a")
                    log.write("MAIL: Sent mail to " + recipient + " at " + time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime()) + "\n")
                    log.close()

            #heat 
            #it's 72, we want it to be 78, and the error threshold is 5 = this triggers
            if mailEnabled == True and (mailElapsed > datetime.timedelta(minutes=20)) and (float(targetTemp) - indoorTemp ) > errorThreshold:
                self.sendErrorMail()
                lastMail = datetime.datetime.now()
                if DEBUG == 1:
                    log = open("logs/debug_" + datetime.datetime.now().strftime('%Y%m%d') + ".log", "a")
                    log.write("MAIL: Sent mail to " + recipient + " at " + time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime()) + "\n")
                    log.close()


            #logging actual temp and indoor temp to sqlite database.
            #you can do fun things with this data, like make charts! 
            if logElapsed > datetime.timedelta(minutes=6) and sqliteEnabled:
                c.execute('INSERT INTO logging VALUES(?, ?, ?)', (now, indoorTemp, targetTemp))
                conn.commit()
                lastLog = datetime.datetime.now()

                
            # heater mode
            if mode == "heat":
                if hvacState == 0: #idle
                    if indoorTemp < targetTemp - inactive_hysteresis:
                        if DEBUG == 1:
                            log = open("logs/debug_" + datetime.datetime.now().strftime('%Y%m%d') + ".log", "a")
                            log.write("STATE: Switching to heat at " + time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime()) + "\n")
                            log.close()
                        hvacState = self.heat()

                elif hvacState == 1: #heating
                    if indoorTemp > targetTemp + active_hysteresis:
                        if DEBUG == 1:
                            log = open("logs/debug_" + datetime.datetime.now().strftime('%Y%m%d') + ".log", "a")
                            log.write("STATE: Switching to fan_to_idle at " + time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime()) + "\n")
                            log.close()
                        self.fan_to_idle()
                        time.sleep(30)
                        if DEBUG == 1:
                            log = open("logs/debug_" + datetime.datetime.now().strftime('%Y%m%d') + ".log", "a")
                            log.write("STATE: Switching to idle at " + time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime()) + "\n")
                            log.close()
                        hvacState = self.idle()

                elif hvacState == -1: # it's cold out, why is the AC running?
                        if DEBUG == 1:
                            log = open("logs/debug_" + datetime.datetime.now().strftime('%Y%m%d') + ".log", "a")
                            log.write("STATE: Switching to idle at " + time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime()) + "\n")
                            log.close()
                        hvacState = self.idle()

            # ac mode
            elif mode == "cool":
                if hvacState == 0: #idle
                    if indoorTemp > targetTemp + inactive_hysteresis:
                        if DEBUG == 1:
                            log = open("logs/debug_" + datetime.datetime.now().strftime('%Y%m%d') + ".log", "a")
                            log.write("STATE: Switching to cool at " + time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime()) + "\n")
                            log.close()
                        hvacState = self.cool()

                elif hvacState == -1: #cooling
                    if indoorTemp < targetTemp - active_hysteresis:
                        if DEBUG == 1:
                            log = open("logs/debug_" + datetime.datetime.now().strftime('%Y%m%d') + ".log", "a")
                            log.write("STATE: Switching to fan_to_idle at " + time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime()) + "\n")
                            log.close()
                        self.fan_to_idle()
                        time.sleep(30)
                        if DEBUG == 1:
                            log = open("logs/debug_" + datetime.datetime.now().strftime('%Y%m%d') + ".log", "a")
                            log.write("STATE: Switching to idle at " + time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime()) + "\n")
                            log.close()
                        hvacState = self.idle()

                elif hvacState == 1: # it's hot out, why is the heater on?
                        if DEBUG == 1:
                            log = open("logs/debug_" + datetime.datetime.now().strftime('%Y%m%d') + ".log", "a")
                            log.write("STATE: Switching to fan_to_idle at " + time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime()) + "\n")
                            log.close()
                        hvacState = self.idle()
            else:
                print "It broke."

            #loggin'stuff
            if DEBUG == 1:
                heatStatus = int(subprocess.Popen("cat /sys/class/gpio/gpio" + str(HEATER_PIN) + "/value", shell=True, stdout=subprocess.PIPE).stdout.read().strip())
                coolStatus = int(subprocess.Popen("cat /sys/class/gpio/gpio" + str(AC_PIN) + "/value", shell=True, stdout=subprocess.PIPE).stdout.read().strip())
                fanStatus = int(subprocess.Popen("cat /sys/class/gpio/gpio" + str(FAN_PIN) + "/value", shell=True, stdout=subprocess.PIPE).stdout.read().strip())
                log = open("logs/debug_" + datetime.datetime.now().strftime('%Y%m%d') + ".log", "a")
                log.write("Report at " + time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime()) + ":\n")
                log.write("hvacState = " + str(hvacState)+ "\n")
                log.write("indoorTemp = " + str(indoorTemp)+ "\n")
                log.write("targetTemp = " + str(targetTemp)+ "\n")
                log.write("heatStatus = " + str(heatStatus) + "\n")
                log.write("coolStatus = " + str(coolStatus)+ "\n")
                log.write("fanStatus = " + str(fanStatus)+ "\n")
                log.close()
            
            time.sleep(5)

Example 28

Project: fullerite
Source File: ipvs.py
View license
    def collect(self):
        """Collect IPVS metrics by running the ``ipvsadm`` tool twice.

        The first pass (``self.statcommand``) publishes traffic counters
        (connections, packets, bytes) per virtual service and per backend;
        the second pass (``self.concommand``) publishes active/inactive
        connection counts and an accumulated per-service total.

        Returns ``False`` (after logging an error) when the configured
        binary -- or the sudo command, when sudo is enabled -- is missing
        or not executable.

        NOTE(review): Python 2 only -- relies on ``dict.iteritems`` and the
        ``string`` module's function form of ``replace``.
        """
        # Bail out early if the ipvsadm binary is missing/not executable.
        if not os.access(self.config['bin'], os.X_OK):
            self.log.error("%s does not exist, or is not executable",
                           self.config['bin'])
            return False

        # Same check for the sudo wrapper when sudo use is configured.
        if (str_to_bool(self.config['use_sudo'])
                and not os.access(self.config['sudo_cmd'], os.X_OK)):
            self.log.error("%s does not exist, or is not executable",
                           self.config['sudo_cmd'])
            return False

        # Probe run: some ipvsadm versions don't support '--exact' and exit
        # with status 255; in that case the flag is stripped before the
        # real invocation below.
        p = subprocess.Popen(self.statcommand, stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        p.wait()

        if p.returncode == 255:
            self.statcommand = filter(
                lambda a: a != '--exact', self.statcommand)

        # Real stats run; the trailing [:-1] drops the final newline so the
        # later split("\n") doesn't yield an empty last element.
        p = subprocess.Popen(self.statcommand,
                             stdout=subprocess.PIPE).communicate()[0][:-1]

        # Column index of each metric in ipvsadm's tabular stats output.
        columns = {
            'conns': 2,
            'inpkts': 3,
            'outpkts': 4,
            'inbytes': 5,
            'outbytes': 6,
        }

        external = ""
        backend = ""
        for i, line in enumerate(p.split("\n")):
            # The first three lines of output are headers.
            if i < 3:
                continue
            row = line.split()

            if row[0] == "TCP" or row[0] == "UDP":
                # Virtual-service line ("<proto> <addr:port> ..."): metrics
                # on this line are the service-wide totals.
                external = row[0] + "_" + string.replace(row[1], ".", "_")
                backend = "total"
            elif row[0] == "->":
                # Real-server (backend) line under the current service.
                backend = string.replace(row[1], ".", "_")
            else:
                continue

            for metric, column in columns.iteritems():
                metric_name = ".".join([external, backend, metric])
                # metric_value = int(row[column])
                value = row[column]
                # ipvsadm abbreviates large counters with K/M/G suffixes;
                # expand them back to raw units.
                if (value.endswith('K')):
                        metric_value = int(value[0:len(value) - 1]) * 1024
                elif (value.endswith('M')):
                        metric_value = (int(value[0:len(value) - 1]) * 1024
                                        * 1024)
                elif (value.endswith('G')):
                        metric_value = (int(value[0:len(value) - 1]) * 1024.0
                                        * 1024.0 * 1024.0)
                else:
                        metric_value = float(value)

                self.publish(metric_name, metric_value)

        # Second run: connection-table listing for active/inactive counts.
        p = subprocess.Popen(self.concommand,
                             stdout=subprocess.PIPE).communicate()[0][:-1]

        # Column indexes in the connection-count output.
        columns = {
            'active': 4,
            'inactive': 5,
        }

        external = ""
        backend = ""
        # Per-service accumulator; this listing has no "total" line, so
        # totals are summed here and flushed on each service boundary.
        total = {}
        for i, line in enumerate(p.split("\n")):
            if i < 3:
                continue
            row = line.split()

            if row[0] == "TCP" or row[0] == "UDP":
                # New service: flush the previous service's accumulated
                # totals before resetting the accumulator.
                if total:
                    for metric, value in total.iteritems():
                        self.publish(
                            ".".join([external, "total", metric]), value)

                for k in columns.keys():
                    total[k] = 0.0

                external = row[0] + "_" + string.replace(row[1], ".", "_")
                continue
            elif row[0] == "->":
                backend = string.replace(row[1], ".", "_")
            else:
                continue

            for metric, column in columns.iteritems():
                metric_name = ".".join([external, backend, metric])
                # metric_value = int(row[column])
                value = row[column]
                if (value.endswith('K')):
                        metric_value = int(value[0:len(value) - 1]) * 1024
                elif (value.endswith('M')):
                        metric_value = (int(value[0:len(value) - 1]) * 1024
                                        * 1024)
                elif (value.endswith('G')):
                        metric_value = (int(value[0:len(value) - 1]) * 1024.0
                                        * 1024.0 * 1024.0)
                else:
                        metric_value = float(value)

                total[metric] += metric_value
                self.publish(metric_name, metric_value)

        # Flush the totals for the last service seen in the output.
        if total:
            for metric, value in total.iteritems():
                self.publish(".".join([external, "total", metric]), value)

Example 29

Project: backintime
Source File: sshtools.py
View license
    def checkRemoteCommands(self, retry = False):
        """
        Try out all relevant commands used by `Back In Time` on the remote host
        to make sure snapshots will be successful with the remote host.
        This will also check that hard-links are supported on the remote host.

        This check can be disabled with :py:func:`config.Config.sshCheckCommands`

        Args:
            retry (bool):               retry to run the commands if it failed
                                        because the command string was to long

        Raises:
            exceptions.MountException:  if a command is not supported on
                                        remote host or if hard-links are not
                                        supported
        """
        if not self.config.sshCheckCommands():
            return
        logger.debug('Check remote commands', self)
        def maxArg():
            # Called when the remote side returned nothing, which usually
            # means the assembled command line exceeded the SSHd argument
            # limit. Measure the real limit once and retry; a second
            # failure is reported as an unrelated problem.
            if retry:
                raise MountException("Checking commands on remote host didn't return any output. "
                                     "We already checked the maximum argument lenght but it seem like "
                                     "there is an other problem")
            logger.warning('Looks like the command was to long for remote SSHd. We will test max arg length now and retry.',
                           self)
            import sshMaxArg
            mid = sshMaxArg.maxArgLength(self.config)
            sshMaxArg.reportResult(self.host, mid)
            self.config.setSshMaxArgLength(mid, self.profile_id)
            return self.checkRemoteCommands(retry = True)

        # Two scratch directories on the remote host; the second one is the
        # hard-link target of the first.
        remote_tmp_dir_1 = os.path.join(self.path, 'tmp_%s' % self.randomId())
        remote_tmp_dir_2 = os.path.join(self.path, 'tmp_%s' % self.randomId())
        with tempfile.TemporaryDirectory() as tmp:
            tmp_file = os.path.join(tmp, 'a')
            with open(tmp_file, 'wt') as f:
                f.write('foo')

            #check rsync
            # Destination is user@host:"remote_dir"/ (host wrapped for IPv6).
            # FIX: restore the '%s@%s' format string that had been mangled
            # (invalid '%[...]' conversion would raise at runtime).
            rsync1 =  tools.rsyncPrefix(self.config, no_perms = False, progress = False)
            rsync1.append(tmp_file)
            rsync1.append('%s@%s:"%s"/' %(self.user,
                                        tools.escapeIPv6Address(self.host),
                                        remote_tmp_dir_1))

            #check remote rsync hard-link support
            rsync2 =  tools.rsyncPrefix(self.config, no_perms = False, progress = False)
            rsync2.append('--link-dest=../%s' %os.path.basename(remote_tmp_dir_1))
            rsync2.append(tmp_file)
            rsync2.append('%s@%s:"%s"/' %(self.user,
                                        tools.escapeIPv6Address(self.host),
                                        remote_tmp_dir_2))

            for cmd in (rsync1, rsync2):
                logger.debug('Check rsync command: %s' %cmd, self)

                proc = subprocess.Popen(cmd,
                                        stdout = subprocess.PIPE,
                                        stderr = subprocess.PIPE,
                                        universal_newlines = True)
                out, err = proc.communicate()
                # rsync must succeed silently; any stderr output or nonzero
                # exit means the remote host can't run it as needed.
                if err or proc.returncode:
                    logger.debug('rsync command returned error: %s' %err, self)
                    raise MountException(_('Remote host %(host)s doesn\'t support \'%(command)s\':\n'
                                            '%(err)s\nLook at \'man backintime\' for further instructions')
                                            % {'host' : self.host, 'command' : cmd, 'err' : err})

        #check cp chmod find and rm
        # Build one remote shell script: a 'head' defining cleanup(), then a
        # 'tail' of individual probes. Each probe exits (via cleanup) with a
        # nonzero status on failure, leaving the failing command's echo as
        # the last output line so it can be identified below.
        head  = 'tmp1="%s"; tmp2="%s"; ' %(remote_tmp_dir_1, remote_tmp_dir_2)
        #first define a function to clean up and exit
        head += 'cleanup(){ '
        head += 'test -e "$tmp1/a" && rm "$tmp1/a" >/dev/null 2>&1; '
        head += 'test -e "$tmp2/a" && rm "$tmp2/a" >/dev/null 2>&1; '
        head += 'test -e smr.lock && rm smr.lock >/dev/null 2>&1; '
        head += 'test -e "$tmp1" && rmdir "$tmp1" >/dev/null 2>&1; '
        head += 'test -e "$tmp2" && rmdir "$tmp2" >/dev/null 2>&1; '
        head += 'test -n "$tmp3" && test -e "$tmp3" && rmdir "$tmp3" >/dev/null 2>&1; '
        head += 'exit $1; }; '
        tail = []

        #list inodes
        # Both files were created by the rsync checks above; matching inodes
        # prove --link-dest produced a hard-link (verified at the end).
        cmd  = 'ls -i "$tmp1/a"; ls -i "$tmp2/a"; '
        tail.append(cmd)
        #try nice -n 19
        if self.nice:
            cmd  = 'echo \"nice -n 19\"; nice -n 19 true >/dev/null; err_nice=$?; '
            cmd += 'test $err_nice -ne 0 && cleanup $err_nice; '
            tail.append(cmd)
        #try ionice -c2 -n7
        if self.ionice:
            cmd  = 'echo \"ionice -c2 -n7\"; ionice -c2 -n7 true >/dev/null; err_nice=$?; '
            cmd += 'test $err_nice -ne 0 && cleanup $err_nice; '
            tail.append(cmd)
        #try nocache
        if self.nocache:
            cmd  = 'echo \"nocache\"; nocache true >/dev/null; err_nocache=$?; '
            cmd += 'test $err_nocache -ne 0 && cleanup $err_nocache; '
            tail.append(cmd)
        #try screen, bash and flock used by smart-remove running in background
        if self.config.smartRemoveRunRemoteInBackground(self.profile_id):
            cmd  = 'echo \"screen -d -m bash -c ...\"; screen -d -m bash -c \"true\" >/dev/null; err_screen=$?; '
            cmd += 'test $err_screen -ne 0 && cleanup $err_screen; '
            tail.append(cmd)
            cmd  = 'echo \"(flock -x 9) 9>smr.lock\"; bash -c \"(flock -x 9) 9>smr.lock\" >/dev/null; err_flock=$?; '
            cmd += 'test $err_flock -ne 0 && cleanup $err_flock; '
            tail.append(cmd)
            cmd  = 'echo \"rmdir \$(mktemp -d)\"; tmp3=$(mktemp -d); test -z "$tmp3" && cleanup 1; rmdir $tmp3 >/dev/null; err_rmdir=$?; '
            cmd += 'test $err_rmdir -ne 0 && cleanup $err_rmdir; '
            tail.append(cmd)
        #if we end up here, everything should be fine
        cmd = 'echo \"done\"; cleanup 0'
        tail.append(cmd)

        # Split the probes into chunks that fit into the remote argument
        # limit, reserving room for the prefix command and the echo below.
        maxLength = self.config.sshMaxArgLength(self.profile_id)
        additionalChars = len('echo ""') + len(self.config.sshPrefixCmd(self.profile_id, cmd_type = str))

        output = ''
        err = ''
        returncode = 0
        for cmd in tools.splitCommands(tail,
                                       head = head,
                                       maxLength = maxLength - additionalChars):
            if cmd.endswith('; '):
                cmd += 'echo ""'
            c = self.config.sshCommand(cmd = [cmd],
                                        custom_args = ['-p', str(self.port), self.user_host],
                                        port = False,
                                        cipher = False,
                                        user_host = False,
                                        nice = False,
                                        ionice = False,
                                        profile_id = self.profile_id)
            try:
                logger.debug('Call command: %s' %' '.join(c), self)
                proc = subprocess.Popen(c,
                                        stdout=subprocess.PIPE,
                                        stderr=subprocess.PIPE,
                                        universal_newlines = True)
                ret = proc.communicate()
            except OSError as e:
                #Argument list too long
                # errno 7 == E2BIG: the command line itself was too long for
                # the local exec; re-measure the limit and retry.
                if e.errno == 7:
                    logger.debug('Argument list too log (Python exception)', self)
                    return maxArg()
                else:
                    raise
            logger.debug('Command stdout: %s' %ret[0], self)
            logger.debug('Command stderr: %s' %ret[1], self)
            logger.debug('Command returncode: %s' %proc.returncode, self)
            output += ret[0].strip('\n') + '\n'
            err    += ret[1].strip('\n') + '\n'
            returncode += proc.returncode
            if proc.returncode:
                break

        output_split = output.strip('\n').split('\n')

        # Drop trailing empty lines produced by the 'echo ""' padding.
        while True:
            if output_split and not output_split[-1]:
                output_split = output_split[:-1]
            else:
                break

        # No output at all: most likely the command string hit the remote
        # argument limit without raising locally.
        if not output_split:
            return maxArg()

        # On failure the last echoed line names the command that failed.
        if returncode or not output_split[-1].startswith('done'):
            for command in ('rm', 'nice', 'ionice', 'nocache', 'screen', '(flock'):
                if output_split[-1].startswith(command):
                    raise MountException(_('Remote host %(host)s doesn\'t support \'%(command)s\':\n'
                                            '%(err)s\nLook at \'man backintime\' for further instructions')
                                            % {'host' : self.host, 'command' : output_split[-1], 'err' : err})
            raise MountException(_('Check commands on host %(host)s returned unknown error:\n'
                                    '%(err)s\nLook at \'man backintime\' for further instructions')
                                    % {'host' : self.host, 'err' : err})

        # Extract the inode numbers printed by 'ls -i' for both test files;
        # equal inodes prove the remote rsync created a hard-link.
        inodes = []
        for tmp in (remote_tmp_dir_1, remote_tmp_dir_2):
            for line in output_split:
                m = re.match(r'^(\d+).*?%s' %tmp, line)
                if m:
                    inodes.append(m.group(1))

        logger.debug('remote inodes: ' + ' | '.join(inodes), self)
        if len(inodes) == 2 and inodes[0] != inodes[1]:
            raise MountException(_('Remote host %s doesn\'t support hardlinks') % self.host)

Example 30

Project: bitcurator
Source File: bc_mounter.py
View license
    def __init__(self, dialog_message):
        """Build the BitCurator Mounter dialog.

        Enumerates attached block devices via ``blkid``, gathers per-device
        details (file system, label, size, mount point, rw/ro status) with
        a series of shell commands, and presents them in a GTK tree view
        with a checkbox column and a "Mount Selected Devices" button.

        Args:
            dialog_message: text shown in the label above the device table.
        """
        gtk.Dialog.__init__(self, "BitCurator Mounter", None, 0)
            #(gtk.STOCK_CANCEL, gtk.ResponseType.CANCEL,
            # gtk.STOCK_OK, gtk.ResponseType.OK))

        self.set_border_width(6)
        self.set_default_size(600, 400)

        label = gtk.Label(dialog_message)

        # Get info about attached block devices
        # NOTE(review): all shell commands below are built by string
        # concatenation with shell=True; device names come from blkid so
        # injection risk is low, but a list argv would be safer -- confirm.
        blkid_cmd = "sudo blkid -o device | grep -v ram"
        p_blkid = subprocess.Popen(blkid_cmd, stdout=subprocess.PIPE, stderr=None, shell=True)
        #Launch the shell command:
        output = p_blkid.communicate()
        #print(output[0].decode("utf-8"))

        # One device path per line; the split leaves an empty last element,
        # hence the [0:-1] slices below.
        device_list = output[0].decode("utf-8").split('\n')

        # Set up the 2D array for device information
        # Each row: [device, fs type, label, size, mount point, rw status].
        info_list = [[] for _ in range(len(device_list[0:-1]))]

        # Parse the device information for the dialog
        for index, this_dev in enumerate(device_list[0:-1]):
            # Testing only
            #print("Got here: " + this_dev + " " + str(index))

            # Add raw device point to info list
            info_list[index].append(this_dev)

            # --- Determine the file system type
            fs_cmd = "blkid -s TYPE -o value " + this_dev
            p_fs = subprocess.Popen(fs_cmd, stdout=subprocess.PIPE, stderr=None, shell=True)
            #Launch the shell command:
            output = p_fs.communicate()
            #print(output[0].decode("utf-8"))

            # Add file system type to info list for this device
            info_list[index].append(output[0].decode("utf-8").rstrip('\n'))

            # --- Determine the volume label
            label_cmd = "blkid -s LABEL -o value " + this_dev
            p_label = subprocess.Popen(label_cmd, stdout=subprocess.PIPE, stderr=None, shell=True)
            #Launch the shell command:
            output = p_label.communicate()
            #print(output[0].decode("utf-8"))

            # Add volume label to info list for this device
            info_list[index].append(output[0].decode("utf-8").rstrip('\n'))

            # --- Determine the volume size
            size_cmd = "sudo sfdisk -s " + this_dev
            p_size = subprocess.Popen(size_cmd, stdout=subprocess.PIPE, stderr=None, shell=True)
            #Launch the shell command: - need to fix this for KB
            output = p_size.communicate()
            #print(output[0].decode("utf-8"))

            # Add volume size to info list for this device
            info_list[index].append(output[0].decode("utf-8").rstrip('\n'))

            # --- Determine if device is mounted
            # A matching /etc/mtab line looks like:
            #   "<dev> <mountpoint> <fstype> <options> ..."
            mnt_cmd = "grep ^" + this_dev + " /etc/mtab"
            p_mnt = subprocess.Popen(mnt_cmd, stdout=subprocess.PIPE, stderr=None, shell=True)
            #Launch the shell command: - need to fix this for KB
            output = p_mnt.communicate()
            mnt_status_list = (output[0].decode("utf-8")).split(' ')

            if len(mnt_status_list) == 1:
                # No mtab match: device is not mounted.
                # Add negative mount point and status to info list for this device
                info_list[index].append("")
                info_list[index].append("(none)")

            else:
                # Set mounted location (fix this to not just print)
                #print("Mounted at: " + mnt_status_list[1])
                info_list[index].append(mnt_status_list[1])
                # Check for ro/rw status:
                # Field 3 is the mount-options string, e.g. "rw,relatime".
                if mnt_status_list[3].startswith("ro"):
                    #print("READ ONLY")
                    # Add read-only mount status to info list for this device
                    info_list[index].append("READ ONLY")
                if mnt_status_list[3].startswith("rw"):
                    #print("READ-WRITE")
                    # Add read-only mount status to info list for this device
                    info_list[index].append("WRITEABLE")

        #print(info_list)
        # Model columns: [selected?, device, fs, label, size, mount, status].
        self.liststore = gtk.ListStore(bool, str, str, str, str, str, str)

        for dev_info in info_list:
            self.liststore.append([False, dev_info[0], dev_info[1], dev_info[2], dev_info[3], dev_info[4], dev_info[5]])

        treeview = gtk.TreeView(model=self.liststore)

        # Checkbox column wired to the toggle handler.
        cell0 = gtk.CellRendererToggle()
        cell0.connect("toggled", self.on_cell_toggled)
        column_check = gtk.TreeViewColumn("Select", cell0, active=0)
        treeview.append_column(column_check)

        cell1 = gtk.CellRendererText()
        column_dev = gtk.TreeViewColumn("Raw Device", cell1, text=1)
        treeview.append_column(column_dev)

        cell2 = gtk.CellRendererText()
        column_dev = gtk.TreeViewColumn("File System", cell2, text=2)
        treeview.append_column(column_dev)

        cell3 = gtk.CellRendererText()
        column_dev = gtk.TreeViewColumn("Label", cell3, text=3)
        treeview.append_column(column_dev)

        cell4 = gtk.CellRendererText()
        column_dev = gtk.TreeViewColumn("Size (Bytes)", cell4, text=4)
        treeview.append_column(column_dev)

        cell5 = gtk.CellRendererText()
        column_dev = gtk.TreeViewColumn("Mount Point", cell5, text=5)
        treeview.append_column(column_dev)

        cell6 = gtk.CellRendererText()
        column_dev = gtk.TreeViewColumn("Read/Write Status", cell6, text=6)
        treeview.append_column(column_dev)
        
        mount_button = gtk.Button()
        mount_button.set_label("Mount Selected Devices")
        mount_button.connect('clicked', self.on_mount_clicked)

        # Add all items to the main box
        box = self.get_content_area()
        box.set_spacing(6)
        box.add(label)
        box.add(treeview)
        box.add(mount_button)
        self.show_all()

Example 31

Project: ursula
Source File: sshbb.py
View license
    def _run(self, cmd, in_data, sudoable=True):
        '''
        Starts the command and communicates with it until it ends.
        '''

        display_cmd = map(to_unicode, map(pipes.quote, cmd))
        display.vvv(u'SSH: EXEC {0}'.format(u' '.join(display_cmd)), host=self.host)

        # Start the given command. If we don't need to pipeline data, we can try
        # to use a pseudo-tty (ssh will have been invoked with -tt). If we are
        # pipelining data, or can't create a pty, we fall back to using plain
        # old pipes.

        p = None

        if isinstance(cmd, (text_type, binary_type)):
            cmd = to_bytes(cmd)
        else:
            cmd = list(map(to_bytes, cmd))

        if not in_data:
            try:
                # Make sure stdin is a proper pty to avoid tcgetattr errors
                master, slave = pty.openpty()
                p = subprocess.Popen(cmd, stdin=slave, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                stdin = os.fdopen(master, 'w', 0)
                os.close(slave)
            except (OSError, IOError):
                p = None

        if not p:
            p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            stdin = p.stdin

        # If we are using SSH password authentication, write the password into
        # the pipe we opened in _build_command.

        if self._play_context.password:
            os.close(self.sshpass_pipe[0])
            os.write(self.sshpass_pipe[1], "{0}\n".format(to_bytes(self._play_context.password)))
            os.close(self.sshpass_pipe[1])

        ## SSH state machine
        #
        # Now we read and accumulate output from the running process until it
        # exits. Depending on the circumstances, we may also need to write an
        # escalation password and/or pipelined input to the process.

        states = [
            'awaiting_prompt', 'awaiting_escalation', 'ready_to_send', 'awaiting_exit'
        ]

        # Are we requesting privilege escalation? Right now, we may be invoked
        # to execute sftp/scp with sudoable=True, but we can request escalation
        # only when using ssh. Otherwise we can send initial data straightaway.

        state = states.index('ready_to_send')
        if b'ssh' in cmd:
            if self._play_context.prompt:
                # We're requesting escalation with a password, so we have to
                # wait for a password prompt.
                state = states.index('awaiting_prompt')
                display.debug('Initial state: %s: %s' % (states[state], self._play_context.prompt))
            elif self._play_context.become and self._play_context.success_key:
                # We're requesting escalation without a password, so we have to
                # detect success/failure before sending any initial data.
                state = states.index('awaiting_escalation')
                display.debug('Initial state: %s: %s' % (states[state], self._play_context.success_key))

        # We store accumulated stdout and stderr output from the process here,
        # but strip any privilege escalation prompt/confirmation lines first.
        # Output is accumulated into tmp_*, complete lines are extracted into
        # an array, then checked and removed or copied to stdout or stderr. We
        # set any flags based on examining the output in self._flags.

        stdout = stderr = ''
        tmp_stdout = tmp_stderr = ''

        self._flags = dict(
            become_prompt=False, become_success=False,
            become_error=False, become_nopasswd_error=False
        )

        # select timeout should be longer than the connect timeout, otherwise
        # they will race each other when we can't connect, and the connect
        # timeout usually fails
        timeout = 2 + self._play_context.timeout
        rpipes = [p.stdout, p.stderr]
        for fd in rpipes:
            fcntl.fcntl(fd, fcntl.F_SETFL, fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK)

        # If we can send initial data without waiting for anything, we do so
        # before we call select.

        if states[state] == 'ready_to_send' and in_data:
            self._send_initial_data(stdin, in_data)
            state += 1

        while True:
            rfd, wfd, efd = select.select(rpipes, [], [], timeout)

            # We pay attention to timeouts only while negotiating a prompt.

            if not rfd:
                if state <= states.index('awaiting_escalation'):
                    # If the process has already exited, then it's not really a
                    # timeout; we'll let the normal error handling deal with it.
                    if p.poll() is not None:
                        break
                    self._terminate_process(p)
                    raise AnsibleError('Timeout (%ds) waiting for privilege escalation prompt: %s' % (timeout, stdout))

            # Read whatever output is available on stdout and stderr, and stop
            # listening to the pipe if it's been closed.

            if p.stdout in rfd:
                chunk = p.stdout.read()
                if chunk == '':
                    rpipes.remove(p.stdout)
                tmp_stdout += chunk
                display.debug("stdout chunk (state=%s):\n>>>%s<<<\n" % (state, chunk))

            if p.stderr in rfd:
                chunk = p.stderr.read()
                if chunk == '':
                    rpipes.remove(p.stderr)
                tmp_stderr += chunk
                display.debug("stderr chunk (state=%s):\n>>>%s<<<\n" % (state, chunk))

            # We examine the output line-by-line until we have negotiated any
            # privilege escalation prompt and subsequent success/error message.
            # Afterwards, we can accumulate output without looking at it.

            if state < states.index('ready_to_send'):
                if tmp_stdout:
                    output, unprocessed = self._examine_output('stdout', states[state], tmp_stdout, sudoable)
                    stdout += output
                    tmp_stdout = unprocessed

                if tmp_stderr:
                    output, unprocessed = self._examine_output('stderr', states[state], tmp_stderr, sudoable)
                    stderr += output
                    tmp_stderr = unprocessed
            else:
                stdout += tmp_stdout
                stderr += tmp_stderr
                tmp_stdout = tmp_stderr = ''

            # If we see a privilege escalation prompt, we send the password.
            # (If we're expecting a prompt but the escalation succeeds, we
            # didn't need the password and can carry on regardless.)

            if states[state] == 'awaiting_prompt':
                if self._flags['become_prompt']:
                    display.debug('Sending become_pass in response to prompt')
                    stdin.write('{0}\n'.format(to_bytes(self._play_context.become_pass )))
                    self._flags['become_prompt'] = False
                    state += 1
                elif self._flags['become_success']:
                    state += 1

            # We've requested escalation (with or without a password), now we
            # wait for an error message or a successful escalation.

            if states[state] == 'awaiting_escalation':
                if self._flags['become_success']:
                    display.debug('Escalation succeeded')
                    self._flags['become_success'] = False
                    state += 1
                elif self._flags['become_error']:
                    display.debug('Escalation failed')
                    self._terminate_process(p)
                    self._flags['become_error'] = False
                    raise AnsibleError('Incorrect %s password' % self._play_context.become_method)
                elif self._flags['become_nopasswd_error']:
                    display.debug('Escalation requires password')
                    self._terminate_process(p)
                    self._flags['become_nopasswd_error'] = False
                    raise AnsibleError('Missing %s password' % self._play_context.become_method)
                elif self._flags['become_prompt']:
                    # This shouldn't happen, because we should see the "Sorry,
                    # try again" message first.
                    display.debug('Escalation prompt repeated')
                    self._terminate_process(p)
                    self._flags['become_prompt'] = False
                    raise AnsibleError('Incorrect %s password' % self._play_context.become_method)

            # Once we're sure that the privilege escalation prompt, if any, has
            # been dealt with, we can send any initial data and start waiting
            # for output.

            if states[state] == 'ready_to_send':
                if in_data:
                    self._send_initial_data(stdin, in_data)
                state += 1

            # Now we're awaiting_exit: has the child process exited? If it has,
            # and we've read all available output from it, we're done.

            if p.poll() is not None:
                if not rpipes or not rfd:
                    break

                # When ssh has ControlMaster (+ControlPath/Persist) enabled, the
                # first connection goes into the background and we never see EOF
                # on stderr. If we see EOF on stdout and the process has exited,
                # we're probably done. We call select again with a zero timeout,
                # just to make certain we don't miss anything that may have been
                # written to stderr between the time we called select() and when
                # we learned that the process had finished.

                if p.stdout not in rpipes:
                    timeout = 0
                    continue

            # If the process has not yet exited, but we've already read EOF from
            # its stdout and stderr (and thus removed both from rpipes), we can
            # just wait for it to exit.

            elif not rpipes:
                p.wait()
                break

            # Otherwise there may still be outstanding data to read.

        # close stdin after process is terminated and stdout/stderr are read
        # completely (see also issue #848)
        stdin.close()

        if C.HOST_KEY_CHECKING:
            if cmd[0] == b"sshpass" and p.returncode == 6:
                raise AnsibleError('Using a SSH password instead of a key is not possible because Host Key checking is enabled and sshpass does not support this.  Please add this host\'s fingerprint to your known_hosts file to manage this host.')

        controlpersisterror = 'Bad configuration option: ControlPersist' in stderr or 'unknown configuration option: ControlPersist' in stderr
        if p.returncode != 0 and controlpersisterror:
            raise AnsibleError('using -c ssh on certain older ssh versions may not support ControlPersist, set ANSIBLE_SSH_ARGS="" (or ssh_args in [ssh_connection] section of the config file) before running again')

        if p.returncode == 255 and in_data:
            raise AnsibleConnectionFailure('SSH Error: data could not be sent to the remote host. Make sure this host can be reached over ssh')

        return (p.returncode, stdout, stderr)

Example 32

Project: termsaver
Source File: position.py
View license
    def get_terminal_size(self):
        """
        Retrieve the terminal dimensions, returning a (width, height)
        tuple, and store them in the internal property `geometry`.

        Also updates `changed_geometry`: True when the size differs from
        the previously recorded geometry, False otherwise. Falls back to
        a default of (80, 25) when no platform strategy succeeds.

        Copyright note:
        This code has been adapted from:
        http://stackoverflow.com/questions/566746/\
            how-to-get-console-window-width-in-python

        posted by Harco Kuppens, at Jul 1 '11 at 16:23.

        Fix note: the original helpers used bare ``except:`` clauses,
        which also swallow KeyboardInterrupt/SystemExit; these have been
        narrowed so user-initiated exits propagate.
        """

        # This is required if you are programming from non-windows platforms
        # more on this at: http://pydev.org/manual_adv_assistants.html
        #@PydevCodeAnalysisIgnore
        def _get_terminal_size_windows():
            # Win32 console API strategy; returns (cols, rows) or None.
            res = None
            try:
                from ctypes import windll, create_string_buffer

                # Std handles: stdin is -10, stdout is -11, stderr is -12.
                # stderr is queried because it is least likely to be
                # redirected away from the console.
                h = windll.kernel32.GetStdHandle(-12)
                # 22 bytes = sizeof(CONSOLE_SCREEN_BUFFER_INFO).
                csbi = create_string_buffer(22)
                res = windll.kernel32.GetConsoleScreenBufferInfo(h, csbi)
            except Exception:
                # Not on Windows, or ctypes/kernel32 call failed.
                return None
            if res:
                import struct
                # Unpack CONSOLE_SCREEN_BUFFER_INFO; only the window
                # rectangle (left/top/right/bottom) fields are needed.
                (_, _, _, _, _,
                 left, top, right, bottom, _, _) = struct.unpack("hhhhHhhhhhh",
                                                                 csbi.raw)
                sizex = right - left + 1
                sizey = bottom - top + 1
                return sizex, sizey
            else:
                return None

        def _get_terminal_size_tput():
            # Fallback that shells out to `tput`; returns (cols, rows),
            # or None when tput is missing or its output is unparsable.
            try:
                import subprocess
                proc = subprocess.Popen(["tput", "cols"],
                                        stdin=subprocess.PIPE,
                                        stdout=subprocess.PIPE)
                output = proc.communicate(input=None)
                cols = int(output[0])
                proc = subprocess.Popen(["tput", "lines"],
                                        stdin=subprocess.PIPE,
                                        stdout=subprocess.PIPE)
                output = proc.communicate(input=None)
                rows = int(output[0])
                return (cols, rows)
            except Exception:
                return None

        def _get_terminal_size_linux():
            # POSIX strategy: TIOCGWINSZ ioctl on stdin/stdout/stderr,
            # then on the controlling terminal, then LINES/COLUMNS env.
            def ioctl_GWINSZ(fd):
                try:
                    import fcntl, termios, struct
                    # '1234' is just a 4-byte scratch buffer for the two
                    # shorts (rows, cols) the ioctl writes back.
                    cr = struct.unpack('hh', fcntl.ioctl(fd,
                                                         termios.TIOCGWINSZ,
                                                         '1234'))
                except Exception:
                    return None
                return cr
            cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
            if not cr:
                # All three std fds failed (e.g. redirected); ask the
                # controlling terminal directly.
                try:
                    fd = os.open(os.ctermid(), os.O_RDONLY)
                    cr = ioctl_GWINSZ(fd)
                    os.close(fd)
                except Exception:
                    pass
            if not cr:
                # Last resort: environment variables.
                try:
                    cr = (os.environ['LINES'], os.environ['COLUMNS'])
                except KeyError:
                    return None
            # The ioctl reports (rows, cols); callers want (cols, rows).
            return int(cr[1]), int(cr[0])

        current_os = platform.system()
        tuple_xy = None
        if current_os == 'Windows':
            tuple_xy = _get_terminal_size_windows()
            if tuple_xy is None:
                # needed for window's python in cygwin's xterm!
                tuple_xy = _get_terminal_size_tput()
        if current_os == 'Linux' or current_os == 'Darwin' or \
                current_os.startswith('CYGWIN'):
            tuple_xy = _get_terminal_size_linux()
        if tuple_xy is None:
            tuple_xy = (80, 25)  # default value

        self.geometry['x'], self.geometry['y'] = tuple_xy

        # store geometry changes
        if self.__old_geometry == {'x': 0, 'y': 0}:
            # first time checking geometry
            self.__old_geometry = self.geometry.copy()
            self.changed_geometry = False
        elif self.__old_geometry != self.geometry:
            self.__old_geometry = self.geometry.copy()
            self.changed_geometry = True
        else:
            self.changed_geometry = False

Example 33

Project: stonix
Source File: MuteMic.py
View license
    def report(self):
        '''
        Report method for MuteMic. Uses the platform native method to read
        the input levels. Levels must be zero to pass. Note for Linux the use
        of amixer presumes pulseaudio.

        Sets self.compliant (True only when the input level is zero AND
        the Pulse Audio defaults mute the microphone) and
        self.detailedresults, then returns self.compliant.

        @author: dkennel
        '''
        darwin = False
        chklevels = None
        # Pick the platform-appropriate command for reading the capture
        # (microphone) level; chklevels stays None on unsupported hosts.
        if self.environ.getosfamily() == 'darwin':
            darwin = True
            chklevels = "/usr/bin/osascript -e 'get the input volume of (get volume settings)'"
        elif os.path.exists('/usr/bin/amixer'):
            chklevels = '/usr/bin/amixer sget Capture Volume'

        if chklevels != None:
            # Sentinel: any parse failure below leaves a non-zero level,
            # which reads as non-compliant.
            level = 99
            try:
                if darwin:
                    self.logdispatch.log(LogPriority.DEBUG,
                                         ['MuteMic.report',
                                          'Doing Mac level check'])
                    # osascript prints a single line holding the input
                    # volume (or 'missing value' when there is no input).
                    proc = subprocess.Popen(chklevels, stdout=subprocess.PIPE,
                                            stderr=subprocess.PIPE, shell=True)
                    level = proc.stdout.readline()
                    level = level.strip()
                    if level == 'missing value':
                        level = 0
                    else:
                        # Validation only; conversion happens again below.
                        int(level)
                else:
                    self.logdispatch.log(LogPriority.DEBUG,
                                         ['MuteMic.report',
                                          'Doing amixer level check'])
                    proc = subprocess.Popen(chklevels, stdout=subprocess.PIPE,
                                            stderr=subprocess.PIPE,
                                            shell=True)
                    results = proc.stdout.readlines()
                    # Assume zeroed until any 'Capture N' line shows a
                    # non-zero (or unparsable) volume.
                    zeroed = True
                    level = 0
                    self.logdispatch.log(LogPriority.DEBUG,
                                         ['MuteMic.report',
                                          'results = ' + str(results)])
                    for line in results:
                        # Skip the 'Limits:' line, which also matches
                        # the 'Capture [0-9]' pattern.
                        if re.search('Capture [0-9]', line) and not \
                        re.search('Limits:', line):
                            match = re.search('Capture [0-9]+', line)
                            capturevol = match.group(0).split()[1]
                            try:
                                vol = int(capturevol)
                            except(ValueError):
                                zeroed = False
                                self.logdispatch.log(LogPriority.DEBUG,
                                                     ['MuteMic.report',
                                                      'zeroed set to False VE'])
                            if vol != 0:
                                zeroed = False
                                self.logdispatch.log(LogPriority.DEBUG,
                                                     ['MuteMic.report',
                                                      'zeroed set to False'])
                    if not zeroed:
                        level = 100

            except (KeyboardInterrupt, SystemExit):
                # User initiated exit
                raise
            except Exception, err:
                self.rulesuccess = False
                self.detailedresults = self.detailedresults + "\n" + str(err) + \
                " - " + str(traceback.format_exc())
                self.logdispatch.log(LogPriority.ERROR, self.detailedresults)

            # Normalize level to an int; treat unparsable output as 100
            # (non-compliant) rather than failing the whole report.
            try:
                level = int(level)
            except(ValueError):
                level = 100
            except (KeyboardInterrupt, SystemExit):
                # User initiated exit
                raise
            except Exception, err:
                self.rulesuccess = False
                self.detailedresults = self.detailedresults + "\n" + str(err) + \
                " - " + str(traceback.format_exc())
                self.logdispatch.log(LogPriority.ERROR, self.detailedresults)
            self.logdispatch.log(LogPriority.DEBUG,
                                 ['MuteMic.report',
                                  'Value of level: ' + str(level)])
            # Compliance requires both a zero input level and the Pulse
            # Audio default-mute setting (self.checkpulseaudio()).
            if level > 0 and self.checkpulseaudio():
                self.compliant = False
                self.detailedresults = 'Microphone input not set to zero!'
            elif level > 0 and not self.checkpulseaudio():
                self.compliant = False
                self.detailedresults = 'Microphone input not set to zero! and microphone not set for default mute in Pulse Audio defaults.'
            elif level == 0 and not self.checkpulseaudio():
                self.compliant = False
                self.detailedresults = 'Microphone not set for default mute in Pulse Audio defaults.'
            else:
                self.compliant = True
                self.detailedresults = 'Microphone input set to zero.'
        self.formatDetailedResults("report", self.compliant,
                                   self.detailedresults)
        self.logdispatch.log(LogPriority.INFO, self.detailedresults)
        return self.compliant

Example 34

Project: musicbox
Source File: player.py
View license
    def popen_recall(self, onExit, popenArgs):
        '''
        Play a song through an mpg123 subprocess and call onExit when it
        completes.

        onExit is a callable invoked (on the worker thread) after normal
        playback ends; popenArgs is a dict describing the song, read for
        the keys 'song_id', 'song_name', 'artist', 'mp3_url' and
        optionally 'cache' (a local file path). Playback, caching and
        lyric fetching each run on their own thread; the playback thread
        is returned immediately.
        '''

        def runInThread(onExit, arg):
            # Start mpg123 in remote-control mode ('-R') so it can be
            # driven over stdin with V/L/Q commands.
            para = ['mpg123', '-R']
            para[1:1] = self.mpg123_parameters
            self.popen_handler = subprocess.Popen(para,
                                                  stdin=subprocess.PIPE,
                                                  stdout=subprocess.PIPE,
                                                  stderr=subprocess.PIPE)
            # 'V <n>' sets the volume, 'L <url>' loads and plays a track.
            self.popen_handler.stdin.write(b'V ' + str(self.info['playing_volume']).encode('utf-8') + b'\n')
            if arg:
                self.popen_handler.stdin.write(b'L ' + arg.encode('utf-8') + b'\n')
            else:
                # No playable URL/path: skip straight to the next track.
                self.next_idx()
                onExit()
                return

            self.popen_handler.stdin.flush()

            # Parse mpg123's status lines until playback stops.
            self.process_first = True
            while True:
                if self.playing_flag is False:
                    break

                strout = self.popen_handler.stdout.readline().decode('utf-8')

                if re.match('^\@F.*$', strout):
                    # '@F' frame status: field 4 is seconds remaining.
                    # The first report gives the total length; later ones
                    # are used to derive the elapsed position.
                    process_data = strout.split(' ')
                    process_location = float(process_data[4])
                    if self.process_first:
                        self.process_length = process_location
                        self.process_first = False
                        self.process_location = 0
                    else:
                        self.process_location = self.process_length - process_location  # NOQA
                    continue
                elif strout[:2] == '@E':
                    # '@E' error: the old-API URL failed, so fetch an
                    # alternative URL from the new API and retry.
                    sid = popenArgs['song_id']
                    new_url = NetEase().songs_detail_new_api([sid])[0]['url']
                    if new_url is None:
                        log.warning(('Song {} is unavailable '
                                     'due to copyright issue.').format(sid))
                        break
                    log.warning(
                        'Song {} is not compatible with old api.'.format(sid))
                    popenArgs['mp3_url'] = new_url

                    self.popen_handler.stdin.write(b'\nL ' + new_url.encode('utf-8') + b'\n')
                    self.popen_handler.stdin.flush()
                    self.popen_handler.stdout.readline()
                elif strout == '@P 0\n':
                    # '@P 0' means playback stopped: quit and kill mpg123.
                    self.popen_handler.stdin.write(b'Q\n')
                    self.popen_handler.stdin.flush()
                    self.popen_handler.kill()
                    break

            # Only advance to the next song on natural completion, not
            # when playback was stopped externally.
            if self.playing_flag:
                self.next_idx()
                onExit()
            return

        def getLyric():
            # Fetch and cache the lyric for the current song (no-op when
            # already cached or when none is found).
            if 'lyric' not in self.songs[str(self.playing_id)].keys():
                self.songs[str(self.playing_id)]['lyric'] = []
            if len(self.songs[str(self.playing_id)]['lyric']) > 0:
                return
            netease = NetEase()
            lyric = netease.song_lyric(self.playing_id)
            if lyric == [] or lyric == '未找到歌词':
                return
            lyric = lyric.split('\n')
            self.songs[str(self.playing_id)]['lyric'] = lyric
            return

        def gettLyric():
            # Same as getLyric but for the translated lyric ('tlyric').
            if 'tlyric' not in self.songs[str(self.playing_id)].keys():
                self.songs[str(self.playing_id)]['tlyric'] = []
            if len(self.songs[str(self.playing_id)]['tlyric']) > 0:
                return
            netease = NetEase()
            tlyric = netease.song_tlyric(self.playing_id)
            if tlyric == [] or tlyric == '未找到歌词翻译':
                return
            tlyric = tlyric.split('\n')
            self.songs[str(self.playing_id)]['tlyric'] = tlyric
            return

        def cacheSong(song_id, song_name, artist, song_url):
            # Download the song in the background; on completion record
            # the local path so future plays use the cache.
            def cacheExit(song_id, path):
                self.songs[str(song_id)]['cache'] = path

            self.cache.add(song_id, song_name, artist, song_url, cacheExit)
            self.cache.start_download()

        # Prefer a previously cached local file; otherwise stream the URL
        # and kick off a background download for next time.
        if 'cache' in popenArgs.keys() and os.path.isfile(popenArgs['cache']):
            thread = threading.Thread(target=runInThread,
                                      args=(onExit, popenArgs['cache']))
        else:
            thread = threading.Thread(target=runInThread,
                                      args=(onExit, popenArgs['mp3_url']))
            cache_thread = threading.Thread(
                target=cacheSong,
                args=(popenArgs['song_id'], popenArgs['song_name'], popenArgs[
                    'artist'], popenArgs['mp3_url']))
            cache_thread.start()
        thread.start()
        lyric_download_thread = threading.Thread(target=getLyric, args=())
        lyric_download_thread.start()
        tlyric_download_thread = threading.Thread(target=gettLyric, args=())
        tlyric_download_thread.start()
        # returns immediately after the thread starts
        return thread

Example 35

Project: tz.js
Source File: build-tests.py
View license
def output_tests(source_prefix, zdump_command, io):
    """Write an HTML test page for tz.js to *io*.

    source_prefix is the directory containing compiled zoneinfo files,
    zdump_command is the path to the zdump binary, and io is a writable
    file-like object. Expected offsets/abbreviations are derived from
    the system `date` command (with TZ pointed at source_prefix) and
    from zdump transition listings, then emitted as check_offset() calls
    in the generated page. Note: this is Python 2 code (prng.next()).
    """
    all_zones = list(generate_zones(source_prefix))

    # Static page skeleton: the in-page test harness (assert/is/
    # check_offset) plus a few hand-written fixed tests.
    io.write("""<!DOCTYPE HTML>
<title>tz.js tests (generated by """ + __file__ + """)</title>
<script src="tz.js"></script>
<pre id="output"></pre>
<script>
var output_node = document.createTextNode("");
document.getElementById("output").appendChild(output_node);
function print(s)
{
    output_node.appendData(s + "\\n");
}

var pass_count = 0, fail_count = 0;

function assert(cond, description)
{
    if (cond) {
        ++pass_count;
    } else {
        ++fail_count;
        print("FAIL: " + description);
    }
}

function is(value, expected, description)
{
    assert(value == expected,
           description + ":  " + value + " should equal " + expected);
}

function check_offset(zone, d, utcoff, abbr)
{
    var z = tz.zoneAt(zone, new Date(d * 1000));
    is(z.offset, utcoff, zone + " at " + d);
    is(z.abbr, abbr, zone + " at " + d);
}

/*
 * Check a non-round-second values, since the tests below are largely round.
 *
 * The last two could become invalid in the future.
 */
check_offset("America/Los_Angeles", 1300010399.999, -28800, "PST");
check_offset("America/Los_Angeles", 1300010400.001, -25200, "PDT");
check_offset("America/Los_Angeles", 1308469553.734, -25200, "PDT");
check_offset("America/Los_Angeles", 2519888399.999, -25200, "PDT");
check_offset("America/Los_Angeles", 2519888400.001, -28800, "PST");

/*
 * Use the same values to test Etc/UTC, which we don't otherwise test.
 */
check_offset("Etc/UTC", 1300010399.999, 0, "UTC");
check_offset("Etc/UTC", 1300010400, 0, "UTC");
check_offset("Etc/UTC", 1300010400.001, 0, "UTC");
check_offset("Etc/UTC", 1308469553.734, 0, "UTC");
check_offset("Etc/UTC", 2519888399.999, 0, "UTC");
check_offset("Etc/UTC", 2519888400, 0, "UTC");
check_offset("Etc/UTC", 2519888400.001, 0, "UTC");
""")

    # Emit one check_offset(...) line into the generated page.
    def output_check_offset(zone, d, utcoff, abbr):
        io.write("check_offset(\"{0}\", {1}, {2}, \"{3}\");\n" \
                   .format(zone, d, utcoff, abbr));

    # Matches `date +'%Z %::z'` output, e.g. "PST -08:00:00".
    date_zone_re = re.compile("^([^ ]*) ([+-])(\d{2}):(\d{2}):(\d{2})$")
    def write_expected(time):
        # Format a Unix timestamp for `date --date=@N`.
        return "@" + str(math.trunc(time))
    def read_expected(dateprocess):
        # Read one "ABBR +hh:mm:ss" line and return (utcoff_seconds, abbr).
        (abbr, sign, hours, mins, secs) = date_zone_re.match(
            dateprocess.stdout.readline().rstrip("\n")).groups()
        utcoff = ((sign == "+") * 2 - 1) * \
                 (3600 * int(hours) + 60 * int(mins) + int(secs))
        return (utcoff, abbr)
    def expected_for(zone, time):
        # One-shot `date` invocation with TZ set to the zone under test;
        # returns (utcoff, abbr) at the given timestamp.
        date_process = subprocess.Popen(['date',
                                         '--date=' + write_expected(time),
                                         '+%Z %::z'],
                                        stdout = subprocess.PIPE,
                                        env={"TZ": os.path.join(source_prefix, zone)})
        result = read_expected(date_process)
        date_process.stdout.close()
        return result

    io.write("""
/*
 * Generate tests based on all the transitions shown by zdump for each zone.
 */
""")

    sys.stderr.write("Preparing to build transition tests.\n")

    # Compute the Unix timestamp of STOP_YEAR-01-01 00:00:00 UTC.
    date_process = subprocess.Popen(['date',
                                     '--date=' + str(STOP_YEAR) +
                                     '-01-01 00:00:00 UTC', '+%s'],
                                    stdout = subprocess.PIPE)
    stop_d = int(date_process.stdout.read().rstrip("\n"))
    date_process.stdout.close()
    def zdump_for(zone):
        # Yield (date_utc, date_loc, isdst, gmtoff) string tuples for
        # every transition zdump reports in [START_YEAR, STOP_YEAR).
        zdump = subprocess.Popen([zdump_command,
                                  '-v',
                                  '-c', str(START_YEAR) + "," + str(STOP_YEAR),
                                  zone],
                                 stdout=subprocess.PIPE)
        zdump_re = re.compile("^" + zone + "  ([^=]+) = ([^=]+) isdst=([01]) gmtoff=(-?\d+)$")
        for line in zdump.stdout:
            line = line.rstrip("\n")
            if line.endswith(" = NULL"):
                continue
            yield zdump_re.match(line).groups()
    # FIXME: spread this across cores
    zdumps = [(zone, list(zdump_for(zone))) for zone in all_zones]
    # Write all the dates to one file and run them through a single
    # date process, for speed.
    datefile = tempfile.NamedTemporaryFile(delete=False)
    for (zone, zdump) in zdumps:
        for (date_utc, date_loc, isdst, utcoff) in zdump:
            datefile.write(date_utc + "\n")
    datefile.close()
    date_process = subprocess.Popen(['date',
                                     '--file=' + datefile.name, '+%s'],
                                    stdout = subprocess.PIPE)
    prev_zone = None
    for (zone, zdump) in zdumps:
        if zone != prev_zone:
            prev_zone = zone
            sys.stderr.write("Building transition tests for zone " + zone + "\n")
        def output_test(d, utcoff, abbr):
            output_check_offset(zone, d, utcoff, abbr)
        first = True
        first_after_1970 = True
        prev_utcoff = None
        prev_abbr = None
        for (date_utc, date_loc, isdst, utcoff) in zdump:
            isdst = bool(isdst) # not really needed
            utcoff = int(utcoff)
            # The date process emits timestamps in the same order the
            # transitions were written above, so read them in lockstep.
            d = int(date_process.stdout.readline().rstrip("\n"))
            abbr = date_loc.split(" ")[-1]
            if d >= 0:
                # tz.js only covers the post-1970 era; synthesize a test
                # at the epoch for the state in effect at d == 0.
                if first_after_1970 and d != 0 and not first:
                    output_test(0, prev_utcoff, prev_abbr)
                if first and d > 0:
                    output_test(0, utcoff, abbr)
                output_test(d, utcoff, abbr)
                first_after_1970 = False
            first = False
            prev_utcoff = utcoff
            prev_abbr = abbr
        if first:
            # This zone (Pacific/Johnston) has no transitions, but we
            # can still test it.
            (prev_utcoff, prev_abbr) = expected_for(zone, 0)
        if first_after_1970:
            output_test(0, prev_utcoff, prev_abbr)
        # Also pin the state at the end of the tested range.
        output_test(stop_d, prev_utcoff, prev_abbr)
    date_process.stdout.close()
    os.unlink(datefile.name)
    io.write("""

/*
 * Generate a fixed set of random tests using a linear-congruential
 * PRNG.  This does a good bit of testing of the space in a random way,
 * but uses a fixed random seed to always get the same set of tests.
 * See http://en.wikipedia.org/wiki/Linear_congruential_generator (using
 * the numbers from Numerical Recipes).
 *
 * And while we're here, toss in some tests for midnight boundaries
 * around the new year.
 */
""")
    def lc_prng(): # a generator
        # a randomly (once) generated number in [0,2^32)
        rand_state = 1938266273;
        while True:
            yield 1.0 * rand_state / 0x100000000 # value in [0,1)
            rand_state = ((rand_state * 1664525) + 1013904223) % 0x100000000

    prng = lc_prng()
    def random_time():
        # pick a random time in 1970...STOP_SECS.  Use two random
        # numbers so we use the full space, random down to the
        # millisecond.
        time = (prng.next() * STOP_SECS) + (prng.next() * 0x100000000 / 1000)
        time = time % STOP_SECS
        time = math.floor(time * 1000) / 1000
        return time
    def midnight_rule_time(i):
        # return 2049-12-31 00:30 UTC + i hours
        return 2524523400 + 3600 * i
    # For each time zone, we make 100 random tests, and some additional
    # tests.  Do each zone together so that we can easily use a single
    # date process for each zone.
    for zone in all_zones:
        sys.stderr.write("Building tests for zone " + zone + "\n")
        # 100 random tests, then specifically test 48 hours around new
        # years 2050 to test rule edge cases
        test_times = [random_time() for i in range(100)] + \
                     [midnight_rule_time(i) for i in range(48)]
        # Write all the dates to one file and run them through a single
        # date process, for speed.
        datefile = tempfile.NamedTemporaryFile(delete=False)
        for time in test_times:
            datefile.write(write_expected(time) + "\n")
        datefile.close()
        # FIXME: This is using the system's date command, which might
        # not be compatible with the timezone data it's being given.
        # (For example, if you have a system date command that doesn't
        # understand version 3 timezone file formats, you'll fail some
        # post-2038 tests for America/Godthab.)
        date_process = subprocess.Popen(['date',
                                         '--file=' + datefile.name,
                                         '+%Z %::z'],
                                        stdout = subprocess.PIPE,
                                        env={"TZ": os.path.join(source_prefix, zone)})
        for time in test_times:
            (utcoff, abbr) = read_expected(date_process)
            output_check_offset(zone, time, utcoff, abbr)
        date_process.stdout.close()
        os.unlink(datefile.name)
    # Fixed regression tests for the datesFor() API.
    io.write("""
/*
 * Some fixed tests for window.tz.datesFor
 */
var df = window.tz.datesFor("America/Los_Angeles", 2011, 1, 1, 0, 0, 0);
is(df.length, 1, "datesFor (1) length");
is(df[0].offset, -28800, "datesFor(1) [0].offset");
is(df[0].abbr, "PST", "datesFor(1) [0].abbr");
is(df[0].date.valueOf(), 1293868800000, "datesFor(1) [0].date.valueOf()");
df = window.tz.datesFor("America/Los_Angeles", 2011, 3, 13, 2, 30, 0);
is(df.length, 0, "datesFor (2) length");
df = window.tz.datesFor("America/Los_Angeles", 2011, 11, 6, 1, 30, 0);
is(df.length, 2, "datesFor (3) length");
is(df[0].offset, -25200, "datesFor(3) [0].offset");
is(df[0].abbr, "PDT", "datesFor(3) [0].abbr");
is(df[0].date.valueOf(), 1320568200000, "datesFor(3) [0].date.valueOf()");
is(df[1].offset, -28800, "datesFor(3) [1].offset");
is(df[1].abbr, "PST", "datesFor(3) [1].abbr");
is(df[1].date.valueOf(), 1320571800000, "datesFor(3) [1].date.valueOf()");
""")

    io.write("""
print("Totals:  " + pass_count + " passed, " + fail_count + " failed.");
</script>
""")

Example 36

Project: letsencrypt-nosudo
Source File: sign_csr.py
View license
def sign_csr(pubkey, csr, email=None, file_based=False):
    """Use the ACME protocol to get an ssl certificate signed by a
    certificate authority.

    The flow is deliberately interactive and never touches the user's
    account private key: every payload that needs a signature is written
    to a temp file, the user is asked to sign it out-of-band with
    ``openssl dgst``, and the signatures are then read back and submitted
    to the CA along with the JWS payloads.

    NOTE(review): this is Python 2 code (``urllib2``, ``raw_input``,
    str-based base64) and targets the legacy ACME v1 endpoints.

    :param string pubkey: Path to the user account public key.
    :param string csr: Path to the certificate signing request.
    :param string email: An optional user account contact email
                         (defaults to [email protected]<shortest_domain>)
                         NOTE(review): the address above appears mangled
                         by HTML email-protection scraping; upstream uses
                         ``webmaster@<shortest domain>`` — verify.
    :param bool file_based: An optional flag indicating that the
                            hosting should be file-based rather
                            than providing a simple python HTTP
                            server.

    :returns: Signed Certificate (PEM format)
    :rtype: string

    :raises IOError: if the public key or CSR cannot be parsed by openssl.
    :raises KeyError: if a domain challenge ends in a non-valid status.
    """
    #CA = "https://acme-staging.api.letsencrypt.org"
    CA = "https://acme-v01.api.letsencrypt.org"
    TERMS = "https://letsencrypt.org/documents/LE-SA-v1.1.1-August-1-2016.pdf"
    # Any request to the /directory endpoint returns a Replay-Nonce header;
    # a HEAD request is the cheapest way to harvest a fresh nonce, and this
    # request object is re-opened once per signed payload below.
    nonce_req = urllib2.Request("{0}/directory".format(CA))
    nonce_req.get_method = lambda : 'HEAD'

    def _b64(b):
        "Shortcut function to go from bytes to jwt base64 string"
        return base64.urlsafe_b64encode(b).replace("=", "")

    # Step 1: Get account public key
    sys.stderr.write("Reading pubkey file...\n")
    proc = subprocess.Popen(["openssl", "rsa", "-pubin", "-in", pubkey, "-noout", "-text"],
        stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = proc.communicate()
    if proc.returncode != 0:
        raise IOError("Error loading {0}".format(pubkey))
    # Pull the modulus and exponent out of openssl's human-readable dump.
    pub_hex, pub_exp = re.search(
        "Modulus(?: \((?:2048|4096) bit\)|)\:\s+00:([a-f0-9\:\s]+?)Exponent\: ([0-9]+)",
        out, re.MULTILINE|re.DOTALL).groups()
    pub_mod = binascii.unhexlify(re.sub("(\s|:)", "", pub_hex))
    pub_mod64 = _b64(pub_mod)
    pub_exp = int(pub_exp)
    # Hex-encode the exponent, left-padding to an even number of digits so
    # unhexlify accepts it.
    pub_exp = "{0:x}".format(pub_exp)
    pub_exp = "0{0}".format(pub_exp) if len(pub_exp) % 2 else pub_exp
    pub_exp = binascii.unhexlify(pub_exp)
    pub_exp64 = _b64(pub_exp)
    # JWS header reused (deep-copied) for every signed request below.
    header = {
        "alg": "RS256",
        "jwk": {
            "e": pub_exp64,
            "kty": "RSA",
            "n": pub_mod64,
        },
    }
    # Thumbprint of the account key JWK; combined with challenge tokens to
    # build the key authorizations in Step 8.
    accountkey_json = json.dumps(header['jwk'], sort_keys=True, separators=(',', ':'))
    thumbprint = _b64(hashlib.sha256(accountkey_json).digest())
    sys.stderr.write("Found public key!\n")

    # Step 2: Get the domain names to be certified
    sys.stderr.write("Reading csr file...\n")
    proc = subprocess.Popen(["openssl", "req", "-in", csr, "-noout", "-text"],
        stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = proc.communicate()
    if proc.returncode != 0:
        raise IOError("Error loading {0}".format(csr))
    # Collect the CN plus any DNS subjectAltNames from the CSR text dump.
    domains = set([])
    common_name = re.search("Subject:.*? CN=([^\s,;/]+)", out)
    if common_name is not None:
        domains.add(common_name.group(1))
    subject_alt_names = re.search("X509v3 Subject Alternative Name: \n +([^\n]+)\n", out, re.MULTILINE|re.DOTALL)
    if subject_alt_names is not None:
        for san in subject_alt_names.group(1).split(", "):
            if san.startswith("DNS:"):
                domains.add(san[4:])
    sys.stderr.write("Found domains {0}\n".format(", ".join(domains)))

    # Step 3: Ask user for contact email
    if not email:
        # NOTE(review): this literal appears mangled by the scrape
        # (CloudFlare-style email protection); upstream reads
        # "webmaster@{0}".format(...). Verify before relying on it.
        default_email = "[email protected]{0}".format(min(domains, key=len))
        stdout = sys.stdout
        sys.stdout = sys.stderr
        input_email = raw_input("STEP 1: What is your contact email? ({0}) ".format(default_email))
        email = input_email if input_email else default_email
        sys.stdout = stdout

    # Step 4: Generate the payloads that need to be signed
    # registration
    sys.stderr.write("Building request payloads...\n")
    reg_nonce = urllib2.urlopen(nonce_req).headers['Replay-Nonce']
    reg_raw = json.dumps({
        "resource": "new-reg",
        "contact": ["mailto:{0}".format(email)],
        "agreement": TERMS,
    }, sort_keys=True, indent=4)
    reg_b64 = _b64(reg_raw)
    # The JWS "protected" header carries the per-request anti-replay nonce.
    reg_protected = copy.deepcopy(header)
    reg_protected.update({"nonce": reg_nonce})
    reg_protected64 = _b64(json.dumps(reg_protected, sort_keys=True, indent=4))
    # "protected64.payload64" is the exact byte string the user must sign.
    reg_file = tempfile.NamedTemporaryFile(dir=".", prefix="register_", suffix=".json")
    reg_file.write("{0}.{1}".format(reg_protected64, reg_b64))
    reg_file.flush()
    reg_file_name = os.path.basename(reg_file.name)
    reg_file_sig = tempfile.NamedTemporaryFile(dir=".", prefix="register_", suffix=".sig")
    reg_file_sig_name = os.path.basename(reg_file_sig.name)

    # need signature for each domain identifiers
    ids = []
    for domain in domains:
        sys.stderr.write("Building request for {0}...\n".format(domain))
        id_nonce = urllib2.urlopen(nonce_req).headers['Replay-Nonce']
        id_raw = json.dumps({
            "resource": "new-authz",
            "identifier": {
                "type": "dns",
                "value": domain,
            },
        }, sort_keys=True)
        id_b64 = _b64(id_raw)
        id_protected = copy.deepcopy(header)
        id_protected.update({"nonce": id_nonce})
        id_protected64 = _b64(json.dumps(id_protected, sort_keys=True, indent=4))
        id_file = tempfile.NamedTemporaryFile(dir=".", prefix="domain_", suffix=".json")
        id_file.write("{0}.{1}".format(id_protected64, id_b64))
        id_file.flush()
        id_file_name = os.path.basename(id_file.name)
        id_file_sig = tempfile.NamedTemporaryFile(dir=".", prefix="domain_", suffix=".sig")
        id_file_sig_name = os.path.basename(id_file_sig.name)
        # Keep the NamedTemporaryFile objects alive in this dict; letting
        # them be garbage-collected would delete the files on disk.
        ids.append({
            "domain": domain,
            "protected64": id_protected64,
            "data64": id_b64,
            "file": id_file,
            "file_name": id_file_name,
            "sig": id_file_sig,
            "sig_name": id_file_sig_name,
        })

    # need signature for the final certificate issuance
    sys.stderr.write("Building request for CSR...\n")
    proc = subprocess.Popen(["openssl", "req", "-in", csr, "-outform", "DER"],
        stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    csr_der, err = proc.communicate()
    csr_der64 = _b64(csr_der)
    csr_nonce = urllib2.urlopen(nonce_req).headers['Replay-Nonce']
    csr_raw = json.dumps({
        "resource": "new-cert",
        "csr": csr_der64,
    }, sort_keys=True, indent=4)
    csr_b64 = _b64(csr_raw)
    csr_protected = copy.deepcopy(header)
    csr_protected.update({"nonce": csr_nonce})
    csr_protected64 = _b64(json.dumps(csr_protected, sort_keys=True, indent=4))
    csr_file = tempfile.NamedTemporaryFile(dir=".", prefix="cert_", suffix=".json")
    csr_file.write("{0}.{1}".format(csr_protected64, csr_b64))
    csr_file.flush()
    csr_file_name = os.path.basename(csr_file.name)
    csr_file_sig = tempfile.NamedTemporaryFile(dir=".", prefix="cert_", suffix=".sig")
    csr_file_sig_name = os.path.basename(csr_file_sig.name)

    # Step 5: Ask the user to sign the registration and requests
    sys.stderr.write("""\
STEP 2: You need to sign some files (replace 'user.key' with your user private key).

openssl dgst -sha256 -sign user.key -out {0} {1}
{2}
openssl dgst -sha256 -sign user.key -out {3} {4}

""".format(
    reg_file_sig_name, reg_file_name,
    "\n".join("openssl dgst -sha256 -sign user.key -out {0} {1}".format(i['sig_name'], i['file_name']) for i in ids),
    csr_file_sig_name, csr_file_name))

    # Temporarily point stdout at stderr so the raw_input prompt is visible
    # even when stdout is redirected (the cert goes to stdout).
    stdout = sys.stdout
    sys.stdout = sys.stderr
    raw_input("Press Enter when you've run the above commands in a new terminal window...")
    sys.stdout = stdout

    # Step 6: Load the signatures
    reg_file_sig.seek(0)
    reg_sig64 = _b64(reg_file_sig.read())
    for n, i in enumerate(ids):
        i['sig'].seek(0)
        i['sig64'] = _b64(i['sig'].read())

    # Step 7: Register the user
    sys.stderr.write("Registering {0}...\n".format(email))
    reg_data = json.dumps({
        "header": header,
        "protected": reg_protected64,
        "payload": reg_b64,
        "signature": reg_sig64,
    }, sort_keys=True, indent=4)
    reg_url = "{0}/acme/new-reg".format(CA)
    try:
        resp = urllib2.urlopen(reg_url, reg_data)
        result = json.loads(resp.read())
    except urllib2.HTTPError as e:
        err = e.read()
        # skip already registered accounts
        if "Registration key is already in use" in err:
            sys.stderr.write("Already registered. Skipping...\n")
        else:
            sys.stderr.write("Error: reg_data:\n")
            sys.stderr.write("POST {0}\n".format(reg_url))
            sys.stderr.write(reg_data)
            sys.stderr.write("\n")
            sys.stderr.write(err)
            sys.stderr.write("\n")
            raise

    # Step 8: Request challenges for each domain
    responses = []
    tests = []
    for n, i in enumerate(ids):
        sys.stderr.write("Requesting challenges for {0}...\n".format(i['domain']))
        id_data = json.dumps({
            "header": header,
            "protected": i['protected64'],
            "payload": i['data64'],
            "signature": i['sig64'],
        }, sort_keys=True, indent=4)
        id_url = "{0}/acme/new-authz".format(CA)
        try:
            resp = urllib2.urlopen(id_url, id_data)
            result = json.loads(resp.read())
        except urllib2.HTTPError as e:
            sys.stderr.write("Error: id_data:\n")
            sys.stderr.write("POST {0}\n".format(id_url))
            sys.stderr.write(id_data)
            sys.stderr.write("\n")
            sys.stderr.write(e.read())
            sys.stderr.write("\n")
            raise
        # Only the http-01 challenge type is supported here.
        challenge = [c for c in result['challenges'] if c['type'] == "http-01"][0]
        # Key authorization = "<token>.<account key thumbprint>", served by
        # the user and echoed back to the CA.
        keyauthorization = "{0}.{1}".format(challenge['token'], thumbprint)

        # challenge request
        sys.stderr.write("Building challenge responses for {0}...\n".format(i['domain']))
        test_nonce = urllib2.urlopen(nonce_req).headers['Replay-Nonce']
        test_raw = json.dumps({
            "resource": "challenge",
            "keyAuthorization": keyauthorization,
        }, sort_keys=True, indent=4)
        test_b64 = _b64(test_raw)
        test_protected = copy.deepcopy(header)
        test_protected.update({"nonce": test_nonce})
        test_protected64 = _b64(json.dumps(test_protected, sort_keys=True, indent=4))
        test_file = tempfile.NamedTemporaryFile(dir=".", prefix="challenge_", suffix=".json")
        test_file.write("{0}.{1}".format(test_protected64, test_b64))
        test_file.flush()
        test_file_name = os.path.basename(test_file.name)
        test_file_sig = tempfile.NamedTemporaryFile(dir=".", prefix="challenge_", suffix=".sig")
        test_file_sig_name = os.path.basename(test_file_sig.name)
        tests.append({
            "uri": challenge['uri'],
            "protected64": test_protected64,
            "data64": test_b64,
            "file": test_file,
            "file_name": test_file_name,
            "sig": test_file_sig,
            "sig_name": test_file_sig_name,
        })

        # challenge response for server
        responses.append({
            "uri": ".well-known/acme-challenge/{0}".format(challenge['token']),
            "data": keyauthorization,
        })

    # Step 9: Ask the user to sign the challenge responses
    sys.stderr.write("""\
STEP 3: You need to sign some more files (replace 'user.key' with your user private key).

{0}

""".format(
    "\n".join("openssl dgst -sha256 -sign user.key -out {0} {1}".format(
        i['sig_name'], i['file_name']) for i in tests)))

    stdout = sys.stdout
    sys.stdout = sys.stderr
    raw_input("Press Enter when you've run the above commands in a new terminal window...")
    sys.stdout = stdout

    # Step 10: Load the response signatures
    for n, i in enumerate(ids):
        tests[n]['sig'].seek(0)
        tests[n]['sig64'] = _b64(tests[n]['sig'].read())

    # Step 11: Ask the user to host the token on their server
    for n, i in enumerate(ids):
        if file_based:
            sys.stderr.write("""\
STEP {0}: Please update your server to serve the following file at this URL:

--------------
URL: http://{1}/{2}
File contents: \"{3}\"
--------------

Notes:
- Do not include the quotes in the file.
- The file should be one line without any spaces.

""".format(n + 4, i['domain'], responses[n]['uri'], responses[n]['data']))

            stdout = sys.stdout
            sys.stdout = sys.stderr
            raw_input("Press Enter when you've got the file hosted on your server...")
            sys.stdout = stdout
        else:
            sys.stderr.write("""\
STEP {0}: You need to run this command on {1} (don't stop the python command until the next step).

sudo python -c "import BaseHTTPServer; \\
    h = BaseHTTPServer.BaseHTTPRequestHandler; \\
    h.do_GET = lambda r: r.send_response(200) or r.end_headers() or r.wfile.write('{2}'); \\
    s = BaseHTTPServer.HTTPServer(('0.0.0.0', 80), h); \\
    s.serve_forever()"

""".format(n + 4, i['domain'], responses[n]['data']))

            stdout = sys.stdout
            sys.stdout = sys.stderr
            raw_input("Press Enter when you've got the python command running on your server...")
            sys.stdout = stdout

        # Step 12: Let the CA know you're ready for the challenge
        sys.stderr.write("Requesting verification for {0}...\n".format(i['domain']))
        test_data = json.dumps({
            "header": header,
            "protected": tests[n]['protected64'],
            "payload": tests[n]['data64'],
            "signature": tests[n]['sig64'],
        }, sort_keys=True, indent=4)
        test_url = tests[n]['uri']
        try:
            resp = urllib2.urlopen(test_url, test_data)
            test_result = json.loads(resp.read())
        except urllib2.HTTPError as e:
            sys.stderr.write("Error: test_data:\n")
            sys.stderr.write("POST {0}\n".format(test_url))
            sys.stderr.write(test_data)
            sys.stderr.write("\n")
            sys.stderr.write(e.read())
            sys.stderr.write("\n")
            raise

        # Step 13: Wait for CA to mark test as valid
        # Poll the challenge URI every 2 seconds until it leaves "pending".
        sys.stderr.write("Waiting for {0} challenge to pass...\n".format(i['domain']))
        while True:
            try:
                resp = urllib2.urlopen(test_url)
                challenge_status = json.loads(resp.read())
            except urllib2.HTTPError as e:
                sys.stderr.write("Error: test_data:\n")
                sys.stderr.write("GET {0}\n".format(test_url))
                sys.stderr.write(test_data)
                sys.stderr.write("\n")
                sys.stderr.write(e.read())
                sys.stderr.write("\n")
                raise
            if challenge_status['status'] == "pending":
                time.sleep(2)
            elif challenge_status['status'] == "valid":
                sys.stderr.write("Passed {0} challenge!\n".format(i['domain']))
                break
            else:
                raise KeyError("'{0}' challenge did not pass: {1}".format(i['domain'], challenge_status))

    # Step 14: Get the certificate signed
    sys.stderr.write("Requesting signature...\n")
    csr_file_sig.seek(0)
    csr_sig64 = _b64(csr_file_sig.read())
    csr_data = json.dumps({
        "header": header,
        "protected": csr_protected64,
        "payload": csr_b64,
        "signature": csr_sig64,
    }, sort_keys=True, indent=4)
    csr_url = "{0}/acme/new-cert".format(CA)
    try:
        resp = urllib2.urlopen(csr_url, csr_data)
        signed_der = resp.read()
    except urllib2.HTTPError as e:
        sys.stderr.write("Error: csr_data:\n")
        sys.stderr.write("POST {0}\n".format(csr_url))
        sys.stderr.write(csr_data)
        sys.stderr.write("\n")
        sys.stderr.write(e.read())
        sys.stderr.write("\n")
        raise

    # Step 15: Convert the signed cert from DER to PEM
    sys.stderr.write("Certificate signed!\n")

    if file_based:
        sys.stderr.write("You can remove the acme-challenge file from your webserver now.\n")
    else:
        sys.stderr.write("You can stop running the python command on your server (Ctrl+C works).\n")

    # PEM = base64 of the DER body, wrapped at 64 columns between markers.
    signed_der64 = base64.b64encode(signed_der)
    signed_pem = """\
-----BEGIN CERTIFICATE-----
{0}
-----END CERTIFICATE-----
""".format("\n".join(textwrap.wrap(signed_der64, 64)))

    return signed_pem

Example 37

Project: sandy
Source File: sands4getedk.py
View license
   def getedk(self):

      """
      With the help of gdb and gdbserver, extract the EDK (encrypted disk
      key) on a Samsung Galaxy S4.

      A breakpoint is set on verify_EDK, which is hit when
      ``vdc cryptfs verifypw`` is called; at that point the R0 register
      points at the EDK material, which is dumped and written out via
      write2john/write2footer.

      :returns: 1 on success, -1 on any failure (after cleanup).
      """

      # Bail out early unless the phone is actually encrypted.
      state=checkcryptostate(self.adb)
      if(state==""):
	 self.printer.print_err("The phone is not encrypted. ro.crypto.state is empty!")
	 return -1
      self.printer.print_debug("Check for su!")
      su=checkforsu(self.adb)
      if (su==""):
	 self.printer.print_err("The su comand was not found, hope adb have immediate root access!")
	 su=""
      
      # Pull vold and libsec_km.so off the device so the local gdb can
      # resolve symbols against them (see the later "file" / solib commands).
      self.printer.print_info("Downloading vold and libsec_km.so for gdb...")
      out=self.downloadforgdb("/system/bin/vold")
      if(out==None):
	 self.printer.print_err("Could not download vold! Exiting...")
	 return -1
      out=self.downloadforgdb("/system/lib/libsec_km.so")
      if(out==None):
	 self.printer.print_err("Could not download libsec_km.so! Exiting...")
	 return -1
      self.printer.print_ok("Download - Done")

      # Pick a writable staging directory on the phone: /tmp if present,
      # otherwise /data/local/tmp.
      self.printer.print_debug("Check the destination directory!")
      dest_dir=""
      out=self.adb.shell_command(su+"ls /tmp")
      if (out.find("No such file or directory")!=-1):
	 self.printer.print_err("The /tmp directory was not found! We try the /data/local/tmp directory!")
	 out=self.adb.shell_command(su+"ls /data/local/tmp")
	 #if the directory empty, we will receive NoneType
	 if (out):
	    if (out.find("No such file or directory")!=-1):
	       self.printer.print_err("The /data/local/tmp directory was not found!")
	       self.printer.print_err("We did not found suitable destination directory! Please start the phone with the right recovery.")
	       return -1
	 dest_dir="/data/local/tmp"
      else:
	 dest_dir="/tmp"
      self.printer.print_debug("Use %s on the phone as a detination directory!" % dest_dir)
      
      self.printer.print_info("Uploading the gdb server...")
      push=self.adb.push_local_file("gdb/gdbserver","%s" % dest_dir)
      if(push.find("bytes")==-1):
	 self.printer.print_err("Could not upload the gdb server: %s" % push)
	 return -1
      self.printer.print_ok("Upload - Done")      

      self.print_info_adb(push.rstrip('\n'))
      # Start gdbserver on-device through "adb shell"; it is then reached
      # from the host over a forwarded TCP socket (port 23946).
      self.printer.print_info("Staring gdbserver to listen on port 23946...")
      command="%s shell %s%s/gdbserver --multi :23946" % (self.adb.get_adb_path(),su,dest_dir)
      popen_args=shlex.split(command)
      self.printer.print_debug(command)
      gdbserver=subprocess.Popen(popen_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
      # Read just enough output to see the "Listening ..." banner.
      out=gdbserver.stdout.read(10)
      if(out.find("Listen")==-1):
	 self.printer.print_err("Could not start the gdbserver:")
	 self.printer.print_err("%s" % out.replace("\r\n","\\r\\n"))
	 self.printer.print_err("Cleaning up! You should check the %s directory on the phone!" % (dest_dir))
	 self.cleanup()
	 return -1
      
      self.printer.print_debug("Forwarding tcp socket for gdbserver...")
      out=self.adb.forward_socket("tcp:23946","tcp:23946")
      #out=self.adb.forward_socket("tcp:4446","tcp:4446")
      if(out):
	 self.printer.print_err("Could not bind on 127.0.0.1:23946! Cleaning up! You should check the %s directory on the phone!" % (dest_dir))
	 self.cleanup(gdbserver=gdbserver)
	 return -1
      
      self.printer.print_ok("Server start - Done")
      # Drive the local gdb interactively through pexpect.
      self.printer.print_info("Starting gdb...") 
      #gdb=pexpect.spawn("gdb/gdb", logfile=sys.stdout)
      gdb=pexpect.spawn("gdb/gdb")
      gdb.sendline("target extended-remote :23946")
      ret=gdb.expect(["Remote.*",".*Operation.*", ".*Ignoring.*", pexpect.TIMEOUT])
      if(ret!=0):
	 self.printer.print_err("Could not connect to gdb server! Cleaning up! You should check the %s directory on the phone!" % (dest_dir))
	 self.cleanup(gdb=gdb, gdbserver=gdbserver)
	 return -1
      self.printer.print_ok("gdb connected - Done")
      
      gdb.sendline("file recovery/s4/system/bin/vold") 
      ret=gdb.expect(["Reading.*",".*No such.*", pexpect.TIMEOUT])
      if(ret!=0):
	 self.printer.print_err("We need the vold executable from the phone! Cleaning up! You should check the %s directory on the phone!" % (dest_dir))
	 self.cleanup(gdb=gdb, gdbserver=gdbserver)
	 return -1
      self.printer.print_debug("We sent the file command")
      
      gdb.sendline("set solib-search-path recovery/s4/system/lib/")

      # Locate the vold process id from "ps" output.
      ps=self.adb.shell_command("su -c ps")
      for process in ps.splitlines():
	 if(process.rfind("vold") != -1 ):
	    process=re.sub("\s+", ' ' , process)	
	    self.voldpid=process.split(' ')[1]
      self.printer.print_ok("Found vold process id: %s!" % self.voldpid)
     
      gdb.sendline("attach %s" % (self.voldpid))
      ret=gdb.expect(["0x.*",".*to process.*", pexpect.TIMEOUT])
      if(ret!=0):
	 self.printer.print_err("Could not attach to the vold process: %s! Cleaning up! You should check the %s directory on the phone!" % (self.voldpid, dest_dir))
	 self.cleanup(gdb=gdb, gdbserver=gdbserver)
	 return -1
      self.printer.print_ok("Attached vold process: %s!" % self.voldpid)
      
      # Re-attach to vold's 4th thread, which is the one that services the
      # cryptfs commands (parsed out of "info threads" output).
      gdb.sendline("info threads")
      gdb.expect("  4")
      thread4=gdb.readline().split(' ')[5].split('.')[1]
      #Read the rests
      for i in xrange(3):
	 gdb.readline()
      gdb.sendline("detach")
      gdb.sendline("attach %s" % (thread4))
      ret=gdb.expect(["0x.*",".*to process.*", pexpect.TIMEOUT])
      if(ret!=0):
	 self.printer.print_err("Could not attach to the vold thread: %s! Cleaning up! You should check the %s directory on the phone!" % (thread4, dest_dir))
	 self.cleanup(gdb=gdb, gdbserver=gdbserver)
	 return -1
      self.printer.print_ok("Attached vold thread: %s!" % thread4)
      
      gdb.sendline("break verify_EDK")
      ret=gdb.expect([".*Breakpoint.*",".*pending.*"])
      if(ret!=0):
	 self.printer.print_err("Could not set breakpoint on the verify_EDK function! Cleaning up! You should check the %s directory on the phone!")
	 gdb.sendline("n")
	 self.cleanup(gdb=gdb, gdbserver=gdbserver)
	 return -1
      self.printer.print_debug("Breakpoint was set on the verify_EDK function.")
      
      # Trigger verify_EDK by asking vold to verify a (dummy) password.
      self.printer.print_debug("Staring vdc:")
      command="%s shell %s vdc cryptfs verifypw 1234" % (self.adb.get_adb_path(),su)
      popen_args=shlex.split(command)
      self.printer.print_debug(command)
      vdc=subprocess.Popen(popen_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
      out=vdc.stdout.read(15)
      if(out.find("command cryptfs")==-1):
	 self.printer.print_err("Could not start vdc: %s" % (out))
	 self.printer.print_err(" Cleaning up! You should check the %s directory on the phone!" % (dest_dir))
	 self.cleanup(gdb=gdb, gdbserver=gdbserver)
	 return -1
      
      gdb.sendline("c")
      ret=gdb.expect(".*Breakpoint.*")
      self.printer.print_debug("Breakpoint hit! Let's investigate the first parameter (r0)!")
      # NOTE(review): the dump assumes the key material starts at offset 32
      # of the structure r0 points to — confirm against the S4 firmware.
      gdb.sendline("x /80bx $r0+32")
      gdb.expect(":")
      # Collect 10 lines of "addr: byte byte ..." output and strip them down
      # to a single uppercase hex string.
      hex_list=[]
      for i in range(0,10):
	 if(i==0):
	    line=gdb.before+gdb.readline()
	 else:
	    line=gdb.readline()
	 hex_line=''.join(line.split("\t")[1:]).replace("0x","").rstrip("\r\n").upper()
	 hex_list.append(hex_line)
       
      self.write2john(''.join(hex_list))
      self.write2footer(''.join(hex_list))
      
      self.cleanup(gdb=gdb, gdbserver=gdbserver)
   
      return 1

Example 38

Project: wicd
Source File: setup.py
View license
    def initialize_options(self):
        self.lib = '/usr/lib/wicd/'
        self.share = '/usr/share/wicd/'
        self.etc = '/etc/wicd/'
        self.scripts = self.etc + "scripts/"
        self.encryption = self.etc + 'encryption/templates/'
        self.bin = '/usr/bin/'
        self.sbin = '/usr/sbin/'
        self.daemon = self.share + 'daemon'
        self.backends = self.share + 'backends'
        self.curses = self.share + 'curses'
        self.gtk = self.share + 'gtk'
        self.cli = self.share + 'cli'
        self.gnome_shell_extensions = '/usr/share/gnome-shell/extensions/'
        self.icons = '/usr/share/icons/hicolor/'
        self.pixmaps = '/usr/share/pixmaps/'
        self.images = self.share + 'icons'
        self.varlib = '/var/lib/wicd/'
        self.networks = self.varlib + 'configurations/'
        self.log = '/var/log/wicd/'
        self.resume = '/etc/acpi/resume.d/'
        self.suspend = '/etc/acpi/suspend.d/'
        self.pmutils = '/usr/lib/pm-utils/sleep.d/'
        self.dbus = '/etc/dbus-1/system.d/'
        self.dbus_service = '/usr/share/dbus-1/system-services/'
        self.systemd = '/lib/systemd/system/'
        self.logrotate = '/etc/logrotate.d/'
        self.desktop = '/usr/share/applications/'
        self.translations = '/usr/share/locale/'
        self.autostart = '/etc/xdg/autostart/'
        self.docdir = '/usr/share/doc/wicd/'
        self.mandir = '/usr/share/man/'
        self.kdedir = '/usr/share/autostart/'
        self.distro = 'auto'
        
        self.no_install_init = False
        self.no_install_man = False
        self.no_install_i18n = False
        self.no_install_i18n_man = False
        self.no_install_kde = False
        self.no_install_acpi = False
        self.no_install_pmutils = False
        self.no_install_docs = False
        self.no_install_gtk = False
        self.no_install_ncurses = False
        self.no_install_cli = False
        self.no_install_gnome_shell_extensions = False
        self.no_use_notifications = False

        # Determine the default init file location on several different distros
        self.distro_detect_failed = False
        
        self.initfile = 'init/default/wicd'
        # ddistro is the detected distro
        if os.path.exists('/etc/redhat-release'):
            self.ddistro = 'redhat'
        elif os.path.exists('/etc/SuSE-release'):
            self.ddistro = 'suse'
        elif os.path.exists('/etc/fedora-release'):
            self.ddistro = 'redhat'
        elif os.path.exists('/etc/gentoo-release'):
            self.ddistro = 'gentoo'
        elif os.path.exists('/etc/debian_version'):
            self.ddistro = 'debian'
        elif os.path.exists('/etc/arch-release'):
            self.ddistro = 'arch'
        elif os.path.exists('/etc/slackware-version') or \
             os.path.exists('/etc/slamd64-version') or \
             os.path.exists('/etc/bluewhite64-version'):
            self.ddistro = 'slackware'
        elif os.path.exists('/etc/pld-release'):
            self.ddistro = 'pld'
        elif os.path.exists('/usr/bin/crux'):
            self.ddistro = 'crux'
        elif os.path.exists('/etc/lunar.release'):
            self.distro = 'lunar'
        else:
            self.ddistro = 'FAIL'
            #self.no_install_init = True
            #self.distro_detect_failed = True
            print 'WARNING: Unable to detect the distribution in use.  ' + \
                  'If you have specified --distro or --init and --initfile, configure will continue.  ' + \
                  'Please report this warning, along with the name of your ' + \
                  'distribution, to the wicd developers.'

        # Try to get the pm-utils sleep hooks directory from pkg-config and
        # the kde prefix from kde-config
        # Don't run these in a shell because it's not needed and because shell 
        # swallows the OSError we would get if {pkg,kde}-config do not exist
        # If we don't get anything from *-config, or it didn't run properly, 
        # or the path is not a proper absolute path, raise an error
        try:
            pmtemp = subprocess.Popen(["pkg-config", "--variable=pm_sleephooks", 
                                       "pm-utils"], stdout=subprocess.PIPE)
            returncode = pmtemp.wait() # let it finish, and get the exit code
            pmutils_candidate = pmtemp.stdout.readline().strip() # read stdout
            if len(pmutils_candidate) == 0 or returncode != 0 or \
               not os.path.isabs(pmutils_candidate):
                raise ValueError
            else:
                self.pmutils = pmutils_candidate
        except (OSError, ValueError):
            pass # use our default

        try:
            kdetemp = subprocess.Popen(["kde-config","--prefix"], stdout=subprocess.PIPE)
            returncode = kdetemp.wait() # let it finish, and get the exit code
            kdedir_candidate = kdetemp.stdout.readline().strip() # read stdout
            if len(kdedir_candidate) == 0 or returncode != 0 or \
               not os.path.isabs(kdedir_candidate):
                raise ValueError
            else:
                self.kdedir = kdedir_candidate + '/share/autostart'
        except (OSError, ValueError):
            # If kde-config isn't present, we'll check for kde-4.x
            try:
                kde4temp = subprocess.Popen(["kde4-config","--prefix"], stdout=subprocess.PIPE)
                returncode = kde4temp.wait() # let it finish, and get the exit code
                kde4dir_candidate = kde4temp.stdout.readline().strip() # read stdout
                if len(kde4dir_candidate) == 0 or returncode != 0 or \
                   not os.path.isabs(kde4dir_candidate):
                    raise ValueError
                else:
                    self.kdedir = kde4dir_candidate + '/share/autostart'
            except (OSError, ValueError):
                # If neither kde-config nor kde4-config are not present or 
                # return an error, then we can assume that kde isn't installed
                # on the user's system
                self.no_install_kde = True
                # If the assumption above turns out to be wrong, do this:
                #pass # use our default

        self.python = '/usr/bin/python'
        self.pidfile = '/var/run/wicd/wicd.pid'
        self.initfilename = os.path.basename(self.initfile)
        self.wicdgroup = 'users'
        self.loggroup = ''
        self.logperms = '0600'

Example 39

Project: pwnypack
Source File: asm.py
View license
def asm(code, addr=0, syntax=None, target=None, gnu_binutils_prefix=None):
    """
    Assemble statements into machine readable code.

    Args:
        code(str): The statements to assemble.
        addr(int): The memory address where the code will run.
        syntax(AsmSyntax): The input assembler syntax for x86. Defaults to
            nasm, ignored on other platforms.
        target(~pwnypack.target.Target): The target architecture. The
            global target is used if this argument is ``None``.
        gnu_binutils_prefix(str): When the syntax is AT&T, gnu binutils'
            as and ld will be used. By default, it selects
            ``arm-*-as/ld`` for 32bit ARM targets,
            ``aarch64-*-as/ld`` for 64 bit ARM targets,
            ``i386-*-as/ld`` for 32bit X86 targets and
            ``amd64-*-as/ld`` for 64bit X86 targets (all for various flavors
            of ``*``). This option allows you to pick a different toolchain.
            The prefix should always end with a '-' (or be empty).

    Returns:
        bytes: The assembled machine code.

    Raises:
        SyntaxError: If the assembler statements are invalid.
        NotImplementedError: If an unsupported target platform is specified.

    Example:
        >>> from pwny import *
        >>> asm('''
        ...     pop rdi
        ...     ret
        ... ''', target=Target(arch=Target.Arch.x86, bits=64))
        b'_\\xc3'
    """

    # Fall back to the process-wide default target when none is supplied.
    if target is None:
        target = pwnypack.target.target

    # x86 defaults to nasm syntax; `syntax` is ignored for other arches.
    if syntax is None and target.arch is pwnypack.target.Target.Arch.x86:
        syntax = AsmSyntax.nasm

    # Preferred path: assemble in-process with the keystone engine when it
    # is installed and enabled; no external tools are spawned in this branch.
    if HAVE_KEYSTONE and WANT_KEYSTONE:
        ks_mode = 0
        ks_syntax = None

        if target.arch is pwnypack.target.Target.Arch.x86:
            # Map the target's word size and requested syntax onto
            # keystone's arch/mode/syntax constants.
            ks_arch = keystone.KS_ARCH_X86
            if target.bits is pwnypack.target.Target.Bits.bits_32:
                ks_mode |= keystone.KS_MODE_32
            else:
                ks_mode |= keystone.KS_MODE_64
            if syntax is AsmSyntax.nasm:
                ks_syntax = keystone.KS_OPT_SYNTAX_NASM
            elif syntax is AsmSyntax.intel:
                ks_syntax = keystone.KS_OPT_SYNTAX_INTEL
            else:
                ks_syntax = keystone.KS_OPT_SYNTAX_ATT

        elif target.arch is pwnypack.target.Target.Arch.arm:
            if target.bits is pwnypack.target.Target.Bits.bits_32:
                ks_arch = keystone.KS_ARCH_ARM

                # Translate the target's ARM mode flags (thumb / v8 /
                # M-class) and endianness into keystone mode bits.
                if target.mode & pwnypack.target.Target.Mode.arm_thumb:
                    ks_mode |= keystone.KS_MODE_THUMB
                else:
                    ks_mode |= keystone.KS_MODE_ARM

                if target.mode & pwnypack.target.Target.Mode.arm_v8:
                    ks_mode |= keystone.KS_MODE_V8

                if target.mode & pwnypack.target.Target.Mode.arm_m_class:
                    ks_mode |= keystone.KS_MODE_MICRO

                if target.endian is pwnypack.target.Target.Endian.little:
                    ks_mode |= keystone.KS_MODE_LITTLE_ENDIAN
                else:
                    ks_mode |= keystone.KS_MODE_BIG_ENDIAN
            else:
                # 64-bit ARM: keystone's ARM64 support is little-endian here.
                ks_arch = keystone.KS_ARCH_ARM64
                ks_mode |= keystone.KS_MODE_LITTLE_ENDIAN
        else:
            raise NotImplementedError('Unsupported syntax or target platform.')

        ks = keystone.Ks(ks_arch, ks_mode)
        if ks_syntax is not None:
            ks.syntax = ks_syntax
        try:
            data, insn_count = ks.asm(code, addr)
        except keystone.KsError as e:
            import traceback
            traceback.print_exc()
            # Surface assembler failures uniformly as SyntaxError, matching
            # the external-tool branches below.
            raise SyntaxError(e.message)
        # keystone returns a list of byte values; pack them into bytes.
        return b''.join(six.int2byte(b) for b in data)

    # No keystone: shell out to external assemblers. nasm handles the
    # x86/nasm-syntax case; GNU binutils (as + ld) handle the rest.
    if target.arch is pwnypack.target.Target.Arch.x86 and syntax is AsmSyntax.nasm:
        with tempfile.NamedTemporaryFile() as tmp_asm:
            # Prefix the source with word-size and origin directives so the
            # flat binary is assembled for the requested load address.
            tmp_asm.write(('bits %d\norg %d\n%s' % (target.bits.value, addr, code)).encode('utf-8'))
            tmp_asm.flush()

            tmp_bin_fd, tmp_bin_name = tempfile.mkstemp()
            os.close(tmp_bin_fd)

            try:
                p = subprocess.Popen(
                    [
                        'nasm',
                        '-o', tmp_bin_name,
                        '-f', 'bin',
                        tmp_asm.name,
                    ],
                    stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE,
                )
                stdout, stderr = p.communicate()

                # Non-zero exit: report nasm's stderr as the syntax error.
                if p.returncode:
                    raise SyntaxError(stderr.decode('utf-8'))

                tmp_bin = open(tmp_bin_name, 'rb')
                result = tmp_bin.read()
                tmp_bin.close()
                return result
            finally:
                # Best-effort cleanup of the temporary output file.
                try:
                    os.unlink(tmp_bin_name)
                except OSError:
                    pass
    elif target.arch in (pwnypack.target.Target.Arch.x86, pwnypack.target.Target.Arch.arm):
        preamble = ''
        as_flags = []
        ld_flags = []

        if target.arch is pwnypack.target.Target.Arch.x86:
            # NOTE(review): compares the Bits enum against a plain int
            # (`== 32`) here but uses `is Bits.bits_32` in the keystone
            # branch above — presumably Bits is an IntEnum; confirm in
            # pwnypack.target.
            if target.bits == 32:
                binutils_arch = 'i386'
            else:
                binutils_arch = 'amd64'

            if syntax is AsmSyntax.intel:
                preamble = '.intel_syntax noprefix\n'

            # x86 output is linked straight to a flat binary.
            ld_flags.extend(['--oformat', 'binary'])
        else:
            if target.bits == 32:
                binutils_arch = 'arm'
                if target.mode & pwnypack.target.Target.Mode.arm_v8:
                    as_flags.append('-march=armv8-a')
                elif target.mode & pwnypack.target.Target.Mode.arm_m_class:
                    as_flags.append('-march=armv7m')
            else:
                binutils_arch = 'aarch64'

            if target.endian is pwnypack.target.Target.Endian.little:
                as_flags.append('-mlittle-endian')
                ld_flags.append('-EL')
            else:
                as_flags.append('-mbig-endian')
                ld_flags.append('-EB')

            if target.mode & pwnypack.target.Target.Mode.arm_thumb:
                as_flags.append('-mthumb')

        if gnu_binutils_prefix is None:
            gnu_binutils_prefix = find_binutils_prefix(binutils_arch)

        tmp_out_fd, tmp_out_name = tempfile.mkstemp()
        try:
            os.close(tmp_out_fd)

            # Assemble: feed the (possibly preamble-prefixed) source to
            # `as` on stdin; the object file goes to tmp_out_name.
            p = subprocess.Popen(
                [
                    '%sas' % gnu_binutils_prefix,
                    '-o', tmp_out_name
                ] + as_flags,
                stdin=subprocess.PIPE,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
            )
            stdout, stderr = p.communicate((preamble + code).encode('utf-8'))

            if p.returncode:
                raise SyntaxError(stderr.decode('utf-8'))

            tmp_bin_fd, tmp_bin_name = tempfile.mkstemp()
            try:
                os.close(tmp_bin_fd)

                # Link the object file with .text placed at the requested
                # load address.
                p = subprocess.Popen(
                    [
                        '%sld' % gnu_binutils_prefix,
                        '-Ttext', str(addr),
                    ] + ld_flags + [
                        '-o', tmp_bin_name,
                        tmp_out_name,
                    ],
                    stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE,
                )
                stdout, stderr = p.communicate()

                if p.returncode:
                    raise SyntaxError(stderr.decode('utf-8'))

                if 'binary' in ld_flags:
                    # ld already produced a flat binary; return it as-is.
                    tmp_bin = open(tmp_bin_name, 'rb')
                    result = tmp_bin.read()
                    tmp_bin.close()
                    return result
                else:
                    # Otherwise extract the .text section from the ELF.
                    tmp_bin = ELF(tmp_bin_name)
                    return tmp_bin.get_section_header('.text').content
            finally:
                try:
                    os.unlink(tmp_bin_name)
                except OSError:
                    pass
        finally:
            try:
                os.unlink(tmp_out_name)
            except OSError:
                pass  # pragma: no cover

    else:
        raise NotImplementedError('Unsupported syntax or target platform.')

Example 40

Project: program.plexus
Source File: autoconf.py
View license
def configure_sopcast(latest_version):
	"""Download and install the SopCast client for the current platform.

	Handles Linux (ARM / OpenELEC x86 / x86_64 / generic), Windows (installs
	srvany and a `sopcastp2p` service via `sc` and registry edits), OSX and
	Android. On success stores ``latest_version`` in the add-on settings.

	NOTE(review): relies on many module-level names defined elsewhere in
	this file (settings, addonpath, pastaperfil, download_tools, translate,
	mensagemok, mensagemprogresso, the *_sopcast URLs, ...); only what the
	visible code shows is documented here.
	"""
	#Configuration for LINUX 
	if xbmc.getCondVisibility('system.platform.linux') and not xbmc.getCondVisibility('system.platform.Android'):
		print("Detected OS: Linux")
		#Linux Armv
		if "arm" in os.uname()[4]:
			print("Sopcast Configuration - LINUX ARM")
			if settings.getSetting('rpi2') == "true":
				print("Raspberry PI 2")
				SPSC_KIT = os.path.join(addonpath,sopcast_raspberry.split("/")[-1])
				download_tools().Downloader(sopcast_raspberry,SPSC_KIT,translate(30076),translate(30000))
				if tarfile.is_tarfile(SPSC_KIT):
					path_libraries = os.path.join(pastaperfil,"sopcast")
					download_tools().extract(SPSC_KIT,path_libraries)
					xbmc.sleep(500)
					download_tools().remove(SPSC_KIT)
				if latest_version: settings.setSetting('sopcast_version',value=latest_version)
				return

		elif os.uname()[4] == "x86_64":
			generic = False
			if settings.getSetting('openelecx86_64') == "true":
				print("Detected OpenELEC x86_64")
				SPSC_KIT = os.path.join(addonpath,openelecx86_64_sopcast.split("/")[-1])
				download_tools().Downloader(openelecx86_64_sopcast,SPSC_KIT,translate(30076),translate(30000))
				if tarfile.is_tarfile(SPSC_KIT):
					download_tools().extract(SPSC_KIT,pastaperfil)
					xbmc.sleep(500)
					download_tools().remove(SPSC_KIT)
				if latest_version: settings.setSetting('sopcast_version',value=latest_version)
				return
			else: generic = True
		elif os.uname()[4] == "i386" or os.uname()[4] == "i686":
			generic = False
			if settings.getSetting('openeleci386') == "true":
				SPSC_KIT = os.path.join(addonpath,openelecxi386_sopcast.split("/")[-1])
				download_tools().Downloader(openelecxi386_sopcast,SPSC_KIT,translate(30076),translate(30000))
				if tarfile.is_tarfile(SPSC_KIT):
					download_tools().extract(SPSC_KIT,pastaperfil)
					xbmc.sleep(500)
					download_tools().remove(SPSC_KIT)
				if latest_version: settings.setSetting('sopcast_version',value=latest_version)
				return
			else: generic = True
		# Fall back to the generic Linux bundle when no OpenELEC build applied.
		if generic == True:
			SPSC_KIT = os.path.join(addonpath,sopcast_linux_generico.split("/")[-1])
			download_tools().Downloader(sopcast_linux_generico,SPSC_KIT,translate(30076),translate(30000))
			if tarfile.is_tarfile(SPSC_KIT):
				path_libraries = os.path.join(pastaperfil,"sopcast")
				download_tools().extract(SPSC_KIT,path_libraries)
				xbmc.sleep(500)
				download_tools().remove(SPSC_KIT)
			#set every single file from the bundle as executable
			path_libraries = os.path.join(pastaperfil,"sopcast")
			dirs, files = xbmcvfs.listdir(path_libraries)
			for ficheiro in files:
				binary_path = os.path.join(path_libraries,ficheiro)
				st = os.stat(binary_path)
				import stat
				os.chmod(binary_path, st.st_mode | stat.S_IEXEC)
			path_libraries = os.path.join(path_libraries,"lib")
			dirs, files = xbmcvfs.listdir(path_libraries)
			for ficheiro in files:
				binary_path = os.path.join(path_libraries,ficheiro)
				st = os.stat(binary_path)
				import stat
				os.chmod(binary_path, st.st_mode | stat.S_IEXEC)
			if latest_version: settings.setSetting('sopcast_version',value=latest_version)
			return

	elif xbmc.getCondVisibility('system.platform.windows'):
		print("Detected OS: Windows")
		if not xbmcvfs.exists(pastaperfil): xbmcvfs.mkdir(pastaperfil)
        	#Sop
		import ctypes
                is_admin=ctypes.windll.shell32.IsUserAnAdmin() != 0
                if is_admin == False:
                    mensagemok(translate(30000),translate(30077),translate(30078))
                else:
                    cmd = ['sc','delete','sopcastp2p']
                    proc = subprocess.Popen(cmd,stdout=subprocess.PIPE,shell=True)
                    for line in proc.stdout:
                        print("cmd out: " + line.rstrip())
                    xbmc.sleep(1000)
                    ret = mensagemprogresso.create(translate(30000),translate(30000))
                    mensagemprogresso.update(0,translate(30117),"  ")
                    xbmc.sleep(1000)
                    import _winreg
                    aReg = _winreg.ConnectRegistry(None,_winreg.HKEY_LOCAL_MACHINE)
                    try:
                        aKey = _winreg.OpenKey(aReg, r'SOFTWARE\SopCast\Player\InstallPath',0, _winreg.KEY_READ)
                        name, value, type = _winreg.EnumValue(aKey, 0)
                        sopcast_executable = value
                        print("Installation executable of sopcast was found: " + sopcast_executable)
                        _winreg.CloseKey(aKey)
                        mensagemprogresso.update(10,translate(30079),translate(30080))
                    except:
                        sopcast_executable = ""
                        mensagemok(translate(30000),translate(30081),translate(30082))
                    if not sopcast_executable: pass
                    else:
                        xbmc.sleep(1000)
                        mensagemprogresso.update(20,translate(30083),"  ")
                        xbmc.sleep(1000)
                        print ("Getting windows users IDS")
                        aReg = _winreg.ConnectRegistry(None,_winreg.HKEY_LOCAL_MACHINE)
                        aKey = _winreg.OpenKey(aReg, r'SOFTWARE\Microsoft\Windows NT\CurrentVersion\ProfileList')
                        users = []
                        for i in range(1024):
                            try:
                                asubkey=_winreg.EnumKey(aKey,i)
                                print(asubkey)
                                aKeydois = _winreg.OpenKey(aReg, os.path.join('SOFTWARE\Microsoft\Windows NT\CurrentVersion\ProfileList',asubkey))
                                val=_winreg.QueryValueEx(aKeydois, "ProfileImagePath")
                                try:
                                    print(val[0])
                                except:
                                    print("Notice: User with strange characters, print cmd ignored.")
                                if "Windows" in val[0] or "%systemroot%" in val[0]:
                                    pass
                                else:
                                    users.append(asubkey)
                            except:
                                pass
                        if not users:
                            mensagemok(translate(30000),translate(30084))
                        else:
                            mensagemprogresso.update(30,translate(30085),translate(30080))
                            xbmc.sleep(200)
                            mensagemprogresso.update(30,translate(30086),"   ")
                            xbmc.sleep(1000)
                            print("System Users", users)
                            srvany_final_location = os.path.join(sopcast_executable.replace("SopCast.exe",""),"srvany.exe")
                            srvany_download_location = os.path.join(addonpath,"srvany.exe")
                            srvanytgz_download_location = os.path.join(addonpath,"srvany.tar.gz")                            
                            download_tools().Downloader(srvany_executable,srvanytgz_download_location,translate(30087),translate(30000)) 
                            xbmc.sleep(1000)
                            if tarfile.is_tarfile(srvanytgz_download_location):
                                path_libraries = addonpath
                                download_tools().extract(srvanytgz_download_location,path_libraries)
                                xbmcvfs.copy(srvany_download_location,srvany_final_location)
                                download_tools().remove(srvanytgz_download_location)
                                download_tools().remove(srvany_download_location)
                            xbmc.sleep(1000)
                            ret = mensagemprogresso.create(translate(30000),translate(30000))
                            xbmc.sleep(200)
                            mensagemprogresso.update(35,translate(30088),"  ")
                            xbmc.sleep(1000)
                            cmd = ['sc','create','sopcastp2p','binpath=',os.path.join(os.path.join(sopcast_executable.replace("SopCast.exe","")),'srvany.exe')]
                            proc = subprocess.Popen(cmd,stdout=subprocess.PIPE,shell=True)
                            servicecreator = False
                            for line in proc.stdout:
                                print ("cmd out: " + line.rstrip())
                                servicecreator = True
                            if servicecreator == False:
                                mensagemok(translate(30000),translate(30089))
                            else:
                                mensagemprogresso.update(40,translate(30088),translate(30080))
                                xbmc.sleep(1000)
                                mensagemprogresso.update(45,translate(30090),"  ")
                                xbmc.sleep(1000)
                                print("Trying to modify regedit....")
                                try:
                                    aReg = _winreg.ConnectRegistry(None,_winreg.HKEY_LOCAL_MACHINE)
                                    key = _winreg.CreateKey(aReg, r'SYSTEM\CurrentControlSet\Services\sopcastp2p\Parameters')
                                    _winreg.SetValueEx(key, 'AppDirectory', 0, _winreg.REG_SZ, os.path.join(sopcast_executable.replace("SopCast.exe","")))
                                    _winreg.SetValueEx(key, 'Application', 0, _winreg.REG_SZ, os.path.join(os.path.join(sopcast_executable.replace("SopCast.exe","")),"SopCast.exe"))
                                    _winreg.SetValueEx(key, 'AppParameters', 0, _winreg.REG_SZ, "sop://")
                                    mensagemprogresso.update(50,translate(30090), translate(30080))
                                    regedit = True
                                except:
                                    mensagemok(translate(30000),translate(30091))
                                    regedit = False
                                if regedit == False: pass
                                else:
                                    xbmc.sleep(1000)
                                    mensagemprogresso.update(50,translate(30092), "   ")
                                    cmd = ['sc','sdshow','sopcastp2p']
                                    proc = subprocess.Popen(cmd,stdout=subprocess.PIPE,shell=True)
                                    lines = []
                                    for line in proc.stdout:
					print(line.rstrip())
                                        if line.rstrip() != "" and "(" in line.rstrip(): lines.append(line.rstrip())
                                        else: pass
                                    if len(lines) != 1: mensagemok(translate(30000),translate(30093))
                                    else:
                                        linha_arr = []
                                        for user in users:
                                            linha_arr.append('(A;;RPWPCR;;;' + user + ')')
                                        linha_add = ''
                                        for linha in linha_arr:
                                            linha_add += linha
                                        print("line piece to add: " + linha_add)
                                        linha_final = lines[0].replace("S:(",linha_add + "S:(")
                                        print("Final line: " + linha_final)
                                        permissions = False
                                        xbmc.sleep(500)
                                        mensagemprogresso.update(60,translate(30092), translate(30080))
                                        xbmc.sleep(500)
                                        mensagemprogresso.update(60,translate(30094), "   ")
                                        cmd = ['sc','sdset','sopcastp2p',linha_final]
                                        proc = subprocess.Popen(cmd,stdout=subprocess.PIPE,shell=True)
                                        for line in proc.stdout:
                                            print(line.rstrip())
                                            permissions = True
                                        if permissions == False: mensagemok(translate(30000),translate(30095))
                                        else:
                                            mensagemprogresso.update(70,translate(30094), translate(30080))
                                            xbmc.sleep(1000)
                                            mensagemprogresso.update(70,translate(30096), "   ")
                                            print("Trying to set sopcastp2p service regedit permissions...")
                                            download_tools().Downloader(srvany_permissions,os.path.join(pastaperfil,"sopcastp2p-permissions.txt"),translate(30097),translate(30000))
                                            xbmc.sleep(500)
                                            ret = mensagemprogresso.create(translate(30000),translate(30000))
                                            xbmc.sleep(500)
                                            mensagemprogresso.update(80,translate(30098), "   ")
                                            xbmc.sleep(1000)
                                            cmd = ['regini',os.path.join(pastaperfil,"sopcastp2p-permissions.txt")]
                                            proc = subprocess.Popen(cmd,stdout=subprocess.PIPE,shell=True)
                                            for line in proc.stdout:
                                                print(line.rstrip())
                                            mensagemprogresso.update(90,translate(30098), translate(30098))
                                            mensagemprogresso.update(100,translate(30099), "   ")
                                            xbmc.sleep(2000)
                                            mensagemprogresso.close()
                                            if latest_version: settings.setSetting('sopcast_version',value=latest_version)
                                            return
    
	elif xbmc.getCondVisibility('System.Platform.OSX'):
		print("Detected OS: Mac OSX")
		available = False
		# Pick the OSX bundle matching the machine architecture.
		if os.uname()[-1] == "x86_64":
			mac_package = osx_x64_sopcast
			available = True
		elif os.uname()[-1] == "i386":
			mac_package = osx_i386_sopcast
			available = True
		else:
			available = False
		if available == True:		
			if not os.path.exists(pastaperfil):
				xbmcvfs.mkdir(pastaperfil)		
			MAC_KIT = os.path.join(addonpath,mac_package.split("/")[-1])
			download_tools().Downloader(mac_package,MAC_KIT,translate(30076),translate(30000))
			if tarfile.is_tarfile(MAC_KIT):
				path_libraries = os.path.join(pastaperfil)
				download_tools().extract(MAC_KIT,pastaperfil)
				download_tools().remove(MAC_KIT)
				sp_sc_auth = os.path.join(pastaperfil,"sopcast","sp-sc-auth")
				st = os.stat(sp_sc_auth)
				import stat
				os.chmod(sp_sc_auth, st.st_mode | stat.S_IEXEC)
			if latest_version: settings.setSetting('sopcast_version',value=latest_version)
			return
		else:
			mensagemok(translate(30000),translate(30100))
			return
				
	elif xbmc.getCondVisibility('System.Platform.Android'):

		print("Detected OS: Android")
		#Sopcast configuration
		print("Starting SopCast Configuration")

		#Moving sopclient to ext4 hack - tks steeve from xbmctorrent

		sopclient_builtin_location = os.path.join(addonpath,"resources","binaries","sopclient")

		#Hack to get current xbmc app id
		xbmcfolder=xbmc.translatePath(addonpath).split("/")

		found = False
		if settings.getSetting('auto_appid') == 'true':
			i = 0
			sopcast_installed = False
			# Guess the app id from the addon path: the first folder name
			# with two or more dots that is not the addon's own id.
			for folder in xbmcfolder:
				if folder.count('.') >= 2 and folder != addon_id :
					found = True
					break
				else:
					i+=1
			if found == True:
				uid = os.getuid()
				app_id = xbmcfolder[i]
		else:
			if settings.getSetting('custom_appid') != '':
				uid = os.getuid()
				app_id = settings.getSetting('custom_appid')
				found = True

		if found == True:
			xbmc_data_path = os.path.join("/data", "data", app_id)
			
			
			if os.path.exists(xbmc_data_path) and uid == os.stat(xbmc_data_path).st_uid:
				android_binary_dir = os.path.join(xbmc_data_path, "files", "program.plexus")
				if not os.path.exists(android_binary_dir):
            				os.makedirs(android_binary_dir)
				android_binary_path = os.path.join(android_binary_dir, "sopclient")
		        	if not os.path.exists(android_binary_path) or os.path.getsize(android_binary_path) != os.path.getsize(sopclient_builtin_location):
					shutil.copy2(sopclient_builtin_location, android_binary_path)
				binary_path = android_binary_path
				st = os.stat(binary_path)
				import stat
				os.chmod(binary_path, st.st_mode | stat.S_IEXEC)
				settings.setSetting('android_sopclient',value=binary_path)
				opcao= xbmcgui.Dialog().yesno(translate(30000), translate(30101),translate(30103))
				if not opcao:
					settings.setSetting('external-sopcast',value='1')
					sopcast_installed = True
					mensagemok(translate(30000),translate(30099))
				else:
					mensagemok(translate(30000),translate(30104))
					if os.path.exists(os.path.join("sdcard","Download")):
						pasta = os.path.join("sdcard","Download")
						sopfile = os.path.join("sdcard","Download",sopcast_apk.split("/")[-1])
					else:
						dialog = xbmcgui.Dialog()
						pasta = dialog.browse(int(0), translate(30105), 'videos')
						sopfile = os.path.join(pasta,sopcast_apk.split("/")[-1])
					download_tools().Downloader(sopcast_apk,sopfile,translate(30106),translate(30000))
					if tarfile.is_tarfile(sopfile):
						download_tools().extract(sopfile,pasta)
						download_tools().remove(sopfile)
					mensagemok(translate(30000),translate(30107),pasta,translate(30108))
					sopcast_installed = True
					settings.setSetting('external-sopcast',value='0')
					mensagemok(translate(30000),translate(30099))
				if latest_version: settings.setSetting('sopcast_version',value=latest_version)
				return

		else:
			mensagemok(translate(30000),translate(30109))
			return

Example 41

Project: program.plexus
Source File: sopcast.py
View license
def sopstreams_builtin(name,iconimage,sop):
	"""Launch the bundled SopCast client for a sop:// stream and play it in XBMC.

	Args:
		name: label shown on the playback list item.
		iconimage: icon/thumbnail URL for the list item.
		sop: the sop:// stream address passed to the SopCast binary.

	Spawns the platform-specific sp-sc-auth/sopclient process, optionally kills a
	stale instance already bound to the local ports, waits for the local HTTP
	relay to answer, resolves playback, and finally tears the subprocess down.
	"""
	try:
		global spsc
		# Build the platform-specific command line for the SopCast client.
		if xbmc.getCondVisibility('System.Platform.Linux') and not xbmc.getCondVisibility('System.Platform.Android'):

			if os.uname()[4] == "armv6l" or os.uname()[4] == "armv7l" or settings.getSetting('openelecx86_64') == "true":
				if settings.getSetting('jynxbox_arm7') == "true":
					# JynxBox ARM7: run the x86 binary through qemu's own loader chain.
					cmd = [os.path.join(pastaperfil,'sopcast','ld-linux.so.3'),'--library-path',os.path.join(pastaperfil,'sopcast','libqemu'),os.path.join(pastaperfil,'sopcast','qemu-i386'),os.path.join(pastaperfil,'sopcast','lib/ld-linux.so.2'),"--library-path",os.path.join(pastaperfil,'sopcast',"lib"),os.path.join(pastaperfil,'sopcast','sp-sc-auth'),sop,str(LOCAL_PORT),str(VIDEO_PORT)]
				else:
					# Other ARM / x86_64 OpenELEC: emulate the 32-bit binary with qemu-i386.
					cmd = [os.path.join(pastaperfil,'sopcast','qemu-i386'),os.path.join(pastaperfil,'sopcast','lib/ld-linux.so.2'),"--library-path",os.path.join(pastaperfil,'sopcast',"lib"),os.path.join(pastaperfil,'sopcast','sp-sc-auth'),sop,str(LOCAL_PORT),str(VIDEO_PORT)]
			elif settings.getSetting('openeleci386') == "true":
				cmd = [os.path.join(pastaperfil,'sopcast','lib/ld-linux.so.2'),"--library-path",os.path.join(pastaperfil,'sopcast',"lib"),os.path.join(pastaperfil,'sopcast','sp-sc-auth'),sop,str(LOCAL_PORT),str(VIDEO_PORT)]
			else:
				cmd = [os.path.join(pastaperfil,'sopcast','ld-linux.so.2'),'--library-path',os.path.join(pastaperfil,'sopcast','lib'),os.path.join(pastaperfil,'sopcast',SPSC_BINARY), sop, str(LOCAL_PORT), str(VIDEO_PORT)]

		elif xbmc.getCondVisibility('System.Platform.OSX'):
			cmd = [os.path.join(pastaperfil,'sopcast','sp-sc-auth'), str(sop), str(LOCAL_PORT), str(VIDEO_PORT)]

		elif xbmc.getCondVisibility('System.Platform.Android'):
			cmd = [str(settings.getSetting('android_sopclient')), str(sop), str(LOCAL_PORT), str(VIDEO_PORT)]

		print(cmd)

		# Another instance of the sopcast executable might still be bound to the
		# same ports: probe both and give the user the choice before spawning.
		try:
			sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
			sock.connect((LOCAL_IP, int(LOCAL_PORT)))
			sock.close()
			sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
			sock.connect((LOCAL_IP, int(VIDEO_PORT)))
			sock.close()
			existing_instance = True
		except: existing_instance = False
		if existing_instance == True:
			option = xbmcgui.Dialog().yesno(translate(30000), translate(30033),translate(30034))
			if not option:
				# BUGFIX: getCondVisibility() returns a bool, so the previous
				# comparison against the string "true" was always False and the
				# stale Android client was never killed.
				if xbmc.getCondVisibility('System.Platform.Android'):
					xbmc_user = os.getlogin()
					# BUGFIX: with shell=False the '|' was passed to ps as an
					# argument, not a pipe; run plain 'ps' and let the loop
					# below do the 'sopclient' filtering.
					procshut = subprocess.Popen(['ps'],shell=False,stdout=subprocess.PIPE)
					for line in procshut.stdout:
						match = re.findall(r'\S+', line.rstrip())
						if match:
							if 'sopclient' in match[-1] and len(match)>2:
								if xbmc_user == match[0]:
									os.system("kill " + match[1])
									xbmc.sleep(200)
								else:
									# Process owned by another user: escalate via su.
									os.system("su -c kill " + match[1])
									xbmc.sleep(200)
				elif xbmc.getCondVisibility('System.Platform.Linux'):
					os.system("kill $(ps aux | grep '[s]p-sc-auth' | awk '{print $1}')") #openelec
					os.system("kill $(ps aux | grep '[s]p-sc-auth' | awk '{print $2}')")
				elif xbmc.getCondVisibility('System.Platform.OSX'):
					os.system("kill $(ps aux | grep '[s]p-sc-auth')")
			else: pass
		else: pass

		# Start the SopCast client; pipe its output only in debug mode so it
		# can be dumped via communicate() at the end.
		if settings.getSetting('debug_mode') == "false":
			spsc = subprocess.Popen(cmd, shell=False, bufsize=BUFER_SIZE,stdin=None, stdout=None, stderr=None)
		else:
			spsc = subprocess.Popen(cmd, shell=False, bufsize=BUFER_SIZE,stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
		listitem = xbmcgui.ListItem(name, iconImage=iconimage, thumbnailImage=iconimage)
		listitem.setLabel(name)
		listitem.setInfo('video', {'Title': name})
		url = "http://"+LOCAL_IP+":"+str(VIDEO_PORT)+"/"
		xbmc.sleep(int(settings.getSetting('wait_time')))
		res=False
		# Poll the local HTTP relay until it answers or the countdown expires.
		counter = int(int(settings.getSetting("loading_time"))*2+10)
		ret = mensagemprogresso.create(translate(30000),"SopCast",translate(30035) % (str(20)))
		mensagemprogresso.update(0)
		warning = 0
		while counter > 0 and spsc.pid:
			if mensagemprogresso.iscanceled():
				# BUGFIX: was 'mensagemprogress.close()' (NameError, silently
				# swallowed by the enclosing bare except).
				mensagemprogresso.close()
				break
			xbmc.sleep(400)
			counter -= 1
			mensagemprogresso.update(int((1-(counter/50.0))*100),"SopCast",translate(30035) % str(int(int(settings.getSetting("loading_time")) * (1-((1-(counter/50.0)))))))
			try:
				urllib2.urlopen(url)
				counter=0
				res=sop_sleep(200 , spsc.pid)
				break
			except:
				if warning == 0:
					print("Other instance of sopcast is still running")
					warning += 1
				else: pass

		if res:
			mensagemprogresso.update(100)
			if not xbmc.getCondVisibility('System.Platform.OSX'):
				listitem.setPath(path=url)
				xbmcplugin.setResolvedUrl(int(sys.argv[1]),True,listitem)
				player = streamplayer(xbmc.PLAYER_CORE_AUTO , spsc_pid=spsc.pid , listitem=listitem)
				if int(sys.argv[1]) < 0:
					player.play(url, listitem)
				while player._playbackLock:
					xbmc.sleep(500)
			else:
				# OSX: the stream is first saved to a local file, then played.
				xbmc.sleep(200)
				video_file = os.path.join(pastaperfil,'sopcast.avi')
				start_new_thread(osx_sopcast_downloader,())
				handle_wait(int(settings.getSetting('stream_time_osx')),translate(30000),translate(30031),segunda='')
				listitem.setPath(path=video_file)
				xbmcplugin.setResolvedUrl(int(sys.argv[1]),True,listitem)
				player = streamplayer(xbmc.PLAYER_CORE_AUTO , spsc_pid=spsc.pid , listitem=listitem)
				player.play(video_file, listitem)
				while player._playbackLock:
					xbmc.sleep(500)
		else:
			xbmc.sleep(200)
			xbmc.executebuiltin("Notification(%s,%s,%i,%s)" % (translate(30000), translate(30032), 1,os.path.join(addonpath,"icon.png")))

	except: pass
	if settings.getSetting('debug_mode') == "true":
		try:
			stdout, stderr = spsc.communicate()
			print(stdout,stderr)
		except: pass
	# Belt-and-braces teardown: try every way of stopping the client while
	# ignoring failures, since the binary is prone to lingering.
	try: os.kill(spsc.pid,9)
	except: pass
	xbmc.sleep(100)
	try:os.system("killall -9 "+SPSC_BINARY)
	except:pass
	xbmc.sleep(100)
	try:spsc.kill()
	except:pass
	xbmc.sleep(100)
	try:spsc.wait()
	except:pass
	xbmc.sleep(100)
	try: os.kill(spsc.pid,9)
	except: pass
	mensagemprogresso.close()
	print("Player ended at last")

Example 42

Project: git-repo
Source File: git_command.py
View license
  def __init__(self,
               project,
               cmdv,
               bare = False,
               provide_stdin = False,
               capture_stdout = False,
               capture_stderr = False,
               disable_editor = False,
               ssh_proxy = False,
               cwd = None,
               gitdir = None):
    """Spawn a git subprocess for *project*, wiring env and std streams."""
    env = os.environ.copy()

    # Scrub repo/git bookkeeping variables so the child starts clean.
    for key in [REPO_TRACE,
              GIT_DIR,
              'GIT_ALTERNATE_OBJECT_DIRECTORIES',
              'GIT_OBJECT_DIRECTORY',
              'GIT_WORK_TREE',
              'GIT_GRAFT_FILE',
              'GIT_INDEX_FILE']:
      env.pop(key, None)

    # Anything we do not capture must be echoed back to the user.
    self.tee = {'stdout': not capture_stdout, 'stderr': not capture_stderr}

    if disable_editor:
      _setenv(env, 'GIT_EDITOR', ':')
    if ssh_proxy:
      _setenv(env, 'REPO_SSH_SOCK', ssh_sock())
      _setenv(env, 'GIT_SSH', _ssh_proxy())
    if sys.platform == 'darwin' and 'http_proxy' in env:
      # Fold the proxy into GIT_CONFIG_PARAMETERS, preserving prior entries.
      proxy_cfg = "'http.proxy=%s'" % (env['http_proxy'],)
      existing = env.get('GIT_CONFIG_PARAMETERS')
      if existing is not None:
        proxy_cfg = existing + ' ' + proxy_cfg
      _setenv(env, 'GIT_CONFIG_PARAMETERS', proxy_cfg)
    if 'GIT_ALLOW_PROTOCOL' not in env:
      _setenv(env, 'GIT_ALLOW_PROTOCOL',
              'file:git:http:https:ssh:persistent-http:persistent-https:sso:rpc')

    if project:
      cwd = cwd or project.worktree
      gitdir = gitdir or project.gitdir

    command = [GIT]
    if bare:
      if gitdir:
        _setenv(env, GIT_DIR, gitdir)
      cwd = None
    command.append(cmdv[0])
    # git only emits progress output when stderr is a TTY; request it
    # explicitly for fetch/clone so it still shows through our pipes.
    if cmdv[0] in ('fetch', 'clone') and sys.stderr.isatty():
      if not ('--progress' in cmdv or '--quiet' in cmdv):
        command.append('--progress')
    command.extend(cmdv[1:])

    stdin = subprocess.PIPE if provide_stdin else None
    stdout = subprocess.PIPE
    stderr = subprocess.PIPE

    if IsTrace():
      global LAST_CWD
      global LAST_GITDIR

      # Assemble the trace line incrementally, noting cwd/GIT_DIR changes
      # only when they differ from the previously traced command.
      pieces = []

      if cwd and LAST_CWD != cwd:
        if LAST_GITDIR or LAST_CWD:
          pieces.append('\n')
        pieces.append(': cd %s\n' % cwd)
        LAST_CWD = cwd

      if GIT_DIR in env and LAST_GITDIR != env[GIT_DIR]:
        if LAST_GITDIR or LAST_CWD:
          pieces.append('\n')
        pieces.append(': export GIT_DIR=%s\n' % env[GIT_DIR])
        LAST_GITDIR = env[GIT_DIR]

      pieces.append(': ')
      pieces.append(' '.join(command))
      if stdin == subprocess.PIPE:
        pieces.append(' 0<|')
      if stdout == subprocess.PIPE:
        pieces.append(' 1>|')
      if stderr == subprocess.PIPE:
        pieces.append(' 2>|')
      Trace('%s', ''.join(pieces))

    try:
      p = subprocess.Popen(command,
                           cwd = cwd,
                           env = env,
                           stdin = stdin,
                           stdout = stdout,
                           stderr = stderr)
    except Exception as e:
      raise GitError('%s: %s' % (command[1], e))

    if ssh_proxy:
      _add_ssh_client(p)

    self.process = p
    self.stdin = p.stdin

Example 43

Project: ganeti
Source File: process.py
View license
def _RunCmdPipe(cmd, env, via_shell, cwd, interactive, timeout, noclose_fds,
                input_fd, postfork_fn=None,
                _linger_timeout=constants.CHILD_LINGER_TIMEOUT):
  """Run a command and return its output.

  @type  cmd: string or list
  @param cmd: Command to run
  @type env: dict
  @param env: The environment to use
  @type via_shell: bool
  @param via_shell: if we should run via the shell
  @type cwd: string
  @param cwd: the working directory for the program
  @type interactive: boolean
  @param interactive: Run command interactive (without piping)
  @type timeout: int
  @param timeout: Timeout after which the program gets terminated
  @type noclose_fds: list
  @param noclose_fds: list of additional (fd >=3) file descriptors to leave
                      open for the child process
  @type input_fd: C{file}-like object containing an actual file descriptor
                  or numeric file descriptor
  @param input_fd: File descriptor for process' standard input
  @type postfork_fn: Callable receiving PID as parameter
  @param postfork_fn: Function run after fork but before timeout
  @rtype: tuple
  @return: (out, err, status, timeout_action); timeout_action is one of the
           _TIMEOUT_* constants indicating whether the child was terminated
           or killed because of the timeout

  """
  poller = select.poll()

  # Interactive commands inherit our std streams; otherwise capture via pipes.
  if interactive:
    stderr = None
    stdout = None
  else:
    stderr = subprocess.PIPE
    stdout = subprocess.PIPE

  if input_fd:
    stdin = input_fd
  elif interactive:
    stdin = None
  else:
    stdin = subprocess.PIPE

  # To leave extra fds open in the child we must manage fd closing ourselves
  # via preexec_fn instead of letting subprocess close everything >= 3.
  if noclose_fds:
    preexec_fn = lambda: CloseFDs(noclose_fds)
    close_fds = False
  else:
    preexec_fn = None
    close_fds = True

  child = subprocess.Popen(cmd, shell=via_shell,
                           stderr=stderr,
                           stdout=stdout,
                           stdin=stdin,
                           close_fds=close_fds, env=env,
                           cwd=cwd,
                           preexec_fn=preexec_fn)

  if postfork_fn:
    postfork_fn(child.pid)

  out = StringIO()
  err = StringIO()

  linger_timeout = None

  if timeout is None:
    poll_timeout = None
  else:
    poll_timeout = utils_algo.RunningTimeout(timeout, True).Remaining

  msg_timeout = ("Command %s (%d) run into execution timeout, terminating" %
                 (cmd, child.pid))
  msg_linger = ("Command %s (%d) run into linger timeout, killing" %
                (cmd, child.pid))

  timeout_action = _TIMEOUT_NONE

  # subprocess: "If the stdin argument is PIPE, this attribute is a file object
  # that provides input to the child process. Otherwise, it is None."
  assert (stdin == subprocess.PIPE) ^ (child.stdin is None), \
    "subprocess' stdin did not behave as documented"

  # Drain the child's stdout/stderr via poll() until EOF on both, honouring
  # the execution timeout and, past it, the linger timeout.
  if not interactive:
    if child.stdin is not None:
      child.stdin.close()
    poller.register(child.stdout, select.POLLIN)
    poller.register(child.stderr, select.POLLIN)
    fdmap = {
      child.stdout.fileno(): (out, child.stdout),
      child.stderr.fileno(): (err, child.stderr),
      }
    for fd in fdmap:
      utils_wrapper.SetNonblockFlag(fd, True)

    while fdmap:
      if poll_timeout:
        pt = poll_timeout() * 1000
        if pt < 0:
          # Execution timeout expired: SIGTERM once, then start the linger
          # countdown before giving up on draining output.
          if linger_timeout is None:
            logging.warning(msg_timeout)
            if child.poll() is None:
              timeout_action = _TIMEOUT_TERM
              utils_wrapper.IgnoreProcessNotFound(os.kill, child.pid,
                                                  signal.SIGTERM)
            linger_timeout = \
              utils_algo.RunningTimeout(_linger_timeout, True).Remaining
          pt = linger_timeout() * 1000
          if pt < 0:
            break
      else:
        pt = None

      pollresult = utils_wrapper.RetryOnSignal(poller.poll, pt)

      for fd, event in pollresult:
        if event & select.POLLIN or event & select.POLLPRI:
          data = fdmap[fd][1].read()
          # no data from read signifies EOF (the same as POLLHUP)
          if not data:
            poller.unregister(fd)
            del fdmap[fd]
            continue
          fdmap[fd][0].write(data)
        if (event & select.POLLNVAL or event & select.POLLHUP or
            event & select.POLLERR):
          poller.unregister(fd)
          del fdmap[fd]

  if timeout is not None:
    assert callable(poll_timeout)

    # We have no I/O left but it might still run
    if child.poll() is None:
      _WaitForProcess(child, poll_timeout())

    # Terminate if still alive after timeout
    if child.poll() is None:
      if linger_timeout is None:
        logging.warning(msg_timeout)
        timeout_action = _TIMEOUT_TERM
        utils_wrapper.IgnoreProcessNotFound(os.kill, child.pid, signal.SIGTERM)
        lt = _linger_timeout
      else:
        lt = linger_timeout()
      _WaitForProcess(child, lt)

    # Okay, still alive after timeout and linger timeout? Kill it!
    if child.poll() is None:
      timeout_action = _TIMEOUT_KILL
      logging.warning(msg_linger)
      utils_wrapper.IgnoreProcessNotFound(os.kill, child.pid, signal.SIGKILL)

  out = out.getvalue()
  err = err.getvalue()

  status = child.wait()
  return out, err, status, timeout_action

Example 44

Project: ganga
Source File: execute.py
View license
def execute(command,
            timeout=None,
            env=None,
            cwd=None,
            shell=True,
            python_setup='',
            eval_includes=None,
            update_env=False,
            ):
    """
    Execute an external command.
    This will execute an external python command when shell=False or an external bash command when shell=True
    Args:
        command (str): This is the command that we want to execute in string format
        timeout (int): This is the timeout which we want to assign to a function and it will be killed if it runs for longer than n seconds
        env (dict): This is the environment to use for launching the new command
        cwd (str): This is the cwd the command is to be executed within.
        shell (bool): True for a bash command to be executed, False for a command to be executed within Python
        python_setup (str): A python command to be executed before the main command is
        eval_includes (str): A string used to construct an environment which, if passed, is used to eval the stdout into a python object
        update_env (bool): Should we update the env being passed to what the env was after the command finished running
    Returns:
        The string 'Command timed out!' if the timeout fired; otherwise the
        command's stdout, unpickled (or eval'd via eval_includes) into a
        python object when possible, else the raw stdout string.
    Raises:
        GangaException: if update_env is requested without an env dict
        RuntimeError: if the updated env cannot be recovered after the command
    """

    if update_env and env is None:
        raise GangaException('Cannot update the environment if None given.')

    if not shell:
        # We want to run a python command inside a small Python wrapper
        stream_command = 'python -'
        command, pkl_file_pipes, env_file_pipes = python_wrapper(command, python_setup, update_env)
    else:
        # We want to run a shell command inside a _NEW_ shell environment.
        # i.e. What I run here I expect to behave in the same way from the command line after I exit Ganga
        stream_command = "bash "
        if update_env:
            # note the exec gets around the problem of indent and base64 gets
            # around the \n
            command_update, env_file_pipes = env_update_script()
            command += ''';python -c "import base64;exec(base64.b64decode('%s'))"''' % base64.b64encode(command_update)

    # Some minor changes to cleanup the getting of the env
    if env is None and not update_env:
        env = get_env()

    # Construct the object which will contain the environment we want to run the command in
    p = subprocess.Popen(stream_command, shell=True, env=env, cwd=cwd, preexec_fn=os.setsid,
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)

    # This is where we store the output
    thread_output = {}

    # Start the timer thread used to kill commands which have likely stalled
    timer, timed_out = start_timer(p, timeout)

    # Background threads read the env/pickle side-channel pipes while the
    # main command runs, depositing results into thread_output by key.
    if update_env:
        env_output_key = 'env_output'
        update_env_thread = update_thread(env_file_pipes, thread_output, env_output_key, require_output=True)
    if not shell:
        pkl_output_key = 'pkl_output'
        update_pkl_thread = update_thread(pkl_file_pipes, thread_output, pkl_output_key, require_output=False)

    # Execute the main command of interest
    logger.debug("Executing Command:\n'%s'" % str(command))
    stdout, stderr = p.communicate(command)

    # Close the timeout watching thread
    logger.debug("stdout: %s" % stdout)
    logger.debug("stderr: %s" % stderr)

    timer.cancel()
    if timeout is not None:
        timer.join()

    # Finish up and decide what to return
    if stderr != '':
        # this is still debug as using the environment from dirac default_env made a stderr message dump out
        # even though it works
        logger.debug(stderr)

    if timed_out.isSet():
        return 'Command timed out!'

    # Decode any pickled objects from disk
    if update_env:
        update_env_thread.join()
        if env_output_key in thread_output:
            env.update(thread_output[env_output_key])
        else:
            logger.error("Expected to find the updated env after running a command")
            logger.error("Command: %s" % command)
            logger.error("stdout: %s" % stdout)
            logger.error("stderr: %s" % stderr)
            raise RuntimeError("Missing update env after running command")

    if not shell:
        update_pkl_thread.join()
        if pkl_output_key in thread_output:
            return thread_output[pkl_output_key]

    # Fall back: try to unpickle stdout, then (if eval_includes was given)
    # eval it inside the constructed namespace; otherwise return it raw.
    try:
        if stdout:
            stdout = pickle.loads(stdout)
    except (pickle.UnpicklingError, EOFError) as err:
        if not shell:
            logger.error("Execute Err: %s", err)
        else:
            logger.debug("Execute Err: %s", err)
        local_ns = {}
        if isinstance(eval_includes, str):
            try:
                exec(eval_includes, {}, local_ns)
            except:
                logger.error("Failed to eval the env, can't eval stdout")
                pass
            try:
                stdout = eval(stdout, {}, local_ns)
            except Exception as err2:
                logger.error("Err2: %s" % str(err2))
                pass

    return stdout

Example 45

Project: jhbuild
Source File: gtkui.py
View license
    def execute(self, command, hint=None, cwd=None, extra_env=None):
        """Run *command*, streaming its output into the GUI terminal widget.

        Uses the vte terminal widget when available; otherwise falls back to
        a subprocess whose stdout/stderr are polled and appended to a text
        buffer while GTK events are pumped. Supports pausing the build via
        SIGSTOP/SIGCONT. Raises CommandError if no command is given, the
        spawn fails, or the command exits with an error; raises
        ExitRequestedException if the user quits mid-run.
        """
        if not command:
            raise CommandError(_('No command given'))

        if isinstance(command, (str, unicode)):
            short_command = command.split()[0]
        else:
            short_command = command[0]

        # Fallback path: no vte widget, emulate a terminal with a text buffer.
        if vte is None:
            textbuffer = self.terminal.get_buffer()

            if isinstance(command, (str, unicode)):
                self.terminal.get_buffer().insert_with_tags_by_name(
                        textbuffer.get_end_iter(),
                        ' $ ' + command + '\n', 'stdin')
            else:
                self.terminal.get_buffer().insert_with_tags_by_name(
                        textbuffer.get_end_iter(),
                        ' $ ' + ' '.join(command) + '\n', 'stdin')

            kws = {
                'close_fds': True,
                'shell': isinstance(command, (str,unicode)),
                'stdin': subprocess.PIPE,
                'stdout': subprocess.PIPE,
                'stderr': subprocess.PIPE,
                }

            if cwd is not None:
                kws['cwd'] = cwd

            if extra_env is not None:
                kws['env'] = os.environ.copy()
                kws['env'].update(extra_env)

            command = self._prepare_execute(command)

            try:
                p = subprocess.Popen(command, **kws)
            except OSError as e:
                raise CommandError(str(e))
            self.child_pid = p.pid

            p.stdin.close()

            def make_non_blocking(fd):
                fl = fcntl.fcntl(fd, fcntl.F_GETFL)
                fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NDELAY)

            make_non_blocking(p.stdout)
            make_non_blocking(p.stderr)

            build_paused = False
            read_set = [p.stdout, p.stderr]

            # Pump GTK events while multiplexing the child's stdout/stderr
            # into the text buffer until both streams hit EOF.
            while read_set:
                # Allow the frontend to get a little time
                while gtk.events_pending():
                    gtk.main_iteration()
                    if self.quit:
                        raise ExitRequestedException()

                rlist, wlist, xlist = select.select(read_set, [], [], 0)

                if p.stdout in rlist:
                    chunk = p.stdout.read()
                    if chunk == '':
                        p.stdout.close()
                        read_set.remove(p.stdout)
                    textbuffer.insert_with_tags_by_name(
                            textbuffer.get_end_iter(), chunk, 'stdout')

                if p.stderr in rlist:
                    chunk = p.stderr.read()
                    if chunk == '':
                        p.stderr.close()
                        read_set.remove(p.stderr)
                    textbuffer.insert_with_tags_by_name(
                            textbuffer.get_end_iter(), chunk, 'stderr')

                # Cap the buffer at 200 lines by trimming from the top.
                if textbuffer.get_line_count() > 200:
                    textbuffer.delete(textbuffer.get_start_iter(),
                            textbuffer.get_iter_at_line_offset(
                                textbuffer.get_line_count()-200, 0))

                mark = textbuffer.get_mark('end')
                if mark:
                    textbuffer.move_mark(mark, textbuffer.get_end_iter())
                else:
                    mark = textbuffer.create_mark('end', textbuffer.get_end_iter(), False)

                if self.terminal_sclwin.get_vadjustment().upper == \
                        (self.terminal_sclwin.size_request()[1] + 
                         self.terminal_sclwin.get_vadjustment().get_value()):
                    # currently at the bottom of the textview, therefore scroll
                    # automatically
                    self.terminal.scroll_to_mark(mark, 0.05, True, 0.0, 1.0)

                # See if we should pause the current command
                if not build_paused and self.is_build_paused():
                    os.kill(p.pid, signal.SIGSTOP)
                    build_paused = True
                elif build_paused and not self.is_build_paused():
                    os.kill(p.pid, signal.SIGCONT)
                    build_paused = False

                time.sleep(0.05)

            rc = p.wait()
            self.child_pid = None
        else:
            # use the vte widget
            if isinstance(command, (str, unicode)):
                self.terminal.feed(' $ ' + command + '\n\r')
                command = [os.environ.get('SHELL', '/bin/sh'), '-c', command]
            else:
                self.terminal.feed(' $ ' + ' '.join(command) + '\n\r')

            kws = {}
            if extra_env is not None:
                env = os.environ.copy()
                env.update(extra_env)
                kws['envv'] = ['%s=%s' % x for x in env.items()]

            if cwd:
                kws['directory'] = cwd

            self.vte_fork_running = True
            self.vte_child_exit_status = None
            # In earlier python-vte versions,
            #  - the environment had to be passed as a sequence of strings
            #    ("FOO=1", "BAR=2") (GNOME bug 583078)
            #  - directory keyword could not be set to None (GNOME bug 583129)
            # The bugs have been fixed, but for compatibility reasons the old
            # compatibility code is still in place.
            self.child_pid = self.terminal.fork_command(
                    command=command[0], argv=command, **kws)
            while self.vte_fork_running:
                gtk.main_iteration()
                if self.quit:
                    raise ExitRequestedException()
            self.child_pid = None
            if os.WIFEXITED(self.vte_child_exit_status):
                rc = os.WEXITSTATUS(self.vte_child_exit_status)
            elif os.WIFSIGNALED(self.vte_child_exit_status):
                raise CommandError(_('%(command)s died with signal %(rc)s') % {
                        'command': short_command,
                        'rc': os.WTERMSIG(self.vte_child_exit_status)})

        if rc:
            raise CommandError(_('%(command)s returned with an error code (%(rc)s)') % {
                    'command': short_command, 'rc': rc})

Example 46

Project: jhbuild
Source File: terminal.py
View license
    def execute(self, command, hint=None, cwd=None, extra_env=None):
        """Run *command* in the user's terminal, pretty-printing VCS output.

        hint selects output post-processing: 'cvs', 'svn' and 'hg-update.py'
        get per-line colouring and conflict detection. cwd and extra_env
        adjust the child's working directory and environment. Raises
        CommandError if no command is given, the spawn fails, or the command
        exits non-zero.
        """
        if not command:
            raise CommandError(_('No command given'))

        kws = {
            'close_fds': True
            }
        print_args = {'cwd': ''}
        if cwd:
            print_args['cwd'] = cwd
        else:
            try:
                print_args['cwd'] = os.getcwd()
            except OSError:
                pass

        if isinstance(command, (str, unicode)):
            kws['shell'] = True
            print_args['command'] = command
        else:
            print_args['command'] = ' '.join(command)

        # get rid of hint if pretty printing is disabled.
        if not self.config.pretty_print:
            hint = None
        elif os.name == 'nt':
            # pretty print also doesn't work on Windows;
            # see https://bugzilla.gnome.org/show_bug.cgi?id=670349 
            hint = None

        if not self.config.quiet_mode:
            if self.config.print_command_pattern:
                try:
                    print self.config.print_command_pattern % print_args
                except TypeError as e:
                    raise FatalError('\'print_command_pattern\' %s' % e)
                except KeyError as e:
                    raise FatalError(_('%(configuration_variable)s invalid key'
                                       ' %(key)s' % \
                                       {'configuration_variable' :
                                            '\'print_command_pattern\'',
                                        'key' : e}))

        # Capture output only when we post-process it (VCS hints) or in
        # quiet mode; otherwise let the child write straight to the tty.
        kws['stdin'] = subprocess.PIPE
        if hint in ('cvs', 'svn', 'hg-update.py'):
            kws['stdout'] = subprocess.PIPE
            kws['stderr'] = subprocess.STDOUT
        else:
            kws['stdout'] = None
            kws['stderr'] = None

        if self.config.quiet_mode:
            kws['stdout'] = subprocess.PIPE
            kws['stderr'] = subprocess.STDOUT

        if cwd is not None:
            kws['cwd'] = cwd

        if extra_env is not None:
            kws['env'] = os.environ.copy()
            kws['env'].update(extra_env)

        command = self._prepare_execute(command)

        try:
            p = subprocess.Popen(command, **kws)
        except OSError as e:
            raise CommandError(str(e))

        output = []
        if hint in ('cvs', 'svn', 'hg-update.py'):
            conflicts = []
            # Per-line callback: record conflicts and colour-code VCS status
            # prefixes (C=conflict, M=modified, ?=unknown).
            def format_line(line, error_output, conflicts = conflicts, output = output):
                if line.startswith('C '):
                    conflicts.append(line)

                if self.config.quiet_mode:
                    output.append(line)
                    return

                if line[-1] == '\n': line = line[:-1]

                if line.startswith('C '):
                    print '%s%s%s' % (t_colour[12], line, t_reset)
                elif line.startswith('M '):
                    print '%s%s%s' % (t_colour[10], line, t_reset)
                elif line.startswith('? '):
                    print '%s%s%s' % (t_colour[8], line, t_reset)
                else:
                    print line

            cmds.pprint_output(p, format_line)
            if conflicts:
                uprint(_('\nConflicts during checkout:\n'))
                for line in conflicts:
                    sys.stdout.write('%s  %s%s\n'
                                     % (t_colour[12], line, t_reset))
                # make sure conflicts fail
                if p.returncode == 0 and hint == 'cvs': p.returncode = 1
        elif self.config.quiet_mode:
            def format_line(line, error_output, output = output):
                output.append(line)
            cmds.pprint_output(p, format_line)
        else:
            try:
                p.communicate()
            except KeyboardInterrupt:
                try:
                    os.kill(p.pid, signal.SIGINT)
                except OSError:
                    # process might already be dead.
                    pass
        try:
            if p.wait() != 0:
                if self.config.quiet_mode:
                    print ''.join(output)
                raise CommandError(_('########## Error running %s')
                                   % print_args['command'], p.returncode)
        except OSError:
            # it could happen on a really badly-timed ctrl-c (see bug 551641)
            raise CommandError(_('########## Error running %s')
                               % print_args['command'])

Example 47

Project: macops
Source File: gmacpyutil.py
View license
def _RunProcess(cmd, stdinput=None, env=None, cwd=None, sudo=False,
                sudo_password=None, background=False, stream_output=False,
                timeout=0, waitfor=0):
  """Executes cmd using subprocess.

  Args:
    cmd: An array of strings as the command to run
    stdinput: An optional string as stdin
    env: An optional dictionary as the environment
    cwd: An optional string as the current working directory
    sudo: An optional boolean on whether to do the command via sudo
    sudo_password: An optional string of the password to use for sudo
    background: Launch command in background mode
    stream_output: An optional boolean on whether to send output to the screen
    timeout: An optional int or float; if >0, Exec() will stop waiting for
      output after timeout seconds and kill the process it started. Return code
      might be undefined, or -SIGTERM, use waitfor to make sure to obtain it.
      values <1 will be crudely rounded off because of select() sleep time.
    waitfor: An optional int or float, if >0, Exec() will wait waitfor seconds
      before asking for the process exit status one more time.
  Returns:
    Tuple: two strings and an integer: (stdout, stderr, returncode);
    stdout/stderr may also be None. If the process is set to launch in
    background mode, an instance of <subprocess.Popen object> is
    returned, in order to be able to read from its pipes *and* use poll() to
    check when it is finished.
  Raises:
    GmacpyutilException: If both stdinput and sudo_password are specified
    GmacpyutilException: If both sudo and background are specified
    GmacpyutilException: If both timeout and background, stream_output, sudo, or
      sudo_password, or stdinput are specified
    GmacpyutilException: If timeout is less than 0
    GmacpyutilException: If subprocess raises an OSError
  """
  # Validate option combinations up front: the timeout implementation below
  # reads the child's pipes itself, so it cannot be combined with any mode
  # that also needs the pipes (background/stream_output/sudo/stdinput).
  if timeout and (background or stream_output or sudo or sudo_password or
                  stdinput):
    raise GmacpyutilException('timeout is not compatible with background, '
                              'stream_output, sudo, sudo_password, or '
                              'stdinput.')
  if waitfor and not timeout:
    raise GmacpyutilException('waitfor only valid with timeout.')
  if timeout < 0:
    raise GmacpyutilException('timeout must be greater than 0.')
  if stream_output:
    # stdout/stderr of None make the child inherit this process' streams, so
    # output goes straight to the screen instead of being captured.
    stdoutput = None
    stderror = None
  else:
    stdoutput = subprocess.PIPE
    stderror = subprocess.PIPE
  if sudo and not background:
    # Re-wrap the command in a sudo invocation.
    sudo_cmd = ['sudo']
    if sudo_password and not stdinput:
      # Set sudo to get password from stdin
      sudo_cmd.extend(['-S'])
      stdinput = sudo_password + '\n'
    elif sudo_password and stdinput:
      raise GmacpyutilException('stdinput and sudo_password are mutually '
                                'exclusive')
    else:
      sudo_cmd.extend(['-p', "%u's password is required for admin access: "])
    sudo_cmd.extend(cmd)
    cmd = sudo_cmd
  elif sudo and background:
    raise GmacpyutilException('sudo is not compatible with background.')
  # Start from the parent environment and overlay any caller-provided vars.
  environment = os.environ.copy()
  if env is not None:
    environment.update(env)
  try:
    task = subprocess.Popen(cmd, stdout=stdoutput, stderr=stderror,
                            stdin=subprocess.PIPE, env=environment, cwd=cwd)
  except OSError, e:
    raise GmacpyutilException('Could not execute: %s' % e.strerror)
  if timeout == 0:
    # communicate() will wait until the process is finished, so if we are in
    # background mode, just send the input and take the pipe objects as output.
    if not background:
      (stdout, stderr) = task.communicate(input=stdinput)
      return (stdout, stderr, task.returncode)
    else:
      if stdinput:
        task.stdin.write(stdinput)
      return task
  else:
    # TODO(user): See if it's possible to pass stdinput when using a timeout
    # Timeout mode: read both pipes non-blockingly via select() with a 1s
    # tick; each tick that yields no output counts toward the inactivity
    # timeout (so this is an *inactivity* timeout, not wall-clock).
    inactive = 0
    stdoutput = []
    stderror = []
    SetFileNonBlocking(task.stdout)
    SetFileNonBlocking(task.stderr)
    returncode = None
    while returncode is None:
      rlist, _, _ = select.select([task.stdout, task.stderr], [], [], 1.0)
      if not rlist:
        inactive += 1
        if inactive >= timeout:
          logging.error('cmd has timed out: %s', cmd)
          logging.error('Sending SIGTERM to PID=%s', task.pid)
          os.kill(task.pid, signal.SIGTERM)
          break  # note: this is a hard timeout, we don't read() again
      else:
        inactive = 0
        for fd in rlist:
          if fd is task.stdout:
            stdoutput.append(fd.read())
          elif fd is task.stderr:
            stderror.append(fd.read())

      returncode = task.poll()

    # if the process was just killed, wait for waitfor seconds.
    if inactive >= timeout and waitfor > 0:
      time.sleep(waitfor)
    # attempt to obtain returncode one last chance
    returncode = task.poll()  # poll() also refreshes task.returncode, used below
    stdoutput = ''.join(stdoutput)
    stderror = ''.join(stderror)
    return (stdoutput, stderror, task.returncode)

Example 48

Project: alignak
Source File: test_launch_daemons.py
View license
    def _run_daemons_and_test_api(self, ssl=False):
        """ Running all the Alignak daemons to check their correct launch and API

        :return:
        """
        req = requests.Session()

        # copy etc config files in test/cfg/run_test_launch_daemons and change folder
        # in the files for pid and log files
        if os.path.exists('./cfg/run_test_launch_daemons'):
            shutil.rmtree('./cfg/run_test_launch_daemons')

        shutil.copytree('../etc', './cfg/run_test_launch_daemons')
        files = ['cfg/run_test_launch_daemons/daemons/arbiterd.ini',
                 'cfg/run_test_launch_daemons/daemons/brokerd.ini',
                 'cfg/run_test_launch_daemons/daemons/pollerd.ini',
                 'cfg/run_test_launch_daemons/daemons/reactionnerd.ini',
                 'cfg/run_test_launch_daemons/daemons/receiverd.ini',
                 'cfg/run_test_launch_daemons/daemons/schedulerd.ini',
                 'cfg/run_test_launch_daemons/alignak.cfg',
                 'cfg/run_test_launch_daemons/arbiter/daemons/arbiter-master.cfg',
                 'cfg/run_test_launch_daemons/arbiter/daemons/broker-master.cfg',
                 'cfg/run_test_launch_daemons/arbiter/daemons/poller-master.cfg',
                 'cfg/run_test_launch_daemons/arbiter/daemons/reactionner-master.cfg',
                 'cfg/run_test_launch_daemons/arbiter/daemons/receiver-master.cfg',
                 'cfg/run_test_launch_daemons/arbiter/daemons/scheduler-master.cfg']
        replacements = {
            '/usr/local/var/run/alignak': '/tmp',
            '/usr/local/var/log/alignak': '/tmp',
            '%(workdir)s': '/tmp',
            '%(logdir)s': '/tmp',
            '%(etcdir)s': '/tmp'
        }
        if ssl:
            shutil.copy('./cfg/ssl/server.csr', '/tmp/')
            shutil.copy('./cfg/ssl/server.key', '/tmp/')
            shutil.copy('./cfg/ssl/server.pem', '/tmp/')
            # Set daemons configuration to use SSL
            print replacements
            replacements.update({
                'use_ssl=0': 'use_ssl=1',
                '#server_cert=': 'server_cert=',
                '#server_key=': 'server_key=',
                '#server_dh=': 'server_dh=',
                '#hard_ssl_name_check=0': 'hard_ssl_name_check=0',
                'certs/': '',
                'use_ssl	                0': 'use_ssl	                1'
            })
        for filename in files:
            lines = []
            with open(filename) as infile:
                for line in infile:
                    for src, target in replacements.iteritems():
                        line = line.replace(src, target)
                    lines.append(line)
            with open(filename, 'w') as outfile:
                for line in lines:
                    outfile.write(line)

        self.procs = {}
        satellite_map = {
            'arbiter': '7770', 'scheduler': '7768', 'broker': '7772',
            'poller': '7771', 'reactionner': '7769', 'receiver': '7773'
        }

        print("Cleaning pid and log files...")
        for daemon in ['arbiter', 'scheduler', 'broker', 'poller', 'reactionner', 'receiver']:
            if os.path.exists('/tmp/%sd.pid' % daemon):
                os.remove('/tmp/%sd.pid' % daemon)
                print("- removed /tmp/%sd.pid" % daemon)
            if os.path.exists('/tmp/%sd.log' % daemon):
                os.remove('/tmp/%sd.log' % daemon)
                print("- removed /tmp/%sd.log" % daemon)

        print("Launching the daemons...")
        for daemon in ['scheduler', 'broker', 'poller', 'reactionner', 'receiver']:
            args = ["../alignak/bin/alignak_%s.py" %daemon,
                    "-c", "./cfg/run_test_launch_daemons/daemons/%sd.ini" % daemon]
            self.procs[daemon] = \
                subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            sleep(1)
            print("- %s launched (pid=%d)" % (daemon, self.procs[daemon].pid))

        sleep(1)

        print("Testing daemons start")
        for name, proc in self.procs.items():
            ret = proc.poll()
            if ret is not None:
                print("*** %s exited on start!" % (name))
                for line in iter(proc.stdout.readline, b''):
                    print(">>> " + line.rstrip())
                for line in iter(proc.stderr.readline, b''):
                    print(">>> " + line.rstrip())
            self.assertIsNone(ret, "Daemon %s not started!" % name)
            print("%s running (pid=%d)" % (name, self.procs[daemon].pid))

        # Let the daemons start ...
        sleep(5)

        print("Testing pid files and log files...")
        for daemon in ['scheduler', 'broker', 'poller', 'reactionner', 'receiver']:
            self.assertTrue(os.path.exists('/tmp/%sd.pid' % daemon), '/tmp/%sd.pid does not exist!' % daemon)
            self.assertTrue(os.path.exists('/tmp/%sd.log' % daemon), '/tmp/%sd.log does not exist!' % daemon)

        sleep(1)

        print("Launching arbiter...")
        args = ["../alignak/bin/alignak_arbiter.py",
                "-c", "cfg/run_test_launch_daemons/daemons/arbiterd.ini",
                "-a", "cfg/run_test_launch_daemons/alignak.cfg"]
        self.procs['arbiter'] = \
            subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        print("%s launched (pid=%d)" % ('arbiter', self.procs['arbiter'].pid))

        sleep(5)

        name = 'arbiter'
        print("Testing Arbiter start %s" % name)
        ret = self.procs[name].poll()
        if ret is not None:
            print("*** %s exited on start!" % (name))
            for line in iter(self.procs[name].stdout.readline, b''):
                print(">>> " + line.rstrip())
            for line in iter(self.procs[name].stderr.readline, b''):
                print(">>> " + line.rstrip())
        self.assertIsNone(ret, "Daemon %s not started!" % name)
        print("%s running (pid=%d)" % (name, self.procs[name].pid))

        sleep(1)

        print("Testing pid files and log files...")
        for daemon in ['arbiter']:
            self.assertTrue(os.path.exists('/tmp/%sd.pid' % daemon), '/tmp/%sd.pid does not exist!' % daemon)
            self.assertTrue(os.path.exists('/tmp/%sd.log' % daemon), '/tmp/%sd.log does not exist!' % daemon)

        # Let the arbiter build and dispatch its configuration
        sleep(5)

        http = 'http'
        if ssl:
            http = 'https'

        print("Testing ping")
        for name, port in satellite_map.items():
            raw_data = req.get("%s://localhost:%s/ping" % (http, port), verify=False)
            data = raw_data.json()
            self.assertEqual(data, 'pong', "Daemon %s  did not ping back!" % name)

        print("Testing ping with satellite SSL and client not SSL")
        if ssl:
            for name, port in satellite_map.items():
                raw_data = req.get("http://localhost:%s/ping" % port)
                self.assertEqual('The client sent a plain HTTP request, but this server only speaks HTTPS on this port.', raw_data.text)

        print("Testing get_satellite_list")
        raw_data = req.get("%s://localhost:%s/get_satellite_list" % (http,
                                                                     satellite_map['arbiter']), verify=False)
        expected_data ={"reactionner": ["reactionner-master"],
                        "broker": ["broker-master"],
                        "arbiter": ["arbiter-master"],
                        "scheduler": ["scheduler-master"],
                        "receiver": ["receiver-master"],
                        "poller": ["poller-master"]}
        data = raw_data.json()
        self.assertIsInstance(data, dict, "Data is not a dict!")
        for k, v in expected_data.iteritems():
            self.assertEqual(set(data[k]), set(v))

        print("Testing have_conf")
        for daemon in ['scheduler', 'broker', 'poller', 'reactionner', 'receiver']:
            raw_data = req.get("%s://localhost:%s/have_conf" % (http, satellite_map[daemon]), verify=False)
            data = raw_data.json()
            self.assertTrue(data, "Daemon %s has no conf!" % daemon)
            # TODO: test with magic_hash

        print("Testing api")
        name_to_interface = {'arbiter': ArbiterInterface,
                             'scheduler': SchedulerInterface,
                             'broker': BrokerInterface,
                             'poller': GenericInterface,
                             'reactionner': GenericInterface,
                             'receiver': ReceiverInterface}
        for name, port in satellite_map.items():
            raw_data = req.get("%s://localhost:%s/api" % (http, port), verify=False)
            data = raw_data.json()
            expected_data = set(name_to_interface[name](None).api())
            self.assertIsInstance(data, list, "Data is not a list!")
            self.assertEqual(set(data), expected_data, "Daemon %s has a bad API!" % name)

        print("Testing get_checks on scheduler")
        # TODO: if have poller running, the poller will get the checks before us
        #
        # We need to sleep 10s to be sure the first check can be launched now (check_interval = 5)
        # sleep(4)
        # raw_data = req.get("http://localhost:%s/get_checks" % satellite_map['scheduler'], params={'do_checks': True})
        # data = unserialize(raw_data.json(), True)
        # self.assertIsInstance(data, list, "Data is not a list!")
        # self.assertNotEqual(len(data), 0, "List is empty!")
        # for elem in data:
        #     self.assertIsInstance(elem, Check, "One elem of the list is not a Check!")

        print("Testing get_raw_stats")
        for name, port in satellite_map.items():
            raw_data = req.get("%s://localhost:%s/get_raw_stats" % (http, port), verify=False)
            data = raw_data.json()
            if name == 'broker':
                self.assertIsInstance(data, list, "Data is not a list!")
            else:
                self.assertIsInstance(data, dict, "Data is not a dict!")

        print("Testing what_i_managed")
        for name, port in satellite_map.items():
            raw_data = req.get("%s://localhost:%s/what_i_managed" % (http, port), verify=False)
            data = raw_data.json()
            self.assertIsInstance(data, dict, "Data is not a dict!")
            if name != 'arbiter':
                self.assertEqual(1, len(data), "The dict must have 1 key/value!")

        print("Testing get_external_commands")
        for name, port in satellite_map.items():
            raw_data = req.get("%s://localhost:%s/get_external_commands" % (http, port), verify=False)
            data = raw_data.json()
            self.assertIsInstance(data, list, "Data is not a list!")

        print("Testing get_log_level")
        for name, port in satellite_map.items():
            raw_data = req.get("%s://localhost:%s/get_log_level" % (http, port), verify=False)
            data = raw_data.json()
            self.assertIsInstance(data, unicode, "Data is not an unicode!")
            # TODO: seems level get not same tham defined in *d.ini files

        print("Testing get_all_states")
        raw_data = req.get("%s://localhost:%s/get_all_states" % (http, satellite_map['arbiter']), verify=False)
        data = raw_data.json()
        self.assertIsInstance(data, dict, "Data is not a dict!")
        for daemon_type in data:
            daemons = data[daemon_type]
            print("Got Alignak state for: %ss / %d instances" % (daemon_type, len(daemons)))
            for daemon in daemons:
                print(" - %s: %s", daemon['%s_name' % daemon_type], daemon['alive'])
                self.assertTrue(daemon['alive'])
                self.assertFalse('realm' in daemon)
                self.assertTrue('realm_name' in daemon)

        print("Testing get_running_id")
        for name, port in satellite_map.items():
            raw_data = req.get("%s://localhost:%s/get_running_id" % (http, port), verify=False)
            data = raw_data.json()
            self.assertIsInstance(data, unicode, "Data is not an unicode!")

        print("Testing fill_initial_broks")
        raw_data = req.get("%s://localhost:%s/fill_initial_broks" % (http, satellite_map['scheduler']), params={'bname': 'broker-master'}, verify=False)
        data = raw_data.json()
        self.assertIsNone(data, "Data must be None!")

        print("Testing get_broks")
        for name in ['scheduler', 'poller']:
            raw_data = req.get("%s://localhost:%s/get_broks" % (http, satellite_map[name]),
                               params={'bname': 'broker-master'}, verify=False)
            data = raw_data.json()
            self.assertIsInstance(data, dict, "Data is not a dict!")

        print("Testing get_returns")
        # get_return requested by scheduler to poller daemons
        for name in ['reactionner', 'receiver', 'poller']:
            raw_data = req.get("%s://localhost:%s/get_returns" % (http, satellite_map[name]), params={'sched_id': 0}, verify=False)
            data = raw_data.json()
            self.assertIsInstance(data, list, "Data is not a list!")

        print("Testing signals")
        for name, proc in self.procs.items():
            # SIGUSR1: memory dump
            self.procs[name].send_signal(signal.SIGUSR1)
            time.sleep(0.5)
            # SIGUSR2: objects dump
            self.procs[name].send_signal(signal.SIGUSR2)
            # SIGHUP: reload configuration
            self.procs[name].send_signal(signal.SIGUSR2)

            # Other signals is considered as a request to stop...

        for name, proc in self.procs.items():
            print("Asking %s to end..." % name)
            os.kill(self.procs[name].pid, signal.SIGTERM)

        time.sleep(1)

        for name, proc in self.procs.items():
            data = self._get_subproc_data(name)
            print("%s stdout:" % (name))
            for line in iter(proc.stdout.readline, b''):
                print(">>> " + line.rstrip())
            print("%s stderr:" % (name))
            for line in iter(proc.stderr.readline, b''):
                print(">>> " + line.rstrip())

        print("Done testing")

Example 49

View license
    def test_daemons_modules(self):
        """ Running the Alignak daemons with configured modules

        Launches the satellites and the arbiter with the Example module
        declared, then scans the daemon log files for module load errors.

        :return: None
        """
        self.print_header()

        # copy etc config files in test/cfg/run_test_launch_daemons_modules and change folder
        # in the files for pid and log files
        if os.path.exists('./cfg/run_test_launch_daemons_modules'):
            shutil.rmtree('./cfg/run_test_launch_daemons_modules')

        shutil.copytree('../etc', './cfg/run_test_launch_daemons_modules')
        files = ['cfg/run_test_launch_daemons_modules/daemons/arbiterd.ini',
                 'cfg/run_test_launch_daemons_modules/daemons/brokerd.ini',
                 'cfg/run_test_launch_daemons_modules/daemons/pollerd.ini',
                 'cfg/run_test_launch_daemons_modules/daemons/reactionnerd.ini',
                 'cfg/run_test_launch_daemons_modules/daemons/receiverd.ini',
                 'cfg/run_test_launch_daemons_modules/daemons/schedulerd.ini',
                 'cfg/run_test_launch_daemons_modules/alignak.cfg',
                 'cfg/run_test_launch_daemons_modules/arbiter/daemons/arbiter-master.cfg',
                 'cfg/run_test_launch_daemons_modules/arbiter/daemons/broker-master.cfg',
                 'cfg/run_test_launch_daemons_modules/arbiter/daemons/poller-master.cfg',
                 'cfg/run_test_launch_daemons_modules/arbiter/daemons/reactionner-master.cfg',
                 'cfg/run_test_launch_daemons_modules/arbiter/daemons/receiver-master.cfg',
                 'cfg/run_test_launch_daemons_modules/arbiter/daemons/scheduler-master.cfg']
        # Point work/log directories at /tmp for the test run
        replacements = {
            '/usr/local/var/run/alignak': '/tmp',
            '/usr/local/var/log/alignak': '/tmp',
        }
        for filename in files:
            lines = []
            with open(filename) as infile:
                for line in infile:
                    for src, target in replacements.iteritems():
                        line = line.replace(src, target)
                    lines.append(line)
            with open(filename, 'w') as outfile:
                for line in lines:
                    outfile.write(line)

        # declare modules in the daemons configuration
        shutil.copy('./cfg/default/mod-example.cfg', './cfg/run_test_launch_daemons_modules/arbiter/modules')
        files = ['cfg/run_test_launch_daemons_modules/arbiter/daemons/arbiter-master.cfg',
                 'cfg/run_test_launch_daemons_modules/arbiter/daemons/broker-master.cfg',
                 'cfg/run_test_launch_daemons_modules/arbiter/daemons/poller-master.cfg',
                 'cfg/run_test_launch_daemons_modules/arbiter/daemons/reactionner-master.cfg',
                 'cfg/run_test_launch_daemons_modules/arbiter/daemons/receiver-master.cfg',
                 'cfg/run_test_launch_daemons_modules/arbiter/daemons/scheduler-master.cfg']
        replacements = {
            'modules': 'modules Example'
        }
        for filename in files:
            lines = []
            with open(filename) as infile:
                for line in infile:
                    for src, target in replacements.iteritems():
                        line = line.replace(src, target)
                    lines.append(line)
            with open(filename, 'w') as outfile:
                for line in lines:
                    outfile.write(line)

        self.setup_with_file('cfg/run_test_launch_daemons_modules/alignak.cfg')
        self.assertTrue(self.conf_is_correct)

        self.procs = {}
        satellite_map = {
            'arbiter': '7770', 'scheduler': '7768', 'broker': '7772',
            'poller': '7771', 'reactionner': '7769', 'receiver': '7773'
        }

        print("Cleaning pid and log files...")
        for daemon in ['arbiter', 'scheduler', 'broker', 'poller', 'reactionner', 'receiver']:
            if os.path.exists('/tmp/%sd.pid' % daemon):
                os.remove('/tmp/%sd.pid' % daemon)
                print("- removed /tmp/%sd.pid" % daemon)
            if os.path.exists('/tmp/%sd.log' % daemon):
                os.remove('/tmp/%sd.log' % daemon)
                print("- removed /tmp/%sd.log" % daemon)

        print("Launching the daemons...")
        for daemon in ['scheduler', 'broker', 'poller', 'reactionner', 'receiver']:
            args = ["../alignak/bin/alignak_%s.py" %daemon,
                    "-c", "./cfg/run_test_launch_daemons_modules/daemons/%sd.ini" % daemon]
            self.procs[daemon] = \
                subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            sleep(1)
            print("- %s launched (pid=%d)" % (daemon, self.procs[daemon].pid))

        sleep(1)

        print("Testing daemons start")
        for name, proc in self.procs.items():
            ret = proc.poll()
            if ret is not None:
                print("*** %s exited on start!" % (name))
                for line in iter(proc.stdout.readline, b''):
                    print(">>> " + line.rstrip())
                for line in iter(proc.stderr.readline, b''):
                    print(">>> " + line.rstrip())
            self.assertIsNone(ret, "Daemon %s not started!" % name)
            # Fixed: report the pid of the daemon being checked (proc), not
            # self.procs[daemon] which referenced the previous loop's variable
            print("%s running (pid=%d)" % (name, proc.pid))

        # Let the daemons start ...
        sleep(5)

        print("Testing pid files and log files...")
        for daemon in ['scheduler', 'broker', 'poller', 'reactionner', 'receiver']:
            self.assertTrue(os.path.exists('/tmp/%sd.pid' % daemon), '/tmp/%sd.pid does not exist!' % daemon)
            self.assertTrue(os.path.exists('/tmp/%sd.log' % daemon), '/tmp/%sd.log does not exist!' % daemon)

        sleep(1)

        print("Launching arbiter...")
        args = ["../alignak/bin/alignak_arbiter.py",
                "-c", "cfg/run_test_launch_daemons_modules/daemons/arbiterd.ini",
                "-a", "cfg/run_test_launch_daemons_modules/alignak.cfg"]
        self.procs['arbiter'] = \
            subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        print("%s launched (pid=%d)" % ('arbiter', self.procs['arbiter'].pid))

        sleep(5)

        name = 'arbiter'
        print("Testing Arbiter start %s" % name)
        ret = self.procs[name].poll()
        if ret is not None:
            print("*** %s exited on start!" % (name))
            for line in iter(self.procs[name].stdout.readline, b''):
                print(">>> " + line.rstrip())
            for line in iter(self.procs[name].stderr.readline, b''):
                print(">>> " + line.rstrip())
        self.assertIsNone(ret, "Daemon %s not started!" % name)
        print("%s running (pid=%d)" % (name, self.procs[name].pid))

        sleep(1)

        print("Testing pid files and log files...")
        for daemon in ['arbiter']:
            self.assertTrue(os.path.exists('/tmp/%sd.pid' % daemon), '/tmp/%sd.pid does not exist!' % daemon)
            self.assertTrue(os.path.exists('/tmp/%sd.log' % daemon), '/tmp/%sd.log does not exist!' % daemon)

        # Let the arbiter build and dispatch its configuration
        sleep(5)

        print("Get module information from log files...")
        nb_errors = 0
        for daemon in ['arbiter', 'scheduler', 'broker', 'poller', 'reactionner', 'receiver']:
            self.assertTrue(os.path.exists('/tmp/%sd.log' % daemon), '/tmp/%sd.log does not exist!' % daemon)
            daemon_errors = False
            print("-----\n%s log file\n-----\n" % daemon)
            with open('/tmp/%sd.log' % daemon) as f:
                for line in f:
                    if '***' in line:
                        print("Coverage log: %s" % line)
                    if 'Example' in line:
                        print("Example module log: %s" % line)
                    if 'WARNING' in line or daemon_errors:
                        print(line)
                    if 'ERROR' in line or 'CRITICAL' in line:
                        if not daemon_errors:
                            print(line[:-1])
                        daemon_errors = True
                        nb_errors += 1
        self.assertEqual(nb_errors, 0, "Error logs raised!")
        print("No error logs raised when daemons loaded the modules")

        print("Stopping the daemons...")
        for name, proc in self.procs.items():
            print("Asking %s to end..." % name)
            os.kill(self.procs[name].pid, signal.SIGTERM)

        time.sleep(1)

        for name, proc in self.procs.items():
            data = self._get_subproc_data(name)
            print("%s stdout:" % (name))
            for line in iter(proc.stdout.readline, b''):
                print(">>> " + line.rstrip())
            print("%s stderr:" % (name))
            for line in iter(proc.stderr.readline, b''):
                print(">>> " + line.rstrip())

        print("Daemons stopped")

Example 50

View license
    def setUp(self, withSSH=False, withFilePersona=False):

        self.withSSH = withSSH
        self.withFilePersona = withFilePersona

        cwd = os.path.dirname(os.path.abspath(
            inspect.getfile(inspect.currentframe())))

        if self.unitTest:
            self.printHeader('Using flask ' + self.flask_url)
            parsed_url = urlparse(self.flask_url)
            userArg = '-user=%s' % self.user
            passwordArg = '-password=%s' % self.password
            portArg = '-port=%s' % parsed_url.port

            script = 'HPE3ParMockServer_flask.py'
            path = "%s/%s" % (cwd, script)
            try:
                self.mockServer = subprocess.Popen([sys.executable,
                                                    path,
                                                    userArg,
                                                    passwordArg,
                                                    portArg],
                                                   stdout=subprocess.PIPE,
                                                   stderr=subprocess.PIPE,
                                                   stdin=subprocess.PIPE
                                                   )
            except Exception:
                pass

            time.sleep(1)
            if self.withFilePersona:
                self.cl = file_client.HPE3ParFilePersonaClient(self.flask_url)
            else:
                self.cl = client.HPE3ParClient(self.flask_url)

            if self.withSSH:

                self.printHeader('Using paramiko SSH server on port %s' %
                                 self.ssh_port)

                ssh_script = 'HPE3ParMockServer_ssh.py'
                ssh_path = "%s/%s" % (cwd, ssh_script)

                self.mockSshServer = subprocess.Popen([sys.executable,
                                                       ssh_path,
                                                       str(self.ssh_port)],
                                                      stdout=subprocess.PIPE,
                                                      stderr=subprocess.PIPE,
                                                      stdin=subprocess.PIPE)
                time.sleep(1)

        else:
            if withFilePersona:
                self.printHeader('Using 3PAR %s with File Persona' %
                                 self.url_3par)
                self.cl = file_client.HPE3ParFilePersonaClient(self.url_3par)
            else:
                self.printHeader('Using 3PAR ' + self.url_3par)
                self.cl = client.HPE3ParClient(self.url_3par)

        if self.withSSH:
            # This seems to slow down the test cases, so only use this when
            # requested
            if self.unitTest:
                # The mock SSH server can be accessed at 0.0.0.0.
                ip = '0.0.0.0'
            else:
                parsed_3par_url = urlparse(self.url_3par)
                ip = parsed_3par_url.hostname.split(':').pop()
            try:
                # Now that we don't do keep-alive, the conn_timeout needs to
                # be set high enough to avoid sometimes slow response in
                # the File Persona tests.
                self.cl.setSSHOptions(
                    ip,
                    self.user,
                    self.password,
                    port=self.ssh_port,
                    conn_timeout=500,
                    known_hosts_file=self.known_hosts_file,
                    missing_key_policy=self.missing_key_policy)
            except Exception as ex:
                print(ex)
                self.fail("failed to start ssh client")

        # Setup remote copy target
        if self.run_remote_copy:
            parsed_3par_url = urlparse(self.secondary_url_3par)
            ip = parsed_3par_url.hostname.split(':').pop()
            self.secondary_cl = client.HPE3ParClient(self.secondary_url_3par)
            try:
                self.secondary_cl.setSSHOptions(
                    ip,
                    self.secondary_user,
                    self.secondary_password,
                    port=self.ssh_port,
                    conn_timeout=500,
                    known_hosts_file=self.known_hosts_file,
                    missing_key_policy=self.missing_key_policy)
            except Exception as ex:
                print(ex)
                self.fail("failed to start ssh client")
            self.secondary_cl.login(self.secondary_user,
                                    self.secondary_password)

        if self.debug:
            self.cl.debug_rest(True)

        self.cl.login(self.user, self.password)

        if not self.port:
            ports = self.cl.getPorts()
            ports = [p for p in ports['members']
                     if p['linkState'] == 4 and  # Ready
                     ('device' not in p or not p['device']) and
                     p['mode'] == self.cl.PORT_MODE_TARGET]
            self.port = ports[0]['portPos']