sys.stdin.readlines

Here are examples of the Python API sys.stdin.readlines, taken from open-source projects. By voting up an example you can indicate which ones are most useful and appropriate.

75 Examples

Example 1

Project: mavelous
Source File: calcdeps.py
View license
def GetInputsFromOptions(options):
  """Generates the inputs from flag options.

  Args:
    options: The flags to calcdeps.
  Returns:
    A list of inputs (strings).
  """
  inputs = options.inputs
  if not inputs:  # Parse stdin
    logging.info('No inputs specified. Reading from stdin...')
    # Drop trailing newlines, then discard any lines that end up empty.
    stripped = [entry.strip('\n') for entry in sys.stdin.readlines()]
    inputs = filter(None, stripped)

  logging.info('Scanning files...')
  return FilterByExcludes(options, ExpandDirectories(inputs))

Example 2

Project: pan-python
Source File: panconf.py
View license
def read_file(path):
    """Return the contents of *path* as a single string.

    A path of '-' reads from standard input instead.  If the file cannot
    be opened or read, prints a message to stderr and exits with status 1.
    """
    if path == '-':
        lines = sys.stdin.readlines()
    else:
        try:
            # 'with' guarantees the handle is closed even if readlines()
            # raises; the original leaked the handle on a read error.
            with open(path) as f:
                lines = f.readlines()
        except IOError as msg:
            print('open %s: %s' % (path, msg), file=sys.stderr)
            sys.exit(1)

    return ''.join(lines)

Example 3

View license
def post_receive(mailer, subject_prefix, subject_template=None):
    """Read git post-receive records from stdin and mail out each ref's commits."""
    commits = {}
    for entry in sys.stdin.readlines():
        # Each stdin line carries: <old-rev> <new-rev> <ref-name>.
        old_rev, new_rev, ref_name = parse_post_receive_line(entry)
        commits[ref_name] = get_commits(old_rev, new_rev)
    process_commits(commits, mailer, subject_prefix, subject_template)

Example 4

View license
    def handle(self, *args, **options):
        """Parse an email read from stdin and save the handler's answer.

        Bug fix: the original swallowed ``CouldNotFindIdentifier`` with
        ``pass`` and then fell through to ``answer.save()``, which raised
        ``NameError`` because ``answer`` was never bound.  We now return
        early instead, preserving the intended silent skip.
        """
        lines = sys.stdin.readlines()
        handler = EmailHandler()
        try:
            answer = handler.handle(lines)
        except CouldNotFindIdentifier:
            # No identifier in the message -> nothing to save.
            return
        answer.save()

Example 5

Project: euscan
Source File: scan_upstream.py
View license
    def handle(self, *args, **options):
        """Scan upstream sources for the selected packages (or all of them)."""
        set_verbosity_level(logger, options.get("verbosity", 1))

        if options['all']:
            # None tells scan_upstream to process every known package.
            packages = None
        elif args:
            packages = list(args)
        else:
            # One package name per stdin line; chop the trailing newline.
            packages = [entry[:-1] for entry in sys.stdin.readlines()]

        scan_upstream(
            packages=packages,
            purge_versions=options["purge-versions"],
            logger=logger,
        )

Example 6

Project: euscan
Source File: scan_metadata.py
View license
    def handle(self, *args, **options):
        """Scan metadata for the selected packages, category, or everything."""
        set_verbosity_level(logger, options.get("verbosity", 1))

        if options['all'] or options['category']:
            # None means "scan everything" (optionally narrowed by category).
            packages = None
        elif args:
            packages = list(args)
        else:
            # One package name per stdin line; chop the trailing newline.
            packages = [entry[:-1] for entry in sys.stdin.readlines()]

        scan_metadata(
            packages=packages,
            category=options['category'],
            logger=logger,
            populate=options['populate'],
        )

Example 7

Project: pan-python
Source File: panconf.py
View license
def read_file(path):
    """Return the contents of *path* as a single string.

    A path of '-' reads from standard input instead.  If the file cannot
    be opened or read, prints a message to stderr and exits with status 1.
    """
    if path == '-':
        lines = sys.stdin.readlines()
    else:
        try:
            # 'with' guarantees the handle is closed even if readlines()
            # raises; the original leaked the handle on a read error.
            with open(path) as f:
                lines = f.readlines()
        except IOError as msg:
            print('open %s: %s' % (path, msg), file=sys.stderr)
            sys.exit(1)

    return ''.join(lines)

Example 8

Project: subscription-manager
Source File: test_lock.py
View license
def main(args):
    """Acquire the lock file named in args[1]; exit 128 when it is busy
    and 1 as soon as anything arrives on stdin."""
    test_lock = lock.Lock(args[1])

    # A False result means another process holds the lock; report it with
    # a distinctive status so the controlling thread can notice.
    if test_lock.acquire(blocking=False) is False:
        return 128

    # exit on any stdin input
    for _ in sys.stdin.readlines():
        return 1

Example 9

View license
def main():
    """Print the stdin-supplied bug ids that pass the project/importance filter."""
    args = _parse_args()

    lp = Launchpad.login_with('openstack-releasing', 'production')

    bug_ids = [entry.strip() for entry in sys.stdin.readlines()]
    for bug in _filter_bugs(lp, args.project, args.importance, bug_ids):
        print(bug)

Example 10

View license
def main():
    """Print the stdin-supplied bug ids that pass the project/tag filter."""
    args = _parse_args()

    lp = Launchpad.login_with('openstack-releasing', 'production')

    bug_ids = [entry.strip() for entry in sys.stdin.readlines()]
    for bug in _filter_bugs(lp, args.project, args.tag, bug_ids):
        print(bug)

Example 11

Project: fuel-octane
Source File: clean_env.py
View license
def main():
    """Clean nova services and neutron agents for hosts listed on stdin."""
    hosts = [entry.rstrip('\n') for entry in sys.stdin.readlines()]
    # Credentials come from the standard OpenStack environment variables.
    env = os.environ
    access_data = {
        'user': env['OS_USERNAME'],
        'password': env['OS_PASSWORD'],
        'tenant': env['OS_TENANT_NAME'],
        'auth_url': env['OS_AUTH_URL'],
    }

    cleanup_nova_services(access_data, hosts)
    cleanup_neutron_agents(access_data, hosts)

Example 12

Project: phabricator-tools
Source File: aoncmd_query.py
View license
def _set_options(args, d):
    """Populate the Conduit query dict *d* from parsed arguments *args*."""
    # Copy simple flag values across; set_if_true skips falsy ones.
    for key, value in (
            ('ids', args.ids),
            ('arcanistProjects', args.arcanist_projects),
            ('branches', args.branches),
            ('limit', args.max_results),
            ('offset', args.offset_results)):
        phlsys_dictutil.set_if_true(d, key, value)

    if args.ids_stdin:
        # Accept ids separated by any whitespace, across all stdin lines.
        stdin_ids = [int(tok) for tok in " ".join(sys.stdin.readlines()).split()]
        d["ids"] = args.ids + stdin_ids

    if args.status_type:
        d["status"] = "status-" + args.status_type

Example 13

Project: release-tools
Source File: lp-tag.py
View license
def main():
    """Add the requested tag to every Launchpad bug number read from stdin."""
    args = _parse_args()
    lp = Launchpad.login_with('openstack-releasing', 'production')
    for raw in sys.stdin.readlines():
        bug = lp.bugs[raw.strip()]
        tags = bug.tags
        if args.tag not in tags:
            # Launchpad requires reassigning the whole list to persist it.
            tags.append(args.tag)
            bug.tags = tags
            bug.lp_save()

Example 14

Project: euscan
Source File: scan_portage.py
View license
    def handle(self, *args, **options):
        """Scan the portage tree for the selected packages, category, or all."""
        set_verbosity_level(logger, options.get("verbosity", 1))

        if options['all'] or options['category']:
            # None means "scan everything" (optionally narrowed by category).
            packages = None
        elif args:
            packages = list(args)
        else:
            # One package name per stdin line; chop the trailing newline.
            packages = [entry[:-1] for entry in sys.stdin.readlines()]

        scan_portage(
            packages=packages,
            category=options['category'],
            no_log=options["no-log"],
            purge_packages=options["purge-packages"],
            purge_versions=options["purge-versions"],
            upstream=options["upstream"],
            logger=logger,
        )

Example 15

Project: shinken
Source File: send_nsca.py
View license
def main(hostname, port, encryption, password):
    """Read delimiter-separated notifications from stdin and send via NSCA.

    Each non-empty line is either ``host<d>rc<d>output`` (a host result,
    which gets an empty service field inserted) or
    ``host<d>service<d>rc<d>output``.

    Bug fix: the original rebound ``notif`` (the NSCANotifier instance)
    to the split field list, so ``notif.svc_result(*notif)`` raised
    AttributeError on a plain list.  The fields now get their own name.
    """
    notifier = NSCANotifier(hostname, port, encryption, password)

    for line in sys.stdin.readlines():
        line = line.rstrip()
        if not line:
            continue
        fields = line.split(opts.delimiter)
        if len(fields) == 3:
            # only host, rc, output
            fields.insert(1, '')  # insert service
        # line consists of host, service, rc, output
        assert len(fields) == 4
        notifier.svc_result(*fields)

Example 16

Project: clustershell
Source File: Nodeset.py
View license
def process_stdin(xsetop, xsetcls, autostep):
    """Process standard input and operate on xset."""
    # Accumulate everything from stdin into one temporary set first.
    tmpset = xsetcls(autostep=autostep)
    for raw in sys.stdin.readlines():
        # Strip trailing '#' comments; several nodesets may share a line.
        for elem in raw[0:raw.find('#')].strip().split():
            # Do explicit object creation for RangeSet
            tmpset.update(xsetcls(elem, autostep=autostep))
    # Apply the requested operation only if we actually read something.
    if tmpset:
        xsetop(tmpset)

Example 17

Project: misc-scripts
Source File: check_url_list.py
View license
def main():
    urls = {}

    for line in sys.stdin.readlines():
        line = line.strip()
        if line not in urls:
            sys.stderr.write("+ checking URL: %s\n" % line)
            urls[line] = {'code': get_url_nofollow(line), 'count': 1}
            sys.stderr.write("++ %s\n" % str(urls[line]))
        else:
            urls[line]['count'] = urls[line]['count'] + 1

    for url in urls:
        if urls[url]['code'] != 200:
            print "%d\t%d\t%s" % (urls[url]['count'], urls[url]['code'], url)

Example 18

Project: timed
Source File: client.py
View license
@cmdapp.cmd
def parse(logfile, time_format):
  "parses a stream with text formatted as a Timed logfile and shows a summary"

  records = [server.record_from_txt(line, only_elapsed=True,
    time_format=time_format) for line in sys.stdin.readlines()]

  # TODO: make this code better.
  def output(summary):
    width = max([len(p[0]) for p in summary]) + 3
    print '\n'.join([
      "%s%s%s" % (p[0], ' ' * (width - len(p[0])),
        colored(minutes_to_txt(p[1]), 'red')) for p in summary])

  output(server.summarize(records))

Example 19

Project: pockyt
Source File: client.py
View license
    def _get_redirect_input(self):
        """Parse each non-blank piped-in line and queue the extracted info."""
        for raw in sys.stdin.readlines():
            stripped = raw.strip()
            if not stripped:
                # Blank lines carry no record; skip them.
                continue
            self._input.append(self._unformat_spec.parse(stripped))

Example 20

Project: devassistant
Source File: actions.py
View license
    @classmethod
    def gather_input(cls, received):
        """Return *received* itself, or the whole of stdin when it is '-'."""
        if received != '-':
            return received
        # read from stdin
        chunks = []
        for chunk in sys.stdin.readlines():
            chunks.append(chunk)
        return ''.join(chunks)

Example 21

View license
def read_file(path):
    """Return the contents of *path* as a single string.

    A path of '-' reads from standard input instead.  If the file cannot
    be opened or read, prints a message to stderr and exits with status 1.
    """
    if path == '-':
        lines = sys.stdin.readlines()
    else:
        try:
            # 'with' guarantees the handle is closed even if readlines()
            # raises; the original leaked the handle on a read error.
            with open(path) as f:
                lines = f.readlines()
        except IOError as msg:
            print('open %s: %s' % (path, msg), file=sys.stderr)
            sys.exit(1)

    return ''.join(lines)

Example 22

Project: release-tools
Source File: annotate-lp-bugs.py
View license
def main():
    """Annotate each Launchpad bug whose number arrives on stdin."""
    args = _parse_args()

    lp = Launchpad.login_with('openstack-releasing', 'production')

    for raw in sys.stdin.readlines():
        _annotate_bug(lp, args.project, raw.strip())

Example 23

View license
def read_file(path):
    """Return the contents of *path* as a single string.

    A path of '-' reads from standard input instead.  If the file cannot
    be opened or read, prints a message to stderr and exits with status 1.
    """
    if path == '-':
        lines = sys.stdin.readlines()
    else:
        try:
            # 'with' guarantees the handle is closed even if readlines()
            # raises; the original leaked the handle on a read error.
            with open(path) as f:
                lines = f.readlines()
        except IOError as msg:
            print('open %s: %s' % (path, msg), file=sys.stderr)
            sys.exit(1)

    return ''.join(lines)

Example 24

Project: mysql-size-estimator
Source File: cli.py
View license
    def _read_table_from_stdin(self):
        """Parse a CREATE TABLE statement piped in on stdin.

        Stores the parsed result on ``self.table``.  Raises
        CliSQLParseException when the statement cannot be parsed.
        """
        data = sys.stdin.readlines()
        table_str = " ".join(data)
        try:
            self.table = self._parser.parse_table(table_str)
        except Exception:
            # Narrowed from a bare ``except:``, which also swallowed
            # SystemExit and KeyboardInterrupt.
            raise CliSQLParseException("Cannot parse given table")

Example 25

Project: pockyt
Source File: client.py
View license
    def _get_redirect_input(self):
        """Parse each non-blank piped-in line and queue the extracted info."""
        for raw in sys.stdin.readlines():
            stripped = raw.strip()
            if not stripped:
                # Blank lines carry no record; skip them.
                continue
            self._input.append(self._unformat_spec.parse(stripped))

Example 26

Project: shinken
Source File: send_nsca.py
View license
def main(hostname, port, encryption, password):
    """Read delimiter-separated notifications from stdin and send via NSCA.

    Each non-empty line is either ``host<d>rc<d>output`` (a host result,
    which gets an empty service field inserted) or
    ``host<d>service<d>rc<d>output``.

    Bug fix: the original rebound ``notif`` (the NSCANotifier instance)
    to the split field list, so ``notif.svc_result(*notif)`` raised
    AttributeError on a plain list.  The fields now get their own name.
    """
    notifier = NSCANotifier(hostname, port, encryption, password)

    for line in sys.stdin.readlines():
        line = line.rstrip()
        if not line:
            continue
        fields = line.split(opts.delimiter)
        if len(fields) == 3:
            # only host, rc, output
            fields.insert(1, '')  # insert service
        # line consists of host, service, rc, output
        assert len(fields) == 4
        notifier.svc_result(*fields)

Example 27

Project: onion-py
Source File: onion.py
View license
def atlas(m, n):
  """Look up each relay named on stdin via Onionoo and print its details csv."""
  fields = 'nickname,fingerprint,last_seen,running,flags,advertised_bandwidth,or_addresses'
  print(fields)
  for line in sys.stdin.readlines():
    parts = line.strip().split(",")
    # Column 3 holds the fingerprint on csv input, otherwise column 1.
    fp = parts[2] if len(parts) >= 3 else parts[0]
    d = m.query('details',lookup=fp, limit=1, type='relay', field=fields)
    if len(d.relays) < 1:
      print('not_found,{},...'.format(fp))
    else:
      r = d.relays[0]
      values = [r.nickname, r.fingerprint, r.last_seen, r.running, r.flags,
                r.bandwidth[3], r.or_addresses[0]]
      print(",".join([str(v) for v in values]))

Example 28

Project: pyjip
Source File: jip_pipe.py
View license
def main():
    """Build an embedded jip pipeline from --cmd (or stdin) and run or
    submit it.

    NOTE(review): uses Python 2 ``print >>`` syntax throughout; relies on
    module-level names (jip, parse_args, colorize, log, ...) defined
    elsewhere in the file.
    """
    args = parse_args(__doc__, options_first=False)
    pipeline = jip.Pipeline()
    # Fall back to reading the command from stdin when --cmd is absent.
    if not args['--cmd']:
        args['--cmd'] = "\n".join(sys.stdin.readlines())
    if not args['--cmd']:
        print >>sys.stderr, "No Command specified!"
        sys.exit(1)

    @jip.pipeline()
    def embedded_pipeline():
        """Embedded pipeline to run a custom pipeline script

        usage:
            embedded [-i <input>] [-I <inputs>...]

        Inputs:
            -i, --input <input>       Single input file
                                      [default: stdin]
            -I, --inputs <inputs>...  List of input files
        """
        return "\n".join(args['--cmd'])
    # The literal 'stdin' placeholder is mapped to the real sys.stdin.
    pipeline.job(
        args['--name'] if args['--name'] else 'pipeline'
    ).run('embedded_pipeline',
          input=[sys.stdin if a == 'stdin' else a for a in args['--input']],
          inputs=args['--inputs'])

    # Dry-run / show mode: render the pipeline without executing anything.
    if args['--dry'] or args['--show']:
        jip.cli.dry(pipeline, [],
                    dry=args['--dry'],
                    show=args['--show'])
        return

    profile = jip.profiles.get(name='default'
                               if not args['--profile']
                               else args['--profile'])
    profile.load_args(args)

    jobs = jip.jobs.create_jobs(pipeline, [], keep=args['--keep'],
                                profile=profile,
                                profiler=args['--with-profiler'])

    force = args['--force']
    if not args["--submit"]:
        # Local execution path: assign job ids ourselves, then run each
        # execution in-process unless it already completed (and no --force).
        for i, j in enumerate(jobs):
            j.id = i + 1
        for exe in jip.jobs.create_executions(jobs):
            if exe.completed and not force:
                print >>sys.stderr, colorize("Skipping", YELLOW), exe.name
            else:
                success = jip.jobs.run_job(exe.job)
                if not success:
                    print >>sys.stderr, colorize(exe.job.state, RED)
                    sys.exit(1)
    else:
        try:
            #####################################################
            # Iterate the executions and submit
            #####################################################
            for exe in jip.jobs.create_executions(jobs, save=True,
                                                  check_outputs=not force,
                                                  check_queued=not force):
                if exe.completed and not force:
                    print colorize("Skipping %s" % exe.name, YELLOW)
                else:
                    if jip.jobs.submit_job(exe.job, force=force):
                        print "Submitted %s with remote id %s" % (
                            exe.job.id, exe.job.job_id
                        )
        except Exception as err:
            log.debug("Submission error: %s", err, exc_info=True)
            print >>sys.stderr, colorize("Error while submitting job:", RED), \
                colorize(str(err), RED)
            ##################################################
            # delete all submitted jobs so a failed batch leaves no orphans
            ##################################################
            jip.jobs.delete(jobs, clean_logs=True)

Example 29

Project: bashplotlib
Source File: scatterplot.py
View license
def main():
    """Parse CLI options and draw a terminal scatterplot from csv or x/y data."""

    parser = optparse.OptionParser(usage=scatter['usage'])

    # (flags, keyword arguments) pairs for every supported option.
    for flags, kwargs in (
            (('-f', '--file'), dict(help='a csv w/ x and y coordinates', default=None, dest='f')),
            (('-t', '--title'), dict(help='title for the chart', default="", dest='t')),
            (('-x',), dict(help='x coordinates', default=None, dest='x')),
            (('-y',), dict(help='y coordinates', default=None, dest='y')),
            (('-s', '--size'), dict(help='y coordinates', default=20, dest='size', type='int')),
            (('-p', '--pch'), dict(help='shape of point', default="x", dest='pch')),
            (('-c', '--colour'), dict(help='colour of the plot (%s)' % colour_help,
                                      default='default', dest='colour'))):
        parser.add_option(*flags, **kwargs)

    opts, args = parser.parse_args()

    # With no file and incomplete x/y coordinates, fall back to stdin.
    if opts.f is None and (opts.x is None or opts.y is None):
        opts.f = sys.stdin.readlines()

    if opts.f or (opts.x and opts.y):
        plot_scatter(opts.f, opts.x, opts.y, opts.size, opts.pch, opts.colour, opts.t)
    else:
        print("nothing to plot!")

Example 30

Project: brenda
Source File: config.py
View license
    def __init__(self, config_file, env_prefix=None, default_stdin=False, use_s3cfg=True):
        """Build the config from a file (or stdin), environment variables,
        and finally ~/.s3cfg for any missing AWS keys."""
        # Load and parse the config source, preferring an explicit file.
        if config_file:
            with open(config_file) as f:
                for line in f.readlines():
                    self._process_line(line)
        elif default_stdin:
            for line in sys.stdin.readlines():
                self._process_line(line)

        # Overlay values from the environment.
        self._load_from_env(env_prefix)

        # Fill access_key / secret_key from ~/.s3cfg if still unset.
        if use_s3cfg:
            key_map = (('AWS_ACCESS_KEY', 'access_key'),
                       ('AWS_SECRET_KEY', 'secret_key'))
            for key, s3_key in key_map:
                if self.get(key):
                    continue
                value = self._s3cfg_get(s3_key)
                if value:
                    self[key] = value

Example 31

Project: translate
Source File: mozfunny2prop.py
View license
def main(argv=None):
    """Convert funny-format lines read from stdin into .properties on stdout."""
    import sys
    for out_line in funny2prop(sys.stdin.readlines()):
        sys.stdout.write(out_line)

Example 32

Project: machinelearning
Source File: mltail.py
View license
def main():
    """Classify web-log lines from stdin against LSI models built from a
    known-good and a known-bad log file, then report suspect IPs.

    NOTE(review): depends on module-level names (``options``, ``corpora``,
    ``models``, ``re``, ``os``, ``isIP``, ``apachequotedfieldsre``) defined
    elsewhere in the file; ``line.decode`` suggests Python 2 byte strings.
    """
    dictionary=corpora.Dictionary()
    # Tokens too common in HTTP access logs to carry any signal.
    stoplist=set('- get http/1.0 http/1.1 302 200 404 403'.split())
    tokenDelimiters="/", r'\\', r'\\'," ","&","?"
    splitPattern = '|'.join(map(re.escape, tokenDelimiters))
    
    if not os.path.exists(options.dictionaryFile):
        #create a dictionary (id/word mapping) of all words
        texts = [[word for word in re.split(splitPattern,line.decode('ascii','ignore').lower()) if word not in stoplist]for line in open(options.goodLogFile).readlines()]
        texts += [[word for word in re.split(splitPattern,line.decode('ascii','ignore').lower()) if word not in stoplist]for line in open(options.badLogFile).readlines()]

        dictionary.add_documents(texts)
        dictionary.save(options.dictionaryFile)
        dictionary.save_as_text(options.dictionaryFile+'.txt')
    else:
        dictionary=corpora.Dictionary().load(options.dictionaryFile)
        print('loaded dictionary')
    
    #todo: save the corpus and update on dict removal or corpus removal    
    print('create the good corpus')
    #corpusGood=[dictionary.doc2bow(line.lower().split()) for line in open(options.goodLogFile).readlines() ]
    corpusGood=[dictionary.doc2bow([word for word in re.split(splitPattern,line.decode('ascii','ignore').lower()) if word not in stoplist]) for line in open(options.goodLogFile).readlines() ]
    print('creating the bad corpus')
    #corpusBad=[dictionary.doc2bow(line.lower().split()) for line in open(options.badLogFile).readlines() ]
    corpusBad=[dictionary.doc2bow([word for word in re.split(splitPattern,line.decode('ascii','ignore').lower()) if word not in stoplist]) for line in open(options.badLogFile).readlines() ]
    
    #create lsi models of the corpus
    #TODO: save these models if the building-block files haven't changed.
    print('creating models')
    modelGood = models.LsiModel(corpusGood, id2word=dictionary)
    modelBad = models.LsiModel(corpusBad, id2word=dictionary)
    
    #examine incoming log entries
    actors=dict()
    for stdinline in sys.stdin.readlines():
        ip=None
        badHit=False
        #determine source ip: first whitespace token that looks like an IP
        for word in stdinline.split():
            if isIP(word):
                ip=word
                break
        
        #use only the request field for classification
        stdinline=(' '.join(apachequotedfieldsre.findall(stdinline)[0:1]) )
        #compare an incoming line against good/bad corpus for highest hit
        vecHit = dictionary.doc2bow([word for word in re.split(splitPattern,stdinline.lower()) if word not in stoplist])
        vecmodelGood = modelGood[vecHit] # convert the query to LSI space
        vecmodelBad = modelBad[vecHit]
        
        # Sorted ascending by similarity, so index [-1] is the best match.
        simsGood = sorted(vecmodelGood, key=lambda item: item[1])
        simsBad = sorted(vecmodelBad, key=lambda item: item[1])

        if len(simsBad)>0 and len(simsGood)==0:
            badHit=True
            #sys.stdout.write('bad hit: {0} {1} {2}\n'.format(ip,simsBad[-1],stdinline[:40]))
        elif len(simsBad)>0 and len(simsGood)>0 and (simsGood[-1][1]<simsBad[-1][1]):# and max(simsGood[-1][1],simsBad[-1][1])>.5: #good vs bad
            badHit=True
            #sys.stdout.write('bad hit: {0} {1} {2} {3}\n'.format(ip,simsGood[-1],simsBad[-1],stdinline[:40]))            

        if badHit:
            entry=('{0} {1:.2%} {2}'.format(ip,simsBad[-1][1],stdinline[:40]))
            #print(entry)
            if ip not in actors.keys():
                print('adding bad actor{0}'.format(ip))
                actors[ip]=dict()
                actors[ip]['hits']=list()
                actors[ip]['hits'].append(entry)
            else:
                actors[ip]['hits'].append(entry)

    # Final report: one line per bad actor, then its hits indented below.
    for actor in actors:
        print(actor)
        for hit in actors[actor]['hits']:
            print('\t{0}'.format(hit))

Example 33

Project: machinelearning
Source File: mltail.py
View license
def main():
    """Classify web-log lines from stdin against LSI models built from a
    known-good and a known-bad log file, then report suspect IPs.

    NOTE(review): depends on module-level names (``options``, ``corpora``,
    ``models``, ``re``, ``os``, ``isIP``, ``apachequotedfieldsre``) defined
    elsewhere in the file; ``line.decode`` suggests Python 2 byte strings.
    """
    dictionary=corpora.Dictionary()
    # Tokens too common in HTTP access logs to carry any signal.
    stoplist=set('- get http/1.0 http/1.1 302 200 404 403'.split())
    tokenDelimiters="/", r'\\', r'\\'," ","&","?"
    splitPattern = '|'.join(map(re.escape, tokenDelimiters))
    
    if not os.path.exists(options.dictionaryFile):
        #create a dictionary (id/word mapping) of all words
        texts = [[word for word in re.split(splitPattern,line.decode('ascii','ignore').lower()) if word not in stoplist]for line in open(options.goodLogFile).readlines()]
        texts += [[word for word in re.split(splitPattern,line.decode('ascii','ignore').lower()) if word not in stoplist]for line in open(options.badLogFile).readlines()]

        dictionary.add_documents(texts)
        dictionary.save(options.dictionaryFile)
        dictionary.save_as_text(options.dictionaryFile+'.txt')
    else:
        dictionary=corpora.Dictionary().load(options.dictionaryFile)
        print('loaded dictionary')
    
    #todo: save the corpus and update on dict removal or corpus removal    
    print('create the good corpus')
    #corpusGood=[dictionary.doc2bow(line.lower().split()) for line in open(options.goodLogFile).readlines() ]
    corpusGood=[dictionary.doc2bow([word for word in re.split(splitPattern,line.decode('ascii','ignore').lower()) if word not in stoplist]) for line in open(options.goodLogFile).readlines() ]
    print('creating the bad corpus')
    #corpusBad=[dictionary.doc2bow(line.lower().split()) for line in open(options.badLogFile).readlines() ]
    corpusBad=[dictionary.doc2bow([word for word in re.split(splitPattern,line.decode('ascii','ignore').lower()) if word not in stoplist]) for line in open(options.badLogFile).readlines() ]
    
    #create lsi models of the corpus
    #TODO: save these models if the building-block files haven't changed.
    print('creating models')
    modelGood = models.LsiModel(corpusGood, id2word=dictionary)
    modelBad = models.LsiModel(corpusBad, id2word=dictionary)
    
    #examine incoming log entries
    actors=dict()
    for stdinline in sys.stdin.readlines():
        ip=None
        badHit=False
        #determine source ip: first whitespace token that looks like an IP
        for word in stdinline.split():
            if isIP(word):
                ip=word
                break
        
        #use only the request field for classification
        stdinline=(' '.join(apachequotedfieldsre.findall(stdinline)[0:1]) )
        #compare an incoming line against good/bad corpus for highest hit
        vecHit = dictionary.doc2bow([word for word in re.split(splitPattern,stdinline.lower()) if word not in stoplist])
        vecmodelGood = modelGood[vecHit] # convert the query to LSI space
        vecmodelBad = modelBad[vecHit]
        
        # Sorted ascending by similarity, so index [-1] is the best match.
        simsGood = sorted(vecmodelGood, key=lambda item: item[1])
        simsBad = sorted(vecmodelBad, key=lambda item: item[1])

        if len(simsBad)>0 and len(simsGood)==0:
            badHit=True
            #sys.stdout.write('bad hit: {0} {1} {2}\n'.format(ip,simsBad[-1],stdinline[:40]))
        elif len(simsBad)>0 and len(simsGood)>0 and (simsGood[-1][1]<simsBad[-1][1]):# and max(simsGood[-1][1],simsBad[-1][1])>.5: #good vs bad
            badHit=True
            #sys.stdout.write('bad hit: {0} {1} {2} {3}\n'.format(ip,simsGood[-1],simsBad[-1],stdinline[:40]))            

        if badHit:
            entry=('{0} {1:.2%} {2}'.format(ip,simsBad[-1][1],stdinline[:40]))
            #print(entry)
            if ip not in actors.keys():
                print('adding bad actor{0}'.format(ip))
                actors[ip]=dict()
                actors[ip]['hits']=list()
                actors[ip]['hits'].append(entry)
            else:
                actors[ip]['hits'].append(entry)

    # Final report: one line per bad actor, then its hits indented below.
    for actor in actors:
        print(actor)
        for hit in actors[actor]['hits']:
            print('\t{0}'.format(hit))

Example 34

Project: pandashells
Source File: p_parallel.py
View license
def main():
    """CLI entry point for p.parallel: read shell commands from stdin and
    execute them concurrently via parallel_lib."""
    # NOTE(review): this first msg value is immediately overwritten by the
    # textwrap.dedent() assignment below and never used.
    msg = "Tool to run shell commands in parallel.  Spawns processes "
    msg += "to concurrently run commands supplied on stdin. "

    msg = textwrap.dedent(
        """
        Read a list of commands from stdin and execute them in parrallel.

        -----------------------------------------------------------------------
        Examples:

            * This line generates commands that will be used in the examples.
                time seq 10 \\
                | p.format -t 'sleep 1; echo done {n}' --names n -i noheader

            * Execute the commands one at a time, no parallelism
                time seq 10 \\
                | p.format -t 'sleep 1; echo done {n}' --names n -i noheader \\
                | p.parallel -n 1

            * Execute all commands in parallel
                time seq 10 \\
                | p.format -t 'sleep 1; echo done {n}' --names n -i noheader \\
                | p.parallel -n 10

            * Suppress stdout from processes and echo commands
                time seq 10 \\
                | p.format -t 'sleep 1; echo done {n}' --names n -i noheader \\
                | p.parallel -n 10 -c -s stdout

            * Make a histogram of how long the individual jobs took
                time seq 100 \\
                | p.format -t 'sleep 1; echo done {n}' --names n -i noheader \\
                | p.parallel -n 50 -v \\
                | grep __job__ \\
                | p.df 'df.dropna()' 'df.duration_sec.hist(bins=20)'
        -----------------------------------------------------------------------
        """
    )

    # read command line arguments
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter, description=msg)

    msg = "The number of jobs to run in parallel. If not supplied, will "
    msg += "default to the number of detected cores."
    # nargs=1 wraps the value in a one-element list, hence args.njobs[0] below.
    parser.add_argument('--njobs', '-n', dest='njobs', default=[None],
                        nargs=1, type=int, help=msg)
    parser.add_argument("-v", "--verbose", action="store_true", default=False,
                        help="Enable verbose output")

    parser.add_argument("-c", "--show_commands", action="store_true",
                        default=False, help="Print commands to stdout")

    msg = "Suppress stdout, stderr, or both for all running jobs"
    parser.add_argument("-s", "--suppress",
                        choices=['stdout', 'stderr', 'both'], default=[None],
                        nargs=1, help=msg)

    # parse arguments
    args = parser.parse_args()

    # get the commands from stdin, one command per line
    cmd_list = sys.stdin.readlines()

    # get suppression vars from args (args.suppress is a one-element list)
    suppress_stdout = 'stdout' in args.suppress or 'both' in args.suppress
    suppress_stderr = 'stderr' in args.suppress or 'both' in args.suppress

    # run the commands
    parallel_lib.parallel(
        cmd_list,
        njobs=args.njobs[0],
        verbose=args.verbose,
        suppress_cmd=(not args.show_commands),
        suppress_stdout=suppress_stdout,
        suppress_stderr=suppress_stderr,
        assume_hyperthread=True)

Example 35

Project: translate
Source File: pydiff.py
View license
    def writediff(self, outfile):
        """Write a unified diff of self.fromfile vs self.tofile to *outfile*.

        Either side may be '-' (read from stdin) or, with the relevant
        new-file options, missing (treated as empty).  Hunks can then be
        filtered with --fromcontains/--tocontains/--contains before being
        written; identical files are optionally reported.
        """
        validfiles = True
        # Resolve the "from" side: real file, stdin, empty (new-file), or error.
        if os.path.exists(self.fromfile):
            with open(self.fromfile, 'U') as fh:
                self.from_lines = fh.readlines()
            fromfiledate = os.stat(self.fromfile).st_mtime
        elif self.fromfile == "-":
            self.from_lines = sys.stdin.readlines()
            fromfiledate = time.time()
        elif self.options.new_file or self.options.unidirectional_new_file:
            self.from_lines = []
            fromfiledate = 0
        else:
            outfile.write("%s: No such file or directory\n" % self.fromfile)
            validfiles = False
        # Resolve the "to" side the same way (no unidirectional option here).
        if os.path.exists(self.tofile):
            with open(self.tofile, 'U') as fh:
                self.to_lines = fh.readlines()
            tofiledate = os.stat(self.tofile).st_mtime
        elif self.tofile == "-":
            self.to_lines = sys.stdin.readlines()
            tofiledate = time.time()
        elif self.options.new_file:
            self.to_lines = []
            tofiledate = 0
        else:
            outfile.write("%s: No such file or directory\n" % self.tofile)
            validfiles = False
        if not validfiles:
            return
        fromfiledate = time.ctime(fromfiledate)
        tofiledate = time.ctime(tofiledate)
        # Compare case-insensitively if requested, but diff the original text.
        compare_from_lines = self.from_lines
        compare_to_lines = self.to_lines
        if self.options.ignore_case:
            compare_from_lines = [line.lower() for line in compare_from_lines]
            compare_to_lines = [line.lower() for line in compare_to_lines]
        matcher = difflib.SequenceMatcher(None, compare_from_lines, compare_to_lines)
        groups = matcher.get_grouped_opcodes(self.options.unified_lines)
        # The ---/+++ header is emitted lazily, only before the first hunk
        # that survives the contains-filters below.
        started = False
        fromstring = '--- %s\t%s%s' % (self.fromfile, fromfiledate, lineterm)
        tostring = '+++ %s\t%s%s' % (self.tofile, tofiledate, lineterm)

        for group in groups:
            hunk = "".join([line for line in self.unified_diff(group)])
            if self.options.fromcontains:
                if self.options.ignore_case_contains:
                    hunk_from_lines = "".join([line.lower() for line in self.get_from_lines(group)])
                else:
                    hunk_from_lines = "".join(self.get_from_lines(group))
                # Accelerator markers (e.g. '&' or '_') are stripped before matching.
                for accelerator in self.options.accelchars:
                    hunk_from_lines = hunk_from_lines.replace(accelerator, "")
                if self.options.fromcontains not in hunk_from_lines:
                    continue
            if self.options.tocontains:
                if self.options.ignore_case_contains:
                    hunk_to_lines = "".join([line.lower() for line in self.get_to_lines(group)])
                else:
                    hunk_to_lines = "".join(self.get_to_lines(group))
                for accelerator in self.options.accelchars:
                    hunk_to_lines = hunk_to_lines.replace(accelerator, "")
                if self.options.tocontains not in hunk_to_lines:
                    continue
            if self.options.contains:
                if self.options.ignore_case_contains:
                    hunk_lines = "".join([line.lower() for line in self.get_from_lines(group) + self.get_to_lines(group)])
                else:
                    hunk_lines = "".join(self.get_from_lines(group) + self.get_to_lines(group))
                for accelerator in self.options.accelchars:
                    hunk_lines = hunk_lines.replace(accelerator, "")
                if self.options.contains not in hunk_lines:
                    continue
            if not started:
                outfile.write(fromstring)
                outfile.write(tostring)
                started = True
            outfile.write(hunk)
        if not started and self.options.report_identical_files:
            outfile.write("Files %s and %s are identical\n" %
                          (self.fromfile, self.tofile))

Example 36

Project: pan-python
Source File: panafapi.py
View license
def process_arg(s, list=False):
    """Read data from stdin ('-'), a readable file, or the literal argument.

    s: '-' reads stdin; a readable path reads that file; anything else
        is treated as a literal value.
    list: when True, return a list of lines with trailing CR/LF
        stripped; otherwise return the content as a single string.
    """
    stdin_char = '-'

    if s == stdin_char:
        lines = sys.stdin.readlines()
    else:
        try:
            # Context manager guarantees the file is closed even if
            # readlines() raises (the original leaked the handle).
            with open(s) as f:
                lines = f.readlines()
        except IOError:
            # Not a readable file: treat the argument as a literal value.
            lines = [s]

    if debug > 1:
        print('lines:', lines, file=sys.stderr)

    if list:
        return [x.rstrip('\r\n') for x in lines]

    return ''.join(lines)

Example 37

Project: cgat
Source File: PdbTools.py
View license
def ConvertSequence2StructuralAlignment( src1, src2, source=None, format="plain", check_residues = 1):
    """calculate a structural alignment from two pdb files.

    src1, src2: pdb files whose CA coordinates are aligned.
    source: file containing the pairwise sequence alignment; read from
        stdin if None.
    format: alignment format; only "plain" is supported.
    check_residues: if true, report residue mismatches between the
        alignment and the structures on stderr.

    Returns a list of (x1, y1, z1, x2, y2, z2, weight) tuples.
    Raises ValueError on missing coordinates, unknown format, or an
    empty alignment.
    """

    ca1 = GetPdbCoordinates( src1, renumber = 1)

    if len(ca1) == 0:
        # real exceptions instead of long-removed string exceptions
        raise ValueError("no coordinates found for %s" % src1)

    ca2 = GetPdbCoordinates( src2, renumber = 1 )

    if len(ca2) == 0:
        raise ValueError("no coordinates found for %s" % src2)

    if format.lower() not in ("plain",):
        raise ValueError("unknown alignment format %s" % format)

    if source:
        # context manager closes the file even on error
        with open(source, "r") as infile:
            lines = infile.readlines()
    else:
        lines = sys.stdin.readlines()

    ## replace gap characters ('.' -> '-') and strip all whitespace;
    ## a list comprehension stays indexable on Python 3 (map() is lazy)
    lines = [re.sub(r"\s", "", x.replace(".", "-")) for x in lines]
    if not lines:
        raise ValueError("alignment is empty")

    lali = len(lines[0])

    # residue counters for sequences 1 and 2 as the alignment is scanned
    current1 = 0
    current2 = 0

    # cursors into the coordinate lists
    index1 = 0
    index2 = 0

    alignment = []

    for x in range(0, lali):

        res1 = lines[0][x]
        res2 = lines[1][x]

        if res1 != "-": current1+=1
        if res2 != "-": current2+=1

        try:
            # advance coordinate cursors to the current residue numbers
            while (ca1[index1][0] < current1): index1 += 1
            while (ca2[index2][0] < current2): index2 += 1
        except IndexError:
            # ran past the end of one structure: stop
            break

        if res1 == "-" or res2 == "-":
            continue

        (i1, aa1, x1, y1, z1) = ca1[index1]
        (i2, aa2, x2, y2, z2) = ca2[index2]

        if check_residues:
            if aa1 != res1:
                sys.stderr.write("# mismatch in 1:%s at residue alignment %i(%s) -> structure %i(%s)\n" %\
                                 (source, current1, res1, index1, aa1))
            if aa2 != res2:
                sys.stderr.write("# mismatch in 2:%s at residue %i(%s) -> %i(%s)\n" %\
                                 (source, current2, res2, index2, aa2))

        alignment.append( (x1, y1, z1, x2, y2, z2, 1) )

    return alignment

Example 38

Project: pan-python
Source File: panafapi.py
View license
def process_arg(s, list=False):
    """Read data from stdin ('-'), a readable file, or the literal argument.

    s: '-' reads stdin; a readable path reads that file; anything else
        is treated as a literal value.
    list: when True, return a list of lines with trailing CR/LF
        stripped; otherwise return the content as a single string.
    """
    stdin_char = '-'

    if s == stdin_char:
        lines = sys.stdin.readlines()
    else:
        try:
            # Context manager guarantees the file is closed even if
            # readlines() raises (the original leaked the handle).
            with open(s) as f:
                lines = f.readlines()
        except IOError:
            # Not a readable file: treat the argument as a literal value.
            lines = [s]

    if debug > 1:
        print('lines:', lines, file=sys.stderr)

    if list:
        return [x.rstrip('\r\n') for x in lines]

    return ''.join(lines)

Example 39

Project: pan-python
Source File: panwfapi.py
View license
def process_hashes(list):
    """Collect hashes from a mix of literals, files and stdin.

    Each entry in `list` is either '-' (read hashes from stdin, one per
    line), a readable file path (read hashes from it, one per line), or
    a literal hash, which is validated before being kept.
    """
    stdin_char = '-'

    hashes = []
    # `item` instead of `hash` so the builtin is not shadowed.
    for item in list:
        lines = []
        if item == stdin_char:
            lines = sys.stdin.readlines()
        else:
            try:
                # Close the file even if readlines() raises.
                with open(item) as f:
                    lines = f.readlines()
            except IOError:
                # only validate hash from command line
                validate_hash(item)
                hashes.append(item)
        # Plain loop instead of a side-effect list comprehension.
        for line in lines:
            hashes.append(line.rstrip('\r\n'))

    if debug > 1:
        print('hashes:', len(hashes), file=sys.stderr)

    return hashes

Example 40

Project: pan-python
Source File: panwfapi.py
View license
def process_arg(s, list=False):
    """Read data from stdin ('-'), a readable file, or the literal argument.

    s: '-' reads stdin; a readable path reads that file; anything else
        is treated as a literal value.
    list: when True, return a list of lines with trailing CR/LF
        stripped; otherwise return the content as a single string.
    """
    stdin_char = '-'

    if s == stdin_char:
        lines = sys.stdin.readlines()
    else:
        try:
            # Context manager guarantees the file is closed even if
            # readlines() raises (the original leaked the handle).
            with open(s) as f:
                lines = f.readlines()
        except IOError:
            # Not a readable file: treat the argument as a literal value.
            lines = [s]

    if debug > 1:
        print('lines:', lines, file=sys.stderr)

    if list:
        return [x.rstrip('\r\n') for x in lines]

    return ''.join(lines)

Example 41

Project: podoc
Source File: cli.py
View license
@click.command(help=PODOC_HELP)
@click.argument('files',
                # TODO: nargs=-1 for multiple files concat
                required=False,
                type=click.Path(exists=True, file_okay=True,
                                dir_okay=True, resolve_path=True))
@click.option('-f', '-r', '--from', '--read', default='markdown',
              help='Source format.')
@click.option('-t', '-w', '--to', '--write', default='ast',
              help='Target format.')
@click.option('-o', '--output',
              help='Output path.')
@click.option('--data-dir',
              help='Output directory.')
@click.option('--no-pandoc', default=False, is_flag=True,
              help='Disable pandoc formats.')
@click.version_option(__version__)
@click.help_option()
def podoc(files=None, read=None, write=None, output=None,
          data_dir=None, no_pandoc=False):
    """Convert a file or a string from one format to another."""
    converter = Podoc(with_pandoc=not no_pandoc)
    if files:
        # TODO: multiple files
        logger.debug("Converting file `%s` from %s to %s in %s.",
                     files, read, write, output)
        out = converter.convert(files, source=read, target=write,
                                output=output)
    else:
        # No input file was given: read from stdin, like pandoc does.
        logger.debug("Reading contents from stdin...")
        text = ''.join(sys.stdin.readlines())
        # Parse the raw text into an in-memory document first.
        contents = converter.loads(text, read)
        logger.debug("Converting `%s` from %s to %s (file: `%s`).",
                     _shorten_string(text),
                     read, write, output,
                     )
        out = converter.convert(contents, source=read, target=write,
                                output=output)
    # Without an output path, dump the converted result to stdout.
    if output is None:
        click.echo(converter.dumps(out, write))
        return

Example 42

Project: pan-python
Source File: panwfapi.py
View license
def process_hashes(list):
    """Collect hashes from a mix of literals, files and stdin.

    Each entry in `list` is either '-' (read hashes from stdin, one per
    line), a readable file path (read hashes from it, one per line), or
    a literal hash, which is validated before being kept.
    """
    stdin_char = '-'

    hashes = []
    # `item` instead of `hash` so the builtin is not shadowed.
    for item in list:
        lines = []
        if item == stdin_char:
            lines = sys.stdin.readlines()
        else:
            try:
                # Close the file even if readlines() raises.
                with open(item) as f:
                    lines = f.readlines()
            except IOError:
                # only validate hash from command line
                validate_hash(item)
                hashes.append(item)
        # Plain loop instead of a side-effect list comprehension.
        for line in lines:
            hashes.append(line.rstrip('\r\n'))

    if debug > 1:
        print('hashes:', len(hashes), file=sys.stderr)

    return hashes

Example 43

Project: twitter
Source File: archiver.py
View license
def main(args=sys.argv[1:]):
    """Command-line entry point for the tweet archiver.

    Parses flags, optionally authenticates via OAuth, then -- depending
    on the flags -- reports API rate status, archives the authenticated
    user's timeline/mentions/DMs, and archives the tweets of every user
    named on the command line (or read from stdin when the single
    positional argument is "-").
    """
    # NOTE(review): the default for ``args`` is evaluated once at import
    # time; acceptable for a script entry point.
    # Default option values; parse_args() fills this dict in place
    # (positional arguments presumably end up under 'extra_args' --
    # TODO confirm against parse_args()).
    options = {
        'oauth': False,
        'save-dir': ".",
        'api-rate': False,
        'timeline': "",
        'mentions': "",
        'dms': "",
        'favorites': False,
        'follow-redirects': False,
        'redirect-sites': None,
        'isoformat': False,
    }
    try:
        parse_args(args, options)
    except GetoptError as e:
        err("I can't do that, %s." % e)
        raise SystemExit(1)

    # exit if no user given
    # except if asking for API rate, or archive of timeline or mentions
    if not options['extra_args'] and not (options['api-rate'] or
                                          options['timeline'] or
                                          options['mentions'] or
                                          options['dms']):
        print(__doc__)
        return

    # authenticate using OAuth, asking for token if necessary
    if options['oauth']:
        # Token cache lives in the user's home directory; the OAuth
        # dance only runs when no cached token file exists yet.
        oauth_filename = (os.environ.get('HOME', 
                          os.environ.get('USERPROFILE', '')) 
                          + os.sep
                          + '.twitter-archiver_oauth')
        
        if not os.path.exists(oauth_filename):
            oauth_dance("Twitter-Archiver", CONSUMER_KEY, CONSUMER_SECRET,
                        oauth_filename)
        oauth_token, oauth_token_secret = read_token_file(oauth_filename)
        auth = OAuth(oauth_token, oauth_token_secret, CONSUMER_KEY,
                     CONSUMER_SECRET)
    else:
        auth = NoAuth()

    twitter = Twitter(auth=auth, api_version='1.1', domain='api.twitter.com')

    # --api-rate only reports the rate-limit status and exits.
    if options['api-rate']:
        rate_limit_status(twitter)
        return

    # Select how tweet text is rendered: with expansion of shortened
    # URLs (optionally restricted to certain hosts) or verbatim.
    global format_text
    if options['follow-redirects'] or options['redirect-sites'] :
        if options['redirect-sites']:
            hosts = parse_host_list(options['redirect-sites'])
        else:
            hosts = None
        format_text = functools.partial(expand_format_text, hosts)
    else:
        format_text = direct_format_text

    # save own timeline or mentions (the user used in OAuth)
    if options['timeline'] or options['mentions']:
        if isinstance(auth, NoAuth):
            err("You must be authenticated to save timeline or mentions.")
            raise SystemExit(1)

        if options['timeline']:
            filename = options['save-dir'] + os.sep + options['timeline']
            print("* Archiving own timeline in %s" % filename)
        elif options['mentions']:
            filename = options['save-dir'] + os.sep + options['mentions']
            print("* Archiving own mentions in %s" % filename)

        # Load the existing archive (if any) so only new tweets are
        # fetched; a load failure is not fatal.
        tweets = {}
        try:
            tweets = load_tweets(filename)
        except Exception as e:
            err("Error when loading saved tweets: %s - continuing without"
                % str(e))

        try:
            statuses(twitter, "", tweets, options['mentions'], options['favorites'], isoformat=options['isoformat'])
        except KeyboardInterrupt:
            err()
            err("Interrupted")
            raise SystemExit(1)

        save_tweets(filename, tweets)
        if options['timeline']:
            print("Total tweets in own timeline: %i" % len(tweets))
        elif options['mentions']:
            print("Total mentions: %i" % len(tweets))

    if options['dms']:
        if isinstance(auth, NoAuth):
            err("You must be authenticated to save DMs.")
            raise SystemExit(1)

        filename = options['save-dir'] + os.sep + options['dms']
        print("* Archiving own DMs in %s" % filename)

        dms = {}
        try:
            dms = load_tweets(filename)
        except Exception as e:
            err("Error when loading saved DMs: %s - continuing without"
                % str(e))

        try:
            # Fetch both directions: received and sent DMs.
            statuses(twitter, "", dms, received_dms=True, isoformat=options['isoformat'])
            statuses(twitter, "", dms, received_dms=False, isoformat=options['isoformat'])
        except KeyboardInterrupt:
            err()
            err("Interrupted")
            raise SystemExit(1)

        save_tweets(filename, dms)
        print("Total DMs sent and received: %i" % len(dms))


    # read users from command-line or stdin
    users = options['extra_args']
    if len(users) == 1 and users[0] == "-":
        users = [line.strip() for line in sys.stdin.readlines()]

    # save tweets for every user
    total, total_new = 0, 0
    for user in users:
        filename = options['save-dir'] + os.sep + user
        if options['favorites']:
            # Favorites go to a separate archive file per user.
            filename = filename + "-favorites"
        print("* Archiving %s tweets in %s" % (user, filename))

        tweets = {}
        try:
            tweets = load_tweets(filename)
        except Exception as e:
            err("Error when loading saved tweets: %s - continuing without"
                % str(e))

        new = 0
        before = len(tweets)
        try:
            statuses(twitter, user, tweets, options['mentions'], options['favorites'], isoformat=options['isoformat'])
        except KeyboardInterrupt:
            err()
            err("Interrupted")
            raise SystemExit(1)

        save_tweets(filename, tweets)
        total += len(tweets)
        new = len(tweets) - before
        total_new += new
        print("Total tweets for %s: %i (%i new)" % (user, len(tweets), new))

    print("Total: %i tweets (%i new) for %i users"
          % (total, total_new, len(users)))

Example 44

Project: TrustRouter
Source File: ndiff.py
View license
def restore(which):
    """Read an ndiff delta from stdin and print side *which* (1 or 2)."""
    delta = sys.stdin.readlines()
    recovered = difflib.restore(delta, which)
    sys.stdout.writelines(recovered)

Example 45

Project: write-it
Source File: handleemail.py
View license
    def handle(self, *args, **options):
        """Read a raw incoming email from stdin and process it.

        Optionally forwards a copy of every incoming mail to the admins,
        then lets EmailHandler turn it into an answer and send it back.
        Errors during handling are mailed to the admins instead of
        crashing the management command.
        """
        lines = sys.stdin.readlines()
        if settings.INCOMING_EMAIL_LOGGING == 'ALL':
            # NOTE(review): returning here skips handling entirely when
            # no ADMINS are configured -- confirm this is intended.
            if not settings.ADMINS:
                return
            text_content = "New incomming email"
            subject = "New incomming email"

            mail = EmailMultiAlternatives('%s%s' % (settings.EMAIL_SUBJECT_PREFIX, subject),
                text_content,  # content
                settings.DEFAULT_FROM_EMAIL,  # From
                [a[1] for a in settings.ADMINS]  # To
                )
            mail.attach('mail.txt', ''.join(lines), 'text/plain')
            mail.send()

        handler = EmailHandler(answer_class=AnswerForManageCommand)
        try:
            answer = handler.handle(lines)
            answer.send_back()
        except CouldNotFindIdentifier:
            # Mail could not be matched to a known message; ignore it.
            pass
        except Exception:
            # A bare "except:" would also swallow SystemExit and
            # KeyboardInterrupt; catch only real errors and report them
            # to the admins by email, with the raw mail attached.
            tb = traceback.format_exc()
            text_content = "Error the traceback was:\n" + tb
            subject = "Error handling incoming email"
            mail = EmailMultiAlternatives('%s%s' % (settings.EMAIL_SUBJECT_PREFIX, subject),
                text_content,  # content
                settings.DEFAULT_FROM_EMAIL,  # From
                [a[1] for a in settings.ADMINS],  # To
                )
            mail.attach('mail.txt', ''.join(lines), 'text/plain')
            mail.send()

Example 46

Project: pan-python
Source File: panwfapi.py
View license
def process_arg(s, list=False):
    """Read data from stdin ('-'), a readable file, or the literal argument.

    s: '-' reads stdin; a readable path reads that file; anything else
        is treated as a literal value.
    list: when True, return a list of lines with trailing CR/LF
        stripped; otherwise return the content as a single string.
    """
    stdin_char = '-'

    if s == stdin_char:
        lines = sys.stdin.readlines()
    else:
        try:
            # Context manager guarantees the file is closed even if
            # readlines() raises (the original leaked the handle).
            with open(s) as f:
                lines = f.readlines()
        except IOError:
            # Not a readable file: treat the argument as a literal value.
            lines = [s]

    if debug > 1:
        print('lines:', lines, file=sys.stderr)

    if list:
        return [x.rstrip('\r\n') for x in lines]

    return ''.join(lines)

Example 47

Project: pan-python
Source File: panxapi.py
View license
def get_element(s):
    """Return element text from stdin ('-'), a file path, or the literal.

    s: '-' reads stdin; an existing file path reads that file; anything
        else is used verbatim.
    """
    stdin_char = '-'

    if s == stdin_char:
        element = sys.stdin.readlines()
    elif os.path.isfile(s):
        try:
            # Context manager closes the file even if readlines() raises
            # (the original leaked the handle on a read error).
            with open(s) as f:
                element = f.readlines()
        except IOError as msg:
            print('open %s: %s' % (s, msg), file=sys.stderr)
            sys.exit(1)
    else:
        element = s

    # ''.join() of a str is the identity, so both branches are covered.
    element = ''.join(element)
    if debug > 1:
        print('element: \"%s\"' % element, file=sys.stderr)

    return element

Example 48

Project: pan-python
Source File: panxapi.py
View license
def get_element(s):
    """Return element text from stdin ('-'), a file path, or the literal.

    s: '-' reads stdin; an existing file path reads that file; anything
        else is used verbatim.
    """
    stdin_char = '-'

    if s == stdin_char:
        element = sys.stdin.readlines()
    elif os.path.isfile(s):
        try:
            # Context manager closes the file even if readlines() raises
            # (the original leaked the handle on a read error).
            with open(s) as f:
                element = f.readlines()
        except IOError as msg:
            print('open %s: %s' % (s, msg), file=sys.stderr)
            sys.exit(1)
    else:
        element = s

    # ''.join() of a str is the identity, so both branches are covered.
    element = ''.join(element)
    if debug > 1:
        print('element: \"%s\"' % element, file=sys.stderr)

    return element

Example 49

Project: urwid
Source File: old_str_util.py
View license
def process_east_asian_width():
    import sys
    out = []
    last = None
    for line in sys.stdin.readlines():
        if line[:1] == "#": continue
        line = line.strip()
        hex,rest = line.split(";",1)
        wid,rest = rest.split(" # ",1)
        word1 = rest.split(" ",1)[0]

        if "." in hex:
            hex = hex.split("..")[1]
        num = int(hex, 16)

        if word1 in ("COMBINING","MODIFIER","<control>"):
            l = 0
        elif wid in ("W", "F"):
            l = 2
        else:
            l = 1

        if last is None:
            out.append((0, l))
            last = l

        if last == l:
            out[-1] = (num, l)
        else:
            out.append( (num, l) )
            last = l

    print("widths = [")
    for o in out[1:]:  # treat control characters same as ascii
        print("\t%r," % (o,))
    print("]")

Example 50

Project: python3-xlib
Source File: genprottest.py
View license
def read_defs():
    global request_defs, reply_defs, struct_defs
    global mini_request_defs, resource_request_defs
    global event_defs

    request_defs = {}
    mini_request_defs = {}
    resource_request_defs = {}
    reply_defs = {}
    struct_defs = {}
    event_defs = {}

    for line in sys.stdin.readlines():
        parts = string.split(string.strip(line))

        fields = []
        for f in parts[2:]:
            fields.append(string.split(f, ':'))

        if parts[0] == 'REQUEST':
            request_defs[parts[1]] = fields
        elif parts[0] == 'MINIREQUEST':
            mini_request_defs[parts[1]] = MINI_DEF
        elif parts[0] == 'RESOURCEREQUEST':
            resource_request_defs[parts[1]] = RESOURCE_DEF
        elif parts[0] == 'REPLY':
            reply_defs[parts[1]] = fields
        elif parts[0] == 'STRUCT':
            struct_defs[parts[1]] = fields
        elif parts[0] == 'EVENT':
            event_defs[parts[1]] = fields