logging.getLogger

Here are examples of the Python API logging.getLogger, taken from open source projects. By voting up you can indicate which examples are most useful and appropriate.

164 Examples
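
Before looking at the project code, a minimal sketch of the API itself may help: logging.getLogger() with no argument returns the root logger, while logging.getLogger(name) returns (creating it on first use) a named logger that takes part in the dot-separated logger hierarchy. The names, format string, and messages below are illustrative only and not taken from any of the projects.

import logging

# Named loggers form a dot-separated hierarchy; repeated calls with the
# same name return the same logger object.
log = logging.getLogger("myapp.submodule")

# Handlers, formatters, and levels are usually configured once, on the
# root logger (or the top-level application logger).
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter("%(asctime)s %(name)s %(levelname)s %(message)s"))

root = logging.getLogger()          # no argument -> the root logger
root.addHandler(handler)
root.setLevel(logging.INFO)

log.info("records propagate up to the root logger's handlers")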

Example 101

Project: yalih Source File: honeypot.py
def main():
#Create the threads
	thread = threading.Thread(target=threadmaker)
	thread.setDaemon(True)
	thread.start()

	parser = argparse.ArgumentParser(description="Examples:\n/honeypot.py --url www.yahoo.com\nhoneypot.py --file <file path>\n./honeypot.py --blacklist\n./honeypot.py --email\n./honeypot.py --update\n./honeypot.py --search <warez>\n./honeypot.py --local <file/directory path>", formatter_class=argparse.RawTextHelpFormatter)
	parser.add_argument("--email", help="Retrieves your Spam emails from your mail server and crawls the extracted URLS. Enter your email credentials in honeypotconfig.py file!", action="store_true")
	parser.add_argument("--update", help="Updates the anti-virus signatures", action="store_true")
	parser.add_argument("--blacklist", help="Downloads list of suspicious malicious websites from three databases and retrieves/scans them accordingly", action="store_true")
	parser.add_argument("--file", nargs=1, help="Provide an input file", action="store")
	parser.add_argument("--url", nargs=1, help="Provide a url", action="store")
	parser.add_argument("--search", nargs=1, help="searches Bing search engine for a keyword (1 single keyword at the moment) and returns 100 results starting from the 20th result.", action="store")
	parser.add_argument("--local", nargs=1, help="scans a local file or directory for malicious signatures.", action="store")
	parser.add_argument("--debug", help="Include http header", action="store_true")
	parser.add_argument("--crawler", help="Crawl the sites and save any executables found", action="store_true")

	if len(sys.argv) == 1:
		parser.print_help()
		sys.exit(1)
	args = parser.parse_args()
	path = honeypotconfig.wdir + honeypotconfig.tmpfolder


#create the tmp folder
	if not os.path.isdir(os.path.join(honeypotconfig.wdir, honeypotconfig.tmpfolder)):
		os.makedirs(os.path.join(honeypotconfig.wdir, honeypotconfig.tmpfolder))           

		
#Crawler
	if args.crawler:
		executemechanize.exe_crawler = True
		
#Logging
	"""Initialize logger."""
	command = "mkdir -p "+honeypotconfig.wdir+"debug/" #create a temporary folder in your working space folder
	os.system(command)
	sys.stdin=open(honeypotconfig.wdir+"debug/" +  time.asctime(time.localtime(time.time())) +".log", "a")
	logger = logging.getLogger()
	
	sh = logging.StreamHandler()
	sh.setFormatter(SpecialFormatter())

	sh2 = logging.StreamHandler(sys.stdin)
	sh2.setFormatter(SpecialFormatter())
	
	logger.addHandler(sh)
	logger.addHandler(sh2)
	logger.setLevel(logging.INFO)
	
	if args.debug:
		logger.setLevel(logging.DEBUG)
		executemechanize.set_logging_level(logging.DEBUG)

#Update antivirus signatures
	if args.update:
		updateantivirus.updateantivirus()


#Blacklist Databases
	if args.blacklist:
		try:
			if not os.path.exists(os.path.join(honeypotconfig.wdir, "list")):
				os.mkdir(os.path.join(honeypotconfig.wdir, "list"))
		except OSError as e:	
			logger.error(e)	
		malwebsites.domaindownload()
		malwebsites.duplicateremover()
		urls = open(honeypotconfig.wdir+"list/malwebsites.txt", "r")
		counter = 0
		for line in urls:
			dict={}
			counter += 1
			dict["url"] = line.strip()
			dict["counter"] = counter
			queue.put(dict)
		queue.join()
		
		scan.scanning(path)
		yaradetection.listandscan(path)
		unquote.unquoteDirectory(path)


#Email
	if args.email:
		imapfile.imap()
		extractlink.extracturl()#extracts urls from emails.txt file 
		extractlink.duplicateremover() #removes the duplicate urls from crawler.txt files (which now contain extracted urls from emails.txt)
		urls = open('crawler.txt', "r")
		counter = 0
		for line in urls:
			dict={}
			counter += 1
			dict["url"] = line
			dict["counter"] = counter
			queue.put(dict)
		queue.join()
		scan.scanning(path)
		yaradetection.listandscan(path)
#		unquote.unquoteDirectory(path)

#File

	if args.file:
		mylist = list()
		mylist2 = list()
		counter =0
		fopen3 = open(sys.argv[2],"r")	
		for line in fopen3:
			dict={}
			line = line.strip()
			counter += 1
			if not (line.startswith("http://")) and not (line.startswith("https://")):
				line = "http://"+line
			dict["url"] = line
			dict["counter"] = counter
			queue.put(dict)
		queue.join()
		fopen3.close()
		scan.scanning(path)
		yaradetection.listandscan(path)
#		unquote.unquoteDirectory(path)



#URL
	if args.url:
		url = readurl()
		url = normalize.normalizeurl(url)
		dict={}
		counter = 1
		if not (url.startswith("http://")) and not (url.startswith("https://")):
			url = "http://"+url
		dict["url"] = url
		dict["counter"] = counter
		queue.put(dict)
		queue.join()
#		executemechanize.executemechanize(url)
		scan.scanning(path)
		yaradetection.listandscan(path)
#		unquote.unquoteDirectory(path)


#Search
	if args.search:
		keyword = sys.argv[2]
		bing.searchBing(keyword)
		mylist = list()
		fopen = open("list/searchresult.txt","r")
		for line in fopen:
			line = line.strip()
			if not line:
				continue
			mylist.append(line)
		fopen.close()
		counter = 0
		for line in mylist:
			dict={}
			counter += 1
			dict["url"] = line
			dict["counter"] = counter
			queue.put(dict)
		queue.join()

		scan.scanning(path)
		yaradetection.listandscan(path)
#		unquote.unquoteDirectory(path)




#Local Scan
	if args.local:
		path = sys.argv[2]
		scan.scanning(path)
		yaradetection.listandscan(path)
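
The logging-related part of the example above boils down to: take the root logger with logging.getLogger(), attach one StreamHandler for the console and a second handler writing to a timestamped log file, and switch from INFO to DEBUG when --debug is given. A condensed sketch of that pattern, using logging.FileHandler rather than the hand-opened file object and a plain Formatter in place of the project's SpecialFormatter; the function name and format string are illustrative:

import logging
import os
import time

def setup_logging(logdir, debug=False):
    # Root logger: records from every module in the process end up here.
    logger = logging.getLogger()
    formatter = logging.Formatter("%(asctime)s %(levelname)s %(message)s")

    # Console output.
    sh = logging.StreamHandler()
    sh.setFormatter(formatter)
    logger.addHandler(sh)

    # Timestamped log file, similar in spirit to the debug/ folder above.
    os.makedirs(logdir, exist_ok=True)
    fh = logging.FileHandler(os.path.join(logdir, time.strftime("%Y%m%d-%H%M%S") + ".log"))
    fh.setFormatter(formatter)
    logger.addHandler(fh)

    # INFO by default, DEBUG when a --debug style flag is set.
    logger.setLevel(logging.DEBUG if debug else logging.INFO)
    return logger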

Example 102

Project: grokmirror Source File: pull.py
def pull_mirror(name, config, verbose=False, force=False, nomtime=False,
                verify=False, verify_subpath='*', noreuse=False,
                purge=False, pretty=False, forcepurge=False):
    global logger
    global lock_fails

    logger = logging.getLogger(name)
    logger.setLevel(logging.DEBUG)

    if 'log' in config.keys():
        ch = logging.FileHandler(config['log'])
        formatter = logging.Formatter(
            "[%(process)d] %(asctime)s - %(levelname)s - %(message)s")
        ch.setFormatter(formatter)
        loglevel = logging.INFO

        if 'loglevel' in config.keys():
            if config['loglevel'] == 'debug':
                loglevel = logging.DEBUG

        ch.setLevel(loglevel)
        logger.addHandler(ch)

    ch = logging.StreamHandler()
    formatter = logging.Formatter('%(message)s')
    ch.setFormatter(formatter)

    if verbose:
        ch.setLevel(logging.INFO)
    else:
        ch.setLevel(logging.CRITICAL)

    logger.addHandler(ch)

    # push it into grokmirror to override the default logger
    grokmirror.logger = logger

    logger.info('Checking [%s]' % name)
    mymanifest = config['mymanifest']

    if verify:
        logger.info('Verifying mirror against %s' % config['manifest'])
        nomtime = True

    if config['manifest'].find('file:///') == 0:
        manifile = config['manifest'].replace('file://', '')
        if not os.path.exists(manifile):
            logger.critical('Remote manifest not found in %s! Quitting!'
                            % config['manifest'])
            return 1

        fstat = os.stat(manifile)
        last_modified = fstat[8]
        logger.debug('mtime on %s is: %s' % (manifile, fstat[8]))

        if os.path.exists(config['mymanifest']):
            fstat = os.stat(config['mymanifest'])
            my_last_modified = fstat[8]
            logger.debug('Our last-modified is: %s' % my_last_modified)
            if not (force or nomtime) and last_modified <= my_last_modified:
                logger.info('Manifest file unchanged. Quitting.')
                return 0

        logger.info('Reading new manifest from %s' % manifile)
        manifest = grokmirror.read_manifest(manifile)
        # Don't accept empty manifests -- that indicates something is wrong
        if not len(manifest.keys()):
            logger.critical('Remote manifest empty or unparseable! Quitting.')
            return 1

    else:
        # Load it from remote host using http and header magic
        logger.info('Fetching remote manifest from %s' % config['manifest'])

        # Do we have username:password@ in the URL?
        chunks = urlparse.urlparse(config['manifest'])
        if chunks.netloc.find('@') > 0:
            logger.debug('Taking username/password from the URL for basic auth')
            (upass, netloc) = chunks.netloc.split('@')
            if upass.find(':') > 0:
                (username, password) = upass.split(':')
            else:
                username = upass
                password = ''

            manifesturl = config['manifest'].replace(chunks.netloc, netloc)
            logger.debug('manifesturl=%s' % manifesturl)
            request = urllib2.Request(manifesturl)

            password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
            password_mgr.add_password(None, manifesturl, username, password)
            auth_handler = urllib2.HTTPBasicAuthHandler(password_mgr)
            opener = urllib2.build_opener(auth_handler)

        else:
            request = urllib2.Request(config['manifest'])
            opener = urllib2.build_opener()

        # Find out if we need to run at all first
        if not (force or nomtime) and os.path.exists(mymanifest):
            fstat = os.stat(mymanifest)
            mtime = fstat[8]
            logger.debug('mtime on %s is: %s' % (mymanifest, mtime))
            my_last_modified = time.strftime('%a, %d %b %Y %H:%M:%S GMT',
                                             time.gmtime(mtime))
            logger.debug('Our last-modified is: %s' % my_last_modified)
            request.add_header('If-Modified-Since', my_last_modified)

        try:
            ufh = opener.open(request, timeout=30)
        except urllib2.HTTPError, ex:
            if ex.code == 304:
                logger.info('Server says we have the latest manifest. '
                            'Quitting.')
                return 0
            logger.warning('Could not fetch %s' % config['manifest'])
            logger.warning('Server returned: %s' % ex)
            return 1
        except urllib2.URLError, ex:
            logger.warning('Could not fetch %s' % config['manifest'])
            logger.warning('Error was: %s' % ex)
            return 1

        last_modified = ufh.headers.get('Last-Modified')
        last_modified = time.strptime(last_modified, '%a, %d %b %Y %H:%M:%S %Z')
        last_modified = calendar.timegm(last_modified)

        # We don't use read_manifest for the remote manifest, as it can be
        # anything, really. For now, blindly open it with gzipfile if it ends
        # with .gz. XXX: some http servers will auto-deflate such files.
        try:
            if config['manifest'].find('.gz') > 0:
                fh = gzip.GzipFile(fileobj=StringIO(ufh.read()))
            else:
                fh = ufh

            jdata = fh.read()
            fh.close()

            manifest = anyjson.deserialize(jdata)

        except Exception, ex:
            logger.warning('Failed to parse %s' % config['manifest'])
            logger.warning('Error was: %s' % ex)
            return 1

    mymanifest = grokmirror.read_manifest(mymanifest)

    culled = cull_manifest(manifest, config)

    to_clone = []
    to_pull = []
    existing = []

    toplevel = config['toplevel']
    if not os.access(toplevel, os.W_OK):
        logger.critical('Toplevel %s does not exist or is not writable'
                        % toplevel)
        sys.exit(1)

    if 'pull_threads' in config.keys():
        pull_threads = int(config['pull_threads'])
        if pull_threads < 1:
            logger.info('pull_threads is less than 1, forcing to 1')
            pull_threads = 1
    else:
        # be conservative
        logger.info('pull_threads is not set, consider setting it')
        pull_threads = 5

    logger.info('Comparing repository info')

    for gitdir in culled.keys():
        fullpath = os.path.join(toplevel, gitdir.lstrip('/'))

        # fingerprints were added in later versions, so deal if the upstream
        # manifest doesn't have a fingerprint
        if 'fingerprint' not in culled[gitdir]:
            culled[gitdir]['fingerprint'] = None

        # Attempt to lock the repo
        try:
            grokmirror.lock_repo(fullpath, nonblocking=True)
        except IOError:
            logger.info('Could not lock %s, skipping' % gitdir)
            lock_fails.append(gitdir)
            # Force the fingerprint to what we have in mymanifest,
            # if we have it.
            culled[gitdir]['fingerprint'] = None
            if gitdir in mymanifest and 'fingerprint' in mymanifest[gitdir]:
                culled[gitdir]['fingerprint'] = mymanifest[gitdir][
                    'fingerprint']
            if len(lock_fails) >= pull_threads:
                logger.info('Too many repositories locked (%s). Exiting.'
                            % len(lock_fails))
                return 0
            continue

        if verify:
            if culled[gitdir]['fingerprint'] is None:
                logger.debug('No fingerprint for %s, not verifying' % gitdir)
                grokmirror.unlock_repo(fullpath)
                continue

            if not fnmatch.fnmatch(gitdir, verify_subpath):
                grokmirror.unlock_repo(fullpath)
                continue

            logger.debug('Verifying %s' % gitdir)
            if not os.path.exists(fullpath):
                verify_fails.append(gitdir)
                logger.info('Verify: %s ABSENT' % gitdir)
                grokmirror.unlock_repo(fullpath)
                continue

            my_fingerprint = grokmirror.get_repo_fingerprint(
                toplevel, gitdir, force=force)

            if my_fingerprint == culled[gitdir]['fingerprint']:
                logger.info('Verify: %s OK' % gitdir)
            else:
                logger.critical('Verify: %s FAILED' % gitdir)
                verify_fails.append(gitdir)

            grokmirror.unlock_repo(fullpath)
            continue

        # Is the directory in place?
        if os.path.exists(fullpath):
            # Fix owner and description, if necessary
            if gitdir in mymanifest.keys():
                # This code is hurky and needs to be cleaned up
                desc = culled[gitdir].get('description')
                owner = culled[gitdir].get('owner')
                ref = None
                if config['ignore_repo_references'] != 'yes':
                    ref = culled[gitdir].get('reference')

                mydesc = mymanifest[gitdir].get('description')
                myowner = mymanifest[gitdir].get('owner')
                myref = None
                if config['ignore_repo_references'] != 'yes':
                    myref = mymanifest[gitdir].get('reference')

                if owner is None:
                    owner = config['default_owner']
                if myowner is None:
                    myowner = config['default_owner']

                if desc != mydesc or owner != myowner or ref != myref:
                    # we can do this right away without waiting
                    set_repo_params(toplevel, gitdir, owner, desc, ref)

            else:
                # It exists on disk, but not in my manifest?
                if noreuse:
                    logger.critical('Found existing git repo in %s' % fullpath)
                    logger.critical('But you asked NOT to reuse repos')
                    logger.critical('Skipping %s' % gitdir)
                    grokmirror.unlock_repo(fullpath)
                    continue

                logger.info('Setting new origin for %s' % gitdir)
                fix_remotes(gitdir, toplevel, config['site'])
                to_pull.append(gitdir)
                grokmirror.unlock_repo(fullpath)
                continue

            # fingerprints were added late, so if we don't have them
            # in the remote manifest, fall back on using timestamps
            changed = False
            if culled[gitdir]['fingerprint'] is not None:
                logger.debug('Will use fingerprints to compare %s' % gitdir)
                my_fingerprint = grokmirror.get_repo_fingerprint(toplevel,
                                                                 gitdir,
                                                                 force=force)

                if my_fingerprint != culled[gitdir]['fingerprint']:
                    logger.debug('No fingerprint match, will pull %s' % gitdir)
                    changed = True
                else:
                    logger.debug('Fingerprints match, skipping %s' % gitdir)
            else:
                logger.debug('Will use timestamps to compare %s' % gitdir)
                if force:
                    logger.debug('Will force-pull %s' % gitdir)
                    changed = True
                    # set timestamp to 0 as well
                    grokmirror.set_repo_timestamp(toplevel, gitdir, 0)
                else:
                    ts = grokmirror.get_repo_timestamp(toplevel, gitdir)
                    if ts < culled[gitdir]['modified']:
                        changed = True

            if changed:
                to_pull.append(gitdir)
                grokmirror.unlock_repo(fullpath)
                continue
            else:
                logger.debug('Repo %s unchanged' % gitdir)
                # if we don't have a fingerprint for it, add it now
                if culled[gitdir]['fingerprint'] is None:
                    fpr = grokmirror.get_repo_fingerprint(toplevel, gitdir)
                    culled[gitdir]['fingerprint'] = fpr
                existing.append(gitdir)
                grokmirror.unlock_repo(fullpath)
                continue

        else:
            # Newly incoming repo
            to_clone.append(gitdir)
            grokmirror.unlock_repo(fullpath)
            continue

        # If we got here, something is odd.
        # noinspection PyUnreachableCode
        logger.critical('Could not figure out what to do with %s' % gitdir)
        grokmirror.unlock_repo(fullpath)

    if verify:
        if len(verify_fails):
            logger.critical('%s repos failed to verify' % len(verify_fails))
            return 1
        else:
            logger.info('Verification successful')
            return 0

    hookscript = config['post_update_hook']

    # XXX: 0.4.0 final: fix so we can ctrl-c out of threads

    if len(to_pull):
        if len(lock_fails) > 0:
            pull_threads -= len(lock_fails)

        # Don't spin up more threads than we need
        if pull_threads > len(to_pull):
            pull_threads = len(to_pull)

        # exit if we're ever at 0 pull_threads. Shouldn't happen, but some extra
        # precaution doesn't hurt
        if pull_threads <= 0:
            logger.info('Too many repositories locked. Exiting.')
            return 0

        logger.info('Will use %d threads to pull repos' % pull_threads)

        logger.info('Updating %s repos from %s'
                    % (len(to_pull), config['site']))
        in_queue = Queue.Queue()
        out_queue = Queue.Queue()

        for gitdir in to_pull:
            in_queue.put((gitdir, culled[gitdir]['fingerprint'],
                          culled[gitdir]['modified']))

        for i in range(pull_threads):
            logger.debug('Spun up thread %s' % i)
            t = PullerThread(in_queue, out_queue, config, i)
            t.setDaemon(True)
            t.start()

        # wait till it's all done
        in_queue.join()
        logger.info('All threads finished.')

        while not out_queue.empty():
            # see if any of it failed
            (gitdir, my_fingerprint, status) = out_queue.get()
            # We always record our fingerprint in our manifest
            culled[gitdir]['fingerprint'] = my_fingerprint
            if not status:
                # To make sure we check this again during next run,
                # fudge the manifest accordingly.
                logger.debug('Will recheck %s during next run' % gitdir)
                culled[gitdir] = mymanifest[gitdir]
                # this is rather hackish, but effective
                last_modified -= 1

    # how many lockfiles have we seen?
    # If there are more lock_fails than there are
    # pull_threads configured, we skip cloning out of caution
    if len(to_clone) and len(lock_fails) > pull_threads:
        logger.info('Too many repositories locked. Skipping cloning new repos.')
        to_clone = []

    if len(to_clone):
        logger.info('Cloning %s repos from %s'
                    % (len(to_clone), config['site']))
        # we use "existing" to track which repos can be used as references
        existing.extend(to_pull)

        to_clone_sorted = []
        clone_order(to_clone, manifest, to_clone_sorted, existing)

        for gitdir in to_clone_sorted:
            # Do we still need to clone it, or has another process
            # already done this for us?
            ts = grokmirror.get_repo_timestamp(toplevel, gitdir)

            if ts > 0:
                logger.debug('Looks like %s already cloned, skipping' % gitdir)
                continue

            fullpath = os.path.join(toplevel, gitdir.lstrip('/'))

            try:
                grokmirror.lock_repo(fullpath, nonblocking=True)
            except IOError:
                logger.info('Could not lock %s, skipping' % gitdir)
                lock_fails.append(gitdir)
                continue

            reference = None
            if config['ignore_repo_references'] != 'yes':
                reference = culled[gitdir]['reference']

            if reference is not None and reference in existing:
                # Make sure we can lock the reference repo
                refrepo = os.path.join(toplevel, reference.lstrip('/'))
                try:
                    grokmirror.lock_repo(refrepo, nonblocking=True)
                    success = clone_repo(toplevel, gitdir, config['site'],
                                         reference=reference)
                    grokmirror.unlock_repo(refrepo)
                except IOError:
                    logger.info('Cannot lock reference repo %s, skipping %s' %
                                (reference, gitdir))
                    if reference not in lock_fails:
                        lock_fails.append(reference)

                    grokmirror.unlock_repo(fullpath)
                    continue
            else:
                success = clone_repo(toplevel, gitdir, config['site'])

            # check dir to make sure cloning succeeded and then add to existing
            if os.path.exists(fullpath) and success:
                logger.debug('Cloning of %s succeeded, adding to existing'
                             % gitdir)
                existing.append(gitdir)

                desc = culled[gitdir].get('description')
                owner = culled[gitdir].get('owner')
                ref = culled[gitdir].get('reference')

                if owner is None:
                    owner = config['default_owner']
                set_repo_params(toplevel, gitdir, owner, desc, ref)
                set_agefile(toplevel, gitdir, culled[gitdir]['modified'])
                my_fingerprint = grokmirror.set_repo_fingerprint(toplevel,
                                                                 gitdir)
                culled[gitdir]['fingerprint'] = my_fingerprint
                run_post_update_hook(hookscript, toplevel, gitdir)
            else:
                logger.critical('Was not able to clone %s' % gitdir)
                # Remove it from our manifest so we can try re-cloning
                # next time grok-pull runs
                del culled[gitdir]
                git_fails.append(gitdir)

            grokmirror.unlock_repo(fullpath)

    # loop through all entries and find any symlinks we need to set
    # We also collect all symlinks to do purging correctly
    symlinks = []
    for gitdir in culled.keys():
        if 'symlinks' in culled[gitdir].keys():
            source = os.path.join(config['toplevel'], gitdir.lstrip('/'))
            for symlink in culled[gitdir]['symlinks']:
                if symlink not in symlinks:
                    symlinks.append(symlink)
                target = os.path.join(config['toplevel'], symlink.lstrip('/'))

                if os.path.exists(source):
                    if os.path.islink(target):
                        # are you pointing to where we need you?
                        if os.path.realpath(target) != source:
                            # Remove symlink and recreate below
                            logger.debug('Removed existing wrong symlink %s'
                                         % target)
                            os.unlink(target)
                    elif os.path.exists(target):
                        logger.warn('Deleted repo %s, because it is now'
                                    ' a symlink to %s' % (target, source))
                        shutil.rmtree(target)

                    # Here we re-check if we still need to do anything
                    if not os.path.exists(target):
                        logger.info('Symlinking %s -> %s' % (target, source))
                        # Make sure the leading dirs are in place
                        if not os.path.exists(os.path.dirname(target)):
                            os.makedirs(os.path.dirname(target))
                        os.symlink(source, target)

    manifile = config['mymanifest']
    grokmirror.manifest_lock(manifile)

    # Is the local manifest newer than last_modified? That would indicate
    # that another process has run and "culled" is no longer the latest info
    if os.path.exists(manifile):
        fstat = os.stat(manifile)
        if fstat[8] > last_modified:
            logger.info('Local manifest is newer, not saving.')
            grokmirror.manifest_unlock(manifile)
            return 0

    if purge:
        to_purge = []
        found_repos = 0
        for founddir in grokmirror.find_all_gitdirs(config['toplevel']):
            gitdir = founddir.replace(config['toplevel'], '')
            found_repos += 1

            if gitdir not in culled.keys() and gitdir not in symlinks:
                to_purge.append(founddir)

        if len(to_purge):
            # Purge-protection engage
            try:
                purge_limit = int(config['purgeprotect'])
                assert 1 <= purge_limit <= 99
            except (ValueError, AssertionError):
                logger.critical('Warning: "%s" is not valid for purgeprotect.'
                                % config['purgeprotect'])
                logger.critical('Please set to a number between 1 and 99.')
                logger.critical('Defaulting to purgeprotect=5.')
                purge_limit = 5

            purge_pc = len(to_purge) * 100 / found_repos
            logger.debug('purgeprotect=%s' % purge_limit)
            logger.debug('purge percentage=%s' % purge_pc)

            if not forcepurge and purge_pc >= purge_limit:
                logger.critical('Refusing to purge %s repos (%s%%)'
                                % (len(to_purge), purge_pc))
                logger.critical('Set purgeprotect to a higher percentage, or'
                                ' override with --force-purge.')
                logger.info('Not saving local manifest')
                return 1
            else:
                for founddir in to_purge:
                    if os.path.islink(founddir):
                        logger.info('Removing unreferenced symlink %s' % gitdir)
                        os.unlink(founddir)
                    else:
                        try:
                            logger.info('Purging %s' % founddir)
                            grokmirror.lock_repo(founddir, nonblocking=True)
                            shutil.rmtree(founddir)
                        except IOError:
                            lock_fails.append(gitdir)
                            logger.info('%s is locked, not purging' % gitdir)

    # Go through all repos in culled and get the latest local timestamps.
    for gitdir in culled:
        ts = grokmirror.get_repo_timestamp(toplevel, gitdir)
        culled[gitdir]['modified'] = ts

    # If there were any lock failures, we fudge last_modified to always
    # be older than the server, which will force the next grokmirror run.
    if len(lock_fails):
        logger.info('%s repos could not be locked. Forcing next run.'
                    % len(lock_fails))
        last_modified -= 1
    elif len(git_fails):
        logger.info('%s repos failed. Forcing next run.'
                    % len(git_fails))
        last_modified -= 1

    # Once we're done, save culled as our new manifest
    grokmirror.write_manifest(manifile, culled, mtime=last_modified,
                              pretty=pretty)

    grokmirror.manifest_unlock(manifile)

    # write out projects.list, if asked to
    write_projects_list(culled, config)

    return 127
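
The getLogger call here creates a per-mirror named logger: logging.getLogger(name) with the mirror's name, an optional FileHandler whose level comes from the config dictionary, and a console StreamHandler that only shows output when verbose is set. A stripped-down sketch of just that wiring; the function itself is illustrative, though the config keys mirror the ones used above:

import logging

def make_mirror_logger(name, config, verbose=False):
    logger = logging.getLogger(name)   # one logger per mirror name
    logger.setLevel(logging.DEBUG)     # let the handlers decide what to emit

    # Optional log file, with its level taken from the config.
    if 'log' in config:
        fh = logging.FileHandler(config['log'])
        fh.setFormatter(logging.Formatter(
            "[%(process)d] %(asctime)s - %(levelname)s - %(message)s"))
        fh.setLevel(logging.DEBUG if config.get('loglevel') == 'debug'
                    else logging.INFO)
        logger.addHandler(fh)

    # Console output is quiet unless the caller asked for verbosity.
    ch = logging.StreamHandler()
    ch.setFormatter(logging.Formatter('%(message)s'))
    ch.setLevel(logging.INFO if verbose else logging.CRITICAL)
    logger.addHandler(ch)
    return logger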

Example 103

Project: qqqfome Source File: backend.py
Function: run
    def run(self, database, msg, interval, log_file, max_old=10):
        c.check_type(database, 'database', str)

        L = logging.getLogger('qqqfome-backend')
        formatter = logging.Formatter(
            '%(asctime)s - %(levelname)s - %(message)s')
        fh = logging.FileHandler(log_file)
        fh.setLevel(logging.DEBUG)
        fh.setFormatter(formatter)
        sh = logging.StreamHandler()
        sh.setLevel(logging.DEBUG)
        sh.setFormatter(formatter)
        L.setLevel(logging.DEBUG)
        L.addHandler(fh)
        L.addHandler(sh)

        try:
            L.info(s.log_connected_to_db.format(database))
            conn = db.connect_db(database)
            L.info(s.success)
        except FileNotFoundError:
            L.exception(s.log_file_not_exist.format(database))
            L.info(s.exit)
            return

        # get cookies from database
        cookies = db.get_cookies(conn)

        if not cookies:
            L.exception(s.log_no_cookies_in_database)
            L.info(s.exit)
            return

        L.info(s.log_get_cookies_from_database)
        L.debug(cookies)

        try:
            client = ZhihuClient(cookies)
            L.info(s.log_build_zhihu_client)
        except Exception as e:
            L.exception(e)
            return

        while True:
            L.info(s.log_start_a_pass)

            i = 0
            while i < 5:
                try:
                    L.info(s.log_build_me)
                    me = client.me()
                    break
                except Exception as e:
                    L.exception(e)
                    i += 1
            else:
                L.error(s.log_fail_to_build_me)
                L.info(s.exit)
                return

            try:
                follower_num = me.follower_num
            except Exception as e:
                L.exception(e)
                L.info(s.log_get_follower_num_failed)
                L.info(s.log_finish_a_pass)
                time.sleep(interval)
                continue

            L.info(s.log_get_follower_num.format(follower_num))
            db.log_to_db(conn, follower_num, s.log_start_a_pass)

            continue_in_db = 0
            new_follower_num = 0

            try:
                for follower in me.followers:
                    L.info(s.log_check_follower.format(
                        follower.name, follower.id))
                    if db.is_in_db(conn, follower.id):
                        L.info(s.log_follower_in_db.format(follower.id))
                        continue_in_db += 1
                    else:
                        L.info(s.log_follower_not_in_db.format(follower.name))
                        continue_in_db = 0

                        L.info(s.log_send_message.format(follower.name))

                        try:
                            message = calc_message(msg, me, follower,
                                                   new_follower_num)
                            new_follower_num += 1
                        except Exception as e:
                            L.exception(e)
                            message = msg

                        L.debug(message)

                        i = 0
                        while i < 5:
                            try:
                                me.send_message(follower, message)
                                break
                            except Exception as e:
                                L.exception(e)
                                L.debug(s.log_send_failed)
                                i += 1
                        else:
                            L.info(s.log_send_pass)
                            continue

                        L.info(s.success)
                        L.info(s.log_add_user_to_db.format(
                            follower.name))
                        db.add_user_to_db(conn, follower)

                    if continue_in_db == max_old:
                        L.info(s.log_continue_reach_max.format(max_old))
                        break
            except Exception as e:
                L.exception(e)

            L.info(s.log_finish_a_pass)
            time.sleep(interval)

Example 104

Project: anybox.recipe.odoo Source File: upgrade.py
def upgrade(upgrade_script, upgrade_callable, conf, buildout_dir):
    """Run the upgrade from a source file.

    All arguments are set in the standalone script produced by buildout through
    entry point options.

    * ``upgrade_script``: absolute path to the upgrade script python source.
    * ``upgrade_callable``: name of the callable in source file actually
      running the script.

      It must accept the two following positional arguments, in that order:

        - a :class:`.Session` instance (as in standard "Odoo scripts")
        - a logger (standard object from the :mod:`logging` module)

      and may return a non zero status code to indicate an error.
      Both ``None`` and 0 are interpreted as success.

    * ``conf``: path to the Odoo configuration file (managed by the recipe)
    * ``buildout_dir``: directory of the buildout
    """

    parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter,
                            epilog="")
    parser.add_argument('--log-file', default=DEFAULT_LOG_FILE,
                        help="File to log sub-operations to, relative to the "
                        "current working directory, supports homedir "
                        "expansion ('~' on POSIX systems).")
    parser.add_argument('--log-level', default='info',
                        help="Main Odoo logging level. Does not affect the "
                        "logging from the main upgrade script itself.")
    parser.add_argument('--console-log-level', default='info',
                        help="Level for the upgrade process console "
                        "logging. This is for the main upgrade script itself "
                        "meaning that usually only major steps should be "
                        "logged ")
    parser.add_argument('-q', '--quiet', action='store_true',
                        help="Suppress console output from the main upgrade "
                             "script (lower level stages can still write)")
    parser.add_argument('-d', '--db-name', default=SUPPRESS,
                        help="Database name. If ommitted, the general default "
                        "values from Odoo config file or libpq will apply.")
    parser.add_argument('--init-load-demo-data', action='store_true',
                        help="Demo data will be loaded with module "
                        "installations if and only if "
                        "this modifier is specified")

    arguments = parser.parse_args()  # 'args' would shadow the one of pdb
    log_path = os.path.abspath(os.path.expanduser(arguments.log_file))
    log_level = arguments.log_level
    console_level = arguments.console_log_level.upper()
    quiet = arguments.quiet

    try:
        log_file = open(log_path, 'a')
    except IOError:
        sys.stderr.write("Cannot open %r for write" % log_path + os.linesep)
        sys.exit(-1)

    session = Session(conf, buildout_dir)

    from openerp.tools import config
    config['logfile'] = log_path
    config['log-level'] = log_level

    start_time = datetime.utcnow()
    if not quiet:
        print("Starting upgrade, logging details to %s at level %s, "
              "and major steps to console at level %s" % (
                  log_path, log_level.upper(), console_level.upper()))
        print('')

    logger = logging.getLogger('openerp.upgrade')
    console_handler = logging.StreamHandler()
    console_handler.setLevel(getattr(logging, console_level))
    console_handler.setFormatter(logging.Formatter(
        "%(asctime)s %(levelname)s  %(message)s"))

    if not arguments.quiet:
        logger.addHandler(console_handler)

    db_name = getattr(arguments, 'db_name', None)
    logger.info("Opening database %r", db_name)
    session.open(db=db_name, with_demo=bool(arguments.init_load_demo_data))
    # actual value after all defaultings have been done
    db_name = session.cr.dbname

    if session.is_initialization:
        logger.info("Database %r base initialization done. Proceeding further",
                    db_name)
    else:
        logger.info("Database %r loaded. Actual upgrade begins.", db_name)

    pkg_version = session.package_version
    if pkg_version is None:
        logger.warn("Expected package version file %r does not exist. "
                    "version won't be set in database at the end of upgrade. "
                    "Consider including such a version file in your project "
                    "*before* version dependent logic is actually needed.",
                    session.version_file_path)
    else:
        logger.info("Read package version: %s from %s", pkg_version,
                    session.version_file_path)

    db_version = session.db_version
    if db_version is None:
        if not session.is_initialization:
            logger.warn("No version currently set in database (the present "
                        "upgrade script has never been run). Consider setting "
                        "database version even for fresh instances, to "
                        "eliminate any guesswork in the upgrade scripts.")
    else:
        logger.info("Database latest upgrade version : %s", db_version)

    upgrade_module = imp.load_source('anybox.recipe.odoo.upgrade_openerp',
                                     upgrade_script)
    statuscode = getattr(upgrade_module, upgrade_callable)(session, logger)
    if statuscode is None or statuscode == 0:
        if pkg_version is not None:
            logger.info("setting version %s in database" % pkg_version)
            session.db_version = pkg_version
        session.cr.commit()
        session.close()
        logger.info("%s successful. Total time: %d seconds." % (
            "Initialization" if session.is_initialization else "Upgrade",
            ceil(total_seconds((datetime.utcnow() - start_time)))
        ))
    else:
        logger.error("Please check logs at %s" % log_path)

    log_file.close()
    sys.exit(statuscode)
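
One detail worth noting above: the logger is named 'openerp.upgrade', which makes it a child of the 'openerp' logger that the framework configures, so records written here also propagate to whatever handlers the parent logger has, in addition to the console handler attached locally. A small, self-contained sketch of that propagation behaviour; the logger names and file name are illustrative, not Odoo's actual configuration:

import logging

# Parent logger, roughly as a framework might configure it.
parent = logging.getLogger("openerp")
parent.setLevel(logging.INFO)
parent.addHandler(logging.FileHandler("framework.log"))

# Child logger for one subsystem; the dotted name defines the hierarchy.
child = logging.getLogger("openerp.upgrade")
child.addHandler(logging.StreamHandler())   # extra console output for this subsystem

# This record is emitted by the child's StreamHandler *and*, through
# propagation, by the parent's FileHandler.
child.info("upgrade step finished")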

Example 105

Project: mediarover Source File: schedule.py
def __schedule(broker, options):

	logger = logging.getLogger("mediarover")

	# grab config object
	config = broker[CONFIG_OBJECT]

	# grab quality management flag.  This will determine if Media Rover
	# will actively manage the quality of filesystem episodes or not
	manage_quality = config['tv']['library']['quality']['managed']
	if manage_quality and config['tv']['library']['quality']['desired'] is None:
		raise ConfigurationError("when quality management is on you must indicate a desired quality level at [tv] [[quality]] desired =")

	# check if user has requested a dry-run
	if options.dry_run:
		logger.info("--dry-run flag detected!  No new downloads will be queued during execution!")

	tv_root = config['tv']['tv_root']
	if not len(tv_root):
		raise ConfigurationError("You must declare at least one tv_root directory!")

	# build dict of watched series
	series_lists = build_series_lists(config)
	logger.info("watching %d tv show(s)", len(series_lists[0]))

	# register series dictionary with dependency broker
	broker.register(WATCHED_SERIES_LIST, series_lists[0])

	logger.debug("finished processing watched tv")
	logger.info("begin processing sources")

	# grab list of source url's from config file and build appropriate Source objects
	sources = []
	for name, params in config['source'].items():
		logger.debug("found feed '%s'", name)

		# first things first: if manage_quality is True, make sure the user
		# has specified a quality for this source
		if manage_quality and params['quality'] is None:
			raise ConfigurationError("missing quality flag for source '%s'" % name)

		params['name'] = name
		params['priority'] = config[params['type']]['priority']
		
		provider = params['provider']
		del params['provider']

		# grab source object
		factory = broker[provider]

		logger.debug("creating source for feed %r", name)
		try:
			source = factory.create_source(**params)
		except UrlRetrievalError, e:
			logger.error("skipping source '%s', reason: %s" % (name, e))
			continue
		except InvalidRemoteData, e:
			logger.error("skipping source '%s', unable to process remote data: %s", name, e)
			continue
		else:
			logger.info("created source %r" % name)
			sources.append(source)

	# if we don't have any sources there isn't any reason to continue.  Print
	# message and exit
	if not len(sources):
		logger.warning("No sources found!")
		print "ERROR: Did not find any configured sources in configuration file.  Nothing to do!"
		exit(1)

	logger.info("watching %d source(s)", len(sources))
	logger.debug("finished processing sources")

	logger.info("begin queue configuration")

	# build list of supported categories
	supported_categories = set([config['tv']['category'].lower()])

	# loop through list of available queues and find one that the user
	# has configured
	queue = None
	for client in config['__SYSTEM__']['__available_queues__']:

			logger.debug("looking for configured queue: %s", client)
			if client in config['queue']:
				logger.debug("using %s nntp client", client)

				# attempt to load the nntp client Queue object
				module = None
				try:
					module = __import__("mediarover.queue.%s" % client, globals(), locals(), [client.capitalize() + "Queue"], -1)
				except ImportError:
					logger.error("error loading queue module %sQueue", client)
					raise

				# grab list of config options for current queue
				params = dict(config['queue'][client])
				logger.debug("queue source: %s", params["root"])

				# grab constructor and create new queue object
				try:
					init = getattr(module, "%sQueue" % client.capitalize())
				except AttributeError:
					logger.error("error retrieving queue init method")
					raise 
				else:
					queue = init(params['root'], supported_categories, params)
					break
	else:
		logger.warning("No queue found!")
		print "ERROR: Did not find a configured queue in configuration file.  Unable to proceed!"
		exit(1)
	logger.debug("finished queue configuration")

	if manage_quality:
		logger.info("cleaning database of stale jobs")

		# grab queue and list of in_progress jobs from database
		in_queue = []
		in_progress = set([row['title'] for row in broker[METADATA_OBJECT].list_in_progress()])
		for job in queue.jobs():
			if job.title in in_progress:
				in_queue.append(job.title)

		# find the difference between the two.  If there are any items in the in_progress
		# table that aren't in the queue, remove them
		not_in_queue = in_progress.difference(set(in_queue))
		if len(not_in_queue) > 0:
			logger.debug("found %d stale job(s) in the database, removing..." % len(not_in_queue))
			broker[METADATA_OBJECT].delete_in_progress(*not_in_queue)

	"""
		for each Source object, loop through the list of available Items and
		check:
		
			if item represents an Episode object:
				a) the Item matches a watched series
				b) the season for current episode isn't being ignored
				c) the watched series is missing the Episode representation of 
					the current Item
				d) the Item is not currently in the Queue list of Jobs
	"""
	scheduled = []
	drop_from_queue = []

	# start by processing any items that have been delayed and 
	# are now eligible for processing
	logger.info("retrieving delayed items...")
	for item in broker[METADATA_OBJECT].get_actionable_delayed_items():
		logger.debug("begin processing delayed item '%s'", item.title)
		__process_item(broker, item, queue, scheduled, drop_from_queue)

	# now process items from any configured sources
	for source in sources:
		logger.info("processing '%s' items", source.name())

		try:
			items = source.items()
		except (InvalidRemoteData), e:
			logger.warning(e)
			continue
			
		for item in items:
			logger.debug("begin processing item '%s'", item.title)

			# process current item
			__process_item(broker, item, queue, scheduled, drop_from_queue)

	logger.debug("finished processing items")

	if not options.dry_run:
		if len(drop_from_queue) > 0:
			logger.info("removing flagged items from download")
			for job in drop_from_queue:
				try:
					queue.remove_from_queue(job)
				except QueueDeletionError:
					logger.warning("unable to remove job %r from queue", job.title)

		# remove processed items from delayed_item table
		broker[METADATA_OBJECT].delete_stale_delayed_items()

		# now that we've fully parsed all source items
		# lets add the collected downloads to the queue...
		delayed = []
		if len(scheduled) > 0:
			logger.info("scheduling items for download")
			for item in scheduled:
				if item.delay > 0:
					delayed.append(item)
				else:
					try:
						queue.add_to_queue(item)
					except (IOError, QueueInsertionError), e:
						logger.warning("unable to schedule item %s for download: %s" % (item.title, e.args[0]))
					else:
						broker[NOTIFICATION_OBJECT].process(
							QUEUED_ITEM_NOTIFICATION, 
							"'%s' was queued for download" % item.title
						)
		else:
			logger.info("no items to schedule for download")

		if len(delayed) > 0:
			logger.info("identified %d item(s) with a schedule delay" % len(delayed))
			existing = broker[METADATA_OBJECT].get_delayed_items()
			for item in delayed:
				if item not in existing:
					broker[METADATA_OBJECT].add_delayed_item(item)
					broker[NOTIFICATION_OBJECT].process(
						DELAYED_ITEM_NOTIFICATION, 
						"'%s' was delayed for %d iteration(s)" % (item.title, item.delay)
					)
				else:
					logger.debug("skipping %s, already delayed" % item.title)

		# reduce delay count for all items in delayed_item table
		broker[METADATA_OBJECT].reduce_item_delay()
	else:
		if len(scheduled) > 0:
			logger.info("the following items were identified as being eligible for download:")
			for item in scheduled:
				logger.info(item.title)

Example 106

Project: Photini Source File: editor.py
    def __init__(self, options, initial_files):
        super(MainWindow, self).__init__()
        self.setWindowTitle(self.tr("Photini photo metadata editor"))
        pixmap = QtGui.QPixmap()
        pixmap.loadFromData(pkg_resources.resource_string(
            'photini', 'data/icons/48/photini.png'))
        icon = QtGui.QIcon(pixmap)
        self.setWindowIcon(icon)
        self.selection = list()
        # logger window
        self.loggerwindow = LoggerWindow(options.verbose)
        self.loggerwindow.setWindowIcon(icon)
        self.logger = logging.getLogger(self.__class__.__name__)
        # set network proxy
        proxies = getproxies()
        if 'http' in proxies:
            parsed = urlparse(proxies['http'])
            QNetworkProxy.setApplicationProxy(QNetworkProxy(
                QNetworkProxy.HttpProxy, parsed.hostname, parsed.port))
        # create shared global objects
        self.app = QtWidgets.QApplication.instance()
        self.app.config_store = ConfigStore('editor', parent=self)
        self.app.spell_check = SpellCheck(parent=self)
        self.app.test_mode = options.test
        # restore size
        size = self.width(), self.height()
        self.resize(*eval(
            self.app.config_store.get('main_window', 'size', str(size))))
        # image selector
        self.image_list = ImageList()
        self.image_list.selection_changed.connect(self.new_selection)
        self.image_list.new_metadata.connect(self.new_metadata)
        # prepare list of tabs and associated stuff
        self.tab_list = (
            {'name'  : self.tr('&Descriptive metadata'),
             'key'   : 'descriptive_metadata',
             'class' : Descriptive},
            {'name'  : self.tr('&Technical metadata'),
             'key'   : 'technical_metadata',
             'class' : Technical},
            {'name'  : self.tr('Map (&Google)'),
             'key'   : 'map_google',
             'class' : GoogleMap},
            {'name'  : self.tr('Map (&Bing)'),
             'key'   : 'map_bing',
             'class' : BingMap},
            {'name'  : self.tr('Map (&OSM)'),
             'key'   : 'map_osm',
             'class' : OpenStreetMap},
            {'name'  : self.tr('&Flickr upload'),
             'key'   : 'flickr_upload',
             'class' : FlickrUploader},
            {'name'  : self.tr('Google &Photos upload'),
             'key'   : 'picasa_upload',
             'class' : PicasaUploader},
            {'name'  : self.tr('Faceboo&k upload'),
             'key'   : 'facebook_upload',
             'class' : FacebookUploader},
            {'name'  : self.tr('&Import photos'),
             'key'   : 'import_photos',
             'class' : Importer},
            )
        for tab in self.tab_list:
            if tab['class']:
                tab['object'] = tab['class'](self.image_list)
            else:
                tab['object'] = None
        # file menu
        file_menu = self.menuBar().addMenu(self.tr('File'))
        open_action = QtWidgets.QAction(self.tr('Open images'), self)
        open_action.setShortcuts(QtGui.QKeySequence.Open)
        open_action.triggered.connect(self.image_list.open_files)
        file_menu.addAction(open_action)
        self.save_action = QtWidgets.QAction(
            self.tr('Save images with new data'), self)
        self.save_action.setShortcuts(QtGui.QKeySequence.Save)
        self.save_action.setEnabled(False)
        self.save_action.triggered.connect(self.image_list.save_files)
        file_menu.addAction(self.save_action)
        self.close_action = QtWidgets.QAction(
            self.tr('Close selected images'), self)
        self.close_action.setEnabled(False)
        self.close_action.triggered.connect(self.close_files)
        file_menu.addAction(self.close_action)
        close_all_action = QtWidgets.QAction(self.tr('Close all images'), self)
        close_all_action.triggered.connect(self.close_all_files)
        file_menu.addAction(close_all_action)
        file_menu.addSeparator()
        quit_action = QtWidgets.QAction(self.tr('Quit'), self)
        quit_action.setShortcuts(
            [QtGui.QKeySequence.Quit, QtGui.QKeySequence.Close])
        quit_action.triggered.connect(
            QtWidgets.QApplication.instance().closeAllWindows)
        file_menu.addAction(quit_action)
        # options menu
        options_menu = self.menuBar().addMenu(self.tr('Options'))
        settings_action = QtWidgets.QAction(self.tr('Settings'), self)
        settings_action.triggered.connect(self.edit_settings)
        options_menu.addAction(settings_action)
        options_menu.addSeparator()
        for tab in self.tab_list:
            name = tab['name'].replace('&', '')
            tab['action'] = QtWidgets.QAction(name, self)
            tab['action'].setCheckable(True)
            if tab['class']:
                tab['action'].setChecked(
                    eval(self.app.config_store.get('tabs', tab['key'], 'True')))
            else:
                tab['action'].setEnabled(False)
            tab['action'].triggered.connect(self.add_tabs)
            options_menu.addAction(tab['action'])
        # spelling menu
        languages = self.app.spell_check.available_languages()
        spelling_menu = self.menuBar().addMenu(self.tr('Spelling'))
        enable_action = QtWidgets.QAction(self.tr('Enable spell check'), self)
        enable_action.setEnabled(bool(languages))
        enable_action.setCheckable(True)
        enable_action.setChecked(self.app.spell_check.enabled)
        enable_action.toggled.connect(self.app.spell_check.enable)
        spelling_menu.addAction(enable_action)
        language_menu = QtWidgets.QMenu(self.tr('Choose language'), self)
        language_menu.setEnabled(bool(languages))
        language_group = QtWidgets.QActionGroup(self)
        current_language = self.app.spell_check.current_language()
        for tag in languages:
            language_action = QtWidgets.QAction(tag, self)
            language_action.setCheckable(True)
            language_action.setChecked(tag == current_language)
            language_action.setActionGroup(language_group)
            language_menu.addAction(language_action)
        language_group.triggered.connect(self.app.spell_check.set_language)
        spelling_menu.addMenu(language_menu)
        # help menu
        help_menu = self.menuBar().addMenu(self.tr('Help'))
        about_action = QtWidgets.QAction(self.tr('About Photini'), self)
        about_action.triggered.connect(self.about)
        help_menu.addAction(about_action)
        help_menu.addSeparator()
        help_action = QtWidgets.QAction(self.tr('Photini documentation'), self)
        help_action.triggered.connect(self.open_docs)
        help_menu.addAction(help_action)
        # main application area
        self.central_widget = QtWidgets.QSplitter()
        self.central_widget.setOrientation(Qt.Vertical)
        self.central_widget.setChildrenCollapsible(False)
        self.tabs = QtWidgets.QTabWidget()
        self.tabs.setTabBar(QTabBar())
        self.tabs.setElideMode(Qt.ElideRight)
        self.tabs.currentChanged.connect(self.new_tab)
        self.add_tabs()
        self.central_widget.addWidget(self.tabs)
        self.central_widget.addWidget(self.image_list)
        size = self.central_widget.sizes()
        self.central_widget.setSizes(eval(
            self.app.config_store.get('main_window', 'split', str(size))))
        self.central_widget.splitterMoved.connect(self.new_split)
        self.setCentralWidget(self.central_widget)
        # open files given on command line, after GUI is displayed
        self.initial_files = initial_files
        if self.initial_files:
            QtCore.QTimer.singleShot(0, self.open_initial_files)
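
Here the logger name comes from the class itself, logging.getLogger(self.__class__.__name__), so every widget class gets its own logger without hard-coding a string, and subclasses automatically pick up their own names. A minimal sketch of the idiom outside of Qt; the class, method, and message are made up for illustration:

import logging

class ImageLoader:
    def __init__(self):
        # Logger named after the concrete class, e.g. "ImageLoader".
        self.logger = logging.getLogger(self.__class__.__name__)

    def load(self, path):
        self.logger.debug("loading %s", path)
        # ... actual work would go here ...

logging.basicConfig(level=logging.DEBUG)
ImageLoader().load("example.jpg")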

Example 107

Project: genmod Source File: annotate_models.py
@click.command()
@variant_file
@family_file
@family_type
@click.option('-r', '--reduced_penetrance',
                    nargs=1, 
                    type=click.File('r'),
                    metavar='<tsv_file>',
                    help='File with gene ids that have reduced penetrance.'
)
@click.option('--vep', 
                    is_flag=True,
                    help='If variants are annotated with the Variant Effect Predictor.'
)
@click.option('--phased', 
                    is_flag=True,
                    help='If data is phased use this flag.'
)
@click.option('-s' ,'--strict', 
                    is_flag=True,
                    help='If strict model annotations should be used (see documentation).'
)
@processes
@silent
@click.option('-w', '--whole_gene',
                    is_flag=True,
                    help='If compounds should be checked over the whole gene.'
)
@click.option('-k' ,'--keyword', 
                    default="Annotation",
                    help="""What annotation keyword that should be used when 
                    searching for features."""
)
@outfile
@temp_dir
def models(variant_file, family_file, family_type, reduced_penetrance, vep,
keyword, phased, strict, silent, processes, whole_gene, outfile, temp_dir):
    """
    Annotate genetic models for vcf variants. 
    
    Checks what patterns of inheritance are followed in a VCF file.
    The analysis is family based, so each family that is specified in the family
    file and exists in the variant file will get its own annotation.
    """
    logger = logging.getLogger(__name__)
    
    ######### This is for logging the command line string #########
    frame = inspect.currentframe()
    args, _, _, values = inspect.getargvalues(frame)
    argument_list = [
        i+'='+str(values[i]) for i in values if values[i] and 
        i not in ['frame']
    ]
    
    variant_file = get_file_handle(variant_file)
    ###########################################################################
    
    logger.info("Running GENMOD annotate version {0}".format(__version__))
    logger.debug("Arguments: {0}".format(', '.join(argument_list)))
    
    reduced_penetrance_genes = set()
    nr_reduced_penetrance_genes = 0
    if reduced_penetrance:
        logger.info("Found file with genes that have reduced penetrance")
        for line in reduced_penetrance:
            if not line.startswith('#'):
                nr_reduced_penetrance_genes += 1
                gene_id = line.rstrip().split()[0]
                logger.debug("Adding gene {0} to reduced penetrance genes".format(
                    gene_id
                ))
                reduced_penetrance_genes.add(
                    gene_id
                )
    
        logger.info("Found {0} genes with reduced penetrance".format(
            nr_reduced_penetrance_genes))
    
    
    if not family_file:
        logger.warning("Please provide a family file with -f/--family_file")
        logger.info("Exiting")
        sys.exit(1)
    
    logger.info("Setting up a family parser")
    family_parser = FamilyParser(family_file, family_type)
    logger.debug("Family parser done")
    
    families = {}
    logger.info("Check if the families have any affected individuals")
    for family_id in family_parser.families:
        found_affected = False
        family_obj = family_parser.families[family_id]
        for ind_id in family_obj.individuals:
            ind_obj = family_obj.individuals[ind_id]
            if ind_obj.affected:
                found_affected = True
        
        if found_affected:
            families[family_id] = family_obj
        else:
            logger.warning("No affected individuals found for family {0}."\
                           " Skipping family.".format(family_id))
    
    if not families:
        logger.warning("Please provide at least one family with affected individuals")
        sys.exit(0)
    # The individuals in the ped file must be present in the variant file:
    logger.info("Families used in analysis: {0}".format(
                    ','.join(list(families.keys()))))
    logger.info("Individuals included in analysis: {0}".format(
                    ','.join(list(family_parser.individuals.keys()))))
    
    
    head = HeaderParser()
    
    for line in variant_file:
        line = line.rstrip()
        if line.startswith('#'):
            if line.startswith('##'):
                head.parse_meta_data(line)
            else:
                head.parse_header_line(line)
        else:
            break
    
    #Add the first variant to the iterator
    variant_file = itertools.chain([line], variant_file)
    
    if vep:
        if not "CSQ" in head.info_dict:
            logger.warning("vep flag is used but there is no CSQ field specified in header")
            logger.info("Please check VCF file")
            logger.info("Exiting...")
            sys.exit(1)
        else:
            logger.info("Using VEP annotation")
    else:
        if not keyword in head.info_dict:
            logger.warning("Annotation key {0} could not be found in VCF header".format(keyword))
            logger.info("Please check VCF file")
            logger.info("Exiting...")
            sys.exit(1)
        else:
            logger.info("Using {0} annotation".format(keyword))
        
    
    if "GeneticModels" in head.info_dict:
        logger.warning("Genetic models are already annotated according to vcf"\
        " header.")
        logger.info("Exiting...")
        sys.exit(1)
    
    logger.info("Adding genmod version to vcf header")
    head.add_version_tracking(
                    info_id='genmod',
                    version=__version__,
                    date=datetime.now().strftime("%Y-%m-%d %H:%M"),
                    command_line=' '.join(argument_list)
                )
    
    logger.debug("Version added")
    logger.info("Adding genetic models to vcf header")
    add_metadata(
        head,
        'info',
        'GeneticModels',
        annotation_number='.',
        entry_type='String',
        description="':'-separated list of genetic models for this variant."
    )
    
    logger.debug("Genetic models added")
    logger.info("Adding model score to vcf header")
    add_metadata(
        head,
        'info',
        'ModelScore',
        annotation_number='.',
        entry_type='String',
        description="PHRED score for genotype models."
    )
    logger.debug("Model score added")
    
    logger.info("Adding Compounds to vcf header")
    add_metadata(
        head,
        'info',
        'Compounds',
        annotation_number='.',
        entry_type='String',
        description=("List of compound pairs for this variant. "
        "The list is split on ','; the family id is separated from its compounds "
        "with ':'. Compounds are separated with '|'.")
    )
    logger.debug("Compounds added")
    
    vcf_individuals = head.individuals
    logger.debug("Individuals found in vcf file: {}".format(', '.join(vcf_individuals)))
    

    start_time_analysis = datetime.now()
    
    try:
        check_individuals(family_parser.individuals, vcf_individuals)
    except IOError as e:
        logger.error(e)
        logger.info("Individuals in PED file: {0}".format(
                        ', '.join(family_parser.individuals)))
        logger.info("Individuals in VCF file: {0}".format(', '.join(vcf_individuals)))
        logger.info("Exiting...")
        sys.exit(1)

    analysis_individuals = list(family_parser.individuals.keys())
    
    logger.info("Individuals used in analysis: {0}".format(
        ', '.join(analysis_individuals)))
    
    ###################################################################
    ### The task queue is where all jobs (in this case batches that ###
    ### represent variants in a region) are put. The consumers will ###
    ### then pick their jobs from this queue.                       ###
    ###################################################################

    logger.debug("Setting up a JoinableQueue for storing variant batches")
    variant_queue = JoinableQueue(maxsize=1000)
    logger.debug("Setting up a Queue for storing results from workers")
    results = Manager().Queue()

    num_model_checkers = processes
    # Adapt the number of processes to the machine that runs the analysis
    logger.info('Number of CPUs: {}'.format(cpu_count()))
    logger.info('Number of model checkers: {}'.format(num_model_checkers))


    # These are the workers that do the heavy part of the analysis
    logger.info('Setting up the workers')
    model_checkers = [
        VariantAnnotator(
            task_queue=variant_queue,
            results_queue=results,
            families=families,
            individuals=analysis_individuals,
            phased=phased,
            strict=strict,
            whole_gene=whole_gene,
            vep=vep,
            reduced_penetrance_genes = reduced_penetrance_genes
        )
        for i in range(num_model_checkers)
    ]
    logger.info('Starting the workers')
    for worker in model_checkers:
        logger.debug('Starting worker {0}'.format(worker))
        worker.start()

    # This process prints the variants to temporary files
    logger.info('Setting up the variant printer')
    if len(model_checkers) == 1:
        print_headers(head=head, outfile=outfile, silent=silent)
        variant_printer = VariantPrinter(
                task_queue=results,
                head=head,
                mode='normal',
                outfile = outfile
        )
    else:
        # We use a temp file to store the processed variants
        logger.debug("Build a tempfile for printing the variants")
        if temp_dir:
            temp_file = NamedTemporaryFile(delete=False, dir=temp_dir)
        else:
            temp_file = NamedTemporaryFile(delete=False)
        temp_file.close()
        
        variant_printer = VariantPrinter(
                task_queue=results,
                head=head,
                mode='chromosome',
                outfile = temp_file.name
        )
    
    logger.info('Starting the variant printer process')
    variant_printer.start()

    start_time_variant_parsing = datetime.now()
    
    # This process parses the original vcf and create batches to put in the variant queue:
    logger.info('Start parsing the variants')
    chromosome_list = get_batches(
                                variants = variant_file,
                                batch_queue = variant_queue,
                                header = head,
                                vep = vep,
                                annotation_keyword = keyword
                            )
    
    logger.debug("Put stop signs in the variant queue")
    for i in range(num_model_checkers):
        variant_queue.put(None)
    
    variant_queue.join()
    results.put(None)
    variant_printer.join()
    
    if len(model_checkers) > 1:
        sort_variants(infile=temp_file.name, mode='chromosome')

        print_headers(head=head, outfile=outfile, silent=silent)

        with open(temp_file.name, 'r', encoding='utf-8') as f:
            for line in f:
                print_variant(
                    variant_line=line,
                    outfile=outfile,
                    mode='modified',
                    silent=silent
                )
    
        logger.debug("Removing temp file")
        os.remove(temp_file.name)
        logger.debug("Temp file removed")

    logger.info('Time for whole analysis: {0}'.format(
        str(datetime.now() - start_time_analysis)))
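
The example above retrieves a module-level logger with logging.getLogger(__name__) and leaves handler configuration to the application. A minimal, self-contained sketch of that pattern (the module and function names here are illustrative, not part of genmod) looks like this:

import logging

logger = logging.getLogger(__name__)   # named after the importing module, e.g. "genmod.annotate_models"

def annotate(variant_count):
    logger.info("Annotating %d variants", variant_count)
    logger.debug("Debug details are only shown when the level allows it")

if __name__ == '__main__':
    # The application (not the library module) decides how messages are displayed.
    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s %(name)s %(levelname)s %(message)s')
    annotate(42)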

Example 108

Project: pdfmasher Source File: stylizer.py
    def __init__(self, tree, path, oeb, profile, extra_css='', user_css='',
            change_justification='left'):
        assert profile is not None
        # XXX str/bytes hackfix
        if isinstance(path, bytes):
            decoded_path = path.decode('utf-8')
        else:
            decoded_path = path
        self.oeb = oeb
        self.profile = profile
        self.change_justification = change_justification
        item = oeb.manifest.hrefs[path]
        basename = os.path.basename(decoded_path)
        cssname = os.path.splitext(basename)[0] + '.css'
        stylesheets = [html_css_stylesheet()]
        head = xpath(tree, '/h:html/h:head')
        if head:
            head = head[0]
        else:
            head = []

        parser = CSSParser(fetcher=self._fetch_css_file,
                log=logging.getLogger('calibre.css'))
        self.font_face_rules = []
        for elem in head:
            if (elem.tag == XHTML('style') and
                elem.get('type', CSS_MIME) in OEB_STYLES):
                text = elem.text if elem.text else ''
                for x in elem:
                    t = getattr(x, 'text', None)
                    if t:
                        text += '\n\n' + force_unicode(t, 'utf-8')
                    t = getattr(x, 'tail', None)
                    if t:
                        text += '\n\n' + force_unicode(t, 'utf-8')
                if text:
                    text = XHTML_CSS_NAMESPACE + elem.text
                    text = oeb.css_preprocessor(text)
                    stylesheet = parser.parseString(text, href=cssname)
                    stylesheet.namespaces['h'] = XHTML_NS
                    stylesheets.append(stylesheet)
            elif elem.tag == XHTML('link') and elem.get('href') \
                 and elem.get('rel', 'stylesheet').lower() == 'stylesheet' \
                 and elem.get('type', CSS_MIME).lower() in OEB_STYLES:
                href = urlnormalize(elem.attrib['href'])
                path = item.abshref(href)
                sitem = oeb.manifest.hrefs.get(path, None)
                if sitem is None:
                    logging.warn(
                        'Stylesheet %r referenced by file %r not in manifest' %
                        (path, item.href))
                    continue
                if not hasattr(sitem.data, 'cssRules'):
                    logging.warn(
                    'Stylesheet %r referenced by file %r is not CSS'%(path,
                        item.href))
                    continue
                stylesheets.append(sitem.data)
        csses = {'extra_css':extra_css, 'user_css':user_css}
        for w, x in list(csses.items()):
            if x:
                try:
                    text = XHTML_CSS_NAMESPACE + x
                    stylesheet = parser.parseString(text, href=cssname)
                    stylesheet.namespaces['h'] = XHTML_NS
                    stylesheets.append(stylesheet)
                except:
                    logging.exception('Failed to parse %s, ignoring.'%w)
                    logging.debug('Bad css: ')
                    logging.debug(x)
        rules = []
        index = 0
        self.stylesheets = set()
        self.page_rule = {}
        for stylesheet in stylesheets:
            href = stylesheet.href
            self.stylesheets.add(href)
            for rule in stylesheet.cssRules:
                rules.extend(self.flatten_rule(rule, href, index))
                index = index + 1
        # XXX had to fix crash about unsortable type, so that's why we only sort by first item of tuple
        rules.sort(key=lambda tup: tup[:1])
        self.rules = rules
        self._styles = {}
        class_sel_pat = re.compile(r'\.[a-z]+', re.IGNORECASE)
        capital_sel_pat = re.compile(r'h|[A-Z]+')
        for _, _, cssdict, text, _ in rules:
            fl = ':first-letter' in text
            if fl:
                text = text.replace(':first-letter', '')
            try:
                selector = CSSSelector(text)
            except (AssertionError, ExpressionError, etree.XPathSyntaxError,
                    NameError, # thrown on OS X instead of SelectorSyntaxError
                    SelectorSyntaxError):
                continue
            try:
                matches = selector(tree)
            except etree.XPathEvalError:
                continue

            if not matches:
                ntext = capital_sel_pat.sub(lambda m: m.group().lower(), text)
                if ntext != text:
                    logging.warn('Transformed CSS selector ' + text + ' to ' + ntext)
                    selector = CSSSelector(ntext)
                    matches = selector(tree)

            if not matches and class_sel_pat.match(text) and text.lower() != text:
                found = False
                ltext = text.lower()
                for x in tree.xpath('//*[@class]'):
                    if ltext.endswith('.'+x.get('class').lower()):
                        matches.append(x)
                        found = True
                if found:
                    logging.warn('Ignoring case mismatches for CSS selector: %s in %s'%(text, item.href))
            if fl:
                from lxml.builder import ElementMaker
                E = ElementMaker(namespace=XHTML_NS)
                for elem in matches:
                    for x in elem.iter():
                        if x.text:
                            punctuation_chars = []
                            text = str(x.text)
                            while text:
                                if not unicodedata.category(text[0]).startswith('P'):
                                    break
                                punctuation_chars.append(text[0])
                                text = text[1:]

                            special_text = ''.join(punctuation_chars) + \
                                    (text[0] if text else '')
                            span = E.span(special_text)
                            span.tail = text[1:]
                            x.text = None
                            x.insert(0, span)
                            self.style(span)._update_cssdict(cssdict)
                            break
            else:
                for elem in matches:
                    self.style(elem)._update_cssdict(cssdict)
        for elem in xpath(tree, '//h:*[@style]'):
            self.style(elem)._apply_style_attr()
        num_pat = re.compile(r'\d+$')
        for elem in xpath(tree, '//h:img[@width or @height]'):
            style = self.style(elem)
            # Check if either height or width is not default
            is_styled = style._style.get('width', 'auto') != 'auto' or \
                    style._style.get('height', 'auto') != 'auto'
            if not is_styled:
                # Update img style dimension using width and height
                upd = {}
                for prop in ('width', 'height'):
                    val = elem.get(prop, '').strip()
                    try:
                        del elem.attrib[prop]
                    except:
                        pass
                    if val:
                        if num_pat.match(val) is not None:
                            val += 'px'
                        upd[prop] = val
                if upd:
                    style._update_cssdict(upd)
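
This example hands a named logger, logging.getLogger('calibre.css'), to the CSS parser while the surrounding code logs through the module-level logging.warn/logging.debug helpers, which go to the root logger. Below is a small sketch of the same idea under assumed names (parse_css is a stand-in, not the cssutils API): create a dedicated named logger, configure it once, and pass it to whatever component should report through it.

import logging

css_log = logging.getLogger('calibre.css')      # a dedicated channel for CSS diagnostics
css_log.setLevel(logging.WARNING)               # ignore parser chatter below WARNING
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter('%(name)s: %(levelname)s: %(message)s'))
css_log.addHandler(handler)

def parse_css(text, log):
    # A stand-in for a parser that reports problems on the logger it was given.
    if not text.strip():
        log.warning('empty stylesheet')
    return text

parse_css('', log=css_log)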

Example 109

Project: xuebao Source File: application.py
    def __init__(self, use_local_mic=False):
        self._logger = logging.getLogger(__name__)
        
        # Read config
        configfile = settings.config('config.info')
        self._logger.debug("Trying to read config file: '%s'", configfile)
        
        self.config = configreader.ConfigReader()
        self.config.read(configfile)
        
        audio_engine_slug = self.config.get('Audio', 'Engine', 'alsa')
        self._logger.debug("Using Audio engine '%s'", audio_engine_slug)
        
        active_stt_slug = self.config.get('STT', 'ActiveEngine', 'sphinx')
        self._logger.debug("Using STT engine '%s'", active_stt_slug)
        
        passive_stt_slug = self.config.get('STT', 'PassiveEngine', active_stt_slug)
        self._logger.debug("Using passive STT engine '%s'", passive_stt_slug)
        
        tts_slug = self.config.get('TTS', 'Engine', 'espeak-tts')
        self._logger.debug("Using TTS engine '%s'", tts_slug)
        
        player_slug = self.config.get('Mp3Player', 'Engine', 'pygame-player')
        self._logger.debug("Using Mp3 player '%s'", player_slug)
        
        # Load plugins
        plugin_directories = [settings.PLUGIN_PATH]
        self.plugins = pluginstore.PluginStore(plugin_directories)
        self.plugins.detect_plugins()

        # Initialize AudioEngine
        ae_info = self.plugins.get_plugin(audio_engine_slug,
                                          category='audioengine')
        self.audio = ae_info.plugin_class(ae_info, self.config)

        # Initialize audio input device
        device_slug = self.config.get('Audio', 'InputDevice', None)
        try:
            if device_slug:
                input_device = self.audio.get_device_by_slug(device_slug)
            else:
                input_device = self.audio.get_default_device(False)
            
            if audioengine.DEVICE_TYPE_INPUT not in input_device.types:
                raise audioengine.UnsupportedFormat(
                    "Audio device with slug '%s' is not an input device"
                    % input_device.slug)
        except (audioengine.DeviceException) as e:
            devices = [device.slug for device in
                       self.audio.get_devices(device_type=audioengine.DEVICE_TYPE_INPUT)]
            self._logger.critical(e.args[0])
            self._logger.warning('Valid input devices: %s',
                                 ', '.join(devices))
            raise

        # Initialize audio output device
        device_slug = self.config.get('Audio', 'OutputDevice', None)
        try:
            if device_slug:
                output_device = self.audio.get_device_by_slug(device_slug)
            else:
                output_device = self.audio.get_default_device(True)
            
            if audioengine.DEVICE_TYPE_OUTPUT not in output_device.types:
                raise audioengine.UnsupportedFormat(
                    "Audio device with slug '%s' is not an output device"
                    % output_device.slug)
        except (audioengine.DeviceException) as e:
            devices = [device.slug for device in
                       self.audio.get_devices(device_type=audioengine.DEVICE_TYPE_OUTPUT)]
            self._logger.critical(e.args[0])
            self._logger.warning('Valid output devices: %s',
                                 ', '.join(devices))
            raise

        # Initialize Brain
        self.brain = brain.Brain(self.config)
        for info in self.plugins.get_plugins_by_category('speechhandler'):
            try:
                plugin = info.plugin_class(info, self.config)
            except Exception as e:
                self._logger.warning(
                    "Plugin '%s' skipped! (Reason: %s)", info.name,
                    e.message if hasattr(e, 'message') else 'Unknown',
                    exc_info=(
                        self._logger.getEffectiveLevel() == logging.DEBUG))
            else:
                self.brain.add_plugin(plugin)

        if len(self.brain.get_plugins()) == 0:
            msg = 'No plugins for handling speech found!'
            self._logger.error(msg)
            raise RuntimeError(msg)
        elif len(self.brain.get_all_phrases()) == 0:
            msg = 'No command phrases found!'
            self._logger.error(msg)
            raise RuntimeError(msg)

        active_stt_plugin_info = self.plugins.get_plugin(
            active_stt_slug, category='stt')
        active_stt_plugin = active_stt_plugin_info.plugin_class(
            'default', self.brain.get_plugin_phrases(), active_stt_plugin_info,
            self.config)

        if passive_stt_slug != active_stt_slug:
            passive_stt_plugin_info = self.plugins.get_plugin(
                passive_stt_slug, category='stt')
        else:
            passive_stt_plugin_info = active_stt_plugin_info

        keyword = settings.KEYWORD
        passive_stt_plugin = passive_stt_plugin_info.plugin_class(
            'keyword', self.brain.get_standard_phrases() + [keyword],
            passive_stt_plugin_info, self.config)

        tts_plugin_info = self.plugins.get_plugin(tts_slug, category='tts')
        tts_plugin = tts_plugin_info.plugin_class(tts_plugin_info, self.config)
        
        player_plugin_info = self.plugins.get_plugin(player_slug, category='mp3player')
        player_plugin = player_plugin_info.plugin_class(player_plugin_info, self.config)

        # Initialize Mic
        if use_local_mic:
            self.mic = mic_mock.Mic()
        else:
            self.mic = mic.Mic(
                input_device, output_device,
                passive_stt_plugin, active_stt_plugin,
                tts_plugin, player_plugin, self.config, keyword=keyword)

        self.conversation = conversation.Conversation(
            self.mic, self.brain, self.config)
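
Two idioms from the constructor above are worth isolating: storing the logger as an instance attribute, and attaching a traceback to a warning only when the effective level is DEBUG. A minimal sketch, independent of the xuebao plugin machinery (class and method names are illustrative):

import logging

class Application:
    def __init__(self):
        self._logger = logging.getLogger(__name__)

    def load_plugin(self, loader):
        try:
            return loader()
        except Exception as e:
            # Include the full traceback only when running at DEBUG level.
            self._logger.warning(
                "Plugin skipped! (Reason: %s)", e,
                exc_info=(self._logger.getEffectiveLevel() == logging.DEBUG))
            return None

logging.basicConfig(level=logging.DEBUG)
Application().load_plugin(lambda: 1 / 0)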

Example 110

Project: osrframework Source File: usufy.py
def main(args):
    '''
        Main function. This function is created in this way so as to let other applications make use of the full configuration capabilities of the application.
    '''
    # Recovering the logger
    # Calling the logger when being imported
    osrframework.utils.logger.setupLogger(loggerName="osrframework.usufy", verbosity=args.verbose, logFolder=args.logfolder)
    # From now on, the logger can be recovered like this:
    logger = logging.getLogger("osrframework.usufy")
    # Printing the results if requested
    if not args.maltego:
        print banner.text

        sayingHello = """usufy.py Copyright (C) F. Brezo and Y. Rubio (i3visio) 2016
This program comes with ABSOLUTELY NO WARRANTY.
This is free software, and you are welcome to redistribute it under certain conditions. For additional info, visit <http://www.gnu.org/licenses/gpl-3.0.txt>."""
        logger.info(sayingHello)
        print sayingHello
        print
        logger.info("Starting usufy.py...")

    if args.license:
        logger.info("Looking for the license...")
        # showing the license
        try:
            with open ("COPYING", "r") as iF:
                contenido = iF.read().splitlines()
                for linea in contenido:
                    print linea
        except Exception:
            try:
                # Trying to recover the COPYING file...
                with open ("/usr/share/osrframework/COPYING", "r") as iF:
                    contenido = iF.read().splitlines()
                    for linea in contenido:
                        print linea
            except:
                logger.error("ERROR: there has been an error when opening the COPYING file.\n\tThe file contains the terms of the GPLv3 under which this software is distributed.\n\tIn case of doubts, verify the integrity of the files or contact [email protected].")
    elif args.fuzz:
        logger.info("Performing the fuzzing tasks...")
        res = fuzzUsufy(args.fuzz, args.fuzz_config)
        logger.info("Recovered platforms:\n" + str(res))
    else:
        logger.debug("Recovering the list of platforms to be processed...")
        # Recovering the list of platforms to be launched
        listPlatforms = platform_selection.getPlatformsByName(platformNames=args.platforms, tags=args.tags, mode="usufy")
        logger.debug("Platforms recovered.")

        if args.info:
            # Information actions...
            if args.info == 'list_platforms':
                infoPlatforms="Listing the platforms:\n"
                for p in listPlatforms:
                    infoPlatforms += "\t\t" + (str(p) + ": ").ljust(16, ' ') + str(p.tags)+"\n"
                logger.info(infoPlatforms)
                return infoPlatforms
            elif args.info == 'list_tags':
                logger.info("Listing the tags:")
                tags = {}
                # Going through all the selected platforms to get their tags
                for p in listPlatforms:
                    for t in p.tags:
                        if t not in tags.keys():
                            tags[t] = 1
                        else:
                            tags[t] += 1
                infoTags = "List of tags:\n"
                # Displaying the results in a sorted list
                for t in tags.keys():
                    infoTags += "\t\t" + (t + ": ").ljust(16, ' ') + str(tags[t]) + "  time(s)\n"
                logger.info(infoTags)
                return infoTags
            else:
                pass

        # performing the test
        elif args.benchmark:
            logger.warning("The benchmark mode may last some minutes as it will be performing similar queries to the ones performed by the program in production. ")
            logger.info("Launching the benchmarking tests...")
            platforms = platform_selection.getAllPlatformNames("usufy")
            res = benchmark.doBenchmark(platforms)
            strTimes = ""
            for e in sorted(res.keys()):
                strTimes += str(e) + "\t" + str(res[e]) + "\n"
            logger.info(strTimes)
            return strTimes
        # Executing the corresponding process...
        else:
            # Showing the execution time...
            if not args.maltego:
                startTime= dt.datetime.now()
                print str(startTime) +"\tStarting search in " + str(len(listPlatforms)) + " platform(s)... Relax!\n"

            # Defining the list of users to monitor
            nicks = []
            logger.debug("Recovering nicknames to be processed...")
            if args.nicks:
                for n in args.nicks:
                    # TO-DO
                    #     A trick to avoid processing the properties when being queried by Maltego
                    if "properties.i3visio" not in n:
                        nicks.append(n)
            else:
                # Reading the nick files
                try:
                    nicks = args.list.read().splitlines()
                except:
                    logger.error("ERROR: there has been an error when opening the file that stores the nicks.\tPlease, check the existence of this file.")

            # Defining the results
            res = []

            if args.output_folder != None:
                # If an output folder was selected, verify/prepare it
                logger.debug("Preparing the output folder...")
                if not args.maltego:
                    if not os.path.exists(args.output_folder):
                        logger.warning("The output folder \'" + args.output_folder + "\' does not exist. The system will try to create it.")
                        os.makedirs(args.output_folder)
                # Launching the process...
                try:
                    res = processNickList(nicks, listPlatforms, args.output_folder, avoidProcessing = args.avoid_processing, avoidDownload = args.avoid_download, nThreads=args.threads, verbosity= args.verbose, logFolder=args.logfolder)
                except Exception as e:
                    print "Exception grabbed when processing the nicks: " + str(e)
                    print traceback.print_stack()
            else:
                try:
                    res = processNickList(nicks, listPlatforms, nThreads=args.threads, verbosity= args.verbose, logFolder=args.logfolder)
                except Exception as e:
                    print "Exception grabbed when processing the nicks: " + str(e)
                    print traceback.print_stack()

            logger.info("Listing the results obtained...")
            # We are going to iterate over the results...
            strResults = "\t"

            # Structure returned
            """
            [
                {
                  "attributes": [
                    {
                      "attributes": [],
                      "type": "i3visio.uri",
                      "value": "http://twitter.com/i3visio"
                    },
                    {
                      "attributes": [],
                      "type": "i3visio.alias",
                      "value": "i3visio"
                    },
                    {
                      "attributes": [],
                      "type": "i3visio.platform",
                      "value": "Twitter"
                    }
                  ],
                  "type": "i3visio.profile",
                  "value": "Twitter - i3visio"
                }
                ,
                ...
            ]
            """
            for r in res:
                # The format of the results (attributes) for a given nick is a list as follows:

                for att in r["attributes"]:
                    # iterating through the attributes
                    platform = ""
                    uri = ""
                    for details in att["attributes"]:
                        if details["type"] == "i3visio.platform":
                            platform = details["value"]
                        if details["type"] == "i3visio.uri":
                            uri = details["value"]
                    try:
                        strResults+= (str(platform) + ":").ljust(16, ' ')+ " "+ str(uri)+"\n\t\t"
                    except:
                        pass

                logger.info(strResults)

            # Generating summary files for each ...
            if args.extension:
                # Storing the file...
                logger.info("Creating output files as requested.")
                if not args.maltego:
                    # Verifying if the outputPath exists
                    if not os.path.exists (args.output_folder):
                        logger.warning("The output folder \'" + args.output_folder + "\' does not exist. The system will try to create it.")
                        os.makedirs(args.output_folder)

                # Grabbing the results
                fileHeader = os.path.join(args.output_folder, args.file_header)

                # Iterating through the given extensions to print its values
                if not args.maltego:
                    for ext in args.extension:
                        # Generating output files
                        general.exportUsufy(res, ext, fileHeader)

            # Generating the Maltego output
            if args.maltego:
                general.listToMaltego(res)

            # Printing the results if requested
            if not args.maltego:
                print "A summary of the results obtained are shown in the following table:"
                #print res
                print unicode(general.usufyToTextExport(res))

                print

                if args.web_browser:
                    general.openResultsInBrowser(res)

                print "You can find all the information collected in the following files:"
                for ext in args.extension:
                    # Showing the output files
                    print "\t-" + fileHeader + "." + ext

            # Showing the execution time...
            if not args.maltego:
                print
                endTime= dt.datetime.now()
                print str(endTime) +"\tFinishing execution..."
                print
                print "Total time used:\t" + str(endTime-startTime)
                print "Average seconds/query:\t" + str((endTime-startTime).total_seconds()/len(listPlatforms)) +" seconds"
                print

            # Urging users to place an issue on Github...
            if not args.maltego:
                print
                print "Did something go wrong? Is a platform reporting false positives? Do you need to integrate a new one?"
                print "Then, place an issue in the Github project: <https://github.com/i3visio/osrframework/issues>."
                print "Note that otherwise, we won't know about it!"
                print

            return res
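
usufy configures its logger through a project helper (osrframework.utils.logger.setupLogger) and then recovers it anywhere with logging.getLogger("osrframework.usufy"). The sketch below shows the same configure-once/retrieve-by-name pattern using only stdlib calls; the logger name is kept from the example, but setup_logger is a stand-in, not the osrframework helper.

import logging

def setup_logger(name, verbosity=1):
    # Stand-in for a project-specific setup helper: attach one handler, pick a level.
    logger = logging.getLogger(name)
    logger.setLevel(logging.DEBUG if verbosity > 1 else logging.INFO)
    if not logger.handlers:                      # avoid duplicate handlers on repeated setup
        handler = logging.StreamHandler()
        handler.setFormatter(logging.Formatter('%(asctime)s %(name)s %(message)s'))
        logger.addHandler(handler)

setup_logger("osrframework.usufy", verbosity=2)

# Any other module can now recover the same logger object by name.
logger = logging.getLogger("osrframework.usufy")
logger.info("Starting usufy.py...")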

Example 111

Project: laikaboss Source File: laika.py
def main():
    # Define default configuration location

    parser = OptionParser(usage="usage: %prog [options] /path/to/file")
    parser.add_option("-d", "--debug",
                      action="store_true",
                      dest="debug",
                      help="enable debug messages to the console.")
    parser.add_option("-c", "--config-path",
                      action="store", type="string",
                      dest="config_path",
                      help="path to configuration for laikaboss framework.")
    parser.add_option("-o", "--out-path",
                      action="store", type="string",
                      dest="save_path",
                      help="Write all results to the specified path")
    parser.add_option("-s", "--source",
                      action="store", type="string",
                      dest="source",
                      help="Set the source (may affect dispatching) [default:laika]")
    parser.add_option("-p", "--num_procs",
                      action="store", type="int",
                      dest="num_procs",
                      default=8,
                      help="Specify the number of CPUs to use for a recursive scan. [default:8]")
    parser.add_option("-l", "--log",
                      action="store_true",
                      dest="log_result",
                      help="enable logging to syslog")
    parser.add_option("-j", "--log-json",
                      action="store", type="string",
                      dest="log_json",
                      help="enable logging JSON results to file")
    parser.add_option("-m", "--module",
                      action="store", type="string",
                      dest="scan_modules",
                      help="Specify individual module(s) to run and their arguments. If multiple, must be a space-separated list.")
    parser.add_option("--parent",
                      action="store", type="string",
                      dest="parent", default="",
                      help="Define the parent of the root object")
    parser.add_option("-e", "--ephID",
                      action="store", type="string",
                      dest="ephID", default="",
                      help="Specify an ephemeralID to send with the object")
    parser.add_option("--metadata",
                      action="store",
                      dest="ext_metadata",
                      help="Define metadata to add to the scan or specify a file containing the metadata.")
    parser.add_option("--size-limit",
                      action="store", type="int", default=10,
                      dest="sizeLimit",
                      help="Specify a size limit in MB (default: 10)")
    parser.add_option("--file-limit",
                      action="store", type="int", default=0,
                      dest="fileLimit",
                      help="Specify a limited number of files to scan (default: off)")
    parser.add_option("--progress",
                      action="store_true",
                      dest="progress",
                      default=False,
                      help="enable the progress bar")
    (options, args) = parser.parse_args()
    
    logger = logging.getLogger()

    if options.debug:
        # stdout is added by default, we'll capture this object here
        #lhStdout = logger.handlers[0]
        fileHandler = logging.FileHandler('laika-debug.log', 'w')
        formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
        fileHandler.setFormatter(formatter)
        logger.addHandler(fileHandler)
        # remove stdout from handlers so that debug info is only written to the file
        #logger.removeHandler(lhStdout)
        logging.basicConfig(level=logging.DEBUG)
        logger.setLevel(logging.DEBUG)

    global EXT_METADATA
    if options.ext_metadata:
        if os.path.exists(options.ext_metadata):
            with open(options.ext_metadata) as metafile:
                EXT_METADATA = json.loads(metafile.read())
        else:
            EXT_METADATA = json.loads(options.ext_metadata)
    else:
        EXT_METADATA = getConfig("ext_metadata")
    
    global EPHID
    if options.ephID:
        EPHID = options.ephID
    else:
        EPHID = getConfig("ephID")

    global SCAN_MODULES
    if options.scan_modules:
        SCAN_MODULES = options.scan_modules.split()
    else:
        SCAN_MODULES = None
    logging.debug("SCAN_MODULES: %s"  % (SCAN_MODULES))

    global PROGRESS_BAR
    if options.progress:
        PROGRESS_BAR = 1
    else:
        PROGRESS_BAR = strtobool(getConfig('progress_bar'))
    logging.debug("PROGRESS_BAR: %s"  % (PROGRESS_BAR))

    global LOG_RESULT
    if options.log_result:
        LOG_RESULT = 1
    else:
        LOG_RESULT = strtobool(getConfig('log_result'))
    logging.debug("LOG_RESULT: %s" % (LOG_RESULT))

    global LOG_JSON
    if options.log_json:
        LOG_JSON = options.log_json
    else:
        LOG_JSON = getConfig('log_json')

    global NUM_PROCS
    if options.num_procs:
        NUM_PROCS = options.num_procs
    else:
        NUM_PROCS = int(getConfig('num_procs'))
    logging.debug("NUM_PROCS: %s"  % (NUM_PROCS))

    global MAX_BYTES
    if options.sizeLimit:
        MAX_BYTES = options.sizeLimit * 1024 * 1024
    else:
        MAX_BYTES = int(getConfig('max_bytes'))
    logging.debug("MAX_BYTES: %s"  % (MAX_BYTES))

    global MAX_FILES
    if options.fileLimit:
        MAX_FILES = options.fileLimit
    else:
        MAX_FILES = int(getConfig('max_files'))
    logging.debug("MAX_FILES: %s"  % (MAX_FILES))

    global SOURCE
    if options.source:
        SOURCE = options.source
    else:
        SOURCE = getConfig('source')

    global SAVE_PATH
    if options.save_path:
        SAVE_PATH = options.save_path
    else:
        SAVE_PATH = getConfig('save_path')

    global CONFIG_PATH
    # Highest priority configuration is via argument
    if options.config_path:
        CONFIG_PATH = options.config_path
        logging.debug("using alternative config path: %s" % options.config_path)
        if not os.path.exists(options.config_path):
            error("the provided config path is not valid, exiting")
            return 1
    # Next, check to see if we're in the top level source directory (dev environment)
    elif os.path.exists(default_configs['dev_config_path']):
        CONFIG_PATH = default_configs['dev_config_path']
    # Next, check for an installed copy of the default configuration
    elif os.path.exists(default_configs['sys_config_path']):
        CONFIG_PATH = default_configs['sys_config_path']
    # Exit
    else:
        error('A valid framework configuration was not found in either of the following locations:\
\n%s\n%s' % (default_configs['dev_config_path'],default_configs['sys_config_path']))
        return 1
       

    # Check for stdin if no arguments were provided
    if len(args) == 0:

        DATA_PATH = []

        if not sys.stdin.isatty():
            while True:
                f = sys.stdin.readline().strip()
                if not f:
                    break
                else:
                    if not os.path.isfile(f):
                        error("One of the specified files does not exist: %s" % (f))
                        return 1
                    if os.path.isdir(f):
                        error("One of the files you specified is actually a directory: %s" % (f))
                        return 1
                    DATA_PATH.append(f)

        if not DATA_PATH:
            error("You must provide files via stdin when no arguments are provided")
            return 1
        logging.debug("Loaded %s files from stdin" % (len(DATA_PATH)))
    elif len(args) == 1:
        if os.path.isdir(args[0]):
            DATA_PATH = args[0]
        elif os.path.isfile(args[0]):
            DATA_PATH = [args[0]]
        else:
            error("File or directory does not exist: %s" % (args[0]))
            return 1
    else:
        for f in args:
            if not os.path.isfile(f):
                error("One of the specified files does not exist: %s" % (f))
                return 1
            if os.path.isdir(f):
                error("One of the files you specified is actually a directory: %s" % (f))
                return 1
        
        DATA_PATH = args

   
    tasks = multiprocessing.JoinableQueue()
    results = multiprocessing.Queue()
    
    fileList = []
    if type(DATA_PATH) is str:
        for root, dirs, files in os.walk(DATA_PATH):
            files = [f for f in files if not f[0] == '.']
            dirs[:] = [d for d in dirs if not d[0] == '.']
            for fname in files:
                fullpath = os.path.join(root, fname)
                if not os.path.islink(fullpath) and os.path.isfile(fullpath):
                    fileList.append(fullpath)
    else:
        fileList = DATA_PATH

    if MAX_FILES:
        fileList = fileList[:MAX_FILES]

    num_jobs = len(fileList)
    logging.debug("Loaded %s files for scanning" % (num_jobs))
    
    # Start consumers
    # If there's less files to process than processes, reduce the number of processes
    if num_jobs < NUM_PROCS:
        NUM_PROCS = num_jobs
    logging.debug("Starting %s processes" % (NUM_PROCS))
    consumers = [ Consumer(tasks, results)
                  for i in xrange(NUM_PROCS) ]
    try:
        
        for w in consumers:
            w.start()

        # Enqueue jobs
        for fname in fileList:
            tasks.put(fname)
        
        # Add a poison pill for each consumer
        for i in xrange(NUM_PROCS):
            tasks.put(None)

        if PROGRESS_BAR:
            monitor = QueueMonitor(tasks, num_jobs)
            monitor.start()

        # Wait for all of the tasks to finish
        tasks.join()
        if PROGRESS_BAR:
            monitor.join()

        while num_jobs:
            answer = zlib.decompress(results.get())
            print(answer)
            num_jobs -= 1

    except KeyboardInterrupt:
        error("Cancelled by user.. Shutting down.")
        for w in consumers:
            w.terminate()
            w.join()
        return None
    except:
        raise
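
When --debug is set, the example above adds a FileHandler to the root logger obtained from logging.getLogger() and drops the level to DEBUG. A stripped-down sketch of that arrangement (file name and format copied from the example, everything else illustrative):

import logging

logger = logging.getLogger()                     # the root logger

file_handler = logging.FileHandler('laika-debug.log', 'w')
file_handler.setFormatter(
    logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
logger.addHandler(file_handler)
logger.setLevel(logging.DEBUG)

logging.debug("written to laika-debug.log via the root logger's file handler")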

Example 112

Project: pyLAR Source File: nglra.py
def _runIteration(vector_length, level, currentIter, config, im_fns, sigma, gridSize, maxDisp, software):
    """Iterative unbiased low-rank atlas creation from a selection of images"""
    log = logging.getLogger(__name__)
    result_dir = config.result_dir
    selection = config.selection
    reference_im_fn = config.reference_im_fn
    use_healthy_atlas = config.use_healthy_atlas
    registration_type = config.registration_type
    lamda = config.lamda
    listOutputImages = []
    if registration_type == 'BSpline' or registration_type == 'Demons':
        EXE_BRAINSResample = software.EXE_BRAINSResample
        EXE_InvertDeformationField = software.EXE_InvertDeformationField
        if registration_type == 'BSpline':
            EXE_BRAINSFit = software.EXE_BRAINSFit
            EXE_BSplineToDeformationField = software.EXE_BSplineToDeformationField
        elif registration_type == 'Demons':
            EXE_BRAINSDemonWarp = software.EXE_BRAINSDemonWarp
    elif registration_type == 'ANTS':
        EXE_antsRegistration = software.EXE_antsRegistration
        EXE_WarpImageMultiTransform = software.EXE_WarpImageMultiTransform
        ants_params = config.ants_params
    # Prepares data matrix for low-rank decomposition
    num_of_data = len(selection)
    Y = np.zeros((vector_length, num_of_data))
    iter_prefix = 'L' + str(level) + '_Iter'
    iter_path = os.path.join(result_dir, iter_prefix)
    current_path_iter = iter_path + str(currentIter)
    prev_path_iter = iter_path + str(currentIter-1)
    for i in range(num_of_data):
        im_file = prev_path_iter + '_' + str(i) + '.nrrd'
        inIm = sitk.ReadImage(im_file)
        tmp = sitk.GetArrayFromImage(inIm)
        if sigma > 0:  # blurring
            log.info("Blurring: " + str(sigma))
            outIm = pyLAR.GaussianBlur(inIm, None, sigma)
            tmp = sitk.GetArrayFromImage(outIm)
        Y[:, i] = tmp.reshape(-1)
        del tmp

    # Low-rank and sparse decomposition
    low_rank, sparse, n_iter, rank, sparsity, sum_sparse = pyLAR.rpca(Y, lamda)
    lr = pyLAR.saveImagesFromDM(low_rank, current_path_iter + '_LowRank_', reference_im_fn)
    sp = pyLAR.saveImagesFromDM(sparse, current_path_iter + '_Sparse_', reference_im_fn)
    listOutputImages = lr + sp
    # Visualize and inspect
    try:
        import matplotlib.pyplot as plt
        fig = plt.figure(figsize=(15, 5))
        slice_prefix = 'L' + str(level) + '_' + str(currentIter)
        pyLAR.showSlice(Y, slice_prefix + ' Input', plt.cm.gray, 0, reference_im_fn)
        pyLAR.showSlice(low_rank, slice_prefix + ' low rank', plt.cm.gray, 1, reference_im_fn)
        pyLAR.showSlice(np.abs(sparse), slice_prefix + ' sparse', plt.cm.gray, 2, reference_im_fn)
        plt.savefig(current_path_iter + '.png')
        fig.clf()
        plt.close(fig)
    except ImportError:
        pass

    del low_rank, sparse, Y

    # Unbiased low-rank atlas building (ULAB)
    if not use_healthy_atlas:
        EXE_AverageImages = software.EXE_AverageImages
        # Average the low-rank images to produce the Atlas
        atlasIm = current_path_iter + '_atlas.nrrd'
        listOfImages = []
        num_of_data = len(selection)
        for i in range(num_of_data):
            lrIm = current_path_iter + '_LowRank_' + str(i) + '.nrrd'
            listOfImages.append(lrIm)
        pyLAR.AverageImages(EXE_AverageImages, listOfImages, atlasIm)

        im = sitk.ReadImage(atlasIm)
        im_array = sitk.GetArrayFromImage(im)
        z_dim, x_dim, y_dim = im_array.shape
        try:
            import matplotlib.pyplot as plt
            plt.figure()
            implot = plt.imshow(np.flipud(im_array[z_dim / 2, :, :]), plt.cm.gray)
            plt.title(iter_prefix + str(currentIter) + ' atlas')
            plt.savefig(current_path_iter + '.png')
        except ImportError:
            pass
        reference_im_fn = atlasIm
    listOutputImages += [reference_im_fn]
    for i in range(num_of_data):
        # Warps the low-rank image back to the initial state (the non-greedy way)
        invWarpedlowRankIm = ''
        if currentIter == 1:
            invWarpedlowRankIm = current_path_iter + '_LowRank_' + str(i) + '.nrrd'
        else:
            lowRankIm = current_path_iter + '_LowRank_' + str(i) + '.nrrd'
            invWarpedlowRankIm = current_path_iter + '_InvWarped_LowRank_' + str(i) + '.nrrd'
            if registration_type == 'BSpline' or registration_type == 'Demons':
                previousIterDVF = prev_path_iter + '_DVF_' + str(i) + '.nrrd'
                inverseDVF = prev_path_iter + '_INV_DVF_' + str(i) + '.nrrd'
                pyLAR.genInverseDVF(EXE_InvertDeformationField, previousIterDVF, inverseDVF, True)
                pyLAR.updateInputImageWithDVF(EXE_BRAINSResample, lowRankIm, reference_im_fn,
                                              inverseDVF, invWarpedlowRankIm, True)
            if registration_type == 'ANTS':
                previousIterTransformPrefix = prev_path_iter + '_' + str(i) + '_'
                pyLAR.ANTSWarpImage(EXE_WarpImageMultiTransform, lowRankIm, invWarpedlowRankIm, reference_im_fn,
                                    previousIterTransformPrefix, True, True)

        # Registers each inversely-warped low-rank image to the Atlas image
        outputIm = current_path_iter + '_Deformed_LowRank' + str(i) + '.nrrd'
        # .tfm for BSpline only
        outputTransform = current_path_iter + '_Transform_' + str(i) + '.tfm'
        outputDVF = current_path_iter + '_DVF_' + str(i) + '.nrrd'

        movingIm = invWarpedlowRankIm
        fixedIm = reference_im_fn

        initial_prefix = 'L' + str(level) + '_Iter0_'
        initialInputImage = os.path.join(result_dir, initial_prefix + str(i) + '.nrrd')
        newInputImage = current_path_iter + '_' + str(i) + '.nrrd'

        if registration_type == 'BSpline':
            pyLAR.BSplineReg_BRAINSFit(EXE_BRAINSFit, fixedIm, movingIm, outputIm, outputTransform,
                                              gridSize, maxDisp, EXECUTE=True)
            pyLAR.ConvertTransform(EXE_BSplineToDeformationField, reference_im_fn,
                                                outputTransform, outputDVF, EXECUTE=True)
            pyLAR.updateInputImageWithDVF(EXE_BRAINSResample, initialInputImage, reference_im_fn,
                                                       outputDVF, newInputImage, EXECUTE=True)
        elif registration_type == 'Demons':
            pyLAR.DemonsReg(EXE_BRAINSDemonWarp, fixedIm, movingIm, outputIm, outputDVF, EXECUTE=True)
            pyLAR.updateInputImageWithDVF(EXE_BRAINSResample, initialInputImage, reference_im_fn,
                                                       outputDVF, newInputImage, EXECUTE=True)
        elif registration_type == 'ANTS':
            # Generates a warp(DVF) file and an affine file
            outputTransformPrefix = current_path_iter + '_' + str(i) + '_'
            # if currentIter > 1:
            # initialTransform = os.path.join(result_dir, iter_prefix + str(currentIter-1) + '_' + str(i) + '_0Warp.nii.gz')
            # else:
            pyLAR.ANTS(EXE_antsRegistration, fixedIm, movingIm, outputTransformPrefix, ants_params, EXECUTE=True)
            # Generates the warped input image with the specified file name
            pyLAR.ANTSWarpImage(EXE_WarpImageMultiTransform, initialInputImage, newInputImage,
                                             reference_im_fn, outputTransformPrefix, EXECUTE=True)
        else:
            raise ValueError('Unrecognized registration type: ' + str(registration_type))
        listOutputImages += [newInputImage]
    return sparsity, sum_sparse, listOutputImages
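
pyLAR's helper takes the usual library route: call logging.getLogger(__name__) inside the module and leave handler configuration to the caller. If the caller never configures logging, a library can stay silent by installing a NullHandler on its logger. The sketch below illustrates that convention under assumed names; it is not taken from pyLAR.

import logging

# In the library module: emit records, configure nothing.
log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())   # keeps quiet if the application sets nothing up

def run_iteration(level, current_iter):
    log.info("Level %d, iteration %d", level, current_iter)

# In the application: decide once how library records are displayed.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO, format='%(name)s: %(message)s')
    run_iteration(0, 1)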

Example 113

Project: auditok Source File: cmdline.py
Function: main
def main(argv=None):
    '''Command line options.'''

    program_name = os.path.basename(sys.argv[0])
    program_version = version
    program_build_date = "%s" % __updated__

    program_version_string = '%%prog %s (%s)' % (program_version, program_build_date)
    #program_usage = '''usage: spam two eggs''' # optional - will be autogenerated by optparse
    program_longdesc = '''''' # optional - give further explanation about what the program does
    program_license = "Copyright 2015 Mohamed El Amine SEHILI                                            \
                Licensed under the General Public License (GPL) Version 3 \nhttp://www.gnu.org/licenses/"

    if argv is None:
        argv = sys.argv[1:]
    try:
        # setup option parser
        parser = OptionParser(version=program_version_string, epilog=program_longdesc, description=program_license)
        
        group = OptionGroup(parser, "[Input-Output options]")
        group.add_option("-i", "--input", dest="input", help="Input audio or video file. Use - for stdin [default: read from microphone using pyaudio]", metavar="FILE")
        group.add_option("-t", "--input-type", dest="input_type", help="Input audio file type. Mandatory if file name has no extension [default: %default]", type=str, default=None, metavar="String")
        group.add_option("-M", "--max_time", dest="max_time", help="Max data (in seconds) to read from microphone/file [default: read until the end of file/stream]", type=float, default=None, metavar="FLOAT")
        group.add_option("-O", "--output-main", dest="output_main", help="Save main stream as. If omitted main stream will not be saved [default: omitted]", type=str, default=None, metavar="FILE")
        group.add_option("-o", "--output-tokens", dest="output_tokens", help="Output file name format for detections. Use {N} and {start} and {end} to build file names, example: 'Det_{N}_{start}-{end}.wav'", type=str, default=None, metavar="STRING")
        group.add_option("-T", "--output-type", dest="output_type", help="Audio type used to save detections and/or main stream. If not supplied will: (1). guess from extension or (2). use wav format", type=str, default=None, metavar="STRING")
        group.add_option("-u", "--use-channel", dest="use_channel", help="Choose channel to use from a multi-channel audio file (requires pydub). 'left', 'right' and 'mix' are accepted values. [Default: 1 (i.e. 1st or left channel)]", type=str, default="1", metavar="STRING")
        parser.add_option_group(group)
        
        
        group = OptionGroup(parser, "[Tokenization options]", "Set tokenizer options and energy threshold.")
        group.add_option("-a", "--analysis-window", dest="analysis_window", help="Size of analysis window in seconds [default: %default (10ms)]", type=float, default=0.01, metavar="FLOAT")
        group.add_option("-n", "--min-duration", dest="min_duration", help="Min duration of a valid audio event in seconds [default: %default]", type=float, default=0.2, metavar="FLOAT")
        group.add_option("-m", "--max-duration", dest="max_duration", help="Max duration of a valid audio event in seconds [default: %default]", type=float, default=5, metavar="FLOAT")
        group.add_option("-s", "--max-silence", dest="max_silence", help="Max duration of a consecutive silence within a valid audio event in seconds [default: %default]", type=float, default=0.3, metavar="FLOAT")
        group.add_option("-d", "--drop-trailing-silence", dest="drop_trailing_silence", help="Drop trailing silence from a detection [default: keep trailing silence]",  action="store_true", default=False)
        group.add_option("-e", "--energy-threshold", dest="energy_threshold", help="Log energy threshold for detection [default: %default]", type=float, default=50, metavar="FLOAT")
        parser.add_option_group(group)
        
        
        group = OptionGroup(parser, "[Audio parameters]", "Define audio parameters if data is read from a headerless file (raw or stdin) or you want to use different microphone parameters.")        
        group.add_option("-r", "--rate", dest="sampling_rate", help="Sampling rate of audio data [default: %default]", type=int, default=16000, metavar="INT")
        group.add_option("-c", "--channels", dest="channels", help="Number of channels of audio data [default: %default]", type=int, default=1, metavar="INT")
        group.add_option("-w", "--width", dest="sample_width", help="Number of bytes per audio sample [default: %default]", type=int, default=2, metavar="INT")
        parser.add_option_group(group)
        
        group = OptionGroup(parser, "[Do something with detections]", "Use these options to print, play or plot detections.") 
        group.add_option("-C", "--command", dest="command", help="Command to call when an audio detection occurs. Use $ to represent the file name to use with the command (e.g. -C 'du -h $')", default=None, type=str, metavar="STRING")
        group.add_option("-E", "--echo", dest="echo", help="Play back each detection immediately using pyaudio [default: do not play]",  action="store_true", default=False)
        group.add_option("-p", "--plot", dest="plot", help="Plot and show audio signal and detections (requires matplotlib)",  action="store_true", default=False)
        group.add_option("", "--save-image", dest="save_image", help="Save plotted audio signal and detections as a picture or a PDF file (requires matplotlib)",  type=str, default=None, metavar="FILE")
        group.add_option("", "--printf", dest="printf", help="print detections, one per line, using a user supplied format (e.g. '[{id}]: {start} -- {end}'). Available keywords {id}, {start}, {end} and {duration}",  type=str, default="{id} {start} {end}", metavar="STRING")
        group.add_option("", "--time-format", dest="time_format", help="format used to print {start} and {end}. [Default= %default]. %S: absolute time in sec. %I: absolute time in ms. If at least one of (%h, %m, %s, %i) is used, convert time into hours, minutes, seconds and millis (e.g. %h:%m:%s.%i). Only required fields are printed",  type=str, default="%S", metavar="STRING")
        parser.add_option_group(group)
        
        parser.add_option("-q", "--quiet", dest="quiet", help="Do not print any information about detections [default: print 'id', 'start' and 'end' of each detection]",  action="store_true", default=False)
        parser.add_option("-D", "--debug", dest="debug", help="Print processing operations to STDOUT",  action="store_true", default=False)
        parser.add_option("", "--debug-file", dest="debug_file", help="Print processing operations to FILE",  type=str, default=None, metavar="FILE")
        
        

        # process options
        (opts, args) = parser.parse_args(argv)
        
        if opts.input == "-":
            asource = StdinAudioSource(sampling_rate = opts.sampling_rate,
                                       sample_width = opts.sample_width,
                                       channels = opts.channels)
        #read data from a file
        elif opts.input is not None:
            asource = file_to_audio_source(filename=opts.input, filetype=opts.input_type, uc=opts.use_channel)
        
        # read data from microphone via pyaudio
        else:
            try:
                asource = PyAudioSource(sampling_rate = opts.sampling_rate,
                                        sample_width = opts.sample_width,
                                        channels = opts.channels)
            except Exception:
                sys.stderr.write("Cannot read data from audio device!\n")
                sys.stderr.write("You should either install pyaudio or read data from STDIN\n")
                sys.exit(2)
               
        logger = logging.getLogger(LOGGER_NAME)
        logger.setLevel(logging.DEBUG)
        
        handler = logging.StreamHandler(sys.stdout)
        if opts.quiet or not opts.debug:
            # only critical messages will be printed
            handler.setLevel(logging.CRITICAL)
        else:
            handler.setLevel(logging.DEBUG)
        
        logger.addHandler(handler)
        
        if opts.debug_file is not None:
            logger.setLevel(logging.DEBUG)
            opts.debug = True
            handler = logging.FileHandler(opts.debug_file, "w")
            fmt = logging.Formatter('[%(asctime)s] | %(message)s')
            handler.setFormatter(fmt)
            handler.setLevel(logging.DEBUG)
            logger.addHandler(handler)
        
        record = opts.output_main is not None or opts.plot or opts.save_image is not None
                        
        ads = ADSFactory.ads(audio_source = asource, block_dur = opts.analysis_window, max_time = opts.max_time, record = record)
        validator = AudioEnergyValidator(sample_width=asource.get_sample_width(), energy_threshold=opts.energy_threshold)
        
        
        if opts.drop_trailing_silence:
            mode = StreamTokenizer.DROP_TRAILING_SILENCE
        else:
            mode = 0
        
        analysis_window_per_second = 1. / opts.analysis_window
        tokenizer = StreamTokenizer(validator=validator, min_length=opts.min_duration * analysis_window_per_second,
                                    max_length=int(opts.max_duration * analysis_window_per_second),
                                    max_continuous_silence=opts.max_silence * analysis_window_per_second,
                                    mode = mode)
        
        
        observers = []
        tokenizer_worker = None
        
        if opts.output_tokens is not None:
            
            try:
                # check user format is correct
                fname  = opts.output_tokens.format(N=0, start=0, end=0)
                
                # find file type for detections
                tok_type =  opts.output_type
                if tok_type is None:
                    tok_type = os.path.splitext(opts.output_tokens)[1][1:]
                if tok_type == "": 
                    tok_type = "wav"
                
                token_saver = TokenSaverWorker(name_format=opts.output_tokens, filetype=tok_type,
                                               debug=opts.debug, logger=logger, sr=asource.get_sampling_rate(),
                                               sw=asource.get_sample_width(),
                                               ch=asource.get_channels())
                observers.append(token_saver)
            
            except Exception:
                sys.stderr.write("Wrong format for detections file name: '{0}'\n".format(opts.output_tokens))
                sys.exit(2)
            
        if opts.echo:
            try:
                player = player_for(asource)
                player_worker = PlayerWorker(player=player, debug=opts.debug, logger=logger)
                observers.append(player_worker)
            except Exception:
                sys.stderr.write("Cannot get an audio player!\n")
                sys.stderr.write("You should either install pyaudio or supply a command (-C option) to play audio\n")
                sys.exit(2)
                
        if opts.command is not None and len(opts.command) > 0:
            cmd_worker = CommandLineWorker(command=opts.command, debug=opts.debug, logger=logger)
            observers.append(cmd_worker)
        
        if not opts.quiet or opts.plot or opts.save_image is not None:
            oformat = opts.printf.replace("\\n", "\n").replace("\\t", "\t").replace("\\r", "\r")
            converter = seconds_to_str_fromatter(opts.time_format)
            log_worker = LogWorker(print_detections = not opts.quiet, output_format=oformat,
                                   time_formatter=converter, logger=logger, debug=opts.debug)
            observers.append(log_worker)
        
        tokenizer_worker = TokenizerWorker(ads, tokenizer, opts.analysis_window, observers)
        
        def _save_main_stream():
            # find file type
            main_type =  opts.output_type
            if main_type is None:
                main_type = os.path.splitext(opts.output_main)[1][1:]
            if main_type == "": 
                main_type = "wav"
            ads.close()
            ads.rewind()
            data = ads.get_audio_source().get_data_buffer()
            if len(data) > 0:
                save_audio_data(data=data, filename=opts.output_main, filetype=main_type, sr=asource.get_sampling_rate(),
                                sw = asource.get_sample_width(),
                                ch = asource.get_channels())
        
        def _plot():
            import numpy as np
            ads.close()
            ads.rewind()
            data = ads.get_audio_source().get_data_buffer()
            signal = AudioEnergyValidator._convert(data, asource.get_sample_width())
            detections = [(det[3] , det[4]) for det in log_worker.detections]
            max_amplitude = 2**(asource.get_sample_width() * 8 - 1) - 1
            energy_as_amp = np.sqrt(np.exp(opts.energy_threshold * np.log(10) / 10)) / max_amplitude
            plot_all(signal / max_amplitude, asource.get_sampling_rate(), energy_as_amp, detections, show = opts.plot, save_as = opts.save_image)
        
        
        # start observer threads
        for obs in observers:
            obs.start()
        # start tokenization thread
        tokenizer_worker.start()
        
        while True:
            time.sleep(1)
            if len(threading.enumerate()) == 1:
                break
            
        tokenizer_worker = None
            
        if opts.output_main is not None:
            _save_main_stream()
        if opts.plot or opts.save_image is not None:
            _plot()
            
        return 0
            
    except KeyboardInterrupt:
        
        if tokenizer_worker is not None:
            tokenizer_worker.stop()
        for obs in observers:
            obs.stop()
            
        if opts.output_main is not None:
            _save_main_stream()
        if opts.plot or opts.save_image is not None:
            _plot()
        
        return 0

    except Exception as e:
        sys.stderr.write(program_name + ": " + str(e) + "\n")
        sys.stderr.write("for help use -h\n")
        
        return 2
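
Distilled from the block above, here is a minimal, self-contained sketch of the same logging.getLogger pattern: a named logger whose console handler stays at CRITICAL unless debugging is requested, plus an optional DEBUG-level file handler. LOGGER_NAME and build_logger are placeholder names for this sketch, not part of the original script.

import logging
import sys

LOGGER_NAME = "demo"  # stand-in for the example's LOGGER_NAME constant

def build_logger(quiet=False, debug=False, debug_file=None):
    # Named logger: repeated getLogger() calls with the same name return the same object
    logger = logging.getLogger(LOGGER_NAME)
    logger.setLevel(logging.DEBUG)

    # Console handler: silenced down to CRITICAL unless debug output is wanted
    console = logging.StreamHandler(sys.stdout)
    console.setLevel(logging.DEBUG if (debug and not quiet) else logging.CRITICAL)
    logger.addHandler(console)

    # Optional file handler that always records DEBUG-level messages
    if debug_file is not None:
        fh = logging.FileHandler(debug_file, "w")
        fh.setFormatter(logging.Formatter("[%(asctime)s] | %(message)s"))
        fh.setLevel(logging.DEBUG)
        logger.addHandler(fh)
    return logger

logger = build_logger(debug=True)
logger.debug("tokenizer started")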

Example 114

Project: rio-mbtiles Source File: cli.py
@click.command(short_help="Export a dataset to MBTiles.")
@click.argument(
    'files',
    nargs=-1,
    type=click.Path(resolve_path=True),
    required=True,
    metavar="INPUT [OUTPUT]")
@output_opt
@force_overwrite_opt
@click.option('--title', help="MBTiles dataset title.")
@click.option('--description', help="MBTiles dataset description.")
@click.option('--overlay', 'layer_type', flag_value='overlay', default=True,
              help="Export as an overlay (the default).")
@click.option('--baselayer', 'layer_type', flag_value='baselayer',
              help="Export as a base layer.")
@click.option('-f', '--format', 'img_format', type=click.Choice(['JPEG', 'PNG']),
              default='JPEG',
              help="Tile image format.")
@click.option('--zoom-levels',
              default=None,
              metavar="MIN..MAX",
              help="A min...max range of export zoom levels. "
                   "The default zoom level "
                   "is the one at which the dataset is contained within "
                   "a single tile.")
@click.option('--image-dump',
              metavar="PATH",
              help="A directory into which image tiles will be optionally "
                   "dumped.")
@click.option('-j', 'num_workers', type=int, default=DEFAULT_NUM_WORKERS,
              help="Number of worker processes (default: %d)." % (
                  DEFAULT_NUM_WORKERS))
@click.option('--src-nodata', default=None, show_default=True,
              type=float, help="Manually override source nodata")
@click.option('--dst-nodata', default=None, show_default=True,
              type=float, help="Manually override destination nodata")
@click.version_option(version=mbtiles_version, message='%(version)s')
@click.pass_context
def mbtiles(ctx, files, output, force_overwrite, title, description,
            layer_type, img_format, zoom_levels, image_dump, num_workers,
            src_nodata, dst_nodata):
    """Export a dataset to MBTiles (version 1.1) in a SQLite file.

    The input dataset may have any coordinate reference system. It must
    have at least three bands, which will become the red, blue, and
    green bands of the output image tiles.

    If no zoom levels are specified, the defaults are the zoom levels
    nearest to the one at which one tile may contain the entire source
    dataset.

    If a title or description for the output file is not provided, it
    will be taken from the input dataset's filename.

    This command is suited for small to medium (~1 GB) sized sources.

    Python package: rio-mbtiles (https://github.com/mapbox/rio-mbtiles).
    """
    output, files = resolve_inout(files=files, output=output,
                                  force_overwrite=force_overwrite)
    inputfile = files[0]

    logger = logging.getLogger('rio-mbtiles')

    with ctx.obj['env']:

        # Read metadata from the source dataset.
        with rasterio.open(inputfile) as src:

            validate_nodata(dst_nodata, src_nodata, src.profile.get('nodata'))
            base_kwds = {'dst_nodata': dst_nodata, 'src_nodata': src_nodata}

            if src_nodata is not None:
                base_kwds.update(nodata=src_nodata)

            if dst_nodata is not None:
                base_kwds.update(nodata=dst_nodata)

            # Name and description.
            title = title or os.path.basename(src.name)
            description = description or src.name

            # Compute the geographic bounding box of the dataset.
            (west, east), (south, north) = transform(
                src.crs, 'EPSG:4326', src.bounds[::2], src.bounds[1::2])

        # Resolve the minimum and maximum zoom levels for export.
        if zoom_levels:
            minzoom, maxzoom = map(int, zoom_levels.split('..'))
        else:
            zw = int(round(math.log(360.0 / (east - west), 2.0)))
            zh = int(round(math.log(170.1022 / (north - south), 2.0)))
            minzoom = min(zw, zh)
            maxzoom = max(zw, zh)

        logger.debug("Zoom range: %d..%d", minzoom, maxzoom)

        # Parameters for creation of tile images.
        base_kwds.update({
            'driver': img_format.upper(),
            'dtype': 'uint8',
            'nodata': 0,
            'height': 256,
            'width': 256,
            'count': 3,
            'crs': 'EPSG:3857'})

        img_ext = 'jpg' if img_format.lower() == 'jpeg' else 'png'

        # Initialize the sqlite db.
        if os.path.exists(output):
            os.unlink(output)
        conn = sqlite3.connect(output)
        cur = conn.cursor()
        cur.execute(
            "CREATE TABLE tiles "
            "(zoom_level integer, tile_column integer, "
            "tile_row integer, tile_data blob);")
        cur.execute(
            "CREATE TABLE metadata (name text, value text);")

        # Insert mbtiles metadata into db.
        cur.execute(
            "INSERT INTO metadata (name, value) VALUES (?, ?);",
            ("name", title))
        cur.execute(
            "INSERT INTO metadata (name, value) VALUES (?, ?);",
            ("type", layer_type))
        cur.execute(
            "INSERT INTO metadata (name, value) VALUES (?, ?);",
            ("version", "1.1"))
        cur.execute(
            "INSERT INTO metadata (name, value) VALUES (?, ?);",
            ("description", description))
        cur.execute(
            "INSERT INTO metadata (name, value) VALUES (?, ?);",
            ("format", img_ext))
        cur.execute(
            "INSERT INTO metadata (name, value) VALUES (?, ?);",
            ("bounds", "%f,%f,%f,%f" % (west, south, east, north)))

        conn.commit()

        # Create a pool of workers to process tile tasks.
        pool = Pool(num_workers, init_worker, (inputfile, base_kwds), 100)

        # Constrain bounds.
        EPS = 1.0e-10
        west = max(-180 + EPS, west)
        south = max(-85.051129, south)
        east = min(180 - EPS, east)
        north = min(85.051129, north)

        # Initialize iterator over output tiles.
        tiles = mercantile.tiles(
            west, south, east, north, range(minzoom, maxzoom + 1))

        for tile, contents in pool.imap_unordered(process_tile, tiles):

            # MBTiles has a different origin than Mercantile/tilebelt.
            tiley = int(math.pow(2, tile.z)) - tile.y - 1

            # Optional image dump.
            if image_dump:
                img_name = '%d-%d-%d.%s' % (
                    tile.x, tiley, tile.z, img_ext)
                img_path = os.path.join(image_dump, img_name)
                with open(img_path, 'wb') as img:
                    img.write(contents)

            # Insert tile into db.
            cur.execute(
                "INSERT INTO tiles "
                "(zoom_level, tile_column, tile_row, tile_data) "
                "VALUES (?, ?, ?, ?);",
                (tile.z, tile.x, tiley, buffer(contents)))

            conn.commit()

        conn.close()
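
As an aside on the zoom-level default described in the docstring, the computation above boils down to the following standalone sketch. The function name is illustrative only; the bounds are assumed to be geographic degrees, as produced by the EPSG:4326 transform in the example.

import math

def default_zoom_range(west, south, east, north):
    # Zoom at which a single tile (360 degrees wide, ~170.1 degrees tall in
    # Web Mercator) still contains the whole dataset, computed per axis.
    zw = int(round(math.log(360.0 / (east - west), 2.0)))
    zh = int(round(math.log(170.1022 / (north - south), 2.0)))
    return min(zw, zh), max(zw, zh)

# A bounding box of roughly 1.4 x 1.1 degrees:
print(default_zoom_range(-78.9, 23.5, -77.5, 24.6))  # -> (7, 8)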

Example 115

Project: appdaemon Source File: appdaemon.py
def main():

  global config
  global config_file
  global config_file_modified

  #import appdaemon.stacktracer
  #appdaemon.stacktracer.trace_start("/tmp/trace.html")
  
  # Windows does not support SIGUSR1 or SIGUSR2
  if platform.system() != "Windows":
    signal.signal(signal.SIGUSR1, handle_sig)
    signal.signal(signal.SIGUSR2, handle_sig)

  
  # Get command line args

  parser = argparse.ArgumentParser()

  parser.add_argument("-c", "--config", help="full path to config file", type=str, default = None)
  parser.add_argument("-p", "--pidfile", help="full path to PID File", default = "/tmp/hapush.pid")
  parser.add_argument("-t", "--tick", help = "time in seconds that a tick in the scheduler lasts", default = 1, type = float)
  parser.add_argument("-s", "--starttime", help = "start time for scheduler <YYYY-MM-DD HH:MM:SS>", type = str)
  parser.add_argument("-e", "--endtime", help = "end time for scheduler <YYYY-MM-DD HH:MM:SS>",type = str, default = None)
  parser.add_argument("-i", "--interval", help = "multiplier for scheduler tick", type = float, default = 1)
  parser.add_argument("-D", "--debug", help="debug level", default = "INFO", choices=["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"])
  parser.add_argument('-v', '--version', action='version', version='%(prog)s ' + __version__)
  
  # Windows does not have Daemonize package so disallow
  if platform.system() != "Windows":
    parser.add_argument("-d", "--daemon", help="run as a background process", action="store_true")


  args = parser.parse_args()
  
  conf.tick = args.tick
  conf.interval = args.interval
  
  if args.starttime != None:
    conf.now = datetime.datetime.strptime(args.starttime, "%Y-%m-%d %H:%M:%S").timestamp()
  else:
    conf.now = datetime.datetime.now().timestamp()
    
  if args.endtime != None:
    conf.endtime = datetime.datetime.strptime(args.endtime, "%Y-%m-%d %H:%M:%S")
  
  if conf.tick != 1 or conf.interval != 1 or args.starttime != None:
    conf.realtime = False
  
  config_file = args.config

  
  if config_file == None:
    config_file = find_path("appdaemon.cfg")
  
  if platform.system() != "Windows":
    isdaemon = args.daemon
  else:
    isdaemon = False

  # Read Config File

  config = configparser.ConfigParser()
  config.read_file(open(config_file))

  assert "AppDaemon" in config, "[AppDaemon] section required in {}".format(config_file)

  conf.config = config
  conf.ha_url = config['AppDaemon']['ha_url']
  conf.ha_key = config['AppDaemon'].get('ha_key', "")
  conf.logfile = config['AppDaemon'].get("logfile")
  conf.errorfile = config['AppDaemon'].get("errorfile")
  conf.app_dir = config['AppDaemon'].get("app_dir")
  conf.threads = int(config['AppDaemon']['threads'])
  conf.latitude = float(config['AppDaemon']['latitude'])
  conf.longitude = float(config['AppDaemon']['longitude'])
  conf.elevation = float(config['AppDaemon']['elevation'])
  conf.timezone = config['AppDaemon'].get("timezone")
  conf.time_zone = config['AppDaemon'].get("time_zone")
  conf.certpath = config['AppDaemon'].get("cert_path")
  
  if conf.timezone == None and conf.time_zone == None:
    raise KeyError("time_zone")

  if conf.time_zone == None:
    conf.time_zone = conf.timezone

  # Use the supplied timezone
  os.environ['TZ'] = conf.time_zone
  
  if conf.logfile == None:
    conf.logfile = "STDOUT"

  if conf.errorfile == None:
    conf.errorfile = "STDERR"
   
  if isdaemon and (conf.logfile == "STDOUT" or conf.errorfile == "STDERR" or conf.logfile == "STDERR" or conf.errorfile == "STDOUT"):
    raise ValueError("STDOUT and STDERR not allowed with -d")
    
  # Setup Logging

  conf.logger = logging.getLogger("log1")
  numeric_level = getattr(logging, args.debug, None)
  conf.logger.setLevel(numeric_level)
  conf.logger.propagate = False
  #formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')

  # Send to file if we are daemonizing, else send to console
  
  if conf.logfile != "STDOUT":
    fh = RotatingFileHandler(conf.logfile, maxBytes=1000000, backupCount=3)
    fh.setLevel(numeric_level)
    #fh.setFormatter(formatter)
    conf.logger.addHandler(fh)
  else:
    # Default for StreamHandler() is sys.stderr
    ch = logging.StreamHandler(stream=sys.stdout)
    ch.setLevel(numeric_level)
    #ch.setFormatter(formatter)
    conf.logger.addHandler(ch)

  # Setup compile output

  conf.error = logging.getLogger("log2")
  numeric_level = getattr(logging, args.debug, None)
  conf.error.setLevel(numeric_level)
  conf.error.propagate = False
  #formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')

  if conf.errorfile != "STDERR":
    efh = RotatingFileHandler(conf.errorfile, maxBytes=1000000, backupCount=3)
  else:
    efh = logging.StreamHandler()

  efh.setLevel(numeric_level)
  #efh.setFormatter(formatter)
  conf.error.addHandler(efh)

  # Now we have logging, warn about timezone
  if conf.timezone != None:
    ha.log(conf.logger, "WARNING", "'timezone' directive is deprecated, please use time_zone instead")

  
  init_sun()

  config_file_modified = os.path.getmtime(config_file)

  # Add appdir  and subdirs to path
  if conf.app_dir == None:
    conf.app_dir = find_path("apps")
  
  for root, subdirs, files in os.walk(conf.app_dir):
    if root[-11:] != "__pycache__":
      sys.path.insert(0, root)
  

  # Start main loop

  ha.log(conf.logger, "INFO", "AppDaemon Version {} starting".format(__version__))
  
  if isdaemon:
    keep_fds = [fh.stream.fileno(), efh.stream.fileno()]
    pid = args.pidfile
    daemon = Daemonize(app="appdaemon", pid=pid, action=run, keep_fds=keep_fds)
    daemon.start()
    while True:
      time.sleep(1)
  else:
    run()
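
A condensed sketch of the dual-logger setup above, assuming the same "log1"/"log2" names and the STDOUT/STDERR sentinels used by the example; setup_loggers is an invented helper name.

import logging
import sys
from logging.handlers import RotatingFileHandler

def setup_loggers(level=logging.INFO, logfile="STDOUT", errorfile="STDERR"):
    # Two independent named loggers, as in the example: one for normal output,
    # one for errors. propagate=False keeps records away from the root logger
    # so each message is emitted exactly once.
    log = logging.getLogger("log1")
    err = logging.getLogger("log2")
    for logger, target, fallback in ((log, logfile, sys.stdout),
                                     (err, errorfile, sys.stderr)):
        logger.setLevel(level)
        logger.propagate = False
        if target in ("STDOUT", "STDERR"):
            handler = logging.StreamHandler(fallback)
        else:
            # Roll the file over at ~1 MB, keeping three backups, as above
            handler = RotatingFileHandler(target, maxBytes=1000000, backupCount=3)
        handler.setLevel(level)
        logger.addHandler(handler)
    return log, err

log, err = setup_loggers()
log.info("main logger ready")
err.error("errors go to the second logger")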

Example 116

Project: OpenRenderManagement Source File: dispatcher.py
    def computeAssignments(self):
        '''Computes and returns a list of (rendernode, command) assignments.'''

        LOGGER = logging.getLogger('main')

        from .model.node import NoRenderNodeAvailable, NoLicenseAvailableForTask
        # if no rendernodes available, return
        if not any(rn.isAvailable() for rn in self.dispatchTree.renderNodes.values()):
            return []

        # first create a set of entrypoints that are not done nor cancelled nor blocked nor paused and that have at least one command ready
        # FIXME: hack to avoid getting the 'graphs' poolShare node in entryPoints, need to avoid it more nicely...
        entryPoints = set([poolShare.node for poolShare in self.dispatchTree.poolShares.values()
                                if poolShare.node.status not in (NODE_BLOCKED, NODE_DONE, NODE_CANCELED, NODE_PAUSED) and poolShare.node.readyCommandCount > 0 and poolShare.node.name != 'graphs'])

        # don't proceed to the calculation if no render nodes available in the requested pools
        isRenderNodesAvailable = False
        for pool, jobsIterator in groupby(entryPoints, lambda x: x.mainPoolShare().pool):
            renderNodesAvailable = set([rn for rn in pool.renderNodes if rn.status not in [RN_UNKNOWN, RN_PAUSED, RN_WORKING]])
            if len(renderNodesAvailable):
                isRenderNodesAvailable = True
                break
        if not isRenderNodesAvailable:
            return []

        # Log time updating max rn
        prevTimer = time.time()

        # sort by pool for the groupby
        entryPoints = sorted(entryPoints, key=lambda node: node.mainPoolShare().pool)

        # update the value of the maxrn for the poolshares (parallel dispatching)
        for pool, jobsIterator in groupby(entryPoints, lambda x: x.mainPoolShare().pool):

            # we are treating every active job of the pool
            jobsList = [job for job in jobsIterator]

            # the new maxRN value is calculated based on the number of active jobs of the pool, and the number of online rendernodes of the pool
            onlineRenderNodes = set([rn for rn in pool.renderNodes if rn.status not in [RN_UNKNOWN, RN_PAUSED]])
            nbOnlineRenderNodes = len(onlineRenderNodes)
            # LOGGER.debug("@   - nb rns awake:%r" % (nbOnlineRenderNodes) )

            # if we have a user-defined maxRN for some nodes, remove them from the list and subtract their maxRN from the pool's size
            l = jobsList[:]  # duplicate the list to be safe when removing elements
            for job in l:
                # LOGGER.debug("@   - checking userDefMaxRN: %s -> %r maxRN=%d" % (job.name, job.mainPoolShare().userDefinedMaxRN, job.mainPoolShare().maxRN ) )
                if job.mainPoolShare().userDefinedMaxRN and job.mainPoolShare().maxRN not in [-1, 0]:
                    # LOGGER.debug("@     removing: %s -> maxRN=%d" % (job.name, job.mainPoolShare().maxRN ) )
                    jobsList.remove(job)
                    nbOnlineRenderNodes -= job.mainPoolShare().maxRN

            # LOGGER.debug("@   - nb rns awake after maxRN:%d" % (nbOnlineRenderNodes) )
            if len(jobsList) == 0:
                continue

            # Prepare updatedMaxRN with dispatch key proportions
            # list of dks (integer only)
            dkList = [job.dispatchKey for job in jobsList]
            nbJobs = len(jobsList)     # number of jobs in the current pool
            nbRNAssigned = 0            # number of render nodes assigned for this pool

            dkMin = min(dkList)
            # dkPositiveList: Shift all dks values in order that each min value of dk becomes 1
            dkPositiveList = map(lambda x: x-dkMin+1, dkList)  # dk values start at 1
            dkSum = sum(dkPositiveList)

            # sort by id (fifo)
            jobsList = sorted(jobsList, key=lambda x: x.id)

            # then sort by dispatchKey (priority)
            jobsList = sorted(jobsList, key=lambda x: x.dispatchKey, reverse=True)

            for dk, jobIterator in groupby(jobsList, lambda x: x.dispatchKey):

                jobs = [job for job in jobIterator]
                # dkPositive: Shift all dks values in order that each min value of dk becomes 1
                dkPositive = dk - dkMin + 1

                # Proportion of render nodes for this dispatch key
                updatedmaxRN = int(round(nbOnlineRenderNodes * (dkPositive / float(dkSum))))

                for job in jobs:
                    job.mainPoolShare().maxRN = updatedmaxRN
                    nbRNAssigned += updatedmaxRN

            # PRA: Here is the main choice!
            # Add remaining RNs to most important jobs (to fix rounding errors)
            unassignedRN = nbOnlineRenderNodes - nbRNAssigned
            while unassignedRN > 0:
                for job in jobsList:
                    if unassignedRN <= 0:
                        break
                    job.mainPoolShare().maxRN += 1
                    unassignedRN -= 1

        if singletonconfig.get('CORE','GET_STATS'):
            singletonstats.theStats.assignmentTimers['update_max_rn'] = time.time() - prevTimer
        LOGGER.info( "%8.2f ms --> .... updating max RN values", (time.time() - prevTimer)*1000 )

        # now, we are treating every node
        # sort by id (fifo)
        entryPoints = sorted(entryPoints, key=lambda node: node.id)
        # then sort by dispatchKey (priority)
        entryPoints = sorted(entryPoints, key=lambda node: node.dispatchKey, reverse=True)

        # Put nodes with a userDefinedMaxRN first
        userDefEntryPoints = ifilter(lambda node: node.mainPoolShare().userDefinedMaxRN, entryPoints)
        standardEntryPoints = ifilter(lambda node: not node.mainPoolShare().userDefinedMaxRN, entryPoints)
        scoredEntryPoints = chain(userDefEntryPoints, standardEntryPoints)

        # Log time dispatching RNs
        prevTimer = time.time()

        # Iterate over each entryPoint to get an assignment
        assignments = []  # list of (renderNode, Command)
        for entryPoint in scoredEntryPoints:
            # If we have dedicated render nodes for this poolShare
            if not any([poolShare.hasRenderNodesAvailable() for poolShare in entryPoint.poolShares.values()]):
                continue
	    
            try:
                for (rn, com) in entryPoint.dispatchIterator(lambda: self.queue.qsize() > 0):
                    assignments.append((rn, com))
                    # increment the allocatedRN for the poolshare
                    entryPoint.mainPoolShare().allocatedRN += 1
                    # save the active poolshare of the rendernode
                    rn.currentpoolshare = entryPoint.mainPoolShare()
            except NoRenderNodeAvailable:
                pass
            except NoLicenseAvailableForTask:
                LOGGER.info("Missing license for node \"%s\" (other commands can start anyway)." % entryPoint.name)

        assignmentDict = collections.defaultdict(list)
        for (rn, com) in assignments:
            assignmentDict[rn].append(com)

        if singletonconfig.get('CORE','GET_STATS'):
            singletonstats.theStats.assignmentTimers['dispatch_command'] = time.time() - prevTimer
        LOGGER.info( "%8.2f ms --> .... dispatching commands", (time.time() - prevTimer)*1000  )

        #
        # Check replacements
        #
        # - do a pass over the jobs that have not received their share of the pie
        #     - identify the killable jobs in their pool
        #     - for each resource, on a match: evict the running job AND disable its killable attribute


        #
        # Backfill
        #
        # TODO: do another pass over the jobs that have a "killable" attribute and at least one additional pool

        return assignmentDict.items()
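
The maxRN update above is interleaved with a lot of bookkeeping; as a rough standalone illustration of the same proportional-split idea, here is a toy version (the list-based interface and function name are invented for this sketch).

def allocate_render_nodes(dispatch_keys, nb_online_rn):
    """Split nb_online_rn across jobs proportionally to their dispatch keys,
    after shifting the keys so that the smallest becomes 1, as above."""
    dk_min = min(dispatch_keys)
    positive = [dk - dk_min + 1 for dk in dispatch_keys]
    total = sum(positive)
    shares = [int(round(nb_online_rn * (p / float(total)))) for p in positive]
    # Hand out render nodes lost to rounding, highest-priority jobs first
    order = sorted(range(len(dispatch_keys)),
                   key=lambda i: dispatch_keys[i], reverse=True)
    unassigned = nb_online_rn - sum(shares)
    i = 0
    while unassigned > 0:
        shares[order[i % len(order)]] += 1
        unassigned -= 1
        i += 1
    return shares

print(allocate_render_nodes([1, 1, 2], 8))  # -> [2, 2, 4]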

Example 117

Project: franklin Source File: acceptance.py
def test_backbone(analysis=None, analysis_dir=None):
    '''It tests the backbone infrastructure.

    If no analysis is given it will run all of them.
    If no analysis_dir is given a temporary one will be used.
    '''
    logger = logging.getLogger('franklin')
    if analysis_dir:
        analysis_fhand = None
        analysis_fpath = analysis_dir
    else:
        analysis_fhand = NamedTemporaryDir()
        analysis_fpath = analysis_fhand.name

    project_dir = analysis_fpath
    repository_dir = join(TEST_DATA_DIR, 'acceptance')
    settings_path = prepare_conf(project_dir, repository_dir)
    choice = analysis
#    choice = 'snvs'
    if choice in ('cleaning', None):
        original_reads = join(project_dir, 'reads/raw')
        if exists(original_reads):
            os.remove(original_reads)
        reads = join(project_dir, 'reads')
        if not exists(reads):
            os.mkdir(reads)
        shutil.copytree(join(repository_dir, 'cleaning'),
                        join(project_dir, 'reads/raw'))
        analyses = ['clean_reads', 'read_stats']
        run_analysis(analyses, settings_path)

    if choice in ('assembling', None):
        clean_reads_dir = join(project_dir, 'reads', 'cleaned')
        if os.path.exists(clean_reads_dir):
            shutil.rmtree(join(project_dir, 'reads'))
        os.mkdir(join(project_dir, 'reads'))
        shutil.copytree(join(repository_dir, 'assembling'),
                        join(project_dir, 'reads/cleaned'))

        analyses = [ 'prepare_mira_assembly', 'mira_assembly']
        run_analysis(analyses, settings_path)

    if choice in ('mapping', None):
        clean_reads_dir = join(project_dir, 'reads', 'cleaned')
        if os.path.exists(clean_reads_dir):
            shutil.rmtree(join(project_dir, 'reads'))
        os.mkdir(join(project_dir, 'reads'))
        shutil.copytree(join(repository_dir, 'assembling'),
                        join(project_dir, 'reads/cleaned'))
        if exists(join(project_dir, 'mapping')):
            shutil.rmtree(join(project_dir, 'mapping'))
        os.makedirs(join(project_dir, 'mapping', 'reference'))
        shutil.copy(join(repository_dir, 'mapping', 'reference.fasta'),
                    join(project_dir, 'mapping', 'reference',
                         'reference.fasta'))

        analyses = ['mapping', 'merge_bams', 'realign_bam']
        run_analysis(analyses, settings_path)

    if choice in ('snvs', None):
        annot_dir = join(project_dir, 'annotations')
        create_dir(annot_dir)
        annot_res = join(annot_dir, 'repr')
        os.mkdir(join(annot_dir, 'input'))
        os.mkdir(annot_res)
        shutil.copy(join(repository_dir, 'snvs', 'reference.fasta'),
                    join(annot_dir, 'input', 'reference.fasta'))

        mapping_dir = join(project_dir, 'mapping')
        create_dir(mapping_dir)
        os.mkdir(join(mapping_dir, 'reference'))
        shutil.copy(join(repository_dir, 'snvs', 'merged.bam'),
                   join(project_dir, 'mapping', 'merged.bam'))
        shutil.copy(join(repository_dir, 'snvs', 'reference.fasta'),
                   join(project_dir, 'mapping', 'reference', 'reference.fasta'))
        analyses = ['annotate_snvs', 'filter_snvs', 'annotation_stats',
                    'write_annotations']
        run_analysis(analyses, settings_path)

        stats_fpath = join(project_dir, 'annotations', 'features', 'stats',
                            'reference.txt')
        result = open(stats_fpath).read()

        #print  result
        expected = '''Sequences with SNVs: 47
SNVs found: 176
SNV types:
\tinsertion: 1
\tdeletion: 11
\tcomplex: 7
\ttransition: 113
\ttransversion: 44
SNV locations:
\tunknown: 176'''
        assert expected in result

    if choice in ('annotation', None):
        annot_dir = join(project_dir, 'annotations')
        if exists(join(annot_dir)):
            shutil.rmtree(annot_dir)
        os.mkdir(annot_dir)
        shutil.copytree(join(repository_dir, 'annotation', 'input'),
                        join(annot_dir, 'input'))
        shutil.copytree(join(repository_dir, 'annotation', 'blast'),
                        join(annot_dir, 'blast'))

        analyses = ['annotate_orfs', 'annotate_microsatellites',
                    'annotate_gos', 'annotate_descriptions',
                    'annotate_orthologs', 'annotate_introns',
                    'annotate_prot_change',
                    'write_annotations', 'annotation_stats']
        run_analysis(analyses, settings_path)

        stats_fpath = join(project_dir, 'annotations', 'features', 'stats',
                            'tair7_cdna.st_nucl.txt')
        result = open(stats_fpath).read()
        expected = '''Number of sequences: 4
Sequences with description: 4
Sequences with ORF: 4
Number of ORFs: 4
Sequences with intron: 2
Number of introns: 3'''
        assert expected in result

    if not analysis_dir:
        analysis_fhand.close()

Example 118

Project: firefox-flicks Source File: __init__.py
    def verify_request(self, uri, http_method='GET', body=None,
            headers=None, require_resource_owner=True, require_verifier=False,
            require_realm=False, required_realm=None, require_callback=False):
        """Verifies a request ensuring that the following is true:

        Per `section 3.2`_ of the spec.

        - all mandated OAuth parameters are supplied
        - parameters are only supplied in one source which may be the URI
          query, the Authorization header or the body
        - all parameters are checked and validated, see comments and the
          methods and properties of this class for further details.
        - the supplied signature is verified against a recalculated one

        A ValueError will be raised if any parameter is missing,
        supplied twice or invalid. A HTTP 400 Response should be returned
        upon catching an exception.

        A HTTP 401 Response should be returned if verify_request returns False.

        `Timing attacks`_ are prevented through the use of dummy credentials to
        create near constant time verification even if an invalid credential
        is used. Early exit on invalid credentials would enable attackers
        to perform `enumeration attacks`_. Near constant time string comparison
        is used to prevent secret key guessing. Note that timing attacks can
        only be prevented through near constant time execution, not by adding
        a random delay which would only require more samples to be gathered.

        .. _`section 3.2`: http://tools.ietf.org/html/rfc5849#section-3.2
        .. _`Timing attacks`: http://rdist.root.org/2010/07/19/exploiting-remote-timing-attacks/
        .. _`enumeration attacks`: http://www.sans.edu/research/security-laboratory/article/attacks-browsing
        """
        # Only include body data from x-www-form-urlencoded requests
        headers = headers or {}
        if ("Content-Type" in headers and
                headers["Content-Type"] == CONTENT_TYPE_FORM_URLENCODED):
            request = Request(uri, http_method, body, headers)
        else:
            request = Request(uri, http_method, '', headers)

        if self.enforce_ssl and not request.uri.lower().startswith("https://"):
            raise ValueError("Insecure transport, only HTTPS is allowed.")

        signature_type, params, oauth_params = self.get_signature_type_and_params(request)

        # The server SHOULD return a 400 (Bad Request) status code when
        # receiving a request with duplicated protocol parameters.
        if len(dict(oauth_params)) != len(oauth_params):
            raise ValueError("Duplicate OAuth entries.")

        oauth_params = dict(oauth_params)
        request.signature = oauth_params.get('oauth_signature')
        request.client_key = oauth_params.get('oauth_consumer_key')
        request.resource_owner_key = oauth_params.get('oauth_token')
        request.nonce = oauth_params.get('oauth_nonce')
        request.timestamp = oauth_params.get('oauth_timestamp')
        request.callback_uri = oauth_params.get('oauth_callback')
        request.verifier = oauth_params.get('oauth_verifier')
        request.signature_method = oauth_params.get('oauth_signature_method')
        request.realm = dict(params).get('realm')

        # The server SHOULD return a 400 (Bad Request) status code when
        # receiving a request with missing parameters.
        if not all((request.signature, request.client_key,
                    request.nonce, request.timestamp,
                    request.signature_method)):
            raise ValueError("Missing OAuth parameters.")

        # OAuth does not mandate a particular signature method, as each
        # implementation can have its own unique requirements.  Servers are
        # free to implement and document their own custom methods.
        # Recommending any particular method is beyond the scope of this
        # specification.  Implementers should review the Security
        # Considerations section (`Section 4`_) before deciding on which
        # method to support.
        # .. _`Section 4`: http://tools.ietf.org/html/rfc5849#section-4
        if not request.signature_method in self.allowed_signature_methods:
            raise ValueError("Invalid signature method.")

        # Servers receiving an authenticated request MUST validate it by:
        #   If the "oauth_version" parameter is present, ensuring its value is
        #   "1.0".
        if ('oauth_version' in request.oauth_params and
            request.oauth_params['oauth_version'] != '1.0'):
            raise ValueError("Invalid OAuth version.")

        # The timestamp value MUST be a positive integer. Unless otherwise
        # specified by the server's documentation, the timestamp is expressed
        # in the number of seconds since January 1, 1970 00:00:00 GMT.
        if len(request.timestamp) != 10:
            raise ValueError("Invalid timestamp size")
        try:
            ts = int(request.timestamp)

        except ValueError:
            raise ValueError("Timestamp must be an integer")

        else:
            # To avoid the need to retain an infinite number of nonce values for
            # future checks, servers MAY choose to restrict the time period after
            # which a request with an old timestamp is rejected.
            if time.time() - ts > self.timestamp_lifetime:
                raise ValueError("Request too old, over 10 minutes.")

        # Provider specific validation of parameters, used to enforce
        # restrictions such as character set and length.
        if not self.check_client_key(request.client_key):
            raise ValueError("Invalid client key.")

        if not request.resource_owner_key and require_resource_owner:
            raise ValueError("Missing resource owner.")

        if (require_resource_owner and not require_verifier and
            not self.check_access_token(request.resource_owner_key)):
            raise ValueError("Invalid resource owner key.")

        if (require_resource_owner and require_verifier and
            not self.check_request_token(request.resource_owner_key)):
            raise ValueError("Invalid resource owner key.")

        if not self.check_nonce(request.nonce):
            raise ValueError("Invalid nonce.")

        if request.realm and not self.check_realm(request.realm):
            raise ValueError("Invalid realm. Allowed are %s" % self.realms)

        if not request.verifier and require_verifier:
            raise ValueError("Missing verifier.")

        if require_verifier and not self.check_verifier(request.verifier):
            raise ValueError("Invalid verifier.")

        # Servers receiving an authenticated request MUST validate it by:
        #   If using the "HMAC-SHA1" or "RSA-SHA1" signature methods, ensuring
        #   that the combination of nonce/timestamp/token (if present)
        #   received from the client has not been used before in a previous
        #   request (the server MAY reject requests with stale timestamps as
        #   described in `Section 3.3`_).
        # .._`Section 3.3`: http://tools.ietf.org/html/rfc5849#section-3.3
        #
        # We check this before validating client and resource owner for
        # increased security and performance, both gained by doing less work.
        if require_verifier:
            token = {"request_token": request.resource_owner_key}
        else:
            token = {"access_token": request.resource_owner_key}
        if not self.validate_timestamp_and_nonce(request.client_key,
                request.timestamp, request.nonce, **token):
                return False, request

        # The server SHOULD return a 401 (Unauthorized) status code when
        # receiving a request with invalid client credentials.
        # Note: This is postponed in order to avoid timing attacks, instead
        # a dummy client is assigned and used to maintain near constant
        # time request verification.
        #
        # Note that early exit would enable client enumeration
        valid_client = self.validate_client_key(request.client_key)
        if not valid_client:
            request.client_key = self.dummy_client

        # Callback is normally never required, except for requests for
        # a Temporary Credential as described in `Section 2.1`_
        # .._`Section 2.1`: http://tools.ietf.org/html/rfc5849#section-2.1
        if require_callback:
            valid_redirect = self.validate_redirect_uri(request.client_key,
                    request.callback_uri)
        else:
            valid_redirect = True

        # The server SHOULD return a 401 (Unauthorized) status code when
        # receiving a request with invalid or expired token.
        # Note: This is postponed in order to avoid timing attacks, instead
        # a dummy token is assigned and used to maintain near constant
        # time request verification.
        #
        # Note that early exit would enable resource owner enumeration
        if request.resource_owner_key:
            if require_verifier:
                valid_resource_owner = self.validate_request_token(
                    request.client_key, request.resource_owner_key)
                if not valid_resource_owner:
                    request.resource_owner_key = self.dummy_request_token
            else:
                valid_resource_owner = self.validate_access_token(
                    request.client_key, request.resource_owner_key)
                if not valid_resource_owner:
                    request.resource_owner_key = self.dummy_access_token
        else:
            valid_resource_owner = True

        # Note that `realm`_ is only used in authorization headers and how
        # it should be interpreted is not included in the OAuth spec.
        # However they could be seen as a scope or realm to which the
        # client has access and as such every client should be checked
        # to ensure it is authorized access to that scope or realm.
        # .. _`realm`: http://tools.ietf.org/html/rfc2617#section-1.2
        #
        # Note that early exit would enable client realm access enumeration.
        #
        # The require_realm indicates this is the first step in the OAuth
        # workflow where a client requests access to a specific realm.
        # This first step (obtaining request token) need not require a realm
        # and can then be identified by checking the require_resource_owner
        # flag and absence of realm.
        #
        # Clients obtaining an access token will not supply a realm and it will
        # not be checked. Instead the previously requested realm should be
        # transferred from the request token to the access token.
        #
        # Access to protected resources will always validate the realm but note
        # that the realm is now tied to the access token and not provided by
        # the client.
        if ((require_realm and not request.resource_owner_key) or
            (not require_resource_owner and not request.realm)):
            valid_realm = self.validate_requested_realm(request.client_key,
                    request.realm)
        elif require_verifier:
            valid_realm = True
        else:
            valid_realm = self.validate_realm(request.client_key,
                    request.resource_owner_key, uri=request.uri,
                    required_realm=required_realm)

        # The server MUST verify (Section 3.2) the validity of the request,
        # ensure that the resource owner has authorized the provisioning of
        # token credentials to the client, and ensure that the temporary
        # credentials have not expired or been used before.  The server MUST
        # also verify the verification code received from the client.
        # .. _`Section 3.2`: http://tools.ietf.org/html/rfc5849#section-3.2
        #
        # Note that early exit would enable resource owner authorization
        # verifier enumeration.
        if request.verifier:
            valid_verifier = self.validate_verifier(request.client_key,
                request.resource_owner_key, request.verifier)
        else:
            valid_verifier = True

        # Parameters to Client depend on signature method which may vary
        # for each request. Note that HMAC-SHA1 and PLAINTEXT share parameters

        request.params = filter(lambda x: x[0] not in ("oauth_signature", "realm"), params)

        # ---- RSA Signature verification ----
        if request.signature_method == SIGNATURE_RSA:
            # The server verifies the signature per `[RFC3447] section 8.2.2`_
            # .. _`[RFC3447] section 8.2.2`: http://tools.ietf.org/html/rfc3447#section-8.2.1
            rsa_key = self.get_rsa_key(request.client_key)
            valid_signature = signature.verify_rsa_sha1(request, rsa_key)

        # ---- HMAC or Plaintext Signature verification ----
        else:
            # Servers receiving an authenticated request MUST validate it by:
            #   Recalculating the request signature independently as described in
            #   `Section 3.4`_ and comparing it to the value received from the
            #   client via the "oauth_signature" parameter.
            # .. _`Section 3.4`: http://tools.ietf.org/html/rfc5849#section-3.4
            client_secret = self.get_client_secret(request.client_key)
            resource_owner_secret = None
            if require_resource_owner:
                if require_verifier:
                    resource_owner_secret = self.get_request_token_secret(
                        request.client_key, request.resource_owner_key)
                else:
                    resource_owner_secret = self.get_access_token_secret(
                        request.client_key, request.resource_owner_key)

            if request.signature_method == SIGNATURE_HMAC:
                valid_signature = signature.verify_hmac_sha1(request,
                    client_secret, resource_owner_secret)
            else:
                valid_signature = signature.verify_plaintext(request,
                    client_secret, resource_owner_secret)

        # We delay checking validity until the very end, using dummy values for
        # calculations and fetching secrets/keys to ensure the flow of every
        # request remains almost identical regardless of whether valid values
        # have been supplied. This ensures near constant time execution and
        # prevents malicious users from guessing sensitive information
        v = all((valid_client, valid_resource_owner, valid_realm,
                    valid_redirect, valid_verifier, valid_signature))
        logger = logging.getLogger("oauthlib")
        if not v:
            logger.info("[Failure] OAuthLib request verification failed.")
            logger.info("Valid client:\t%s" % valid_client)
            logger.info("Valid token:\t%s\t(Required: %s)" % (valid_resource_owner, require_resource_owner))
            logger.info("Valid realm:\t%s\t(Required: %s)" % (valid_realm, require_realm))
            logger.info("Valid callback:\t%s" % valid_redirect)
            logger.info("Valid verifier:\t%s\t(Required: %s)" % (valid_verifier, require_verifier))
            logger.info("Valid signature:\t%s" % valid_signature)
        return v, request
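
The docstring above relies on near constant time string comparison to blunt timing attacks. A minimal sketch of that idea follows; in current Python one would normally just call hmac.compare_digest rather than hand-rolling the loop.

import hmac

def constant_time_equal(a, b):
    # Compare every character regardless of where the first mismatch occurs,
    # so the running time does not reveal how much of the secret matched.
    if len(a) != len(b):
        return False
    result = 0
    for x, y in zip(a, b):
        result |= ord(x) ^ ord(y)
    return result == 0

print(hmac.compare_digest("token-abc", "token-abc"))  # True (standard library)
print(constant_time_equal("token-abc", "token-abd"))  # False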

Example 119

Project: fontbakery Source File: fontbakery-check-upstream.py
def upstream_checks():
    args = parser.parse_args()

    # set up a basic logging config
    log_format = '%(levelname)-8s %(message)s'
    logger = logging.getLogger()
    handler = logging.StreamHandler()
    formatter = logging.Formatter(log_format)
    handler.setFormatter(formatter)
    logger.addHandler(handler)

    if args.verbose == 1:
        logger.setLevel(logging.INFO)
    elif args.verbose >= 2:
        logger.setLevel(logging.DEBUG)
    else:
        logger.setLevel(logging.ERROR)

    folders_to_check = []
    for f in args.folders:
        if os.path.isdir(f):
            folders_to_check.append(f)
        else:
            fb.error("'{}' is not a valid existing folder.".format(f))
            continue

    if len(folders_to_check) == 0:
        fb.error("None of the specified paths "
                 "seem to be existing folders.")
        exit(-1)

    for folder in folders_to_check:
        directory = UpstreamDirectory(folder)

# ---------------------------------------------------------------------
        fb.new_check("Each font in family has matching glyph names?")
        glyphs = None
        failed = False
        for f in directory.get_fonts():
            try:
                font = PiFont(os.path.join(folder, f))
                if glyphs is None:
                    glyphs = font.get_glyphs()
                elif glyphs != font.get_glyphs():
                    failed = True
                    fb.error(("Font '{}' has different glyphs in"
                              " comparison to other fonts"
                              " in this family.").format(f))
                    break
            except:
                failed = True
                fb.error("Failed to load font file: '{}'".format(f))

        if failed is False:
            fb.ok("All fonts in family have matching glyph names.")

# ---------------------------------------------------------------------
        fb.new_check("Glyphs have same number"
                     " of contours across family ?")
        glyphs = {}
        failed = False
        for f in directory.get_fonts():
            font = PiFont(os.path.join(folder, f))
            for glyphcode, glyphname in font.get_glyphs():
                contours = font.get_contours_count(glyphname)
                if glyphcode in glyphs and glyphs[glyphcode] != contours:
                    failed = True
                    fb.error(("Number of contours of glyph '{}'"
                              " does not match."
                              " Expected {} contours, but actual is"
                              " {} contours").format(glyphname,
                                                     glyphs[glyphcode],
                                                     contours))
                glyphs[glyphcode] = contours
        if failed is False:
            fb.ok("Glyphs have same number of contours across family.")

# ---------------------------------------------------------------------
        fb.new_check("Glyphs have same"
                     " number of points across family ?")
        glyphs = {}
        failed = False
        for f in directory.get_fonts():
            font = PiFont(os.path.join(folder, f))
            for g, glyphname in font.get_glyphs():
                points = font.get_points_count(glyphname)
                if g in glyphs and glyphs[g] != points:
                    failed = True
                    fb.error(("Number of points of glyph '{}' does not match."
                              " Expected {} points, but actual is "
                              "{} points").format(glyphname,
                                                  glyphs[g],
                                                  points))
                glyphs[g] = points
        if failed is False:
            fb.ok("Glyphs have same"
                  " number of points across family.")

# ======================================================================
        def assertExists(folderpath, filenames, err_msg, ok_msg):
            if not isinstance(filenames, list):
                filenames = [filenames]

            missing = []
            for filename in filenames:
                fullpath = os.path.join(folderpath, filename)
                if not os.path.exists(fullpath):
                    missing.append(fullpath)
            if len(missing) > 0:
                fb.error(err_msg.format(", ".join(missing)))
            else:
                fb.ok(ok_msg)

# ---------------------------------------------------------------------
        fb.new_check("Does this font folder contain COPYRIGHT file ?")
        assertExists(f, "COPYRIGHT.txt",
                     "Font folder lacks a copyright file at '{}'",
                     "Font folder contains COPYRIGHT.txt")

# ---------------------------------------------------------------------
        fb.new_check("Does this font folder contain a DESCRIPTION file ?")
        assertExists(f, "DESCRIPTION.en_us.html",
                     "Font folder lacks a description file at '{}'",
                     "Font folder should contain DESCRIPTION.en_us.html.")

# ---------------------------------------------------------------------
        fb.new_check("Does this font folder contain licensing files?")
        assertExists(f, ["LICENSE.txt", "OFL.txt"],
                     "Font folder lacks licensing files at '{}'",
                     "Font folder should contain licensing files.")

# ---------------------------------------------------------------------
        fb.new_check("Font folder should contain FONTLOG.txt")
        assertExists(f, "FONTLOG.txt",
                     "Font folder lacks a fontlog file at '{}'",
                     "Font folder should contain a 'FONTLOG.txt' file.")

# =======================================================================
# Tests for common upstream repository files.
# note:
# This test case is not related to font processing. It only performs common
# checks (e.g. that the upstream repository contains a METADATA.pb file).
# =======================================================================

# ---------------------------------------------------------------------
        fb.new_check("Repository contains METADATA.pb file?")
        fullpath = os.path.join(f, 'METADATA.pb')
        if not os.path.exists(fullpath):
            fb.error("File 'METADATA.pb' does not exist"
                     " in root of upstream repository")
        else:
            fb.ok("Repository contains METADATA.pb file.")

# ---------------------------------------------------------------------
        fb.new_check("Copyright notice is consistent"
                     " across all fonts in this family ?")

        COPYRIGHT_REGEX = re.compile(r'Copyright.*?20\d{2}.*', re.U | re.I)
        def grep_copyright_notice(contents):
            match = COPYRIGHT_REGEX.search(contents)
            if match:
                return match.group(0).strip(',\r\n')
            return

        def lookup_copyright_notice(ufo_folder):
            current_path = ufo_folder
            try:
                contents = open(os.path.join(ufo_folder,
                                             'fontinfo.plist')).read()
                copyright = grep_copyright_notice(contents)
                if copyright:
                    return copyright
            except (IOError, OSError):
                pass

            while os.path.realpath(self.operator.path) != current_path:
                # look for all text files inside folder
                # read contents from them and compare with copyright notice
                # pattern
                files = glob.glob(os.path.join(current_path, '*.txt'))
                files += glob.glob(os.path.join(current_path, '*.ttx'))
                for filename in files:
                    with open(os.path.join(current_path, filename)) as fp:
                        match = COPYRIGHT_REGEX.search(fp.read())
                        if not match:
                            continue
                        return match.group(0).strip(',\r\n')
                current_path = os.path.join(current_path, '..')  # go up
                current_path = os.path.realpath(current_path)
            return

        ufo_dirs = []
        for root, dirs, files in os.walk(folder):
            for d in dirs:
                fullpath = os.path.join(root, d)
                if os.path.splitext(fullpath)[1].lower() == '.ufo':
                    ufo_dirs.append(fullpath)
        if len(ufo_dirs) == 0:
            fb.skip("No UFO font file found.")
        else:
            failed = False
            copyright = None
            for ufo_folder in ufo_dirs:
                current_notice = lookup_copyright_notice(ufo_folder)
                if current_notice is None:
                    continue
                if copyright is not None and current_notice != copyright:
                    failed = True
                    fb.error('"{}" != "{}"'.format(current_notice,
                                                   copyright))
                    break
                copyright = current_notice
            if failed is False:
                fb.ok("Copyright notice is consistent"
                      " across all fonts in this family.")

        fb.save_json_report("fontbakery-check-upstream-results.json")
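
The copyright-consistency check above uses a simple "grep upward" strategy: read fontinfo.plist first, then scan *.txt and *.ttx files in each parent folder until the repository root is reached, returning the first line that matches the copyright pattern. A minimal stand-alone sketch of that lookup (find_copyright_notice, start_folder and stop_folder are illustrative names, not part of the project):

import glob
import os
import re

COPYRIGHT_REGEX = re.compile(r'Copyright.*?20\d{2}.*', re.U | re.I)

def find_copyright_notice(start_folder, stop_folder):
    """Walk upward from start_folder to stop_folder and return the first
    copyright-looking line found in any *.txt or *.ttx file, else None."""
    current = os.path.realpath(start_folder)
    stop = os.path.realpath(stop_folder)
    while True:
        candidates = (glob.glob(os.path.join(current, '*.txt')) +
                      glob.glob(os.path.join(current, '*.ttx')))
        for filename in candidates:
            with open(filename) as fp:
                match = COPYRIGHT_REGEX.search(fp.read())
                if match:
                    return match.group(0).strip(',\r\n')
        parent = os.path.realpath(os.path.join(current, '..'))
        if current == stop or parent == current:
            return None
        current = parent

Comparing the strings returned for each UFO folder is then enough to flag an inconsistent family.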

Example 120

Project: cloudinit.d Source File: boot.py
def parse_commands(argv):
    global g_verbose

    u = """[options] <command> [<top level launch plan> | <run name>]
Boot and manage a launch plan
Run with the command 'commands' to see a list of all possible commands
"""
    version = "cloudinitd " + (cloudinitd.Version)
    parser = OptionParser(usage=u, version=version)

    all_opts = []
    opt = bootOpts("verbose", "v", "Print more output", 1, count=True)
    all_opts.append(opt)
    opt.add_opt(parser)
    opt = bootOpts("validate", "x", "Check that boot plan is valid before launching it.", False, flag=True)
    opt.add_opt(parser)
    all_opts.append(opt)
    opt = bootOpts("dryrun", "y", "Perform dry run on the boot plan.  The IaaS service is never contacted but all other actions are performed.  This option offers an addition level of plan validation of -x.", False, flag=True)
    opt.add_opt(parser)
    all_opts.append(opt)
    opt = bootOpts("quiet", "q", "Print no output", False, flag=True)
    opt.add_opt(parser)
    all_opts.append(opt)
    opt = bootOpts("name", "n", "Set the run name, only relevant for boot and reload (by default the system picks)", None)
    opt.add_opt(parser)
    all_opts.append(opt)
    opt = bootOpts("database", "d", "Path to the db directory", None)
    opt.add_opt(parser)
    all_opts.append(opt)
    opt = bootOpts("logdir", "f", "Path to the base log directory.", None)
    opt.add_opt(parser)
    all_opts.append(opt)
    opt = bootOpts("loglevel", "l", "Controls the level of detail in the log file", "info", vals=["debug", "info", "warn", "error"])
    opt.add_opt(parser)
    all_opts.append(opt)
    opt = bootOpts("logstack", "s", "Log stack trace information (extreme debug level)", False, flag=True)
    opt.add_opt(parser)
    all_opts.append(opt)
    opt = bootOpts("noclean", "c", "Do not delete the database, only relevant for the terminate command", False, flag=True)
    opt.add_opt(parser)
    all_opts.append(opt)
    opt = bootOpts("safeclean", "C", "Do not delete the database on failed terminate, only relevant for the terminate command", False, flag=True)
    opt.add_opt(parser)
    all_opts.append(opt)
    opt = bootOpts("kill", "k", "This option only applies to the iceage command.  When on it will terminate all VMs started with IaaS associated with this run to date.  This should be considered an extreme measure to prevent IaaS resource leaks.", False, flag=True)
    opt.add_opt(parser)
    all_opts.append(opt)
    opt = bootOpts("outstream", "O", SUPPRESS_HELP, None)
    opt.add_opt(parser)
    all_opts.append(opt)
    opt = bootOpts("remotedebug", "X", SUPPRESS_HELP, False, flag=True)
    opt.add_opt(parser)
    all_opts.append(opt)
    opt = bootOpts("output", "o", "Create an json docuement which describes the application and write it to the associated file.  Relevant for boot and status", None)
    opt.add_opt(parser)
    all_opts.append(opt)
    opt = bootOpts("globalvar", "g", "Add a variable to global variable space", None, append_list=True)
    opt.add_opt(parser)
    all_opts.append(opt)
    opt = bootOpts("globalvarfile", "G", "Add a file to global variable space", None, append_list=True)
    opt.add_opt(parser)
    all_opts.append(opt)


    homedir = os.path.expanduser("~/.cloudinitd")
    try:
        if not os.path.exists(homedir):
            os.mkdir(homedir)
            os.chmod(homedir, stat.S_IWUSR | stat.S_IXUSR | stat.S_IRUSR)
    except Exception, ex:
        print_chars(0, "Error creating cloudinit.d directort %s : %s" % (homedir, str(ex)))

    (options, args) = parser.parse_args(args=argv)

    _deal_with_cmd_line_globals(options)

    for opt in all_opts:
        opt.validate(options)

    if not options.name:
        options.name = str(uuid.uuid4()).split("-")[0]

    if options.logdir is None:
        options.logdir = os.path.expanduser("~/.cloudinitd/")

    (options.logger, logfile) = cloudinitd.make_logger(options.loglevel, options.name, logdir=options.logdir)
    if not options.database:
        dbdir = os.path.expanduser("~/.cloudinitd")
        options.database = dbdir

    if options.logstack:
        logger = logging.getLogger("stacktracelog")
        logger.propagate = False
        logger.setLevel(logging.DEBUG)
        logdir = os.path.join(options.logdir, options.name)
        if not os.path.exists(logdir):
            try:
                os.mkdir(logdir)
            except OSError:
                pass
        stacklogfile = os.path.join(logdir, "stacktrace.log")
        handler = logging.handlers.RotatingFileHandler(stacklogfile, maxBytes=100*1024*1024, backupCount=5)
        logger.addHandler(handler)
        fmt = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
        formatter = logging.Formatter(fmt)
        handler.setFormatter(formatter)


    if options.quiet:
        options.verbose = 0
    g_verbose = options.verbose

    if options.outstream:
        global g_outfile
        g_outfile = open(options.outstream, "w")
    else:
        g_outfile = None

    if options.remotedebug:
        try:
            from pydev import pydevd
            debug_cs = os.environ['CLOUDINITD_DEBUG_CS'].split(':')
            debug_host = debug_cs[0]
            debug_port = int(debug_cs[1])
            pydevd.settrace(debug_host, port=debug_port, stdoutToServer=True, stderrToServer=True)
        except ImportError, e:
            print_chars(0, "Could not import remote debugging library: %s\n" % str(e), color="red", bold=True)
        except KeyError:
            print_chars(0, "If you want to do remote debugging please set the env CLOUDINITD_DEBUG_CS to the contact string of you expected debugger.\n", color="red", bold=True)
        except:
            print_chars(0, "Please verify the format of your contact string to be <hostname>:<port>.\n", color="red", bold=True)

    global g_options
    g_options = options
    return (args, options)
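
The --logstack branch above illustrates a useful pattern: a dedicated, non-propagating logger ("stacktracelog") whose records go only to its own rotating file, keeping verbose tracebacks out of the main log. A minimal sketch of the same setup (the helper name and log directory are illustrative, not part of cloudinit.d):

import logging
import logging.handlers
import os

def make_stacktrace_logger(logdir, name="stacktracelog"):
    """Return a logger that writes only to its own rotating log file."""
    logger = logging.getLogger(name)
    logger.propagate = False          # never bubble up to the root logger
    logger.setLevel(logging.DEBUG)
    if not os.path.isdir(logdir):
        os.makedirs(logdir)
    handler = logging.handlers.RotatingFileHandler(
        os.path.join(logdir, "stacktrace.log"),
        maxBytes=100 * 1024 * 1024, backupCount=5)
    handler.setFormatter(logging.Formatter(
        "%(asctime)s - %(name)s - %(levelname)s - %(message)s"))
    logger.addHandler(handler)
    return logger

Because propagate is False, anything logged through this logger (for example via logger.exception(...) inside an except block) stays in stacktrace.log and never reaches handlers attached higher in the hierarchy.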

Example 121

Project: pan-python Source File: panxapi.py
def main():
    try:
        signal.signal(signal.SIGPIPE, signal.SIG_DFL)
    except AttributeError:
        # Windows
        pass

    set_encoding()
    options = parse_opts()

    if options['debug']:
        logger = logging.getLogger()
        if options['debug'] == 3:
            logger.setLevel(pan.xapi.DEBUG3)
        elif options['debug'] == 2:
            logger.setLevel(pan.xapi.DEBUG2)
        elif options['debug'] == 1:
            logger.setLevel(pan.xapi.DEBUG1)

#        log_format = '%(levelname)s %(name)s %(message)s'
        log_format = '%(message)s'
        handler = logging.StreamHandler()
        formatter = logging.Formatter(log_format)
        handler.setFormatter(formatter)
        logger.addHandler(handler)

    if options['cafile'] or options['capath']:
        ssl_context = create_ssl_context(options['cafile'],
                                         options['capath'])
    else:
        ssl_context = None

    try:
        xapi = pan.xapi.PanXapi(timeout=options['timeout'],
                                tag=options['tag'],
                                use_http=options['use_http'],
                                use_get=options['use_get'],
                                api_username=options['api_username'],
                                api_password=options['api_password'],
                                api_key=options['api_key'],
                                hostname=options['hostname'],
                                port=options['port'],
                                serial=options['serial'],
                                ssl_context=ssl_context)

    except pan.xapi.PanXapiError as msg:
        print('pan.xapi.PanXapi:', msg, file=sys.stderr)
        sys.exit(1)

    if options['debug'] > 2:
        print('xapi.__str__()===>\n', xapi, '\n<===',
              sep='', file=sys.stderr)

    extra_qs_used = False

    try:
        if options['keygen']:
            action = 'keygen'
            if options['ad_hoc'] is not None:
                extra_qs_used = True
            xapi.keygen(extra_qs=options['ad_hoc'])
            print_status(xapi, action)
            print_response(xapi, options)
            if (options['api_username'] and options['api_password'] and
                    options['hostname'] and options['tag']):
                # .panrc
                d = datetime.now()
                print('# %s generated: %s' % (os.path.basename(sys.argv[0]),
                                              d.strftime('%Y/%m/%d %H:%M:%S')))
                print('hostname%%%s=%s' % (options['tag'],
                                           options['hostname']))
                print('api_key%%%s=%s' % (options['tag'], xapi.api_key))
            else:
                print('API key:  "%s"' % xapi.api_key)

        if options['show']:
            action = 'show'
            if options['ad_hoc'] is not None:
                extra_qs_used = True
            xapi.show(xpath=options['xpath'],
                      extra_qs=options['ad_hoc'])
            print_status(xapi, action)
            print_response(xapi, options)

        if options['get']:
            action = 'get'
            if options['ad_hoc'] is not None:
                extra_qs_used = True
            xapi.get(xpath=options['xpath'],
                     extra_qs=options['ad_hoc'])
            print_status(xapi, action)
            print_response(xapi, options)

        if options['delete']:
            action = 'delete'
            if options['ad_hoc'] is not None:
                extra_qs_used = True
            xapi.delete(xpath=options['xpath'],
                        extra_qs=options['ad_hoc'])
            print_status(xapi, action)
            print_response(xapi, options)

        if options['edit']:
            action = 'edit'
            if options['ad_hoc'] is not None:
                extra_qs_used = True
            xapi.edit(xpath=options['xpath'],
                      element=options['element'],
                      extra_qs=options['ad_hoc'])
            print_status(xapi, action)
            print_response(xapi, options)

        if options['set']:
            action = 'set'
            if options['ad_hoc'] is not None:
                extra_qs_used = True
            xapi.set(xpath=options['xpath'],
                     element=options['element'],
                     extra_qs=options['ad_hoc'])
            print_status(xapi, action)
            print_response(xapi, options)

        if options['dynamic-update']:
            action = 'dynamic-update'
            kwargs = {
                'cmd': options['cmd'],
                }
            if options['ad_hoc'] is not None:
                extra_qs_used = True
                kwargs['extra_qs'] = options['ad_hoc']
            if len(options['vsys']):
                kwargs['vsys'] = options['vsys'][0]
            xapi.user_id(**kwargs)
            print_status(xapi, action)
            print_response(xapi, options)

        if options['move'] is not None:
            action = 'move'
            if options['ad_hoc'] is not None:
                extra_qs_used = True
            xapi.move(xpath=options['xpath'],
                      where=options['move'],
                      dst=options['dst'],
                      extra_qs=options['ad_hoc'])
            print_status(xapi, action)
            print_response(xapi, options)

        if options['rename']:
            action = 'rename'
            if options['ad_hoc'] is not None:
                extra_qs_used = True
            xapi.rename(xpath=options['xpath'],
                        newname=options['dst'],
                        extra_qs=options['ad_hoc'])
            print_status(xapi, action)
            print_response(xapi, options)

        if options['clone']:
            action = 'clone'
            if options['ad_hoc'] is not None:
                extra_qs_used = True
            xapi.clone(xpath=options['xpath'],
                       xpath_from=options['src'],
                       newname=options['dst'],
                       extra_qs=options['ad_hoc'])
            print_status(xapi, action)
            print_response(xapi, options)

        if options['override']:
            action = 'override'
            if options['ad_hoc'] is not None:
                extra_qs_used = True
            xapi.override(xpath=options['xpath'],
                          element=options['element'],
                          extra_qs=options['ad_hoc'])
            print_status(xapi, action)
            print_response(xapi, options)

        if options['export'] is not None:
            action = 'export'
            if options['ad_hoc'] is not None:
                extra_qs_used = True
            if options['pcapid'] is not None:
                xapi.export(category=options['export'],
                            pcapid=options['pcapid'],
                            search_time=options['stime'],
                            serialno=options['serial'],
                            extra_qs=options['ad_hoc'])
            else:
                xapi.export(category=options['export'],
                            from_name=options['src'],
                            extra_qs=options['ad_hoc'])
            print_status(xapi, action)
            print_response(xapi, options)
            if options['pcap_listing']:
                pcap_listing(xapi, options['export'])
            save_attachment(xapi, options)

        if options['log'] is not None:
            action = 'log'
            if options['ad_hoc'] is not None:
                extra_qs_used = True
            xapi.log(log_type=options['log'],
                     nlogs=options['nlogs'],
                     skip=options['skip'],
                     filter=options['filter'],
                     interval=options['interval'],
                     timeout=options['job_timeout'],
                     extra_qs=options['ad_hoc'])
            print_status(xapi, action)
            print_response(xapi, options)

        if options['report'] is not None:
            action = 'report'
            if options['ad_hoc'] is not None:
                extra_qs_used = True
            vsys = options['vsys'][0] if len(options['vsys']) else None
            xapi.report(reporttype=options['report'],
                        reportname=options['name'],
                        vsys=vsys,
                        interval=options['interval'],
                        timeout=options['job_timeout'],
                        extra_qs=options['ad_hoc'])
            print_status(xapi, action)
            print_response(xapi, options)

        if options['op'] is not None:
            action = 'op'
            kwargs = {
                'cmd': options['op'],
                'cmd_xml': options['cmd_xml'],
                }
            if options['ad_hoc'] is not None:
                extra_qs_used = True
                kwargs['extra_qs'] = options['ad_hoc']
            if len(options['vsys']):
                kwargs['vsys'] = options['vsys'][0]
            xapi.op(**kwargs)
            print_status(xapi, action)
            print_response(xapi, options)

        if (options['commit'] or options['commit_all']):
            if options['cmd']:
                cmd = options['cmd']
                if options['cmd_xml']:
                    cmd = xapi.cmd_xml(cmd)
            else:
                c = pan.commit.PanCommit(validate=options['validate'],
                                         force=options['force'],
                                         commit_all=options['commit_all'],
                                         merge_with_candidate=
                                         options['merge'])

                for part in options['partial']:
                    if part == 'device-and-network-excluded':
                        c.device_and_network_excluded()
                    elif part == 'policy-and-objects-excluded':
                        c.policy_and_objects_excluded()
                    elif part == 'shared-object-excluded':
                        c.shared_object_excluded()
                    elif part == 'no-vsys':
                        c.no_vsys()
                    elif part == 'vsys':
                        c.vsys(options['vsys'])

                if options['serial'] is not None:
                    c.device(options['serial'])
                if options['group'] is not None:
                    c.device_group(options['group'])
                if options['commit_all'] and options['vsys']:
                    c.vsys(options['vsys'][0])

                cmd = c.cmd()

            kwargs = {
                'cmd': cmd,
                'sync': options['sync'],
                'interval': options['interval'],
                'timeout': options['job_timeout'],
                }
            if options['ad_hoc'] is not None:
                extra_qs_used = True
                kwargs['extra_qs'] = options['ad_hoc']
            if options['commit_all']:
                kwargs['action'] = 'all'

            action = 'commit'
            xapi.commit(**kwargs)
            print_status(xapi, action)
            print_response(xapi, options)

        if not extra_qs_used and options['ad_hoc'] is not None:
            action = 'ad_hoc'
            xapi.ad_hoc(qs=options['ad_hoc'],
                        xpath=options['xpath'],
                        modify_qs=options['modify'])
            print_status(xapi, action)
            print_response(xapi, options)

    except pan.xapi.PanXapiError as msg:
        print_status(xapi, action, str(msg))
        print_response(xapi, options)
        sys.exit(1)

    sys.exit(0)
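
panxapi.py maps the number of -D options to the custom pan.xapi debug levels on the root logger and attaches a StreamHandler with a bare '%(message)s' format. The same count-to-level idea using only standard levels (the level table and option name below are assumptions for illustration, not pan-python's API):

import argparse
import logging

LEVELS = {0: logging.WARNING, 1: logging.INFO, 2: logging.DEBUG}

def setup_logging(verbosity):
    """Translate a repeated --debug count into a root-logger level."""
    root = logging.getLogger()
    root.setLevel(LEVELS.get(verbosity, logging.DEBUG))
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter('%(message)s'))
    root.addHandler(handler)

parser = argparse.ArgumentParser()
parser.add_argument('-D', '--debug', action='count', default=0)
args = parser.parse_args()
setup_logging(args.debug)
logging.getLogger(__name__).debug("only shown with -DD or more")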

Example 122

Project: mediarover Source File: episode_sort.py
def __episode_sort(broker, options, **kwargs):

	logger = logging.getLogger("mediarover.scripts.sabnzbd.episode")

	# ensure user has indicated a desired quality level if quality management is turned on
	config = broker[CONFIG_OBJECT]
	if config['tv']['library']['quality']['managed'] and config['tv']['library']['quality']['desired'] is None:
		raise ConfigurationError("when quality management is on you must indicate a desired quality level at [tv] [[quality]] desired =")

	"""
	arguments:
	  1. The final directory of the job (full path)
	  2. The name of the NZB file
	  3. User modifiable job name
	  4. Newzbin report number (may be empty)
	  5. Newzbin or user-defined category
	  6. Group that the NZB was posted in e.g. alt.binaries.x
	  7. Status
	"""
	path = kwargs['path']
	job = kwargs['job']
	nzb = kwargs['nzb']
	report_id = kwargs.get('report_id', '')
	category = kwargs.get('category', '')
	group = kwargs.get('group', '')
	status = kwargs.get('status', 0)

	tv_root = config['tv']['tv_root']

	# check to ensure we have the necessary data to proceed
	if path is None or path == "":
		raise InvalidArgument("path to completed job is missing or null")
	elif os.path.basename(path).startswith("_FAILED_") or int(status) > 0:
		if job is None or job == "":
			raise InvalidArgument("job name is missing or null")
		elif int(status) == 1:
			raise FailedDownload("download failed verification")
		elif int(status) == 2:
			raise FailedDownload("download failed unpack")
		elif int(status) == 3:
			raise FailedDownload("download failed verification and unpack")
		else:
			raise FailedDownload("download failed")

	# build dict of watched series
	# register series dictionary with dependency broker
	series_lists = build_series_lists(config)
	broker.register(WATCHED_SERIES_LIST, series_lists[0])
	broker.register(IGNORED_SERIES_LIST, series_lists[1])

	logger.info("watching %d tv show(s)", len(series_lists[0]))
	logger.debug("finished processing watched tv")

	ignored = [ext.lower() for ext in config['tv']['ignored_extensions']]

	# locate episode file in given download directory
	orig_path = None
	extension = None
	size = 0
	for dirpath, dirnames, filenames in os.walk(path):
		for file in filenames:
			# check if current file's extension is in list
			# of ignored extensions
			(name, ext) = os.path.splitext(file)
			ext = ext.lstrip(".")
			if ext.lower() in ignored:
				continue

			# get size of current file (in bytes)
			stat = os.stat(os.path.join(dirpath, file))
			if stat.st_size > size:
				orig_path = os.path.join(dirpath, file)
				extension = ext
				size = stat.st_size
				logger.debug("identified possible download: filename => %s, size => %d", file, size)

	if orig_path is None:
		broker[NOTIFICATION_OBJECT].process(SORT_FAILED_NOTIFICATION, "unable to find episode file in given download path %r" % path)
		raise FilesystemError("unable to find episode file in given download path %r" % path)
	else:
		logger.info("found download file at '%s'", orig_path)

	# retrieve the proper factory object
	in_progress = broker[METADATA_OBJECT].get_in_progress(job)
	if in_progress is None:
		if report_id == "":
			factory = broker[EPISODE_FACTORY_OBJECT]
		else:
			factory = broker[NEWZBIN_FACTORY_OBJECT]
	else:
		factory = broker[in_progress['source']]

	# build episode object using job name
	try:
		episode = factory.create_episode(job)
	except (InvalidMultiEpisodeData, MissingParameterError), e:
		raise InvalidJobTitle("unable to parse job title and create Episode object: %s" % e)

	# sanitize series name for later use
	series = episode.series
	sanitized_name = series.sanitized_name

	# check if series is being ignored
	if sanitized_name not in broker[WATCHED_SERIES_LIST] and sanitized_name in broker[IGNORED_SERIES_LIST]:
		raise ConfigurationError("unable to sort episode as parent series is being ignored")

	# move downloaded file to new location and rename
	if not options.dry_run:

		# build a filesystem episode object
		file = FilesystemEpisode(orig_path, episode, size)
		logger.debug("created %r" % file)

		# determine quality of given job if quality management is turned on
		if config['tv']['library']['quality']['managed']:
			if 'quality' in kwargs:
				episode.quality = kwargs['quality']
			else:
				if in_progress is None:
					if config['tv']['library']['quality']['guess']:
						episode.quality = guess_quality_level(config, file.extension, episode.quality)
					else:
						logger.info("unable to find quality information in metadata db, assuming default quality level!")
				else:
					episode.quality = in_progress['quality']

		# find available disk with enough space for newly downloaded episode
		free_root = find_disk_with_space(series, tv_root, file.size) 
		if free_root is None:
			raise FilesystemError("unable to find disk with enough space to sort episode!")

		# make sure series folder exists on that disk
		series_dir = None
		for dir in series.path:
			if dir.startswith(free_root):
				series_dir = dir 
				break
		else:
			series_dir = os.path.join(free_root, series.format(config['tv']['template']['series']))
			try:
				os.makedirs(series_dir)
			except OSError, (e):
				logger.error("unable to create directory %r: %s", series_dir, e.strerror)
				raise FilesystemError(e.strerror)
			else:
				logger.debug("created series directory '%s'", series_dir)
			series.path.append(series_dir)

		dest_dir = series.locate_season_folder(episode.season, series_dir)
		if dest_dir is None:
			
			# get season folder (if desired)
			dest_dir = os.path.join(series_dir, file.format_season())

			if not os.path.isdir(dest_dir):
				try:
					os.makedirs(dest_dir)
				except OSError, (e):
					logger.error("unable to create directory %r: %s", dest_dir, e.strerror)
					raise FilesystemError(e.strerror)
				else:
					logger.debug("created season directory '%s'", dest_dir)

		# build list of episode(s) (either SingleEpisode or DailyEpisode) that are desirable
		# ie. missing or of more desirable quality than current offering
		desirables = series.filter_undesirables(episode)
		additional = None
		if len(desirables) == 0:
			logger.warning("duplicate episode detected!")
			additional = "[%s].%s" % (episode.quality, strftime("%Y%m%d%H%M"))

		# generate new filename for current episode
		new_path = os.path.join(dest_dir, file.format(additional))

		logger.info("attempting to move episode file...")
		try:
			shutil.move(orig_path, new_path)
		except OSError, (e):
			logger.error("unable to move downloaded episode to '%s': %s", new_path, e.strerror)
			raise FilesystemError(e.strerror)
	
		# move successful, cleanup download directory
		else:
			logger.info("downloaded episode moved from '%s' to '%s'", orig_path, new_path)

			# update episode and set new filesystem path
			file.path = new_path

			# remove job from in_progress
			if config['tv']['library']['quality']['managed']:
				broker[METADATA_OBJECT].delete_in_progress(job)

			if additional is None:

				# mark series episode list stale
				series.mark_episode_list_stale()

				# update metadata db with newly sorted episode information
				if config['tv']['library']['quality']['managed']:
					for ep in desirables:
						broker[METADATA_OBJECT].add_episode(ep)

				remove = []
				files = series.find_episode_on_disk(episode)

				# remove any duplicate or multipart episodes on disk that are no longer
				# needed...
				logger.info("checking filesystem for duplicate or multipart episode redundancies...")
				for found in files:
					object = found.episode
					if hasattr(object, "episodes"):
						for ep in object.episodes:
							list = series.find_episode_on_disk(ep, False)
							if len(list) == 0: # individual part not found on disk, can't delete this multi
								break
						else:
							remove.append(found)

					elif file != found:
						remove.append(found)

				# if series isn't being archived, delete oldest episode on disk if series episode count
				# exceeds the indicated value
				# NOTE: if the number of series episodes exceeds the indicated amount by more than one
				# display a warning message indicating as much. DO NOT remove more than one file!
				# We don't want to accidentally wipe out an entire series due to improper configuration!
				if sanitized_name in config['tv']['filter'] and config['tv']['filter'][sanitized_name]['archive'] is False:
					limit = config['tv']['filter'][sanitized_name]['episode_limit']
					if limit > 0:
						count = len(series.files)
						if count > limit:
							if count > limit + 1:
								logger.warning("the series '%s' has more episodes on disk than the configured limit of %d. Only 1 will be removed" % (series, limit))
							else:
								logger.info("removing oldest episode...")
							series.delete_oldest_episode_file()

				if len(remove) > 0:
					series.delete_episode_files(*remove)
					
			# clean up download directory by removing all remaining files
			try:
				shutil.rmtree(path)
			except (shutil.Error), e:
				raise CleanupError("unable to remove download directory '%s'" % e)
			else:
				logger.info("removing download directory '%s'" % path)

Example 123

Project: alignak Source File: arbiterdaemon.py
    def load_monitoring_config_file(self):  # pylint: disable=R0915
        """Load main configuration file (alignak.cfg)::

        * Read all files given in the -c parameters
        * Read all .cfg files in cfg_dir
        * Read all files in cfg_file
        * Create objects (Arbiter, Module)
        * Set HTTP links info (ssl etc)
        * Load its own modules
        * Execute read_configuration hook (for arbiter modules)
        * Create all objects (Service, Host, Realms ...)
        * "Compile" configuration (Linkify, explode, apply inheritance, fill default values ...)
        * Cut conf into parts and prepare it for sending

        :return: None
        """
        if self.verify_only:
            # Force the global logger at INFO level
            alignak_logger = logging.getLogger("alignak")
            alignak_logger.setLevel(logging.INFO)
            logger.info("Arbiter is in configuration check mode")
            logger.info("-----")

        logger.info("Loading configuration")
        # REF: doc/alignak-conf-dispatching.png (1)
        buf = self.conf.read_config(self.config_files)
        raw_objects = self.conf.read_config_buf(buf)
        # Maybe conf is already invalid
        if not self.conf.conf_is_correct:
            err = "***> One or more problems was encountered while processing the config files..."
            logger.error(err)
            # Display found warnings and errors
            self.conf.show_errors()
            sys.exit(err)

        logger.info("I correctly loaded the configuration files")

        # First we need to get arbiters and modules
        # so we can ask them for objects
        self.conf.create_objects_for_type(raw_objects, 'arbiter')
        self.conf.create_objects_for_type(raw_objects, 'module')

        self.conf.early_arbiter_linking()

        # Search which Arbiterlink I am
        for arb in self.conf.arbiters:
            if arb.get_name() in ['Default-Arbiter', self.config_name]:
                arb.need_conf = False
                self.myself = arb
                self.is_master = not self.myself.spare
                if self.is_master:
                    logger.info("I am the master Arbiter: %s", arb.get_name())
                else:
                    logger.info("I am a spare Arbiter: %s", arb.get_name())
                # export this data to our statsmgr object :)
                statsd_host = getattr(self.conf, 'statsd_host', 'localhost')
                statsd_port = getattr(self.conf, 'statsd_port', 8125)
                statsd_prefix = getattr(self.conf, 'statsd_prefix', 'alignak')
                statsd_enabled = getattr(self.conf, 'statsd_enabled', False)
                statsmgr.register(arb.get_name(), 'arbiter',
                                  statsd_host=statsd_host, statsd_port=statsd_port,
                                  statsd_prefix=statsd_prefix, statsd_enabled=statsd_enabled)

                # Set myself as alive ;)
                self.myself.alive = True
            else:  # not me
                arb.need_conf = True

        if not self.myself:
            sys.exit("Error: I cannot find my own Arbiter object (%s), I bail out. "
                     "To solve this, please change the arbiter_name parameter in "
                     "the arbiter configuration file (certainly arbiter-master.cfg) "
                     "with the value '%s'."
                     " Thanks." % (self.config_name, socket.gethostname()))

        # Ok it's time to load the module manager now!
        self.load_modules_manager()
        # we request the instances without them being *started*
        # (for those that are concerned ("external" modules):
        # we will *start* these instances after we have been daemonized (if requested)
        self.do_load_modules(self.myself.modules)

        # Call modules that manage this read configuration pass
        self.hook_point('read_configuration')

        # Call modules get_objects() to load new objects from them
        # (example modules: glpi, mongodb, dummy_arbiter)
        self.load_modules_configuration_objects(raw_objects)

        # Resume standard operations
        self.conf.create_objects(raw_objects)

        # Maybe conf is already invalid
        if not self.conf.conf_is_correct:
            err = "***> One or more problems was encountered while processing the config files..."
            logger.error(err)
            # Display found warnings and errors
            self.conf.show_errors()
            sys.exit(err)

        # Manage all post-conf modules
        self.hook_point('early_configuration')

        # Load all file triggers
        self.conf.load_triggers()

        # Create Template links
        self.conf.linkify_templates()

        # All inheritances
        self.conf.apply_inheritance()

        # Explode between types
        self.conf.explode()

        # Implicit inheritance for services
        self.conf.apply_implicit_inheritance()

        # Fill default values
        self.conf.fill_default()

        # Remove templates from config
        self.conf.remove_templates()

        # Overrides specific service instances properties
        self.conf.override_properties()

        # Linkify objects to each other
        self.conf.linkify()

        # applying dependencies
        self.conf.apply_dependencies()

        # Hacking some global parameters inherited from Nagios to create
        # on the fly some Broker modules like for status.dat parameters
        # or nagios.log one if there are none already available
        self.conf.hack_old_nagios_parameters()

        # Raise warning about currently unmanaged parameters
        if self.verify_only:
            self.conf.warn_about_unmanaged_parameters()

        # Explode global conf parameters into Classes
        self.conf.explode_global_conf()

        # set our own timezone and propagate it to other satellites
        self.conf.propagate_timezone_option()

        # Look for business rules, and create the dep tree
        self.conf.create_business_rules()
        # And link them
        self.conf.create_business_rules_dependencies()

        # Warn about useless parameters in Alignak
        if self.verify_only:
            self.conf.notice_about_useless_parameters()

        # Manage all post-conf modules
        self.hook_point('late_configuration')

        # Configuration is correct?
        self.conf.is_correct()

        # Maybe some elements were not wrong, so we must clean if possible
        self.conf.clean()

        # If the conf is not correct, we must get out now (do not try to split the configuration)
        if not self.conf.conf_is_correct:
            err = "Configuration is incorrect, sorry, I bail out"
            logger.error(err)
            # Display found warnings and errors
            self.conf.show_errors()
            sys.exit(err)

        # REF: doc/alignak-conf-dispatching.png (2)
        logger.info("Splitting hosts and services into parts")
        self.confs = self.conf.cut_into_parts()

        # The conf can be incorrect here if the cut into parts see errors like
        # a realm with hosts and no schedulers for it
        if not self.conf.conf_is_correct:
            err = "Configuration is incorrect, sorry, I bail out"
            logger.error(err)
            # Display found warnings and errors
            self.conf.show_errors()
            sys.exit(err)

        logger.info('Things look okay - No serious problems were detected '
                    'during the pre-flight check')

        # Clean objects of temporary/unnecessary attributes for live work:
        self.conf.clean()

        # Exit if we are just here for config checking
        if self.verify_only:
            logger.info("Arbiter checked the configuration")
            # Display found warnings and errors
            self.conf.show_errors()
            sys.exit(0)

        if self.analyse:
            self.launch_analyse()
            sys.exit(0)

        # Some properties need to be "flatten" (put in strings)
        # before being sent, like realms for hosts for example
        # BEWARE: after the cutting part, because we stringify some properties
        self.conf.prepare_for_sending()

        # Ignore daemon configuration parameters (port, log, ...) in the monitoring configuration
        # It's better to use daemon default parameters rather than host found in the monitoring
        # configuration...

        self.accept_passive_unknown_check_results = BoolProp.pythonize(
            getattr(self.myself, 'accept_passive_unknown_check_results', '0')
        )

        #  We need to set self.host & self.port to be used by do_daemon_init_and_start
        self.host = self.myself.address
        self.port = self.myself.port

        logger.info("Configuration Loaded")

        # Still a last configuration check because some things may have changed when
        # we prepared the configuration for sending
        if not self.conf.conf_is_correct:
            err = "Configuration is incorrect, sorry, I bail out"
            logger.error(err)
            # Display found warnings and errors
            self.conf.show_errors()
            sys.exit(err)

        # Display found warnings and errors
        self.conf.show_errors()
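
In check mode the arbiter fetches the package logger by name, logging.getLogger("alignak"), and forces it to INFO for the duration of the verification. A small hedged helper showing the same level-override idea as a context manager (the helper itself is not part of alignak):

import logging
from contextlib import contextmanager

@contextmanager
def forced_level(logger_name, level):
    """Temporarily force a named logger to a given level, then restore it."""
    logger = logging.getLogger(logger_name)
    previous = logger.level
    logger.setLevel(level)
    try:
        yield logger
    finally:
        logger.setLevel(previous)

# e.g. keep a package at INFO while running a configuration check
with forced_level("alignak", logging.INFO) as log:
    log.info("configuration check running")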

Example 124

Project: mediarover Source File: set_quality.py
def __set_quality(broker, options, series_name=None, season_num=None, episode_num=None):
	logger = logging.getLogger("mediarover")

	help = """
Options:
(y)es    - process series and specify episode quality
(n)o     - skip to next series
(q)uit   - exit application"""

	series_help = """
Series Options:
(l)ow    - mark episodes as being of low quality
(m)edium - mark episodes as being of medium quality
(h)igh   - mark episodes as being of high quality"""

	config = broker[CONFIG_OBJECT]

	# build dict of watched series
	# register series dictionary with dependency broker
	series_lists = build_series_lists(config, process_aliases=False)
	broker.register(WATCHED_SERIES_LIST, series_lists[0])

	# build list of series to iterate over
	if series_name:
		names = [Series.sanitize_series_name(series_name)]
		if names[0] not in broker[WATCHED_SERIES_LIST]:
			print "ERROR: Unable to find series matching %r" % series_name
			exit(2)
		else:
			if season_num is not None:
				season_num = int(season_num)
			if episode_num is not None:
				episode_num = int(episode_num)
	else:
		names = broker[WATCHED_SERIES_LIST].keys()
		names.sort()

	displayed_series_help = 0
	quality_levels = [LOW, MEDIUM, HIGH]

	if options.series_prompt:
		print help

	for sanitized in names:
		series = broker[WATCHED_SERIES_LIST][sanitized]

		if options.series_prompt:
			answer = __query_user("Process '%s'? ([y]/n/q/?)" % series.name, ['y','n','q','?'], 'y', help)
			if answer == 'n':
				continue
			elif answer == 'q':
				exit(0)
		else:
			# ATTENTION: get files list now so that processing statement follows logging code 
			# resulting from filesystem scan
			series.files
			print "Processing '%s'..." % series.name

		# determine default quality for current series
		if config['tv']['filter'][sanitized]['desired_quality'] is not None:
			default = config['tv']['filter'][sanitized]['desired_quality']
		else:
			default = config['tv']['library']['quality']['desired']

		# if quality guessing is on, populate extension lists (if they weren't 
		# provided by user)
		if config['tv']['library']['quality']['managed'] and config['tv']['library']['quality']['guess']:
			if len(options.low) == 0:
				options.low = config['tv']['library']['quality']['extension'][LOW]
			if len(options.medium) == 0:
				options.medium = config['tv']['library']['quality']['extension'][MEDIUM]
			if len(options.high) == 0:
				options.high = config['tv']['library']['quality']['extension'][HIGH]

		low = list()
		medium = list()
		high = list()

		avg_sizes = dict()
		for file in series.files:
			if season_num:
				if file.episode.season != season_num:
					continue
				elif episode_num and file.episode.episode != episode_num:
						continue

			if hasattr(file.episode, 'episodes'):
				parts = file.episode.episodes
			else:
				parts = [file.episode]

			# first things first: check if user has chosen a quality level
			# for files with the current extension
			ext = file.extension
			if ext in options.low:
				low.extend(parts)
			elif ext in options.medium:
				medium.extend(parts)
			elif ext in options.high:
				high.extend(parts)

			# guess not, group files by average file size
			else:
				size = file.size
				for avg_size in avg_sizes.keys():
					difference = abs(float(avg_size)/float(size/len(parts)) - 1)

					# if the difference is 10% or less, update average value
					# and add current part(s) to list
					if difference <= 0.1:
						# add current file size to running total
						avg_sizes[avg_size]['total_size'] += size
						avg_sizes[avg_size]['episodes'].extend(parts)

						# calculate new average size and update dict
						new_avg = avg_sizes[avg_size]['total_size'] / len(avg_sizes[avg_size]['episodes'])
						avg_sizes[new_avg] = avg_sizes[avg_size]
						del avg_sizes[avg_size]
						break
					else:
						continue

				# no comparable size in current list, add and move on
				else:
					avg_sizes[size] = {'total_size': size, 'episodes': parts}

		# build quality prompt
		quality_prompt = list()
		for level in quality_levels:
			if level == default:
				quality_prompt.append("[%c]" % level[0])
			else:
				quality_prompt.append(level[0])
		quality_prompt.extend(['q','?'])
		quality_prompt = "/".join(quality_prompt)

		if not displayed_series_help:
			displayed_series_help += 1
			print series_help

		sizes = avg_sizes.keys()
		sizes.sort()
		for avg_size in sizes:
			approx_size = avg_size / (1024 * 1024)
			print "Found %d episode(s) with average size of %dMB" % (len(avg_sizes[avg_size]['episodes']), approx_size)
			answer = __query_user("Quality? (%s)" % quality_prompt, ['l','m','h','q','?'], default, series_help)
			if answer == 'q':
				exit(1)
			elif answer == 'l':
				quality = LOW
			elif answer == 'm':
				quality = MEDIUM
			else:
				quality = HIGH

			# set quality for all episodes in given size list
			for episode in avg_sizes[avg_size]['episodes']:
				episode.quality = quality
				broker[METADATA_OBJECT].add_episode(episode)

		# set quality for all episodes that were matched by extension
		extension_msg = "Setting quality of '%s' for %d episode(s) with extension found in %s"
		if len(low):
			quality = LOW
			print extension_msg % (quality, len(low), options.low)
			for episode in low:
				episode.quality = quality
				broker[METADATA_OBJECT].add_episode(episode)

		if len(medium):
			quality = MEDIUM
			print extension_msg % (quality, len(medium), options.medium)
			for episode in medium:
				episode.quality = quality
				broker[METADATA_OBJECT].add_episode(episode)

		if len(high):
			quality = HIGH
			print extension_msg % (quality, len(high), options.high)
			for episode in high:
				episode.quality = quality
				broker[METADATA_OBJECT].add_episode(episode)

	print "DONE"

Example 125

Project: mirror Source File: main.py
def start_daemon():
    """Entry point for daemon script"""
    import mirror.common
    mirror.common.setup_translations()

    # Setup the argument parser
    parser = OptionParser(usage="%prog [options]")
    parser.add_option("-v", "--version", action="callback",
                      callback=version_callback,
                      help=_("Show program's version number and exit"))
    parser.add_option("-D", "--do-not-daemonize", dest="donot",
                      help=_("Do not daemonize (default is daemonize)"),
                      action="store_true",
                      default=False)
    parser.add_option("-c", "--config", dest="config",
                      help=_("Set the config location directory"),
                      action="store", type="str")
    parser.add_option("-P", "--pidfile", dest="pidfile",
                      help=_("Use pidfile to store process id"),
                      action="store", type="str")
    parser.add_option("-u", "--user", dest="user",
                      help=_("User to switch to. Need to start as root"),
                      action="store", type="str")
    parser.add_option("-g", "--group", dest="group",
                      help=_("Group to switch to. Need to start as root"),
                      action="store", type="str")
    parser.add_option("-l", "--logfile", dest="logfile",
                      help=_("Set the logfile location"),
                      action="store", type="str")
    parser.add_option("-L", "--loglevel", dest="loglevel",
                      help=_("Set the log level: none, info, warning, error, "
                             "critical, debug"),
                      action="store", type="str")
    parser.add_option("-q", "--quiet", dest="quiet",
                      help=_("Sets the log level to 'none', this is the same as `-L none`"),
                      action="store_true", default=False)
    parser.add_option("-r", "--rotate-logs", dest="rotate_logs",
                      help=_("Rotate logfiles."),
                      action="store_true", default=False)
    parser.add_option("--profile", dest="profile",
                      help=_("Profiles the daemon"),
                      action="store_true", default=False)
    parser.add_option("-t", "--tasks", dest="list_tasks",
                      help=_("List current tasks in scheduler's queue"),
                      action="store_true", default=False)
    parser.add_option("-s", "--signal", dest="signal",
                      help=_("Send signal to mirrord: stop, reload"),
                      action="store", type="str")

    # Get the options and args from the OptionParser
    (options, args) = parser.parse_args()

    if options.list_tasks:
        sys.exit(mirror.console.list_task_queue())

    if options.signal:
        sys.exit(mirror.console.signal_process(options.signal))

    if options.quiet:
        options.loglevel = "none"
    if not options.loglevel:
        options.loglevel = "info"

    logfile_mode = 'w'
    if options.rotate_logs:
        logfile_mode = 'a'

    import mirror.configmanager
    if options.config:
        if not mirror.configmanager.set_config_dir(options.config):
            print("There was an error setting the config dir! Exiting..")
            sys.exit(1)

    # Sets the options.logfile to point to the default location
    def set_logfile():
        if not options.logfile:
            options.logfile = os.path.join(mirror.common.DEFAULT_MIRRORD_LOG_DIR,
                                           "mirrord.log")

    set_logfile()

    # Setup the logger
    try:
        # Try to make the logfile's directory if it doesn't exist
        os.makedirs(os.path.abspath(os.path.dirname(options.logfile)))
    except:
        pass

    # Setup the logger
    if os.path.isfile(options.logfile):
        logfile_mode = 'a'
    mirror.log.setupLogger(level=options.loglevel,
                           filename=options.logfile,
                           filemode=logfile_mode)
    if options.donot:
        mirror.log.addStreamHandler(level=options.loglevel)

    # Writes out a pidfile if necessary
    def write_pidfile():
        if options.pidfile:
            open(options.pidfile, "wb").write("%s\n" % os.getpid())

    # If the do not daemonize is set, then we just skip the forking
    if not options.donot:
        if os.fork():
            # We've forked and this is now the parent process, so die!
            os._exit(0)
        os.setsid()
        # Do second fork
        if os.fork():
            os._exit(0)

    # Change to root directory
    os.chdir("/")
    # Write pid file before change gid and uid
    write_pidfile()

    if options.group:
        if not options.group.isdigit():
            import grp
            options.group = grp.getgrnam(options.group)[2]
        os.setgid(options.group)
    if options.user:
        if not options.user.isdigit():
            import pwd
            options.user = pwd.getpwnam(options.user)[2]
        os.setuid(options.user)

    # Redirect stdin, stdout, stderr to /dev/null ...
    # if mirrord is running as daemon
    if not options.donot:
        fp = open("/dev/null", 'r+')
        os.dup2(fp.fileno(), sys.stdin.fileno())
        os.dup2(fp.fileno(), sys.stdout.fileno())
        os.dup2(fp.fileno(), sys.stderr.fileno())
        fp.close()

    import logging
    log = logging.getLogger(__name__)

    try:
        mirror.common.check_mirrord_running(
            mirror.configmanager.get_config_dir("mirrord.pid"))
        # keep a reference to fp so the file is not closed (by __exit__())
        # and the lock is not released; the pid is also written into it
        fp = mirror.common.lock_file(
            mirror.configmanager.get_config_dir("mirrord.pid"))
    except mirror.error.MirrordRunningError as e:
        log.error(e)
        log.error("You cannot run multiple daemons with the same config directory set.")
        sys.exit(1)
    except Exception as e:
        log.exception(e)
        sys.exit(1)

    import mirror.handler
    signal.signal(signal.SIGTERM, mirror.handler.shutdown_handler)
    signal.signal(signal.SIGQUIT, mirror.handler.shutdown_handler)
    signal.signal(signal.SIGINT,  mirror.handler.shutdown_handler)
    signal.signal(signal.SIGCHLD, mirror.handler.sigchld_handler)
    signal.signal(signal.SIGHUP,  mirror.handler.reload_handler)

    if options.profile:
        import hotshot
        hsp = hotshot.Profile(mirror.configmanager.get_config_dir("mirrord.profile"))
        hsp.start()
    try:
        log.info("Starting mirror daemon...")
        from mirror.daemon import MirrorDaemon
        daemon = MirrorDaemon(options, args)
        daemon.start()
    except Exception as e:
        log.exception(e)
        sys.exit(1)
    finally:
        if options.profile:
            hsp.stop()
            hsp.close()
            import hotshot.stats
            stats = hotshot.stats.load(mirror.configmanager.get_config_dir("mirrord.profile"))
            stats.strip_dirs()
            stats.sort_stats("time", "calls")
            stats.print_stats(400)
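
After daemonizing, the script obtains its logger with logging.getLogger(__name__), so records carry the module path and reuse whatever handlers mirror.log.setupLogger installed earlier. The idiom on its own, with logging.basicConfig standing in for the project's setup (a minimal sketch, not mirror's actual configuration):

import logging

logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s %(name)s %(levelname)s %(message)s")

log = logging.getLogger(__name__)  # '__main__' when run as a script

try:
    raise RuntimeError("mirrord is already running")  # illustrative error
except RuntimeError as err:
    log.error(err)
    log.error("You cannot run multiple daemons with the same config directory set.")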

Example 126

Project: pkgbuilder Source File: wrapper.py
def wrapper(source='AUTO'):
    """A wrapper for pacman and PKGBUILDer."""
    # Because I need to work with -S and nothing else, I am going to use
    # regular expressions on the argument list.  Sorry.
    if source == 'AUTO':
        argst = sys.argv[1:]
    else:
        argst = source

    log = logging.getLogger('pbwrapper')
    if '--debug' in argst:
        DS.debugmode()
    elif '--debugpb' in argst:
        DS.debugmode()
        argst.remove("--debugpb")
        sys.argv.remove("--debugpb")

    log.info('*** PBwrapper v{0} (PKGBUILDer '
             '{1})'.format(__wrapperversion__, __version__))

    if (('-L' in argst) or ('--unlock' in argst) or (re.search('-[a-zA-Z]*L',
                                                               ' '.join(argst))
                                                     is not None)):
        try:
            os.remove('/var/lib/pacman/db.lck')
            exit(0)
        except OSError as e:
            DS.fancy_error('[-L --unlock] ' + e.strerror)
            exit(1)

    if (('-S' in argst) or ('--sync' in argst) or (re.search('-[a-zA-Z]*S',
                                                             ' '.join(argst))
                                                   is not None)):
        # The user has requested -S.
        # -l/--list is not in *a because it takes over the whole package
        # list, and that is a workaround.
        log.debug('Got -S, preparing to parse arguments...')
        pacmanshort = ['f', 'g', 'l', 'p', 'q']
        pacmanlong = ['asdeps', 'asexplicit', 'dbonly', 'downloadonly',
                      'force', 'groups', 'list', 'needed', 'nodeps',
                      'noprogressbar', 'noscriptlet', 'print', 'quiet',
                      'verbose']
        pacmanshorta = ['b', 'r']
        pacmanlonga = ['arch', 'cachedir', 'config', 'dbpath', 'gpgdir',
                       'hookdir', 'ignoregroup', 'logfile',
                       'print-format', 'root', 'assume-installed']

        pbshort = ['D', 'C', 'F']
        pblong = ['fetch', 'userfetch', 'vcsupgrade', 'novcsupgrade', 'colors',
                  'nocolors', 'depcheck', 'nodepcheck', 'validation',
                  'novalidation', 'install', 'buildonly', 'pgpcheck',
                  'skippgpcheck', 'deep', 'shallow', 'noclean', 'nodebug']

        commonshort = ['S', 'd', 'i', 's', 'v', 'w']
        commonlong = ['debug', 'info', 'search', 'sync', 'confirm',
                      'noconfirm']
        commonlongl = ['ignore']
        commonshortc = ['c', 'y', 'u']
        commonlongc = ['clean', 'refresh', 'sysupgrade']

        ignoredshort = ['L']
        ignoredlong = ['unlock']

        allpacman = pacmanshort + pacmanlong + pacmanshorta + pacmanlonga
        allpb = pbshort + pblong  # + pbshorta + pblonga
        allcommon = commonshort + commonlong + commonlongl + commonshortc + commonlongc

        allshort = pacmanshort + pbshort + commonshort
        alllong = pacmanlong + pblong + commonlong

        allshortc = commonshortc
        alllongc = commonlongc
        allcountable = allshortc + alllongc

        parser = argparse.ArgumentParser(add_help=False, usage=_('%(prog)s'
                                         ' <operation> [...]'),
                                         argument_default=argparse.SUPPRESS)
        parser.add_argument('-h', '--help', action='store_true',
                            default=False, dest='help')
        parser.add_argument('-V', '--version', action='store_true',
                            default=False, dest='version')

        for i in allshort + ignoredshort:
            parser.add_argument('-' + i, action='store_true', default=False,
                                dest=i)

        for i in alllong + ignoredlong:
            parser.add_argument('--' + i, action='store_true', default=False,
                                dest=i)

        for i in allshortc:
            parser.add_argument('-' + i, action='count', default=0, dest=i)

        for i in alllongc:
            parser.add_argument('--' + i, action='count', default=0, dest=i)

        for i in pacmanshorta:
            parser.add_argument('-' + i, action='store', nargs=1,
                                default='NIL', dest=i)

        for i in pacmanlonga:
            parser.add_argument('--' + i, action='store', nargs=1,
                                default='NIL', dest=i)

        for i in commonlongl:
            parser.add_argument('--' + i, action='append', dest=i)

        parser.add_argument('pkgnames', action='store', nargs='*')

        # Starting actual work.

        if source != 'AUTO':
            args = parser.parse_args(source)
        else:
            args = parser.parse_args()

        log.debug('Arguments parsed.  {0}'.format(args.__dict__))

        try:
            pkgnames = args.pkgnames
        except AttributeError:
            pkgnames = []

        execargs = []
        pacargs = []
        pbargs = []

        for k, v in args.__dict__.items():
            if v is not False:
                # == This argument has been provided.
                if k in allcountable:
                    # == This is a countable argument.
                    if k in allshortc:
                        for x in range(v):
                            execargs.append('-' + k)
                    elif k in alllongc:
                        for x in range(v):
                            execargs.append('--' + k)
                elif v:
                    # == This argument doesn't have a value.
                    if k in allshort:
                        execargs.append('-' + k)
                    elif k in alllong:
                        execargs.append('--' + k)

        for i in execargs:
            if i[1:] in allshort + allshortc:
                s = i[1:]
            elif i[2:] in alllong + alllongc:
                s = i[2:]
            else:
                raise SanityError('argparse broke')

            if s in allcommon:
                pacargs.append(i)
                pbargs.append(i)

            if s in allpacman:
                pacargs.append(i)
            elif s in allpb:
                pbargs.append(i)

        for k, v in args.__dict__.items():
            if v is not False and v != 'NIL':
                # == This argument can take values and has one.
                if k in pacmanshorta:
                    pacargs.append('-' + k)
                    pacargs.extend(v)
                elif k in pacmanlonga:
                    pacargs.append('--' + k)
                    pacargs.extend(v)
                elif k in commonlongl:
                    for vi in v:
                        pacargs.append('--' + k)
                        pacargs.append(vi)
                        pbargs.append('--' + k)
                        pbargs.append(vi)

        log.debug('Preparing to run pacman and/or PKGBUILDer...')

        if args.search or args.s:
            log.debug('Got -s.')
            if args.pkgnames:
                log.info('Running pacman.')
                DS.run_command([DS.paccommand] + pacargs + pkgnames)
                log.info('Running pkgbuilder (pkgbuilder.__main__.main()).')
                pbmain(pbargs + pkgnames)
            else:
                log.info('Nothing to do — args.pkgnames is empty.')

            exit()
        elif args.l or args.list:
            log.debug('Got -l.')
            log.info('Running pacman.')
            DS.run_command([DS.paccommand] + pacargs + pkgnames)
            exit()
        elif args.u or args.sysupgrade:
            log.debug('Got -u.')
            log.info('Running pacman.')
            DS.sudo([DS.paccommand] + pacargs)
            log.info('Running pkgbuilder (pkgbuilder.__main__.main()).')
            pbmain(pbargs, quit=False)
        elif args.y or args.refresh:
            log.debug('Got -y.')
            log.info('Running pacman.')
            DS.sudo([DS.paccommand] + pacargs)
        elif args.help:
            show_help()
            exit()
        elif args.version:
            show_version()
            exit()

        log.debug('Generating AUR packages list...')
        pbpkgnames = []
        info = pkgbuilder.utils.info(pkgnames)

        names = [i.name for i in info]
        pbpkgnames = [n for n in pkgnames if n in names]
        pacmanpkgnames = [i for i in pkgnames if i not in pbpkgnames]

        droppable = ['-u', '-y', '--sysupgrade', '--refresh']

        pacargs = [i for i in pacargs if i not in droppable]
        pbargs = [i for i in pbargs if i not in droppable]
        log.debug('Generated.')

        if pacmanpkgnames != []:
            log.info('Running pacman.')
            DS.sudo([DS.paccommand] + pacargs + pacmanpkgnames)
        else:
            log.info('No repo packages in the list.')

        if pbpkgnames != []:
            log.info('Running pkgbuilder (pkgbuilder.main.main()).')
            pbmain(pbargs + pbpkgnames)
        else:
            log.info('No AUR packages in the list.')

        sanitycheck = pacmanpkgnames + pbpkgnames
        if len(sanitycheck) != len(pkgnames):
            log.info('Running pacman due to failed sanity check.')
            sanityargs = [item for item in pkgnames if (item not in
                          sanitycheck)]
            DS.sudo([DS.paccommand] + pacargs + sanityargs)
    elif (('-F' in argst) or ('--fetch' in argst) or
          ('--userfetch' in argst) or
          ('-X' in argst) or ('--runtx' in argst) or
          (re.search('-[a-zA-Z]*F', ' '.join(argst)) is not None) or
          (re.search('-[a-zA-Z]*X', ' '.join(argst)) is not None)):
        # pkgbuilder -F, --fetch / --userfetch / -X, --runtx.
        pbmain(argst)
    elif ('-h' in argst) or ('--help' in argst):
        show_help()
    elif ('-V' in argst) or ('--version' in argst):
        show_version()
    elif 'UTshibboleet' in argst:
        if argst[0] == 'unittests' and argst[1] == 'UTshibboleet':
            # http://xkcd.com/806/
            pass
        else:
            print('Please don’t use the reserved UTshibboleet argument.')

    elif (('-Q' in argst) or ('--query' in argst) or (re.search(
            '-[a-zA-Z]*Q', ''.join(argst)) is not None)):
        DS.run_command([DS.paccommand] + argst)
    else:
        DS.sudo([DS.paccommand] + argst)
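
The dispatcher above logs through a module-level `log` object whose `logging.getLogger` call sits outside the excerpt. The snippet below is a minimal sketch of that pattern; the logger name 'pkgbuilder' and the `dispatch` helper are assumptions made for illustration, not taken from the project source.

import logging

# Minimal sketch, assuming a module-level named logger; the name 'pkgbuilder'
# is hypothetical and chosen only for illustration.
log = logging.getLogger('pkgbuilder')
log.addHandler(logging.NullHandler())  # let the embedding application configure output

def dispatch(argst):
    """Trace at DEBUG, report user-visible progress at INFO."""
    log.debug('Arguments received: %s', argst)
    if not argst:
        log.info('Nothing to do: the argument list is empty.')
        return
    log.info('Running pacman.')

Keeping the logger at module level means every branch of the argument dispatcher shares one name and one configuration, which is the behaviour the excerpt relies on.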

Example 127

Project: edm2016 Source File: online_cross_validation.py
def get_online_rps(learner, test_student_idx, test_student_time_idx=None,
                   learn_node_keys=(THETAS_KEY, ), compute_first_interaction_rps=False,
                   copy_learner=True, **test_learner_kwargs):
    """Compute the probability of correct of the `test response` node's interactions with an online
    training scheme (train learners on interactions up to but not including interaction i, and make
    a prediction of probability of correct for i).
    NOTE: We assume that for each student, interactions are sorted by time.

    :param BayesNetLearner learner: The learner with the original train and test response nodes.
    :param np.ndarray test_student_idx: Index indicating the student associated with each
        interaction in learner.nodes['test responses'].
    :param np.ndarray|None test_student_time_idx: unique event identifiers for each interaction
        in the test set. If None, it is assumed that each interaction occurs at a new time, and
        can be used to predict all following interactions.  If supplied, adjacent interactions
        with identical identifiers will both be in the test or the train set, and never split
        during online validation.
    :param tuple(str) learn_node_keys: The keys of learner nodes whose variables should be adapted
        at each iteration.
    :param bool compute_first_interaction_rps: Whether to compute the RP of a student's first
        interaction (thetas optimized under the prior only).  Default is no, in which case NaNs
        are returned for first interaction RPs.
    :param bool copy_learner: whether to operate on a copy of the learner, which avoids mutating
        the learner but incurs a memory cost of copying all the nodes' data.  Set to False if the
        learner is disposable, in which case the data of all nodes in ``learn_node_keys`` will be
        modified, and the theta node will be pruned in-place for efficient optimization.
    :param test_learner_kwargs: Optional keyword arguments that will be passed to the constructor
        of BayesNetLearner for the test learner.
    :return: Probabilities of correct (RPs) for each interaction (not including the first, which is
        set to np.nan) derived from a learner trained on the previous interactions.
    :rtype: np.ndarray
    """
    if not (test_learner_kwargs and 'callback' in test_learner_kwargs):
        test_learner_kwargs['callback'] = ConvergenceCallback()

    # get learner logger to set desired levels
    learner_logger = logging.getLogger('rnn_prof.irt')

    # get iteration count, an array indicating the online validation iteration associated with each
    # interaction in the set of test responses
    iteration_count = _idx_to_occurrence_ordinal(test_student_idx, test_student_time_idx)
    max_interactions = np.max(iteration_count) + 1

    # get corrects and parameter indices that will be sub-indexed during online validation
    correct = learner.nodes[TEST_RESPONSES_KEY].data
    theta_idx = learner.nodes[TEST_RESPONSES_KEY].cpd.index_map(THETAS_KEY)
    item_idx = learner.nodes[TEST_RESPONSES_KEY].cpd.index_map(OFFSET_COEFFS_KEY)
    cpd_class = learner.nodes[TEST_RESPONSES_KEY].cpd.__class__

    num_items = learner.nodes[OFFSET_COEFFS_KEY].cpd.dim
    # initialize arrays for storing all online validation prob corrects
    prob_correct = np.nan * np.empty_like(iteration_count, dtype=float)

    if copy_learner:
        # make copies of the nodes
        new_nodes = []
        for node in BayesNetGraph(learner.nodes).topological_sorting():
            # insert a copy
            LOGGER.debug("adding {}".format(node.name))
            new_nodes.append(node.copy())

            # replace child-parent references in the previously inserted nodes to point to this one
            for prev_node in new_nodes:
                for par_key, par_node in prev_node.param_nodes.iteritems():
                    if par_node is node:
                        prev_node.param_nodes[par_key] = new_nodes[-1]
                        LOGGER.debug("relinking %s's node's %s param",
                                     prev_node.name, par_key)

        test_learner = BayesNetLearner(new_nodes, **test_learner_kwargs)
    else:
        test_learner = learner

    # turn off learning for nodes that should be constant
    for node in test_learner.nodes.itervalues():
        if node.name not in learn_node_keys:
            LOGGER.debug("node {} parameters will not be learned".format(node.name))
            node.solver_pars.learn = False
            node.converged = True

    theta_node = test_learner.nodes[THETAS_KEY]
    # get the thetas that depend (directly or through the prior precision) on the interactions
    # in orig_test_node
    thetas_to_keep = theta_node.cpd.get_dependent_vars(np.unique(theta_idx))
    # trim theta node in place and remap theta_idx to the newly trimmed cpd
    theta_idx = theta_node.subset(thetas_to_keep, inplace=True)[theta_idx]
    num_thetas = theta_node.cpd.dim

    # quiet the online learner logger from INFO to WARNING (leave DEBUG alone)
    orig_log_level = learner_logger.getEffectiveLevel()
    if orig_log_level == logging.INFO:
        learner_logger.setLevel(logging.WARNING)

    for k in np.arange(0 if compute_first_interaction_rps else 1, max_interactions):
        test_idx = (iteration_count == k)
        train_idx = (iteration_count < k)
        # remove from train index students not in test_idx (whose interactions are all processed)
        train_idx &= np.in1d(test_student_idx, test_student_idx[test_idx])

        test_learner = BayesNetLearner(test_learner.nodes.values(), **test_learner_kwargs)
        # make new train/test nodes by splitting the original test node's correct into train/test
        for node_name, idx in ((TRAIN_RESPONSES_KEY, train_idx), (TEST_RESPONSES_KEY, test_idx)):
            if k == 0 and node_name == TRAIN_RESPONSES_KEY:
                # when on first interaction, make training node not empty (to avoid errors); it
                # will be labeled held-out below
                idx = test_idx
            param_nodes = test_learner.nodes[node_name].param_nodes
            test_learner.nodes[node_name] = Node(name=node_name,
                                                 data=correct[idx],
                                                 solver_pars=SolverPars(learn=False),
                                                 cpd=cpd_class(item_idx=item_idx[idx],
                                                               theta_idx=theta_idx[idx],
                                                               num_thetas=num_thetas,
                                                               num_items=num_items),
                                                 param_nodes=param_nodes,
                                                 held_out=((node_name == TEST_RESPONSES_KEY) or
                                                           k == 0))

        # run this iteration's learner and save the probability of correct for test responses
        test_learner.learn()
        iter_test_node = test_learner.nodes[TEST_RESPONSES_KEY]
        prob_correct[test_idx] = iter_test_node.cpd.compute_prob_true(**iter_test_node.param_data)

        if np.any(np.isnan(prob_correct[test_idx])):
            LOGGER.warn("NaN value in prob correct; iteration=%d" % k)
        if not k % PRINT_FREQ:
            num_train_interactions = np.sum(train_idx)
            num_test_interactions = np.sum(test_idx)
            msg = "Processed histories up to length %d (max=%d): %d train and %d test interactions."
            LOGGER.info(msg % (k, max_interactions, num_train_interactions, num_test_interactions))

    # reset the learner logger
    learner_logger.setLevel(orig_log_level)

    return prob_correct
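
Example 127 fetches an already-configured logger by name ('rnn_prof.irt'), remembers its effective level, raises it from INFO to WARNING around the inner optimization loop, and restores it afterwards. Below is a minimal sketch of that quiet-then-restore pattern written as a context manager; the helper name `quiet_logger` is hypothetical and not part of the project.

import logging
from contextlib import contextmanager

@contextmanager
def quiet_logger(name, quiet_level=logging.WARNING):
    """Temporarily raise a logger's threshold, then restore the previous level."""
    logger = logging.getLogger(name)        # same name -> same logger instance
    previous = logger.getEffectiveLevel()
    if previous == logging.INFO:            # only quiet it if it was chatty, as in the example
        logger.setLevel(quiet_level)
    try:
        yield logger
    finally:
        logger.setLevel(previous)

# usage: suppress per-iteration INFO messages during a long loop
with quiet_logger('rnn_prof.irt'):
    pass  # run the online-validation iterations here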

Example 128

Project: LibrERP Source File: translate.py
def extend_trans_generate(lang, modules, cr):
    logger = logging.getLogger('i18n')
    dbname = cr.dbname

    pool = pooler.get_pool(dbname)
    trans_obj = pool.get('ir.translation')
    model_data_obj = pool.get('ir.model.data')
    uid = 1
    l = pool.obj_list()
    l.sort()

    query = 'SELECT name, model, res_id, module'    \
            '  FROM ir_model_data'

    query_models = """SELECT m.id, m.model, imd.module
            FROM ir_model AS m, ir_model_data AS imd
            WHERE m.id = imd.res_id AND imd.model = 'ir.model' """

    if 'all_installed' in modules:
        query += ' WHERE module IN ( SELECT name FROM ir_module_module WHERE state = \'installed\') '
        query_models += " AND imd.module in ( SELECT name FROM ir_module_module WHERE state = 'installed') "
    query_param = None
    if 'all' not in modules:
        query += ' WHERE module IN %s'
        query_models += ' AND imd.module in %s'
        query_param = (tuple(modules),)
    query += ' ORDER BY module, model, name'
    query_models += ' ORDER BY module, model'

    cr.execute(query, query_param)

    _to_translate = []
    def push_translation(module, type, name, id, source):
        tuple = (module, source, name, id, type)
        if source and tuple not in _to_translate:
            _to_translate.append(tuple)

    def encode(s):
        if isinstance(s, unicode):
            return s.encode('utf8')
        return s

    for (xml_name,model,res_id,module) in cr.fetchall():
        module = encode(module)
        model = encode(model)
        xml_name = "%s.%s" % (module, encode(xml_name))

        if not pool.get(model):
            logger.error("Unable to find object %r", model)
            continue

        exists = pool.get(model).exists(cr, uid, res_id)
        if not exists:
            logger.warning("Unable to find object %r with id %d", model, res_id)
            continue
        obj = pool.get(model).browse(cr, uid, res_id)

        if model=='ir.ui.view':
            d = etree.XML(encode(obj.arch))
            for t in trans_parse_view(d):
                push_translation(module, 'view', encode(obj.model), 0, t)
        elif model=='ir.actions.wizard':
            service_name = 'wizard.'+encode(obj.wiz_name)
            if netsvc.Service._services.get(service_name):
                obj2 = netsvc.Service._services[service_name]
                for state_name, state_def in obj2.states.iteritems():
                    if 'result' in state_def:
                        result = state_def['result']
                        if result['type'] != 'form':
                            continue
                        name = "%s,%s" % (encode(obj.wiz_name), state_name)

                        def_params = {
                            'string': ('wizard_field', lambda s: [encode(s)]),
                            'selection': ('selection', lambda s: [encode(e[1]) for e in ((not callable(s)) and s or [])]),
                            'help': ('help', lambda s: [encode(s)]),
                        }

                        # export fields
                        if not result.has_key('fields'):
                            logger.warning("res has no fields: %r", result)
                            continue
                        for field_name, field_def in result['fields'].iteritems():
                            res_name = name + ',' + field_name

                            for fn in def_params:
                                if fn in field_def:
                                    transtype, modifier = def_params[fn]
                                    for val in modifier(field_def[fn]):
                                        push_translation(module, transtype, res_name, 0, val)

                        # export arch
                        arch = result['arch']
                        if arch and not isinstance(arch, UpdateableStr):
                            d = etree.XML(arch)
                            for t in trans_parse_view(d):
                                push_translation(module, 'wizard_view', name, 0, t)

                        # export button labels
                        for but_args in result['state']:
                            button_name = but_args[0]
                            button_label = but_args[1]
                            res_name = name + ',' + button_name
                            push_translation(module, 'wizard_button', res_name, 0, button_label)

        elif model=='ir.model.fields':
            try:
                field_name = encode(obj.name)
            except AttributeError, exc:
                logger.error("name error in %s: %s", xml_name, str(exc))
                continue
            objmodel = pool.get(obj.model)
            if not objmodel or not field_name in objmodel._columns:
                continue
            field_def = objmodel._columns[field_name]

            name = "%s,%s" % (encode(obj.model), field_name)
            push_translation(module, 'field', name, 0, encode(field_def.string))

            if field_def.help:
                push_translation(module, 'help', name, 0, encode(field_def.help))

            if field_def.translate:
                ids = objmodel.search(cr, uid, [])
                obj_values = objmodel.read(cr, uid, ids, [field_name])
                for obj_value in obj_values:
                    res_id = obj_value['id']
                    if obj.name in ('ir.model', 'ir.ui.menu'):
                        res_id = 0
                    model_data_ids = model_data_obj.search(cr, uid, [
                        ('model', '=', model),
                        ('res_id', '=', res_id),
                        ])
                    if not model_data_ids:
                        push_translation(module, 'model', name, 0, encode(obj_value[field_name]))

            if hasattr(field_def, 'selection') and isinstance(field_def.selection, (list, tuple)):
                for dummy, val in field_def.selection:
                    push_translation(module, 'selection', name, 0, encode(val))

        elif model=='ir.actions.report.xml':
            name = encode(obj.report_name)
            fname = ""
            ##### Changes for Aeroo ######
            if obj.report_type == 'aeroo':
                trans_ids = trans_obj.search(cr, uid, [('type', '=', 'report'),('res_id', '=', obj.id)])
                for t in trans_obj.read(cr, uid, trans_ids, ['name','src']):
                    push_translation(module, "report", t['name'], xml_name, t['src'])
            ##############################
            else:
                if obj.report_rml:
                    fname = obj.report_rml
                    parse_func = trans_parse_rml
                    report_type = "report"
                elif obj.report_xsl:
                    fname = obj.report_xsl
                    parse_func = trans_parse_xsl
                    report_type = "xsl"
                if fname and obj.report_type in ('pdf', 'xsl'):
                    try:
                        report_file = tools.file_open(fname)
                        try:
                            d = etree.parse(report_file)
                            for t in parse_func(d.iter()):
                                push_translation(module, report_type, name, 0, t)
                        finally:
                            report_file.close()
                    except (IOError, etree.XMLSyntaxError):
                        logger.exception("couldn't export translation for report %s %s %s", name, report_type, fname)

        for field_name,field_def in obj._table._columns.items():
            if field_def.translate:
                name = model + "," + field_name
                try:
                    trad = getattr(obj, field_name) or ''
                except:
                    trad = ''
                push_translation(module, 'model', name, xml_name, encode(trad))

        # End of data for ir.model.data query results

    cr.execute(query_models, query_param)

    def push_constraint_msg(module, term_type, model, msg):
        # Check presence of __call__ directly instead of using
        # callable() because it will be deprecated as of Python 3.0
        if not hasattr(msg, '__call__'):
            push_translation(module, term_type, model, 0, encode(msg))

    for (model_id, model, module) in cr.fetchall():
        module = encode(module)
        model = encode(model)

        model_obj = pool.get(model)

        if not model_obj:
            logging.getLogger("i18n").error("Unable to find object %r", model)
            continue

        for constraint in getattr(model_obj, '_constraints', []):
            push_constraint_msg(module, 'constraint', model, constraint[1])

        for constraint in getattr(model_obj, '_sql_constraints', []):
            push_constraint_msg(module, 'sql_constraint', model, constraint[2])

    # parse source code for _() calls
    def get_module_from_path(path, mod_paths=None):
        if not mod_paths:
            # First, construct a list of possible paths
            def_path = os.path.abspath(os.path.join(tools.config['root_path'], 'addons'))     # default addons path (base)
            ad_paths= map(lambda m: os.path.abspath(m.strip()),tools.config['addons_path'].split(','))
            mod_paths=[def_path]
            for adp in ad_paths:
                mod_paths.append(adp)
                if not os.path.isabs(adp):
                    mod_paths.append(adp)
                elif adp.startswith(def_path):
                    mod_paths.append(adp[len(def_path)+1:])
        for mp in mod_paths:
            if path.startswith(mp) and (os.path.dirname(path) != mp):
                path = path[len(mp)+1:]
                return path.split(os.path.sep)[0]
        return 'base'   # files that are not in a module are considered as being in 'base' module

    modobj = pool.get('ir.module.module')
    installed_modids = modobj.search(cr, uid, [('state', '=', 'installed')])
    installed_modules = map(lambda m: m['name'], modobj.read(cr, uid, installed_modids, ['name']))

    root_path = os.path.join(tools.config['root_path'], 'addons')

    apaths = map(os.path.abspath, map(str.strip, tools.config['addons_path'].split(',')))
    if root_path in apaths:
        path_list = apaths
    else :
        path_list = [root_path,] + apaths

    # Also scan these non-addon paths
    for bin_path in ['osv', 'report' ]:
        path_list.append(os.path.join(tools.config['root_path'], bin_path))

    logger.debug("Scanning modules at paths: ", path_list)

    mod_paths = []
    join_dquotes = re.compile(r'([^\\])"[\s\\]*"', re.DOTALL)
    join_quotes = re.compile(r'([^\\])\'[\s\\]*\'', re.DOTALL)
    re_dquotes = re.compile(r'[^a-zA-Z0-9_]_\([\s]*"(.+?)"[\s]*?\)', re.DOTALL)
    re_quotes = re.compile(r'[^a-zA-Z0-9_]_\([\s]*\'(.+?)\'[\s]*?\)', re.DOTALL)

    def export_code_terms_from_file(fname, path, root, terms_type):
        fabsolutepath = join(root, fname)
        frelativepath = fabsolutepath[len(path):]
        module = get_module_from_path(fabsolutepath, mod_paths=mod_paths)
        is_mod_installed = module in installed_modules
        if (('all' in modules) or (module in modules)) and is_mod_installed:
            logger.debug("Scanning code of %s at module: %s", frelativepath, module)
            src_file = tools.file_open(fabsolutepath, subdir='')
            try:
                code_string = src_file.read()
            finally:
                src_file.close()
            if module in installed_modules:
                frelativepath = str("addons" + frelativepath)
            ite = re_dquotes.finditer(code_string)
            code_offset = 0
            code_line = 1
            for i in ite:
                src = i.group(1)
                if src.startswith('""'):
                    assert src.endswith('""'), "Incorrect usage of _(..) function (should contain only literal strings!) in file %s near: %s" % (frelativepath, src[:30])
                    src = src[2:-2]
                else:
                    src = join_dquotes.sub(r'\1', src)
                # try to count the lines from the last pos to our place:
                code_line += code_string[code_offset:i.start(1)].count('\n')
                # now, since we did a binary read of a python source file, we
                # have to expand pythonic escapes like the interpreter does.
                src = src.decode('string_escape')
                push_translation(module, terms_type, frelativepath, code_line, encode(src))
                code_line += i.group(1).count('\n')
                code_offset = i.end() # we have counted newlines up to the match end

            ite = re_quotes.finditer(code_string)
            code_offset = 0 #reset counters
            code_line = 1
            for i in ite:
                src = i.group(1)
                if src.startswith("''"):
                    assert src.endswith("''"), "Incorrect usage of _(..) function (should contain only literal strings!) in file %s near: %s" % (frelativepath, src[:30])
                    src = src[2:-2]
                else:
                    src = join_quotes.sub(r'\1', src)
                code_line += code_string[code_offset:i.start(1)].count('\n')
                src = src.decode('string_escape')
                push_translation(module, terms_type, frelativepath, code_line, encode(src))
                code_line += i.group(1).count('\n')
                code_offset = i.end() # we have counted newlines up to the match end

    for path in path_list:
        logger.debug("Scanning files of modules at %s", path)
        for root, dummy, files in tools.osutil.walksymlinks(path):
            for fname in itertools.chain(fnmatch.filter(files, '*.py')):
                export_code_terms_from_file(fname, path, root, 'code')
            for fname in itertools.chain(fnmatch.filter(files, '*.mako')):
                export_code_terms_from_file(fname, path, root, 'report')


    out = [["module","type","name","res_id","src","value"]] # header
    _to_translate.sort()
    # translate strings marked as to be translated
    for module, source, name, id, type in _to_translate:
        trans = trans_obj._get_source(cr, uid, name, type, lang, source)
        out.append([module, type, name, id, source, encode(trans) or ''])

    return out
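
Example 128 calls `logging.getLogger('i18n')` once at the top of the function and again, by the same name, deep inside the model loop; both calls return the same logger object, so handlers and level only need to be configured once. A small sketch of that identity guarantee follows; the model name passed in at the end is hypothetical and used only for illustration.

import logging

logger = logging.getLogger('i18n')

def report_missing(model):
    # getLogger with the same name returns the identical logger object,
    # so this call sees whatever handlers and level were configured elsewhere.
    logging.getLogger('i18n').error('Unable to find object %r', model)

assert logging.getLogger('i18n') is logger
logging.basicConfig(level=logging.INFO)   # configure the root logger once, at start-up
report_missing('ir.missing.model')        # hypothetical model name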

Example 129

Project: multidrive Source File: onedrivestorageservice.py
    def upload(self, file_path, destination=None, modified_time=None,
               create_folder=False, overwrite=False):
        logger = logging.getLogger("multidrive")
        logger.info("Upload {} OneDrive Storage Service".format(file_path))

        file_name = os.path.basename(file_path)
        full_remote_path = file_name
        if destination is not None:
            if self.is_folder(destination) is False:
                if create_folder is False:
                    raise RuntimeError("Destination folder not valid")
                self.create_folder(destination)

            if destination.endswith('/') is False:
                destination = destination+"/"
            full_remote_path = destination+full_remote_path

        file_size = os.path.getsize(file_path)

        payload = {}
        payload["@name.conflictBehavior"] = "fail"
        if overwrite is True:
            payload["@name.conflictBehavior"] = "replace"

        # Special case for empty file
        if file_size == 0:
            url = (self.onedrive_url_root+"/drive/root:/" +
                   urllib.parse.quote(full_remote_path)+":/content")
            if self.__app_folder__:
                url = (self.onedrive_url_root+"/drive/special/approot:/" +
                       urllib.parse.quote(full_remote_path)+":/content")

            response = self.http_request(url=url,
                                         request_type=RequestType.PUT,
                                         status_codes=(requests.codes.ok,
                                                       requests.codes.created,
                                                       requests.codes.accepted,
                                                       requests.codes.
                                                       conflict),
                                         data="",
                                         params=payload,
                                         use_access_token=True,
                                         action_string="Upload")

            if response.status_code in (requests.codes.conflict,):
                raise RuntimeError("File already exists")
            logger.info("Upload complete")
            return

        NUM_ATTEMPTS = 5
        cur_attempt = 1
        while cur_attempt <= NUM_ATTEMPTS:
            headers = {'Content-Type': "application/json"}

            url = (self.onedrive_url_root+"/drive/root:/" +
                   urllib.parse.quote(full_remote_path) +
                   ":/upload.createSession")

            if self.__app_folder__:
                url = (self.onedrive_url_root+"/drive/special/approot:/" +
                       urllib.parse.quote(full_remote_path) +
                       ":/upload.createSession")

            response = self.http_request(url=url,
                                         request_type=RequestType.POST,
                                         status_codes=(requests.codes.ok,),
                                         headers=headers,
                                         data=json.dumps(payload),
                                         use_access_token=True,
                                         action_string="Upload",
                                         timeout=120)

            data = json.loads(response.text)

            url = data['uploadUrl']

            CHUNK_SIZE = 10*1024*1024
            chunk_start = 0
            chunk_end = CHUNK_SIZE - 1
            if chunk_end+1 >= file_size:
                chunk_end = file_size - 1
            response = None

            # TODO: Deal with insufficient Storage error (507)
            # TODO: Deal with other 400/500 series errors

            cur_file_hash = hashlib.sha1()
            with open(file_path, "rb") as f:
                retry_chunk = False
                while chunk_start < file_size:
                    if retry_chunk is False:
                        chunk_data = f.read(CHUNK_SIZE)
                        cur_file_hash.update(chunk_data)
                    retry_chunk = False
                    headers = {}
                    headers['Content-Length'] = str(file_size)
                    headers['Content-Range'] = ('bytes {}-{}/{}'.
                                                format(chunk_start,
                                                       chunk_end,
                                                       file_size))
                    status_codes = (requests.codes.ok,
                                    requests.codes.created,
                                    requests.codes.accepted,
                                    requests.codes.conflict,
                                    requests.codes.range_not_satisfiable)
                    # TODO: Further testing on some errors
                    # err_codes = (requests.codes.server_error,)
                    response = self.http_request(url=url,
                                                 request_type=RequestType.PUT,
                                                 headers=headers,
                                                 status_codes=status_codes,
                                                 # severe_status_codes=err_codes,
                                                 data=chunk_data,
                                                 use_access_token=True,
                                                 action_string="Upload Chunk",
                                                 timeout=120)

                    # TODO: Check for proper response based on
                    # location in file uploading.

                    if response.status_code in (requests.codes.conflict,):
                        raise RuntimeError("File Already Exists")

                    if response.status_code in (requests.codes.
                                                range_not_satisfiable,):
                        logger.warning("Got error {}".format(response.text))
                        logger.warning("DEBUG: Getting upload status")
                        logger.warning("Current Chunk  Start: " +
                                       str(chunk_start))
                        upload_status = self.get_upload_status(url)
                        logger.warning("Status: " + str(upload_status))
                        if 'nextExpectedRanges' in upload_status:
                            new_start_range = int(upload_status
                                                  ['nextExpectedRanges']
                                                  [0].split("-")[0])
                            valid_chunk = (new_start_range > chunk_start and
                                           new_start_range < chunk_end)
                            if (valid_chunk):
                                difference = new_start_range-chunk_start
                                chunk_start = new_start_range
                                chunk_data = chunk_data[difference:]
                                retry_chunk = True
                                logger.warning("Attempting to retry part of "
                                               "current chunk")
                                logger.warning("new chunk start: " +
                                               str(chunk_start))
                                logger.warning("new chunk end: " +
                                               str(chunk_end))
                                continue
                            break

                    print("{} of {} bytes sent, {}% complete"
                          .format(str(chunk_end+1),
                                  str(file_size),
                                  "%.2f" % (float(chunk_end+1)
                                            / float(file_size)*100)),
                          end='\r')
                    chunk_start = chunk_end+1
                    chunk_end += CHUNK_SIZE
                    if chunk_end+1 >= file_size:
                        chunk_end = file_size - 1

            logger.info(response.status_code)
            logger.info(response.text)

            data = json.loads(response.text)
            if ('file' in data and 'hashes' in data['file']):
                server_hash = data['file']['hashes']['sha1Hash']
            else:
                server_hash = "None"

            logger.info("SHA1 local:"+cur_file_hash.hexdigest())
            logger.info("SHA1 remote:"+server_hash)
            if (cur_file_hash.hexdigest() == server_hash.lower()):
                print("\nUpload of file {} complete".
                      format(os.path.basename(file_name)))
                return
            cur_attempt += 1
            logger.warning("Hash of uploaded file does "
                           "not match server.  Attempting again")
            # If it doesn't match, we need to replace the existing file now
            payload["@name.conflictBehavior"] = "replace"

        if (cur_file_hash.hexdigest() != server_hash.lower()):
            raise RuntimeError("Hash of uploaded file does "
                               "not match server.")

Example 130

Project: Metadator Source File: Metadator.py
    def __init__(self):
        u"""
        Main window constructor
        Creates 1 frame and 2 labeled subframes
        """
        # first: the log
        # see: http://sametmax.com/ecrire-des-logs-en-python/
        self.logger = logging.getLogger()
        self.logger.setLevel(logging.DEBUG)  # all errors will be get
        log_form = logging.Formatter('%(asctime)s || %(levelname)s || %(message)s')
        logfile = RotatingFileHandler('Metadator_LOG.log', 'a', 5000000, 1)
        logfile.setLevel(logging.DEBUG)
        logfile.setFormatter(log_form)
        self.logger.addHandler(logfile)
        self.logger.info('\n\t ======== Metadator ========')  # first messages
        self.logger.info('Starting the UI')

        # checking the path to GDAL in the path
        if "GDAL_DATA" not in env.keys():
            try:
                gdal.SetConfigOption(str('GDAL_DATA'),
                                     str(path.abspath(r'data/gdal')))
            except:
                print("Oups! Something has gone wrong...\
                      see: https://github.com/Guts/Metadator/issues/21")
        else:
            pass

        # basics settings
        Tk.__init__(self)           # constructor of parent graphic class
        self.title(u'Metadator {0}'.format(MetadatorVersion))
        self.style = Style()        # more friendly windows style
        if opersys == 'win32':
            self.logger.info('Op. system: {0}'.format(platform.platform()))
            self.iconbitmap('Metadator.ico')    # windows icon
            self.uzer = env.get(u'USERNAME')
        elif opersys == 'linux2':
            self.logger.info('Op. system: {0}'.format(platform.platform()))
            self.uzer = env.get(u'USER')
            icon = Image("photo", file=r'data/img/metadator.gif')
            self.call('wm', 'iconphoto', self._w, icon)
            self.minsize(580, 100)
            self.style.theme_use('clam')
        elif opersys == 'darwin':
            self.logger.info('Op. system: {0}'.format(platform.platform()))
            self.uzer = env.get(u'USER')
        else:
            self.logger.warning('Operating system not tested')
            self.logger.info('Op. system: {0}'.format(platform.platform()))
        self.resizable(width=False, height=False)
        self.focus_force()

        self.logger.info('GDAL version: {}'.format(gdal.__version__))

        # variables
        self.def_rep = ""       # folder to search for
        self.def_lang = 'FR'    # language to start
        self.def_doc = IntVar()     # to export into Word
        self.def_xls = IntVar()     # to export into Excel 2003
        self.def_xml = IntVar()     # to export into ISO 19139
        self.def_cat = IntVar()     # to merge all output Word files
        self.def_odt = IntVar()     # to export into OpenDocumentText
        self.def_dict = IntVar()    # to make a dictionary of data
        self.def_kass = IntVar()    # to handle field name case sensitive
        self.def_stat = IntVar()    # to active/disable stats fields
        self.li_pro = []            # list for profiles in language selected
        self.li_shp = []            # list for shapefiles path
        self.li_tab = []            # list for MapInfo tables path
        self.num_folders = 0        # number of folders explored
        self.today = strftime("%Y-%m-%d")   # date of the day
        self.dico_layer = OD()      # dictionary about layer properties
        self.dico_profil = OD()     # dictionary from profile selected
        self.dico_fields = OD()     # dictionary for fields information
        self.dico_rekur = OD()      # dictionary of recurring attributes
        self.dico_err = OD()     # errors list
        self.dico_help = OD()                # dictionary of help texts
        li_lang = [lg for lg in listdir(r'locale')]   # available languages
        self.blabla = OD()      # texts dictionary

        # GUI fonts
        ft_tit = tkFont.Font(family="Times", size=10, weight=tkFont.BOLD)

        # fillfulling
        self.load_settings()
        self.load_texts(self.def_lang)
        self.li_profiles(self.def_lang)
        self.li_rekurs(self.def_lang)
        self.recup_help(self.def_lang)

        # Tabs
        self.nb = Notebook(self)
        self.tab_globals = Frame(self.nb)   # tab_id = 0
        self.tab_options = Frame(self.nb)   # tab_id = 1
        self.tab_attribs = Frame(self.nb)   # tab_id = 2
        self.nb.add(self.tab_globals,
                    text=self.blabla.get('gui_tab1'), padding=3)
        self.nb.add(self.tab_options,
                    text=self.blabla.get('gui_tab2'), padding=3)
        self.nb.add(self.tab_attribs,
                    text=self.blabla.get('gui_tab3'), padding=3)
        self.logger.info('UI created')

                ### Tab 1: global
        # Frames
        self.FrPath = Labelframe(self.tab_globals,
                                 name='main',
                                 text=self.blabla.get('tab1_fr1'))
        self.FrProg = Labelframe(self.tab_globals,
                                 name='progression',
                                 text=self.blabla.get('tab1_frprog'))
            ## Frame 1
        # target folder
        self.labtarg = Label(self.FrPath, text=self.blabla.get('tab1_path'))
        self.target = Entry(self.FrPath, width=25)
        self.browsetarg = Button(self.FrPath,       # browse button
                                 text=self.blabla.get('tab1_browse'),
                                 command=lambda: self.setpathtarg(),
                                 takefocus=True)
        self.browsetarg.focus_force()               # force the focus on
        self.profil = Label(self.FrPath, text=self.blabla.get('tab1_prof'))
        # profiles switcher
        self.ddl_profil = Combobox(self.FrPath, values=self.li_pro, width=5)
        self.ddl_profil.current(0)
        self.ddl_profil.bind("<<ComboboxSelected>>", self.select_profil)
        # widgets placement
        self.labtarg.grid(row=1, column=1, columnspan=1,
                          sticky=N + S + W + E, padx=2, pady=8)
        self.target.grid(row=1, column=2, columnspan=1,
                         sticky=N + S + W + E, padx=2, pady=8)
        self.browsetarg.grid(row=1, column=3,
                             sticky=N + S + W + E, padx=2, pady=8)
        self.profil.grid(row=2, column=1,
                         sticky=N + S + W + E, padx=2, pady=8)
        self.ddl_profil.grid(row=2, column=2, sticky=W + E + N + S,
                             columnspan=2, padx=2, pady=8)

        # tooltips
        InfoBulle(self.target, message=self.dico_help.get(30)[1])
        InfoBulle(self.browsetarg, message=self.dico_help.get(30)[1])
        InfoBulle(self.ddl_profil, message=self.dico_help.get(31)[1])

            ## Frame 2
        # variables
        self.status = StringVar(self.FrProg, '')
        # widgets
        self.prog_layers = Progressbar(self.FrProg, orient="horizontal")
        self.prog_fields = Progressbar(self.FrProg, orient="horizontal")
        # widgets placement
        Label(self.FrProg, textvariable=self.status,
                           foreground='DodgerBlue').pack(expand=1)
        self.prog_layers.pack(expand=1, fill=X)

        # Frames placement
        self.FrPath.pack(expand=1, fill='both')
        self.FrProg.pack(expand=1, fill='both')

                ### Tab 2: options
        # Export options
        caz_doc = Checkbutton(self.tab_options,
                              text=u'HTML / Word (.doc/.docx)',
                              variable=self.def_doc,
                              command=lambda: self.catalog_dependance())
        caz_xls = Checkbutton(self.tab_options,
                              text=u'Excel 2003 (.xls)',
                              variable=self.def_xls)
        caz_xml = Checkbutton(self.tab_options,
                              text=u'XML (ISO 19139)',
                              variable=self.def_xml)
        self.caz_cat = Checkbutton(self.tab_options,
                                   text=self.blabla.get('tab2_merge'),
                                   variable=self.def_cat)
        caz_odt = Checkbutton(self.tab_options,
                              text=u'Open Document Text (.odt)',
                              variable=self.def_odt)
        # widgets placement
        caz_doc.grid(row=1,
                     column=0,
                     sticky=N + S + W + E,
                     padx=2, pady=2)
        self.caz_cat.grid(row=2,
                          column=0,
                          sticky=N + S + W + E,
                          padx=2, pady=2)
        caz_xls.grid(row=1,
                     column=1,
                     sticky=N + S + W + E,
                     padx=2, pady=2)
        caz_xml.grid(row=2,
                     column=1,
                     sticky=N + S + W + E,
                     padx=2, pady=2)
        caz_odt.grid(row=3,
                     column=1,
                     sticky=N + S + W + E,
                     padx=2, pady=2)
        # disabling the widgets which work only on Windows OS
        if opersys != 'win32':
            self.logger.info('Disabling Windows reserved functions.')
            self.def_doc.set(0)
            self.def_cat.set(0)
            caz_doc.configure(state='disabled')
            self.caz_cat.configure(state='disabled')
        else:
            pass
        # make the catalog option depending on the Word option
        self.catalog_dependance()

        # tooltips
        InfoBulle(caz_doc,
                  message=self.dico_help.get(33)[1],
                  image=self.dico_help.get(33)[2])
        InfoBulle(caz_xls,
                  message=self.dico_help.get(34)[1],
                  image=self.dico_help.get(34)[2])
        InfoBulle(caz_xml,
                  message=self.dico_help.get(35)[1],
                  image=self.dico_help.get(35)[2])
        InfoBulle(caz_odt,
                  message=self.dico_help.get(36)[1],
                  image=self.dico_help.get(36)[2])
        InfoBulle(self.caz_cat,
                  message=self.dico_help.get(37)[1],
                  image=self.dico_help.get(37)[2])

                ### Tab 3: recurring attributes
        # Attribute selector
        self.lab_chps = Label(self.tab_attribs, text=self.blabla.get('tab3_sele'))
        self.ddl_attr = Combobox(self.tab_attribs, values=self.dico_rekur.keys())
        self.ddl_attr.bind("<<ComboboxSelected>>", self.edit_rekur)
        self.supr = Button(self.tab_attribs, text=self.blabla.get('tab3_supp'),
                           command=self.del_rekur)
        # frame
        self.FrRekur = Labelframe(self.tab_attribs,
                                  name='attributes',
                                  text=self.blabla.get('tab3_tit'))
        # attribute settings
        self.tab3_LBnom = Label(self.FrRekur,
                                text=self.blabla.get('tab3_nom'),
                                state=DISABLED)
        self.tab3_ENnom = Entry(self.FrRekur, state=DISABLED)
        self.tab3_LBdesc = Label(self.FrRekur,
                                 text=self.blabla.get('tab3_desc'),
                                 state=DISABLED)
        self.tab3_TXdesc = Text(self.FrRekur,
                                height=5, width=30,
                                wrap=WORD, state=DISABLED)
        self.tab3_CBcass = Checkbutton(self.FrRekur,
                                       text=self.blabla.get('tab3_cass'),
                                       variable=self.def_kass,
                                       state=DISABLED)
        self.tab3_CBstat = Checkbutton(self.FrRekur,
                                       text=self.blabla.get('tab3_stat'),
                                       variable=self.def_stat,
                                       state=DISABLED)
        # Validation button
        self.save = Button(self.FrRekur,
                           text=self.blabla.get('tab3_save'),
                           command=self.save_rekur,
                           state='disabled')

        # widgets placement
        self.lab_chps.grid(row=1, column=1, sticky=N + S + W,
                           padx=2, pady=2)
        self.ddl_attr.grid(row=1, column=2, sticky=N + S + W + E,
                           padx=2, pady=2)
        self.supr.grid(row=1, column=3, sticky=N + S + W + E,
                       padx=2, pady=2)
        self.tab3_LBnom.grid(row=1, column=0, columnspan=1,
                             sticky=N + S + W, padx=2, pady=2)
        self.tab3_ENnom.grid(row=1, column=1, columnspan=1,
                             sticky=N + S + W + E, padx=2, pady=2)
        self.tab3_LBdesc.grid(row=2, column=0, columnspan=1,
                              sticky=N + S + W + E, padx=2, pady=2)
        self.tab3_TXdesc.grid(row=2, column=1, columnspan=2,
                              sticky=N + S + W + E, padx=2, pady=2)
        self.tab3_CBcass.grid(row=3, column=0, columnspan=1,
                              sticky=N + S + W + E, padx=2, pady=2)
        self.tab3_CBstat.grid(row=3, column=1, columnspan=1,
                              sticky=N + S + W + E, padx=2, pady=2)
        self.save.grid(row=5, column=0, columnspan=4,
                       sticky=N + S + W + E, padx=2, pady=2)

        # Frame placement
        self.FrRekur.grid(row=2, column=1, columnspan=3,
                          sticky=N + S + W + E, padx=2, pady=2)

        # tooltips
        InfoBulle(self.lab_chps, message=self.dico_help.get(38)[1])
        InfoBulle(self.ddl_attr, message=self.dico_help.get(39)[1])
        InfoBulle(self.supr, message=self.dico_help.get(40)[1])
        InfoBulle(self.tab3_CBcass, message=self.dico_help.get(41)[1])
        InfoBulle(self.tab3_CBstat, message=self.dico_help.get(42)[1])

            ## Main frame
        # Hola
        self.welcome = Label(self,
                             text=self.blabla.get('hi') + self.uzer,
                             font=ft_tit,
                             foreground="red2")
        # Image
        self.icone = PhotoImage(master=self, file=r'data/img/metadator.gif')
        Label(self, image=self.icone).grid(row=2,
                                           column=0,
                                           padx=2,
                                           pady=2,
                                           sticky=N + S + W + E)
        # credits
        s = Style(self)
        s.configure('Kim.TButton', foreground='DodgerBlue',
                    borderwidth=0, relief="flat")
        Button(self,
               text='by Julien M. (2015)',
               style='Kim.TButton',
               command=lambda: open_new('https://github.com/Guts')).grid(row=3,
                                                                         padx=2,
                                                                         pady=2,
                                                                         sticky=W+E)
        # language switcher
        self.ddl_lang = Combobox(self, values=li_lang, width=5)
        self.ddl_lang.current(li_lang.index(self.def_lang))
        self.ddl_lang.bind("<<ComboboxSelected>>", self.change_lang)
        # Go go go button
        self.val = Button(self,
                          text=self.blabla.get('tab1_go'),
                          state='active',
                          command=lambda: self.process())
        # Cancel button
        self.can = Button(self,
                          text=self.blabla.get('gui_quit'),
                          command=self.destroy)
        # widgets placement
        self.welcome.grid(row=0, column=0, columnspan=1, sticky=N + S + W + E,
                          padx=2, pady=2)
        self.ddl_lang.grid(row=1, column=0, sticky=N, padx=2, pady=0)
        self.can.grid(row=4, column=0, sticky=N + S + W + E, padx=2, pady=2)
        self.val.grid(row=4, column=1, sticky=N + S + W + E, padx=2, pady=2)

        # tooltips
        InfoBulle(self.ddl_lang, message=self.dico_help.get(32)[1])

                ### Notebook placement
        self.nb.grid(row=0, rowspan=4, column=1, sticky=N + S + W + E)
        # keep updated list of profiles
        self.maj()
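
Example 130 calls logging.getLogger() with no name, which returns the root logger, and attaches a RotatingFileHandler so every module's messages end up in one size-capped file. Below is a stripped-down sketch of that setup, assuming the same file name, format, and rotation settings as the excerpt; the final log message is hypothetical.

import logging
from logging.handlers import RotatingFileHandler

logger = logging.getLogger()                 # no name: the root logger
logger.setLevel(logging.DEBUG)               # keep everything; handlers filter further

log_form = logging.Formatter('%(asctime)s || %(levelname)s || %(message)s')
logfile = RotatingFileHandler('Metadator_LOG.log', 'a', 5000000, 1)  # ~5 MB per file, 1 backup
logfile.setLevel(logging.DEBUG)
logfile.setFormatter(log_form)
logger.addHandler(logfile)

logger.info('Starting the UI')               # reaches the rotating file through the root logger

Because handlers hang off the root logger here, any library that logs through its own named logger will also be captured in the same file, which is why the example only configures logging once in the constructor.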

Example 131

Project: rasterio Source File: shapes.py
@click.command(short_help="Write shapes extracted from bands or masks.")
@options.file_in_arg
@options.output_opt
@cligj.precision_opt
@cligj.indent_opt
@cligj.compact_opt
@cligj.projection_geographic_opt
@cligj.projection_projected_opt
@cligj.sequence_opt
@cligj.use_rs_opt
@cligj.geojson_type_feature_opt(True)
@cligj.geojson_type_bbox_opt(False)
@click.option('--band/--mask', default=True,
              help="Choose to extract from a band (the default) or a mask.")
@click.option('--bidx', 'bandidx', type=int, default=None,
              help="Index of the band or mask that is the source of shapes.")
@click.option('--sampling', type=int, default=1,
              help="Inverse of the sampling fraction; "
                   "a value of 10 decimates.")
@click.option('--with-nodata/--without-nodata', default=False,
              help="Include or do not include (the default) nodata regions.")
@click.option('--as-mask/--not-as-mask', default=False,
              help="Interpret a band as a mask and output only one class of "
                   "valid data shapes.")
@click.pass_context
def shapes(
        ctx, input, output, precision, indent, compact, projection, sequence,
        use_rs, geojson_type, band, bandidx, sampling, with_nodata, as_mask):
    """Extracts shapes from one band or mask of a dataset and writes
    them out as GeoJSON. Unless otherwise specified, the shapes will be
    transformed to WGS 84 coordinates.

    The default action of this command is to extract shapes from the
    first band of the input dataset. The shapes are polygons bounding
    contiguous regions (or features) of the same raster value. This
    command performs poorly for int16 or float type datasets.

    Bands other than the first can be specified using the `--bidx`
    option:

      $ rio shapes --bidx 3 tests/data/RGB.byte.tif

    The valid data footprint of a dataset's i-th band can be extracted
    by using the `--mask` and `--bidx` options:

      $ rio shapes --mask --bidx 1 tests/data/RGB.byte.tif

    Omitting the `--bidx` option results in a footprint extracted from
    the conjunction of all band masks. This is generally smaller than
    any individual band's footprint.

    A dataset band may be analyzed as though it were a binary mask with
    the `--as-mask` option:

      $ rio shapes --as-mask --bidx 1 tests/data/RGB.byte.tif
    """
    # These import numpy, which we don't want to do unless it's needed.
    import numpy as np
    import rasterio.features
    import rasterio.warp

    logger = logging.getLogger('rio')
    dump_kwds = {'sort_keys': True}
    if indent:
        dump_kwds['indent'] = indent
    if compact:
        dump_kwds['separators'] = (',', ':')

    stdout = click.open_file(
        output, 'w') if output else click.get_text_stream('stdout')

    bidx = 1 if bandidx is None and band else bandidx

    # This is the generator for (feature, bbox) pairs.
    class Collection(object):

        def __init__(self, env):
            self._xs = []
            self._ys = []
            self.env = env

        @property
        def bbox(self):
            return min(self._xs), min(self._ys), max(self._xs), max(self._ys)

        def __call__(self):
            with rasterio.open(input) as src:
                if bidx is not None and bidx > src.count:
                    raise ValueError('bidx is out of range for raster')

                img = None
                msk = None

                # Adjust transforms.
                transform = src.transform
                if sampling > 1:
                    # Decimation of the raster produces a georeferencing
                    # shift that we correct with a translation.
                    transform *= Affine.translation(
                        src.width % sampling, src.height % sampling)
                    # And follow by scaling.
                    transform *= Affine.scale(float(sampling))

                # Most of the time, we'll use the valid data mask.
                # We skip reading it if we're extracting every possible
                # feature (even invalid data features) from a band.
                if not band or (band and not as_mask and not with_nodata):
                    if sampling == 1:
                        msk = src.read_masks(bidx)
                    else:
                        msk_shape = (
                            src.height // sampling, src.width // sampling)
                        if bidx is None:
                            msk = np.zeros(
                                (src.count,) + msk_shape, 'uint8')
                        else:
                            msk = np.zeros(msk_shape, 'uint8')
                        msk = src.read_masks(bidx, msk)

                    if bidx is None:
                        msk = np.logical_or.reduce(msk).astype('uint8')

                    # Possibly overridden below.
                    img = msk

                # Read the band data unless the --mask option is given.
                if band:
                    if sampling == 1:
                        img = src.read(bidx, masked=False)
                    else:
                        img = np.zeros(
                            (src.height // sampling, src.width // sampling),
                            dtype=src.dtypes[src.indexes.index(bidx)])
                        img = src.read(bidx, img, masked=False)

                # If --as-mask option was given, convert the image
                # to a binary image. This reduces the number of shape
                # categories to 2 and likely reduces the number of
                # shapes.
                if as_mask:
                    tmp = np.ones_like(img, 'uint8') * 255
                    tmp[img == 0] = 0
                    img = tmp
                    if not with_nodata:
                        msk = tmp

                # Transform the raster bounds.
                bounds = src.bounds
                xs = [bounds[0], bounds[2]]
                ys = [bounds[1], bounds[3]]
                if projection == 'geographic':
                    xs, ys = rasterio.warp.transform(
                        src.crs, CRS({'init': 'epsg:4326'}), xs, ys)
                if precision >= 0:
                    xs = [round(v, precision) for v in xs]
                    ys = [round(v, precision) for v in ys]
                self._xs = xs
                self._ys = ys

                # Prepare keyword arguments for shapes().
                kwargs = {'transform': transform}
                if not with_nodata:
                    kwargs['mask'] = msk

                src_basename = os.path.basename(src.name)

                # Yield GeoJSON features.
                for i, (g, val) in enumerate(
                        rasterio.features.shapes(img, **kwargs)):
                    if projection == 'geographic':
                        g = rasterio.warp.transform_geom(
                            src.crs, 'EPSG:4326', g,
                            antimeridian_cutting=True, precision=precision)
                    xs, ys = zip(*coords(g))
                    yield {
                        'type': 'Feature',
                        'id': "{0}:{1}".format(src_basename, i),
                        'properties': {
                            'val': val, 'filename': src_basename
                        },
                        'bbox': [min(xs), min(ys), max(xs), max(ys)],
                        'geometry': g
                    }

    if not sequence:
        geojson_type = 'collection'

    try:
        with ctx.obj['env'] as env:
            write_features(
                stdout, Collection(env), sequence=sequence,
                geojson_type=geojson_type, use_rs=use_rs,
                **dump_kwds)
    except Exception:
        logger.exception("Exception caught during processing")
        raise click.Abort()
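
The command above grabs a logger named 'rio' with logging.getLogger('rio') and only calls logger.exception() when processing fails, leaving handler setup to the CLI entry point. A minimal standalone sketch of that pattern, assuming a simulated failure in place of the real raster processing:

import logging
import sys

# Named logger: handlers and levels are expected to be configured elsewhere
# (here by basicConfig in the __main__ guard).
logger = logging.getLogger('rio')

def run_command():
    try:
        raise RuntimeError("simulated processing failure")
    except Exception:
        # logger.exception() logs at ERROR level and appends the traceback.
        logger.exception("Exception caught during processing")

if __name__ == '__main__':
    logging.basicConfig(stream=sys.stderr, level=logging.INFO)
    run_command()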

Example 132

Project: xenonmkv Source File: xenonmkv.py
def main():
    # Main program begins
    global args, log, app_path
    log = logging.getLogger("xenonmkv")
    console_handler = logging.StreamHandler()
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s [%(levelname)s] %(message)s'
    )
    console_handler.setFormatter(formatter)
    log.addHandler(console_handler)

    dependencies = ('mkvinfo', 'mediainfo', 'mkvextract',
                    'mplayer', 'faac', 'MP4Box')

    parser = argparse.ArgumentParser(description='Parse command line arguments '
                                     'for XenonMKV.')
    parser.add_argument('source_file', help='Path to the source MKV file')
    parser.add_argument('-d', '--destination',
                        help="""Directory to output the destination .mp4 file
                        (default: current directory)""",
                        default='.')
    parser.add_argument('-sd', '--scratch-dir',
                        help="""Specify a scratch directory where temporary files should
                         be stored""",
                        default=None)
    parser.add_argument('-cfg', '--config-file',
                        help="""Provide a configuration file that
                         contains default arguments or settings for the application""",
                        default='')
    parser.add_argument("-p", '--profile',
                        help="""Select a standardized device profile for encoding.
                         Current profile options are: xbox360, playbook""",
                        default="")

    output_group = parser.add_argument_group("Output options")
    output_group.add_argument('-q', '--quiet',
                              help="""Do not display output or progress from tools,
                               or prompt for input""",
                              action='store_true')
    output_group.add_argument('-v', '--verbose', help='Verbose output',
                              action='store_true')
    output_group.add_argument('-vv', '--debug',
                              help='Highly verbose debug output',
                              action='store_true')
    output_group.add_argument('-pf', '--print-file',
                              help='Output filenames before and after converting',
                              action='store_true')

    video_group = parser.add_argument_group("Video options",
                                            "Options for processing video.")
    video_group.add_argument('-nrp', '--no-round-par',
                             help="""When processing video, do not round pixel aspect
                             ratio from 0.98 to 1.01 to 1:1.""",
                             action='store_true')
    video_group.add_argument('-irf', '--ignore-reference-frames',
                             help="""If the source video has too many reference frames
                              to play on low-powered devices (Xbox, PlayBook), continue
                              converting anyway""",
                             action='store_true')

    audio_group = parser.add_argument_group("Audio options",
                                            "Select custom audio decoding and "
                                            "encoding options.")
    audio_group.add_argument('-c', '--channels',
                             help="""Specify the maximum number of channels that are
                              acceptable in the output file. Certain devices (Xbox) will
                              not play audio with more than two channels. If the audio
                              needs to be re-encoded at all, it will be downmixed to two
                              channels only. Possible values for this option are 2
                              (stereo); 4 (surround); 5.1 or 6 (full 5.1); 7.1 or 8
                              (full 7.1 audio).
                              For more details, view the README file.""",
                             default=6)
    audio_group.add_argument('-fq', '--faac-quality',
                             help="""Quality setting for FAAC when encoding WAV files
                              to AAC. Defaults to 150 (see
                              http://wiki.hydrogenaudio.org/index.php?title=FAAC)""",
                             default=150)

    track_group = parser.add_argument_group("Track options",
                                            "These options determine how multiple tracks "
                                            "in MKV files are selected.")
    track_group.add_argument('-st', '--select-tracks',
                             help="""If there are multiple tracks in the MKV file, prompt
                             to select which ones will be used. By default, the last video
                              and audio tracks flagged as 'default' in the MKV file will
                             be used. This option requires interactive user input, so do
                             not use it in batch processing or scripts.""",
                             action='store_true')
    track_group.add_argument('-vt', '--video-track',
                             help="""Use the specified video track. If not present in
                             the file, the default track will be used.""",
                             type=int)
    track_group.add_argument('-at', '--audio-track',
                             help="""Use the specified audio track. If not present in
                             the file, the default track will be used.""",
                             type=int)
    track_group.add_argument('-lang', '--preferred-language',
                             help="""Provide a preferred language code in ISO 639-1 format
                              ('en' for English, 'fr' for French, etc.)
                             When picking tracks, this language will be preferred.""")

    proc_group = parser.add_argument_group("File and processing options",
                                           """These options determine how XenonMKV
                                           processes files and their contents.""")
    proc_group.add_argument('-rp', '--resume-previous',
                            help="""Resume a previous run (do not recreate files
                            if they already exist). Useful for debugging quickly if a
                            conversion has already partially succeeded.""",
                            action='store_true')
    proc_group.add_argument('-n', '--name',
                            help="""Specify a name for the final MP4 container.
                            Defaults to the original file name.""",
                            default="")
    proc_group.add_argument('-preserve', '--preserve-temp-files',
                            help="""Preserve temporary files on the filesystem rather
                             than deleting them at the end of each run.""",
                            action='store_true', default=False)
    proc_group.add_argument("-eS", "--error-filesize",
                            help="""Stop processing this file if it is over 4GiB.
                             Files of this size will not be processed correctly by some
                             devices such as the Xbox 360, and they will not save
                             correctly to FAT32-formatted storage. By default, you will
                             only see a warning message, and processing will continue.""",
                            action="store_true")
    proc_group.add_argument('--mp4box-retries',
                            help="""Set the number of retry attempts for MP4Box to attempt
                             to create a file (default: 3)""", default=3, type=int)

    dep_group = parser.add_argument_group("Custom paths",
                                          "Set custom paths for the utilities used by "
                                          "XenonMKV.")
    for dependency in dependencies:
        dep_group.add_argument("--{0}-path".format(dependency.lower()),
                               help="""Set a custom complete path for the {0} tool.
                                Any library under that path will also be
                                loaded.""".format(dependency))

    if len(sys.argv) < 2:
        parser.print_help()
        sys.exit(1)

    args = parser.parse_args()
    config_file_output = False

    # If a configuration file was specified, attempt to read it.
    if args.config_file and os.path.isfile(args.config_file):
        config_file_output = parse_config_file(args)

    # Depending on the arguments, set the logging level appropriately.
    if args.quiet:
        log.setLevel(logging.ERROR)
    elif args.debug:
        log.setLevel(logging.DEBUG)
        log.debug("Using debug/highly verbose mode output")
    elif args.verbose:
        log.setLevel(logging.INFO)

    # If we parsed a configuration file, run through all logging output
    if config_file_output:
        for level, message in config_file_output:
            getattr(log, level)(message)

    # Pick temporary/scratch directory
    if not args.scratch_dir:
        if "TEMP" in os.environ:
            args.scratch_dir = os.environ["TEMP"]
        elif os.path.isdir("/var/tmp"):
            args.scratch_dir = "/var/tmp"
        else:
            args.scratch_dir = os.curdir

    # Apply selected profile
    if args.profile:
        if args.profile == "xbox360":
            args.channels = 2
            args.error_filesize = True
        elif args.profile == "playbook":
            args.channels = 6
            args.error_filesize = False
        else:
            log.warning("Unrecognized device profile {0}".format(args.profile))
            args.profile = ""

    # Check for 5.1/7.1 audio with the channels setting
    if args.channels == "5.1":
        args.channels = 6
    elif args.channels == "7.1":
        args.channels = 8
    if args.channels not in ('2', '4', '6', '8', 2, 4, 6, 8):
        log.warning("An invalid number of channels was specified. "
                    "Falling back to 2-channel stereo audio.")
        args.channels = 2

    # Enforce channels as integer for comparison purposes later on
    args.channels = int(args.channels)

    # Ensure preferred language, if present, is lowercased and 2 characters
    if args.preferred_language:
        args.preferred_language = args.preferred_language.lower()
        if len(args.preferred_language) < 2:
            log.warning("Could not set preferred language code '{0}'".format(
                        args.preferred_language))
            args.preferred_language = None
        elif len(args.preferred_language) > 2:
            args.preferred_language = args.preferred_language[0:2]
            log.warning("Preferred language code truncated to '{0}'".format(
                        args.preferred_language))

    # Make sure user is not prompted for input if quiet option is used
    if args.quiet and args.select_tracks:
        log.warning("Cannot use interactive track selection in quiet mode. "
                    "Tracks will be automatically selected.")
        args.select_tracks = False

    log.debug("Starting XenonMKV")

    # Check if we have a full file path or are just specifying a file
    if os.sep not in args.source_file:
        log.debug("Ensuring that we have a complete path to {0}".format(
                  args.source_file))
        args.source_file = os.path.join(os.getcwd(), args.source_file)
        log.debug("{0} will be used to reference the original MKV file".format(
                  args.source_file))

    # Always ensure destination path ends with a slash
    if not args.destination.endswith(os.sep):
        args.destination += os.sep

    if not args.scratch_dir.endswith(os.sep):
        args.scratch_dir += os.sep

    # Initialize file utilities
    f_utils = FileUtils(log, args)

    # Check if all dependent applications are installed and available in PATH,
    # or if they are specified.
    # If so, store them in args.tool_paths so all classes
    # have access to them as needed
    (args.tool_paths, args.library_paths) = f_utils.check_dependencies(
        dependencies
    )

    # Check if source file exists and is an appropriate size
    try:
        f_utils.check_source_file(args.source_file)
    except IOError as e:
        log_exception("check_source_file", e)

    source_basename = os.path.basename(args.source_file)
    source_noext = source_basename[0:source_basename.rindex(".")]

    if not args.name:
        args.name = source_noext
        log.debug("Using '{0}' as final container name".format(args.name))

    # Check if destination directory exists
    try:
        f_utils.check_dest_dir(args.destination)
    except IOError as e:
        log_exception("check_dest_dir", e)

    log.info("Loading source file {0}".format(args.source_file))

    if args.print_file:
        print "Processing: {0}".format(args.source_file)

    try:
        to_convert = MKVFile(args.source_file, log, args)
        to_convert.get_mkvinfo()
    except Exception as e:
        if not args.preserve_temp_files:
            cleanup_temp_files()
        log_exception("get_mkvinfo", e)

    # If the user knows which A/V tracks they want, set them.
    # MKVFile will not overwrite them.
    try_set_video_track(to_convert)
    try_set_audio_track(to_convert)

    try:
        # Check for multiple tracks
        if to_convert.has_multiple_av_tracks():
            log.debug("Source file {0} has multiple audio or "
                      "video tracks".format(args.source_file))

            # First, pick default tracks,
            # which can be overridden in select_tracks
            to_convert.set_default_av_tracks()

            if args.select_tracks:
                video_tracks = to_convert.video_track_list()
                audio_tracks = to_convert.audio_track_list()
                if len(video_tracks) > 1:
                    args.video_track = select_track("video", video_tracks)
                    try_set_video_track(to_convert)
                if len(audio_tracks) > 1:
                    args.audio_track = select_track("audio", audio_tracks)
                    try_set_audio_track(to_convert)
            else:
                log.debug("Selected default audio and video tracks")

        else:
            # Pick default (or only) audio/video tracks
            log.debug("Source file {0} has 1 audio and 1 video track; "
                      "using these".format(args.source_file))
            to_convert.set_default_av_tracks()
    except Exception as e:
        if not args.preserve_temp_files:
            cleanup_temp_files()
        log_exception("set_default_av_tracks", e)

    # Next phase: Extract MKV files to scratch directory
    try:
        (video_file, audio_file) = to_convert.extract_mkv()
    except Exception as e:
        if not args.preserve_temp_files:
            cleanup_temp_files()
        log_exception("extract_mkv", e)

    # If needed, hex edit the video file to make it compliant
    # with a lower h264 profile level
    if video_file.endswith(".h264"):
        f_utils.hex_edit_video_file(video_file)

    # Detect which audio codec is in place and dump audio to WAV accordingly
    if to_convert.get_audio_track().needs_recode:
        log.debug("Audio track {0} needs to be re-encoded".format(audio_file))
        audio_dec = AudioDecoder(audio_file, log, args)
        audio_dec.decode()

        # Once audio has been decoded to a WAV,
        # use the appropriate AAC encoder to transform it to .aac
        enc = AACEncoder(
            os.path.join(args.scratch_dir, "audiodump.wav"), log, args)
        enc.encode()
        encoded_audio = os.path.join(args.scratch_dir, "audiodump.aac")
    else:
        # The audio track does not need to be re-encoded.
        # Reference the already-valid audio file and put it into the MP4 container.
        encoded_audio = audio_file

    # Now, throw things back together into a .mp4 container with MP4Box.
    video_track = to_convert.get_video_track()
    mp4box = MP4Box(video_file, encoded_audio, video_track.frame_rate,
                    video_track.pixel_ar, args, log)
    try:
        mp4box.package()
    except Exception as e:
        if not args.preserve_temp_files:
            cleanup_temp_files()
        log_exception("package", e)

    # Move the file to the destination directory with the original name
    dest_path = os.path.join(args.destination, source_noext + ".mp4")
    shutil.move(os.path.join(args.scratch_dir, "output.mp4"), dest_path)

    log.info("Processing of {0} complete; file saved as {1}".format(
             args.source_file, dest_path))

    # Delete temporary files if possible
    if not args.preserve_temp_files:
        cleanup_temp_files()

    log.debug("XenonMKV completed processing")
    if args.print_file:
        print "Completed: {0}".format(dest_path)

Example 133

Project: btrfs-sxbackup Source File: __main__.py
def main():
    # Parse arguments
    parser = ArgumentParser(prog=_APP_NAME)
    parser.add_argument('-q', '--quiet', dest='quiet', action='store_true', default=False,
                        help='do not log to stdout')
    parser.add_argument('--version', action='version', version='%s v%s' % (_APP_NAME, __version__))
    parser.add_argument('-v', dest='verbosity', action='count',
                        help='can be specified multiple times to increase verbosity')

    subparsers = parser.add_subparsers()
    subparsers.required = True
    subparsers.dest = 'command'

    # Reusable options
    compress_args = ['-c', '--compress']
    compress_kwargs = {'action': 'store_true',
                       'help': 'enables compression during transmission. Requires lzop to be installed on both source'
                               ' and destination',
                       'default': None}

    source_retention_args = ['-sr', '--source-retention']
    source_retention_kwargs = {'type': str,
                               'default': None,
                               'help': 'expression defining which source snapshots to retain/cleanup.'
                                       ' can be a static number (of backups) or more complex expression like'
                                       ' "1d:4/d, 1w:daily, 2m:none" literally translating to: "1 day from now keep'
                                       ' 4 backups a day, 1 week from now keep daily backups,'
                                       ' 2 months from now keep none"'}

    destination_retention_args = ['-dr', '--destination-retention']
    destination_retention_kwargs = {'type': str,
                                    'default': None,
                                    'help': 'expression defining which destination snapshots to retain/cleanup.'
                                            ' can be a static number (of backups) or more complex'
                                            ' expression (see --source-retention argument)'}

    subvolumes_args = ['subvolumes']
    subvolumes_kwargs = {'type': str,
                         'nargs': '+',
                         'metavar': 'subvolume',
                         'help': 'backup job source or destination subvolume. local path or SSH url'}

    # Initialize command cmdline params
    p_init = subparsers.add_parser(_CMD_INIT, help='initialize backup job')
    p_init.add_argument('source_subvolume', type=str, metavar='source-subvolume',
                        help='source subvolume to back up. local path or ssh url')
    p_init.add_argument('destination_subvolume', type=str, metavar='destination-subvolume', nargs='?', default=None,
                        help='optional destination subvolume receiving backup snapshots. local path or ssh url')
    p_init.add_argument(*source_retention_args, **source_retention_kwargs)
    p_init.add_argument(*destination_retention_args, **destination_retention_kwargs)
    p_init.add_argument(*compress_args, **compress_kwargs)

    p_destroy = subparsers.add_parser(_CMD_DESTROY, help='destroy backup job by removing configuration files from source'
                                                         ' and destination. backup snapshots will be kept on both sides'
                                                         ' by default.')
    p_destroy.add_argument(*subvolumes_args, **subvolumes_kwargs)
    p_destroy.add_argument('--purge', action='store_true', help='removes all backup snapshots from source and destination')

    # Update command cmdline params
    p_update = subparsers.add_parser(_CMD_UPDATE, help='update backup job')
    p_update.add_argument(*subvolumes_args, **subvolumes_kwargs)
    p_update.add_argument(*source_retention_args, **source_retention_kwargs)
    p_update.add_argument(*destination_retention_args, **destination_retention_kwargs)
    p_update.add_argument(*compress_args, **compress_kwargs)
    p_update.add_argument('-nc', '--no-compress', action='store_true', help='disable compression during transmission')

    # Run command cmdline params
    p_run = subparsers.add_parser(_CMD_RUN, help='run backup job')
    p_run.add_argument(*subvolumes_args, **subvolumes_kwargs)
    p_run.add_argument('-m', '--mail', type=str, nargs='?', const='',
                       help='enables email notifications. If an email address is given, it overrides the'
                            ' default email-recipient setting in /etc/btrfs-sxbackup.conf')
    p_run.add_argument('-li', '--log-ident', dest='log_ident', type=str, default=None,
                       help='log ident used for syslog logging, defaults to script name')

    # Info command cmdline params
    p_info = subparsers.add_parser(_CMD_INFO, help='backup job info')
    p_info.add_argument(*subvolumes_args, **subvolumes_kwargs)

    # Purge command cmdline params
    p_purge = subparsers.add_parser(_CMD_PURGE, help="purge backups according to retention expressions")
    p_purge.add_argument(*subvolumes_args, **subvolumes_kwargs)
    purge_source_retention_kwargs = source_retention_kwargs.copy()
    purge_destination_retention_kwargs = destination_retention_kwargs.copy()
    purge_source_retention_kwargs['help'] = 'Optionally override %s' % purge_source_retention_kwargs['help']
    purge_destination_retention_kwargs['help'] = 'Optionally override %s' % purge_destination_retention_kwargs['help']
    p_purge.add_argument(*source_retention_args, **purge_source_retention_kwargs)
    p_purge.add_argument(*destination_retention_args, **purge_destination_retention_kwargs)

    # Transfer
    p_transfer = subparsers.add_parser(_CMD_TRANSFER, help='transfer snapshot')
    p_transfer.add_argument('source_subvolume', type=str, metavar='source-subvolume',
                            help='source subvolume to transfer. local path or ssh url')
    p_transfer.add_argument('destination_subvolume', type=str, metavar='destination-subvolume',
                            help='destination subvolume. local path or ssh url')
    p_transfer.add_argument(*compress_args, **compress_kwargs)

    # Initialize logging
    args = parser.parse_args()

    # Read global configuration
    Configuration.instance().read()

    logger = logging.getLogger()

    if not args.quiet:
        log_std_handler = logging.StreamHandler(sys.stdout)
        log_std_handler.setFormatter(logging.Formatter('%(levelname)s %(message)s'))
        logger.addHandler(log_std_handler)

    log_memory_handler = None
    log_trace = False
    email_recipient = None

    def handle_exception(ex: Exception):
        """
        Exception handler
        :param ex:
        :return:
        """

        # Log exception message
        if len(str(ex)) > 0:
            logger.error('%s' % str(ex))

        if isinstance(ex, CalledProcessError):
            if ex.output:
                output = ex.output.decode().strip()
                if len(output) > 0:
                    logger.error('%s' % output)

        if log_trace:
            # Log stack trace
            logger.error(traceback.format_exc())

        # Email notification
        if email_recipient:
            try:
                # Format message and send
                msg = '\n'.join(map(lambda log_record: log_memory_handler.formatter.format(log_record),
                                    log_memory_handler.buffer))
                mail.send(email_recipient, '%s FAILED' % _APP_NAME, msg)
            except Exception as ex:
                logger.error(str(ex))

    # Syslog handler
    if args.command == _CMD_RUN:
        log_syslog_handler = logging.handlers.SysLogHandler('/dev/log')
        log_syslog_handler.setFormatter(logging.Formatter(_APP_NAME + '[%(process)d] %(levelname)s %(message)s'))
        logger.addHandler(log_syslog_handler)

        # Log ident support
        if args.log_ident:
            log_ident = args.log_ident if args.log_ident else Configuration.instance().log_ident
            if log_ident:
                log_syslog_handler.ident = log_ident + ' '

        # Mail notification support
        if args.mail is not None:
            email_recipient = args.mail if len(args.mail) > 0 else Configuration.instance().email_recipient

            # Memory handler will buffer output for sending via mail later if needed
            log_memory_handler = logging.handlers.MemoryHandler(capacity=-1)
            log_memory_handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
            logger.addHandler(log_memory_handler)

    if args.verbosity and args.verbosity >= 1:
        logger.setLevel(logging.DEBUG)
        log_trace = True
    else:
        logger.setLevel(logging.INFO)
    logger.info('%s v%s' % (_APP_NAME, __version__))

    exitcode = 0

    try:
        if args.command == _CMD_RUN:
            for subvolume in args.subvolumes:
                try:
                    job = Job.load(urllib.parse.urlsplit(subvolume))
                    job.run()
                except Exception as e:
                    handle_exception(e)
                    exitcode = 1

        elif args.command == _CMD_INIT:
            source_retention = RetentionExpression(args.source_retention) if args.source_retention else None
            destination_retention = RetentionExpression(args.destination_retention) if args.destination_retention else None
            job = Job.init(source_url=urllib.parse.urlsplit(args.source_subvolume),
                           source_retention=source_retention,
                           dest_url=urllib.parse.urlsplit(args.destination_subvolume) if args.destination_subvolume
                           else None,
                           dest_retention=destination_retention,
                           compress=args.compress)

        elif args.command == _CMD_UPDATE:
            source_retention = RetentionExpression(args.source_retention) if args.source_retention else None
            dest_retention = RetentionExpression(args.destination_retention) if args.destination_retention else None
            for subvolume in args.subvolumes:
                try:
                    job = Job.load(urllib.parse.urlsplit(subvolume))
                    job.update(source_retention=source_retention,
                               dest_retention=dest_retention,
                               compress=args.compress if args.compress else
                               not args.no_compress if args.no_compress else
                               None)
                except Exception as e:
                    handle_exception(e)
                    exitcode = 1

        elif args.command == _CMD_DESTROY:
            for subvolume in args.subvolumes:
                try:
                    job = Job.load(urllib.parse.urlsplit(subvolume))
                    job.destroy(purge=args.purge)
                except Exception as e:
                    handle_exception(e)
                    exitcode = 1

        elif args.command == _CMD_INFO:
            for subvolume in args.subvolumes:
                try:
                    job = Job.load(urllib.parse.urlsplit(subvolume), raise_errors=False)
                    job.print_info()
                except Exception as e:
                    handle_exception(e)
                    exitcode = 1

        elif args.command == _CMD_PURGE:
            source_retention = RetentionExpression(args.source_retention) if args.source_retention else None
            dest_retention = RetentionExpression(args.destination_retention) if args.destination_retention else None
            for subvolume in args.subvolumes:
                try:
                    job = Job.load(urllib.parse.urlsplit(subvolume))
                    job.purge(source_retention=source_retention, dest_retention=dest_retention)
                except Exception as e:
                    handle_exception(e)
                    exitcode = 1

        elif args.command == _CMD_TRANSFER:
            source = Location(urllib.parse.urlsplit(args.source_subvolume))
            destination = Location(urllib.parse.urlsplit(args.destination_subvolume))
            source.transfer_btrfs_snapshot(destination, compress=args.compress)

    except SystemExit as e:
        if e.code != 0:
            raise

    except KeyboardInterrupt as k:
        exitcode = 1

    except Exception as e:
        handle_exception(e)
        exitcode = 1

    exit(exitcode)
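
The example above configures the root logger with an optional stdout handler and buffers records in a MemoryHandler so they can be mailed on failure. A trimmed sketch of that pattern, with the syslog and mail pieces left out; configure_root_logger is an invented helper name:

import logging
import logging.handlers
import sys

def configure_root_logger(quiet=False, verbose=False):
    logger = logging.getLogger()          # root logger, as in the example above
    if not quiet:
        stdout_handler = logging.StreamHandler(sys.stdout)
        stdout_handler.setFormatter(logging.Formatter('%(levelname)s %(message)s'))
        logger.addHandler(stdout_handler)
    # With no target set, flush() is a no-op, so records simply accumulate in
    # .buffer until read out (the mail-notification trick in the example above).
    memory_handler = logging.handlers.MemoryHandler(capacity=-1)
    memory_handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
    logger.addHandler(memory_handler)
    logger.setLevel(logging.DEBUG if verbose else logging.INFO)
    return logger, memory_handler

logger, buffered = configure_root_logger(verbose=True)
logger.info("btrfs-sxbackup-style logging configured")
print([record.getMessage() for record in buffered.buffer])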

Example 134

Project: pywebsocket Source File: _stream_hybi.py
def parse_frame(receive_bytes, logger=None,
                ws_version=common.VERSION_HYBI_LATEST,
                unmask_receive=True):
    """Parses a frame. Returns a tuple containing each header field and
    payload.

    Args:
        receive_bytes: a function that reads frame data from a stream or
            something similar. The function takes the number of bytes to be
            read. The function must raise ConnectionTerminatedException if
            there is not enough data to be read.
        logger: a logging object.
        ws_version: the version of WebSocket protocol.
        unmask_receive: unmask received frames. When received unmasked
            frame, raises InvalidFrameException.

    Raises:
        ConnectionTerminatedException: when receive_bytes raises it.
        InvalidFrameException: when the frame contains invalid data.
    """

    if not logger:
        logger = logging.getLogger()

    logger.log(common.LOGLEVEL_FINE, 'Receive the first 2 octets of a frame')

    received = receive_bytes(2)

    first_byte = ord(received[0])
    fin = (first_byte >> 7) & 1
    rsv1 = (first_byte >> 6) & 1
    rsv2 = (first_byte >> 5) & 1
    rsv3 = (first_byte >> 4) & 1
    opcode = first_byte & 0xf

    second_byte = ord(received[1])
    mask = (second_byte >> 7) & 1
    payload_length = second_byte & 0x7f

    logger.log(common.LOGLEVEL_FINE,
               'FIN=%s, RSV1=%s, RSV2=%s, RSV3=%s, opcode=%s, '
               'Mask=%s, Payload_length=%s',
               fin, rsv1, rsv2, rsv3, opcode, mask, payload_length)

    if (mask == 1) != unmask_receive:
        raise InvalidFrameException(
            'Mask bit on the received frame didn\'t match masking '
            'configuration for received frames')

    # The HyBi and later specs disallow putting a value in 0x0-0xFFFF
    # into the 8-octet extended payload length field (or 0x0-0xFD in
    # 2-octet field).
    valid_length_encoding = True
    length_encoding_bytes = 1
    if payload_length == 127:
        logger.log(common.LOGLEVEL_FINE,
                   'Receive 8-octet extended payload length')

        extended_payload_length = receive_bytes(8)
        payload_length = struct.unpack(
            '!Q', extended_payload_length)[0]
        if payload_length > 0x7FFFFFFFFFFFFFFF:
            raise InvalidFrameException(
                'Extended payload length >= 2^63')
        if ws_version >= 13 and payload_length < 0x10000:
            valid_length_encoding = False
            length_encoding_bytes = 8

        logger.log(common.LOGLEVEL_FINE,
                   'Decoded_payload_length=%s', payload_length)
    elif payload_length == 126:
        logger.log(common.LOGLEVEL_FINE,
                   'Receive 2-octet extended payload length')

        extended_payload_length = receive_bytes(2)
        payload_length = struct.unpack(
            '!H', extended_payload_length)[0]
        if ws_version >= 13 and payload_length < 126:
            valid_length_encoding = False
            length_encoding_bytes = 2

        logger.log(common.LOGLEVEL_FINE,
                   'Decoded_payload_length=%s', payload_length)

    if not valid_length_encoding:
        logger.warning(
            'Payload length is not encoded using the minimal number of '
            'bytes (%d is encoded using %d bytes)',
            payload_length,
            length_encoding_bytes)

    if mask == 1:
        logger.log(common.LOGLEVEL_FINE, 'Receive mask')

        masking_nonce = receive_bytes(4)
        masker = util.RepeatedXorMasker(masking_nonce)

        logger.log(common.LOGLEVEL_FINE, 'Mask=%r', masking_nonce)
    else:
        masker = _NOOP_MASKER

    logger.log(common.LOGLEVEL_FINE, 'Receive payload data')
    if logger.isEnabledFor(common.LOGLEVEL_FINE):
        receive_start = time.time()

    raw_payload_bytes = receive_bytes(payload_length)

    if logger.isEnabledFor(common.LOGLEVEL_FINE):
        logger.log(
            common.LOGLEVEL_FINE,
            'Done receiving payload data at %s MB/s',
            payload_length / (time.time() - receive_start) / 1000 / 1000)
    logger.log(common.LOGLEVEL_FINE, 'Unmask payload data')

    if logger.isEnabledFor(common.LOGLEVEL_FINE):
        unmask_start = time.time()

    unmasked_bytes = masker.mask(raw_payload_bytes)

    if logger.isEnabledFor(common.LOGLEVEL_FINE):
        logger.log(
            common.LOGLEVEL_FINE,
            'Done unmasking payload data at %s MB/s',
            payload_length / (time.time() - unmask_start) / 1000 / 1000)

    return opcode, unmasked_bytes, fin, rsv1, rsv2, rsv3
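
parse_frame() accepts an optional logger and falls back to the root logger via logging.getLogger() when none is given. A minimal sketch of that default-logger pattern; the function name and payload handling below are illustrative only:

import logging

def parse_with_optional_logger(data, logger=None):
    # Fall back to the root logger when the caller does not supply one,
    # mirroring the parse_frame() signature above.
    if not logger:
        logger = logging.getLogger()
    logger.debug('Parsing %d bytes', len(data))
    return data[:2], data[2:]

logging.basicConfig(level=logging.DEBUG)
header, rest = parse_with_optional_logger(b'\x81\x05hello')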

Example 135

Project: econsensus Source File: process_email.py
    def _process_email(self, mail, verbosity): # pylint: disable=R0914
        logger = logging.getLogger('econsensus')

        #handle multipart mails: cycle through the parts
        #until we find a text part with a full payload.
        if mail.is_multipart():
            for message in mail.get_payload():
                if message.get_content_maintype() == 'text':
                    msg_string = self._strip_string(message.get_payload(), verbosity)
                    if msg_string:
                        break
        else:
            msg_string = self._strip_string(mail.get_payload(), verbosity)       
        
        if not msg_string:
            logger.error("[EMAIL REJECTED] From '%s' Reason: Email payload empty" % mail['From'])
            return
        
        #Must match email 'from' address to user
        from_match = re.search('([\w\-\.]+@\w[\w\-]+\.+[\w\-]+)', mail['From'])
        if from_match:
            self._print_if_verbose(verbosity, "Found email 'from' '%s'" % from_match.group(1))
            try:
                user = User.objects.get(email=from_match.group(1))
            except ObjectDoesNotExist:
                logger.error("[EMAIL REJECTED] From '%s' Reason: id '%s' does not correspond to any known User" \
                             % (mail['From'], from_match.group(1)))
                return
            except MultipleObjectsReturned:
                logger.error("[EMAIL REJECTED] From '%s' Reason: Query returned several Users for id '%s'" \
                             % (mail['From'], from_match.group(1)))
                return
            self._print_if_verbose(verbosity, "Matched email to user '%s'" % user)
        else:
            logger.error("[EMAIL REJECTED] From '%s' Reason: Unrecognised email address format" % mail['From'])
            return
        
        #Must match email 'to' address to organization
        org_match = re.search('([\w\-\.]+)@\w[\w\-]+\.+[\w\-]+', mail['To'])
        if org_match:
            self._print_if_verbose(verbosity, "Found email 'to' '%s'" % org_match.group(1))
            try:
                organization = Organization.objects.get(slug=org_match.group(1))
            except ObjectDoesNotExist:
                logger.error("[EMAIL REJECTED] From '%s' Reason: id '%s' does not correspond to any known Organization" \
                             % (mail['From'], org_match.group(1)))
                return
            except MultipleObjectsReturned:
                logger.error("[EMAIL REJECTED] From '%s' Reason: Query returned several Organizations for id '%s'" \
                             % (mail['From'], org_match.group(1)))
                return
            self._print_if_verbose(verbosity, "Matched email to organization '%s'" % organization.name)
        else:
            logger.error("[EMAIL REJECTED] From '%s' Reason: Couldn't pull Organization from '%s'" % (mail['From'], mail['To']))
            return

        #User must be a member of the Organization
        if organization not in Organization.active.get_for_user(user):
            self._print_if_verbose(verbosity, "User %s is not a member of Organization %s" % (user.username, organization.name))
            logger.error("[EMAIL REJECTED] From '%s' Reason: User '%s' is not a member of Organization '%s'" \
                         % (mail['From'], user.username, organization.name))
            return

        #Look for feedback types in the message body
        rating = Feedback.COMMENT_STATUS                    
        description = msg_string                        
        parse_feedback = re.match('(\w+)\s*:\s*([\s\S]*)', msg_string, re.IGNORECASE)
        if parse_feedback:
            description = parse_feedback.group(2)
            rating_match = re.match('question|danger|concerns|consent|comment', parse_feedback.group(1), re.IGNORECASE)
            if rating_match:
                self._print_if_verbose(verbosity, "Found feedback rating '%s'" % rating_match.group())
                rating = dict(Feedback.RATING_CHOICES).values().index(rating_match.group().lower())

        # Determine whether email is in reply to a notification
        subject_match = re.search('\[EC#(\d+)(?:\\\\(\d+)(?:\\\\(\d+))?)?\]', mail['Subject'])
        if subject_match:
            #Check that the user has the right to comment against the decision.
            if subject_match.group(1):
                self._print_if_verbose(verbosity, "Found decision id '%s' in Subject" % subject_match.group(1))
                try:
                    decision = Decision.objects.get(pk=subject_match.group(1))
                except ObjectDoesNotExist:
                    logger.error("[EMAIL REJECTED] From '%s' Reason: id '%s' does not correspond to any known Decision" \
                                 % (mail['From'], subject_match.group(1)))
                    return
                except MultipleObjectsReturned:
                    logger.error("[EMAIL REJECTED] From '%s' Reason: Query returned several Decisions for id '%s'" \
                                 % (mail['From'], subject_match.group(1)))
                    return
                if user not in decision.organization.users.all():
                    logger.error("[EMAIL REJECTED] From '%s' Reason: User cannot reply to decision #%s because they are not a member of that organization." \
                                 % (mail['From'], subject_match.group(1)))
                    return
   
            #process comment or feedback against feedback
            if subject_match.group(2):
                self._print_if_verbose(verbosity, "Found feedback id '%s' in Subject" % subject_match.group(2))
                try:
                    feedback = Feedback.objects.get(pk=subject_match.group(2))
                except ObjectDoesNotExist:
                    logger.error("[EMAIL REJECTED] From '%s' Reason: id '%s' does not correspond to any known Feedback" \
                                 % (mail['From'], subject_match.group(2)))
                    return
                except MultipleObjectsReturned:
                    logger.error("[EMAIL REJECTED] From '%s' Reason: Query returned more than one Feedback for id '%s'" \
                                 % (mail['From'], subject_match.group(2)))
                    return
                
                if parse_feedback and rating_match:
                    decision = feedback.decision
                    self._print_if_verbose(verbosity, "Creating feedback with rating '%s' and description '%s'." % (rating, description))
                    feedback = Feedback(author=user, decision=decision, rating=rating, description=description)
                    feedback.save()
                    logger.info("User '%s' added feedback via email to decision #%s" % (user, decision.id))
                    self._print_if_verbose(verbosity, "Found corresponding object '%s'" % decision.excerpt)
                else:
                    comment_text = msg_string                
                    self._print_if_verbose(verbosity, "Creating comment '%s'." % (comment_text))
                    comment = Comment(user=user,
                                     user_name=user.get_full_name(),
                                     user_email=user.email,
                                     comment = comment_text,
                                     content_object=feedback, 
                                     object_pk=feedback.id,
                                     content_type=ContentType.objects.get(app_label="publicweb", model="feedback"),
                                     submit_date = timezone.now(),
                                     site = Site.objects.get_current())
                    comment.save()
                    logger.info("User '%s' added comment via email to feedback #%s" % (user, feedback.id))
                    self._print_if_verbose(verbosity, "Found corresponding object '%s'" % feedback.description)
            
            #process feedback against decision
            elif subject_match.group(1):
                self._print_if_verbose(verbosity, "Creating feedback with rating '%s' and description '%s'." % (rating, description))
                feedback = Feedback(author=user, decision=decision, rating=rating, description=description)
                feedback.save()
                logger.info("User '%s' added feedback via email to decision #%s" % (user, decision.id))
                self._print_if_verbose(verbosity, "Found corresponding object '%s'" % decision.excerpt)
                
            else:
                self._print_if_verbose(verbosity, "No id found in message subject: %s" % mail['Subject'])                
                logger.error("[EMAIL REJECTED] From '%s' Reason: No id present." \
                             % mail['From'])
        # Email was not in reply to a notification so create a new proposal
        else:
            proposal_match = re.search('proposal', mail['Subject'], re.IGNORECASE)
            if proposal_match:
                decision = Decision(author=user, editor=user, status=Decision.PROPOSAL_STATUS, organization=organization, \
                                    description=msg_string)
                decision.save()
                self._print_if_verbose(verbosity, "User '%s' created decision #%s via email" % (user, decision.id))                
                logger.info("User '%s' created decision #%s via email" % (user, decision.id))

            else:
                logger.error("[EMAIL REJECTED] From '%s' Reason: Email was not in reply to a notification and body didn't contain keyword 'proposal'" \
                             % mail['From'])
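
The management command above logs everything through a logger named after the Django app ('econsensus'), reporting rejections with logger.error() and successes with logger.info(). Stripped of the Django models, the pattern looks roughly like this; the reject/accept helpers are invented for illustration:

import logging

logger = logging.getLogger('econsensus')

def reject(sender, reason):
    # Mirrors the "[EMAIL REJECTED]" error messages in the example above.
    logger.error("[EMAIL REJECTED] From '%s' Reason: %s", sender, reason)

def accept(user, decision_id):
    logger.info("User '%s' added feedback via email to decision #%s", user, decision_id)

logging.basicConfig(level=logging.INFO)
reject('someone@example.com', 'Email payload empty')
accept('alice', 42)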

Example 136

Project: FanFicFare Source File: cli.py
def main(argv=None,
         parser=None,
         passed_defaultsini=None,
         passed_personalini=None):
    if argv is None:
        argv = sys.argv[1:]
    # read in args, anything starting with -- will be treated as --<variable>=<value>
    if not parser:
        parser = OptionParser('usage: %prog [options] [STORYURL]...')
    parser.add_option('-f', '--format', dest='format', default='epub',
                      help='write story as FORMAT, epub(default), mobi, text or html', metavar='FORMAT')

    if passed_defaultsini:
        config_help = 'read config from specified file(s) in addition to calibre plugin personal.ini, ~/.fanficfare/personal.ini, and ./personal.ini'
    else:
        config_help = 'read config from specified file(s) in addition to ~/.fanficfare/defaults.ini, ~/.fanficfare/personal.ini, ./defaults.ini, and ./personal.ini'
    parser.add_option('-c', '--config',
                      action='append', dest='configfile', default=None,
                      help=config_help, metavar='CONFIG')
    parser.add_option('-b', '--begin', dest='begin', default=None,
                      help='Begin with Chapter START', metavar='START')
    parser.add_option('-e', '--end', dest='end', default=None,
                      help='End with Chapter END', metavar='END')
    parser.add_option('-o', '--option',
                      action='append', dest='options',
                      help='set an option NAME=VALUE', metavar='NAME=VALUE')
    parser.add_option('-m', '--meta-only',
                      action='store_true', dest='metaonly',
                      help='Retrieve metadata and stop.  Or, if --update-epub, update metadata title page only.', )
    parser.add_option('-u', '--update-epub',
                      action='store_true', dest='update',
                      help='Update an existing epub(if present) with new chapters.  Give either epub filename or story URL.', )
    parser.add_option('--update-cover',
                      action='store_true', dest='updatecover',
                      help='Update cover in an existing epub, otherwise existing cover (if any) is used on update.  Only valid with --update-epub.', )
    parser.add_option('--unnew',
                      action='store_true', dest='unnew',
                      help='Remove (new) chapter marks left by mark_new_chapters setting.', )
    parser.add_option('--force',
                      action='store_true', dest='force',
                      help='Force overwrite of an existing epub, download and overwrite all chapters.', )
    parser.add_option('-i', '--infile',
                      help='Give a filename to read for URLs (and/or existing EPUB files with --update-epub).',
                      dest='infile', default=None,
                      metavar='INFILE')

    parser.add_option('-l', '--list',
                      dest='list', default=None, metavar='URL',
                      help='Get list of valid story URLs from page given.', )
    parser.add_option('-n', '--normalize-list',
                      dest='normalize', default=None, metavar='URL',
                      help='Get list of valid story URLs from page given, but normalized to standard forms.', )
    parser.add_option('--download-list',
                      dest='downloadlist', default=None, metavar='URL',
                      help='Download story URLs retrieved from page given.  Update existing EPUBs if used with --update-epub.', )

    parser.add_option('--imap',
                      action='store_true', dest='imaplist',
                      help='Get list of valid story URLs from unread email from IMAP account configured in ini.', )

    parser.add_option('--download-imap',
                      action='store_true', dest='downloadimap',
                      help='Download valid story URLs from unread email from IMAP account configured in ini.  Update existing EPUBs if used with --update-epub.', )

    parser.add_option('-s', '--sites-list',
                      action='store_true', dest='siteslist', default=False,
                      help='Get list of valid story URLs examples.', )
    parser.add_option('-d', '--debug',
                      action='store_true', dest='debug',
                      help='Show debug and notice output.', )
    parser.add_option('-v', '--version',
                      action='store_true', dest='version',
                      help='Display version and quit.', )

    options, args = parser.parse_args(argv)

    if options.version:
        print("Version: %s" % version)
        return

    if not options.debug:
        logger = logging.getLogger('fanficfare')
        logger.setLevel(logging.WARNING)

    list_only = any((options.imaplist,
                     options.siteslist,
                     options.list,
                     options.normalize,
                     ))

    if list_only and (args or any((options.downloadimap,
                                   options.downloadlist))):
        parser.error('Incorrect arguments: Cannot download and list URLs at the same time.')

    if options.siteslist:
        for site, examples in adapters.getSiteExamples():
            print '\n#### %s\nExample URLs:' % site
            for u in examples:
                print '  * %s' % u
        return

    if options.update and options.format != 'epub':
        parser.error('-u/--update-epub only works with epub')

    if options.unnew and options.format != 'epub':
        parser.error('--unnew only works with epub')

    urls=args

    if not list_only and not (args or any((options.infile,
                                           options.downloadimap,
                                           options.downloadlist))):
        parser.print_help();
        return
    
    if options.list:
        configuration = get_configuration(options.list,
                                          passed_defaultsini,
                                          passed_personalini,options)
        retlist = get_urls_from_page(options.list, configuration)
        print '\n'.join(retlist)

    if options.normalize:
        configuration = get_configuration(options.normalize,
                                          passed_defaultsini,
                                          passed_personalini,options)
        retlist = get_urls_from_page(options.normalize, configuration,normalize=True)
        print '\n'.join(retlist)

    if options.downloadlist:
        configuration = get_configuration(options.downloadlist,
                                          passed_defaultsini,
                                          passed_personalini,options)
        retlist = get_urls_from_page(options.downloadlist, configuration)
        urls.extend(retlist)

    if options.imaplist or options.downloadimap:
        # list doesn't have a supported site.
        configuration = get_configuration('test1.com',passed_defaultsini,passed_personalini,options)
        markread = configuration.getConfig('imap_mark_read') == 'true' or \
            (configuration.getConfig('imap_mark_read') == 'downloadonly' and options.downloadimap)
        retlist = get_urls_from_imap(configuration.getConfig('imap_server'),
                                     configuration.getConfig('imap_username'),
                                     configuration.getConfig('imap_password'),
                                     configuration.getConfig('imap_folder'),
                                     markread)

        if options.downloadimap:
            urls.extend(retlist)
        else:
            print '\n'.join(retlist)

    # for passing in a file list
    if options.infile:
        with open(options.infile,"r") as infile:
            #print "File exists and is readable"
            for url in infile:
                if '#' in url:
                    url = url[:url.find('#')].strip()
                url = url.strip()
                if len(url) > 0:
                    #print "URL: (%s)"%url
                    urls.append(url)

    if not list_only:
        if len(urls) < 1:
            print "No valid story URLs found"
        else:
            for url in urls:
                try:
                    do_download(url,
                                options,
                                passed_defaultsini,
                                passed_personalini)
                #print("pagecache:%s"%options.pagecache.keys())
                except Exception, e:
                    if len(urls) == 1:
                        raise
                    print "URL(%s) Failed: Exception (%s). Run URL individually for more detail."%(url,e)
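
The call worth noting above is logging.getLogger('fanficfare'): naming the package's logger lets the CLI raise its level to WARNING whenever --debug is absent, silencing the library's chatter without touching the root logger. A minimal, hypothetical sketch of that pattern (the parser and option names here are illustrative, not fanficfare's own):

import logging
import optparse

def main(argv=None):
    logging.basicConfig(level=logging.DEBUG,
                        format='%(name)s %(levelname)s: %(message)s')
    parser = optparse.OptionParser()
    parser.add_option('-d', '--debug', action='store_true', dest='debug',
                      help='Show debug and notice output.')
    options, args = parser.parse_args(argv)
    if not options.debug:
        # Without --debug, drop the library's DEBUG/INFO records;
        # warnings and errors still get through.
        logging.getLogger('fanficfare').setLevel(logging.WARNING)
    logging.getLogger('fanficfare').info('hidden unless --debug is given')
    logging.getLogger('fanficfare').warning('always shown')

if __name__ == '__main__':
    main()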

Example 137

Project: disk_perf_test_tool Source File: daemonize.py
    def start(self):
        """ start method
        Main daemonization process.
        """
        # If the pidfile already exists, read the old pid from it so that we
        # can restore it if locking fails, because the locking attempt somehow
        # purges the file contents.
        if os.path.isfile(self.pid):
            with open(self.pid, "r") as old_pidfile:
                old_pid = old_pidfile.read()
        # Create a lockfile so that only one instance of this daemon is
        # running at any time.
        try:
            lockfile = open(self.pid, "w")
        except IOError:
            print("Unable to create the pidfile.")
            sys.exit(1)
        try:
            # Try to get an exclusive lock on the file. This will fail if
            # another process has the file
            # locked.
            fcntl.flock(lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except IOError:
            print("Unable to lock on the pidfile.")
            # We need to overwrite the pidfile if we got here.
            with open(self.pid, "w") as pidfile:
                pidfile.write(old_pid)
            sys.exit(1)

        # Fork, creating a new process for the child.
        process_id = os.fork()
        if process_id < 0:
            # Fork error. Exit badly.
            sys.exit(1)
        elif process_id != 0:
            # This is the parent process. Exit.
            sys.exit(0)
        # This is the child process. Continue.

        # Stop listening for signals that the parent process receives.
        # This is done by getting a new process id.
        # setpgrp() is an alternative to setsid().
        # setsid puts the process in a new parent group and detaches
        # its controlling terminal.
        process_id = os.setsid()
        if process_id == -1:
            # Uh oh, there was a problem.
            sys.exit(1)

        # Add lockfile to self.keep_fds.
        self.keep_fds.append(lockfile.fileno())

        # Close all file descriptors, except the ones mentioned in
        # self.keep_fds.
        devnull = "/dev/null"
        if hasattr(os, "devnull"):
            # Python has set os.devnull on this system; use it, since it
            # might be different from /dev/null.
            devnull = os.devnull

        if self.auto_close_fds:
            for fd in range(3, resource.getrlimit(resource.RLIMIT_NOFILE)[0]):
                if fd not in self.keep_fds:
                    try:
                        os.close(fd)
                    except OSError:
                        pass

        devnull_fd = os.open(devnull, os.O_RDWR)
        os.dup2(devnull_fd, 0)
        os.dup2(devnull_fd, 1)
        os.dup2(devnull_fd, 2)

        if self.logger is None:
            # Initialize logging.
            self.logger = logging.getLogger(self.app)
            self.logger.setLevel(logging.DEBUG)
            # Display log messages only on defined handlers.
            self.logger.propagate = False

            # Initialize syslog.
            # It will correctly work on OS X, Linux and FreeBSD.
            if sys.platform == "darwin":
                syslog_address = "/var/run/syslog"
            else:
                syslog_address = "/dev/log"

            # We will continue with syslog initialization only if the machine
            # we are running on actually has such capabilities.
            if os.path.isfile(syslog_address):
                syslog = logging.handlers.SysLogHandler(syslog_address)
                if self.verbose:
                    syslog.setLevel(logging.DEBUG)
                else:
                    syslog.setLevel(logging.INFO)
                # Try to mimic to normal syslog messages.
                format_t = "%(asctime)s %(name)s: %(message)s"
                formatter = logging.Formatter(format_t,
                                              "%b %e %H:%M:%S")
                syslog.setFormatter(formatter)

                self.logger.addHandler(syslog)

        # Set umask to default to safe file permissions when running
        # as a root daemon. 027 is an
        # octal number which we are typing as 0o27 for Python3 compatibility.
        os.umask(0o27)

        # Change to a known directory. If this isn't done, starting a daemon
        # in a subdirectory that
        # needs to be deleted results in "directory busy" errors.
        os.chdir("/")

        # Execute privileged action
        privileged_action_result = self.privileged_action()
        if not privileged_action_result:
            privileged_action_result = []

        # Change gid
        if self.group:
            try:
                gid = grp.getgrnam(self.group).gr_gid
            except KeyError:
                self.logger.error("Group {0} not found".format(self.group))
                sys.exit(1)
            try:
                os.setgid(gid)
            except OSError:
                self.logger.error("Unable to change gid.")
                sys.exit(1)

        # Change uid
        if self.user:
            try:
                uid = pwd.getpwnam(self.user).pw_uid
            except KeyError:
                self.logger.error("User {0} not found.".format(self.user))
                sys.exit(1)
            try:
                os.setuid(uid)
            except OSError:
                self.logger.error("Unable to change uid.")
                sys.exit(1)

        try:
            lockfile.write("%s" % (os.getpid()))
            lockfile.flush()
        except IOError:
            self.logger.error("Unable to write pid to the pidfile.")
            print("Unable to write pid to the pidfile.")
            sys.exit(1)

        # Set custom action on SIGTERM.
        signal.signal(signal.SIGTERM, self.sigterm)
        atexit.register(self.exit)

        self.logger.warn("Starting daemon.")

        self.action(*privileged_action_result)
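
The logging piece of this example boils down to: get a logger named after the app, keep it at DEBUG, stop propagation so only explicitly added handlers emit, and attach a SysLogHandler when a syslog socket is available. A condensed, hypothetical sketch of just that part (the fork/setsid/umask machinery is omitted; the sketch checks with os.path.exists since /dev/log is typically a socket rather than a regular file):

import logging
import logging.handlers
import os
import sys

def make_daemon_logger(app, verbose=False):
    logger = logging.getLogger(app)
    logger.setLevel(logging.DEBUG)
    logger.propagate = False  # only handlers added here emit records

    # /var/run/syslog on macOS, /dev/log elsewhere.
    syslog_address = "/var/run/syslog" if sys.platform == "darwin" else "/dev/log"
    if os.path.exists(syslog_address):
        handler = logging.handlers.SysLogHandler(syslog_address)
        handler.setLevel(logging.DEBUG if verbose else logging.INFO)
        handler.setFormatter(logging.Formatter(
            "%(asctime)s %(name)s: %(message)s", "%b %e %H:%M:%S"))
        logger.addHandler(handler)
    return logger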

Example 138

Project: home-assistant Source File: device_sun_light_trigger.py
def setup(hass, config):
    """The triggers to turn lights on or off based on device presence."""
    logger = logging.getLogger(__name__)
    device_tracker = get_component('device_tracker')
    group = get_component('group')
    light = get_component('light')
    sun = get_component('sun')

    disable_turn_off = config[DOMAIN].get(CONF_DISABLE_TURN_OFF)
    light_group = config[DOMAIN].get(CONF_LIGHT_GROUP,
                                     light.ENTITY_ID_ALL_LIGHTS)
    light_profile = config[DOMAIN].get(CONF_LIGHT_PROFILE)
    device_group = config[DOMAIN].get(CONF_DEVICE_GROUP,
                                      device_tracker.ENTITY_ID_ALL_DEVICES)
    device_entity_ids = group.get_entity_ids(hass, device_group,
                                             device_tracker.DOMAIN)

    if not device_entity_ids:
        logger.error("No devices found to track")
        return False

    # Get the light IDs from the specified group
    light_ids = group.get_entity_ids(hass, light_group, light.DOMAIN)

    if not light_ids:
        logger.error("No lights found to turn on")
        return False

    def calc_time_for_light_when_sunset():
        """Calculate the time when to start fading lights in when sun sets.

        Returns None if no next_setting data available.
        """
        next_setting = sun.next_setting(hass)
        if not next_setting:
            return None
        return next_setting - LIGHT_TRANSITION_TIME * len(light_ids)

    def async_turn_on_before_sunset(light_id):
        """Helper function to turn on lights.

        Speed is slow if there are devices home and the light is not on yet.
        """
        if not device_tracker.is_on(hass) or light.is_on(hass, light_id):
            return
        light.async_turn_on(hass, light_id,
                            transition=LIGHT_TRANSITION_TIME.seconds,
                            profile=light_profile)

    # Track every time sun rises so we can schedule a time-based
    # pre-sun set event
    @track_state_change(sun.ENTITY_ID, sun.STATE_BELOW_HORIZON,
                        sun.STATE_ABOVE_HORIZON)
    @callback
    def schedule_lights_at_sun_set(hass, entity, old_state, new_state):
        """The moment sun sets we want to have all the lights on.

        We will schedule to have each light start after one another
        and slowly transition in.
        """
        start_point = calc_time_for_light_when_sunset()
        if not start_point:
            return

        def async_turn_on_factory(light_id):
            """Lambda can keep track of function parameters.

            If a lambda were placed directly in the statement below, it would
            capture the loop variable and only the last light would be turned on.
            """
            @callback
            def async_turn_on_light(now):
                """Turn on specific light."""
                async_turn_on_before_sunset(light_id)

            return async_turn_on_light

        for index, light_id in enumerate(light_ids):
            track_point_in_time(hass, async_turn_on_factory(light_id),
                                start_point + index * LIGHT_TRANSITION_TIME)

    # If the sun is already above horizon schedule the time-based pre-sun set
    # event.
    if sun.is_on(hass):
        schedule_lights_at_sun_set(hass, None, None, None)

    @track_state_change(device_entity_ids, STATE_NOT_HOME, STATE_HOME)
    @callback
    def check_light_on_dev_state_change(hass, entity, old_state, new_state):
        """Handle tracked device state changes."""
        # pylint: disable=unused-variable
        lights_are_on = group.is_on(hass, light_group)

        light_needed = not (lights_are_on or sun.is_on(hass))

        # These variables are needed for the elif check
        now = dt_util.now()
        start_point = calc_time_for_light_when_sunset()

        # Do we need lights?
        if light_needed:
            logger.info("Home coming event for %s. Turning lights on", entity)
            light.async_turn_on(hass, light_ids, profile=light_profile)

        # Are we in the time span where we would turn on the lights
        # if someone were home?
        # Check this by seeing if the current time is later than the point
        # in time when we would start turning the lights on.
        elif (start_point and
              start_point < now < sun.next_setting(hass)):

            # Check for every light if it would be on if someone was home
            # when the fading in started and turn it on if so
            for index, light_id in enumerate(light_ids):
                if now > start_point + index * LIGHT_TRANSITION_TIME:
                    light.async_turn_on(hass, light_id)

                else:
                    # If this light has not been turned on yet, neither will
                    # any of the following ones, so break.
                    break

    if not disable_turn_off:
        @track_state_change(device_group, STATE_HOME, STATE_NOT_HOME)
        @callback
        def turn_off_lights_when_all_leave(hass, entity, old_state, new_state):
            """Handle device group state change."""
            # pylint: disable=unused-variable
            if not group.is_on(hass, light_group):
                return

            logger.info(
                "Everyone has left but there are lights on. Turning them off")
            light.async_turn_off(hass, light_ids)

    return True
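
Here logging.getLogger(__name__) gives the component a module-scoped logger, and setup() uses it to explain why it is bailing out (returning False) when required entities are missing. A stripped-down, generic sketch of that idiom; the config handling is a hypothetical stand-in, not the Home Assistant API:

import logging

def setup(config):
    logger = logging.getLogger(__name__)
    device_ids = config.get("device_ids", [])
    if not device_ids:
        logger.error("No devices found to track")
        return False
    logger.info("Tracking %d devices", len(device_ids))
    return True

# setup({"device_ids": ["device_tracker.phone"]}) -> True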

Example 139

Project: genmod Source File: get_batches.py
def get_batches(variants, batch_queue, header, vep=False, compound_mode=False,
                results_queue=None, annotation_keyword='Annotation'):
    """
    Create variant batches based on their annotation and put them into the 
    batch queue.
    
    Variants are given a new 'annotation' field in the variant dictionary and
    also an 'exonic' field.
    get_batches will then use the annotation to search for sequences of variants
    with overlapping annotations. These are collected into one batch and gets
    put into a queue.
    Variants that are in between features will be in their own batch.
    
    Arguments:
         variants (Iterator): An iterator that returns variant dictionaries
         batch_queue (Queue): A queue where the batches will be put
         header (HeaderParser): A HeaderParser object
         vep (bool): If variant is annotated with vep
         compound_mode (bool): If only compounds should be used
         results_queue (Queue): A queue where variants can be put for printing
    
    Returns:
         Does not return; puts the results in a queue instead
    """
    logger = logging.getLogger(__name__)
    
    logger.debug("Set beginning to True")
    beginning = True
    logger.debug("Create first empty batch")
    # A batch is an ordered dictionary with variants
    batch = OrderedDict()
    new_chrom = None
    current_chrom = None
    current_features = []
    chromosomes = []
    
    start_parsing_time = datetime.now()
    start_chrom_time = start_parsing_time
    start_twenty_time = start_parsing_time
    
    nr_of_variants = 0
    nr_of_batches = 0
    
    header_line = header.header
    vep_header = header.vep_columns
    logger.info("Start parsing the variants")
    
    for line in variants:
        if not line.startswith('#'):
            compound_variant = False
            add_variant = True
            
            variant = get_variant_dict(line, header_line)
            variant_id = get_variant_id(variant)
            variant['variant_id'] = variant_id
            variant['info_dict'] = get_info_dict(variant['INFO'])
            
            if compound_mode:
                if not variant['info_dict'].get('Compounds'):
                    add_variant = False
            
            if vep:
                variant['vep_info'] = get_vep_dict(
                    vep_string=variant['info_dict']['CSQ'], 
                    vep_header=vep_header,
                    allele=variant['ALT'].split(',')[0]
                    )
            
            logger.debug("Checking variant {0}".format(variant_id))

            nr_of_variants += 1
            new_chrom = variant['CHROM']
            if new_chrom.startswith('chr'):
                new_chrom = new_chrom[3:]

            logger.debug("Update new chrom to {0}".format(new_chrom))

            new_features = get_annotation(
                variant = variant, 
                vep = vep,
                annotation_key = annotation_keyword
            )
            logger.debug("Adding {0} to variant {1}".format(
                ', '.join(new_features), variant_id
            ))

            variant['annotation'] = new_features

            if nr_of_variants % 20000 == 0:
                logger.info("{0} variants parsed".format(nr_of_variants))
                logger.info("Last 20.000 took {0} to parse.".format(
                    str(datetime.now() - start_twenty_time)))
                start_twenty_time = datetime.now()

            if beginning:
                logger.debug("First variant.")
                current_features = new_features

                if add_variant:
                    logger.debug("Adding {0} to variant batch".format(variant_id))
                    batch[variant_id] = variant
                else:
                    results_queue.put(variant)

                logger.debug("Updating current chrom to {0}".format(new_chrom))
                current_chrom = new_chrom

                chromosomes.append(current_chrom)
                logger.debug("Adding chr {0} to chromosomes".format(new_chrom)) 
                
                beginning = False
                logger.debug("Updating beginning to False")
                
            else:
                # If we should put the batch in the queue:
                logger.debug("Updating send to True") 
                send = True
                
                # Check if the variant overlaps any features
                if len(new_features) != 0:
                    # Check if the features overlap the previous variant's features
                    if new_features.intersection(current_features):
                        logger.debug("Set send to False since variant features overlap") 
                        send = False
            
                # If we are at a new chromosome we finish the current batch:
                if new_chrom != current_chrom:
                    if current_chrom not in chromosomes:
                        chromosomes.append(current_chrom)
                    logger.debug("Adding chr {0} to chromosomes".format(current_chrom))
                    # New chromosome means new batch
                    send = True
                    logger.info("Chromosome {0} parsed. Time to parse"\
                                " chromosome: {1}".format(
                                current_chrom, datetime.now()-start_chrom_time))
                    start_chrom_time = datetime.now()
                    current_chrom = new_chrom
            
                if send:
                    # Put the job in the queue
                    if len(batch) > 0:
                        logger.debug("Adding batch in queue")
                        batch_queue.put(batch)
                        nr_of_batches += 1
                    #Reset the variables
                    current_features = new_features
                    logger.debug("Initializing empty batch") 
                    batch = {}
                else:
                    current_features = current_features.union(new_features)
                
                if add_variant:
                    logger.debug("Adding variant {0} to batch".format(variant_id)) 
                    batch[variant_id] = variant
                else:
                    results_queue.put(variant)
    
    if current_chrom not in chromosomes:
        logger.debug("Adding chr {0} to chromosomes".format(current_chrom))
        chromosomes.append(current_chrom)

    logger.info("Chromosome {0} parsed. Time to parse"\
                " chromosome: {1}".format(
                current_chrom, datetime.now()-start_chrom_time))
    
    if len(batch) > 0:
        nr_of_batches += 1
        batch_queue.put(batch)
        logger.debug("Adding batch to queue") 
    
    
    logger.info("Variants parsed. Time to parse variants: {0}".format(
        str(datetime.now() - start_parsing_time)
    ))
    
    logger.info("Number of variants in variant file: {0}".format(nr_of_variants))
    logger.info("Number of batches created: {0}".format(nr_of_batches))
    
    
    
    return chromosomes
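
The module logger obtained with logging.getLogger(__name__) narrates the batching: debug messages trace each decision and info messages summarise progress, while finished batches are put on a queue. A much-reduced, hypothetical sketch of that shape (the overlap test is deliberately simplified and the names are illustrative):

import logging
from collections import OrderedDict
from queue import Queue

def batch_by_overlap(items, batch_queue):
    """Group consecutive items whose feature sets overlap into one batch."""
    logger = logging.getLogger(__name__)
    batch = OrderedDict()
    current_features = set()
    for item_id, features in items:
        if batch and not (features & current_features):
            logger.debug("Putting batch of %d items on queue", len(batch))
            batch_queue.put(batch)
            batch = OrderedDict()
            current_features = set()
        batch[item_id] = features
        current_features |= features
    if batch:
        batch_queue.put(batch)
    logger.info("Done batching")

q = Queue()
batch_by_overlap([("a", {"gene1"}), ("b", {"gene1", "gene2"}), ("c", {"gene3"})], q)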

Example 140

Project: jsnapy Source File: jsnapy.py
Function: init
    def __init__(self):
        """
        taking parameters from command line
        """
        self.q = Queue.Queue()
        self.snap_q = Queue.Queue()
        self.log_detail = {'hostname': None}
        self.snap_del = False
        self.logger = logging.getLogger(__name__)
        self.parser = argparse.ArgumentParser(
            formatter_class=argparse.RawTextHelpFormatter,
            description=textwrap.dedent('''\
                                        Tool to capture snapshots and compare them
                                        It supports four subcommands:
                                         --snap, --check, --snapcheck, --diff
                                        1. Take snapshot:
                                                jsnapy --snap pre_snapfile -f main_configfile
                                        2. Compare snapshots:
                                                jsnapy --check post_snapfile pre_snapfile -f main_configfile
                                        3. Compare current configuration:
                                                jsnapy --snapcheck snapfile -f main_configfile
                                        4. Take diff without specifying test case:
                                                jsnapy --diff pre_snapfile post_snapfile -f main_configfile
                                            '''),
            usage="\nThis tool enables you to capture and audit runtime environment of "
            "\nnetworked devices running the Junos operating system (Junos OS)\n")

        group = self.parser.add_mutually_exclusive_group()
        # for mutually exclusive gp, can not use two or more options at a time
        group.add_argument(
            '--snap',
            action='store_true',
            help="take the snapshot for commands specified in test file")
        group.add_argument(
            '--check',
            action='store_true',
            help=" compare pre and post snapshots based on test operators specified in test file")
        
        group.add_argument(
            '--snapcheck',
            action='store_true',
            help='check current snapshot based on test file')
        
      #########
      # will support it later
      # for windows
      ########
      #  group.add_argument(
      #      "--init",
      #      action="store_true",
      #      help="generate init folders: snapshots, configs and main.yml",
      #  )
      #########

        group.add_argument(
            "--diff",
            action="store_true",
            help="display difference between two snapshots"
        )
        group.add_argument(
            "-V", "--version",
            action="store_true",
            help="displays version"
        )

        self.parser.add_argument(
            "pre_snapfile",
            nargs='?',
            help="pre snapshot filename")       # make it optional
        self.parser.add_argument(
            "post_snapfile",
            nargs='?',
            help="post snapshot filename",
            type=str)       # make it optional
        self.parser.add_argument(
            "-f", "--file",
            help="config file to take snapshot",
            type=str)
        self.parser.add_argument(
            "--local",
            action="store_true",
            help="whether to run snapcheck on local snapshot")
        self.parser.add_argument(
            "--folder",
            help="custom directory path for lookup",
            type=str)
        self.parser.add_argument("-t", "--hostname", help="hostname", type=str)
        self.parser.add_argument(
            "-p",
            "--passwd",
            help="password to login",
            type=str)
        self.parser.add_argument(
            "-l",
            "--login",
            help="username to login",
            type=str)
        self.parser.add_argument(
            "-P",
            "--port",
            help="port no to connect to device",
            type=int
        )
        self.parser.add_argument(
            "-v", "--verbosity",
            action = "count",
            help= textwrap.dedent('''\
            Set verbosity
            -v: Debug level messages
            -vv: Info level messages
            -vvv: Warning level messages
            -vvvv: Error level messages
            -vvvvv: Critical level messages''')
        )
       # self.parser.add_argument(
       #     "-m",
       #     "--mail",
       #     help="mail result to given id",
       #     type=str)
        # self.parser.add_argument(
        #    "-o",
        #    "--overwrite",
        #    action='store_true',
        #    help="overwrite directories and files generated by init",
        #)

        #self.args = self.parser.parse_args()
        self.args, unknown = self.parser.parse_known_args()

        self.db = dict()
        self.db['store_in_sqlite'] = False
        self.db['check_from_sqlite'] = False
        self.db['db_name'] = ""
        self.db['first_snap_id'] = None
        self.db['second_snap_id'] = None
        
        DirStore.custom_dir=self.args.folder
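
In this constructor the class keeps its own logging.getLogger(__name__) logger alongside the argparse parser, and the -v/--verbosity count is meant to select how noisy that logger is. A small sketch of one plausible way to wire the count to a level; the mapping follows the help text above and is an assumption, not jsnapy's actual implementation:

import argparse
import logging

class Tool(object):
    LEVELS = [logging.DEBUG, logging.INFO, logging.WARNING,
              logging.ERROR, logging.CRITICAL]

    def __init__(self, argv=None):
        self.logger = logging.getLogger(__name__)
        parser = argparse.ArgumentParser()
        parser.add_argument("-v", "--verbosity", action="count", default=0)
        self.args, unknown = parser.parse_known_args(argv)
        if self.args.verbosity:
            # -v -> DEBUG, -vv -> INFO, ..., -vvvvv -> CRITICAL (clamped).
            index = min(self.args.verbosity, len(self.LEVELS)) - 1
            self.logger.setLevel(self.LEVELS[index])

tool = Tool(["-vv"])   # logger level becomes INFO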

Example 141

Project: grokmirror Source File: fsck.py
def fsck_mirror(name, config, verbose=False, force=False):
    global logger
    logger = logging.getLogger(name)
    logger.setLevel(logging.DEBUG)

    if 'log' in config.keys():
        ch = logging.FileHandler(config['log'])
        formatter = logging.Formatter(
            "[%(process)d] %(asctime)s - %(levelname)s - %(message)s")
        ch.setFormatter(formatter)
        loglevel = logging.INFO

        if 'loglevel' in config.keys():
            if config['loglevel'] == 'debug':
                loglevel = logging.DEBUG

        ch.setLevel(loglevel)
        logger.addHandler(ch)

    ch = logging.StreamHandler()
    formatter = logging.Formatter('%(message)s')
    ch.setFormatter(formatter)

    if verbose:
        ch.setLevel(logging.INFO)
    else:
        ch.setLevel(logging.CRITICAL)

    logger.addHandler(ch)

    # push it into grokmirror to override the default logger
    grokmirror.logger = logger

    logger.info('Running grok-fsck for [%s]' % name)

    # Lock the tree to make sure we only run one instance
    logger.debug('Attempting to obtain lock on %s' % config['lock'])
    flockh = open(config['lock'], 'w')
    try:
        lockf(flockh, LOCK_EX | LOCK_NB)
    except IOError:
        logger.info('Could not obtain exclusive lock on %s' % config['lock'])
        logger.info('Assuming another process is running.')
        return 0

    manifest = grokmirror.read_manifest(config['manifest'])

    if os.path.exists(config['statusfile']):
        logger.info('Reading status from %s' % config['statusfile'])
        stfh = open(config['statusfile'], 'r')
        try:
            # Format of the status file:
            #  {
            #    '/full/path/to/repository': {
            #      'lastcheck': 'YYYY-MM-DD' or 'never',
            #      'nextcheck': 'YYYY-MM-DD',
            #      'lastrepack': 'YYYY-MM-DD',
            #      'fingerprint': 'sha-1',
            #      's_elapsed': seconds,
            #      'quick_repack_count': times,
            #    },
            #    ...
            #  }

            status = json.load(stfh)
        except:
            # Huai le! (it's broken!)
            logger.critical('Failed to parse %s' % config['statusfile'])
            lockf(flockh, LOCK_UN)
            flockh.close()
            return 1
    else:
        status = {}

    frequency = int(config['frequency'])

    today = datetime.datetime.today()

    # Go through the manifest and compare with status
    for gitdir in manifest.keys():
        fullpath = os.path.join(config['toplevel'], gitdir.lstrip('/'))
        if fullpath not in status.keys():
            # Newly added repository
            # Randomize next check between now and frequency
            delay = random.randint(0, frequency)
            nextdate = today + datetime.timedelta(days=delay)
            nextcheck = nextdate.strftime('%F')
            status[fullpath] = {
                'lastcheck': 'never',
                'nextcheck': nextcheck,
            }
            logger.info('Added new repository %s with next check on %s' % (
                gitdir, nextcheck))

    total_checked = 0
    total_elapsed = 0

    # Go through status and queue checks for all the dirs that are due today
    # (unless --force, which is EVERYTHING)
    todayiso = today.strftime('%F')
    for fullpath in status.keys():
        # Check to make sure it's still in the manifest
        gitdir = fullpath.replace(config['toplevel'], '', 1)
        gitdir = '/' + gitdir.lstrip('/')

        if gitdir not in manifest.keys():
            del status[fullpath]
            logger.info('Removed %s which is no longer in manifest' % gitdir)
            continue

        # If nextcheck is before today, set it to today
        # XXX: If a system comes up after being in downtime for a while, this
        #      may cause pain for them, so perhaps use randomization here?
        nextcheck = datetime.datetime.strptime(status[fullpath]['nextcheck'],
                                               '%Y-%m-%d')

        if force or nextcheck <= today:
            logger.debug('Preparing to check %s' % fullpath)
            # Calculate elapsed seconds
            startt = time.time()
            run_git_fsck(fullpath, config)
            total_checked += 1

            # Did the fingerprint change since last time we repacked?
            oldfpr = None
            if 'fingerprint' in status[fullpath].keys():
                oldfpr = status[fullpath]['fingerprint']

            fpr = grokmirror.get_repo_fingerprint(config['toplevel'], gitdir, force=True)

            if fpr != oldfpr or force:
                full_repack = False
                if not 'quick_repack_count' in status[fullpath].keys():
                    status[fullpath]['quick_repack_count'] = 0

                quick_repack_count = status[fullpath]['quick_repack_count']
                if 'full_repack_every' in config.keys():
                    # but did you set 'full_repack_flags' as well?
                    if 'full_repack_flags' not in config.keys():
                        logger.critical('full_repack_every is set, but not full_repack_flags')
                    else:
                        full_repack_every = int(config['full_repack_every'])
                        # is it anything insane?
                        if full_repack_every < 2:
                            full_repack_every = 2
                            logger.warning('full_repack_every is too low, forced to 2')

                        # is it time to trigger full repack?
                        # We -1 because if we want a repack every 10th time, then we need to trigger
                        # when current repack count is 9.
                        if quick_repack_count >= full_repack_every-1:
                            logger.debug('Time to do full repack on %s' % fullpath)
                            full_repack = True
                            quick_repack_count = 0
                            status[fullpath]['lastfullrepack'] = todayiso
                        else:
                            logger.debug('Repack count for %s not yet reached full repack trigger' % fullpath)
                            quick_repack_count += 1

                run_git_repack(fullpath, config, full_repack)
                run_git_prune(fullpath, config, manifest)
                status[fullpath]['lastrepack'] = todayiso
                status[fullpath]['quick_repack_count'] = quick_repack_count

            else:
                logger.debug('No changes to %s since last run, not repacking' % gitdir)

            endt = time.time()

            total_elapsed += endt-startt

            status[fullpath]['fingerprint'] = fpr
            status[fullpath]['lastcheck'] = todayiso
            status[fullpath]['s_elapsed'] = int(endt - startt)

            if force:
                # Use randomization for next check, again
                delay = random.randint(1, frequency)
            else:
                delay = frequency

            nextdate = today + datetime.timedelta(days=delay)
            status[fullpath]['nextcheck'] = nextdate.strftime('%F')

            # Write status file after each check, so if the process dies, we won't
            # have to recheck all the repos we've already checked
            logger.debug('Updating status file in %s' % config['statusfile'])
            stfh = open(config['statusfile'], 'w')
            json.dump(status, stfh, indent=2)
            stfh.close()

    if not total_checked:
        logger.info('No new repos to check.')
    else:
        logger.info('Repos checked: %s' % total_checked)
        logger.info('Total running time: %s s' % int(total_elapsed))

    lockf(flockh, LOCK_UN)
    flockh.close()
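
fsck_mirror() builds a named logger with two handlers at different thresholds: an optional FileHandler that records INFO (or DEBUG) to the configured log file, and a StreamHandler that stays quiet unless verbose. A compact sketch of just that wiring, with illustrative parameter names:

import logging

def setup_fsck_logger(name, logfile=None, verbose=False, debug=False):
    logger = logging.getLogger(name)
    logger.setLevel(logging.DEBUG)

    if logfile:
        fh = logging.FileHandler(logfile)
        fh.setFormatter(logging.Formatter(
            "[%(process)d] %(asctime)s - %(levelname)s - %(message)s"))
        fh.setLevel(logging.DEBUG if debug else logging.INFO)
        logger.addHandler(fh)

    ch = logging.StreamHandler()
    ch.setFormatter(logging.Formatter("%(message)s"))
    ch.setLevel(logging.INFO if verbose else logging.CRITICAL)
    logger.addHandler(ch)
    return logger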

Example 142

Project: portage Source File: main.py
def emirrordist_main(args):

	# The calling environment is ignored, so the program is
	# completely controlled by commandline arguments.
	env = {}

	if not sys.stdout.isatty():
		portage.output.nocolor()
		env['NOCOLOR'] = 'true'

	parser, options, args = parse_args(args)

	if options.version:
		sys.stdout.write("Portage %s\n" % portage.VERSION)
		return os.EX_OK

	config_root = options.config_root

	if options.repositories_configuration is not None:
		env['PORTAGE_REPOSITORIES'] = options.repositories_configuration

	settings = portage.config(config_root=config_root,
		local_config=False, env=env)

	default_opts = None
	if not options.ignore_default_opts:
		default_opts = settings.get('EMIRRORDIST_DEFAULT_OPTS', '').split()

	if default_opts:
		parser, options, args = parse_args(default_opts + args)

		settings = portage.config(config_root=config_root,
			local_config=False, env=env)

	if options.repo is None:
		if len(settings.repositories.prepos) == 2:
			for repo in settings.repositories:
				if repo.name != "DEFAULT":
					options.repo = repo.name
					break

		if options.repo is None:
			parser.error("--repo option is required")

	repo_path = settings.repositories.treemap.get(options.repo)
	if repo_path is None:
		parser.error("Unable to locate repository named '%s'" % (options.repo,))

	if options.jobs is not None:
		options.jobs = int(options.jobs)

	if options.load_average is not None:
		options.load_average = float(options.load_average)

	if options.failure_log is not None:
		options.failure_log = normalize_path(
			os.path.abspath(options.failure_log))

		parent_dir = os.path.dirname(options.failure_log)
		if not (os.path.isdir(parent_dir) and
			os.access(parent_dir, os.W_OK|os.X_OK)):
			parser.error(("--failure-log '%s' parent is not a "
				"writable directory") % options.failure_log)

	if options.success_log is not None:
		options.success_log = normalize_path(
			os.path.abspath(options.success_log))

		parent_dir = os.path.dirname(options.success_log)
		if not (os.path.isdir(parent_dir) and
			os.access(parent_dir, os.W_OK|os.X_OK)):
			parser.error(("--success-log '%s' parent is not a "
				"writable directory") % options.success_log)

	if options.scheduled_deletion_log is not None:
		options.scheduled_deletion_log = normalize_path(
			os.path.abspath(options.scheduled_deletion_log))

		parent_dir = os.path.dirname(options.scheduled_deletion_log)
		if not (os.path.isdir(parent_dir) and
			os.access(parent_dir, os.W_OK|os.X_OK)):
			parser.error(("--scheduled-deletion-log '%s' parent is not a "
				"writable directory") % options.scheduled_deletion_log)

		if options.deletion_db is None:
			parser.error("--scheduled-deletion-log requires --deletion-db")

	if options.deletion_delay is not None:
		options.deletion_delay = long(options.deletion_delay)
		if options.deletion_db is None:
			parser.error("--deletion-delay requires --deletion-db")

	if options.deletion_db is not None:
		if options.deletion_delay is None:
			parser.error("--deletion-db requires --deletion-delay")
		options.deletion_db = normalize_path(
			os.path.abspath(options.deletion_db))

	if options.temp_dir is not None:
		options.temp_dir = normalize_path(
			os.path.abspath(options.temp_dir))

		if not (os.path.isdir(options.temp_dir) and
			os.access(options.temp_dir, os.W_OK|os.X_OK)):
			parser.error(("--temp-dir '%s' is not a "
				"writable directory") % options.temp_dir)

	if options.distfiles is not None:
		options.distfiles = normalize_path(
			os.path.abspath(options.distfiles))

		if not (os.path.isdir(options.distfiles) and
			os.access(options.distfiles, os.W_OK|os.X_OK)):
			parser.error(("--distfiles '%s' is not a "
				"writable directory") % options.distfiles)
	else:
		parser.error("missing required --distfiles parameter")

	if options.mirror_overrides is not None:
		options.mirror_overrides = normalize_path(
			os.path.abspath(options.mirror_overrides))

		if not (os.access(options.mirror_overrides, os.R_OK) and
			os.path.isfile(options.mirror_overrides)):
			parser.error(
				"--mirror-overrides-file '%s' is not a readable file" %
				options.mirror_overrides)

	if options.distfiles_local is not None:
		options.distfiles_local = normalize_path(
			os.path.abspath(options.distfiles_local))

		if not (os.path.isdir(options.distfiles_local) and
			os.access(options.distfiles_local, os.W_OK|os.X_OK)):
			parser.error(("--distfiles-local '%s' is not a "
				"writable directory") % options.distfiles_local)

	if options.distfiles_db is not None:
		options.distfiles_db = normalize_path(
			os.path.abspath(options.distfiles_db))

	if options.tries is not None:
		options.tries = int(options.tries)

	if options.recycle_dir is not None:
		options.recycle_dir = normalize_path(
			os.path.abspath(options.recycle_dir))
		if not (os.path.isdir(options.recycle_dir) and
			os.access(options.recycle_dir, os.W_OK|os.X_OK)):
			parser.error(("--recycle-dir '%s' is not a "
				"writable directory") % options.recycle_dir)

	if options.recycle_db is not None:
		if options.recycle_dir is None:
			parser.error("--recycle-db requires "
				"--recycle-dir to be specified")
		options.recycle_db = normalize_path(
			os.path.abspath(options.recycle_db))

	if options.recycle_deletion_delay is not None:
		options.recycle_deletion_delay = \
			long(options.recycle_deletion_delay)

	if options.fetch_log_dir is not None:
		options.fetch_log_dir = normalize_path(
			os.path.abspath(options.fetch_log_dir))

		if not (os.path.isdir(options.fetch_log_dir) and
			os.access(options.fetch_log_dir, os.W_OK|os.X_OK)):
			parser.error(("--fetch-log-dir '%s' is not a "
				"writable directory") % options.fetch_log_dir)

	if options.whitelist_from:
		normalized_paths = []
		for x in options.whitelist_from:
			path = normalize_path(os.path.abspath(x))
			if not os.access(path, os.R_OK):
				parser.error("--whitelist-from '%s' is not readable" % x)
			if os.path.isfile(path):
				normalized_paths.append(path)
			elif os.path.isdir(path):
				for file in _recursive_file_list(path):
					if not os.access(file, os.R_OK):
						parser.error("--whitelist-from '%s' directory contains not readable file '%s'" % (x, file))
					normalized_paths.append(file)
			else:
				parser.error("--whitelist-from '%s' is not a regular file or a directory" % x)
		options.whitelist_from = normalized_paths

	if options.strict_manifests is not None:
		if options.strict_manifests == "y":
			settings.features.add("strict")
		else:
			settings.features.discard("strict")

	settings.lock()

	portdb = portage.portdbapi(mysettings=settings)

	# Limit ebuilds to the specified repo.
	portdb.porttrees = [repo_path]

	portage.util.initialize_logger()

	if options.verbose > 0:
		l = logging.getLogger()
		l.setLevel(l.getEffectiveLevel() - 10 * options.verbose)

	with Config(options, portdb,
		SchedulerInterface(global_event_loop())) as config:

		if not options.mirror:
			parser.error('No action specified')

		returncode = os.EX_OK

		if options.mirror:
			signum = run_main_scheduler(MirrorDistTask(config))
			if signum is not None:
				sys.exit(128 + signum)

	return returncode
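
The logging detail here is the verbosity adjustment: each -v lowers the root logger's effective level by 10, one standard step (WARNING to INFO to DEBUG, and so on). A minimal sketch of that arithmetic, assuming the root logger starts at the default WARNING and using basicConfig as a stand-in for portage.util.initialize_logger():

import logging

def apply_verbosity(verbose_count):
    logging.basicConfig(format="%(levelname)s: %(message)s")
    if verbose_count > 0:
        root = logging.getLogger()
        # Each -v subtracts 10: WARNING (30) -> INFO (20) -> DEBUG (10) -> ...
        root.setLevel(root.getEffectiveLevel() - 10 * verbose_count)

apply_verbosity(2)
logging.getLogger(__name__).debug("visible with two -v flags")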

Example 143

Project: RHEAS Source File: dssat.py
    def writeControlFile(self, modelpath, vsm, depths, startdate, gid, lat, lon, planting, fertildates, irrigdates):
        """Writes DSSAT control file for specific pixel."""
        log = logging.getLogger(__name__)
        if isinstance(vsm, list):
            vsm = (vsm * (int(self.nens / len(vsm)) + 1))[:self.nens]
        else:
            vsm = [vsm] * self.nens
        try:
            fin = open(self.basefile)
            blines = fin.readlines()
        except IOError:
            log.error("Error opening {0}".format(self.basefile))
            sys.exit()
        profiles = self._sampleSoilProfiles(gid)
        profiles = [p[0] for p in profiles]
        for ens in range(self.nens):
            sm = vsm[ens]
            fertilizers = fertildates
            irrigation = irrigdates
            prof = profiles[ens].split("\n")
            dz = map(lambda ln: float(ln.split()[0]), profiles[
                     ens].split("\n")[3:-1])
            smi = self._interpolateSoilMoist(sm, depths, dz)
            filename = "{0}/DSSAT{1}_{2:03d}.INP" .format(
                modelpath, self.nens, ens + 1)
            fout = open(filename, 'w')
            l = 0
            while l < len(blines):
                line = blines[l]
                if line.find("SIMULATION CONTROL") > 0:
                    fout.write(blines[l])
                    l += 1
                    dt = blines[l].split()[3]
                    fout.write(blines[l].replace(dt, "{0:04d}{1:03d}".format(
                        startdate.year, (startdate - date(startdate.year, 1, 1)).days + 1)))
                    l += 1
                elif line.find("FIELDS") > 0:
                    fout.write(blines[l])
                    fout.write(blines[l + 1])
                    blat = blines[l + 2].split()[0]
                    blon = blines[l + 2].split()[1]
                    fout.write(blines[
                               l + 2].replace(blat, "{0:8.5f}".format(lat)).replace(blon, "{0:10.5f}".format(lon)))
                    l += 3
                elif line.find("AUTOMATIC") > 0:
                    fout.write(blines[l])
                    dt1 = blines[l + 1].split()[0]
                    dt2 = blines[l + 1].split()[1]
                    # self.startyear, self.startmonth, self.startday)
                    dt = date(2012, 1, 1)
                    dts = "{0:04d}{1}".format(dt.year, dt.strftime("%j"))
                    fout.write(
                        blines[l + 1].replace(dt1, dts).replace(dt2, dts))
                    fout.write(blines[l + 2])
                    fout.write(blines[l + 3])
                    fout.write(blines[l + 4])
                    dt1 = blines[l + 5].split()[1]
                    fout.write(blines[l + 5].replace(dt1, dts))
                    l += 6
                elif line.find("INITIAL CONDITIONS") > 0:
                    fout.write(blines[l])
                    dt = blines[l + 1].split()[1]
                    fout.write(blines[l + 1].replace(dt, "{0:04d}{1:03d}".format(
                        startdate.year, int(startdate.strftime("%j")))))
                    l += 2
                    for lyr in range(len(dz)):
                        fout.write("{0:8.0f}{1:8.3f}{2:8.1f}{3:8.1f}\n".format(
                            dz[lyr], smi[0, lyr], 0.5, 0.1))
                    while blines[l].find("PLANTING") < 0:
                        l += 1
                elif line.find("WEATHER") == 0:
                    oldfile = line.split()[1]
                    fout.write(blines[l].replace(
                        oldfile, "WEATH{0:03d}.WTH" .format(ens + 1)))
                    l += 1
                elif line.find("PLANTING") >= 0:
                    fout.write(line)
                    toks = blines[l + 1].split()
                    olddate = toks[0]
                    try:
                        assert planting >= date(self.startyear, self.startmonth, self.startday) and planting <= date(
                            self.endyear, self.endmonth, self.endday)
                    except:
                        log.error("Planting date selected outside simulation period.")
                        sys.exit()
                    dts = "{0:04d}{1}".format(
                        planting.year, planting.strftime("%j"))
                    fout.write(blines[l + 1].replace(olddate, dts))
                    l += 2
                elif line.find("OUTPUT") == 0:
                    oldfile = line.split()[1]
                    fout.write(blines[l].replace(
                        oldfile, "OUTPT{0:03d}" .format(ens + 1)))
                    l += 1
                elif line.find("SOIL") > 0:
                    fout.write(blines[l])
                    for ln in range(len(prof) - 1):
                        fout.write(prof[ln] + "\n")
                    fout.write("\n")
                    for z in dz:
                        fout.write(
                            "{0:6.0f}   0.0   0.0   0.0   0.0   0.0   0.0   0.0   0.0   0.0   0.0   0.0   0.0   0.0   0.0   0.0\n".format(z))
                    while blines[l].find("CULTIVAR") < 0:
                        l += 1
                    l -= 1
                elif line.find("CULTIVAR") > 0:
                    fout.write(line)
                    if len(blines[l + 1].split()) < 5:
                        fout.write(blines[l + 1])
                    else:
                        cultivar = self._cultivar(ens, gid)  # lat, lon)
                        fout.write(cultivar + "\n")
                    l += 2
                elif line.find("IRRIGATION") >= 0:
                    fout.write(line)
                    fout.write(blines[l + 1])
                    toks = blines[l + 2].split()
                    olddate = toks[0]
                    amount = toks[2]
                    if irrigation is None:
                        it = "{0:04d}-{1:02d}-{2:02d}".format(
                            self.startyear, self.startmonth, self.startday)
                        irrigation = {it: 0.0}
                    for i in irrigation.keys():
                        dt = date(*map(int, i.split("-")))
                        try:
                            assert dt >= date(self.startyear, self.startmonth, self.startday) and dt <= date(
                                self.endyear, self.endmonth, self.endday)
                        except:
                            log.error("Irrigation date selected outside simulation period.")
                            sys.exit()
                        dts = "{0:04d}{1}".format(dt.year, dt.strftime("%j"))
                        fout.write(
                            blines[l + 2].replace(olddate, dts).replace(amount, str(irrigation[i])))
                    l += 3
                elif line.find("FERTILIZERS") >= 0:
                    fout.write(line)
                    toks = blines[l + 1].split()
                    olddate = toks[0]
                    amount = toks[3]
                    percent = toks[4]
                    if fertilizers is None:
                        if planting is not None:
                            fertilizers = {(planting + timedelta(10)).strftime("%Y-%m-%d"): [
                                amount, percent], (planting + timedelta(40)).strftime("%Y-%m-%d"): [amount, percent]}
                    for f in fertilizers.keys():
                        dt = date(*map(int, f.split("-")))
                        try:
                            assert dt >= date(self.startyear, self.startmonth, self.startday) and dt <= date(
                                self.endyear, self.endmonth, self.endday)
                        except:
                            log.error("Fertilization date selected outside simulation period.")
                            sys.exit()
                        dts = "{0:04d}{1}".format(dt.year, dt.strftime("%j"))
                        fout.write(blines[l + 1].replace(olddate, dts).replace(
                            amount, str(fertilizers[f][0])).replace(percent, str(fertilizers[f][1])))
                    l += 2
                else:
                    fout.write(line)
                    l += 1
            fout.close()
        return dz, smi
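
The logging use in this method is the log-then-exit pattern: fetch the module logger with logging.getLogger(__name__), report why the run cannot continue with log.error(), then stop with sys.exit(). A tiny, hypothetical sketch of that pattern (the path handling is illustrative):

import logging
import sys

def read_base_file(path):
    log = logging.getLogger(__name__)
    try:
        with open(path) as fin:
            return fin.readlines()
    except IOError:
        log.error("Error opening {0}".format(path))
        sys.exit(1)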

Example 144

Project: pan-python Source File: panwfapi.py
def main():
    try:
        signal.signal(signal.SIGPIPE, signal.SIG_DFL)
    except AttributeError:
        # Windows
        pass

#    set_encoding()
    options = parse_opts()

    if options['debug']:
        logger = logging.getLogger()
        if options['debug'] == 3:
            logger.setLevel(pan.wfapi.DEBUG3)
        elif options['debug'] == 2:
            logger.setLevel(pan.wfapi.DEBUG2)
        elif options['debug'] == 1:
            logger.setLevel(pan.wfapi.DEBUG1)

#        log_format = '%(levelname)s %(name)s %(message)s'
        log_format = '%(message)s'
        handler = logging.StreamHandler()
        formatter = logging.Formatter(log_format)
        handler.setFormatter(formatter)
        logger.addHandler(handler)

    if options['cafile'] or options['capath'] or options['ssl']:
        ssl_context = create_ssl_context(options['cafile'],
                                         options['capath'],
                                         options['ssl'])
    else:
        ssl_context = None

    try:
        wfapi = pan.wfapi.PanWFapi(tag=options['tag'],
                                   api_key=options['api_key'],
                                   hostname=options['hostname'],
                                   timeout=options['timeout'],
                                   http=options['http'],
                                   ssl_context=ssl_context)

    except pan.wfapi.PanWFapiError as msg:
        print('pan.wfapi.PanWFapi:', msg, file=sys.stderr)
        sys.exit(1)

    if options['debug'] > 2:
        print('wfapi.__str__()===>\n', wfapi, '\n<===',
              sep='', file=sys.stderr)

    try:
        hashes = process_hashes(options['hash'])

        if options['submit'] is not None:
            action = 'submit'
            kwargs = {}
            if os.path.isfile(options['submit']):
                kwargs['file'] = options['submit']
            else:
                o = urlparse(options['submit'])
                if options['debug']:
                    print(o, file=sys.stderr)
                if o.scheme == 'file':
                    if o.path and os.path.isfile(o.path):
                        kwargs['file'] = o.path
                    else:
                        print('Invalid URL: file not found:',
                              options['submit'], file=sys.stderr)
                        sys.exit(1)
                else:
                    if o.scheme in ['http', 'https', 'ftp']:
                        kwargs['url'] = options['submit']
                    else:
                        print('Invalid file or URL:',
                              options['submit'], file=sys.stderr)
                        sys.exit(1)

            wfapi.submit(**kwargs)
            print_status(wfapi, action)
            print_response(wfapi, options)

        if options['submit-link'] is not None:
            action = 'submit'
            kwargs = {}
            kwargs['links'] = process_arg(options['submit-link'], list=True)

            wfapi.submit(**kwargs)
            print_status(wfapi, action)
            print_response(wfapi, options)

        if options['change-request']:
            action = 'change-request'
            kwargs = {}
            if len(hashes) > 1:
                print('Only 1 hash allowed for %s' % action, file=sys.stderr)
                sys.exit(1)
            if len(hashes) == 1:
                kwargs['hash'] = hashes[0]
            if options['new-verdict'] is not None:
                kwargs['verdict'] = process_verdict(options['new-verdict'])
            if options['email'] is not None:
                kwargs['email'] = options['email']
            if options['comment'] is not None:
                kwargs['comment'] = process_arg(options['comment'])

            wfapi.change_request(**kwargs)
            print_status(wfapi, action)
            print_response(wfapi, options)

        if options['report']:
            action = 'report'
            kwargs = {}
            if len(hashes) > 1:
                print('Only 1 hash allowed for %s' % action, file=sys.stderr)
                sys.exit(1)
            if len(hashes) == 1:
                kwargs['hash'] = hashes[0]
            if options['format'] is not None:
                kwargs['format'] = options['format']

            wfapi.report(**kwargs)
            print_status(wfapi, action)
            print_response(wfapi, options)
            save_file(wfapi, options)

        if options['verdict']:
            kwargs = {}
            if len(hashes) == 1:
                action = 'verdict'
                kwargs['hash'] = hashes[0]
                wfapi.verdict(**kwargs)
            elif len(hashes) > 1:
                action = 'verdicts'
                kwargs['hashes'] = hashes
                wfapi.verdicts(**kwargs)
            else:
                action = 'verdict'
                wfapi.verdict(**kwargs)

            print_status(wfapi, action)
            print_response(wfapi, options)
            save_file(wfapi, options)

        if options['sample']:
            action = 'sample'
            kwargs = {}
            if len(hashes) > 1:
                print('Only 1 hash allowed for %s' % action, file=sys.stderr)
                sys.exit(1)
            if len(hashes) == 1:
                kwargs['hash'] = hashes[0]

            wfapi.sample(**kwargs)
            print_status(wfapi, action)
            print_response(wfapi, options)
            save_file(wfapi, options)

        if options['pcap']:
            action = 'pcap'
            kwargs = {}
            if len(hashes) > 1:
                print('Only 1 hash allowed for %s' % action, file=sys.stderr)
                sys.exit(1)
            if len(hashes) == 1:
                kwargs['hash'] = hashes[0]
            if options['platform'] is not None:
                kwargs['platform'] = options['platform']

            wfapi.pcap(**kwargs)
            print_status(wfapi, action)
            print_response(wfapi, options)
            save_file(wfapi, options)

        if options['changed']:
            action = 'verdicts_changed'
            kwargs = {}
            if options['date'] is not None:
                kwargs['date'] = options['date']
                try:
                    x = int(options['date'])
                except ValueError:
                    pass
                else:
                    if x < 1:
                        d = date.today()
                        d = d - timedelta(-x)
                        kwargs['date'] = d.isoformat()
                        if options['debug']:
                            print('relative date(%d): %s' % (x, kwargs['date']),
                                  file=sys.stderr)

            wfapi.verdicts_changed(**kwargs)
            print_status(wfapi, action)
            print_response(wfapi, options)
            save_file(wfapi, options)

        if options['testfile']:
            action = 'testfile'

            wfapi.testfile()
            print_status(wfapi, action)
            print_response(wfapi, options)
            save_file(wfapi, options)

    except pan.wfapi.PanWFapiError as msg:
        print_status(wfapi, action, msg)
        print_response(wfapi, options)
        sys.exit(1)

    sys.exit(0)
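
When --debug is given, the root logger is tuned to one of pan.wfapi's DEBUG1/DEBUG2/DEBUG3 levels and a message-only StreamHandler is attached. A brief sketch of the same wiring; the numeric level values below are placeholders, since the real constants live in pan.wfapi:

import logging

# Placeholder numeric values; the real constants are pan.wfapi.DEBUG1/2/3.
DEBUG1, DEBUG2, DEBUG3 = 9, 8, 7

def configure_debug(debug_count):
    if not debug_count:
        return
    logger = logging.getLogger()
    logger.setLevel({1: DEBUG1, 2: DEBUG2, 3: DEBUG3}[min(debug_count, 3)])
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter('%(message)s'))
    logger.addHandler(handler)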

Example 145

Project: hydroshare Source File: receivers.py
def _extract_metadata(resource, sqlite_file_name):
    err_message = "Not a valid ODM2 SQLite file"
    log = logging.getLogger()
    try:
        con = sqlite3.connect(sqlite_file_name)
        with con:
            # get the records in python dictionary format
            con.row_factory = sqlite3.Row
            cur = con.cursor()

            # populate the lookup CV tables that are needed later for metadata editing
            _create_cv_lookup_models(cur, resource.metadata, 'CV_VariableType', CVVariableType)
            _create_cv_lookup_models(cur, resource.metadata, 'CV_VariableName', CVVariableName)
            _create_cv_lookup_models(cur, resource.metadata, 'CV_Speciation', CVSpeciation)
            _create_cv_lookup_models(cur, resource.metadata, 'CV_SiteType', CVSiteType)
            _create_cv_lookup_models(cur, resource.metadata, 'CV_ElevationDatum', CVElevationDatum)
            _create_cv_lookup_models(cur, resource.metadata, 'CV_MethodType', CVMethodType)
            _create_cv_lookup_models(cur, resource.metadata, 'CV_UnitsType', CVUnitsType)
            _create_cv_lookup_models(cur, resource.metadata, 'CV_Status', CVStatus)
            _create_cv_lookup_models(cur, resource.metadata, 'CV_Medium', CVMedium)
            _create_cv_lookup_models(cur, resource.metadata, 'CV_AggregationStatistic',
                                     CVAggregationStatistic)

            # read data from necessary tables and create metadata elements
            # extract core metadata

            # extract abstract and title
            cur.execute("SELECT DataSetTitle, DataSetAbstract FROM DataSets")
            dataset = cur.fetchone()
            # update title element
            if dataset["DataSetTitle"]:
                resource.metadata.update_element('title', element_id=resource.metadata.title.id,
                                                 value=dataset["DataSetTitle"])

            # create abstract/description element
            if dataset["DataSetAbstract"]:
                resource.metadata.create_element('description', abstract=dataset["DataSetAbstract"])

            # extract keywords/subjects
            # these are the comma separated values in the VariableNameCV column of the Variables
            # table
            cur.execute("SELECT VariableID, VariableNameCV FROM Variables")
            variables = cur.fetchall()
            keyword_list = []
            for variable in variables:
                keywords = variable["VariableNameCV"].split(",")
                keyword_list = keyword_list + keywords

            # use set() to remove any duplicate keywords
            for kw in set(keyword_list):
                resource.metadata.create_element("subject", value=kw)

            # find the contributors for metadata
            _extract_creators_contributors(resource, cur)

            # extract coverage data
            _extract_coverage_metadata(resource, cur)

            # extract extended metadata
            cur.execute("SELECT * FROM Sites")
            sites = cur.fetchall()
            is_create_multiple_site_elements = len(sites) > 1

            cur.execute("SELECT * FROM Variables")
            variables = cur.fetchall()
            is_create_multiple_variable_elements = len(variables) > 1

            cur.execute("SELECT * FROM Methods")
            methods = cur.fetchall()
            is_create_multiple_method_elements = len(methods) > 1

            cur.execute("SELECT * FROM ProcessingLevels")
            processing_levels = cur.fetchall()
            is_create_multiple_processinglevel_elements = len(processing_levels) > 1

            cur.execute("SELECT * FROM TimeSeriesResults")
            timeseries_results = cur.fetchall()
            is_create_multiple_timeseriesresult_elements = len(timeseries_results) > 1

            cur.execute("SELECT * FROM Results")
            results = cur.fetchall()
            for result in results:
                # extract site element data
                # Start with Results table to -> FeatureActions table -> SamplingFeatures table
                # check if we need to create multiple site elements
                cur.execute("SELECT * FROM FeatureActions WHERE FeatureActionID=?",
                            (result["FeatureActionID"],))
                feature_action = cur.fetchone()
                if is_create_multiple_site_elements or len(resource.metadata.sites) == 0:
                    cur.execute("SELECT * FROM SamplingFeatures WHERE SamplingFeatureID=?",
                                (feature_action["SamplingFeatureID"],))
                    sampling_feature = cur.fetchone()

                    cur.execute("SELECT * FROM Sites WHERE SamplingFeatureID=?",
                                (feature_action["SamplingFeatureID"],))
                    site = cur.fetchone()
                    if not any(sampling_feature["SamplingFeatureCode"] == s.site_code for s
                               in resource.metadata.sites):

                        data_dict = {}
                        data_dict['series_ids'] = [result["ResultUUID"]]
                        data_dict['site_code'] = sampling_feature["SamplingFeatureCode"]
                        data_dict['site_name'] = sampling_feature["SamplingFeatureName"]
                        if sampling_feature["Elevation_m"]:
                            data_dict["elevation_m"] = sampling_feature["Elevation_m"]

                        if sampling_feature["ElevationDatumCV"]:
                            data_dict["elevation_datum"] = sampling_feature["ElevationDatumCV"]

                        if site["SiteTypeCV"]:
                            data_dict["site_type"] = site["SiteTypeCV"]

                        data_dict["latitude"] = site["Latitude"]
                        data_dict["longitude"] = site["Longitude"]

                        # create site element
                        resource.metadata.create_element('site', **data_dict)
                    else:
                        _update_element_series_ids(resource.metadata.sites[0], result["ResultUUID"])
                else:
                    _update_element_series_ids(resource.metadata.sites[0], result["ResultUUID"])

                # extract variable element data
                # Start with Results table to -> Variables table
                if is_create_multiple_variable_elements or len(resource.metadata.variables) == 0:
                    cur.execute("SELECT * FROM Variables WHERE VariableID=?",
                                (result["VariableID"],))
                    variable = cur.fetchone()
                    if not any(variable["VariableCode"] == v.variable_code for v
                               in resource.metadata.variables):

                        data_dict = {}
                        data_dict['series_ids'] = [result["ResultUUID"]]
                        data_dict['variable_code'] = variable["VariableCode"]
                        data_dict["variable_name"] = variable["VariableNameCV"]
                        data_dict['variable_type'] = variable["VariableTypeCV"]
                        data_dict["no_data_value"] = variable["NoDataValue"]
                        if variable["VariableDefinition"]:
                            data_dict["variable_definition"] = variable["VariableDefinition"]

                        if variable["SpeciationCV"]:
                            data_dict["speciation"] = variable["SpeciationCV"]

                        # create variable element
                        resource.metadata.create_element('variable', **data_dict)
                    else:
                        _update_element_series_ids(resource.metadata.variables[0],
                                                   result["ResultUUID"])
                else:
                    _update_element_series_ids(resource.metadata.variables[0], result["ResultUUID"])

                # extract method element data
                # Start with Results table -> FeatureActions table to -> Actions table to ->
                # Method table
                if is_create_multiple_method_elements or len(resource.metadata.methods) == 0:
                    cur.execute("SELECT MethodID from Actions WHERE ActionID=?",
                                (feature_action["ActionID"],))
                    action = cur.fetchone()
                    cur.execute("SELECT * FROM Methods WHERE MethodID=?", (action["MethodID"],))
                    method = cur.fetchone()
                    if not any(method["MethodCode"] == m.method_code for m
                               in resource.metadata.methods):

                        data_dict = {}
                        data_dict['series_ids'] = [result["ResultUUID"]]
                        data_dict['method_code'] = method["MethodCode"]
                        data_dict["method_name"] = method["MethodName"]
                        data_dict['method_type'] = method["MethodTypeCV"]

                        if method["MethodDescription"]:
                            data_dict["method_description"] = method["MethodDescription"]

                        if method["MethodLink"]:
                            data_dict["method_link"] = method["MethodLink"]

                        # create method element
                        resource.metadata.create_element('method', **data_dict)
                    else:
                        _update_element_series_ids(resource.metadata.methods[0],
                                                   result["ResultUUID"])
                else:
                    _update_element_series_ids(resource.metadata.methods[0], result["ResultUUID"])

                # extract processinglevel element data
                # Start with Results table to -> ProcessingLevels table
                if is_create_multiple_processinglevel_elements \
                        or len(resource.metadata.processing_levels) == 0:
                    cur.execute("SELECT * FROM ProcessingLevels WHERE ProcessingLevelID=?",
                                (result["ProcessingLevelID"],))
                    pro_level = cur.fetchone()
                    if not any(pro_level["ProcessingLevelCode"] == p.processing_level_code for p
                               in resource.metadata.processing_levels):

                        data_dict = {}
                        data_dict['series_ids'] = [result["ResultUUID"]]
                        data_dict['processing_level_code'] = pro_level["ProcessingLevelCode"]
                        if pro_level["Definition"]:
                            data_dict["definition"] = pro_level["Definition"]

                        if pro_level["Explanation"]:
                            data_dict["explanation"] = pro_level["Explanation"]

                        # create processinglevel element
                        resource.metadata.create_element('processinglevel', **data_dict)
                    else:
                        _update_element_series_ids(resource.metadata.processing_levels[0],
                                                   result["ResultUUID"])
                else:
                    _update_element_series_ids(resource.metadata.processing_levels[0],
                                               result["ResultUUID"])

                # extract data for TimeSeriesResult element
                # Start with Results table
                if is_create_multiple_timeseriesresult_elements \
                        or len(resource.metadata.time_series_results) == 0:
                    data_dict = {}
                    data_dict['series_ids'] = [result["ResultUUID"]]
                    data_dict["status"] = result["StatusCV"]
                    data_dict["sample_medium"] = result["SampledMediumCV"]
                    data_dict["value_count"] = result["ValueCount"]

                    cur.execute("SELECT * FROM Units WHERE UnitsID=?", (result["UnitsID"],))
                    unit = cur.fetchone()
                    data_dict['units_type'] = unit["UnitsTypeCV"]
                    data_dict['units_name'] = unit["UnitsName"]
                    data_dict['units_abbreviation'] = unit["UnitsAbbreviation"]

                    cur.execute("SELECT AggregationStatisticCV FROM TimeSeriesResults WHERE "
                                "ResultID=?", (result["ResultID"],))
                    ts_result = cur.fetchone()
                    data_dict["aggregation_statistics"] = ts_result["AggregationStatisticCV"]

                    # create the TimeSeriesResult element
                    resource.metadata.create_element('timeseriesresult', **data_dict)
                else:
                    _update_element_series_ids(resource.metadata.time_series_results[0],
                                               result["ResultUUID"])

            return None

    except sqlite3.Error as ex:
        sqlite_err_msg = str(ex.args[0])
        log.error(sqlite_err_msg)
        return sqlite_err_msg
    except Exception as ex:
        log.error(ex.message)
        return err_message

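The getLogger usage in Example 145 is the plain root-logger pattern: fetch it once at the top of the function and use it to record SQLite failures while returning an error value to the caller. A stripped-down sketch of that shape (the table and function names here are illustrative, not taken from hydroshare):

import logging
import sqlite3

def extract_title(sqlite_file_name):
    log = logging.getLogger()           # root logger, as in the example above
    try:
        con = sqlite3.connect(sqlite_file_name)
        with con:
            con.row_factory = sqlite3.Row
            cur = con.cursor()
            cur.execute("SELECT DataSetTitle FROM DataSets")
            row = cur.fetchone()
            return row["DataSetTitle"] if row else None
    except sqlite3.Error as ex:
        log.error(str(ex.args[0]))      # log the SQLite error, signal failure to the caller
        return None
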
Example 146

Project: portage-funtoo Source File: main.py
def emirrordist_main(args):

	# The calling environment is ignored, so the program is
	# completely controlled by commandline arguments.
	env = {}

	if not sys.stdout.isatty():
		portage.output.nocolor()
		env['NOCOLOR'] = 'true'

	parser, options, args = parse_args(args)

	if options.version:
		sys.stdout.write("Portage %s\n" % portage.VERSION)
		return os.EX_OK

	config_root = options.config_root

	if options.repo is None:
		env['PORTDIR_OVERLAY'] = ''
	elif options.portdir_overlay:
		env['PORTDIR_OVERLAY'] = options.portdir_overlay

	if options.portdir is not None:
		env['PORTDIR'] = options.portdir

	settings = portage.config(config_root=config_root,
		local_config=False, env=env)

	default_opts = None
	if not options.ignore_default_opts:
		default_opts = settings.get('EMIRRORDIST_DEFAULT_OPTS', '').split()

	if default_opts:
		parser, options, args = parse_args(default_opts + args)

		settings = portage.config(config_root=config_root,
			local_config=False, env=env)

	repo_path = None
	if options.repo is not None:
		repo_path = settings.repositories.treemap.get(options.repo)
		if repo_path is None:
			parser.error("Unable to locate repository named '%s'" % \
				(options.repo,))
	else:
		repo_path = settings.repositories.mainRepoLocation()
		if not repo_path:
			parser.error("PORTDIR is undefined")

	if options.jobs is not None:
		options.jobs = int(options.jobs)

	if options.load_average is not None:
		options.load_average = float(options.load_average)

	if options.failure_log is not None:
		options.failure_log = normalize_path(
			os.path.abspath(options.failure_log))

		parent_dir = os.path.dirname(options.failure_log)
		if not (os.path.isdir(parent_dir) and
			os.access(parent_dir, os.W_OK|os.X_OK)):
			parser.error(("--failure-log '%s' parent is not a "
				"writable directory") % options.failure_log)

	if options.success_log is not None:
		options.success_log = normalize_path(
			os.path.abspath(options.success_log))

		parent_dir = os.path.dirname(options.success_log)
		if not (os.path.isdir(parent_dir) and
			os.access(parent_dir, os.W_OK|os.X_OK)):
			parser.error(("--success-log '%s' parent is not a "
				"writable directory") % options.success_log)

	if options.scheduled_deletion_log is not None:
		options.scheduled_deletion_log = normalize_path(
			os.path.abspath(options.scheduled_deletion_log))

		parent_dir = os.path.dirname(options.scheduled_deletion_log)
		if not (os.path.isdir(parent_dir) and
			os.access(parent_dir, os.W_OK|os.X_OK)):
			parser.error(("--scheduled-deletion-log '%s' parent is not a "
				"writable directory") % options.scheduled_deletion_log)

		if options.deletion_db is None:
			parser.error("--scheduled-deletion-log requires --deletion-db")

	if options.deletion_delay is not None:
		options.deletion_delay = long(options.deletion_delay)
		if options.deletion_db is None:
			parser.error("--deletion-delay requires --deletion-db")

	if options.deletion_db is not None:
		if options.deletion_delay is None:
			parser.error("--deletion-db requires --deletion-delay")
		options.deletion_db = normalize_path(
			os.path.abspath(options.deletion_db))

	if options.temp_dir is not None:
		options.temp_dir = normalize_path(
			os.path.abspath(options.temp_dir))

		if not (os.path.isdir(options.temp_dir) and
			os.access(options.temp_dir, os.W_OK|os.X_OK)):
			parser.error(("--temp-dir '%s' is not a "
				"writable directory") % options.temp_dir)

	if options.distfiles is not None:
		options.distfiles = normalize_path(
			os.path.abspath(options.distfiles))

		if not (os.path.isdir(options.distfiles) and
			os.access(options.distfiles, os.W_OK|os.X_OK)):
			parser.error(("--distfiles '%s' is not a "
				"writable directory") % options.distfiles)
	else:
		parser.error("missing required --distfiles parameter")

	if options.mirror_overrides is not None:
		options.mirror_overrides = normalize_path(
			os.path.abspath(options.mirror_overrides))

		if not (os.access(options.mirror_overrides, os.R_OK) and
			os.path.isfile(options.mirror_overrides)):
			parser.error(
				"--mirror-overrides-file '%s' is not a readable file" %
				options.mirror_overrides)

	if options.distfiles_local is not None:
		options.distfiles_local = normalize_path(
			os.path.abspath(options.distfiles_local))

		if not (os.path.isdir(options.distfiles_local) and
			os.access(options.distfiles_local, os.W_OK|os.X_OK)):
			parser.error(("--distfiles-local '%s' is not a "
				"writable directory") % options.distfiles_local)

	if options.distfiles_db is not None:
		options.distfiles_db = normalize_path(
			os.path.abspath(options.distfiles_db))

	if options.tries is not None:
		options.tries = int(options.tries)

	if options.recycle_dir is not None:
		options.recycle_dir = normalize_path(
			os.path.abspath(options.recycle_dir))
		if not (os.path.isdir(options.recycle_dir) and
			os.access(options.recycle_dir, os.W_OK|os.X_OK)):
			parser.error(("--recycle-dir '%s' is not a "
				"writable directory") % options.recycle_dir)

	if options.recycle_db is not None:
		if options.recycle_dir is None:
			parser.error("--recycle-db requires "
				"--recycle-dir to be specified")
		options.recycle_db = normalize_path(
			os.path.abspath(options.recycle_db))

	if options.recycle_deletion_delay is not None:
		options.recycle_deletion_delay = \
			long(options.recycle_deletion_delay)

	if options.fetch_log_dir is not None:
		options.fetch_log_dir = normalize_path(
			os.path.abspath(options.fetch_log_dir))

		if not (os.path.isdir(options.fetch_log_dir) and
			os.access(options.fetch_log_dir, os.W_OK|os.X_OK)):
			parser.error(("--fetch-log-dir '%s' is not a "
				"writable directory") % options.fetch_log_dir)

	if options.whitelist_from:
		normalized_paths = []
		for x in options.whitelist_from:
			path = normalize_path(os.path.abspath(x))
			normalized_paths.append(path)
			if not (os.access(path, os.R_OK) and os.path.isfile(path)):
				parser.error(
					"--whitelist-from '%s' is not a readable file" % x)
		options.whitelist_from = normalized_paths

	if options.strict_manifests is not None:
		if options.strict_manifests == "y":
			settings.features.add("strict")
		else:
			settings.features.discard("strict")

	settings.lock()

	portdb = portage.portdbapi(mysettings=settings)

	# Limit ebuilds to the specified repo.
	portdb.porttrees = [repo_path]

	portage.util.initialize_logger()

	if options.verbose > 0:
		l = logging.getLogger()
		l.setLevel(l.getEffectiveLevel() - 10 * options.verbose)

	with Config(options, portdb,
		SchedulerInterface(global_event_loop())) as config:

		if not options.mirror:
			parser.error('No action specified')

		returncode = os.EX_OK

		if options.mirror:
			signum = run_main_scheduler(MirrorDistTask(config))
			if signum is not None:
				sys.exit(128 + signum)

	return returncode

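The only getLogger call in Example 146 is the verbosity handling near the end: every -v lowers the root logger's threshold by one level step (10), starting from whatever initialize_logger() set up. A minimal self-contained sketch of the same arithmetic, using logging.basicConfig in place of portage's own initializer:

import logging

logging.basicConfig(level=logging.INFO)   # stand-in for portage.util.initialize_logger()

def apply_verbosity(verbose):
    # Each -v drops the effective level by 10, e.g. INFO (20) -> DEBUG (10).
    root = logging.getLogger()
    root.setLevel(root.getEffectiveLevel() - 10 * verbose)

apply_verbosity(1)
logging.debug("visible after one -v")
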
Example 147

Project: portage Source File: argparser.py
def parse_args(argv, qahelp, repoman_default_opts):
	"""Use a customized optionParser to parse command line arguments for repoman
	Args:
		argv - a sequence of command line arguments
		qahelp - a dict of qa warning to help message
	Returns:
		(opts, args), just like a call to parser.parse_args()
	"""

	argv = portage._decode_argv(argv)

	modes = {
		'commit': 'Run a scan then commit changes',
		'ci': 'Run a scan then commit changes',
		'fix': 'Fix simple QA issues (stray digests, missing digests)',
		'full': 'Scan directory tree and print all issues (not a summary)',
		'help': 'Show this screen',
		'manifest': 'Generate a Manifest (fetches files if necessary)',
		'manifest-check': 'Check Manifests for missing or incorrect digests',
		'scan': 'Scan directory tree for QA issues'
	}

	output_choices = {
		'default': 'The normal output format',
		'column': 'Columnar output suitable for use with grep'
	}

	mode_keys = list(modes)
	mode_keys.sort()

	output_keys = sorted(output_choices)

	parser = argparse.ArgumentParser(
		usage="repoman [options] [mode]",
		description="Modes: %s" % " | ".join(mode_keys),
		epilog="For more help consult the man page.")

	parser.add_argument(
		'-a', '--ask', dest='ask', action='store_true',
		default=False,
		help='Request a confirmation before committing')

	parser.add_argument(
		'-m', '--commitmsg', dest='commitmsg',
		help='specify a commit message on the command line')

	parser.add_argument(
		'-M', '--commitmsgfile', dest='commitmsgfile',
		help='specify a path to a file that contains a commit message')

	parser.add_argument(
		'--digest', choices=('y', 'n'), metavar='<y|n>',
		help='Automatically update Manifest digests for modified files')

	parser.add_argument(
		'-p', '--pretend', dest='pretend', default=False,
		action='store_true',
		help='don\'t commit or fix anything; just show what would be done')

	parser.add_argument(
		'-q', '--quiet', dest="quiet", action="count",
		default=0,
		help='do not print unnecessary messages')

	parser.add_argument(
		'--echangelog', choices=('y', 'n', 'force'), metavar="<y|n|force>",
		help=(
			'for commit mode, call echangelog if ChangeLog is unmodified (or '
			'regardless of modification if \'force\' is specified)'))

	parser.add_argument(
		'--experimental-inherit', choices=('y', 'n'), metavar="<y|n>",
		default='n',
		help=(
			'Enable experimental inherit.missing checks which may misbehave'
			' when the internal eclass database becomes outdated'))

	parser.add_argument(
		'-f', '--force', dest='force', action='store_true',
		default=False,
		help='Commit with QA violations')

	parser.add_argument(
		'-S', '--straight-to-stable', dest='straight_to_stable',
		default=False, action='store_true',
		help='Allow committing straight to stable')

	parser.add_argument(
		'--vcs', dest='vcs',
		help='Force using specific VCS instead of autodetection')

	parser.add_argument(
		'-v', '--verbose', dest="verbosity", action='count',
		help='be very verbose in output', default=0)

	parser.add_argument(
		'-V', '--version', dest='version', action='store_true',
		help='show version info')

	parser.add_argument(
		'-x', '--xmlparse', dest='xml_parse', action='store_true',
		default=False,
		help='forces the metadata.xml parse check to be carried out')

	parser.add_argument(
		'--if-modified', choices=('y', 'n'), default='n',
		metavar="<y|n>",
		help='only check packages that have uncommitted modifications')

	parser.add_argument(
		'-i', '--ignore-arches', dest='ignore_arches', action='store_true',
		default=False,
		help='ignore arch-specific failures (where arch != host)')

	parser.add_argument(
		"--ignore-default-opts",
		action="store_true",
		help="do not use the REPOMAN_DEFAULT_OPTS environment variable")

	parser.add_argument(
		'-I', '--ignore-masked', dest='ignore_masked', action='store_true',
		default=False,
		help='ignore masked packages (not allowed with commit mode)')

	parser.add_argument(
		'--include-arches',
		dest='include_arches', metavar='ARCHES', action='append',
		help=(
			'A space separated list of arches used to '
			'filter the selection of profiles for dependency checks'))

	parser.add_argument(
		'-d', '--include-dev', dest='include_dev', action='store_true',
		default=False,
		help='include dev profiles in dependency checks')

	parser.add_argument(
		'-e', '--include-exp-profiles', choices=('y', 'n'), metavar='<y|n>',
		default=False,
		help='include exp profiles in dependency checks')

	parser.add_argument(
		'--unmatched-removal', dest='unmatched_removal', action='store_true',
		default=False,
		help=(
			'enable strict checking of package.mask and package.unmask files'
			' for unmatched removal atoms'))

	parser.add_argument(
		'--without-mask', dest='without_mask', action='store_true',
		default=False,
		help=(
			'behave as if no package.mask entries exist'
			' (not allowed with commit mode)'))

	parser.add_argument(
		'--output-style', dest='output_style', choices=output_keys,
		help='select output type', default='default')

	parser.add_argument(
		'--mode', dest='mode', choices=mode_keys,
		help='specify which mode repoman will run in (default=full)')

	opts, args = parser.parse_known_args(argv[1:])

	if not opts.ignore_default_opts:
		default_opts = util.shlex_split(repoman_default_opts)
		if default_opts:
			opts, args = parser.parse_known_args(default_opts + sys.argv[1:])

	if opts.mode == 'help':
		parser.print_help(short=False)

	for arg in args:
		if arg in modes:
			if not opts.mode:
				opts.mode = arg
				break
		else:
			parser.error("invalid mode: %s" % arg)

	if not opts.mode:
		opts.mode = 'full'

	if opts.mode == 'ci':
		opts.mode = 'commit'  # backwards compat shortcut

	# Use verbosity and quiet options to appropriately fiddle with the loglevel
	for val in range(opts.verbosity):
		logger = logging.getLogger()
		logger.setLevel(logger.getEffectiveLevel() - 10)

	for val in range(opts.quiet):
		logger = logging.getLogger()
		logger.setLevel(logger.getEffectiveLevel() + 10)

	if opts.mode == 'commit' and opts.commitmsg:
		opts.commitmsg = _unicode_decode(opts.commitmsg)

	if opts.mode == 'commit' and not (opts.force or opts.pretend):
		if opts.ignore_masked:
			opts.ignore_masked = False
			logging.warn('Commit mode automatically disables --ignore-masked')
		if opts.without_mask:
			opts.without_mask = False
			logging.warn('Commit mode automatically disables --without-mask')

	return (opts, args)

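Example 147 applies the same level arithmetic in both directions: each -v lowers the root logger's level by 10 and each -q raises it by 10, so the two options cancel out. Condensed into one helper (a sketch, not repoman's actual API):

import logging

def adjust_root_level(verbosity=0, quiet=0):
    root = logging.getLogger()
    for _ in range(verbosity):
        root.setLevel(root.getEffectiveLevel() - 10)   # more verbose
    for _ in range(quiet):
        root.setLevel(root.getEffectiveLevel() + 10)   # less verbose
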
Example 148

Project: gramps Source File: argparser.py
    def parse_args(self):
        """
        Fill in lists with open, exports, imports, and actions options.

        Any errors are added to self.errors
        """
        try:
            options, leftargs = getopt.getopt(self.args[1:],
                                              SHORTOPTS, LONGOPTS)
        except getopt.GetoptError as msg:
            # Extract the arguments in the list.
            # The % operator replaces the list elements
            # with repr() of the list elements
            # which is OK for latin characters,
            # but not for non latin characters in list elements
            cliargs = "[ "
            for arg in range(len(self.args) - 1):
                cliargs += self.args[arg + 1] + " "
            cliargs += "]"
            # Must first do str() of the msg object.
            msg = str(msg)
            self.errors += [(_('Error parsing the arguments'),
                             msg + '\n' +
                             _("Error parsing the arguments: %s \n"
                               "Type gramps --help for an overview of "
                               "commands, or read the manual pages."
                              ) % cliargs)]
            return

        # Some args can work on a list of databases:
        if leftargs:
            for opt_ix in range(len(options)):
                option, value = options[opt_ix]
                if option in ['-L', '-l', '-t']:
                    self.database_names = leftargs
                    leftargs = []

        if leftargs:
            # if there were an argument without option,
            # use it as a file to open and return
            self.open_gui = leftargs[0]
            print(_("Trying to open: %s ..."
                   ) % leftargs[0],
                  file=sys.stderr)
            #see if force open is on
            for opt_ix in range(len(options)):
                option, value = options[opt_ix]
                if option in ('-u', '--force-unlock'):
                    self.force_unlock = True
                    break
            return

        # Go over all given option and place them into appropriate lists
        cleandbg = []
        need_to_quit = False
        for opt_ix in range(len(options)):
            option, value = options[opt_ix]
            if option in ['-O', '--open']:
                self.open = value
            elif option in ['-C', '--create']:
                self.create = value
            elif option in ['-i', '--import']:
                family_tree_format = None
                if (opt_ix < len(options) - 1
                        and options[opt_ix + 1][0] in ('-f', '--format')):
                    family_tree_format = options[opt_ix + 1][1]
                self.imports.append((value, family_tree_format))
            elif option in ['-r', '--remove']:
                self.removes.append(value)
            elif option in ['-e', '--export']:
                family_tree_format = None
                if (opt_ix < len(options) - 1
                        and options[opt_ix + 1][0] in ('-f', '--format')):
                    family_tree_format = options[opt_ix + 1][1]
                self.exports.append((value, family_tree_format))
            elif option in ['-a', '--action']:
                action = value
                if action not in ('report', 'tool', 'book'):
                    print(_("Unknown action: %s. Ignoring."
                           ) % action,
                          file=sys.stderr)
                    continue
                options_str = ""
                if (opt_ix < len(options)-1
                        and options[opt_ix+1][0] in ('-p', '--options')):
                    options_str = options[opt_ix+1][1]
                self.actions.append((action, options_str))
            elif option in ['-d', '--debug']:
                print(_('setup debugging'), value, file=sys.stderr)
                logger = logging.getLogger(value)
                logger.setLevel(logging.DEBUG)
                cleandbg += [opt_ix]
            elif option in ['-l']:
                self.list = True
            elif option in ['-L']:
                self.list_more = True
            elif option in ['-t']:
                self.list_table = True
            elif option in ['-s', '--show']:
                print(_("Gramps config settings from %s:"
                       ) % config.filename)
                for sect in config.data:
                    for setting in config.data[sect]:
                        print("%s.%s=%s" % (sect, setting,
                                            repr(config.data[sect][setting])))
                    print()
                sys.exit(0)
            elif option in ['-b', '--databases']:
                default = config.data["database"]["backend"]
                pmgr = BasePluginManager.get_instance()
                pmgr.reg_plugins(PLUGINS_DIR, self, None)
                pmgr.reg_plugins(USER_PLUGINS, self, None, load_on_reg=True)
                for plugin in pmgr.get_reg_databases():
                    pdata = pmgr.get_plugin(plugin.id)
                    mod = pmgr.load_plugin(pdata)
                    if mod:
                        database = getattr(mod, pdata.databaseclass)
                        summary = database.get_class_summary()
                        print("Database backend ID:",
                              pdata.id,
                              "(default)" if pdata.id == default else "")
                        for key in sorted(summary.keys()):
                            print("   ", _("%s:") % key, summary[key])
                sys.exit(0)
            elif option in ['-c', '--config']:
                cfg_name = value
                set_value = False
                if cfg_name:
                    if ":" in cfg_name:
                        cfg_name, new_value = cfg_name.split(":", 1)
                        set_value = True
                    if config.has_default(cfg_name):
                        setting_value = config.get(cfg_name)
                        print(_("Current Gramps config setting: "
                                "%(name)s:%(value)s"
                               ) % {'name'  : cfg_name,
                                    'value' : repr(setting_value)},
                              file=sys.stderr)
                        if set_value:
                            # does a user want the default config value?
                            if new_value in ("DEFAULT", _("DEFAULT")):
                                new_value = config.get_default(cfg_name)
                            else:
                                converter = get_type_converter(setting_value)
                                new_value = converter(new_value)
                            config.set(cfg_name, new_value)
                            # translators: indent "New" to match "Current"
                            print(_("    New Gramps config setting: "
                                    "%(name)s:%(value)s"
                                   ) % {'name'  : cfg_name,
                                        'value' : repr(config.get(cfg_name))},
                                  file=sys.stderr)
                        else:
                            need_to_quit = True
                    else:
                        print(_("Gramps: no such config setting: '%s'"
                               ) % cfg_name,
                              file=sys.stderr)
                        need_to_quit = True
                cleandbg += [opt_ix]
            elif option in ['-h', '-?', '--help']:
                self.help = True
            elif option in ['-u', '--force-unlock']:
                self.force_unlock = True
            elif option in ['--usage']:
                self.usage = True
            elif option in ['-y', '--yes']:
                self.auto_accept = True
            elif option in ['-q', '--quiet']:
                self.quiet = True

        #clean options list
        cleandbg.reverse()
        for ind in cleandbg:
            del options[ind]

        if (len(options) > 0
                and self.open is None
                and self.imports == []
                and self.removes == []
                and not (self.list
                         or self.list_more
                         or self.list_table
                         or self.help)):
            # Extract and convert to unicode the arguments in the list.
            # The % operator replaces the list elements with repr() of
            # the list elements, which is OK for latin characters
            # but not for non-latin characters in list elements
            cliargs = "[ "
            for arg in range(len(self.args) - 1):
                cliargs += self.args[arg + 1] + ' '
            cliargs += "]"
            self.errors += [(_('Error parsing the arguments'),
                             _("Error parsing the arguments: %s \n"
                               "To use in the command-line mode, supply at "
                               "least one input file to process."
                              ) % cliargs)]
        if need_to_quit:
            sys.exit(0)

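Example 148 uses getLogger differently: instead of the root logger, the value of each -d option names a specific logger, and only that logger (and its children) is switched to DEBUG, leaving the rest of the application quiet. A minimal sketch with made-up logger names:

import logging

logging.basicConfig(level=logging.WARNING)

def setup_debugging(domain):
    # Only the named logger subtree becomes verbose.
    logging.getLogger(domain).setLevel(logging.DEBUG)

setup_debugging("myapp.importer")                       # hypothetical domain name
logging.getLogger("myapp.importer").debug("shown")      # passes the per-logger level
logging.getLogger("myapp.export").debug("suppressed")   # still inherits WARNING from the root
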
Example 149

Project: yum-plugin-replace Source File: replace.py
    def doCommand(self, base, basecmd, extcmds):
        logger = logging.getLogger("yum.verbose.main")
        print "Replacing packages takes time, please be patient..."
        global pkgs_to_remove
        pkgs_to_install = []
        pkgs_to_not_remove = []
        deps_to_resolve = []
        pkgs_with_same_srpm = []

        def msg(x):
            logger.log(logginglevels.INFO_2, x)
        def msg_warn(x):
            logger.warn(x)


        opts = base.plugins.cmdline[0]
        if len(base.plugins.cmdline[1]) <= 1:
            raise UpdateError, "Must specify a package to be replaced (i.e yum replace pkg --replace-with pkgXY)"
        if not opts.replace_with:
            raise UpdateError, "Replacement package name required (--replace-with)" 

        orig_pkg = base.plugins.cmdline[1][1]
        new_pkg = opts.replace_with

        if not base.isPackageInstalled(orig_pkg):
            raise RemoveError, "Package '%s' is not installed." % orig_pkg

        # get pkg object
        res = base.rpmdb.searchNevra(name=orig_pkg)
        if len(res) > 1:
            raise RemoveError, \
                "Multiple packages found matching '%s'.  Please remove manually." % \
                orig_pkg
        orig_pkgobject = res[0]
        pkgs_to_remove.append(orig_pkgobject)

        # find all other installed packages with same srpm (orig_pkg's subpackages)
        for pkg in base.rpmdb:
            if pkg.sourcerpm == orig_pkgobject.sourcerpm:
                pkgs_to_remove.append(pkg)
                for dep in pkg.provides_names:
                    deps_to_resolve.append(dep)

        # get new pkg object
        new_pkgs = []
        res = base.pkgSack.returnNewestByName(new_pkg)
        for i in res:
            if platform.machine() == i.arch:
                if i not in new_pkgs:
                    new_pkgs.append(i)

        # if no archs matched (maybe a i686 / i386 issue) then pass them all
        if len(new_pkgs) == 0:
            new_pkgs = res

        # clean up duplicates, for some reason yum creates duplicate package objects
        # that are the same, but different object ref so they don't compare.  here
        # we compare against returnEVR().
        final_pkgs = []
        for i in new_pkgs:
            add = True 
            for i2 in final_pkgs:
                if i.returnEVR() == i2.returnEVR() and i.arch == i2.arch:
                    add = False
            if add and i not in final_pkgs:
                final_pkgs.append(i)

        if len(final_pkgs) > 1:
            raise UpdateError, \
                "Multiple packages found matching '%s'.  Please upgrade manually." % \
                new_pkg
        new_pkgobject = new_pkgs[0]
        pkgs_to_install.append(new_pkgobject)

        orig_prefix = orig_pkg
        new_prefix = new_pkg

        # Find the original and new prefixes of packages based on their sourcerpm name
        m = re.match('(.*)-%s-%s' % (orig_pkgobject.version, orig_pkgobject.release),\
            orig_pkgobject.sourcerpm)
        if m:
            orig_prefix = m.group(1)

        m = re.match('(.*)-%s-%s' % (new_pkgobject.version, new_pkgobject.release),\
            new_pkgobject.sourcerpm)
        if m:
            new_prefix = m.group(1)

        # don't remove pkgs that rely on orig_pkg (yum tries to remove them)
        for pkg in base.rpmdb:
            for req in pkg.requires_names:
                if req in deps_to_resolve:
                    if pkg not in pkgs_to_not_remove and pkg not in pkgs_to_remove:
                        pkgs_to_not_remove.append(pkg)

        # determine all new_pkg subpackages that provide missing deps
        providers = {}
        for pkg in base.pkgSack:
            if pkg.sourcerpm == new_pkgobject.sourcerpm:
                pkgs_with_same_srpm.append(pkg)
                for dep in pkg.provides_names:
                    if dep in deps_to_resolve:
                        if pkg not in pkgs_to_remove:
                            
                            # build a list of all packages matching provides
                            if not providers.has_key(dep):
                                providers[dep] = []
                            providers[dep].append(pkg)

        # We now have a complete list of package providers we care about
        if providers:
            resolved = False
            for key, pkgs in providers.items():
                if len(pkgs) == 1:
                    pkgs_to_install.append(pkgs[0])
                    deps_to_resolve.remove(key)
                    resolved = True

                elif len(pkgs) > 1:
                    # Attempt to auto resolve multiple provides
                    for rpkg in pkgs_to_remove:
                        npkg = rpkg.name.replace(orig_prefix, new_prefix)
                        for pkg in pkgs:
                            if npkg == pkg.name:
                                pkgs_to_install.append(pkg)
                                resolved = True

                    # we've completed our auto resolve,
                    # if resolved lets remove the key
                    if resolved:
                        deps_to_resolve.remove(key)

                    if not resolved:
                        print '\nWARNING: Multiple Providers found for %s' % key
                        print "  %s" % [str(i) for i in pkgs]

                # remove the dep from dict since it should be handled.
                del(providers[key])

        # This is messy: determine if any of the pkgs_to_not_remove have
        # counterparts as part of same 'base name' set (but different srpm, i.e. 
        # php and php-pear has different source rpms but you want phpXY-pear too).
        while pkgs_to_not_remove:
            pkg = pkgs_to_not_remove.pop()
            m = re.match('%s-(.*)' % orig_prefix, pkg.name)
            if not m:
                continue
            replace_name = "%s-%s" % (new_prefix, m.group(1))
            for pkg2 in base.pkgSack: 
                if pkg2.name == replace_name:
                    if pkg not in pkgs_to_remove:
                        pkgs_to_remove.append(pkg)
                        if pkg in pkgs_to_not_remove:
                            pkgs_to_not_remove.remove(pkg)
                    if pkg2 not in pkgs_to_install:
                        pkgs_to_install.append(pkg2)
                        if pkg2 in pkgs_to_not_remove:
                            pkgs_to_not_remove.remove(pkg2)

        # clean up duplicates (multiple versions)
        _pkgs_to_install = []
        for pkg in pkgs_to_install:
            latest_pkg = base.pkgSack.returnNewestByName(pkg.name)[0]
            if latest_pkg not in _pkgs_to_install:
                _pkgs_to_install.append(latest_pkg)
        pkgs_to_install = _pkgs_to_install

        # Its common that newer/replacement packages won't provide all the same things.
        # Give the user the chance to bail if they are scared.
        if len(deps_to_resolve) > 0:
            print
            print "WARNING: Unable to resolve all providers: %s" % deps_to_resolve
            print
            if not opts.assumeyes:
                res = raw_input("This may be normal depending on the package.  Continue? [y/N] ")
                if not res.strip('\n').lower() in ['y', 'yes']:
                    sys.exit(1)
        
        # remove old packages
        for pkg in pkgs_to_remove:
            base.remove(pkg)
        # install new/replacement packages
        for pkg in pkgs_to_install:
            base.install(pkg) 

        return 2, ["Run transaction to replace '%s' with '%s'" % (orig_pkg, new_pkg)]

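Example 149 asks for a logger by its dotted name, "yum.verbose.main", so its messages land in whatever handlers yum has already attached to that hierarchy; logginglevels.INFO_2 is a yum-specific level constant. A hedged sketch of the same shape with standard levels, assuming the surrounding application has already configured handlers for this logger:

import logging

logger = logging.getLogger("yum.verbose.main")

def msg(text):
    # logging.INFO stands in for yum's custom logginglevels.INFO_2 here
    logger.log(logging.INFO, text)

def msg_warn(text):
    logger.warning(text)

msg("Replacing packages takes time, please be patient...")
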
Example 150

Project: calibre Source File: stylizer.py
    def __init__(self, tree, path, oeb, opts, profile=None,
            extra_css='', user_css='', base_css=''):
        self.oeb, self.opts = oeb, opts
        self.profile = profile
        if self.profile is None:
            # Use the default profile. This should really be using
            # opts.output_profile, but I don't want to risk changing it, as
            # doing so might well have hard to debug font size effects.
            from calibre.customize.ui import output_profiles
            for x in output_profiles():
                if x.short_name == 'default':
                    self.profile = x
                    break
        if self.profile is None:
            # Just in case the default profile is removed in the future :)
            self.profile = opts.output_profile
        self.body_font_size = self.profile.fbase
        self.logger = oeb.logger
        item = oeb.manifest.hrefs[path]
        basename = os.path.basename(path)
        cssname = os.path.splitext(basename)[0] + '.css'
        stylesheets = [html_css_stylesheet()]
        if base_css:
            stylesheets.append(parseString(base_css, validate=False))
        style_tags = xpath(tree, '//*[local-name()="style" or local-name()="link"]')

        # Add cssutils parsing profiles from output_profile
        for profile in self.opts.output_profile.extra_css_modules:
            cssprofiles.addProfile(profile['name'],
                                        profile['props'],
                                        profile['macros'])

        parser = CSSParser(fetcher=self._fetch_css_file,
                log=logging.getLogger('calibre.css'))
        self.font_face_rules = []
        for elem in style_tags:
            if (elem.tag == XHTML('style') and
                elem.get('type', CSS_MIME) in OEB_STYLES and media_ok(elem.get('media'))):
                text = elem.text if elem.text else u''
                for x in elem:
                    t = getattr(x, 'text', None)
                    if t:
                        text += u'\n\n' + force_unicode(t, u'utf-8')
                    t = getattr(x, 'tail', None)
                    if t:
                        text += u'\n\n' + force_unicode(t, u'utf-8')
                if text:
                    text = oeb.css_preprocessor(text)
                    # We handle @import rules separately
                    parser.setFetcher(lambda x: ('utf-8', b''))
                    stylesheet = parser.parseString(text, href=cssname,
                            validate=False)
                    parser.setFetcher(self._fetch_css_file)
                    for rule in stylesheet.cssRules:
                        if rule.type == rule.IMPORT_RULE:
                            ihref = item.abshref(rule.href)
                            if not media_ok(rule.media.mediaText):
                                continue
                            hrefs = self.oeb.manifest.hrefs
                            if ihref not in hrefs:
                                self.logger.warn('Ignoring missing stylesheet in @import rule:', rule.href)
                                continue
                            sitem = hrefs[ihref]
                            if sitem.media_type not in OEB_STYLES:
                                self.logger.warn('CSS @import of non-CSS file %r' % rule.href)
                                continue
                            stylesheets.append(sitem.data)
                    for rule in tuple(stylesheet.cssRules.rulesOfType(CSSRule.PAGE_RULE)):
                        stylesheet.cssRules.remove(rule)
                    # Make links to resources absolute, since these rules will
                    # be folded into a stylesheet at the root
                    replaceUrls(stylesheet, item.abshref,
                            ignoreImportRules=True)
                    stylesheets.append(stylesheet)
            elif (elem.tag == XHTML('link') and elem.get('href') and
                  elem.get('rel', 'stylesheet').lower() == 'stylesheet' and
                  elem.get('type', CSS_MIME).lower() in OEB_STYLES and
                  media_ok(elem.get('media'))
                  ):
                href = urlnormalize(elem.attrib['href'])
                path = item.abshref(href)
                sitem = oeb.manifest.hrefs.get(path, None)
                if sitem is None:
                    self.logger.warn(
                        'Stylesheet %r referenced by file %r not in manifest' %
                        (path, item.href))
                    continue
                if not hasattr(sitem.data, 'cssRules'):
                    self.logger.warn(
                    'Stylesheet %r referenced by file %r is not CSS'%(path,
                        item.href))
                    continue
                stylesheets.append(sitem.data)
        csses = {'extra_css':extra_css, 'user_css':user_css}
        for w, x in csses.items():
            if x:
                try:
                    text = x
                    stylesheet = parser.parseString(text, href=cssname,
                            validate=False)
                    stylesheets.append(stylesheet)
                except:
                    self.logger.exception('Failed to parse %s, ignoring.'%w)
                    self.logger.debug('Bad css: ')
                    self.logger.debug(x)
        rules = []
        index = 0
        self.stylesheets = set()
        self.page_rule = {}
        for sheet_index, stylesheet in enumerate(stylesheets):
            href = stylesheet.href
            self.stylesheets.add(href)
            for rule in stylesheet.cssRules:
                if rule.type == rule.MEDIA_RULE:
                    if media_ok(rule.media.mediaText):
                        for subrule in rule.cssRules:
                            rules.extend(self.flatten_rule(subrule, href, index, is_user_agent_sheet=sheet_index==0))
                            index += 1
                else:
                    rules.extend(self.flatten_rule(rule, href, index, is_user_agent_sheet=sheet_index==0))
                    index = index + 1
        rules.sort()
        self.rules = rules
        self._styles = {}
        pseudo_pat = re.compile(ur':{1,2}(%s)' % ('|'.join(INAPPROPRIATE_PSEUDO_CLASSES)), re.I)
        select = Select(tree, ignore_inappropriate_pseudo_classes=True)

        for _, _, cssdict, text, _ in rules:
            fl = pseudo_pat.search(text)
            try:
                matches = tuple(select(text))
            except SelectorError as err:
                self.logger.error('Ignoring CSS rule with invalid selector: %r (%s)' % (text, as_unicode(err)))
                continue

            if fl is not None:
                fl = fl.group(1)
                if fl == 'first-letter' and getattr(self.oeb,
                        'plumber_output_format', '').lower() in {u'mobi', u'docx'}:
                    # Fake first-letter
                    from lxml.builder import ElementMaker
                    E = ElementMaker(namespace=XHTML_NS)
                    for elem in matches:
                        for x in elem.iter('*'):
                            if x.text:
                                punctuation_chars = []
                                text = unicode(x.text)
                                while text:
                                    category = unicodedata.category(text[0])
                                    if category[0] not in {'P', 'Z'}:
                                        break
                                    punctuation_chars.append(text[0])
                                    text = text[1:]

                                special_text = u''.join(punctuation_chars) + \
                                        (text[0] if text else u'')
                                span = E.span(special_text)
                                span.set('data-fake-first-letter', '1')
                                span.tail = text[1:]
                                x.text = None
                                x.insert(0, span)
                                self.style(span)._update_cssdict(cssdict)
                                break
                else:  # Element pseudo-class
                    for elem in matches:
                        self.style(elem)._update_pseudo_class(fl, cssdict)
            else:
                for elem in matches:
                    self.style(elem)._update_cssdict(cssdict)
        for elem in xpath(tree, '//h:*[@style]'):
            self.style(elem)._apply_style_attr(url_replacer=item.abshref)
        num_pat = re.compile(r'[0-9.]+$')
        for elem in xpath(tree, '//h:img[@width or @height]'):
            style = self.style(elem)
            # Check if either height or width is not default
            is_styled = style._style.get('width', 'auto') != 'auto' or \
                    style._style.get('height', 'auto') != 'auto'
            if not is_styled:
                # Update img style dimension using width and height
                upd = {}
                for prop in ('width', 'height'):
                    val = elem.get(prop, '').strip()
                    try:
                        del elem.attrib[prop]
                    except:
                        pass
                    if val:
                        if num_pat.match(val) is not None:
                            val += 'px'
                        upd[prop] = val
                if upd:
                    style._update_cssdict(upd)
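
The getLogger call in Example 150 hands a project-scoped logger ('calibre.css') to cssutils' CSSParser via its log argument, so the parser's diagnostics show up under calibre's own logging namespace instead of cssutils' default logger. A minimal standalone sketch, assuming the third-party cssutils package is installed:

import logging
import cssutils

logging.basicConfig(level=logging.WARNING)

# Route cssutils' own warnings and errors through a project-scoped logger.
css_log = logging.getLogger('calibre.css')
parser = cssutils.CSSParser(log=css_log)

sheet = parser.parseString('p { color: red }', validate=False)
print(len(sheet.cssRules))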