os.path.expanduser

Here are examples of the Python API os.path.expanduser, taken from open source projects. By voting up, you can indicate which examples are most useful and appropriate.

166 Examples
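Before the project examples below, here is a minimal standalone sketch of what os.path.expanduser does; the home directory shown in the comments is only an assumed example value.

import os.path

# A leading '~' expands to the current user's home directory,
# e.g. '/home/alice' on Linux (assumed example value).
print(os.path.expanduser('~'))
print(os.path.expanduser('~/Hippocampus'))        # e.g. '/home/alice/Hippocampus'

# '~otheruser' expands to that user's home directory when it can be resolved;
# if the expansion fails, the path is returned unchanged.
print(os.path.expanduser('~otheruser/data'))

# Paths without a leading '~' are returned untouched.
print(os.path.expanduser('/tmp/already/absolute'))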

Example 151

Project: Cerebrum Source File: __init__.py
Function: initiate
def initiate():
	ap = argparse.ArgumentParser() # Define an Argument Parser
	ap.add_argument("-v", "--video", help="path to the video file") # Add --video argument
	ap.add_argument("-a", "--audio", help="path to the audio file") # Add --audio argument
	ap.add_argument("-c", "--captions", help="path to the captios file") # Add --captions argument
	args = vars(ap.parse_args()) # Parse the arguments

	stem_manager = multiprocessing.Manager() # Shared memory space manager
	hearing_perception_stimulated = stem_manager.Value('i', 0) # Shared-memory flag (integer) indicating whether hearing perception is stimulated
	vision_perception_stimulated = stem_manager.Value('i', 0) # Shared-memory flag (integer) indicating whether vision perception is stimulated
	language_analysis_stimulated = stem_manager.Value('i', 0) # Shared-memory flag (integer) indicating whether language analysis is stimulated

	active_perceptions = 0

	#args = [os.path.expanduser("--directory ~/ComeOnRethink")]
	#os.execvp("rethinkdb", args)
	rethinkdb_process = subprocess.Popen(['rethinkdb', '--directory', os.path.expanduser('~/Hippocampus')]) # RethinkDB directory to store data and metadata
	time.sleep(3)
	conn = r.connect("localhost", 28015)
	try:
		r.db('test').table_create('hearing_memory').run(conn)
	except:
		pass
	try:
		r.db('test').table_create('hearing_timestamps').run(conn)
	except:
		pass
	try:
		r.db('test').table_create('language_memory').run(conn)
	except:
		pass
	try:
		r.db('test').table_create('language_timestamps').run(conn)
	except:
		pass
	try:
		r.db('test').table_create('vision_memory').run(conn)
	except:
		pass
	try:
		r.db('test').table_create('vision_timestamps').run(conn)
	except:
		pass
	try:
		r.db('test').table_create('crossmodal_mappings').run(conn)
	except:
		pass
	#try:
	#	r.db('test').table_create('neuralnet').run(conn)
	#except:
	#	pass
	conn.close()
	time.sleep(3)

	if args["audio"] is None:
		pass
	else:
		hearing_perception_process = multiprocessing.Process(target=HearingPerception.start, args=(args["audio"],hearing_perception_stimulated)) # Define hearing perception process
		hearing_perception_process.start() # Start hearing perception process
		active_perceptions += 1

	if args["video"] is None:
		pass
	else:
		vision_perception_process = multiprocessing.Process(target=VisionPerception.start, args=(args["video"],vision_perception_stimulated)) # Define vision perception process
		vision_perception_process.start() # Start vision perception process
		active_perceptions += 1

	if args["captions"] is None:
		pass
	else:
		language_analysis_process = multiprocessing.Process(target=LanguageAnalyzer.start, args=(args["captions"],language_analysis_stimulated)) # Define language analysis process
		language_analysis_process.start() # Start language analysis process
		active_perceptions += 1

	crossmodal_mapperHV_process = multiprocessing.Process(target=MapperStarters.startHV) # Define crossmodal mapper for hearing & vision process
	crossmodal_mapperHV_process.start() # Start crossmodal mapperHV process

	crossmodal_mapperHL_process = multiprocessing.Process(target=MapperStarters.startHL) # Define crossmodal mapper for hearing & language process
	crossmodal_mapperHL_process.start() # Start crossmodal mapperHL process

	crossmodal_mapperVL_process = multiprocessing.Process(target=MapperStarters.startVL) # Define crossmodal mapper for vision & language process
	crossmodal_mapperVL_process.start() # Start crossmodal mapperVL process

	training = 0

	while True:
		if args["audio"]:
			if not hearing_perception_process.is_alive():
				active_perceptions -= 1
				args["audio"] = None
				print "WARNING: Hearing Perception process is terminated."
		if args["video"]:
			if not vision_perception_process.is_alive():
				active_perceptions -= 1
				args["video"] = None
				print "WARNING: Vision Perception process is terminated."
		if args["captions"]:
			if not language_analysis_process.is_alive():
				active_perceptions -= 1
				args["captions"] = None
				print "WARNING: Language Analysis process is terminated."
		if active_perceptions == 0 and not training:
				neuralnet_weaver_process = multiprocessing.Process(target=NeuralWeaver.start) # Define neuralnet weaver process
				neuralnet_weaver_process.start() # Start neuralnet weaver process
				training = 1
		if training and not neuralnet_weaver_process.is_alive():
			if crossmodal_mapperHV_process.is_alive():
				crossmodal_mapperHV_process.terminate()
			if crossmodal_mapperHL_process.is_alive():
				crossmodal_mapperHL_process.terminate()
			if crossmodal_mapperVL_process.is_alive():
				crossmodal_mapperVL_process.terminate()
			print "Training is finished."
			break

	os.killpg(os.getpgid(rethinkdb_process.pid), signal.SIGTERM)
	print "Cerebrum exiting."

Example 152

Project: strsync Source File: strsync.py
def main():
    parser = argparse.ArgumentParser(description='Automatically translate and synchronize .strings files from defined base language.')
    parser.add_argument('-b','--base-lang-name', help='A base(or source) localizable resource name.(default=\'Base\'), (e.g. "Base" via \'Base.lproj\', "en" via \'en.lproj\')', default='Base', required=False)
    parser.add_argument('-x','--excluding-lang-names', type=str, help='A localizable resource name that you want to exclude. (e.g. "Base" via \'Base.lproj\', "en" via \'en.lproj\')', default=[], required=False, nargs='+')
    parser.add_argument('-c','--client-id', help='Client ID for MS Translation API', required=True)
    parser.add_argument('-s','--client-secret', help='Client Secret key for MS Translation API', required=True)
    parser.add_argument('-f','--force-translate-keys', type=str, help='Keys in the strings to update and translate by force. (input nothing for all keys.)', default=[], required=False, nargs='*')
    parser.add_argument('-fb','--following-base-keys', type=str, help='Keys in the strings to follow from "Base".', default=[], required=False, nargs='+')
    parser.add_argument('-fbl','--following-base-keys-if-length-longer', type=str, help='Keys in the strings to follow from "Base" if its length longer than length of "Base" value.', default=[], required=False, nargs='+')
    parser.add_argument('-ic','--ignore-comments', help='Allows to ignore comment synchronization.', default=None, required=False, nargs='*')
    parser.add_argument('target path', help='Target localizable resource path. (root path of Base.lproj, default=./)', default='./', nargs='?')
    args = vars(parser.parse_args())

    reload(sys)
    sys.setdefaultencoding('utf-8')

    # configure arguments
    __LANG_SEP__ = '-'
    __DIR_SUFFIX__ = ".lproj"
    __FILE_SUFFIX__ = ".strings"
    __RESOURCE_PATH__ = expanduser(args['target path'])
    __BASE_LANG__ = args['base_lang_name']
    __EXCLUDING_LANGS__ = args['excluding_lang_names']
    __KEYS_FORCE_TRANSLATE__ = args['force_translate_keys']
    __KEYS_FORCE_TRANSLATE_ALL__ = ('--force-translate-keys' in sys.argv or '-f' in sys.argv) and not __KEYS_FORCE_TRANSLATE__
    __KEYS_FOLLOW_BASE__ = args['following_base_keys']
    __KEYS_FOLLOW_BASE_IF_LENGTH_LONGER__ = args['following_base_keys_if_length_longer']
    __IGNORE_COMMENTS__ = args['ignore_comments'] is not None
    __BASE_RESOUCE_DIR__ = None

    __LITERNAL_FORMAT__ = "%@"
    __LITERNAL_FORMAT_RE__ = re.compile(r"(%\s{1,}@)|(@\s{0,}%)")
    __LITERNAL_REPLACEMENT__ = "**"
    __LITERNAL_REPLACEMENT_RE__ = re.compile(r"\*\s{0,}\*")

    __QUOTES_RE__ = re.compile(r"\"")
    __QUOTES_REPLACEMENT__ = "'"

    if __BASE_LANG__.endswith(__DIR_SUFFIX__):
        __BASE_RESOUCE_DIR__ = __BASE_LANG__
        __BASE_LANG__ = __BASE_LANG__.split(__DIR_SUFFIX__)[0]
    else:
        __BASE_RESOUCE_DIR__ = __BASE_LANG__+__DIR_SUFFIX__

    # setup Translator & langs

    # read ios langs
    print '(i) Fetching supported locale codes for ios9 ...'
    __IOS9_CODES__ = [lang_row[0] for lang_row in csv.reader(open(resolve_file_path('lc_ios9.tsv'),'rb'), delimiter='\t')]
    print '(i) Supported numbers of locale code :', len(__IOS9_CODES__)

    __MS_CODE_ALIASES__ = {
        # MS API Supported : ios9 supported ISO639 1-2 codes
        'zh-CHS' : ['zh-Hans', 'zh-CN', 'zh-SG'],
        'zh-CHT' : ['zh-Hant', 'zh-MO', 'zh-HK', 'zh-TW'],
        'en' : ['en-AU', 'en-GB'],
        'es' : ['es-MX'],
        'fr' : ['fr-CA'],
        'pt' : ['pt-BR','pt-PT']
    }

    # read mst langs
    print '(i) Fetching supported locales from Microsoft Translation API...'
    trans = Translator(args['client_id'], args['client_secret'])

    __MS_LANG_FILE__ = resolve_file_path('lc_ms.cached.tsv')
    __MS_SUPPORTED_CODES__ = None
    if os.path.exists(__MS_LANG_FILE__):
        __MS_SUPPORTED_CODES__ = [l.strip() for l in open(__MS_LANG_FILE__,'rb').readlines()]
    else:
        __MS_SUPPORTED_CODES__ = trans.get_languages()
        cfile = open(__MS_LANG_FILE__,'w')
        codes = ''
        for code in __MS_SUPPORTED_CODES__:
            codes += code+'\n'
        cfile.write(codes)
        cfile.close()
    print '(i) Supported numbers of locale code :', len(__MS_SUPPORTED_CODES__)
    
    #
    global_result_logs = {}

    # methods
    def supported_lang(code):
        alias = [ms for ms, ios in __MS_CODE_ALIASES__.items() if code in ios]
        # check es-{Custom defined alias}
        if len(alias)==1:
            return alias[0]
        # check es-MX
        elif code in __MS_SUPPORTED_CODES__:
            return code
        # check es
        elif code.split(__LANG_SEP__)[0] in __MS_SUPPORTED_CODES__:
            return code.split(__LANG_SEP__)[0]
        else:
            return None

    def preprocessing_translate_strs(strs):
        return [__LITERNAL_FORMAT_RE__.sub(__LITERNAL_FORMAT__, s.strip()).replace(__LITERNAL_FORMAT__, __LITERNAL_REPLACEMENT__) for s in strs]

    def postprocessing_translate_str(str):
        str = str.strip()
        # remove Quotes
        str = __QUOTES_RE__.sub(__QUOTES_REPLACEMENT__, str)
        # normalize the literal-replacement token
        str = validate_liternal_replacement(str)
        # convert the literal replacement back to the literal format specifier
        str = str.replace(__LITERNAL_REPLACEMENT__, __LITERNAL_FORMAT__)
        return str

    def validate_liternal_format(str):
        return __LITERNAL_FORMAT_RE__.sub(__LITERNAL_FORMAT__, str)

    def validate_liternal_replacement(str):
        return __LITERNAL_REPLACEMENT_RE__.sub(__LITERNAL_FORMAT__, str)

    def translate_ms(strs, to):
        lang = supported_lang(to)
        strs = preprocessing_translate_strs(strs)
        return [postprocessing_translate_str(r['TranslatedText']) for r in trans.translate_array(strs, lang)] if lang else strs

    def strings_obj_from_file(file):
        return strsparser.parse_strings(filename=file)

    def merge_two_dicts(x, y):
        '''Given two dicts, merge them into a new dict as a shallow copy.'''
        z = x.copy()
        z.update(y)
        return z

    # core function
    def insert_or_translate(target_file, lc):
        #parse target file
        target_kv = {}
        target_kc = {}
        target_error_lines = []
        if not notexist_or_empty_file(target_file):
            parsed_strings = strsparser.parse_strings(filename=target_file)
            for item in parsed_strings:
                k, e = item['key'], item['error']
                # line error
                if e:
                    target_error_lines.append(e)
                if not target_error_lines:
                    target_kv[k] = item['value']
                    target_kc[k] = item['comment']

        #parsing complete or return.
        if target_error_lines:
            print '(!) Syntax error - Skip'
            return False, None, None, target_error_lines

        #base
        base_content = base_dict[os.path.basename(target_file)]
        base_kv = {}
        base_kc = {}
        for item in base_content:
            k, e = item['key'], item['error']
            # line error
            if e:
                print '(!) WARNING : Syntax error from Base -> ', k, ':' , e
            base_kv[k] = item['value']
            base_kc[k] = item['comment']

        force_adding_keys = base_kv.keys() if __KEYS_FORCE_TRANSLATE_ALL__ else __KEYS_FORCE_TRANSLATE__
        adding_keys = list(((set(base_kv.keys()) - set(target_kv.keys())) | (set(base_kv.keys()) & set(force_adding_keys))) - set(__KEYS_FOLLOW_BASE__))
        removing_keys = list(set(target_kv.keys()) - set(base_kv.keys()))
        existing_keys = list(set(base_kv.keys()) - (set(adding_keys) | set(removing_keys)))
        updated_keys = []

        """
        perform translate
        """
        translated_kv = {}
        if len(adding_keys):
            print 'Translating...'
            translated_kv = dict(zip(adding_keys, translate_ms([base_kv[k] for k in adding_keys], lc)))

        updated_content = []
        for item in base_content:
            k = item['key']
            newitem = dict.fromkeys(item.keys())
            newitem['key'] = k
            target_value, target_comment = target_kv.get(k), target_kc.get(k)
            newitem['comment'] = target_comment if __IGNORE_COMMENTS__ else target_comment or base_kc[k]
            needs_update_comment = False if __IGNORE_COMMENTS__ else not target_comment and base_kc[k]
            
            #added
            if k in adding_keys:
                if k in translated_kv:
                    newitem['value'] = translated_kv[k]
                    if not newitem['comment']:
                        newitem['comment'] = 'Translated from: {0}'.format(base_kv[k])
                    print '[Add] "{0}" = "{1}" <- {2}'.format(k, newitem['value'], base_kv[k])
                else:
                    newitem['value'] = target_kv[k]
                    if not newitem['comment']:
                        newitem['comment'] = 'Translate failed from: {0}'.format(base_kv[k])
                    print '[Error] "{0}" = "{1}" X <- {2}'.format(k, newitem['value'], base_kv[k])
            #exists
            elif k in existing_keys:
                
                if k in __KEYS_FOLLOW_BASE_IF_LENGTH_LONGER__:
                    if target_value != base_kv[k] and len(target_value) > len(base_kv[k]) or needs_update_comment:
                        print '(!) Length of "', target_value, '" is longer than"', base_kv[k], '" as', len(target_value), '>', len(base_kv[k])
                        newitem['value'] = base_kv[k]
                        updated_keys.append(k)
                        
                        if not lc in global_result_logs:                            
                            global_result_logs[lc] = {}
                        global_result_logs[lc][k] = (target_value, base_kv[k])
                    else:
                        newitem['value'] = target_value or base_kv[k]
                        
                elif k in __KEYS_FOLLOW_BASE__:
                    newitem['value'] = base_kv[k]
                    if target_value != base_kv[k] or needs_update_comment:
                        updated_keys.append(k)
                        
                else:
                    newitem['value'] = target_value or base_kv[k]
                    if not target_value or needs_update_comment:
                        updated_keys.append(k)

            updated_content.append(newitem)

        #removed or wrong
        for k in removing_keys:
            print '[Remove]', k

        if len(adding_keys) or len(removing_keys):
            print '(i) Changed Keys: Added {0}, Updated {1}, Removed {2}'.format(len(adding_keys), len(updated_keys), len(removing_keys))

        return updated_content and (len(adding_keys)>0 or len(updated_keys)>0 or len(removing_keys)>0), updated_content, translated_kv, target_error_lines

    def write_file(target_file, list_of_content):
        suc = False
        try:
            f = codecs.open(target_file, "w", "utf-8")
            contents = ''
            for content in list_of_content:
                if content['comment']:
                    contents += '/*{0}*/'.format(content['comment']) + '\n'
                contents += '"{0}" = "{1}";'.format(content['key'], content['value']) + '\n'
            f.write(contents)
            suc = True
        except IOError:
            print 'IOError to open', target_file
        finally:
            f.close()
        return suc

    def remove_file(target_file):
        try:
            os.rename(target_file, target_file+'.deleted')
            return True
        except IOError:
            print 'IOError to rename', target_file
            return False

    def create_file(target_file):
        open(target_file, 'a').close()

    def notexist_or_empty_file(target_file):
        return not os.path.exists(target_file) or os.path.getsize(target_file)==0

    def resolve_file_names(target_file_names):
        return map(lambda f: f.decode('utf-8'), filter(lambda f: f.endswith(__FILE_SUFFIX__), target_file_names))

    base_dict = {}
    results_dict = {}

    # Get Base Language Specs

    walked = list(os.walk(__RESOURCE_PATH__, topdown=True))

    for dir, subdirs, files in walked:
        if os.path.basename(dir)==__BASE_RESOUCE_DIR__:
            for _file in resolve_file_names(files):
                f = os.path.join(dir, _file)
                if notexist_or_empty_file(f):
                    continue

                base_dict[_file] = strings_obj_from_file(f)

    if not base_dict:
        print '[!] Not found "{0}" in target path "{1}"'.format(__BASE_RESOUCE_DIR__, __RESOURCE_PATH__)
        sys.exit(0)

    print 'Start synchronizing...'
    for file in base_dict:
        print 'Target:', file

    for dir, subdirs, files in walked:
        files = resolve_file_names(files)

        if dir.endswith((__DIR_SUFFIX__)):
            lc = os.path.basename(dir).split(__DIR_SUFFIX__)[0]
            if lc.find('_'): lc = lc.replace('_', __LANG_SEP__)
            if lc == __BASE_LANG__:
                continue

            if lc in __EXCLUDING_LANGS__:
                print 'Skip: ', lc
                continue

            # lc = supported_lang(lc)
            results_dict[lc] = {
                'deleted_files' : [],
                'added_files' : [],
                'updated_files' : [],
                'skipped_files' : [],
                'translated_files_lines' : {},
                'error_lines_kv' : {}
            }

            if not supported_lang(lc):
                print 'Not supported: ', lc
                results_dict[lc]['skipped_files'] = join_path_all(dir, files)
                continue

            print '\n', 'Analyzing localizables... {1} (at {0})'.format(dir, lc)

            added_files = list(set(base_dict.keys()) - set(files))
            removed_files = list(set(files) - set(base_dict.keys()))
            existing_files = list(set(files) - (set(added_files) | set(removed_files)))

            added_files = join_path_all(dir, added_files)
            removed_files = join_path_all(dir, removed_files)
            existing_files = join_path_all(dir, existing_files)

            added_cnt, updated_cnt, removed_cnt = 0, 0, 0
            translated_files_lines = results_dict[lc]['translated_files_lines']
            error_files = results_dict[lc]['error_lines_kv']

            #remove - file
            for removed_file in removed_files:
                print 'Removing File... {0}'.format(removed_file)
                if remove_file(removed_file):
                    removed_cnt+=1

            #add - file
            for added_file in added_files:
                print 'Adding File... {0}'.format(added_file)
                create_file(added_file)
                u, c, t, e = insert_or_translate(added_file, lc)
                #error
                if e:
                    error_files[added_file] = e
                #normal
                elif u and write_file(added_file, c):
                    added_cnt+=1
                    translated_files_lines[added_file] = t

            #exist - lookup lines
            for ext_file in existing_files:
                u, c, t, e = insert_or_translate(ext_file, lc)
                #error
                if e:
                    error_files[ext_file] = e
                #normal
                elif u:
                    print 'Updating File... {0}'.format(ext_file)
                    if write_file(ext_file, c):
                        updated_cnt += 1
                        translated_files_lines[ext_file] = t

            if added_cnt or updated_cnt or removed_cnt or error_files:
                print '(i) Changed Files : Added {0}, Updated {1}, Removed {2}, Error {3}'.format(added_cnt, updated_cnt, removed_cnt, len(error_files.keys()))
            else:
                print 'Nothing to translate or add.'

            """
            Results
            """
            results_dict[lc]['deleted_files'] = removed_files
            results_dict[lc]['added_files'] = list(set(added_files) & set(translated_files_lines.keys()))
            results_dict[lc]['updated_files'] = list(set(existing_files) & set(translated_files_lines.keys()))
            if error_files:
                print error_files
            results_dict[lc]['error_lines_kv'] = error_files

    # print total Results
    print ''
    t_file_cnt, t_line_cnt = 0, 0
    file_add_cnt, file_remove_cnt, file_update_cnt, file_skip_cnt = 0,0,0,0

    for lc in results_dict.keys():
        result_lc = results_dict[lc]

        file_add_cnt += len(result_lc['added_files'])
        file_remove_cnt += len(result_lc['deleted_files'])
        file_update_cnt += len(result_lc['updated_files'])
        file_skip_cnt += len(result_lc['skipped_files'])

        for f in result_lc['added_files']: print 'Added',f
        for f in result_lc['deleted_files']: print 'Removed',f
        for f in result_lc['updated_files']: print 'Updated',f
        for f in result_lc['skipped_files']: print 'Skipped',f

        tfiles = result_lc['translated_files_lines']
        if tfiles:
            # print '============ Results for langcode : {0} ============='.format(lc)
            for f in tfiles:
                t_file_cnt += 1
                if len(tfiles[f]):
                    # print '', f
                    for key in tfiles[f]:
                        t_line_cnt += 1
                        # print key, ' = ', tfiles[f][key]
          
    for lc in global_result_logs.keys():
        print lc
        for t in global_result_logs[lc].keys():
            o, b = global_result_logs[lc][t]
            print o.decode('utf-8'), ' -> ', b

    print ''
    found_warining = filter(lambda i: i or None, rget(results_dict, 'error_lines_kv'))

    if file_add_cnt or file_update_cnt or file_remove_cnt or file_skip_cnt or found_warining:
        print 'Total New Translated Strings : {0}'.format(t_line_cnt)
        print 'Changed Files Total : Added {0}, Updated {1}, Removed {2}, Skipped {3}'.format(file_add_cnt, file_update_cnt, file_remove_cnt, file_skip_cnt)
        print "Synchronized."

        if found_warining:
            print '\n[!!] WARNING: Found strings that contain syntax errors. Please confirm.'
            for a in found_warining:
                for k in a:
                    print 'at', k
                    for i in a[k]:
                        print ' ', i
    else:
        print "All strings are already synchronized. Nothing to translate or add."

    return

Example 153

Project: realdebrid-CLI Source File: rdcli.py
def main():
    """
    Main program
    """

    base = path.join(path.expanduser('~'), '.config', 'rdcli-py')
    conf_file = path.join(base, 'conf.json')
    cookie_file = path.join(base, 'cookie.txt')

    list_only = False
    test = False
    verbose = True
    timeout = 120

    # make sure the config dir exists
    if not path.exists(base):
        makedirs(base)

    try:
        with open(conf_file, 'r') as conf:
            configuration = load(conf)
    except (IOError, ValueError):
        configuration = {}

    # the default output dir is taken from the config file
    # if it hasn't been configured, then use the current directory
    output_dir = configuration.get('output_dir', getcwd())
    download_password = ''

    worker = RDWorker(cookie_file)

    # parse command-line arguments
    try:
        opts, args = gnu_getopt(argv[1:], 'hviqtlp:o:T:O:', ['config', 'version'])
    except GetoptError as e:
        print str(e)
        print_help()
        exit(1)

    for option, argument in opts:
        if option == '-h':
            print_help()
            exit(0)
        if option == '--version' or option == '-v':
            print_version()
            exit(0)
        if option == '--config':
            config_args = argv[2:]

            if len(config_args) == 0:
                print 'Error: No configuration option supplied'
                exit(1)
            if len(config_args) == 1:
                config_args.append(None)

            if len(config_args) > 2:
                print 'WARNING: the following values have been ignored:', ', '.join(config_args[2:])
                config_args = config_args[0:2]

            config.update_value(*config_args, conf_file=conf_file)
            exit(0)
        elif option == '-i':
            username, password = config.ask_credentials()
            config.save_credentials(username, password, conf_file)
        elif option == '-q':
            if not list_only:
                verbose = False
        elif option == '-t':
            if not list_only:
                test = True
        elif option == '-l':
            list_only = True
            test = False
            verbose = False
        elif option == '-o':
            output_dir = argument
        elif option == '-p':
            download_password = argument
        elif option == '-T':
            timeout = int(argument)
        elif option == '-O':
            filename = argument

    # stop now if no download and no output wanted
    if test and not verbose:
        exit(0)

    if verbose:
        def debug(s):
            print s,
    else:
        def debug(s):
            pass

    # make sure we have something to process
    if len(args) > 0:
        output_dir = path.abspath(path.expanduser(output_dir))
        # ensure we can write in output directory
        if not output_dir == getcwd() and not path.exists(unicode(output_dir)):
            debug('%s no such directory' % unicode(output_dir))
            exit(1)
        else:
            if not access(output_dir, W_OK | X_OK):
                debug('Output directory not writable')
                exit(1)
            else:
                debug(u'Output directory: %s\n' % output_dir)

        # retrieve login info
        try:
            with open(conf_file, 'r') as conf:
                configuration = load(conf)
                username = configuration.get('username', '')
                password = configuration.get('password', '')
        except (KeyError, IOError, ValueError):
            username, password = config.ask_credentials()
            config.save_credentials(username, password, conf_file)

        # login
        try:
            worker.login(username, password)
        except BaseException as e:
            exit('Login failed: %s' % str(e))

        if path.isfile(args[0]):
            with open(args[0], 'r') as f:
                links = f.readlines()
        else:
            links = args[0].splitlines()

        # unrestrict and download
        for link in links:
            link = link.strip()
            debug('\nUnrestricting %s' % link)

            try:
                unrestricted, original_filename = worker.unrestrict(link, download_password)
                debug(u' -> ' + unrestricted + '\n')

                if list_only:
                    print unrestricted
                elif not test:

                    if len(links) == 1:
                        try:
                            fullpath = path.join(output_dir, filename)
                        except NameError:
                            fullpath = path.join(output_dir, original_filename)
                    else:
                        fullpath = path.join(output_dir, original_filename)

                    try:
                        to_mb = lambda b: b / 1048576.
                        to_kb = lambda b: b / 1024.

                        opener = build_opener(HTTPCookieProcessor(worker.cookies))
                        stream = opener.open(unrestricted)
                        info = stream.info().getheaders('Content-Length')

                        total_size = 0
                        downloaded_size = 0

                        if len(info):
                            total_size = float(info[0])
                            start = 'Downloading: %s (%.2f MB)\n' % (fullpath, to_mb(total_size))
                        else:
                            start = 'Downloading: %s (unknown size)\n' % fullpath

                        debug(start)

                        with open(fullpath, 'wb') as output:
                            start = datetime.now()
                            end = datetime.now()

                            if verbose:
                                status = ''

                            while True:
                                try:
                                    content = stream.read(20480)  # 20 KB

                                    if not content:
                                        break

                                    output.write(content)
                                    downloaded_size += len(content)

                                    if verbose:
                                        padding_length = len(status)
                                        last_downloaded = len(content)

                                        if last_downloaded > 1024:
                                            speed = to_mb(last_downloaded) / (datetime.now() - end).total_seconds()
                                            unit = 'MB/s'
                                        else:
                                            speed = to_kb(last_downloaded) / (datetime.now() - end).total_seconds()
                                            unit = 'kB/s'

                                        status = '\r%.3f MB' % to_mb(downloaded_size)

                                        if total_size > 0:
                                            status += '  [%3.2f%%]' % (downloaded_size * 100. / total_size)

                                        status += '  @ %.2f %s' % (speed, unit)

                                        print status.ljust(padding_length),
                                        end = datetime.now()

                                except KeyboardInterrupt:
                                    break

                            output.flush()
                            stream.close()

                        speed = to_mb(downloaded_size) / (end - start).total_seconds()

                        if total_size > 0:
                            final_status = '%.2f MB [%.2f%%] downloaded in %s (%.2f MB/s avg.)' \
                                           % (to_mb(downloaded_size), (downloaded_size * 100. / total_size),
                                              str(end - start).split('.')[0], speed)
                        else:
                            final_status = '%.2f MB downloaded in %s (%.2f MB/s avg.)' \
                                           % (to_mb(downloaded_size), str(end - start).split('.')[0], speed)
                        debug('\r%s\n' % final_status)
                    except BaseException as e:
                        debug('\nDownload failed: %s\n' % e)
            except UnrestrictionError as e:
                debug('-> WARNING, unrestriction failed (%s)' % str(e) + '\n')

        debug('End\n')
        return 0
    else:
        print_help()
        exit(1)

Example 154

Project: ncclient Source File: ssh.py
Function: connect
    def connect(self, host, port=830, timeout=None, unknown_host_cb=default_unknown_host_cb,
                username=None, password=None, key_filename=None, allow_agent=True,
                hostkey_verify=True, look_for_keys=True, ssh_config=None):

        """Connect via SSH and initialize the NETCONF session. First attempts the publickey authentication method and then password authentication.

        To disable attempting publickey authentication altogether, call with *allow_agent* and *look_for_keys* as `False`.

        *host* is the hostname or IP address to connect to

        *port* is by default 830, but some devices use the default SSH port of 22 so this may need to be specified

        *timeout* is an optional timeout for socket connect

        *unknown_host_cb* is called when the server host key is not recognized. It takes two arguments, the hostname and the fingerprint (see the signature of :func:`default_unknown_host_cb`)

        *username* is the username to use for SSH authentication

        *password* is the password used if using password authentication, or the passphrase to use for unlocking keys that require it

        *key_filename* is a filename where the private key to be used can be found

        *allow_agent* enables querying SSH agent (if found) for keys

        *hostkey_verify* enables hostkey verification from ~/.ssh/known_hosts

        *look_for_keys* enables looking in the usual locations for ssh keys (e.g. :file:`~/.ssh/id_*`)

        *ssh_config* enables parsing of an OpenSSH configuration file, if set to its path, e.g. :file:`~/.ssh/config` or to True (in this case, use :file:`~/.ssh/config`).
        """
        # Optionally, parse .ssh/config
        config = {}
        if ssh_config is True:
            ssh_config = "~/.ssh/config" if sys.platform != "win32" else "~/ssh/config"
        if ssh_config is not None:
            config = paramiko.SSHConfig()
            config.parse(open(os.path.expanduser(ssh_config)))
            config = config.lookup(host)
            host = config.get("hostname", host)
            if username is None:
                username = config.get("user")
            if key_filename is None:
                key_filename = config.get("identityfile")

        if username is None:
            username = getpass.getuser()

        sock = None
        if config.get("proxycommand"):
            sock = paramiko.proxy.ProxyCommand(config.get("proxycommand"))
        else:
            for res in socket.getaddrinfo(host, port, socket.AF_UNSPEC, socket.SOCK_STREAM):
                af, socktype, proto, canonname, sa = res
                try:
                    sock = socket.socket(af, socktype, proto)
                    sock.settimeout(timeout)
                except socket.error:
                    continue
                try:
                    sock.connect(sa)
                except socket.error:
                    sock.close()
                    continue
                break
            else:
                raise SSHError("Could not open socket to %s:%s" % (host, port))

        t = self._transport = paramiko.Transport(sock)
        t.set_log_channel(logger.name)
        if config.get("compression") == 'yes':
            t.use_compression()

        try:
            t.start_client()
        except paramiko.SSHException:
            raise SSHError('Negotiation failed')

        # host key verification
        server_key = t.get_remote_server_key()

        fingerprint = _colonify(hexlify(server_key.get_fingerprint()))

        if hostkey_verify:
            known_host = self._host_keys.check(host, server_key)
            if not known_host and not unknown_host_cb(host, fingerprint):
                raise SSHUnknownHostError(host, fingerprint)

        if key_filename is None:
            key_filenames = []
        elif isinstance(key_filename, (str, bytes)):
            key_filenames = [ key_filename ]
        else:
            key_filenames = key_filename

        self._auth(username, password, key_filenames, allow_agent, look_for_keys)

        self._connected = True # there was no error authenticating
        # TODO: leopoul: Review, test, and if needed rewrite this part
        subsystem_names = self._device_handler.get_ssh_subsystem_names()
        for subname in subsystem_names:
            c = self._channel = self._transport.open_session()
            self._channel_id = c.get_id()
            channel_name = "%s-subsystem-%s" % (subname, str(self._channel_id))
            c.set_name(channel_name)
            try:
                c.invoke_subsystem(subname)
            except paramiko.SSHException as e:
                logger.info("%s (subsystem request rejected)", e)
                handle_exception = self._device_handler.handle_connection_exceptions(self)
                # Ignore the exception, since we continue to try the different
                # subsystem names until we find one that can connect.
                #have to handle exception for each vendor here
                if not handle_exception:
                    continue
            self._channel_name = c.get_name()
            self._post_connect()
            return
        raise SSHError("Could not open connection, possibly due to unacceptable"
                       " SSH subsystem name.")

Example 155

Project: rez Source File: search.py
def command(opts, parser, extra_arg_groups=None):
    from rez.config import config
    from rez.exceptions import RezError
    from rez.utils.formatting import get_epoch_time_from_str, expand_abbreviations
    from rez.utils.logging_ import print_error
    from rez.packages_ import iter_package_families, iter_packages
    from rez.vendor.version.requirement import Requirement
    import os.path
    import fnmatch
    import sys

    error_class = None if opts.debug else RezError

    before_time = 0
    after_time = 0
    if opts.before:
        before_time = get_epoch_time_from_str(opts.before)
    if opts.after:
        after_time = get_epoch_time_from_str(opts.after)
    if after_time and before_time and (after_time >= before_time):
        parser.error("non-overlapping --before and --after")

    if opts.paths is None:
        pkg_paths = config.nonlocal_packages_path if opts.no_local else None
    else:
        pkg_paths = (opts.paths or "").split(os.pathsep)
        pkg_paths = [os.path.expanduser(x) for x in pkg_paths if x]

    name_pattern = opts.PKG or '*'
    version_range = None
    if opts.PKG:
        try:
            req = Requirement(opts.PKG)
            name_pattern = req.name
            if not req.range.is_any():
                version_range = req.range
        except:
            pass

    type_ = opts.type
    if opts.errors or (type_ == "auto" and version_range):
        type_ = "package"
        # turn some of the nastier rez-1 warnings into errors
        config.override("error_package_name_mismatch", True)
        config.override("error_version_mismatch", True)
        config.override("error_nonstring_version", True)

    if opts.no_warnings:
        config.override("warn_none", True)

    # families
    found = False
    family_names = []
    families = iter_package_families(paths=pkg_paths)
    if opts.sort:
        families = sorted(families, key=lambda x: x.name)
    for family in families:
        if family.name not in family_names and \
                fnmatch.fnmatch(family.name, name_pattern):
            family_names.append(family.name)
            if type_ == "auto":
                type_ = "package" if family.name == name_pattern else "family"
            if type_ == "family":
                print family.name
                found = True

    def _handle(e):
        print_error(str(e))

    def _print_resource(r):
        if opts.validate:
            try:
                r.validate_data()
            except error_class as e:
                _handle(e)
                return

        if opts.format:
            txt = expand_abbreviations(opts.format, fields)
            lines = txt.split("\\n")
            for line in lines:
                try:
                    line_ = r.format(line)
                except error_class as e:
                    _handle(e)
                    break
                if opts.no_newlines:
                    line_ = line_.replace('\n', "\\n")

                print line_
        else:
            print r.qualified_name

    # packages/variants
    if type_ in ("package", "variant"):
        for name in family_names:
            packages = iter_packages(name, version_range, paths=pkg_paths)
            if opts.sort or opts.latest:
                packages = sorted(packages, key=lambda x: x.version)
                if opts.latest and packages:
                    packages = [packages[-1]]

            for package in packages:
                if ((before_time or after_time)
                    and package.timestamp
                    and (before_time and package.timestamp >= before_time
                         or after_time and package.timestamp <= after_time)):
                    continue

                if opts.errors:
                    try:
                        package.validate_data()
                    except error_class as e:
                        _handle(e)
                        found = True
                elif type_ == "package":
                    _print_resource(package)
                    found = True
                elif type_ == "variant":
                    try:
                        package.validate_data()
                    except error_class as e:
                        _handle(e)
                        continue

                    try:
                        for variant in package.iter_variants():
                            _print_resource(variant)
                            found = True
                    except error_class as e:
                        _handle(e)
                        continue

    if not found:
        if opts.errors:
            print "no erroneous packages found"
        else:
            print "no matches found"
            sys.exit(1)

Example 156

Project: rez Source File: shells.py
Function: spawn_shell
    def spawn_shell(self, context_file, tmpdir, rcfile=None, norc=False,
                    stdin=False, command=None, env=None, quiet=False,
                    pre_command=None, add_rez=True,
                    package_commands_sourced_first=None, **Popen_args):

        d = self.get_startup_sequence(rcfile, norc, bool(stdin), command)
        envvar = d["envvar"]
        files = d["files"]
        bind_files = d["bind_files"]
        do_rcfile = d["do_rcfile"]
        shell_command = None

        if package_commands_sourced_first is None:
            package_commands_sourced_first = config.package_commands_sourced_first

        def _record_shell(ex, files, bind_rez=True, print_msg=False):
            if bind_rez and package_commands_sourced_first:
                ex.source(context_file)

            for file_ in files:
                if os.path.exists(os.path.expanduser(file_)):
                    ex.source(file_)

            if bind_rez and not package_commands_sourced_first:
                ex.source(context_file)

            if envvar:
                ex.unsetenv(envvar)
            if add_rez and bind_rez:
                ex.interpreter._bind_interactive_rez()
            if print_msg and add_rez and not quiet:
                ex.info('')
                ex.info('You are now in a rez-configured environment.')
                ex.info('')
                if system.is_production_rez_install:
                    ex.command('rezolve context')

        def _write_shell(ex, filename):
            code = ex.get_output()
            target_file = os.path.join(tmpdir, filename)
            with open(target_file, 'w') as f:
                f.write(code)
            return target_file

        def _create_ex():
            return RexExecutor(interpreter=self.new_shell(),
                               parent_environ={},
                               add_default_namespaces=False)

        executor = _create_ex()

        if self.settings.prompt:
            newprompt = '${REZ_ENV_PROMPT}%s' % self.settings.prompt
            executor.interpreter._saferefenv('REZ_ENV_PROMPT')
            executor.env.REZ_ENV_PROMPT = newprompt

        if d["command"] is not None:
            _record_shell(executor, files=files)
            shell_command = d["command"]
        else:
            if d["stdin"]:
                assert(self.stdin_arg)
                shell_command = "%s %s" % (self.executable, self.stdin_arg)
                quiet = True
            elif do_rcfile:
                assert(self.rcfile_arg)
                shell_command = "%s %s" % (self.executable, self.rcfile_arg)
            else:
                shell_command = self.executable

            if do_rcfile:
                # hijack rcfile to insert our own script
                ex = _create_ex()
                _record_shell(ex, files=files, print_msg=(not quiet))
                filename = "rcfile.%s" % self.file_extension()
                filepath = _write_shell(ex, filename)
                shell_command += " %s" % filepath
            elif envvar:
                # hijack env-var to insert our own script
                ex = _create_ex()
                _record_shell(ex, files=files, print_msg=(not quiet))
                filename = "%s.%s" % (envvar, self.file_extension())
                filepath = _write_shell(ex, filename)
                executor.setenv(envvar, filepath)
            else:
                # hijack $HOME to insert our own script
                files = [x for x in files if x not in bind_files] + list(bind_files)

                if files:
                    for file_ in files:
                        if file_ in bind_files:
                            bind_rez = True
                            files_ = [file_] if d["source_bind_files"] else []
                        else:
                            bind_rez = False
                            files_ = [file_]

                        ex = _create_ex()
                        ex.setenv('HOME', os.environ.get('HOME', ''))
                        _record_shell(ex, files=files_, bind_rez=bind_rez,
                                      print_msg=bind_rez)
                        _write_shell(ex, os.path.basename(file_))

                    executor.setenv("HOME", tmpdir)

                    # keep history
                    if self.histfile and self.histvar:
                        histfile = os.path.expanduser(self.histfile)
                        if os.path.exists(histfile):
                            executor.setenv(self.histvar, histfile)
                else:
                    if config.warn("shell_startup"):
                        print_warning(
                            "WARNING: Could not configure environment from "
                            "within the target shell (%s); this has been done "
                            "in the parent process instead." % self.name())
                    executor.source(context_file)

        if shell_command:  # an empty string means 'run no command and exit'
            executor.command(shell_command)
        executor.command("exit %s" % self.last_command_status)

        code = executor.get_output()
        target_file = os.path.join(tmpdir, "rez-shell.%s" % self.file_extension())
        with open(target_file, 'w') as f:
            f.write(code)

        if d["stdin"] and stdin and (stdin is not True):
            Popen_args["stdin"] = stdin

        cmd = []
        if pre_command:
            if isinstance(pre_command, basestring):
                cmd = pre_command.strip().split()
            else:
                cmd = pre_command
        cmd.extend([self.executable, target_file])

        try:
            p = subprocess.Popen(cmd, env=env, **Popen_args)
        except Exception as e:
            cmd_str = ' '.join(map(pipes.quote, cmd))
            raise RezSystemError("Error running command:\n%s\n%s"
                                 % (cmd_str, str(e)))
        return p

Example 157

Project: neon Source File: argparser.py
Function: parse_args
    def parse_args(self, gen_be=True):
        """
        Parse the command line arguments and setup neon
        runtime environment accordingly

        Arguments:
            gen_be (bool): if False, the arg parser will not
                           generate the backend

        Returns:
            namespace: contains the parsed arguments as attributes

        """
        args = super(NeonArgparser, self).parse_args()
        err_msg = None  # used for relaying exception to logger

        # set up the logging
        # max thresh is 50 (critical only), min is 10 (debug or higher)
        try:
            log_thresh = max(10, 40 - args.verbose * 10)
        except (AttributeError, TypeError):
            # if defaults are not set or not -v given
            # for latter will get type error
            log_thresh = 30
        args.log_thresh = log_thresh

        # logging formatter
        fmtr = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')

        # get the parent logger for neon
        main_logger = logging.getLogger('neon')
        main_logger.setLevel(log_thresh)

        # setup a console stderr log handler
        stderrlog = logging.StreamHandler()
        stderrlog.setFormatter(fmtr)

        # expand any user directories in paths
        for path in ['data_dir', 'save_path', 'model_file', 'output_file',
                     'logfile']:
            if getattr(args, path):
                setattr(args, path, os.path.expanduser(getattr(args, path)))

        if args.logfile:
            # add log to file as well
            filelog = RotatingFileHandler(filename=args.logfile, mode='w',
                                          maxBytes=10000000, backupCount=5)
            filelog.setFormatter(fmtr)
            filelog.setLevel(log_thresh)
            main_logger.addHandler(filelog)

            # if a log file is specified and progress bar displayed,
            # log only errors to console.
            if args.no_progress_bar:
                stderrlog.setLevel(log_thresh)
            else:
                stderrlog.setLevel(logging.ERROR)
        else:
            stderrlog.setLevel(log_thresh)

        # add this handler instead
        main_logger.propagate = False
        main_logger.addHandler(stderrlog)

        # need to write out float otherwise numpy
        # generates type in bytes not bits (f16 == 128 bits)
        args.datatype = 'float' + args.datatype[1:]
        args.datatype = np.dtype(args.datatype).type

        # invert no_progress_bar meaning and store in args.progress_bar
        args.progress_bar = not args.no_progress_bar

        if args.backend == 'cpu' and args.rounding > 0:
            err_msg = 'CPU backend does not support stochastic rounding'
            logger.exception(err_msg)
            raise NotImplementedError(err_msg)

        # done up front to avoid losing data due to incorrect path
        if args.save_path:
            savedir = os.path.dirname(os.path.abspath(args.save_path))
            if not os.access(savedir, os.R_OK | os.W_OK):
                try:
                    os.makedirs(savedir)
                except OSError:
                    err_msg = 'Can not create save_path %s' % (savedir)
            if os.path.exists(args.save_path):
                logger.warning('save file %s exists, attempting to overwrite' % args.save_path)
                if not os.access(args.save_path, os.R_OK | os.W_OK):
                    err_msg = 'Can not write to save_path file %s' % args.save_path
            if err_msg:
                logger.exception(err_msg)
                raise IOError(err_msg)

        if (args.serialize > 0) and (args.save_path is None):
            args.save_path = "neon_model.pkl"
            logger.warn('No path given for model serialization, using default "%s"',
                        args.save_path)
        if (args.save_path is not None) and (args.serialize == 0):
            args.serialize = 1
            logger.warn('No schedule given for model serialization, using default %d',
                        args.serialize)

        if args.model_file:
            err_msg = None
            if not os.path.exists(args.model_file):
                err_msg = 'Model file %s not present' % args.model_file
            if not os.access(args.model_file, os.R_OK):
                err_msg = 'No read access for model file %s' % args.model_file
            if err_msg:
                logger.exception(err_msg)
                raise IOError(err_msg)

        if args.caffe:
            args.compat_mode = 'caffe'
        else:
            args.compat_mode = None

        if args.deterministic:
            logger.warn('--deterministic flag is deprecated.  Specify random seed for '
                        'deterministic behavior.')
        # extended parsers may need to generate backend after argparsing
        if gen_be:
            # generate the backend
            gen_backend(backend=args.backend,
                        rng_seed=args.rng_seed,
                        device_id=args.device_id,
                        batch_size=args.batch_size,
                        datatype=args.datatype,
                        max_devices=args.max_devices,
                        compat_mode=args.compat_mode)

        # display what command line / config options were set (and from where)
        logger.info(self.format_values())

        self._PARSED = True
        self.args = args
        args.callback_args = extract_valid_args(args, Callbacks.__init__, startidx=1)
        return args

Example 158

Project: ss Source File: ss.py
Function: main
def main(argv=sys.argv, stream=sys.stdout):
    parser = optparse.OptionParser(
        usage='Usage: ss [options] <file or dir> <file or dir>...',
        description='Searches for subtitles using OpenSubtitles (http://www.opensubtitles.org).\n\nVersion: %s' % __version__,
        epilog='If a directory is given, search for subtitles for all movies on it (non-recursively).',
    )
    parser.add_option('-v', '--verbose',
                      help='always displays configuration and enable verbose mode.',
                      action='store_true', default=False)
    options, args = parser.parse_args(args=argv)

    config_filename = os.path.join(os.path.expanduser('~'), '.ss.ini')
    config = load_configuration(config_filename)
    if options.verbose:
        print('Configuration read from {0}'.format(config_filename))
        print(config, file=stream)
        print()

    if len(args) < 2:
        parser.print_help(file=stream)
        return 2

    input_filenames = list(find_movie_files(args[1:], recursive=config.recursive))
    if not input_filenames:
        print('No files to search subtitles for. Aborting.', file=stream)
        return 1

    if config.mkv:
        if not check_mkv_installed():
            print('mkvmerge not found in PATH.', file=stream)
            print('Either install mkvtoolnix or disable mkv merging ' +
                  'in your config.', file=stream)
            return 4

    header = Fore.WHITE + Style.BRIGHT
    lang_style = Fore.CYAN + Style.BRIGHT
    languages = ', '.join(
        lang_style + x + Style.RESET_ALL for x in config.languages)
    msg = '{header}Languages: {languages}'.format(header=header,
                                                  languages=languages)
    print(msg, file=stream)
    print(file=stream)

    multi = len(config.languages) > 1

    to_skip = set()
    if config.skip:
        for input_filename in input_filenames:
            for language in config.languages:
                if has_subtitle(input_filename, language, multi):
                    to_skip.add((input_filename, language))

        if to_skip:
            print('Skipping %d subtitles.' % len(to_skip), file=stream)

    def print_status(text, status):
        spaces = max(70 - len(text), 2)
        print('{text}{spaces}{status}'.format(
            text=text, spaces=' ' * spaces, status=status), file=stream)

    to_query = set(itertools.product(input_filenames, config.languages))
    to_query.difference_update(to_skip)

    if not to_query:
        return 0

    header_style = Fore.WHITE + Style.BRIGHT
    print(header_style + 'Downloading', file=stream)
    print(file=stream)

    matches = []
    to_query = sorted(to_query)

    with ThreadPoolExecutor(max_workers=config.parallel_jobs) as executor:
        future_to_movie_and_language = {}
        for movie_filename, language in to_query:
            f = executor.submit(search_and_download, movie_filename,
                                language=language, multi=multi)
            future_to_movie_and_language[f] = (movie_filename, language)

        for future in as_completed(future_to_movie_and_language):
            movie_filename, language = future_to_movie_and_language[future]
            subtitle_filename = future.result()

            if subtitle_filename:
                status = Fore.GREEN + '[OK]'
                matches.append((movie_filename, language, subtitle_filename))
            else:
                status = Fore.RED + '[Not found]'

            name = os.path.basename(movie_filename)
            print_status(
                name,
                status='{lang_color}{lang} {status}'.format(
                    lang_color=Fore.CYAN + Style.BRIGHT,
                    lang=language,
                    status=status))

    if config.mkv:
        print(file=stream)
        print(header_style + 'Embedding MKV', file=stream)
        print(file=stream)
        failures = []  # list of (movie_filename, output)
        to_embed = {}  # dict of movie -> (language, subtitle_filename)
        for movie_filename, language, subtitle_filename in matches:
            to_embed.setdefault(movie_filename, []).append((language,
                                                            subtitle_filename))
        to_embed = sorted(to_embed.items())
        with ThreadPoolExecutor(max_workers=config.parallel_jobs) as executor:
            future_to_mkv_filename = {}
            for movie_filename, subtitles in to_embed:
                subtitles.sort()
                movie_ext = os.path.splitext(movie_filename)[1].lower()
                mkv_filename = os.path.splitext(movie_filename)[0] + u'.mkv'
                if movie_ext != u'.mkv' and not os.path.isfile(mkv_filename):
                    f = executor.submit(embed_mkv, movie_filename, subtitles)
                    future_to_mkv_filename[f] = (mkv_filename, movie_filename)
                else:
                    print_status(os.path.basename(mkv_filename),
                                 Style.BRIGHT + Fore.YELLOW + '[skipped]')

            for future in as_completed(future_to_mkv_filename):
                mkv_filename, movie_filename = future_to_mkv_filename[future]
                status, output = future.result()
                if not status:
                    failures.append((movie_filename, output))
                status = Fore.GREEN + '[OK]' if status else Fore.RED + '[ERROR]'
                status = Style.BRIGHT + status
                print_status(os.path.basename(mkv_filename), status)

        if failures:
            print('_' * 80, file=stream)
            for movie_filename, output in failures:
                print(':%s:' % movie_filename, file=stream)
                print(output, file=stream)

    return 0
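
The example builds its per-user settings path by joining os.path.expanduser('~') with '.ss.ini'. A minimal sketch of that configuration pattern, assuming a hypothetical [ss] section with languages and recursive options (not ss's actual schema):

import os
import configparser

def load_configuration(config_filename):
    # Read an INI file from the user's home directory; missing files and
    # missing options simply fall back to the defaults below.
    parser = configparser.ConfigParser()
    parser.read([config_filename])  # a nonexistent file is silently skipped
    return {
        'languages': parser.get('ss', 'languages', fallback='eng').split(','),
        'recursive': parser.getboolean('ss', 'recursive', fallback=False),
    }

config_filename = os.path.join(os.path.expanduser('~'), '.ss.ini')
print(load_configuration(config_filename))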

Example 159

Project: phpsploit Source File: sparser.py
Function: init
    def __init__(self, filename, mode = 'r'):
        """Opens input file, and if available the definition file.  If the
        definition file is available __init__ will then create some pyparsing
        helper variables.  """
        if mode not in ['r', 'w', 'a']:
            raise IOError(0, 'Illegal mode: ' + repr(mode))

        if filename.find(':/') > 1: # URL
            if mode == 'w':
                raise IOError("can't write to a URL")
            import urllib.request, urllib.parse, urllib.error
            self.file = urllib.request.urlopen(filename)
        else:
            filename = os.path.expanduser(filename)
            if mode == 'r' or mode == 'a':
                if not os.path.exists(filename):
                    raise IOError(2, 'No such file or directory: ' + filename)
            filen, file_extension = os.path.splitext(filename)
            command_dict = {
              ('.Z', 'r'): 
                "self.file = os.popen('uncompress -c ' + filename, mode)",
              ('.gz', 'r'): 
                "self.file = gzip.GzipFile(filename, 'rb')",
              ('.bz2', 'r'): 
                "self.file = os.popen('bzip2 -dc ' + filename, mode)",
              ('.Z', 'w'): 
                "self.file = os.popen('compress > ' + filename, mode)",
              ('.gz', 'w'): 
                "self.file = gzip.GzipFile(filename, 'wb')",
              ('.bz2', 'w'): 
                "self.file = os.popen('bzip2 > ' + filename, mode)",
              ('.Z', 'a'):
                "raise IOError(0, \"Can't append to .Z files\")",
              ('.gz', 'a'): 
                "self.file = gzip.GzipFile(filename, 'ab')",
              ('.bz2', 'a'):
                "raise IOError(0, \"Can't append to .bz2 files\")",
                           }

            exec(command_dict.get((file_extension, mode), 
                                  'self.file = open(filename, mode)'))

        self.grammar = None

        # Try to find a parse ('*_def.ext') definition file.  First try to find
        # a file specific parse definition file, then look for 'sparse.def'
        # that would be the definition file for all files within the directory.

        # The definition file is pure Python.  The one variable that needs to
        # be specified is 'parse'.  The 'parse' variable is a list of tuples
        # defining the name, type, and because it is a list, the order of
        # variables on each line in the data file.  The variable name is a
        # string, the type variable is defined as integer, real, and qString.

        # parse = [
        #          ('year', integer),
        #          ('month', integer),
        #          ('day', integer),
        #          ('value', real),
        #         ]

        definition_file_one = filen + "_def" + file_extension
        definition_file_two = os.path.dirname(filen) + os.sep + "sparse.def"
        if os.path.exists(definition_file_one):
            self.parsedef = definition_file_one
        elif os.path.exists(definition_file_two):
            self.parsedef = definition_file_two
        else:
            self.parsedef = None
            return None

        # Create some handy pyparsing constructs.  I kept 'decimal_sep' so that
        # could easily change to parse if the decimal separator is a ",".
        decimal_sep = "."
        sign = oneOf("+ -")
        # part of printables without decimal_sep, +, -
        special_chars = '!"#$%&\'()*,./:;<=>?@[\\]^_`{|}~'.replace(decimal_sep, "")
        integer = ToInteger(
                  Combine(Optional(sign) + 
                          Word(nums))).setName("integer")
        positive_integer = ToInteger(
                           Combine(Optional("+") + 
                                   Word(nums))).setName("integer")
        negative_integer = ToInteger(
                           Combine("-" + 
                                   Word(nums))).setName("integer")
        real = ToFloat(
               Combine(Optional(sign) + 
                       Word(nums) + 
                       decimal_sep + 
                       Optional(Word(nums)) + 
                       Optional(oneOf("E e") + 
                                Word(nums)))).setName("real")
        positive_real = ToFloat(
                        Combine(Optional("+") + 
                                Word(nums) + 
                                decimal_sep + 
                                Optional(Word(nums)) + 
                                Optional(oneOf("E e") + 
                                         Word(nums)))).setName("real")
        negative_real = ToFloat(
                        Combine("-" + 
                                Word(nums) + 
                                decimal_sep + 
                                Optional(Word(nums)) + 
                                Optional(oneOf("E e") + 
                                         Word(nums)))).setName("real")
        qString = ( sglQuotedString | dblQuotedString ).setName("qString")
    
        # add other characters we should skip over between interesting fields
        integer_junk = Optional(
                       Suppress(
                       Word(alphas + 
                            special_chars + 
                            decimal_sep))).setName("integer_junk")
        real_junk = Optional(
                    Suppress(
                    Word(alphas + 
                         special_chars))).setName("real_junk")
        qString_junk = SkipTo(qString).setName("qString_junk")

        # Now that 'integer', 'real', and 'qString' have been assigned I can
        # execute the definition file.  
        exec(compile(open(self.parsedef).read(), self.parsedef, 'exec'))

        # Build the grammar, combination of the 'integer', 'real, 'qString',
        # and '*_junk' variables assigned above in the order specified in the
        # definition file.
        grammar = []
        for nam, expr in parse:
            grammar.append( eval(expr.name + "_junk"))
            grammar.append( expr.setResultsName(nam) )
        self.grammar = And( grammar[1:] + [restOfLine] )
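
The constructor above expands '~' in the filename and then selects an opener by extension, using exec over a dict of statements. The same dispatch can be expressed without exec; a minimal sketch covering only the gzip and bz2 cases:

import bz2
import gzip
import os

def open_expanded(filename, mode='r'):
    # Expand '~' before touching the filesystem, then dispatch on extension.
    filename = os.path.expanduser(filename)
    ext = os.path.splitext(filename)[1]
    openers = {
        '.gz': gzip.open,
        '.bz2': bz2.open,
    }
    return openers.get(ext, open)(filename, mode)

# e.g. open_expanded('~/data/values.gz', 'rt') reads a gzipped file under $HOME.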

Example 160

Project: modl Source File: hcp_plot.py
def display_explained_variance_density(output_dir):
    dir_list = [join(output_dir, f) for f in os.listdir(output_dir) if
                os.path.isdir(join(output_dir, f))]

    fig = plt.figure(figsize=(fig_width * 0.73, fig_height))
    gs = gridspec.GridSpec(1, 2, width_ratios=[1, 1])
    fig.subplots_adjust(bottom=0.29)
    fig.subplots_adjust(left=0.075)
    fig.subplots_adjust(right=.92)

    results = []
    analyses = []
    ref_time = 1000000
    for dir_name in dir_list:
        try:
            analyses.append(
                json.load(open(join(dir_name, 'analysis.json'), 'r')))
            results.append(
                json.load(open(join(dir_name, 'results.json'), 'r')))
            if results[-1]['reduction'] == 12:
                timings = np.array(results[-1]['timings'])
                diff = timings[1:] - timings[:1]
                ref_time = min(ref_time, np.min(diff))
        except IOError:
            pass
    print(ref_time)
    h_reductions = []
    ax = {}
    ylim = {1e-2: [2.455e8, 2.525e8], 1e-3: [2.3e8, 2.47e8],
            1e-4: [2.16e8, 2.42e8]}
    for i, alpha in enumerate([1e-3, 1e-4]):
        ax[alpha] = fig.add_subplot(gs[:, i])
        if i == 0:
            ax[alpha].set_ylabel('Objective value on test set')
        ax[alpha].annotate('$\\lambda  = 10^{%.0f}$' % log(alpha, 10),
                           xy=(.65, .85),
                           fontsize=8,
                           xycoords='axes fraction')
        ax[alpha].set_xlim([.05, 200])
        ax[alpha].set_ylim(ylim[alpha])

        for tick in ax[alpha].xaxis.get_major_ticks():
            tick.label.set_fontsize(7)
        ax[alpha].set_xscale('log')

        ax[alpha].set_xticks([.1, 1, 10, 100])
        ax[alpha].set_xticklabels(['.1 h', '1 h', '10 h', '100 h'])

        sns.despine(fig=fig, ax=ax[alpha])

        ax[alpha].spines['left'].set_color((.6, .6, .6))
        ax[alpha].spines['bottom'].set_color((.6, .6, .6))
        ax[alpha].xaxis.set_tick_params(color=(.6, .6, .6), which='both')
        ax[alpha].yaxis.set_tick_params(color=(.6, .6, .6), which='both')
        for tick in ax[alpha].xaxis.get_major_ticks():
            tick.label.set_color('black')
        for tick in ax[alpha].yaxis.get_major_ticks():
            tick.label.set_fontsize(6)

            tick.label.set_color('black')
        t = ax[alpha].yaxis.get_offset_text()
        t.set_size(5)
    ax[1e-4].set_xlabel('CPU\ntime', ha='right')
    ax[1e-4].xaxis.set_label_coords(1.15, -0.05)

    colormap = sns.cubehelix_palette(4, start=0, rot=0., hue=1, dark=.3,
                                     light=.7,
                                     reverse=False)
    other_colormap = sns.cubehelix_palette(4, start=0, rot=.5, hue=1, dark=.3,
                                           light=.7,
                                           reverse=False)
    colormap[0] = other_colormap[0]
    colormap_dict = {reduction: color for reduction, color in
                     zip([1, 4, 8, 12],
                         colormap)}

    x_bar = []
    y_bar_objective = []
    y_bar_density = []
    hue_bar = []

    for result, analysis in zip(results, analyses):
        if result['alpha'] != 1e-2 and result['reduction'] != 2:
            print("%s %s" % (result['alpha'], result['reduction']))
            timings = (np.array(analysis['records']) + 1) / int(
                result['reduction']) * 12 * ref_time / 3600
            # timings = np.array(result['timings'])[np.array(analysis['records']) + 1] / 3600
            s, = ax[result[
                'alpha']].plot(
                timings,
                np.array(analysis['objectives']) / 4,
                color=colormap_dict[int(result['reduction'])],
                linewidth=2,
                linestyle='--' if result[
                                      'reduction'] == 1 else '-',
                zorder=result['reduction'] if result[
                                                  'reduction'] != 1 else 100)
            if result['alpha'] == 1e-3:
                h_reductions.append(
                    (s, '%.0f' % result['reduction']))

    handles, labels = list(zip(*h_reductions[::-1]))
    argsort = sorted(range(len(labels)), key=lambda t: int(labels[t]))
    handles = [handles[i] for i in argsort]
    labels = [labels[i] for i in argsort]

    offset = .3
    yoffset = -.05
    legend_vanilla = mlegend.Legend(ax[1e-3], handles[:1], ['No reduction'],
                                    loc='lower left',
                                    ncol=5,
                                    numpoints=1,
                                    handlelength=2,
                                    markerscale=1.4,
                                    bbox_to_anchor=(
                                        0.3 + offset, -.39 + yoffset),
                                    fontsize=8,
                                    frameon=False
                                    )

    legend_ratio = mlegend.Legend(ax[1e-3], handles[1:], labels[1:],
                                  loc='lower left',
                                  ncol=5,
                                  markerscale=1.4,
                                  handlelength=2,
                                  fontsize=8,
                                  bbox_to_anchor=(
                                      0.3 + offset, -.54 + yoffset),
                                  frameon=False
                                  )
    ax[1e-3].annotate('Original online algorithm',
                      xy=(0.28 + offset, -.27 + yoffset),
                      xycoords='axes fraction',
                      horizontalalignment='right', verticalalignment='bottom',
                      fontsize=8)
    ax[1e-3].annotate('Proposed reduction factor $r$',
                      xy=(0.28 + offset, -.42 + yoffset),
                      xycoords='axes fraction',
                      horizontalalignment='right', verticalalignment='bottom',
                      fontsize=8)
    ax[1e-3].add_artist(legend_ratio)
    ax[1e-3].add_artist(legend_vanilla)

    ax[1e-3].annotate('(a) Convergence speed', xy=(0.7, 1.02), ha='center',
                      fontsize=9, va='bottom', xycoords='axes fraction')

    fig.savefig(join(output_dir, 'hcp_bench.pdf'))

    for result, analysis in zip(results, analyses):
        if result['alpha'] != 1e-2 and result['reduction'] != 2:
            x_bar.append(result['alpha'])
            y_bar_objective.append(analysis['objectives'][-1])
            y_bar_density.append(analysis['densities'][-1])
            hue_bar.append(result['reduction'])
    ref_objective = {}
    for objective, alpha, reduction in zip(y_bar_objective, x_bar, hue_bar):
        if reduction == 1:
            ref_objective[alpha] = objective

    for i, (objective, alpha) in enumerate(zip(y_bar_objective, x_bar)):
        y_bar_objective[i] /= ref_objective[alpha]
        y_bar_objective[i] -= 1

    ####################### Final objective
    fig = plt.figure(figsize=(fig_width * 0.27, fig_height))
    fig.subplots_adjust(bottom=0.29)
    fig.subplots_adjust(left=0.05)
    fig.subplots_adjust(right=1.2)
    fig.subplots_adjust(top=0.85)
    gs = gridspec.GridSpec(2, 1, width_ratios=[1, 1], height_ratios=[1.2, 0.8])
    ax_bar_objective = fig.add_subplot(gs[0])
    ax_bar_objective.set_ylim(-0.007, 0.007)
    ax_bar_objective.set_yticks([-0.005, 0, 0.005])
    ax_bar_objective.set_yticklabels(['-0.5\%', '0\%', '0.5\%'])
    ax_bar_objective.tick_params(axis='y', labelsize=6)

    sns.despine(fig=fig, ax=ax_bar_objective, left=True, right=False)

    sns.barplot(x=x_bar, y=y_bar_objective, hue=hue_bar, ax=ax_bar_objective,
                order=[1e-3, 1e-4],
                palette=colormap)
    plt.setp(ax_bar_objective.patches, linewidth=0.1)
    ax_bar_objective.legend_ = None
    ax_bar_objective.get_xaxis().set_visible(False)
    ax_bar_objective.set_xlim([-.5, 1.6])
    ax_bar_objective.annotate('Final\nobjective\ndeviation\n(relative)',
                              xy=(1.28, 0.45), fontsize=7, va='center',
                              xycoords='axes fraction')
    ax_bar_objective.annotate('(Less is better)', xy=(.06, 0.1), fontsize=7,
                              va='center', xycoords='axes fraction')
    ax_bar_objective.yaxis.set_label_position('right')

    ################################## Density
    x_bar = []
    y_bar_density = []
    hue_bar = []
    for result, analysis in zip(results, analyses):
        if result['alpha'] != 1e-2 and result['reduction'] != 2:
            x_bar.append(result['alpha'])
            y_bar_density.append(analysis['densities'][-1])
            hue_bar.append(result['reduction'])

    ax_bar_density = fig.add_subplot(gs[1])
    ax_bar_density.set_yscale('log')
    ax_bar_density.set_ylim(100, 1000)
    ax_bar_density.set_yticks([100, 1000])
    ax_bar_density.set_yticklabels(['100', '1000'])
    ax_bar_density.tick_params(axis='y', labelsize=6)

    sns.barplot(x=x_bar, y=y_bar_density, hue=hue_bar, ax=ax_bar_density,
                order=[1e-3, 1e-4],
                palette=colormap)
    ax_bar_density.set_xticklabels(['$10^{-2}$', '$10^{-3}$', '$10^{-4}$'])
    sns.despine(fig=fig, ax=ax_bar_density, left=True, right=False)
    # ax_bar_density.get_xaxis().set_ticks([])
    ax_bar_density.set_xlim([-.5, 1.6])
    ax_bar_density.set_xlabel('Regularization $\\lambda$')
    ax_bar_density.annotate('$\\frac{\\ell_1}{\\ell_2}(\\mathbf D)$',
                            xy=(1.26, 0.45),
                            fontsize=7, va='center', xycoords='axes fraction')
    ax_bar_density.yaxis.set_label_position('right')

    plt.setp(ax_bar_density.patches, linewidth=0.1)
    ax_bar_density.legend_ = None

    for ax in [ax_bar_density, ax_bar_objective]:
        ax.spines['right'].set_color((.6, .6, .6))
        ax.spines['bottom'].set_color((.6, .6, .6))
        ax.xaxis.set_tick_params(color=(.6, .6, .6), which='both')
        ax.yaxis.set_tick_params(color=(.6, .6, .6), which='both')

    for tic in ax_bar_density.xaxis.get_major_ticks():
        tic.tick1On = tic.tick2On = False
    ax_bar_objective.spines['bottom'].set_position(('data', 0))
    ax_bar_objective.spines['bottom'].set_linewidth(.3)
    ax_bar_objective.annotate('(b) Decomposition quality', xy=(0.7, 1.21),
                              ha='center', va='bottom', fontsize=9,
                              xycoords='axes fraction')

    fig.savefig(expanduser(join(output_dir, 'bar_plot.pdf')))
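
The expanduser call here only resolves '~' in the output path handed to fig.savefig. A minimal sketch of that save pattern (matplotlib is the only dependency; the directory name is illustrative):

import os
from os.path import expanduser, join

import matplotlib
matplotlib.use('Agg')  # render without a display
import matplotlib.pyplot as plt

output_dir = '~/hcp_output'  # illustrative location
os.makedirs(expanduser(output_dir), exist_ok=True)

fig, ax = plt.subplots(figsize=(3, 2))
ax.plot([0, 1, 2], [1, 4, 9])
# expanduser() resolves '~' so savefig receives a real, existing path.
fig.savefig(expanduser(join(output_dir, 'bar_plot.pdf')))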

Example 161

Project: exitmap Source File: exitmap.py
def parse_cmd_args():
    """
    Parse and return command line arguments.
    """

    desc = "Perform a task over (a subset of) all Tor exit relays."
    parser = argparse.ArgumentParser(description=desc, add_help=False)

    parser.add_argument("-f", "--config-file", type=str, default=None,
                        help="Path to the configuration file.")

    args, remaining_argv = parser.parse_known_args()

    # First, try to load the configuration file and load its content as our
    # defaults.

    if args.config_file:
        config_file = args.config_file
    else:
        home_dir = os.path.expanduser("~")
        config_file = os.path.join(home_dir, ".exitmaprc")

    config_parser = ConfigParser.SafeConfigParser()
    file_parsed = config_parser.read([config_file])
    if file_parsed:
        try:
            defaults = dict(config_parser.items("Defaults"))
        except ConfigParser.NoSectionError as err:
            log.warning("Could not parse config file \"%s\": %s" %
                        (config_file, err))
            defaults = {}
    else:
        defaults = {}

    parser = argparse.ArgumentParser(parents=[parser])
    parser.set_defaults(**defaults)

    # Now, load the arguments given over the command line.

    group = parser.add_mutually_exclusive_group()

    group.add_argument("-C", "--country", type=str, default=None,
                       help="Only probe exit relays of the country which is "
                            "determined by the given 2-letter country code.")

    group.add_argument("-e", "--exit", type=str, default=None,
                       help="Only probe the exit relay which has the given "
                            "20-byte fingerprint.")

    group.add_argument("-E", "--exit-file", type=str, default=None,
                       help="File containing the 20-byte fingerprints "
                            "of exit relays to probe, one per line.")

    parser.add_argument("-d", "--build-delay", type=float, default=3,
                        help="Wait for the given delay (in seconds) between "
                             "circuit builds.  The default is 3.")

    parser.add_argument("-n", "--delay-noise", type=float, default=0,
                        help="Sample random value in [0, DELAY_NOISE) and "
                             "randomly add it to or subtract it from the build"
                             " delay.  This randomises the build delay.  The "
                             "default is 0.")

    # Create /tmp/exitmap_tor_datadir-$USER to allow many users to run
    # exitmap in parallel.

    tor_directory = "/tmp/exitmap_tor_datadir-" + pwd.getpwuid(os.getuid())[0]

    parser.add_argument("-t", "--tor-dir", type=str,
                        default=tor_directory,
                        help="Tor's data directory.  If set, the network "
                             "consensus can be re-used in between scans which "
                             "speeds up bootstrapping.  The default is %s." %
                             tor_directory)

    parser.add_argument("-a", "--analysis-dir", type=str,
                        default=None,
                        help="The directory where analysis results are "
                             "written to.  If the directory is used depends "
                             "on the module.  The default is /tmp.")

    parser.add_argument("-v", "--verbosity", type=str, default="info",
                        help="Minimum verbosity level for logging.  Available "
                             "in ascending order: debug, info, warning, "
                             "error, critical).  The default is info.")

    parser.add_argument("-i", "--first-hop", type=str, default=None,
                        help="The 20-byte fingerprint of the Tor relay which "
                             "is used as first hop.  This relay should be "
                             "under your control.")

    parser.add_argument("-o", "--logfile", type=str, default=None,
                        help="Filename to which log output should be written "
                             "to.")

    exits = parser.add_mutually_exclusive_group()

    exits.add_argument("-b", "--bad-exits", action="store_true",
                       help="Only scan exit relays that have the BadExit "
                            "flag.  By default, only good exits are scanned.")

    exits.add_argument("-l", "--all-exits", action="store_true",
                       help="Scan all exits, including those that have the "
                            "BadExit flag.  By default, only good exits are "
                            "scanned.")

    parser.add_argument("-V", "--version", action="version",
                        version="%(prog)s 2015.04.06")

    parser.add_argument("module", nargs='+',
                        help="Run the given module (available: %s)." %
                        ", ".join(get_modules()))

    parser.set_defaults(**defaults)

    return parser.parse_args(remaining_argv)
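
The two-stage pattern above, where a first parser finds --config-file, defaults are read from ~/.exitmaprc, and a second parser applies them, can be sketched with the standard library alone. A minimal version (Python 3 configparser; the [Defaults] section name follows the example, the options are illustrative):

import argparse
import configparser
import os

pre_parser = argparse.ArgumentParser(add_help=False)
pre_parser.add_argument('-f', '--config-file', default=None)
args, remaining_argv = pre_parser.parse_known_args()

config_file = args.config_file or os.path.join(os.path.expanduser('~'), '.exitmaprc')

defaults = {}
config = configparser.ConfigParser()
if config.read([config_file]) and config.has_section('Defaults'):
    defaults = dict(config.items('Defaults'))

parser = argparse.ArgumentParser(parents=[pre_parser])
parser.add_argument('-d', '--build-delay', type=float, default=3)
parser.set_defaults(**defaults)  # values from ~/.exitmaprc override the defaults
print(parser.parse_args(remaining_argv))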

Example 162

Project: nupic Source File: run_swarm.py
def runPermutations(args):
  """
  The main function of the RunPermutations utility.
  This utility will automatically generate and run multiple prediction framework
  experiments that are permutations of a base experiment via the Grok engine.
  For example, if you have an experiment that you want to test with 3 possible
  values of variable A and 2 possible values of variable B, this utility will
  automatically generate the experiment directories and description files for
  each of the 6 different experiments.

  Here is an example permutations file which is read by this script below. The
  permutations file must be in the same directory as the description.py for the
  base experiment that you want to permute. It contains a permutations dict, an
  optional list of the result items to report on for each experiment, and an
  optional result item to optimize for.

  When an 'optimize' entry is provided, this tool will attempt to prioritize the
  order in which the various permutations are run in order to improve the odds
  of running the best permutations sooner. It does this by watching the results
  for various parameter values and putting parameter values that give generally
  better results at the head of the queue.

  In addition, when the optimize key is provided, we periodically update the UI
  with the best results obtained so far on that metric.

  ---------------------------------------------------------------------------
  permutations = dict(
                  iterationCount = [1000, 5000],
                  coincCount = [50, 100],
                  trainTP = [False],
                  )

  report = ['.*reconstructErrAvg',
            '.*inputPredScore.*',
            ]

  optimize = 'postProc_gym1_baseline:inputPredScore'

  Parameters:
  ----------------------------------------------------------------------
  args:           Command-line args; the equivalent of sys.argv[1:]
  retval:         for the actions 'run', 'pickup', and 'dryRun', returns the
                  Hypersearch job ID (in ClientJobs table); otherwise returns
                  None
  """

  helpString = (
      "\n\n%prog [options] permutationsScript\n"
      "%prog [options] expDescription.json\n\n"
      "This script runs permutations of an experiment via Grok engine, as "
      "defined in a\npermutations.py script or an expGenerator experiment "
      "description json file.\nIn the expDescription.json form, the json file "
      "MUST have the file extension\n'.json' and MUST conform to "
      "expGenerator/experimentDescriptionSchema.json.")

  parser = optparse.OptionParser(usage=helpString)

  parser.add_option(
    "--replaceReport", dest="replaceReport", action="store_true",
    default=DEFAULT_OPTIONS["replaceReport"],
    help="Replace existing csv report file if it exists. Default is to "
         "append to the existing file. [default: %default].")

  parser.add_option(
    "--action", dest="action", default=DEFAULT_OPTIONS["action"],
    choices=["run", "pickup", "report", "dryRun"],
    help="Which action to perform. Possible actions are run, pickup, "
         "report, dryRun. "
         "run: run a new HyperSearch via Grok. "
         "pickup: pick up the latest run of a HyperSearch job. "
         "dryRun: run a single HypersearchWorker inline within the application "
         "process without the Grok infrastructure to flush out bugs in "
         "description and permutations scripts; defaults to "
         "maxPermutations=1: use --maxPermutations to change this; "
         "report: just print results from the last or current run. "
         "[default: %default].")

  parser.add_option(
    "--maxPermutations", dest="maxPermutations",
    default=DEFAULT_OPTIONS["maxPermutations"], type="int",
    help="Maximum number of models to search. Applies only to the 'run' and "
    "'dryRun' actions. [default: %default].")

  parser.add_option(
    "--exports", dest="exports", default=DEFAULT_OPTIONS["exports"],
    type="string",
    help="json dump of environment variable settings that should be applied "
    "for the job before running. [default: %default].")

  parser.add_option(
    "--useTerminators", dest="useTerminators", action="store_true",
    default=DEFAULT_OPTIONS["useTerminators"], help="Use early model terminators in HyperSearch "
         "[default: %default].")

  parser.add_option(
      "--maxWorkers", dest="maxWorkers", default=DEFAULT_OPTIONS["maxWorkers"],
      type="int",
      help="Maximum number of concurrent workers to launch. Applies only to "
      "the 'run' action. [default: %default].")

  parser.add_option(
    "-v", dest="verbosityCount", action="count", default=0,
    help="Increase verbosity of the output.  Specify multiple times for "
         "increased verbosity. e.g., -vv is more verbose than -v.")

  parser.add_option(
    "--timeout", dest="timeout", default=DEFAULT_OPTIONS["timeout"], type="int",
     help="Time out for this search in minutes "
         "[default: %default].")

  parser.add_option(
    "--overwrite", default=DEFAULT_OPTIONS["overwrite"], action="store_true",
    help="If 'yes', overwrite existing description.py and permutations.py"
         " (in the same directory as the <expDescription.json> file) if they"
         " already exist. [default: %default].")

  parser.add_option(
    "--genTopNDescriptions", dest="genTopNDescriptions",
    default=DEFAULT_OPTIONS["genTopNDescriptions"], type="int",
    help="Generate description files for the top N models. Each one will be"
         " placed into its own subdirectory under the base description file. "
         "[default: %default].")

  (options, positionalArgs) = parser.parse_args(args)

  # Get the permutations script's filepath
  if len(positionalArgs) != 1:
    parser.error("You must supply the name of exactly one permutations script "
                 "or JSON description file.")

  fileArgPath = os.path.expanduser(positionalArgs[0])
  fileArgPath = os.path.expandvars(fileArgPath)
  fileArgPath = os.path.abspath(fileArgPath)

  permWorkDir = os.path.dirname(fileArgPath)

  outputLabel = os.path.splitext(os.path.basename(fileArgPath))[0]

  basename = os.path.basename(fileArgPath)
  fileExtension = os.path.splitext(basename)[1]
  optionsDict = vars(options)

  if fileExtension == ".json":
    returnValue = permutations_runner.runWithJsonFile(
      fileArgPath, optionsDict, outputLabel, permWorkDir)
  else:
    returnValue = permutations_runner.runWithPermutationsScript(
      fileArgPath, optionsDict, outputLabel, permWorkDir)

  return returnValue
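
The path handling above chains expanduser, expandvars, and abspath so the permutations script can be given as '~/...', '$HOME/...', or a relative path. A minimal sketch of that canonicalization as a reusable helper (the helper name is ours, not nupic's):

import os

def canonicalize_path(path):
    # '~' and environment variables are expanded first, then the result is
    # made absolute relative to the current working directory.
    return os.path.abspath(os.path.expandvars(os.path.expanduser(path)))

print(canonicalize_path('~/swarms/permutations.py'))
print(canonicalize_path('$HOME/swarms/exp.json'))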

Example 163

Project: release-tools Source File: dashboard.py
Function: main
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--project-list',
        default=governance.PROJECTS_LIST,
        help='a URL pointing to a projects.yaml file, defaults to %(default)s',
    )
    parser.add_argument(
        '--releases-repo',
        default=os.path.expanduser('~/repos/openstack/releases'),
        help='path to local copy of the releases repository',
    )
    parser.add_argument(
        '--format', '-f',
        choices=['csv', 'etherpad'],
        default='csv',
    )
    parser.add_argument(
        'series',
        help='the series name',
    )
    args = parser.parse_args()

    # Load all of the existing deliverable data and determine the most
    # recent version tagged.
    latest_versions = {}
    release_notes = {}
    pat = os.path.join(
        args.releases_repo,
        'deliverables',
        args.series,
        '*.yaml',
    )
    for fn in glob.glob(pat):
        with open(fn, 'r') as f:
            y = yaml.safe_load(f.read())
        deliverable = os.path.basename(fn)[:-5]
        v = y['releases'][-1]['version']
        latest_versions[deliverable] = v
        release_notes[deliverable] = y.get('release-notes')

    team_data = governance.get_team_data()
    teams = {
        n.lower(): governance.Team(n, i)
        for n, i in team_data.items()
    }

    # Organize deliverables by their release model, whether they are
    # managed, and the team that owns them.
    deliverables_by_model = {
        MILESTONE: {},
        INTERMEDIARY: {},
        TRAILING: {},
    }
    for t in teams.values():
        for dn, di in t.deliverables.items():
            for model in deliverables_by_model.keys():
                if model in di.tags:
                    dbm_team = deliverables_by_model[model].setdefault(
                        di.team.name.lower(), [])
                    dbm_team.append(di)
                    break

    # Dump the dashboard data
    if args.format == 'csv':
        writer = csv.writer(sys.stdout)
        writer.writerow(
            ('Release Model',
             'Team',
             'Deliverable Type',
             'Deliverable Name',
             'Pre-RC1',
             'RC1',
             'Branched at',
             'Latest RC',
             'Release Notes',
             'Comments',
             'PTL Nick',
             'PTL Email',
             'IRC Channel')
        )
        for model in [MILESTONE, INTERMEDIARY, TRAILING]:
            short_model = model.rpartition('-')[-1]
            dbm_teams = sorted(deliverables_by_model[model].items())
            for team_name, team_deliverables in dbm_teams:
                team = teams[team_name]
                for d in sorted(team_deliverables, key=lambda d: d.name):
                    writer.writerow(
                        (short_model,
                         team.name.lower(),
                         d.type,
                         d.name,
                         latest_versions.get(d.name, 'not found'),
                         '',  # RC1
                         '',  # Branched at
                         '',  # Latest RC
                         release_notes.get(d.name, ''),  # Release notes
                         '',  # Comments
                         team.data['ptl']['irc'],
                         team.data['ptl']['email'],
                         team.data.get('irc-channel'))
                    )

    else:
        for model in [MILESTONE, INTERMEDIARY, TRAILING]:
            print('{}\n'.format(model))
            dbm_teams = sorted(deliverables_by_model[model].items())
            for team_name, team_deliverables in dbm_teams:
                team = teams[team_name]
                print('  * {}'.format(team_name))
                print('    * PTL: {} - {}'.format(
                    team.data['ptl']['irc'],
                    team.data['ptl']['email'],
                ))
                print('    * IRC: {}'.format(team.data.get('irc-channel', '')))
                print('    * Deliverables')
                for d in sorted(team_deliverables, key=lambda d: d.name):
                    v = latest_versions.get(d.name, 'not found')
                    print('      * {d.name} ({d.type}) [{v}]'.format(d=d, v=v))
                print()
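
Here expanduser supplies the default for --releases-repo, and the series deliverables are read by globbing YAML files beneath that directory. A minimal sketch of the loading step (PyYAML assumed; file layout and keys mirror the example):

import argparse
import glob
import os

import yaml  # PyYAML

parser = argparse.ArgumentParser()
parser.add_argument(
    '--releases-repo',
    default=os.path.expanduser('~/repos/openstack/releases'),
    help='path to local copy of the releases repository',
)
parser.add_argument('series', help='the series name')
args = parser.parse_args()

# Latest tagged version per deliverable, keyed by file name without '.yaml'.
latest_versions = {}
pattern = os.path.join(args.releases_repo, 'deliverables', args.series, '*.yaml')
for fn in glob.glob(pattern):
    with open(fn) as f:
        data = yaml.safe_load(f)
    latest_versions[os.path.basename(fn)[:-5]] = data['releases'][-1]['version']
print(latest_versions)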

Example 164

Project: GoAgent-Always-Available Source File: dev_appserver_main.py
Function: parse_arguments
def ParseArguments(argv):
  """Parses command-line arguments.

  Args:
    argv: Command-line arguments, including the executable name, used to
      execute this application.

  Returns:
    Tuple (args, option_dict) where:
      args: List of command-line arguments following the executable name.
      option_dict: Dictionary of parsed flags that maps keys from DEFAULT_ARGS
        to their values, which are either pulled from the defaults, or from
        command-line flags.
  """
  option_dict = DEFAULT_ARGS.copy()

  try:
    opts, args = getopt.gnu_getopt(
      argv[1:],
      'a:cdhp:',
      [ 'address=',
        'admin_console_host=',
        'admin_console_server=',
        'allow_skipped_files',
        'auth_domain=',
        'backends',
        'blobstore_path=',
        'clear_datastore',
        'clear_prospective_search',
        'datastore_path=',
        'debug',
        'debug_imports',
        'default_partition=',
        'disable_static_caching',
        'disable_task_running',
        'enable_sendmail',
        'help',
        'high_replication',
        'history_path=',
        'multiprocess',
        'multiprocess_api_port=',
        'multiprocess_api_server',
        'multiprocess_app_instance_id=',
        'multiprocess_backend_id=',
        'multiprocess_backend_instance_id=',
        'multiprocess_min_port=',
        'mysql_host=',
        'mysql_password=',
        'mysql_port=',
        'mysql_socket=',
        'mysql_user=',
        'port=',
        'require_indexes',
        'show_mail_body',
        'skip_sdk_update_check',
        'smtp_host=',
        'smtp_password=',
        'smtp_port=',
        'smtp_user=',
        'task_retry_seconds=',
        'trusted',
        'use_sqlite',
      ])
  except getopt.GetoptError, e:
    print >>sys.stderr, 'Error: %s' % e
    PrintUsageExit(1)

  for option, value in opts:
    if option in ('-h', '--help'):
      PrintUsageExit(0)

    if option in ('-d', '--debug'):
      option_dict[ARG_LOG_LEVEL] = logging.DEBUG

    if option in ('-p', '--port'):
      try:
        option_dict[ARG_PORT] = int(value)
        if not (65535 > option_dict[ARG_PORT] > 0):
          raise ValueError
      except ValueError:
        print >>sys.stderr, 'Invalid value supplied for port'
        PrintUsageExit(1)

    def expand_path(s):
      return os.path.abspath(os.path.expanduser(s))

    if option in ('-a', '--address'):
      option_dict[ARG_ADDRESS] = value

    if option == '--blobstore_path':
      option_dict[ARG_BLOBSTORE_PATH] = expand_path(value)

    if option == '--datastore_path':
      option_dict[ARG_DATASTORE_PATH] = expand_path(value)

    if option == '--prospective_search_path':
      option_dict[ARG_PROSPECTIVE_SEARCH_PATH] = expand_path(value)

    if option == '--skip_sdk_update_check':
      option_dict[ARG_SKIP_SDK_UPDATE_CHECK] = True

    if option == '--use_sqlite':
      option_dict[ARG_USE_SQLITE] = True

    if option == '--high_replication':
      option_dict[ARG_HIGH_REPLICATION] = True

    if option == '--history_path':
      option_dict[ARG_HISTORY_PATH] = expand_path(value)

    if option in ('-c', '--clear_datastore'):
      option_dict[ARG_CLEAR_DATASTORE] = True

    if option == '--clear_prospective_search':
      option_dict[ARG_CLEAR_PROSPECTIVE_SEARCH] = True

    if option == '--require_indexes':
      option_dict[ARG_REQUIRE_INDEXES] = True

    if option == '--mysql_host':
      option_dict[ARG_MYSQL_HOST] = value

    if option == '--mysql_port':
      option_dict[ARG_MYSQL_PORT] = _ParsePort(value, '--mysql_port')

    if option == '--mysql_user':
      option_dict[ARG_MYSQL_USER] = value

    if option == '--mysql_password':
      option_dict[ARG_MYSQL_PASSWORD] = value

    if option == '--mysql_socket':
      option_dict[ARG_MYSQL_SOCKET] = value

    if option == '--smtp_host':
      option_dict[ARG_SMTP_HOST] = value

    if option == '--smtp_port':
      option_dict[ARG_SMTP_PORT] = _ParsePort(value, '--smtp_port')

    if option == '--smtp_user':
      option_dict[ARG_SMTP_USER] = value

    if option == '--smtp_password':
      option_dict[ARG_SMTP_PASSWORD] = value

    if option == '--enable_sendmail':
      option_dict[ARG_ENABLE_SENDMAIL] = True

    if option == '--show_mail_body':
      option_dict[ARG_SHOW_MAIL_BODY] = True

    if option == '--auth_domain':
      option_dict['_DEFAULT_ENV_AUTH_DOMAIN'] = value

    if option == '--debug_imports':
      option_dict['_ENABLE_LOGGING'] = True

    if option == '--admin_console_server':
      option_dict[ARG_ADMIN_CONSOLE_SERVER] = value.strip()

    if option == '--admin_console_host':
      option_dict[ARG_ADMIN_CONSOLE_HOST] = value

    if option == '--allow_skipped_files':
      option_dict[ARG_ALLOW_SKIPPED_FILES] = True

    if option == '--disable_static_caching':
      option_dict[ARG_STATIC_CACHING] = False

    if option == '--disable_task_running':
      option_dict[ARG_DISABLE_TASK_RUNNING] = True

    if option == '--task_retry_seconds':
      try:
        option_dict[ARG_TASK_RETRY_SECONDS] = int(value)
        if option_dict[ARG_TASK_RETRY_SECONDS] < 0:
          raise ValueError
      except ValueError:
        print >>sys.stderr, 'Invalid value supplied for task_retry_seconds'
        PrintUsageExit(1)

    if option == '--trusted':
      option_dict[ARG_TRUSTED] = True

    if option == '--backends':
      option_dict[ARG_BACKENDS] = value
    if option == '--multiprocess':
      option_dict[ARG_MULTIPROCESS] = value
    if option == '--multiprocess_min_port':
      option_dict[ARG_MULTIPROCESS_MIN_PORT] = value
    if option == '--multiprocess_api_server':
      option_dict[ARG_MULTIPROCESS_API_SERVER] = value
    if option == '--multiprocess_api_port':
      option_dict[ARG_MULTIPROCESS_API_PORT] = value
    if option == '--multiprocess_app_instance_id':
      option_dict[ARG_MULTIPROCESS_APP_INSTANCE_ID] = value
    if option == '--multiprocess_backend_id':
      option_dict[ARG_MULTIPROCESS_BACKEND_ID] = value
    if option == '--multiprocess_backend_instance_id':
      option_dict[ARG_MULTIPROCESS_BACKEND_INSTANCE_ID] = value

    if option == '--default_partition':
      option_dict[ARG_DEFAULT_PARTITION] = value

  return args, option_dict
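
The nested expand_path helper above normalizes every path-valued flag the same way: expanduser followed by abspath. A minimal Python 3 sketch of the same idea, using argparse's type= hook instead of getopt (our substitution, not the original code):

import argparse
import os

def expand_path(s):
    # Same normalization as the helper above: '~' expansion plus abspath.
    return os.path.abspath(os.path.expanduser(s))

parser = argparse.ArgumentParser()
# Passing the helper as type= means every path option is stored canonicalized.
parser.add_argument('--blobstore_path', type=expand_path, default=None)
parser.add_argument('--datastore_path', type=expand_path, default=None)
parser.add_argument('--history_path', type=expand_path, default=None)

args = parser.parse_args(['--datastore_path', '~/appengine/datastore.db'])
print(args.datastore_path)  # absolute path under the user's home directory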

Example 165

Project: python-sharepoint Source File: cmd.py
def main():
    from optparse import OptionParser, OptionGroup
    import os
    import sys
    import warnings
    from lxml import etree

    warnings.simplefilter("ignore")

    description = ["A utility to extract data from SharePoint sites, returning ",
                   "XML. Available actions are 'lists' (returns a list of ",
                   "lists in the SharePoint site), and 'exportlists' (returns ",
                   "data for all or specified lists"]

    parser = OptionParser(usage='%prog action [options]',
                          description=''.join(description))
    parser.add_option('-s', '--site-url', dest='site_url', help='Root URL for the SharePoint site')
    parser.add_option('-u', '--username', dest='username', help='Username')
    parser.add_option('-p', '--password', dest='password', help='Password')
    parser.add_option('-c', '--credentials', dest='credentials', help="File containing 'username:password'.")

    parser.add_option('-n', '--pretty-print', dest='pretty_print', action='store_true', default=True)
    parser.add_option('-N', '--no-pretty-print', dest='pretty_print', action='store_false')

    list_options = OptionGroup(parser, 'List options')
    list_options.add_option('-l', '--list-name', dest='list_names', help='Name of a list to retrieve. Can be repeated to return multiple lists. If not present at all, all lists will be returned.', action='append')
    list_options.add_option('-d', '--data', dest='include_data', action='store_true', default=True, help="Include list data in output (default for exportlists)")
    list_options.add_option('-D', '--no-data', dest='include_data', action='store_false', help="Don't include list data in output")
    list_options.add_option('-f', '--fields', dest='include_field_definitions', action='store_true', default=True, help="Include field definitions data in output (default for exportlists)")
    list_options.add_option('-F', '--no-fields', dest='include_field_definitions', action='store_false', help="Don't include field definitions data in output")
    list_options.add_option('-t', '--transclude-xml', dest='transclude_xml', action='store_true', default=False, help="Transclude linked XML files into row data")
    list_options.add_option('-T', '--no-transclude-xml', dest='transclude_xml', action='store_false', help="Don't transclude XML (default)")
    list_options.add_option('--include-users', dest='include_users', action='store_true', default=False, help="Include data about referenced users")
    list_options.add_option('--no-include-users', dest='include_users', action='store_false', help="Don't include data about users (default)")
    list_options.add_option('--description', dest='description', default='', help='Description when creating lists')
    list_options.add_option('--template', dest='template', default='100', help='List template name')
    list_options.add_option('--timeout', dest='timeout', default=None, type="float", help='Connection timeout (in seconds)')
    parser.add_option_group(list_options)

    options, args = parser.parse_args()

    if not options.site_url:
        sys.stderr.write("--site-url is a required parameter. Use -h for more information.\n")
        sys.exit(ExitCodes.MISSING_ARGUMENT)

    if options.credentials:
        username, password = open(os.path.expanduser(options.credentials)).read().strip().split(':', 1)    
    else:
        username, password = options.username, options.password

    if not username:
        username = raw_input("Username: ")
    if not password:
        from getpass import getpass
        password = getpass()

    opener = basic_auth_opener(options.site_url, username, password)
    site = SharePointSite(options.site_url, opener, timeout=options.timeout)

    if not len(args) == 1:
        sys.stderr.write("You must provide an action. Use -h for more information.\n")
        sys.exit(ExitCodes.NO_SUCH_ACTION)

    action, xml = args[0], None

    if action == 'lists':
        xml = site.as_xml(include_lists=True,
                          list_names=options.list_names or None,
                          include_list_data=False,
                          include_field_definitions=False)
    elif action == 'exportlists':
        xml = site.as_xml(include_lists=True,
                          include_users=options.include_users,
                          list_names=options.list_names or None,
                          include_list_data=options.include_data,
                          include_field_definitions=options.include_field_definitions,
                          transclude_xml=options.transclude_xml)
    elif action == 'deletelists':
        if not options.list_names:
            sys.stderr.write("You must specify a list. See -h for more information.\n")
            sys.exit(ExitCodes.MISSING_ARGUMENT)
        for list_name in options.list_names:
            try:
                site.lists.remove(site.lists[list_name])
            except KeyError:
                sys.stderr.write("No such list: '{0}'\n".format(list_name))
                sys.exit(ExitCodes.NO_SUCH_LIST)
        sys.exit(0)
    elif action == 'addlists':
        if not options.list_names:
            sys.stderr.write("You must specify a list. See -h for more information.\n")
            sys.exit(ExitCodes.MISSING_ARGUMENT)
        for list_name in options.list_names:
            try:
                site.lists.create(list_name, options.description, options.template)
            except KeyError:
                sys.stderr.write("No such list: '{0}'\n".format(list_name))
                sys.exit(ExitCodes.NO_SUCH_LIST)
        xml = site.as_xml(list_names=options.list_names or None,
                          include_field_definitions=options.include_field_definitions)
    elif action == 'shell':
        try:
            from IPython.Shell import IPShellEmbed
            ipshell = IPShellEmbed()
            ipshell()
        except ImportError:
            import code
            import readline
            readline.parse_and_bind("tab: complete")
            shell = code.InteractiveConsole({'site': site})
            shell.interact()
    else:
        sys.stderr.write("Unsupported action: '%s'. Use -h to discover supported actions.\n" % action)
        sys.exit(1)

    if xml is not None:
        sys.stdout.write(etree.tostring(xml, pretty_print=options.pretty_print))
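
The --credentials option is read by expanding '~' in the given path and splitting the file's single line on the first colon only. A minimal sketch of that pattern (the file name in the usage comment is illustrative):

import os

def read_credentials(path):
    # The file holds one 'username:password' line; split only on the first
    # ':' so passwords that contain colons survive intact.
    with open(os.path.expanduser(path)) as f:
        username, password = f.read().strip().split(':', 1)
    return username, password

# e.g. username, password = read_credentials('~/.sharepoint-credentials')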

Example 166

Project: GenomicConsensus Source File: options.py
def add_options_to_argument_parser(parser):

    def canonicalizedFilePath(path):
        return os.path.abspath(os.path.expanduser(path))

    basics = parser.add_argument_group("Basic required options")
    basics.add_argument(
        "inputFilename",
        type=canonicalizedFilePath,
        help="The input cmp.h5 file")
    basics.add_argument(
        "--referenceFilename", "--reference", "-r",
        action="store",
        dest="referenceFilename",
        type=canonicalizedFilePath,
        required=True,
        help="The filename of the reference FASTA file")
    basics.add_argument(
        "-o", "--outputFilename",
        dest="outputFilenames",
        required=True,
        type=str,
        action="append",
        default=[],
        help="The output filename(s), as a comma-separated list. " + \
             "Valid output formats are .fa/.fasta, .fq/.fastq, .gff")

    parallelism = parser.add_argument_group("Parallelism")
    parallelism.add_argument(
        "-j", "--numWorkers",
        dest="numWorkers",
        type=int,
        default=1,
        help="The number of worker processes to be used")

    filtering = parser.add_argument_group("Output filtering")
    filtering.add_argument(
        "--minConfidence", "-q",
        action="store",
        dest="minConfidence",
        type=int,
        default=Constants.DEFAULT_MIN_CONFIDENCE,
        help="The minimum confidence for a variant call to be output to variants.gff")
    filtering.add_argument(
        "--minCoverage", "-x",
        action="store",
        dest="minCoverage",
        default=Constants.DEFAULT_MIN_COVERAGE,
        type=int,
        help="The minimum site coverage that must be achieved for variant calls and " + \
             "consensus to be calculated for a site.")
    filtering.add_argument(
        "--noEvidenceConsensusCall",
        action="store",
        choices=["nocall", "reference", "lowercasereference"],
        default="lowercasereference",
        help="The consensus base that will be output for sites with no effective coverage.")


    readSelection = parser.add_argument_group("Read selection/filtering")
    readSelection.add_argument(
        "--coverage", "-X",
        action="store",
        dest="coverage",
        type=int,
        default=Constants.DEFAULT_MAX_COVERAGE,
        help="A designation of the maximum coverage level to be used for analysis." + \
             " Exact interpretation is algorithm-specific.")
    readSelection.add_argument(
        "--minMapQV", "-m",
        action="store",
        dest="minMapQV",
        type=float,
        default=Constants.DEFAULT_MIN_MAPQV,
        help="The minimum MapQV for reads that will be used for analysis.")
    # Since the reference isn't loaded at options processing time, we
    # can't grok the referenceWindow specified until later.  We store
    # it as a string (referenceWindowsAsString) and it will later be
    # interpreted and stored as a proper window tuple (referenceWindow)
    readSelection.add_argument(
        "--referenceWindow", "--referenceWindows", "-w",
        action="store",
        dest="referenceWindowsAsString",
        type=str,
        help="The window (or multiple comma-delimited windows) of the reference to " + \
             "be processed, in the format refGroup:refStart-refEnd "                 + \
             "(default: entire reference).",
        default=None)

    readSelection.add_argument(
        "--alignmentSetRefWindows",
        action="store_true",
        dest="referenceWindowsFromAlignment",
        help="The window (or multiple comma-delimited windows) of the reference to " + \
             "be processed, in the format refGroup:refStart-refEnd "                 + \
             "will be pulled from the alignment file.",
        default=False)

    def slurpWindowFile(fname):
        return ",".join(map(str.strip, open(fname).readlines()))

    readSelection.add_argument(
        "--referenceWindowsFile", "-W",
        action="store",
        dest="referenceWindowsAsString",
        type=slurpWindowFile,
        help="A file containing reference window designations, one per line",
        default=None)
    readSelection.add_argument(
        "--barcode",
        type=str,
        dest="_barcode",
        help="Only process reads with the given barcode name.")
    def parseReadStratum(s):
        rs = map(int, s.split("/"))
        assert len(rs) == 2
        assert rs[0] < rs[1]
        return rs
    readSelection.add_argument(
        "--readStratum",
        help="A string of the form 'n/N', where n, and N are integers, 0 <= n < N, designating" \
             " that the reads are to be deterministically split into N strata of roughly even"  \
             " size, and stratum n is to be used for variant and consensus calling.  This is"   \
             " mostly useful for Quiver development.",
        dest="readStratum",
        default=None,
        type=parseReadStratum)
    readSelection.add_argument(
        "--minReadScore",
        action="store",
        dest="minReadScore",
        type=float,
        default=Constants.DEFAULT_MIN_READSCORE,
        help="The minimum ReadScore for reads that will be used for analysis (arrow-only).")
    readSelection.add_argument(
        "--minSnr",
        action="store",
        dest="minHqRegionSnr",
        type=float,
        default=Constants.DEFAULT_MIN_HQREGIONSNR,
        help="The minimum acceptable signal-to-noise over all channels for reads that will be used for analysis (arrow-only).")
    readSelection.add_argument(
        "--minZScore",
        action="store",
        dest="minZScore",
        type=float,
        default=Constants.DEFAULT_MIN_ZSCORE,
        help="The minimum acceptable z-score for reads that will be used for analysis (arrow-only).")
    readSelection.add_argument(
        "--minAccuracy",
        action="store",
        dest="minAccuracy",
        type=float,
        default=Constants.DEFAULT_MIN_ACCURACY,
        help="The minimum acceptable window-global alignment accuracy for reads that will be used for the analysis (arrow-only).")

    algorithm = parser.add_argument_group("Algorithm and parameter settings")
    algorithm.add_argument(
        "--algorithm",
        action="store",
        dest="algorithm",
        type=str,
        choices=["quiver", "arrow", "plurality", "poa", "best"],
        default="best")
    algorithm.add_argument(
        "--parametersFile", "-P",
        dest="parametersFile",
        type=str,
        default=None,
        help="Parameter set filename (QuiverParameters.ini), or directory D " + \
             "such that either D/*/GenomicConsensus/QuiverParameters.ini, "   + \
             "or D/GenomicConsensus/QuiverParameters.ini, is found.  In the " + \
             "former case, the lexically largest path is chosen.")
    algorithm.add_argument(
        "--parametersSpec", "-p",
        action="store",
        dest="parametersSpec",
        type=str,
        default="auto",
        help="Name of parameter set (chemistry.model) to select from the "   + \
             "parameters file, or just the name of the chemistry, in which " + \
             "case the best available model is chosen.  Default is 'auto', " + \
             "which selects the best parameter set from the cmp.h5")

    debugging = parser.add_argument_group("Verbosity and debugging/profiling")
    add_debug_option(debugging)
    debugging.add_argument(
        "--pdbAtStartup",
        action="store_true",
        dest="pdbAtStartup",
        default=False,
        help="Drop into Python debugger at startup (requires ipdb)")
    debugging.add_argument(
        "--profile",
        action="store_true",
        dest="doProfiling",
        default=False,
        help="Enable Python-level profiling (using cProfile).")
    debugging.add_argument(
        "--dumpEvidence", "-d",
        dest="dumpEvidence",
        nargs="?",
        default=None,
        const="variants",
        choices=["variants", "all", "outliers"])
    debugging.add_argument(
        "--evidenceDirectory",
        default="evidence_dump")
    debugging.add_argument(
        "--annotateGFF",
        action="store_true",
        help="Augment GFF variant records with additional information")
    debugging.add_argument(
        "--reportEffectiveCoverage",
        action="store_true",
        help="Additionally record the *post-filtering* coverage at variant sites")

    advanced = parser.add_argument_group("Advanced configuration options")
    advanced.add_argument(
        "--diploid",
        action="store_true",
        help="Enable detection of heterozygous variants (experimental)")
    advanced.add_argument(
        "--queueSize", "-Q",
        action="store",
        dest="queueSize",
        type=int,
        default=200)
    advanced.add_argument(
        "--threaded", "-T",
        action="store_true",
        dest="threaded",
        default=False,
        help="Run threads instead of processes (for debugging purposes only)")
    advanced.add_argument(
        "--referenceChunkSize", "-C",
        action="store",
        dest="referenceChunkSize",
        type=int,
        default=500)
    advanced.add_argument(
        "--fancyChunking",
        default=True,
        action="store_true",
        help="Adaptive reference chunking designed to handle coverage cutouts better")
    advanced.add_argument(
        "--simpleChunking",
        dest="fancyChunking",
        action="store_false",
        help="Disable adaptive reference chunking")
    advanced.add_argument(
        "--referenceChunkOverlap",
        action="store",
        dest="referenceChunkOverlap",
        type=int,
        default=5)
    advanced.add_argument(
        "--autoDisableHdf5ChunkCache",
        action="store",
        type=int,
        default=500,
        help="Disable the HDF5 chunk cache when the number of datasets in the cmp.h5 " + \
             "exceeds the given threshold")
    advanced.add_argument(
        "--aligner", "-a",
        action="store",
        choices=["affine", "simple"],
        default="affine",
        help="The pairwise alignment algorithm that will be used to produce variant calls" \
             " from the consensus (Quiver only).")
    advanced.add_argument(
        "--refineDinucleotideRepeats",
        dest="refineDinucleotideRepeats",
        action="store_true",
        help="Require quiver maximum likelihood search to try one less/more repeat copy in"  \
             " dinucleotide repeats, which seem to be the most frequent cause of suboptimal" \
             " convergence (getting trapped in local optimum) (Quiver only)")
    advanced.add_argument(
        "--noRefineDinucleotideRepeats",
        dest="refineDinucleotideRepeats",
        action="store_false",
        help="Disable dinucleotide refinement")
    advanced.set_defaults(refineDinucleotideRepeats=True)
    advanced.add_argument(
        "--fast",
        dest="fastMode",
        action="store_true",
        help="Cut some corners to run faster.  Unsupported!")
    advanced.add_argument(
        "--skipUnrecognizedContigs",
        action="store_true",
        help="Do not abort when told to process a reference window (via -w/--referenceWindow[s]) " \
             "that has no aligned coverage.  Outputs emptyish files if there are no remaining "    \
             "non-degenerate windows.  Only intended for use by smrtpipe scatter/gather.")

    return parser
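
For reference, here is a minimal standalone sketch (not part of the project above) showing how two of the argparse patterns used in this example behave: a custom type= callable like parseReadStratum, and a pair of store_true/store_false flags that write to the same dest with a default set via set_defaults, as with --fancyChunking / --simpleChunking.

import argparse

def parseReadStratum(s):
    # Same idea as the type= callable above: "n/N" -> [n, N], with basic validation.
    rs = list(map(int, s.split("/")))
    assert len(rs) == 2
    assert rs[0] < rs[1]
    return rs

parser = argparse.ArgumentParser()

readSelection = parser.add_argument_group("Read selection/filtering options")
readSelection.add_argument("--readStratum", type=parseReadStratum, default=None)

advanced = parser.add_argument_group("Advanced configuration options")
# Paired flags sharing one dest, mirroring --fancyChunking / --simpleChunking above.
advanced.add_argument("--fancyChunking", dest="fancyChunking", action="store_true")
advanced.add_argument("--simpleChunking", dest="fancyChunking", action="store_false")
advanced.set_defaults(fancyChunking=True)

args = parser.parse_args(["--readStratum", "2/8", "--simpleChunking"])
print(args.readStratum)    # [2, 8]
print(args.fancyChunking)  # False

Because both flags target the same dest, the last one given on the command line wins, and set_defaults supplies the value when neither flag is passed.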