sys.argv

Here are examples of the Python API sys.argv taken from open source projects. By voting up, you can indicate which examples are most useful and appropriate.
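
For orientation before the examples: sys.argv is the list of command-line arguments passed to a Python script. sys.argv[0] is the script name (or path) and sys.argv[1:] holds the actual arguments. A minimal sketch:

import sys

# argv[0] is the script path; everything after it is user input
if len(sys.argv) < 2:
    print("usage: greet.py <name>")
    sys.exit(1)

print("hello, " + sys.argv[1])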

200 Examples

Example 1

Project: WoT-Replay-To-JSON
Source File: wotrp2j.py
def main():

	parserversion = "0.9.8.0"

	global option_console, option_advanced, option_chat, option_server, filename_source
	option_console = 0
	option_advanced = 0
	option_chat = 0
	option_server = 0
	
	filename_source = ""
	
	replay_version = "0.0.0.0"
	replay_version_dict = ['0', '0', '0', '0']
	

	for argument in sys.argv:
		if argument == "-c":
			option_console = 1

		if argument == "-a":
			option_advanced = 1

		if argument == "-chat":
			option_chat = 1

		if argument == "-s":
			option_server = 1

	printmessage('###### WoT-Replay-To-JSON ' + parserversion + " by vBAddict.net")

	if len(sys.argv)==1:
		printmessage('Please specify filename and options')
		sys.exit(2)

	filename_source = str(sys.argv[1])
	
	printmessage('Processing ' + filename_source)
	
	result_blocks = dict()
	result_blocks['common'] = dict()
	result_blocks['common']['parser'] = "WoT-Replay-To-JSON " + parserversion + " by http://www.vbaddict.net"

	result_blocks['identify'] = dict()
	result_blocks['identify']['arenaUniqueID'] = 0
	
	if not os.path.exists(filename_source) or not os.path.isfile(filename_source) or not os.access(filename_source, os.R_OK):
		result_blocks['common']['message'] = 'cannot read file ' + filename_source
		dumpjson(result_blocks, filename_source, 1)

	f = open(filename_source, 'rb')
	
	try:
		f.seek(4)
		numofblocks = struct.unpack("I",f.read(4))[0]
		printmessage("Found Blocks: " + str(numofblocks))
		blockNum = 1
		datablockPointer = {}
		datablockSize = {}
		startPointer = 8
	except Exception, e:
		result_blocks['common']['message'] = e.message
		dumpjson(result_blocks, filename_source, 1)

	if numofblocks == 0:
		result_blocks['common']['message'] = "unknown file structure"
		dumpjson(result_blocks, filename_source, 1)

	if numofblocks > 5:

		result_blocks['common']['message'] = "uncompressed replay"
		result_blocks['datablock_advanced'] = extract_advanced(filename_source)
			
		if result_blocks['datablock_advanced']['valid'] == 1:
			
			result_blocks['identify']['accountDBID'] = 0
			result_blocks['identify']['internaluserID'] = 0
			if result_blocks['datablock_advanced']['playername'] in result_blocks['datablock_advanced']['roster']:
				rosterdata = dict()			
				rosterdata = result_blocks['datablock_advanced']['roster'][result_blocks['datablock_advanced']['playername']]
				result_blocks['identify']['accountDBID'] = rosterdata['accountDBID'] 
				result_blocks['identify']['countryid'] = rosterdata['countryID']
				result_blocks['identify']['internaluserID'] = rosterdata['internaluserID']
				result_blocks['identify']['tankid'] = rosterdata['tankID']
		
			
			result_blocks['identify']['arenaUniqueID'] = result_blocks['datablock_advanced']['arenaUniqueID']
			result_blocks['identify']['arenaCreateTime'] = result_blocks['datablock_advanced']['arenaCreateTime']
			
			mapsdata = get_json_data("maps.json")
			mapname='unknown'
			for mapdata in mapsdata:
				if mapdata['mapid'] == result_blocks['datablock_advanced']['arenaTypeID']:
						mapname = mapdata['mapidname']
						break

			result_blocks['identify']['mapName'] = mapname
			
			
			result_blocks['identify']['mapid'] = result_blocks['datablock_advanced']['arenaTypeID']
			result_blocks['identify']['playername'] = result_blocks['datablock_advanced']['playername']
			result_blocks['identify']['replay_version'] = result_blocks['datablock_advanced']['replay_version']
			
			result_blocks['identify']['error'] = "none"
			result_blocks['identify']['error_details'] = "none"

			result_blocks['common']['datablock_advanced'] = 1

			if option_chat==1:
				result_blocks['chat'] = extract_chats(filename_source)
				result_blocks['common']['datablock_chat'] = 1
		else:
			result_blocks['common']['message'] = "replay incompatible"
			dumpjson(result_blocks, filename_source, 1)
		
		
		dumpjson(result_blocks, filename_source, 0)

	while numofblocks >= 1:
		try:
			printmessage("Retrieving data for block " + str(blockNum))
			f.seek(startPointer)
			size = f.read(4)
			datablockSize[blockNum] = struct.unpack("I", size)[0]
			datablockPointer[blockNum] = startPointer + 4
			startPointer=datablockPointer[blockNum]+datablockSize[blockNum]
			blockNum += 1
			numofblocks -= 1
		except Exception, e:
			result_blocks['common']['message'] = e.message
			dumpjson(result_blocks, filename_source, 1)
		
	processing_block = 0
	
	for i in datablockSize:
		
		processing_block += 1
		
		try:
			pass
		except Exception, e:
			result_blocks['common']['message'] = e.message
			dumpjson(result_blocks, filename_source, 1)
			
		printmessage("Retrieving block " + str(processing_block))
		f.seek(datablockPointer[i])
							
		myblock = f.read(int(datablockSize[i]))

		if 'arenaUniqueID' in myblock:

			if version_check(replay_version, "0.8.11.0") > -1 or myblock[0]=='[':
				br_json_list = dict()
		
				try:
					br_json_list = json.loads(myblock)
				except Exception, e:
					printmessage("Error with JSON: " + e.message)
				
				if len(br_json_list)==0:
					continue

				br_block = br_json_list[0]
				br_block['parser'] = dict()
				br_block['parser']['battleResultVersion'] = 14

				if version_check(replay_version, "0.9.8.0") > -1:
					br_block['parser'] = dict()
					br_block['parser']['battleResultVersion'] = 15
					if 'personal' in br_block:
						for vehTypeCompDescr, ownResults in br_block['personal'].copy().iteritems():
							if 'details' in ownResults:
								ownResults['details'] = decode_details(ownResults['details'])
								print ownResults['details']
								br_block['personal'][vehTypeCompDescr] = ownResults

					
				if 'datablock_1' in result_blocks:
					# element 0 is the battle result itself; vehicles and kills follow it
					if len(br_json_list) > 1:
						result_blocks['datablock_1']['vehicles'] = br_json_list[1]

					if len(br_json_list) > 2:
						result_blocks['datablock_1']['kills'] = br_json_list[2]

			else:

				try:
					from SafeUnpickler import SafeUnpickler
					br_block = SafeUnpickler.loads(myblock)
					br_block['parser'] = dict()
					br_block['parser']['battleResultVersion'] = 14
				except Exception, e:
					printmessage("Error with unpickling myblock: " + e.message)

			if int(br_block['parser']['battleResultVersion']) < 15:
				if 'personal' in br_block:
					br_block['personal']['details'] = decode_details(br_block['personal']['details'])
					if 'vehicles' in br_block:
						for key, value in br_block['vehicles'].items():
							if 'details' in br_block['vehicles'][key]:
								del br_block['vehicles'][key]['details']
						
					
			result_blocks['datablock_battle_result'] = br_block

			result_blocks['common']['datablock_battle_result'] = 1
			result_blocks['identify']['arenaUniqueID'] = result_blocks['datablock_battle_result']['arenaUniqueID']

				
		else:
			blockdict = dict()
			try:
				blockdict = json.loads(myblock)
			except Exception, e:
				printmessage("Error with JSON: " + e.message)
			
			
			if 'clientVersionFromExe' in blockdict:
				replay_version = cleanReplayVersion(blockdict['clientVersionFromExe'])
				result_blocks['common']['replay_version'] = replay_version
				result_blocks['identify']['replay_version'] = replay_version
				replay_version_dict = replay_version.split('.')
				printmessage("Replay Version: " + str(replay_version))
			
			result_blocks['datablock_' + str(i)] = blockdict
			result_blocks['common']['datablock_' + str(i)] = 1

		result_blocks['common']['message'] = "ok"
	
	result_blocks = get_identify(result_blocks)
		
	if option_advanced==1 or option_chat==1:

		decfile = decrypt_file(filename_source, startPointer)
		uncompressed = decompress_file(decfile)
		if option_advanced==1:
			
			with open(uncompressed, 'rb') as f:
				if is_supported_replay(f):
					result_blocks['datablock_advanced'] = extract_advanced(uncompressed)
					result_blocks['common']['datablock_advanced'] = 1
				else:
					result_blocks['common']['datablock_advanced'] = 0
					result_blocks['common']['message'] = "Unsupported binary replay"
					dumpjson(result_blocks, filename_source, 0)

		if option_chat==1:
			import legacy
			result_blocks['chat_timestamp'] = legacy.Data(open(uncompressed, 'rb')).data[legacy.KEY.CHAT]
			result_blocks['chat'] = "<br/>".join([msg.encode("string-escape") for msg, timestamp in result_blocks['chat_timestamp']])
			result_blocks['common']['datablock_chat'] = 1

			result_blocks['bindata'] = legacy.Data(open(uncompressed, 'rb')).data
			
	dumpjson(result_blocks, filename_source, 0)
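
Example 1 parses sys.argv by hand: a loop compares every argument against the known flags, and sys.argv[1] is then taken as the filename. The core of that pattern, reduced to a sketch; note it assumes the filename comes before any flags:

import sys

# membership tests pick up a flag anywhere on the command line
option_console = "-c" in sys.argv

if len(sys.argv) == 1:
    sys.exit("Please specify filename and options")

# assumes the filename is the first argument, ahead of the flags
filename_source = sys.argv[1]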

Example 2

Project: PokemonGo-Map
Source File: utils.py
@memoize
def get_args():
    # pre-check to see if the -cf or --config flag is used on the command line
    # if not, we'll use the env var or default value.  this prevents layering of
    # config files, and handles missing config.ini as well
    defaultconfigfiles = []
    if '-cf' not in sys.argv and '--config' not in sys.argv:
        defaultconfigfiles = [os.getenv('POGOMAP_CONFIG', os.path.join(os.path.dirname(__file__), '../config/config.ini'))]
    parser = configargparse.ArgParser(default_config_files=defaultconfigfiles, auto_env_var_prefix='POGOMAP_')
    parser.add_argument('-cf', '--config', is_config_file=True, help='Configuration file')
    parser.add_argument('-a', '--auth-service', type=str.lower, action='append', default=[],
                        help='Auth Services, either one for all accounts or one per account: ptc or google. Defaults all to ptc.')
    parser.add_argument('-u', '--username', action='append', default=[],
                        help='Usernames, one per account.')
    parser.add_argument('-p', '--password', action='append', default=[],
                        help='Passwords, either single one for all accounts or one per account.')
    parser.add_argument('-w', '--workers', type=int,
                        help='Number of search worker threads to start. Defaults to the number of accounts specified.')
    parser.add_argument('-asi', '--account-search-interval', type=int, default=0,
                        help='Seconds for accounts to search before switching to a new account. 0 to disable.')
    parser.add_argument('-ari', '--account-rest-interval', type=int, default=7200,
                        help='Seconds for accounts to rest when they fail or are switched out')
    parser.add_argument('-ac', '--accountcsv',
                        help='Load accounts from CSV file containing "auth_service,username,passwd" lines')
    parser.add_argument('-l', '--location', type=parse_unicode,
                        help='Location, can be an address or coordinates')
    parser.add_argument('-j', '--jitter', help='Apply random -9m to +9m jitter to location',
                        action='store_true', default=False)
    parser.add_argument('-st', '--step-limit', help='Steps', type=int,
                        default=12)
    parser.add_argument('-sd', '--scan-delay',
                        help='Time delay between requests in scan threads',
                        type=float, default=10)
    parser.add_argument('-enc', '--encounter',
                        help='Start an encounter to gather IVs and moves',
                        action='store_true', default=False)
    parser.add_argument('-ed', '--encounter-delay',
                        help='Time delay between encounter pokemon in scan threads',
                        type=float, default=1)
    encounter_list = parser.add_mutually_exclusive_group()
    encounter_list.add_argument('-ewht', '--encounter-whitelist', action='append', default=[],
                                help='List of pokemon to encounter for more stats')
    encounter_list.add_argument('-eblk', '--encounter-blacklist', action='append', default=[],
                                help='List of pokemon to NOT encounter for more stats')
    parser.add_argument('-ld', '--login-delay',
                        help='Time delay between each login attempt',
                        type=float, default=5)
    parser.add_argument('-lr', '--login-retries',
                        help='Number of logins attempts before refreshing a thread',
                        type=int, default=3)
    parser.add_argument('-mf', '--max-failures',
                        help='Maximum number of failures to parse locations before an account will go into a two hour sleep',
                        type=int, default=5)
    parser.add_argument('-msl', '--min-seconds-left',
                        help='Time that must be left on a spawn before considering it too late and skipping it. e.g. 600 would skip anything with < 10 minutes remaining. Default 0.',
                        type=int, default=0)
    parser.add_argument('-dc', '--display-in-console',
                        help='Display Found Pokemon in Console',
                        action='store_true', default=False)
    parser.add_argument('-H', '--host', help='Set web server listening host',
                        default='127.0.0.1')
    parser.add_argument('-P', '--port', type=int,
                        help='Set web server listening port', default=5000)
    parser.add_argument('-L', '--locale',
                        help='Locale for Pokemon names (default: {},\
                        check {} for more)'.
                        format(config['LOCALE'], config['LOCALES_DIR']), default='en')
    parser.add_argument('-c', '--china',
                        help='Coordinates transformer for China',
                        action='store_true')
    parser.add_argument('-m', '--mock', type=str,
                        help='Mock mode - point to a fpgo endpoint instead of using the real PogoApi, ec: http://127.0.0.1:9090',
                        default='')
    parser.add_argument('-ns', '--no-server',
                        help='No-Server Mode. Starts the searcher but not the Webserver.',
                        action='store_true', default=False)
    parser.add_argument('-os', '--only-server',
                        help='Server-Only Mode. Starts only the Webserver without the searcher.',
                        action='store_true', default=False)
    parser.add_argument('-nsc', '--no-search-control',
                        help='Disables search control',
                        action='store_false', dest='search_control', default=True)
    parser.add_argument('-fl', '--fixed-location',
                        help='Hides the search bar for use in shared maps.',
                        action='store_true', default=False)
    parser.add_argument('-k', '--gmaps-key',
                        help='Google Maps Javascript API Key',
                        required=True)
    parser.add_argument('--skip-empty', help='Enables skipping of empty cells  in normal scans - requires previously populated database (not to be used with -ss)',
                        action='store_true', default=False)
    parser.add_argument('-C', '--cors', help='Enable CORS on web server',
                        action='store_true', default=False)
    parser.add_argument('-D', '--db', help='Database filename',
                        default='pogom.db')
    parser.add_argument('-cd', '--clear-db',
                        help='Deletes the existing database before starting the Webserver.',
                        action='store_true', default=False)
    parser.add_argument('-np', '--no-pokemon',
                        help='Disables Pokemon from the map (including parsing them into local db)',
                        action='store_true', default=False)
    parser.add_argument('-ng', '--no-gyms',
                        help='Disables Gyms from the map (including parsing them into local db)',
                        action='store_true', default=False)
    parser.add_argument('-nk', '--no-pokestops',
                        help='Disables PokeStops from the map (including parsing them into local db)',
                        action='store_true', default=False)
    parser.add_argument('-ss', '--spawnpoint-scanning',
                        help='Use spawnpoint scanning (instead of hex grid). Scans in a circle based on step_limit when on DB', nargs='?', const='nofile', default=False)
    parser.add_argument('--dump-spawnpoints', help='dump the spawnpoints from the db to json (only for use with -ss)',
                        action='store_true', default=False)
    parser.add_argument('-pd', '--purge-data',
                        help='Clear pokemon from database this many hours after they disappear \
                        (0 to disable)', type=int, default=0)
    parser.add_argument('-px', '--proxy', help='Proxy url (e.g. socks5://127.0.0.1:9050)', action='append')
    parser.add_argument('-pxsc', '--proxy-skip-check', help='Disable checking of proxies before start', action='store_true', default=False)
    parser.add_argument('-pxt', '--proxy-timeout', help='Timeout settings for proxy checker in seconds ', type=int, default=5)
    parser.add_argument('-pxd', '--proxy-display', help='Display info on which proxy is being used (index or full). To be used with -ps', type=str, default='index')
    parser.add_argument('--db-type', help='Type of database to be used (default: sqlite)',
                        default='sqlite')
    parser.add_argument('--db-name', help='Name of the database to be used')
    parser.add_argument('--db-user', help='Username for the database')
    parser.add_argument('--db-pass', help='Password for the database')
    parser.add_argument('--db-host', help='IP or hostname for the database')
    parser.add_argument('--db-port', help='Port for the database', type=int, default=3306)
    parser.add_argument('--db-max_connections', help='Max connections (per thread) for the database',
                        type=int, default=5)
    parser.add_argument('--db-threads', help='Number of db threads; increase if the db queue falls behind',
                        type=int, default=1)
    parser.add_argument('-wh', '--webhook', help='Define URL(s) to POST webhook information to',
                        nargs='*', default=False, dest='webhooks')
    parser.add_argument('-gi', '--gym-info', help='Get all details about gyms (causes an additional API hit for every gym)',
                        action='store_true', default=False)
    parser.add_argument('--disable-clean', help='Disable clean db loop',
                        action='store_true', default=False)
    parser.add_argument('--webhook-updates-only', help='Only send updates (pokémon & lured pokéstops)',
                        action='store_true', default=False)
    parser.add_argument('--wh-threads', help='Number of webhook threads; increase if the webhook queue falls behind',
                        type=int, default=1)
    parser.add_argument('--ssl-certificate', help='Path to SSL certificate file')
    parser.add_argument('--ssl-privatekey', help='Path to SSL private key file')
    parser.add_argument('-ps', '--print-status', action='store_true',
                        help='Show a status screen instead of log messages. Can switch between status and logs by pressing enter.', default=False)
    parser.add_argument('-sn', '--status-name', default=None,
                        help='Enable status page database update using STATUS_NAME as main worker name')
    parser.add_argument('-spp', '--status-page-password', default=None,
                        help='Set the status page password')
    parser.add_argument('-el', '--encrypt-lib', help='Path to encrypt lib to be used instead of the shipped ones')
    parser.add_argument('-odt', '--on-demand_timeout', help='Pause searching while web UI is inactive for this timeout(in seconds)', type=int, default=0)
    verbosity = parser.add_mutually_exclusive_group()
    verbosity.add_argument('-v', '--verbose', help='Show debug messages from PokemonGo-Map and pgoapi. Optionally specify file to log to.', nargs='?', const='nofile', default=False, metavar='filename.log')
    verbosity.add_argument('-vv', '--very-verbose', help='Like verbose, but show debug messages from all modules as well.  Optionally specify file to log to.', nargs='?', const='nofile', default=False, metavar='filename.log')
    parser.set_defaults(DEBUG=False)

    args = parser.parse_args()

    if args.only_server:
        if args.location is None:
            parser.print_usage()
            print(sys.argv[0] + ": error: arguments -l/--location is required")
            sys.exit(1)
    else:
        # If using a CSV file, add the data where needed into the username,password and auth_service arguments.
        # CSV file should have lines like "ptc,username,password", "username,password" or "username".
        if args.accountcsv is not None:
            # Giving num_fields something it would usually not get
            num_fields = -1
            with open(args.accountcsv, 'r') as f:
                for num, line in enumerate(f, 1):

                    fields = []

                    # First time around populate num_fields with current field count.
                    if num_fields < 0:
                        num_fields = line.count(',') + 1

                    csv_input = []
                    csv_input.append('')
                    csv_input.append('<username>')
                    csv_input.append('<username>,<password>')
                    csv_input.append('<ptc/google>,<username>,<password>')

                    # If the number of fields is different, this is not a CSV
                    if num_fields != line.count(',') + 1:
                        print(sys.argv[0] + ": Error parsing CSV file on line " + str(num) + ". Your file started with the following input, '" + csv_input[num_fields] + "' but now you gave us '" + csv_input[line.count(',') + 1] + "'.")
                        sys.exit(1)

                    field_error = ''
                    line = line.strip()

                    # Ignore blank lines and comment lines
                    if len(line) == 0 or line.startswith('#'):
                        continue

                    # If number of fields is more than 1 split the line into fields and strip them
                    if num_fields > 1:
                        fields = line.split(",")
                        fields = map(str.strip, fields)

                    # If the number of fields is one then assume this is "username". As requested..
                    if num_fields == 1:
                        # Empty lines are already ignored.
                        args.username.append(line)

                    # If the number of fields is two then assume this is "username,password". As requested..
                    if num_fields == 2:
                        # If field length is not longer than 0, something is wrong!
                        if len(fields[0]) > 0:
                            args.username.append(fields[0])
                        else:
                            field_error = 'username'

                        # If field length is not longer than 0, something is wrong!
                        if len(fields[1]) > 0:
                            args.password.append(fields[1])
                        else:
                            field_error = 'password'

                    # If the number of fields is three then assume this is "ptc,username,password". As requested..
                    if num_fields == 3:
                        # If field 0 is not ptc or google something is wrong!
                        if fields[0].lower() == 'ptc' or fields[0].lower() == 'google':
                            args.auth_service.append(fields[0])
                        else:
                            field_error = 'method'

                        # If field length is not longer than 0, something is wrong!
                        if len(fields[1]) > 0:
                            args.username.append(fields[1])
                        else:
                            field_error = 'username'

                        # If field length is not longer than 0, something is wrong!
                        if len(fields[2]) > 0:
                            args.password.append(fields[2])
                        else:
                            field_error = 'password'

                    if num_fields > 3:
                        print('Too many fields in accounts file: max supported are 3 fields. Found {} fields'.format(num_fields))
                        sys.exit(1)

                    # If something is wrong display error.
                    if field_error != '':
                        type_error = 'empty!'
                        if field_error == 'method':
                            type_error = 'not ptc or google instead we got \'' + fields[0] + '\'!'
                        print(sys.argv[0] + ": Error parsing CSV file on line " + str(num) + ". We found " + str(num_fields) + " fields, so your input should have looked like '" + csv_input[num_fields] + "'\nBut you gave us '" + line + "', your " + field_error + " was " + type_error)
                        sys.exit(1)

        errors = []

        num_auths = len(args.auth_service)
        num_usernames = 0
        num_passwords = 0

        if len(args.username) == 0:
            errors.append('Missing `username` either as -u/--username, csv file using -ac, or in config')
        else:
            num_usernames = len(args.username)

        if args.location is None:
            errors.append('Missing `location` either as -l/--location or in config')

        if len(args.password) == 0:
            errors.append('Missing `password` either as -p/--password, csv file, or in config')
        else:
            num_passwords = len(args.password)

        if args.step_limit is None:
            errors.append('Missing `step_limit` either as -st/--step-limit or in config')

        if num_auths == 0:
            args.auth_service = ['ptc']

        num_auths = len(args.auth_service)

        if num_usernames > 1:
            if num_passwords > 1 and num_usernames != num_passwords:
                errors.append('The number of provided passwords ({}) must match the username count ({})'.format(num_passwords, num_usernames))
            if num_auths > 1 and num_usernames != num_auths:
                errors.append('The number of provided auth ({}) must match the username count ({})'.format(num_auths, num_usernames))

        if len(errors) > 0:
            parser.print_usage()
            print(sys.argv[0] + ": errors: \n - " + "\n - ".join(errors))
            sys.exit(1)

        # Fill the pass/auth if set to a single value
        if num_passwords == 1:
            args.password = [args.password[0]] * num_usernames
        if num_auths == 1:
            args.auth_service = [args.auth_service[0]] * num_usernames

        # Make our accounts list
        args.accounts = []

        for i, username in enumerate(args.username):
            args.accounts.append({'username': username, 'password': args.password[i], 'auth_service': args.auth_service[i]})

        # Make max workers equal number of accounts if unspecified, and disable account switching
        if args.workers is None:
            args.workers = len(args.accounts)
            args.account_search_interval = None

        # Disable search interval if 0 specified
        if args.account_search_interval == 0:
            args.account_search_interval = None

        # Make sure we don't have an empty account list after adding command line and CSV accounts
        if len(args.accounts) == 0:
            print(sys.argv[0] + ": Error: no accounts specified. Use -a, -u, and -p or --accountcsv to add accounts")
            sys.exit(1)

        args.encounter_blacklist = [int(i) for i in args.encounter_blacklist]
        args.encounter_whitelist = [int(i) for i in args.encounter_whitelist]

        # Decide which scanning mode to use
        if args.spawnpoint_scanning:
            args.scheduler = 'SpawnScan'
        elif args.skip_empty:
            args.scheduler = 'HexSearchSpawnpoint'
        else:
            args.scheduler = 'HexSearch'

    return args
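
Example 2 hands the real parsing to configargparse, but it still reads sys.argv directly in two places: a pre-check for -cf/--config decides whether a default config file gets injected, and sys.argv[0] is reused as the program name in error messages. The pre-check in isolation (the fallback path here is a stand-in):

import os
import sys

# only fall back to a default config when the user passed no --config flag
defaultconfigfiles = []
if '-cf' not in sys.argv and '--config' not in sys.argv:
    defaultconfigfiles = [os.getenv('POGOMAP_CONFIG', 'config/config.ini')]

print(sys.argv[0] + ": default config files: " + str(defaultconfigfiles))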

Example 3

Project: clam
Source File: clamdispatcher.py
def main():
    if len(sys.argv) < 4:
        print("[CLAM Dispatcher] ERROR: Invalid syntax, use clamdispatcher.py [pythonpath] settingsmodule projectdir cmd arg1 arg2 ... got: " + " ".join(sys.argv[1:]), file=sys.stderr)
        with open('.done','w') as f:
            f.write(str(1))
        if os.path.exists('.pid'): os.unlink('.pid')
        return 1

    offset = 0
    if '/' in sys.argv[1]:
        #os.environ['PYTHONPATH'] = sys.argv[1]
        for path in sys.argv[1].split(':'):
            print("[CLAM Dispatcher] Adding to PYTHONPATH: " + path, file=sys.stderr)
            sys.path.append(path)
        offset = 1

    settingsmodule = sys.argv[1+offset]
    projectdir = sys.argv[2+offset]
    if projectdir == 'NONE': #Actions
        tmpdir = None
        projectdir = None
    elif projectdir.startswith('tmp://'): #Used for actions with a temporary dir
        tmpdir = projectdir[6:]
        projectdir = None
    else:
        if projectdir[-1] != '/':
            projectdir += '/'
        tmpdir = os.path.join(projectdir,'tmp')

    print("[CLAM Dispatcher] Started CLAM Dispatcher v" + str(VERSION) + " with " + settingsmodule + " (" + datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + ")", file=sys.stderr)

    cmd = sys.argv[3+offset]
    cmd = clam.common.data.unescapeshelloperators(cmd) #shell operators like pipes and redirects were passed in an escaped form
    if sys.version[0] == '2' and isinstance(cmd,str):
        cmd = unicode(cmd,'utf-8') #pylint: disable=undefined-variable
    for arg in sys.argv[4+offset:]:
        arg_u = clam.common.data.unescapeshelloperators(arg)
        if arg_u != arg:
            cmd += " " + arg_u #shell operator (pipe or something)
        else:
            cmd += " " + clam.common.data.shellsafe(arg,'"')


    if not cmd:
        print("[CLAM Dispatcher] FATAL ERROR: No command specified!", file=sys.stderr)
        if projectdir:
            f = open(projectdir + '.done','w')
            f.write(str(1))
            f.close()
            if os.path.exists(projectdir + '.pid'): os.unlink(projectdir + '.pid')
        return 1
    elif projectdir and not os.path.isdir(projectdir):
        print("[CLAM Dispatcher] FATAL ERROR: Project directory "+ projectdir + " does not exist", file=sys.stderr)
        f = open(projectdir + '.done','w')
        f.write(str(1))
        f.close()
        if os.path.exists(projectdir + '.pid'): os.unlink(projectdir + '.pid')
        return 1

    try:
        #exec("import " + settingsmodule + " as settings")
        settings = __import__(settingsmodule , globals(), locals(),0)
        try:
            if settings.CUSTOM_FORMATS:
                clam.common.data.CUSTOM_FORMATS = settings.CUSTOM_FORMATS
                print("[CLAM Dispatcher] Dependency injection for custom formats succeeded", file=sys.stderr)
        except AttributeError:
            pass
    except ImportError as e:
        print("[CLAM Dispatcher] FATAL ERROR: Unable to import settings module, settingsmodule is " + settingsmodule + ", error: " + str(e), file=sys.stderr)
        print("[CLAM Dispatcher]      hint: If you're using the development server, check you pass the path your service configuration file is in using the -P flag. For Apache integration, verify you add this path to your PYTHONPATH (can be done from the WSGI script)", file=sys.stderr)
        if projectdir:
            f = open(projectdir + '.done','w')
            f.write(str(1))
            f.close()
        return 1

    settingkeys = dir(settings)
    if not 'DISPATCHER_POLLINTERVAL' in settingkeys:
        settings.DISPATCHER_POLLINTERVAL = 30
    if not 'DISPATCHER_MAXRESMEM' in settingkeys:
        settings.DISPATCHER_MAXRESMEM = 0
    if not 'DISPATCHER_MAXTIME' in settingkeys:
        settings.DISPATCHER_MAXTIME = 0


    try:
        print("[CLAM Dispatcher] Running " + cmd, file=sys.stderr)
    except (UnicodeDecodeError, UnicodeError, UnicodeEncodeError):
        print("[CLAM Dispatcher] Running " + repr(cmd), file=sys.stderr) #unicode-issues on Python 2

    if sys.version[0] == '2' and isinstance(cmd,unicode): #pylint: disable=undefined-variable
        cmd = cmd.encode('utf-8')
    if projectdir:
        process = subprocess.Popen(cmd,cwd=projectdir, shell=True, stderr=sys.stderr)
    else:
        process = subprocess.Popen(cmd, shell=True, stderr=sys.stderr)
    begintime = datetime.datetime.now()
    if process:
        pid = process.pid
        print("[CLAM Dispatcher] Running with pid " + str(pid) + " (" + begintime.strftime('%Y-%m-%d %H:%M:%S') + ")", file=sys.stderr)
        sys.stderr.flush()
        if projectdir:
            with open(projectdir + '.pid','w') as f:
                f.write(str(pid))
    else:
        print("[CLAM Dispatcher] Unable to launch process", file=sys.stderr)
        sys.stderr.flush()
        if projectdir:
            with open(projectdir + '.done','w') as f:
                f.write(str(1))
        return 1

    #intervalf = lambda s: min(s/10.0, 15)
    abort = False
    idle = 0
    done = False
    lastpolltime = datetime.datetime.now()
    lastabortchecktime = datetime.datetime.now()

    while not done:
        d = total_seconds(datetime.datetime.now() - begintime)
        try:
            returnedpid, statuscode = os.waitpid(pid, os.WNOHANG)
            if returnedpid != 0:
                print("[CLAM Dispatcher] Process ended (" + datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + ", " + str(d)+"s) ", file=sys.stderr)
                done = True
        except OSError: #no such process
            print("[CLAM Dispatcher] Process lost! (" + datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + ", " + str(d)+"s)", file=sys.stderr)
            statuscode = 1
            done = True

        if done:
            break

        if total_seconds(datetime.datetime.now() - lastabortchecktime) >= min(10, d* 0.5):  #every 10 seconds, faster at beginning
            if projectdir and os.path.exists(projectdir + '.abort'):
                abort = True
            if abort:
                print("[CLAM Dispatcher] ABORTING PROCESS ON SIGNAL! (" + str(d)+"s)", file=sys.stderr)
                os.system("sleep 30 && kill -9 " + str(pid) + " &") #deathtrap in case the process doesn't listen within 30 seconds
                os.kill(pid, signal.SIGTERM)
                os.waitpid(pid, 0)
                if projectdir:
                    os.unlink(projectdir + '.abort')
                    f = open(projectdir + '.aborted','w')
                    f.close()
                done = True
                break
            lastabortchecktime = datetime.datetime.now()


        if d <= 1:
            idle += 0.05
            time.sleep(0.05)
        elif d <= 2:
            idle += 0.2
            time.sleep(0.2)
        elif d <= 10:
            idle += 0.5
            time.sleep(0.5)
        else:
            idle += 1
            time.sleep(1)

        if settings.DISPATCHER_MAXRESMEM > 0 and total_seconds(datetime.datetime.now() - lastpolltime) >= settings.DISPATCHER_POLLINTERVAL:
            resmem = mem(pid)
            if resmem > settings.DISPATCHER_MAXRESMEM * 1024:
                print("[CLAM Dispatcher] PROCESS EXCEEDS MAXIMUM RESIDENT MEMORY USAGE (" + str(resmem) + ' >= ' + str(settings.DISPATCHER_MAXRESMEM) + ')... ABORTING', file=sys.stderr)
                abort = True
                statuscode = 2
            lastpolltime = datetime.datetime.now()
        elif settings.DISPATCHER_MAXTIME > 0 and d > settings.DISPATCHER_MAXTIME:
            print("[CLAM Dispatcher] PROCESS TIMED OUT.. NO COMPLETION WITHIN " + str(d) + " SECONDS ... ABORTING", file=sys.stderr)
            abort = True
            statuscode = 3

    if projectdir:
        with open(projectdir + '.done','w') as f:
            f.write(str(statuscode))
        if os.path.exists(projectdir + '.pid'): os.unlink(projectdir + '.pid')

        #remove project index cache (has to be recomputed next time because this project now has a different size)
        if os.path.exists(os.path.join(projectdir,'..','.index')):
            os.unlink(os.path.join(projectdir,'..','.index'))


    if tmpdir and os.path.exists(tmpdir):
        print("[CLAM Dispatcher] Removing temporary files", file=sys.stderr)
        for filename in os.listdir(tmpdir):
            filepath = os.path.join(tmpdir,filename)
            try:
                if os.path.isdir(filepath):
                    shutil.rmtree(filepath)
                else:
                    os.unlink(filepath)
            except: #pylint: disable=bare-except
                print("[CLAM Dispatcher] Unable to remove " + filename, file=sys.stderr)

    d = total_seconds(datetime.datetime.now() - begintime)
    if statuscode > 127:
        print("[CLAM Dispatcher] Status code out of range (" + str(statuscode) + "), setting to 127", file=sys.stderr)
        statuscode = 127
    print("[CLAM Dispatcher] Finished (" + datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + "), exit code " + str(statuscode) + ", dispatcher wait time " + str(idle)  + "s, duration " + str(d) + "s", file=sys.stderr)

    return statuscode
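
Example 3 consumes sys.argv positionally, shifting every later index by an offset when the first argument looks like a path list (it contains a '/'). The indexing logic, stripped of the dispatcher details:

import sys

offset = 0
if len(sys.argv) > 1 and '/' in sys.argv[1]:
    # the first argument is a colon-separated path list, not the settings module
    sys.path.extend(sys.argv[1].split(':'))
    offset = 1

if len(sys.argv) < 4 + offset:
    sys.exit("usage: clamdispatcher.py [pythonpath] settingsmodule projectdir cmd [args...]")

settingsmodule = sys.argv[1 + offset]
projectdir = sys.argv[2 + offset]
cmd = sys.argv[3 + offset]
extra_args = sys.argv[4 + offset:]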

Example 4

Project: numexpr
Source File: setup.py
def setup_package():
    metadata = dict(
                      description='Fast numerical expression evaluator for NumPy',
                      author='David M. Cooke, Francesc Alted and others',
                      author_email='[email protected], [email protected]',
                      url='https://github.com/pydata/numexpr',
                      license='MIT',
                      packages=['numexpr'],
                      install_requires=requirements,
                      setup_requires=requirements
    )
    if (len(sys.argv) >= 2 and
        ('--help' in sys.argv[1:] or
         (sys.argv[1] in (
             '--help-commands', 'egg_info', '--version', 'clean', '--name')))):

        # For these actions, NumPy is not required.
        #
        # They are required to succeed without Numpy for example when
        # pip is used to install Numexpr when Numpy is not yet present in
        # the system.
        # (via https://github.com/abhirk/scikit-learn/blob/master/setup.py)
        try:
            from setuptools import setup
        except ImportError:
            from distutils.core import setup

        metadata['name']    = 'numexpr'
        metadata['version'] = version
    else:
        from numpy.distutils.core import setup
        from numpy.distutils.command.build_ext import build_ext as numpy_build_ext

        try:  # Python 3
            # Code taken from numpy/distutils/command/build_py.py
            # XXX: update LICENSES
            from distutils.command.build_py import build_py_2to3 as old_build_py
            from numpy.distutils.misc_util import is_string

            class build_py(old_build_py):

                def run(self):
                    build_src = self.get_finalized_command('build_src')
                    if build_src.py_modules_dict and self.packages is None:
                        self.packages = list(build_src.py_modules_dict.keys())
                    old_build_py.run(self)

                def find_package_modules(self, package, package_dir):
                    modules = old_build_py.find_package_modules(
                        self, package, package_dir)

                    # Find build_src generated *.py files.
                    build_src = self.get_finalized_command('build_src')
                    modules += build_src.py_modules_dict.get(package, [])

                    return modules

                def find_modules(self):
                    old_py_modules = self.py_modules[:]
                    new_py_modules = list(filter(is_string, self.py_modules))
                    self.py_modules[:] = new_py_modules
                    modules = old_build_py.find_modules(self)
                    self.py_modules[:] = old_py_modules

                    return modules

        except ImportError:  # Python 2
            from numpy.distutils.command.build_py import build_py

        DEBUG = False

        def localpath(*args):
            return op.abspath(op.join(*((op.dirname(__file__),) + args)))

        def debug(instring):
            if DEBUG:
                print(" DEBUG: " + instring)


        def configuration():
            from numpy.distutils.misc_util import Configuration, dict_append
            from numpy.distutils.system_info import system_info

            config = Configuration('numexpr')

            #try to find configuration for MKL, either from environment or site.cfg
            if op.exists('site.cfg'):
                mkl_config_data = config.get_info('mkl')
                # Some version of MKL needs to be linked with libgfortran.
                # For this, use entries of DEFAULT section in site.cfg.
                default_config = system_info()
                dict_append(mkl_config_data,
                            libraries=default_config.get_libraries(),
                            library_dirs=default_config.get_lib_dirs())
            else:
                mkl_config_data = {}

            # setup information for C extension
            if os.name == 'nt':
                pthread_win = ['numexpr/win32/pthread.c']
            else:
                pthread_win = []
            extension_config_data = {
                'sources': ['numexpr/interpreter.cpp',
                            'numexpr/module.cpp',
                            'numexpr/numexpr_object.cpp'] + pthread_win,
                'depends': ['numexpr/interp_body.cpp',
                            'numexpr/complex_functions.hpp',
                            'numexpr/interpreter.hpp',
                            'numexpr/module.hpp',
                            'numexpr/msvc_function_stubs.hpp',
                            'numexpr/numexpr_config.hpp',
                            'numexpr/numexpr_object.hpp'],
                'libraries': ['m'],
                'extra_compile_args': ['-funroll-all-loops', ],
            }
            dict_append(extension_config_data, **mkl_config_data)
            if 'library_dirs' in mkl_config_data:
                library_dirs = ':'.join(mkl_config_data['library_dirs'])
            config.add_extension('interpreter', **extension_config_data)
            config.set_options(quiet=True)

            config.make_config_py()
            config.add_subpackage('tests', 'numexpr/tests')

            #version handling
            config.get_version('numexpr/version.py')
            return config


        class cleaner(clean):

            def run(self):
                # Recursive deletion of build/ directory
                path = localpath("build")
                try:
                    shutil.rmtree(path)
                except Exception:
                    debug("Failed to remove directory %s" % path)
                else:
                    debug("Cleaned up %s" % path)

                # Now, the extension and other files
                try:
                    import imp
                except ImportError:
                    if os.name == 'posix':
                        paths = [localpath("numexpr/interpreter.so")]
                    else:
                        paths = [localpath("numexpr/interpreter.pyd")]
                else:
                    paths = []
                    for suffix, _, _ in imp.get_suffixes():
                        if suffix == '.py':
                            continue
                        paths.append(localpath("numexpr", "interpreter" + suffix))
                paths.append(localpath("numexpr/__config__.py"))
                paths.append(localpath("numexpr/__config__.pyc"))
                for path in paths:
                    try:
                        os.remove(path)
                    except Exception:
                        debug("Failed to clean up file %s" % path)
                    else:
                        debug("Cleaning up %s" % path)

                clean.run(self)

        class build_ext(numpy_build_ext):
            def build_extension(self, ext):
                # at this point we know what the C compiler is.
                if self.compiler.compiler_type == 'msvc' or self.compiler.compiler_type == 'intelemw':
                    ext.extra_compile_args = []
                    # also remove extra linker arguments msvc doesn't understand
                    ext.extra_link_args = []
                    # also remove gcc math library
                    ext.libraries.remove('m')
                numpy_build_ext.build_extension(self, ext)

        if setuptools:
            metadata['zip_safe'] = False

        metadata['cmdclass'] = {
            'build_ext': build_ext,
            'clean': cleaner,
            'build_py': build_py,
        }
        metadata['configuration'] = configuration

    setup(**metadata)
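
Example 4 is the classic setup.py trick: inspect sys.argv before importing heavy dependencies. Informational commands (--help, egg_info, clean, and so on) run with plain setuptools/distutils, while anything else goes through numpy.distutils. The decision on its own:

import sys

INFO_COMMANDS = ('--help-commands', 'egg_info', '--version', 'clean', '--name')

info_only = (len(sys.argv) >= 2 and
             ('--help' in sys.argv[1:] or sys.argv[1] in INFO_COMMANDS))

# NumPy only has to be importable when a real build is requested
print("NumPy required:", not info_only)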

Example 5

def bootstrap_3():
    """
    This method continues where the bootstrapper left off, but will quickly pass
    control to the Agent class which will spawn the functional components.

    Most of bootstrap_3 applies only to agent_0, in particular all mongodb
    interactions remains excluded for other sub-agent instances.

    The agent interprets a config file, which will specify in an agent_layout
    section:
      - what nodes should be used for sub-agent startup
      - what bridges should be started
      - what components should be started
      - what are the endpoints for bridges which are not started
    bootstrap_3 will create derived config files for all sub-agents.

    The agent master (agent_0) will collect information about the nodes required
    for all instances.  That is added to the config itself, for the benefit of
    the LRMS initialisation which is expected to block those nodes from the
    scheduler.
    """

    global lrms, agent, bridges

    # find out what agent instance name we have
    if len(sys.argv) != 2:
        raise RuntimeError('invalid number of parameters (%s)' % sys.argv)
    agent_name = sys.argv[1]

    # load the agent config, and overload the config dicts
    agent_cfg  = "%s/%s.cfg" % (os.getcwd(), agent_name)
    print "startup agent %s : %s" % (agent_name, agent_cfg)

    cfg = ru.read_json_str(agent_cfg)
    cfg['agent_name'] = agent_name
    pilot_id = cfg['pilot_id']

    # set up a logger and profiler
    prof = ru.Profiler ('%s.bootstrap_3' % agent_name)
    prof.prof('sync ref', msg='agent start', uid=pilot_id)
    log  = ru.get_logger('%s.bootstrap_3' % agent_name,
                         '%s.bootstrap_3.log' % agent_name, 'DEBUG')  # FIXME?
    log.info('start')
    prof.prof('sync ref', msg='agent start')

    try:
        import setproctitle as spt
        spt.setproctitle('radical.pilot %s' % agent_name)
    except Exception as e:
        log.debug('no setproctitle: %s', e)

    log.setLevel(cfg.get('debug', 'INFO'))

    print "Agent config (%s):\n%s\n\n" % (agent_cfg, pprint.pformat(cfg))

    # quickly set up a mongodb handle so that we can report errors.
    # FIXME: signal handlers need mongo_p, but we won't have that until later
    if agent_name == 'agent_0':

        # Check for the RADICAL_PILOT_DB_HOSTPORT env var, which will hold the
        # address of the tunnelized DB endpoint.
        # If it exists, we overrule the agent config with it.
        hostport = os.environ.get('RADICAL_PILOT_DB_HOSTPORT')
        if hostport:
            dburl = ru.Url(cfg['mongodb_url'])
            dburl.host, dburl.port = hostport.split(':')
            cfg['mongodb_url'] = str(dburl)

        _, mongo_db, _, _, _  = ru.mongodb_connect(cfg['mongodb_url'])
        mongo_p = mongo_db["%s.p" % cfg['session_id']]

        if not mongo_p:
            raise RuntimeError('could not get a mongodb handle')


    # set up signal and exit handlers
    def exit_handler():
        global lrms, agent, bridges

        print 'atexit'
        if lrms:
            lrms.stop()
            lrms = None
        if bridges:
            for b in bridges:
                b.stop()
            bridges = dict()
        if agent:
            agent.stop()
            agent = None
        sys.exit(1)

    def sigint_handler(signum, frame):
        if agent_name == 'agent_0':
            pilot_FAILED(msg='Caught SIGINT. EXITING (%s)' % frame)
        print 'sigint'
        prof.prof('stop', msg='sigint_handler', uid=pilot_id)
        prof.close()
        sys.exit(2)

    def sigterm_handler(signum, frame):
        if agent_name == 'agent_0':
            pilot_FAILED(msg='Caught SIGTERM. EXITING (%s)' % frame)
        print 'sigterm'
        prof.prof('stop', msg='sigterm_handler %s' % os.getpid(), uid=pilot_id)
        prof.close()
        sys.exit(3)

    def sigalarm_handler(signum, frame):
        if agent_name == 'agent_0':
            pilot_FAILED(msg='Caught SIGALRM (Walltime limit?). EXITING (%s)' % frame)
        print 'sigalrm'
        prof.prof('stop', msg='sigalarm_handler', uid=pilot_id)
        prof.close()
        sys.exit(4)

    import atexit
    atexit.register(exit_handler)
    signal.signal(signal.SIGINT,  sigint_handler)
    signal.signal(signal.SIGTERM, sigterm_handler)
    signal.signal(signal.SIGALRM, sigalarm_handler)

    # if anything went wrong up to this point, we would have been unable to
    # report errors into mongodb.  From here on, any fatal error should result
    # in one of the above handlers or exit handlers being activated, thus
    # reporting the error dutifully.

    try:
        # ----------------------------------------------------------------------
        # des Pudels Kern (the heart of the matter): merge LRMS info into cfg and get the agent started

        if agent_name == 'agent_0':

            # only the master agent creates LRMS and sub-agent config files.
            # The LRMS which will give us the set of agent_nodes to use for
            # sub-agent startup.  Add the remaining LRMS information to the
            # config, for the benefit of the scheduler).

            lrms = rp.agent.RM.create(name   = cfg['lrms'],
                             cfg    = cfg,
                             logger = log)
            cfg['lrms_info'] = lrms.lrms_info


            # the master agent also is the only one which starts bridges.  It
            # has to do so before creating the Agent Worker instance, as that is
            # using the bridges already.

            bridges = start_bridges(cfg, log)
            # FIXME: make sure all communication channels are in place.  This could
            # be replaced with a proper barrier, but not sure if that is worth it...
            time.sleep (1)

            # after we started bridges, we'll add their in and out addresses
            # to the config, so that the communication channels can connect to
            # them.  At this point we also write configs for all sub-agents this
            # instance intends to spawn.
            #
            # FIXME: we should point the address to the node of the subagent
            #        which hosts the bridge, not the local IP.  Until this
            #        is fixed, bridges MUST run on agent_0 (which is what
            #        RM.hostip() below will point to).
            nodeip = rp.agent.RM.hostip(cfg.get('network_interface'), logger=log)
            write_sub_configs(cfg, bridges, nodeip, log)

            # Store some runtime information into the session
            mongo_p.update({"_id": pilot_id},
                           {"$set": {"lm_info"  : lrms.lm_info.get('version_info'),
                                     "lm_detail": lrms.lm_info.get('lm_detail')}})

        # we now have correct bridge addresses added to the agent_0.cfg, and all
        # other agents will have picked that up from their config files -- we
        # can start the agent and all its components!
        agent = rp.worker.Agent(cfg)
        agent.start()

        log.debug('waiting for agent %s to join' % agent_name)
        agent.join()
        log.debug('agent %s joined' % agent_name)

        # ----------------------------------------------------------------------

    except SystemExit:
        log.exception("Exit running agent: %s" % agent_name)
        if agent and not agent.final_cause:
            agent.final_cause = "sys.exit"

    except Exception as e:
        log.exception("Error running agent: %s" % agent_name)
        if agent and not agent.final_cause:
            agent.final_cause = "error"

    finally:

        # in all cases, make sure we perform an orderly shutdown.  I hope python
        # does not mind doing all those things in a finally clause of
        # (essentially) main...
        if agent:
            agent.stop()
        log.debug('agent %s finalized' % agent_name)

        # agent.stop will not tear down bridges -- we do that here at last
        for name,b in bridges.items():
            try:
                log.info("closing bridge %s", b)
                b['handle'].stop()
            except Exception as e:
                log.exception('ignore failing bridge terminate (%s)', e)
        bridges = dict()

        # make sure the lrms release whatever it acquired
        if lrms:
            lrms.stop()
            lrms = None

        # agent_0 will also report final pilot state to the DB
        if agent_name == 'agent_0':
            if agent and agent.final_cause == 'timeout':
                pilot_DONE(mongo_p, pilot_id, log, "TIMEOUT received. Terminating.")
            elif agent and agent.final_cause == 'cancel':
                pilot_CANCELED(mongo_p, pilot_id, log, "CANCEL received. Terminating.")
            elif agent and agent.final_cause == 'sys.exit':
                pilot_CANCELED(mongo_p, pilot_id, log, "EXIT received. Terminating.")
            elif agent and agent.final_cause == 'finalize':
                log.info('shutdown due to component finalization -- assuming error')
                pilot_FAILED(mongo_p, pilot_id, log, "FINALIZE received")
            elif agent:
                pilot_FAILED(mongo_p, pilot_id, log, "TERMINATE received")
            else:
                pilot_FAILED(mongo_p, pilot_id, log, "FAILED startup")

        log.info('stop')
        prof.prof('stop', msg='finally clause agent', uid=pilot_id)
        prof.close()
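
Example 5 treats sys.argv as a fixed-arity interface: exactly one argument (the agent name) is accepted, and the full argv is echoed back on error, which makes misuse easy to diagnose in logs. The guard on its own:

import sys

# expect exactly one argument besides the script name
if len(sys.argv) != 2:
    raise RuntimeError('invalid number of parameters (%s)' % sys.argv)

agent_name = sys.argv[1]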

Example 6

Project: wotdecoder
Source File: findplayer.py
def main():

  nickname = "*"
  clantag = "*"
  csens = re.IGNORECASE
  verbose = 4
  show_errors = False
  owner = False
  recursive = True
  full_path = False
  battle_result = False
  source = os.getcwd()

# Parse arguments
  skip = -1
  for argind, arg in enumerate(sys.argv[1:]):
    if argind == skip: pass
    elif arg == "-c" : csens = 0
    elif arg == "-v0" : verbose = 0
    elif arg == "-v1" : verbose = 1
    elif arg == "-v2" : verbose = 2
    elif arg == "-v3" : verbose = 3
    elif arg == "-v4" : verbose = 4
    elif arg == "-e" : show_errors = True
    elif arg == "-o" : owner = True
    elif arg == "-r" : recursive = False
    elif arg == "-p" : full_path = True
    elif arg == "-b" : battle_result = True
    elif arg == "-i" :
      if len(sys.argv) <= argind+2:
        sys.exit("\nUnspecified input directory.")
      source = sys.argv[argind+2]
      if not os.path.exists(source):
        sys.exit("\n"+source+" doesnt exist.")
      skip = argind+1

    elif arg in ("-h", "-?") or arg.startswith("-") :
                    sys.exit("findplayer scans replay files for players using nickname and/or clantag."
                             "\nUsage:" \
                             "\n\nfindplayer nickname [clantag] -c -v0..3 -e -o -r -p -b -i input_file_or_directory" \
                             "\n\nTry `*` for string wildcard, `?` for character wildcard." \
                             "\n-c   Case sensitive search." \
                             "\n-v0  Verbose 0 = silent running, only give summary." \
                             "\n-v1  + list replay name, default." \
                             "\n-v2  + show match result, frag count." \
                             "\n-v3  + detailed stats." \
                             "\n-v4  + stats summary." \
                             "\n-e   Show errors." \
                             "\n-o   Include replay owner stats." \
                             "\n-r   Turn off recursive subdirectory scan." \
                             "\n-p   Show full patch." \
                             "\n-b   Scan battle_results(.dat) instead of wotreplays." \
                             "\n-i   Specify input directory. Default is current." \
                             "\n\nExamples:" \
                             "\n`*z_?l [1?3]` will match Rasz_pl[123]" \
                             "\n`[*]` will match any person in a clan." \
                             "\n`[]` will only match people without clan." \
                             "\n`??` will list all people with 2 letter nicknames." \
                             "\n`*` will match everyone.")
    elif arg.startswith("[") and arg.endswith("]"): clantag = arg[1:-1]
    else: nickname = arg


  print ("\nLooking for nickname:", nickname, " clantag: ["+clantag+"]")
  print ("Source:", source)
  print ("Verbose:", verbose, "Recursive:", recursive, "Errors:", ("hide","show")[show_errors])


  t1 = time.perf_counter()  # time.clock() was removed in Python 3.8

  if os.path.isfile(source):
    listdir = [source]
    if source.endswith(".dat"):
      battle_result = True
  else:
    listdir = custom_listfiles(source, ("wotreplay", "dat")[battle_result], recursive, "temp.wotreplay")

# Prepare regex filters
  regexnickname = fnmatch.translate(nickname)
  regexclantag = fnmatch.translate(clantag)
  reobjnickname = re.compile(regexnickname, csens)
  reobjclantag = re.compile(regexclantag, csens)

  matches = 0
  matches_kills = 0
  matches_stats = 0
  errors = 0

  owner_kills = 0
  owner_damage = 0
  owner_spotted = 0
  player_kills = 0
  player_damage = 0
  player_spotted = 0

  for files in listdir:
    while True:

#      if verbose < 2:
#        scan_mask = 1 #1 means try to only decode first block (binary 001)
#      else:
#        scan_mask = 7 #7 means decode everything (binary 111)
      scan_mask = 7 #above speeds -v0 -v1 scanning 3x, but it doesn't detect certain errors, defaulting to slower method

      if battle_result:
        chunks = ["", "", ""]
        chunks[2], version = wotdecoder.battle_result(files)
        chunks_bitmask = 4
        processing = 4
      else:
        chunks, chunks_bitmask, processing, version = wotdecoder.replay(files, scan_mask)

#      pprint (chunks[0])
#      pprint (chunks[1])
#      pprint (chunks[2])

#      pprint (chunks[2]['personal']['accountDBID'])
#      pprint (chunks[2]['players'][ chunks[2]['personal']['accountDBID'] ]['name'])

#      pprint(chunks)

#      print(datetime.strptime(chunks[0]['dateTime'], '%d.%m.%Y %H:%M:%S'))
#      print(chunks[2]['common']['arenaCreateTime'])
#      print( (datetime.fromtimestamp(chunks[2]['common']['arenaCreateTime'])- datetime(1970, 1, 1, 0, 0)).total_seconds())


#      print(datetime.strptime(chunks[0]['dateTime'], '%d.%m.%Y %H:%M:%S').timestamp())
#      xx = (datetime.fromtimestamp(chunks[2]['common']['arenaCreateTime'])- datetime(1970, 1, 1, 0, 0)).total_seconds()
#      print( datetime.fromtimestamp(chunks[2]['common']['arenaCreateTime']))
#      print( datetime.fromtimestamp(xx))
#      print( mapidname[ chunks[2]['common']['arenaTypeID'] & 65535 ])
#      print( chunks[0]['mapName'])

      if (processing >8) or (not chunks_bitmask&5): #ignore replays with no useful data, must have at least first Json or pickle
        errors += 1
        if show_errors:
          print ("\n\n---")
          print ("", ("",os.path.dirname(files)+os.path.sep)[full_path] + os.path.basename(files))
          print (wotdecoder.status[processing])
          print ("---", end="")
        break

      elif processing ==6: #show error messages for recoverable errors
        errors += 1
        if show_errors:
          print ("\n\n---")
          print ("", ("",os.path.dirname(files)+os.path.sep)[full_path] + os.path.basename(files))
          print (wotdecoder.status[processing])
          print ("---", end="")

      elif processing ==8: #very broken replay, only first json valid, have to disable pickle
        errors += 1
        chunks_bitmask = 1
        if show_errors:
          print ("\n\n---")
          print ("", ("",os.path.dirname(files)+os.path.sep)[full_path] + os.path.basename(files))
          print (wotdecoder.status[processing])
          print ("---", end="")

      match = False
      player_found = 0
      owner_found = 0
      owner_name = ""
      owner_clan = ""

      if chunks_bitmask&4:
        vehicles = chunks[2]['players']
        owner_name = chunks[2]['players'][ chunks[2]['personal']['accountDBID'] ]['name']
        owner_found = chunks[2]['personal']['accountDBID']
      elif chunks_bitmask&2:
        vehicles = chunks[1][1]
        owner_name = chunks[0]['playerName']
      else:
        vehicles = chunks[0]['vehicles']
        owner_name = chunks[0]['playerName']

      for player in vehicles:
        check_player_name = vehicles[player]['name']
        check_player_clan = vehicles[player]['clanAbbrev']

        if not match and reobjnickname.match(check_player_name) and reobjclantag.match(check_player_clan):
          match = True
          matches += 1
          player_found = player
          player_name = vehicles[player]['name']
          player_clan = "["+vehicles[player]['clanAbbrev']+"]"

        if owner_found==0 and (vehicles[player]['name'] == owner_name): #find owner playerID
          owner_found = player
          owner_clan = "["+vehicles[player]['clanAbbrev']+"]"

      if not match: break

      if verbose >0:
            print ("\n\n--------------------------------------------------------------------------------")
            print ("", ("",os.path.dirname(files)+os.path.sep)[full_path] + os.path.basename(files))
            print ("---")
            print ("{0:39}{1:39}".format(player_name+player_clan, ("","| "+owner_name+owner_clan)[owner]))

      if chunks_bitmask&4:
        vehicle_player_found = chunks[2]['players'][player_found]['vehicleid']
        vehicle_owner_found = chunks[2]['players'][owner_found]['vehicleid']

      if verbose >1:
            if chunks_bitmask&4: #is pickle available?
              if chunks[2]['common']['finishReason']==3:
                win_loss="Draw"
              else:
                win_loss = ("Loss","Win ")[chunks[2]['common']['winnerTeam']==chunks[2]['vehicles'][vehicle_player_found]['team']]
              finishReason = "("+ wotdecoder.finishreason[ chunks[2]['common']['finishReason'] ] +")"
#              print ("--- {0:4} on {1:28}{2:>40}".format(win_loss, wotdecoder.maps[ chunks[2]['common']['arenaTypeID'] & 65535 ][1], finishReason))
              print ("--- {0:4} on {1:28}{2:>40}".format(win_loss, wotdecoder.maps[ chunks[2]['common']['arenaTypeID'] & 65535 ][1], finishReason))
#wotdecoder.gameplayid[ chunks[2]['common']['arenaTypeID'] >>16 ]
#wotdecoder.bonustype[ chunks[2]['common']['bonusType'] ]
            elif chunks_bitmask&2: #is second Json available?
              finishReason = ""
              print ("--- {0:4} on {1:28}{2:15}".format(("Loss","Win ")[chunks[1][0]['isWinner']==1], chunks[0]['mapDisplayName'], finishReason))
            else: #incomplete, all we can tell is tanks
              if owner:
                owner_string = "                       {0:<18}".format(chunks[0]['vehicles'][owner_found]['vehicleType'].split(":")[1])
              else:
                owner_string = ""
              print ("                     {0:<18}{1:39}".format(chunks[0]['vehicles'][player_found]['vehicleType'].split(":")[1], owner_string))

            if chunks_bitmask&4: #is second Json available?
              if owner:
                owner_string_kills = "| Kills  ={0:>5}".format( chunks[2]['vehicles'][vehicle_owner_found]['kills'])
                owner_string_tank = "| {0:8} in {1:<27}".format( ("Died","Survived")[chunks[2]['vehicles'][vehicle_owner_found]['health']>0], wotdecoder.tank[ chunks[2]['vehicles'][vehicle_owner_found]['typeCompDescr'] ][1])
                owner_kills += chunks[2]['vehicles'][vehicle_owner_found]['kills']
              else:
                owner_string_kills = ""
                owner_string_tank = ""
              print ("{0:8} in {1:<27}{2:39}".format(("Died","Survived")[chunks[2]['vehicles'][vehicle_player_found]['health']>0], wotdecoder.tank[ chunks[2]['vehicles'][vehicle_player_found]['typeCompDescr'] ][1], owner_string_tank ))
              print ("Kills  ={0:>5}{1:26}{2:39}".format(chunks[2]['vehicles'][vehicle_player_found]['kills'], "", owner_string_kills ))
              player_kills += chunks[2]['vehicles'][vehicle_player_found]['kills']
              matches_kills += 1

            elif chunks_bitmask&2: #is second Json available?
              if owner:
#                print (player_found, owner_found)
#                pprint (chunks[1][1])
                owner_string_kills = "| Kills  ={0:>5}".format( len(chunks[1][0]['killed']) )
                owner_string_tank = "| {0:8} in {1:<27}".format( ("Died","Survived")[ chunks[1][1][owner_found]['isAlive']==1 ], chunks[1][1][owner_found]['vehicleType'].split(":")[1] )
                owner_kills += chunks[1][2][owner_found]['frags']
              else:
                owner_string_kills = ""
                owner_string_tank = ""
              print ("{0:8} in {1:<27}{2:39}".format(("Died","Survived")[ chunks[1][1][player_found]['isAlive']==1 ], chunks[1][1][player_found]['vehicleType'].split(":")[1], owner_string_tank))
              if player_found in chunks[1][2]: # WG quirk: sometimes not all player frag counts are saved :/
                frags = chunks[1][2][player_found]['frags']
              else:
                frags = 0
              print ("Kills  ={0:>5}{1:26}{2:39}".format(frags, "", owner_string_kills))
              player_kills += frags
              matches_kills += 1



      if verbose >2 and chunks_bitmask&4: #is pickle available? use it for detailed stats
        player = int(player)
        if owner:
          if version >= 860:
            chunks[2]['vehicles'][vehicle_owner_found]['damageAssisted'] = chunks[2]['vehicles'][vehicle_owner_found]['damageAssistedTrack'] + chunks[2]['vehicles'][vehicle_owner_found]['damageAssistedRadio']
          owner_string_damage = "| Damage ={0:>5}".format(chunks[2]['vehicles'][vehicle_owner_found]['damageDealt'])
          owner_string_spotted = "| Spotted={0:>5}".format(chunks[2]['vehicles'][vehicle_owner_found]['damageAssisted'])
          owner_damage += chunks[2]['vehicles'][vehicle_owner_found]['damageDealt']
          owner_spotted += chunks[2]['vehicles'][vehicle_owner_found]['damageAssisted']
        else:
          owner_string_damage = ""
          owner_string_spotted = ""
        print ("Damage ={0:>5}{1:26}{2:39}".format(chunks[2]['vehicles'][vehicle_player_found]['damageDealt'], "", owner_string_damage))
        if version >= 860:
          chunks[2]['vehicles'][vehicle_player_found]['damageAssisted'] = chunks[2]['vehicles'][vehicle_player_found]['damageAssistedTrack'] + chunks[2]['vehicles'][vehicle_player_found]['damageAssistedRadio']
        print ("Spotted={0:>5}{1:26}{2:39}".format(chunks[2]['vehicles'][vehicle_player_found]['damageAssisted'], "", owner_string_spotted))
        player_damage += chunks[2]['vehicles'][vehicle_player_found]['damageDealt']
        player_spotted += chunks[2]['vehicles'][vehicle_player_found]['damageAssisted']
        matches_stats += 1
        if battle_result: #we are decoding battle_result, let's more-or-less reconstruct the potential replay name
# it's not 'pixel' accurate; tank country and underscores are not reproduced exactly.
          timestamp = datetime.fromtimestamp(chunks[2]['common']['arenaCreateTime']).strftime('%Y%m%d_%H%M')
          print ("Belongs to~", timestamp+"_"+wotdecoder.tank[ chunks[2]['vehicles'][vehicle_owner_found]['typeCompDescr'] ][1]+"_"+wotdecoder.maps[ chunks[2]['common']['arenaTypeID'] & 65535 ][0]+".wotreplay")



      break


  if matches > 0:
    if verbose >3 and (matches_kills!=0 or matches_stats!=0) : # stats summary
      if matches_kills==0: matches_kills =1 #let's not divide by zero today :)
      if matches_stats==0: matches_stats =1
      if owner:
        owner_string_kills = "| Kills  ={0:>9.2f}".format( owner_kills/matches_kills )
        owner_string_damage = "| Damage ={0:>9.2f}".format( owner_damage/matches_stats )
        owner_string_spotted = "| Spotted={0:>9.2f}".format( owner_spotted/matches_stats )
      else:
        owner_string_kills = ""
        owner_string_damage = ""
        owner_string_spotted = ""

      print ("\nSummary (average):")
      print ("Kills  ={0:>9.2f}{1:23}{2:39}".format(player_kills/matches_kills , "", owner_string_kills))
      print ("Damage ={0:>9.2f}{1:23}{2:39}".format(player_damage/matches_stats , "", owner_string_damage))
      print ("Spotted={0:>9.2f}{1:23}{2:39}".format(player_spotted/matches_stats , "", owner_string_spotted))

    print("\n\nFound", matches, "matches. ", end="")
  else:
    print("\n\nNo matches found. ", end="")
  print(errors, "errors.")

  t2 = time.perf_counter()
  print ("\nProcessing "+str(len(listdir))+" files took %0.3fms" % ((t2-t1)*1000))

Example 7

Project: wotdecoder
Source File: wotrepparser.py
View license
def main():

  verbose = False
  recursive = False
  rename = True
  dry = False
  mode = 0
  b_r = 0
  overwrite = False
  source = os.getcwd()
  output = os.getcwd()
  skip = -1

# Parse arguments
  for argind, arg in enumerate(sys.argv[1:]):
    if argind == skip: pass
    elif arg == "-v" : verbose = True
    elif arg == "-r" : recursive = True
    elif arg == "-n" : rename = False
    elif arg == "-b" : b_r = 1
    elif arg == "-b1" : b_r = 2
    elif arg == "-b2" : b_r = 3
    elif arg == "-f" : overwrite = True
    elif arg == "-c" : mode = 1
    elif arg == "-c0" : mode = 2
    elif arg == "-o" :
      if len(sys.argv) <= argind+2:
        sys.exit("\nUnspecified Output directory.")
      output = sys.argv[argind+2]
      skip = argind+1

      if not os.path.isdir(output):
        print("\nOutput directory: "+output+" doesnt exist. Creating.")
        try:
          os.makedirs(output)
        except:
          sys.exit("Cant create "+output)

    elif arg in ("-h", "-?") or arg.startswith("-") :
                    sys.exit("wotrepparser scans replay files and sorts them into categories (incomplete, result, complete, clanwar, error)."
                             "\nUsage:" \
                             "\n\nwotrepparser file_or_directory -o output_directory -v -r -n" \
                             "\n\n-o  Specify output directory. Default is current." \
                             "\n-v  Verbose, display every file processed." \
                             "\n-r  Recursive scan of all subdirectories." \
                             "\n-n  Dont rename files." \
                             "\n-b  Dump raw battle_results pickle to output_directory\\b_r\\number.pickle" \
                             "\n-b1 Decode battle_results pickle, save output_directory\\b_r\\number.json" \
                             "\n-b2 Same as above, but human readable json." \
                             "\n-f  Force overwrite. Default is ask." \
                             "\n-c  Copy instead of moving." \
                             "\n-c0 Dry run, dont copy, dont move.")

    elif source == os.getcwd():
      if not os.path.exists(arg):
        sys.exit("\n"+arg+" doesnt exist.")
      source = arg


  print ("\nSource:", source)
  print ("Output:", output)
  print ("Mode  :", ("move","copy","dry run")[mode]+",",("dont rename","rename")[rename]+("",", verbose")[verbose]+("",", recursive dir scan")[recursive]+ \
         ("",", raw battle_results pickle",", decoded battle_results json",", decoded human readable battle_results json")[b_r]+".\n")




  t1 = time.perf_counter()

  if os.path.isfile(source):
    listdir = [source]
  else:
    listdir = custom_listfiles(source, "wotreplay", recursive, "temp.wotreplay")

#  listdir = custom_listfiles("G:\\World_of_Tanks\\replays\\clanwars\\", "wotreplay", False)
#  listdir += custom_listfiles("G:\\World_of_Tanks\\replays\\complete\\", "wotreplay", False)
#  listdir += custom_listfiles("G:\\World_of_Tanks\\replays\\incomplete\\", "wotreplay", False)
#  listdir = {"G:\\World_of_Tanks\\replays\\incomplete\\20121213_0553_usa-T110_39_crimea.wotreplay"}

  if not os.path.exists(output + os.path.sep + "clanwar"):
    os.makedirs(output + os.path.sep + "clanwar")
  if not os.path.exists(output + os.path.sep + "incomplete"):
    os.makedirs(output + os.path.sep + "incomplete")
  if not os.path.exists(output + os.path.sep + "result"):
    os.makedirs(output + os.path.sep + "result")
  if not os.path.exists(output + os.path.sep + "complete"):
    os.makedirs(output + os.path.sep + "complete")
  if not os.path.exists(output + os.path.sep + "error"):
    os.makedirs(output + os.path.sep + "error")
  if b_r>0 and (not os.path.exists(output + os.path.sep + "b_r")):
    os.makedirs(output + os.path.sep + "b_r")

  errors = 0
  dest = ["incomplete", "result", "complete", "complete", "clanwar", "error"]
  stats = [0, 0, 0, 0, 0, 0]

  for files in listdir:
    while True:
#      print ("\n"+files)
      fileo = os.path.basename(files)

      chunks, chunks_bitmask, processing, version = wotdecoder.replay(files,7) #7 means try to decode all three blocks (binary 111)

      if processing == 3 and (len(chunks[0]['vehicles'])!=len(chunks[1][1])) or \
         processing == 4 and chunks[2]['common']['bonusType'] == 5: #fogofwar = cw, bonusType = 5 = cw
        dest_index = 4
        stats[dest_index] += 1
        if rename:
          date = datetime.strptime(chunks[0]['dateTime'], '%d.%m.%Y %H:%M:%S').strftime('%Y%m%d_%H%M')
          clan_tag = ["", ""]
          for playind, player in enumerate(chunks[1][1]):
            if playind == 0:
              first_tag = chunks[1][1][player]['clanAbbrev']
              clan_tag[chunks[1][1][player]['team'] - 1] = first_tag
            elif first_tag != chunks[1][1][player]['clanAbbrev']:
              clan_tag[chunks[1][1][player]['team'] - 1] = chunks[1][1][player]['clanAbbrev']
              break

          winlose=("Loss","Win_")[chunks[1][0]['isWinner']==1]

          clan_tag[0] = clan_tag[0] +"_"*(5-len(clan_tag[0]))
          clan_tag[1] = clan_tag[1] +"_"*(5-len(clan_tag[1]))

# You can change cw filename format here.
          fileo = "cw"+date+"_"+clan_tag[0]+"_"+clan_tag[1]+"_"+winlose+"_"+"-".join(chunks[0]['playerVehicle'].split("-")[1:])+"_"+chunks[0]['mapName']+".wotreplay"

      elif processing <6 and chunks_bitmask&2: #is second Json available? use it to determine win/loss
        dest_index = processing-1
        stats[dest_index] += 1
        if rename:
          date = datetime.strptime(chunks[0]['dateTime'], '%d.%m.%Y %H:%M:%S').strftime('%Y%m%d_%H%M')
          winlose=("Loss","Win_")[chunks[1][0]['isWinner']==1]
          fileo = date+"_"+winlose+"_"+"-".join(chunks[0]['playerVehicle'].split("-")[1:])+"_"+chunks[0]['mapName']+".wotreplay"
      elif processing <6 and chunks_bitmask&4: #is pickle available? use it to determine win/loss
        dest_index = processing-1
        stats[dest_index] += 1
        if rename:
          date = datetime.strptime(chunks[0]['dateTime'], '%d.%m.%Y %H:%M:%S').strftime('%Y%m%d_%H%M')
          winlose=("Loss","Win_")[chunks[2]['common']['winnerTeam'] == chunks[2]['personal']['team']]
          fileo = date+"_"+winlose+"_"+wotdecoder.tank[chunks[2]['personal']['typeCompDescr']][0]+"_"+wotdecoder.maps[chunks[2]['common']['arenaTypeID'] & 65535][0]+".wotreplay"
      elif processing ==6: #bugged, but has valid score and can be renamed
        dest_index = 5
        stats[dest_index] += 1
        if rename:
          date = datetime.strptime(chunks[0]['dateTime'], '%d.%m.%Y %H:%M:%S').strftime('%Y%m%d_%H%M')
          winlose=("Loss","Win_")[chunks[1][0]['isWinner']==1]
          fileo = date+"_"+winlose+"_"+"-".join(chunks[0]['playerVehicle'].split("-")[1:])+"_"+chunks[0]['mapName']+".wotreplay"
      elif processing ==8: #bugged, but has valid pickle, can be renamed and moved to result
        dest_index = 1
        stats[dest_index] += 1
        if rename:
          date = datetime.strptime(chunks[0]['dateTime'], '%d.%m.%Y %H:%M:%S').strftime('%Y%m%d_%H%M')
          winlose=("Loss","Win_")[chunks[2]['common']['winnerTeam'] == chunks[2]['personal']['team']]
          fileo = date+"_"+winlose+"_"+wotdecoder.tank[chunks[2]['personal']['typeCompDescr']][0]+"_"+wotdecoder.maps[chunks[2]['common']['arenaTypeID'] & 65535][0]+".wotreplay"
      elif processing ==1: #incomplete
        dest_index = processing-1
        stats[dest_index] += 1
      elif processing >6: #bugged, can't be renamed
        dest_index = 5
        stats[dest_index] += 1

      fileo = output + os.path.sep + dest[dest_index] + os.path.sep + fileo
      exists = os.path.isfile(fileo)
      ask = 0
      if not overwrite and exists:
        ask = getkeyboard(fileo, files)
        if ask == 2: overwrite = True
      else: ask = 1

      if mode == 0 and ask>0:
          shutil.move(files, fileo)

      elif mode == 1 and ask>0:
          shutil.copy(files, fileo)

      fileb_r = ""
      if b_r >0 and chunks_bitmask&4:
        fileb_r = output + os.path.sep + "b_r" + os.path.sep + str(chunks[2]['arenaUniqueID']) +("",".pickle",".json",".json")[b_r]
        exists = os.path.isfile(fileb_r)
        ask = 0
        if not overwrite and exists:
          ask = getkeyboard(fileb_r)
          if ask == 2: overwrite = True
        else: ask = 1

        if b_r == 1 and ask>0:
          try:
            fo = open(fileb_r,"wb")
            f = open(files, "rb")
            f.seek(8)
            seek_size = struct.unpack("i",f.read(4))[0]
            f.seek(seek_size,1)
            if chunks_bitmask&2: #replay with Pickle can have 2 or 3 blocks, we are only interested in the last one and need to skip others
              seek_size = struct.unpack("i",f.read(4))[0]
              f.seek(seek_size,1)
            third_size = struct.unpack("i",f.read(4))[0]
            third_chunk = f.read(third_size)
            f.close()
          except:
            raise
          else:
            fo.write(third_chunk)
            fo.close()

        elif b_r == 2 and ask>0:
          try:
            fo = open(fileb_r,"w")
          except:
            raise
          else:
            json.dump(chunks[2],fo)
            fo.close()

        elif b_r == 3 and ask>0:
          try:
            fo = open(fileb_r,"w")
          except:
            raise
          else:
            json.dump(chunks[2], fo, sort_keys=True, indent=4)
            fo.close()

      if verbose:
        print ("\n"+files)
        print ("", dest[dest_index], " | ", wotdecoder.status[processing])
        print (fileo)
        print (fileb_r)
      break


  t2 = time.perf_counter()


  print ("\n{0:10} {1:>5}".format("Processed", str(len(listdir))))

  del dest[2]
  stats[2] += stats[3]
  del stats[3]
  for x in range(0, len(dest)):
    print ("{0:10} {1:>5}".format(dest[x], stats[x]))

  print  ("Took %0.3fms"  % ((t2-t1)*1000))

Example 8

Project: bep
Source File: run.py
View license
def main(): # needs to be done as a main func for setuptools to work correctly in creating an executable
    # for the approach i am taking here using nested subparsers:
    # https://mail.python.org/pipermail/python-list/2010-August/585617.html

    # nargs options (see the standalone argparse sketch after this example):
    # (default): by not specifying nargs at all, you just get a string of 1 item
    # = N   where N is some specified number of args
    # = '?' makes a string of one item, and if no args are given, then default is used.
    # = '*' makes a list of all args passed after command and if no args given, then default is used.
    # = '+' makes a list of all args passed after command, but requires at least one arg

    top_parser = argparse.ArgumentParser(description=name.upper(),
                            formatter_class=argparse.RawDescriptionHelpFormatter,
                            #formatter_class=argparse.RawTextHelpFormatter,
                            #add_help=False,
                            epilog=usage.epilog_use)

    #################################
    ### this goes at the top level
    top_parser.add_argument('--version', action='version', version='%(prog)s {}'.format(__version__))
    top_parser.add_argument('-l', '--language', nargs='?', default='python', help=usage.lang_use)

    group = top_parser.add_mutually_exclusive_group()
    group.add_argument("-v", "--verbose", action="store_true", help=usage.verbose_use)
    group.add_argument("-q", "--quiet", action="store_true", help=usage.quiet_use)
    #################################


    def check_for_all_error(cmd_arg):
        if cmd_arg in ['all', 'All', 'ALL', '--All', '--ALL']:
            raise SystemExit("\nError: Did you mean to specifiy --all instead?")


    # If --all is passed in:
    # Skip stuff below if '--all' is specified w/ one of these accepted cmds
    # (this is a seriously hacky brute-force approach!)
    build_up_subparsers = True
    additional_args = []
    cmds_that_accept_all_arg = ['update', 'remove', 'turn_off']
    for cmd in cmds_that_accept_all_arg:
        if cmd in sys.argv:
            for i in sys.argv:  # test for misspecified '--all' command
                check_for_all_error(i)
            if '--all' in sys.argv:
                #print(sys.argv)
                build_up_subparsers = False
                                                                            # TODO add help page for all
                top_parser.add_argument('--all', action='store_true', help=usage.all_use) #metavar="arg")
                args = top_parser.parse_known_args()
                args, additional_args = args
                if len(additional_args) > 1:    # --all may accompany only a single command
                    error_all_arg = "--all can only be called with one of the following args:\n\t"
                    error_all_arg = error_all_arg + '{update, remove, turn_off}'
                    top_parser.error(error_all_arg)
                #else:
                    #additional_args = additional_args[0]


    # To display how to run a command:
    # look at all pkgs and check that passed in package name is one that's already installed
    everything_already_installed = utils.all_pkgs_and_branches_for_all_pkg_types_already_installed(installed_pkgs_dir)
    any_of_this_pkg_already_installed = lambda pkg_to_process: utils.lang_and_pkg_type_and_pkg_and_branches_tuple(
                                                                        pkg_to_process, everything_already_installed)
    cmds_that_can_display_how_to = cmds_that_accept_all_arg + ['turn_on']
    for cmd in cmds_that_can_display_how_to:    # everything except install i think
        if (cmd in sys.argv) and ('--all' not in sys.argv):
            if ('-h' not in sys.argv) and ('--help' not in sys.argv):
                args = top_parser.parse_known_args()
                args, additional_args = args
                if len(additional_args) == 2:
                    additional_args_copy = copy.copy(additional_args)
                    additional_args_copy.remove(cmd) # 2 things in here, one equal to cmd, the other is what we want to see if it's already installed
                    potential_pkg_to_proc = additional_args_copy[0]

                    #print any_of_this_pkg_already_installed(potential_pkg_to_proc)
                    if any_of_this_pkg_already_installed(potential_pkg_to_proc):
                        # should i make a function call out of this instead of relying on the command to be handled below?
                        print(" **** This is how to {} {} ****".format(cmd, potential_pkg_to_proc))
                        build_up_subparsers = False
                    elif potential_pkg_to_proc not in possible_choices:   # else if the other arg/package name passed in is not a pkg_already_installed (& not one of the next possible cmd options)
                        #print an error saying that whatever is passed in cannot be updated/turned_on/etc
                        #b/c it's not currently installed.
                        error_msg = "cannot {} {}: not a currently installed package.\n".format(cmd, potential_pkg_to_proc)
                        error_msg = error_msg + "[Execute `{} list` to see installed packages.]".format(name)
                        top_parser.error(error_msg)
                    #else:   # want this instead b/c otherwise the above hides the help pages
                        #additional_args = []     # set back to empty to avoid the flag at the end of argparse stuff
                #else:
                    #error_msg = "An already installed package name must be passed in with {}".format(cmd)
                    #top_parser.error(error_msg)
                else:
                    additional_args = []     # set back to empty to avoid the flag at the end of argparse stuff


    if build_up_subparsers:
        top_subparser = top_parser.add_subparsers(title='Commands',
                                        description='[ These are the commands that can be passed to %(prog)s ]',
                                        #help=usage.subparser_use)
                                        help='[ Command specific help info ]')
        ### create parser for the "list" command
        # maybe make it so that it can list all branches installed for a specific pkg,
        parser_list = top_subparser.add_parser('list', help=usage.list_use)
        parser_list.add_argument('list_arg', action="store_true", help=usage.list_sub_use) #metavar="arg")


        class CheckIfCanBeInstalled(argparse.Action):
            ''' makes sure a repo to install has both a user_name and repo_name:
                    e.g. ipython/ipython
                or is an actual path to a repo on the local filesystem'''

            def __call__(self, parser, namespace, arg_value, option_string=None):
                pkg_type = parser.prog.split(' ')[-1]
                if utils.check_if_valid_pkg_to_install(arg_value, pkg_type):
                    setattr(namespace, self.dest, arg_value)
                else:
                    if pkg_type == 'local':
                        error_msg = "\n\tIs not a path that exists on local filesystem."
                        raise parser.error(arg_value + error_msg)
                    else:
                        error_msg = '\nneed to make sure a username and repo_name are specified, like so:\n\tusername/repo_name'
                        raise parser.error(arg_value + error_msg)


        ##################################################
        cmd_help = vars(usage.cmd_help)
        for cmd in ['install', 'update', 'remove', 'turn_off', 'turn_on']:
            if cmd == 'install':
                install_parser = top_subparser.add_parser(cmd, help=usage.install_use.format(packages_file),
                                                          formatter_class=argparse.RawTextHelpFormatter)
                install_parser.set_defaults(top_subparser=cmd)
                install_subparser = install_parser.add_subparsers(dest='pkg_type', help=usage.install_sub_use.format(packages_file))
                for c in repo_choices:
                    pkg_type_to_install = install_subparser.add_parser(c)
                    # pkg_type_to_install.set_defaults(pkg_type_to_install=c) # is the same as 'pkg_type' dest above

                    pkg_type_to_install.add_argument('pkg_to_install',   # like ipython/ipython
                                                     action=CheckIfCanBeInstalled)   # actions here to make sure it's legit

                    # local repos don't get to have a branch specified; a branch would need to be checked out first, then installed.
                    #if c != 'local':
                        #pkg_type_to_install.add_argument('-b', '--branch', dest='branch', default=None)#, action=CheckBranch)    # the branch bit is filled out below

                    if c == 'github':
                        pkg_type_to_install.add_argument('repo_type', default='git', nargs='?')

                    elif c == 'bitbucket':
                        pkg_type_to_install.add_argument('repo_type', choices=['git', 'hg'])

                    # elif c == 'local':    # just get the type of repo from the local filesystem so it doesn't have to be specified
                        # pkg_type_to_install.add_argument('repo_type', choices=['git', 'hg', 'bzr'])

                    #elif c == 'remote':    # TODO not implemented but would be specified like so
                        #pkg_type_to_install.add_argument('repo_type', choices=['git', 'hg', 'bzr'])

                    pkg_type_to_install.add_argument('-b', '--branch', dest='branch', default=None)#, action=CheckBranch)    # the branch bit is filled out below

                for c in other_choices:
                    if c == 'packages':
                        pkg_type_to_install = install_subparser.add_parser(c, help=usage.packages_file_use.format(packages_file))

                    #elif c == 'stable': # TODO not implemented
                        #pkg_type_to_install = install_subparser.add_parser(c)
                        #pkg_type_to_install.add_argument('pkg_to_install')  # like ipython
                        ##pkg_type_to_install.add_argument('--pversion')      # TODO like 1.2.1 (add this in later to install different version of a stable pkg)

                # NOTE this seems like a better way to go in the future:
                # install_parser.set_defaults(func=run_install)
                # then run_install would be defined to run the install process (rather than having the conditionals below)
                # def run_install(args):
                #   install_arg = args.install_arg  # would be a list of pkgs or a string of the packages file
                #   ...process the install_arg to decide what to install
                #   ...then do the install
                ##################################################
            else:
                subparser_parser = top_subparser.add_parser(cmd, help=cmd_help['{}_use'.format(cmd)],
                                                            formatter_class=argparse.RawTextHelpFormatter)
                subparser_parser.set_defaults(top_subparser=cmd)

                ### didn't work, not sure why yet
                #all_dest = '{}_ALL'.format(cmd)
                #subparser_parser.add_argument('--all',
                                                ##help=usage.remove_sub_use.format(name=name),    # FIXME not sure why this wouldn't work
                                                ##action=CheckIfALL, action='store_true')

                #cur_args = vars(top_parser.parse_args())
                #print(cur_args)
                #if 'all' in cur_args:
                    #if cur_args['all']:
                        #break
                this_cmds_help = cmd_help['{}_sub_use'.format(cmd)].format(name=name)
                subparsers_subparser = subparser_parser.add_subparsers(dest='pkg_type', help=this_cmds_help)

                for c in repo_choices:
                    pkg_type_to_proc = subparsers_subparser.add_parser(c)
                    pkg_type_to_proc.add_argument('pkg_to_{}'.format(cmd))   # like ipython
                    pkg_type_to_proc.add_argument('-b', '--branch', dest='branch', default=None)  # needs to be specified in script (for installs though it use default name if not specified)

                #for c in other_choices: #TODO
                    ##if c == 'packages':    # packages args only used for installs
                        ##pkg_type_to_proc = subparsers_subparser.add_parser(c)
                    #if c == 'stable':
                        #pkg_type_to_proc = subparsers_subparser.add_parser(c)
                        #pkg_type_to_proc.add_argument('pkg_to_{}'.format(cmd))  # like ipython
                        #pkg_type_to_proc.add_argument('--pversion', help='package version')      # like 1.2.1 (default should be the newest, but can specify older ones)
            ##################################################



        args = top_parser.parse_args()

        # handle branches here
        if ('top_subparser' in args) and (args.top_subparser == 'install'):
            if ('branch' in args) and (args.branch == None):
                if args.pkg_type == 'local':    # for local, grab the currently checked out branch from the repo and set that as the branch to install
                    branch, repo_type = utils.get_checked_out_local_branch(args.pkg_to_install)
                    args.repo_type = repo_type
                else:
                    branch = utils.get_default_branch(args.repo_type)
                args.branch = branch
            elif ('branch' in args) and (args.branch != None):
                if args.pkg_type == 'local':    # for local, don't allow branch to be specified; just use currently checked out branch
                    error_msg = "for `local` packages a branch cannot be specified;\n"
                    error_msg = error_msg + "check out the desired branch from the repo itself, then install."
                    raise top_parser.error(error_msg)
        elif ('top_subparser' in args) and (args.top_subparser != 'install'):
            if ('branch' in args) and (args.branch == None):
                error_msg = 'need to make sure a branch is specified;\n'
                error_msg = error_msg + "[Execute `{} list` to see installed packages and branches.]".format(name)
                raise top_parser.error(error_msg)


    class noise(object):
        verbose = args.verbose
        quiet = args.quiet


    """
    # REMOVE LATER...this just shows what we're dealing with here
    print('##########################################################')
    print(args)
    if additional_args:
        print(additional_args)
    print('##########################################################')
    #raise SystemExit
    """

    #--------------------------------------------------------------------------------------------------------------

    if noise.quiet:
        print('-'*60)



    #######################################################################################################################
    #### install pkg(s)
    kwargs = dict(packages_file=packages_file, packages_file_path=packages_file_path,
                 noise=noise, install_dirs=install_dirs, installed_pkgs_dir=installed_pkgs_dir)

    if ('top_subparser' in args) and (args.top_subparser == 'install'):
        any_pkgs_processed = install.install_cmd(args, **kwargs)
    #######################################################################################################################



    #######################################################################################################################
    #### if nothing is installed, then don't continue on to other commands (since they only process currently installed stuff)
    everything_already_installed = utils.all_pkgs_and_branches_for_all_pkg_types_already_installed(installed_pkgs_dir)
    if not everything_already_installed:
        raise SystemExit('\nNo packages installed.')
    #######################################################################################################################



    #######################################################################################################################
    #### list installed pkg(s) (by each package type)
    elif 'list_arg' in args:
        list_packages.list_cmd(everything_already_installed, noise)
    #######################################################################################################################



    #######################################################################################################################
    # for everything else (update, remove, turn_on/off)
    #elif args:
    #elif ((('top_subparser' in args) and (args.top_subparser in ['update', 'remove', 'turn_on', 'turn_off'])) or
         #(('update' in additional_args) or ('remove' in additional_args) or ('turn_off' in additional_args) or
          #('turn_on' in additional_args))):
    else:   # FIXME not sure this is as good as it could be by just using else instead of something more specific

        actions_to_take = {}
        #top_level_any_pkgs_processed = False
        for lang_dir_name, pkg_type_dict in everything_already_installed.items():
            for pkg_type, pkgs_and_branches in pkg_type_dict.items():
                any_pkgs_processed = False
                #if pkgs_and_branches:  # don't think i need this

                pkgs_status = utils.pkgs_and_branches_for_pkg_type_status(pkgs_and_branches)
                pkgs_and_branches_on = pkgs_status['pkg_branches_on']
                pkgs_and_branches_off = pkgs_status['pkg_branches_off']

                kwargs = dict(lang_dir_name=lang_dir_name, pkg_type=pkg_type, noise=noise, install_dirs=install_dirs,
                            pkgs_and_branches_on=pkgs_and_branches_on, pkgs_and_branches_off=pkgs_and_branches_off,
                            additional_args=additional_args, everything_already_installed=everything_already_installed)


                if ('pkg_to_update' in args) or ('update' in additional_args):
                    any_pkgs_processed = update_packages.update_cmd(args, **kwargs)

                elif ('pkg_to_remove' in args) or ('remove' in additional_args):
                    any_pkgs_processed = remove_packages.remove_cmd(args, **kwargs)

                elif ('pkg_to_turn_off' in args) or ('turn_off' in additional_args):
                    any_pkgs_processed = turn_off.turn_off_cmd(args, **kwargs)

                elif ('pkg_to_turn_on' in args) or ('turn_on' in additional_args):
                    any_pkgs_processed = turn_on.turn_on_cmd(args, **kwargs)


                if any_pkgs_processed:
                    #top_level_any_pkgs_processed = True #+= 1
                    if type(any_pkgs_processed) == dict:    # it will be a dict when a pkg didn't actually get processed, but has commands to get processed
                        actions_to_take.update(any_pkgs_processed)

        #if not top_level_any_pkgs_processed: # NOTE KEEP for now, but i don't think this will ever get hit?
            #utils.when_not_quiet_mode('\n[ No action performed ]'.format(pkg_type), noise.quiet)


        if actions_to_take:

            if len(actions_to_take) == 1:
                alert, cmd = actions_to_take.items()[0]
                option = '\n* {}\n{}\n'.format(alert, cmd)
                print(option)

                if not (cmd.startswith('****') and cmd.endswith('****')):

                    print('-'*60)
                    msg = "The above version is installed, would you like to run the\ncommand [y/N]? "
                    response = raw_input(msg)
                    if response:
                        response = response.lower()
                        if response in ['y', 'yes']:
                            utils.cmd_output(cmd)
                        elif response in ['n', 'no']:
                            print("\nBye then.")
                        else:
                            raise SystemExit("\nError: {}: not valid input".format(response))
                    else:
                        print("\nOk, bye then.")


            elif len(actions_to_take) > 1:

                actions_to_take_with_num_keys = {}  # maps a number key to each (alert, cmd) pair taken from actions_to_take
                for num, alert_key in enumerate(actions_to_take, start=1): # actions_to_take is a dict with alert, cmd (key, val) pairs
                    actions_to_take_with_num_keys[num] = (alert_key, actions_to_take[alert_key])
                actions_to_take_with_num_keys = OrderedDict(sorted(actions_to_take_with_num_keys.items(), key=lambda t: t[0]))  # sorted by key (which are nums)

                for num_key, alert_and_cmd_tuple_val in actions_to_take_with_num_keys.items():
                    if num_key == 1:
                        print('')
                    alert, cmd =  alert_and_cmd_tuple_val
                    option = '{}. {}\n{}\n'.format(num_key, alert, cmd)
                    print(option)

                print('-'*60)
                msg = "The versions above are installed.  If you'd like to run the command\n"
                msg = msg + "for an item, enter the number (if not, then just hit enter to exit). "
                response = raw_input(msg)
                if response:
                    try:
                        response = int(response)
                    except ValueError:
                        raise SystemExit("\nError: invalid response: {}".format(response))
                    if response in range(1, len(actions_to_take_with_num_keys)+1):
                        #print response # now run the command
                        # Could either 1. open a subprocess and run from the command line -- easy way
                        # or 2. try to pass back into the the command that got us here -- better way

                        # Number 2 would involve something like this with updating the kwargs:
                        #kwargs = dict(lang_dir_name=lang_dir_name, pkg_type=pkg_type, noise=noise, install_dirs=install_dirs,
                                    #pkgs_and_branches_on=pkgs_and_branches_on, pkgs_and_branches_off=pkgs_and_branches_off,
                                    #additional_args=additional_args, everything_already_installed=everything_already_installed)
                        #actions.update_action(args, **kwargs)

                        # Doing number 1 above, just to get it working, though 2 would probably be better in long run.
                        cmd = actions_to_take_with_num_keys[response][1]    # this gets the command from the alert, cmd tuple
                        if (cmd.startswith('****') and cmd.endswith('****')):
                            print("\nNo command to process,\n{}".format(cmd))
                        else:
                            utils.cmd_output(cmd)
                    else:
                        raise SystemExit("\nError: invalid response: {}".format(response))
                else:
                    print("\nOk, bye then.")

Example 9

Project: python3-trepan
Source File: options.py
View license
def process_options(debugger_name, pkg_version, sys_argv, option_list=None):
    """Handle debugger options. Set `option_list' if you are writing
    another main program and want to extend the existing set of debugger
    options.

    The options dictionary from optparser is returned. sys_argv is
    also updated."""
    usage_str="""%prog [debugger-options] [python-script [script-options...]]

    Runs the extended python debugger"""

    # serverChoices = ('TCP','FIFO', None)

    optparser = OptionParser(usage=usage_str, option_list=option_list,
                             version="%%prog version %s" % pkg_version)

    optparser.add_option("-X", "--trace", dest="linetrace",
                         action="store_true", default=False,
                         help="Show lines before executing them. "
                         "This option also sets --batch")
    optparser.add_option("-F", "--fntrace", dest="fntrace",
                         action="store_true", default=False,
                         help="Show functions before executing them. "
                         "This option also sets --batch")
    optparser.add_option("--basename", dest="basename",
                         action="store_true", default=False,
                         help="Filenames strip off basename, "
                         "(e.g. for regression tests)"
                         )
    #     optparser.add_option("--batch", dest="noninteractive",
    #                          action="store_true", default=False,
    #                          help="Don't run interactive commands shell on "+
    #                          "stops.")
    optparser.add_option("--client", dest="client",
                         action='store_true',
                         help="Connect to an existing debugger process "
                         "started with the --server option. "
                         "See options for client.")
    optparser.add_option("-x", "--command", dest="command",
                         action="store", type='string', metavar='FILE',
                         help="Execute commands from FILE.")
    optparser.add_option("--cd", dest="cd",
                         action="store", type='string', metavar='DIR',
                         help="Change current directory to DIR.")
    optparser.add_option("--confirm", dest="confirm",
                         action="store_true", default=True,
                         help="Confirm potentially dangerous operations")
    optparser.add_option("--dbg_trepan", dest="dbg_trepan",
                         action="store_true", default=False,
                         help="Debug the debugger")
    optparser.add_option("--different", dest="different",
                         action="store_true", default=True,
                         help="Consecutive stops should have "
                         "different positions")
    #     optparser.add_option("--error", dest="errors", metavar='FILE',
    #                          action="store", type='string',
    #                          help="Write debugger's error output "
    #                          + "(stderr) to FILE")
    optparser.add_option("-e", "--exec", dest="execute", type="string",
                         help="list of debugger commands to " +
                         "execute. Separate the commands with ;;")

    optparser.add_option("-H", "--host", dest="host", default='127.0.0.1',
                         action="store", type='string', metavar='IP-OR-HOST',
                         help="connect IP or host name. "
                         "Only valid if --client option given.")

    optparser.add_option("--highlight", dest="highlight",
                         action="store", type='string',
                         metavar='{light|dark|plain}',
                         default='light',
                         help="Use syntax and terminal highlight output. "
                         "'plain' is no highlight")

    optparser.add_option("--private", dest="private",
                         action='store_true', default=False,
                         help="Don't register this as a global debugger")

    optparser.add_option("--main", dest="main",
                         action="store_true", default=True,
                         help="First stop should be in __main__"
                         )
    optparser.add_option("--no-main", dest="main",
                         action="store_false", default=True,
                         help="First stop should be in __main__"
                         )
    optparser.add_option("--post-mortem", dest="post_mortem",
                         action='store_true', default=True,
                         help=("Enter debugger on an uncaught (fatal) "
                               "exception"))

    optparser.add_option("--no-post-mortem", dest="post_mortem",
                         action='store_false', default=True,
                         help=("Don't enter debugger on an uncaught (fatal) "
                               "exception"))

    optparser.add_option("-n", "--nx", dest="noexecute",
                         action="store_true", default=False,
                         help=("Don't execute commands found in any "
                               "initialization files"))

    optparser.add_option("-o", "--output", dest="output", metavar='FILE',
                         action="store", type='string',
                         help=("Write debugger's output (stdout) "
                               "to FILE"))
    optparser.add_option("-P", "--port", dest="port", default=1027,
                         action="store", type='int',
                         help="Use TCP port number NUMBER for "
                         "out-of-process connections.")

    optparser.add_option("--server", dest="server",
                         action='store_true',
                         help="Out-of-process server connection mode")

    # optparser.add_option("--style", dest="style",
    #                      action="store", type='string',
    #                      metavar='*pygments-style*',
    #                      default=None,
    #                      help=("Pygments style; 'none' "
    #                            "uses 8-color rather than 256-color terminal"))

    optparser.add_option("--sigcheck", dest="sigcheck",
                         action="store_true", default=False,
                         help="Set to watch for signal handler changes")
    optparser.add_option("-t", "--target", dest="target",
                         help=("Specify a target to connect to. Arguments"
                               " should be of form, 'protocol address'.")),
    optparser.add_option("--from_ipython", dest='from_ipython', action='store_true',
                         default=False, help="Called from inside ipython")

    # The annotate option produces annotations, used in trepan.el for
    # better emacs integration. Annotations are similar in purpose to
    # those of GDB (see that manual for a description), although the
    # syntax is different.  They have the following format:
    #
    # ^Z^Zannotation-name
    # <arbitrary text>
    # ^Z^Z
    #
    # where ^Z is the ctrl-Z character, and "annotation-name" is the name of
    # the annotation. A line with only two ^Z ends the annotation (no nesting
    # allowed). See trepan.el for the usage.
    optparser.add_option("--annotate", default=0, type="int",
                         help="Use annotations to work inside emacs")

    # Set up to stop on the first non-option because that's the name
    # of the script to be debugged; the arguments following it are
    # that script's options and should be left untouched.  We would
    # not want to interpret an option meant for the script, e.g. --help,
    # as one of our own.

    optparser.disable_interspersed_args()

    sys.argv = list(sys_argv)
    # FIXME: why does this mess up integration tests?
    # (opts, sys.argv) = optparser.parse_args(sys_argv)
    (opts, sys.argv) = optparser.parse_args()
    dbg_opts = {'from_ipython': opts.from_ipython}

    # Handle debugger startup command files: --nx (-n) and --command.
    dbg_initfiles = []
    if not opts.noexecute:
        add_startup_file(dbg_initfiles)

    # As per gdb, first we execute user initialization files and then
    # we execute any file specified via --command.
    if opts.command:
        dbg_initfiles.append(opts.command)
        pass

    dbg_opts['proc_opts'] = {'initfile_list': dbg_initfiles}

    if opts.cd:
        os.chdir(opts.cd)
        pass

    if opts.output:
        try:
            dbg_opts['output'] = Moutput.DebuggerUserOutput(opts.output)
        except IOError:
            _, err, _ = sys.exc_info()
            (errno, strerror) = err.args
            print("I/O error in opening debugger output file %s" % opts.output)
            print("error(%s): %s" % (errno, strerror))
        except:
            print("Unexpected error in opening debugger output file %s" %
                  opts.output)
            print(sys.exc_info()[0])
            sys.exit(2)
            pass
        pass

    return opts, dbg_opts, sys.argv
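
The disable_interspersed_args() call above is what keeps the debugged script's options intact: optparse stops consuming options at the first positional argument. A small standalone sketch (not tied to trepan) of that behavior:

from optparse import OptionParser

parser = OptionParser()
parser.add_option("-X", "--trace", dest="linetrace",
                  action="store_true", default=False)
parser.disable_interspersed_args()

# everything after the first positional ('myscript.py') is left alone,
# even '--help', which the parser would otherwise swallow
opts, rest = parser.parse_args(["-X", "myscript.py", "--help", "-X"])
print(opts.linetrace)   # True
print(rest)             # ['myscript.py', '--help', '-X']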

Example 10

Project: gajim
Source File: gajim-remote.py
View license
	def __init__(self):
		self.argv_len = len(sys.argv)
		# define commands dict. Prototype :
		# {
		#	'command': [comment, [list of arguments] ]
		# }
		#
		# each argument is defined as a tuple:
		#    (argument name, help on argument, is mandatory)
		#
		self.commands = {
			'help':[
					_('Shows help on a specific command'),
					[
						#User gets help for the command, specified by this parameter
						(_('command'),
						_('show help on command'), False)
					]
				],
			'toggle_roster_appearance' : [
					_('Shows or hides the roster window'),
					[]
				],
			'show_next_pending_event': [
					_('Pops up a window with the next pending event'),
					[]
				],
			'list_contacts': [
					_('Prints a list of all contacts in the roster. Each contact '
					'appears on a separate line'),
					[
						(_('account'), _('show only contacts of the given account'),
							False)
					]

				],
			'list_accounts': [
					_('Prints a list of registered accounts'),
					[]
				],
			'change_status': [
					_('Changes the status of account or accounts'),
					[
#offline, online, chat, away, xa, dnd, invisible should not be translated
						(_('status'), _('one of: offline, online, chat, away, xa, dnd, invisible '), True),
						(_('message'), _('status message'), False),
						(_('account'), _('change status of account "account". '
		'If not specified, try to change status of all accounts that have '
		'"sync with global status" option set'), False)
					]
				],
			'open_chat': [
					_('Shows the chat dialog so that you can send messages to a contact'),
					[
						('jid', _('JID of the contact that you want to chat with'),
							True),
						(_('account'), _('if specified, contact is taken from the '
						'contact list of this account'), False)
					]
				],
			'send_chat_message':[
					_('Sends new chat message to a contact in the roster. Both OpenPGP key '
					'and account are optional. If you want to set only \'account\', '
					'without \'OpenPGP key\', just set \'OpenPGP key\' to \'\'.'),
					[
						('jid', _('JID of the contact that will receive the message'), True),
						(_('message'), _('message contents'), True),
						(_('pgp key'), _('if specified, the message will be encrypted '
							'using this public key'), False),
						(_('account'), _('if specified, the message will be sent '
							'using this account'), False),
					]
				],
			'send_single_message':[
					_('Sends new single message to a contact in the roster. Both OpenPGP key '
					'and account are optional. If you want to set only \'account\', '
					'without \'OpenPGP key\', just set \'OpenPGP key\' to \'\'.'),
					[
						('jid', _('JID of the contact that will receive the message'), True),
						(_('subject'), _('message subject'), True),
						(_('message'), _('message contents'), True),
						(_('pgp key'), _('if specified, the message will be encrypted '
							'using this public key'), False),
						(_('account'), _('if specified, the message will be sent '
							'using this account'), False),
					]
				],
			'send_groupchat_message':[
					_('Sends new message to a groupchat you\'ve joined.'),
					[
						('room_jid', _('JID of the room that will receive the message'), True),
						(_('message'), _('message contents'), True),
						(_('account'), _('if specified, the message will be sent '
							'using this account'), False),
					]
				],
			'contact_info': [
					_('Gets detailed info on a contact'),
					[
						('jid', _('JID of the contact'), True)
					]
				],
			'account_info': [
					_('Gets detailed info on an account'),
					[
						('account', _('Name of the account'), True)
					]
				],
			'send_file': [
					_('Sends file to a contact'),
					[
						(_('file'), _('File path'), True),
						('jid', _('JID of the contact'), True),
						(_('account'), _('if specified, file will be sent using this '
							'account'), False)
					]
				],
			'prefs_list': [
					_('Lists all preferences and their values'),
					[ ]
				],
			'prefs_put': [
					_('Sets value of \'key\' to \'value\'.'),
					[
						(_('key=value'), _('\'key\' is the name of the preference, '
							'\'value\' is the value to set it to'), True)
					]
				],
			'prefs_del': [
					_('Deletes a preference item'),
					[
						(_('key'), _('name of the preference to be deleted'), True)
					]
				],
			'prefs_store': [
					_('Writes the current state of Gajim preferences to the .config '
						'file'),
					[ ]
				],
			'remove_contact': [
					_('Removes contact from roster'),
					[
						('jid', _('JID of the contact'), True),
						(_('account'), _('if specified, contact is taken from the '
							'contact list of this account'), False)

					]
				],
			'add_contact': [
					_('Adds contact to roster'),
					[
						(_('jid'), _('JID of the contact'), True),
						(_('account'), _('Adds new contact to this account'), False)
					]
				],

			'get_status': [
				_('Returns current status (the global one unless account is specified)'),
					[
						(_('account'), '', False)
					]
				],

			'get_status_message': [
				_('Returns current status message (the global one unless account is specified)'),
					[
						(_('account'), '', False)
					]
				],

			'get_unread_msgs_number': [
				_('Returns number of unread messages'),
					[ ]
				],
			'start_chat': [
				_('Opens \'Start Chat\' dialog'),
					[
						(_('account'), _('Starts chat, using this account'), True)
					]
				],
			'send_xml': [
					_('Sends custom XML'),
					[
						('xml', _('XML to send'), True),
						('account', _('Account in which the xml will be sent; '
						'if not specified, xml will be sent to all accounts'),
							False)
					]
				],
			'handle_uri': [
					_('Handle an xmpp:/ URI'),
					[
						(_('uri'), _('URI to handle'), True),
						(_('account'), _('Account in which you want to handle it'),
							False)
					]
				],
			'join_room': [
					_('Join a MUC room'),
					[
						(_('room'), _('Room JID'), True),
						(_('nick'), _('Nickname to use'), False),
						(_('password'), _('Password to enter the room'), False),
						(_('account'), _('Account from which you want to enter the '
							'room'), False)
					]
				],
			'check_gajim_running':[
					_('Check if Gajim is running'),
					[]
				],
			'toggle_ipython' : [
					_('Shows or hides the ipython window'),
					[]
				],

			}

		self.sbus = None
		if self.argv_len < 2 or sys.argv[1] not in self.commands.keys():
			# no args or bad args
			send_error(self.compose_help())
		self.command = sys.argv[1]
		if self.command == 'help':
			if self.argv_len == 3:
				print self.help_on_command(sys.argv[2]).encode(PREFERRED_ENCODING)
			else:
				print self.compose_help().encode(PREFERRED_ENCODING)
			sys.exit(0)
		if self.command == 'handle_uri':
			self.handle_uri()
		if self.command == 'check_gajim_running':
			print self.check_gajim_running()
			sys.exit(0)
		self.init_connection()
		self.check_arguments()

		if self.command == 'contact_info':
			if self.argv_len < 3:
				send_error(_('Missing argument "contact_jid"'))

		try:
			res = self.call_remote_method()
		except exceptions.ServiceNotAvailable:
			# At this point an error message has already been displayed
			sys.exit(1)
		else:
			self.print_result(res)
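The pattern above, a dict describing every command plus a dispatch on sys.argv[1], is easy to reuse outside gajim. A minimal, self-contained sketch (the command names and handlers are made up for illustration):

import sys

def cmd_help(args):
    print("available commands: help, echo")

def cmd_echo(args):
    print(" ".join(args))

# command name -> (description, handler)
COMMANDS = {
    "help": ("show this help", cmd_help),
    "echo": ("print the remaining arguments", cmd_echo),
}

if len(sys.argv) < 2 or sys.argv[1] not in COMMANDS:
    print("usage: prog <command> [arguments]")
    sys.exit(2)

COMMANDS[sys.argv[1]][1](sys.argv[2:])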

Example 11

Project: kaggle-Rain
Source File: NNregression_v1.py
View license
def do_regression(num_epochs=60, # No. of epochs to train
                  init_file=None,  # Saved parameters to initialise training
                  epoch_size=680780,  # Whole dataset size
                  valid_size=34848,
                  train_batch_multiple=10637,  # No. of minibatches per batch
                  valid_batch_multiple=1089,  # No. of minibatches per batch
                  train_minibatch_size=64, 
                  valid_minibatch_size=32,
                  eval_multiple=50,  # No. of minibatches to ave. in report
                  save_model=True,
                  input_width=19,
                  rng_seed=100009,
                  cross_val=0,  # Cross-validation subset label
                  dataver=1,  # Label for different runs/architectures/etc
                  rate_init=1.0,
                  rate_decay=0.999983):

    ###################################################
    ################# 0. User inputs ##################
    ###################################################
    for i in range(1,len(sys.argv)):
        if sys.argv[i].startswith('-'):
            option = sys.argv[i][1:]
            if option == 'i': init_file = sys.argv[i+1]
            elif option[0:2] == 'v=' : dataver = int(option[2:])
            elif option[0:3] == 'cv=' : cross_val = int(option[3:])
            elif option[0:3] == 'rs=' : rng_seed = int(option[3:])
            elif option[0:3] == 'ri=' : rate_init = np.float32(option[3:])
            elif option[0:3] == 'rd=' : rate_decay = np.float32(option[3:])
                                
    print("Running with dataver %s" % (dataver))
    print("Running with cross_val %s" % (cross_val))
    
    
    ###################################################
    ############# 1. Housekeeping values ##############
    ###################################################
    # Batch size is possibly not equal to epoch size due to memory limits
    train_batch_size = train_batch_multiple*train_minibatch_size 
    assert epoch_size >= train_batch_size
    
    # Number of times we expect the training/validation generator to be called
    max_train_gen_calls = (num_epochs*epoch_size)//train_batch_size 

    # Number of evaluations (total minibatches / eval_multiple)
    num_eval = max_train_gen_calls*train_batch_multiple / eval_multiple
    
    
    ###################################################
    ###### 2. Define model and theano variables #######
    ###################################################
    if rng_seed is not None:
        print("Setting RandomState with seed=%i" % (rng_seed))
        rng = np.random.RandomState(rng_seed)
        set_rng(rng)
    
    print("Defining variables...")
    index = T.lscalar() # Minibatch index
    x = T.tensor3('x') # Inputs 
    y = T.fvector('y') # Target
    
    print("Defining model...")
    network_0 = build_1Dregression_v1(
                        input_var=x,
                        input_width=input_width,
                        nin_units=12,
                        h_num_units=[64,128,256,128,64],
                        h_grad_clip=1.0,
                        output_width=1
                        )
                        
    if init_file is not None:
        print("Loading initial model parametrs...")
        init_model = np.load(init_file)
        init_params = init_model[init_model.files[0]]           
        LL.set_all_param_values([network_0], init_params)
        
    
    ###################################################                                
    ################ 3. Import data ###################
    ###################################################
    # Loading data generation model parameters
    print("Defining shared variables...")
    train_set_y = theano.shared(np.zeros(1, dtype=theano.config.floatX),
                                borrow=True) 
    train_set_x = theano.shared(np.zeros((1,1,1), dtype=theano.config.floatX),
                                borrow=True)
    
    valid_set_y = theano.shared(np.zeros(1, dtype=theano.config.floatX),
                                borrow=True)
    valid_set_x = theano.shared(np.zeros((1,1,1), dtype=theano.config.floatX),
                                borrow=True)
    
    # Validation data (pick a single augmented instance, rand0 here)
    print("Creating validation data...")    
    chunk_valid_data = np.load(
        "./valid/data_valid_augmented_cv%s_t%s_rand0.npy" 
        % (cross_val, input_width)
        ).astype(theano.config.floatX)
    chunk_valid_answers = np.load(
        "./valid/data_valid_expected_cv%s.npy" 
        % (cross_val)
        ).astype(theano.config.floatX)     
    
    print "chunk_valid_answers.shape", chunk_valid_answers.shape
    print("Assigning validation data...")
    valid_set_y.set_value(chunk_valid_answers[:])
    valid_set_x.set_value(chunk_valid_data.transpose(0,2,1))
    
    # Create output directory
    if not os.path.exists("output_cv%s_v%s" % (cross_val, dataver)):
        os.makedirs("output_cv%s_v%s" % (cross_val, dataver))
    
    
    ###################################################                                
    ########### 4. Create Loss expressions ############
    ###################################################
    print("Defining loss expressions...")
    prediction_0 = LL.get_output(network_0) 
    train_loss = aggregate(T.abs_(prediction_0 - y.dimshuffle(0,'x')))
    
    valid_prediction_0 = LL.get_output(network_0, deterministic=True)
    valid_loss = aggregate(T.abs_(valid_prediction_0 - y.dimshuffle(0,'x')))
    
    
    ###################################################                                
    ############ 5. Define update method  #############
    ###################################################
    print("Defining update choices...")
    params = LL.get_all_params(network_0, trainable=True)
    learn_rate = T.scalar('learn_rate', dtype=theano.config.floatX)
    
    updates = lasagne.updates.adadelta(train_loss, params,
                                       learning_rate=learn_rate)
    
    
    ###################################################                                
    ######### 6. Define train/valid functions #########
    ###################################################    
    print("Defining theano functions...")
    train_model = theano.function(
        [index, learn_rate],
        train_loss,
        updates=updates,
        givens={
            x: train_set_x[(index*train_minibatch_size):
                            ((index+1)*train_minibatch_size)],
            y: train_set_y[(index*train_minibatch_size):
                            ((index+1)*train_minibatch_size)]  
        }
    )
    
    validate_model = theano.function(
        [index],
        valid_loss,
        givens={
            x: valid_set_x[index*valid_minibatch_size:
                            (index+1)*valid_minibatch_size],
            y: valid_set_y[index*valid_minibatch_size:
                            (index+1)*valid_minibatch_size]
        }
    )
    
    
    ###################################################                                
    ################ 7. Begin training ################
    ###################################################  
    print("Begin training...")
    sys.stdout.flush()
    
    cum_iterations = 0
    this_train_loss = 0.0
    this_valid_loss = 0.0
    best_valid_loss = np.inf
    best_iter = 0
    
    train_eval_scores = np.empty(num_eval)
    valid_eval_scores = np.empty(num_eval)
    eval_index = 0
    aug_index = 0
    
    for batch in xrange(max_train_gen_calls):
        start_time = time.time()        
        chunk_train_data = np.load(
            "./train/data_train_augmented_cv%s_t%s_rand%s.npy" %
            (cross_val, input_width, aug_index)
            ).astype(theano.config.floatX)
        chunk_train_answers = np.load(
            "./train/data_train_expected_cv%s.npy" % 
            (cross_val)
            ).astype(theano.config.floatX)     
            
        train_set_y.set_value(chunk_train_answers[:])
        train_set_x.set_value(chunk_train_data.transpose(0, 2, 1))
        
        # Iterate over minibatches in each batch
        for mini_index in xrange(train_batch_multiple):
            this_rate = np.float32(rate_init*(rate_decay**cum_iterations))
            this_train_loss += train_model(mini_index, this_rate)
            cum_iterations += 1
            
            # Report loss 
            if (cum_iterations % eval_multiple == 0):
                this_train_loss = this_train_loss / eval_multiple
                this_valid_loss = np.mean([validate_model(i) for
                                    i in xrange(valid_batch_multiple)])
                train_eval_scores[eval_index] = this_train_loss
                valid_eval_scores[eval_index] = this_valid_loss
                
                # Save report every five evaluations
                if ((eval_index+1) % 5 == 0):
                    np.savetxt(
                        "output_cv%s_v%s/training_scores.txt" %
                        (cross_val, dataver),
                         train_eval_scores, fmt="%.5f"
                         )
                    np.savetxt(
                        "output_cv%s_v%s/validation_scores.txt" %
                        (cross_val, dataver),
                         valid_eval_scores, fmt="%.5f"
                         )
                    np.savetxt(
                        "output_cv%s_v%s/last_learn_rate.txt" %
                        (cross_val, dataver),
                        [np.array(this_rate)], fmt="%.5f"
                        )
                
                # Save model if best validation score
                if (this_valid_loss < best_valid_loss):  
                    best_valid_loss = this_valid_loss
                    best_iter = cum_iterations-1
                    
                    if save_model:
                        np.savez("output_cv%s_v%s/model.npz" % 
                                 (cross_val, dataver),
                                 LL.get_all_param_values(network_0))
                    
                # Reset evaluation reports
                eval_index += 1
                this_train_loss = 0.0
                this_valid_loss = 0.0
                
        aug_index += 1
        
        end_time = time.time()
        print("Computing time for batch %d: %f" % (batch, end_time-start_time))
        
    print("Best validation loss %f after %d epochs" %
          (best_valid_loss, (best_iter*train_minibatch_size//epoch_size)))
    
    del train_set_x, train_set_y, valid_set_x, valid_set_y
    gc.collect()
    
    return None
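The hand-rolled scan over sys.argv above recognises prefixed options such as -v=2 or -cv=1 and lets them override the function's keyword defaults. A rough argparse equivalent, assuming the same names and defaults are wanted (a sketch, not the project's actual interface):

import argparse

# Mirrors the manual -i / -v= / -cv= / -rs= / -ri= / -rd= scan above
parser = argparse.ArgumentParser()
parser.add_argument("-i", dest="init_file", default=None)
parser.add_argument("-v", dest="dataver", type=int, default=1)
parser.add_argument("-cv", dest="cross_val", type=int, default=0)
parser.add_argument("-rs", dest="rng_seed", type=int, default=100009)
parser.add_argument("-ri", dest="rate_init", type=float, default=1.0)
parser.add_argument("-rd", dest="rate_decay", type=float, default=0.999983)

args = parser.parse_args()  # e.g. prog -v 2 -cv 1
print("dataver=%s cross_val=%s" % (args.dataver, args.cross_val))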

Example 12

Project: kaggle-Rain
Source File: NNregression_v2.py
View license
def do_regression(num_epochs=60, # No. of epochs to train
                  init_file=None,  # Saved parameters to initialise training
                  epoch_size=680780,  # Whole dataset size
                  valid_size=34848, # Size of validation holdout set
                  train_batch_multiple=10637,  # No. of minibatches per batch
                  valid_batch_multiple=1089,  # No. of minibatches per batch
                  train_minibatch_size=64,
                  valid_minibatch_size=32,
                  eval_multiple=50,  # No. of minibatches to ave. in report
                  save_model=True,
                  input_width=19,
                  rng_seed=100005,
                  cross_val=0,  # Cross-validation subset label
                  dataver=2,  # Label for different runs/architectures/etc
                  rate_init=1.0,
                  rate_decay=0.999983):

    ###################################################
    ################# 0. User inputs ##################
    ###################################################
    for i in range(1,len(sys.argv)):
        if sys.argv[i].startswith('-'):
            option = sys.argv[i][1:]
            if option == 'i': init_file = sys.argv[i+1]
            elif option[0:2] == 'v=' : dataver = int(option[2:])
            elif option[0:3] == 'cv=' : cross_val = int(option[3:])
            elif option[0:3] == 'rs=' : rng_seed = int(option[3:])
            elif option[0:3] == 'ri=' : rate_init = np.float32(option[3:])
            elif option[0:3] == 'rd=' : rate_decay = np.float32(option[3:])
                                
    print("Running with dataver %s" % (dataver))
    print("Running with cross_val %s" % (cross_val))
    
    
    ###################################################
    ############# 1. Housekeeping values ##############
    ###################################################
    # Batch size is possibly not equal to epoch size due to memory limits
    train_batch_size = train_batch_multiple*train_minibatch_size 
    assert epoch_size >= train_batch_size
    
    # Number of times we expect the training/validation generator to be called
    max_train_gen_calls = (num_epochs*epoch_size)//train_batch_size 

    # Number of evaluations (total minibatches / eval_multiple)
    num_eval = max_train_gen_calls*train_batch_multiple / eval_multiple
    
    
    ###################################################
    ###### 2. Define model and theano variables #######
    ###################################################
    if rng_seed is not None:
        print("Setting RandomState with seed=%i" % (rng_seed))
        rng = np.random.RandomState(rng_seed)
        set_rng(rng)
    
    print("Defining variables...")
    index = T.lscalar() # Minibatch index
    x = T.tensor3('x') # Inputs 
    y = T.fvector('y') # Target
    
    print("Defining model...")
    network_0 = build_1Dregression_v2(
                        input_var=x,
                        input_width=input_width,
                        h_num_units=[120,120,120],
                        h_grad_clip=1.0,
                        output_width=1
                        )
                        
    if init_file is not None:
        print("Loading initial model parametrs...")
        init_model = np.load(init_file)
        init_params = init_model[init_model.files[0]]           
        LL.set_all_param_values([network_0], init_params)
        
    
    ###################################################                                
    ################ 3. Import data ###################
    ###################################################
    # Loading data generation model parameters
    print("Defining shared variables...")
    train_set_y = theano.shared(np.zeros(1, dtype=theano.config.floatX),
                                borrow=True) 
    train_set_x = theano.shared(np.zeros((1,1,1), dtype=theano.config.floatX),
                                borrow=True)
    
    valid_set_y = theano.shared(np.zeros(1, dtype=theano.config.floatX),
                                borrow=True)
    valid_set_x = theano.shared(np.zeros((1,1,1), dtype=theano.config.floatX),
                                borrow=True)
    
    # Validation data (pick a single augmented instance, rand0 here)
    print("Creating validation data...")    
    chunk_valid_data = np.load(
        "./valid/data_valid_augmented_cv%s_t%s_rand0.npy" 
        % (cross_val, input_width)
        ).astype(theano.config.floatX)
    chunk_valid_answers = np.load(
        "./valid/data_valid_expected_cv%s.npy" 
        % (cross_val)
        ).astype(theano.config.floatX)     
    
    print "chunk_valid_answers.shape", chunk_valid_answers.shape
    print("Assigning validation data...")
    valid_set_y.set_value(chunk_valid_answers[:])
    valid_set_x.set_value(chunk_valid_data.transpose(0,2,1))
    
    # Create output directory
    if not os.path.exists("output_cv%s_v%s" % (cross_val, dataver)):
        os.makedirs("output_cv%s_v%s" % (cross_val, dataver))
    
    
    ###################################################                                
    ########### 4. Create Loss expressions ############
    ###################################################
    print("Defining loss expressions...")
    prediction_0 = LL.get_output(network_0) 
    train_loss = aggregate(T.abs_(prediction_0 - y.dimshuffle(0,'x')))
    
    valid_prediction_0 = LL.get_output(network_0, deterministic=True)
    valid_loss = aggregate(T.abs_(valid_prediction_0 - y.dimshuffle(0,'x')))
    
    
    ###################################################                                
    ############ 5. Define update method  #############
    ###################################################
    print("Defining update choices...")
    params = LL.get_all_params(network_0, trainable=True)
    learn_rate = T.scalar('learn_rate', dtype=theano.config.floatX)
    
    updates = lasagne.updates.adadelta(train_loss, params,
                                       learning_rate=learn_rate)
    
    
    ###################################################                                
    ######### 6. Define train/valid functions #########
    ###################################################    
    print("Defining theano functions...")
    train_model = theano.function(
        [index, learn_rate],
        train_loss,
        updates=updates,
        givens={
            x: train_set_x[(index*train_minibatch_size):
                            ((index+1)*train_minibatch_size)],
            y: train_set_y[(index*train_minibatch_size):
                            ((index+1)*train_minibatch_size)]  
        }
    )
    
    validate_model = theano.function(
        [index],
        valid_loss,
        givens={
            x: valid_set_x[index*valid_minibatch_size:
                            (index+1)*valid_minibatch_size],
            y: valid_set_y[index*valid_minibatch_size:
                            (index+1)*valid_minibatch_size]
        }
    )
    
    
    ###################################################                                
    ################ 7. Begin training ################
    ###################################################  
    print("Begin training...")
    sys.stdout.flush()
    
    cum_iterations = 0
    this_train_loss = 0.0
    this_valid_loss = 0.0
    best_valid_loss = np.inf
    best_iter = 0
    
    train_eval_scores = np.empty(num_eval)
    valid_eval_scores = np.empty(num_eval)
    eval_index = 0
    aug_index = 0
    
    for batch in xrange(max_train_gen_calls):
        start_time = time.time()        
        chunk_train_data = np.load(
            "./train/data_train_augmented_cv%s_t%s_rand%s.npy" %
            (cross_val, input_width, aug_index)
            ).astype(theano.config.floatX)
        chunk_train_answers = np.load(
            "./train/data_train_expected_cv%s.npy" % 
            (cross_val)
            ).astype(theano.config.floatX)     
            
        train_set_y.set_value(chunk_train_answers[:])
        train_set_x.set_value(chunk_train_data.transpose(0, 2, 1))
        
        # Iterate over minibatches in each batch
        for mini_index in xrange(train_batch_multiple):
            this_rate = np.float32(rate_init*(rate_decay**cum_iterations))
            this_train_loss += train_model(mini_index, this_rate)
            cum_iterations += 1
            
            # Report loss 
            if (cum_iterations % eval_multiple == 0):
                this_train_loss = this_train_loss / eval_multiple
                this_valid_loss = np.mean([validate_model(i) for
                                    i in xrange(valid_batch_multiple)])
                train_eval_scores[eval_index] = this_train_loss
                valid_eval_scores[eval_index] = this_valid_loss
                
                # Save report every five evaluations
                if ((eval_index+1) % 5 == 0):
                    np.savetxt(
                        "output_cv%s_v%s/training_scores.txt" %
                        (cross_val, dataver),
                         train_eval_scores, fmt="%.5f"
                         )
                    np.savetxt(
                        "output_cv%s_v%s/validation_scores.txt" %
                        (cross_val, dataver),
                         valid_eval_scores, fmt="%.5f"
                         )
                    np.savetxt(
                        "output_cv%s_v%s/last_learn_rate.txt" %
                        (cross_val, dataver),
                        [np.array(this_rate)], fmt="%.5f"
                        )
                
                # Save model if best validation score
                if (this_valid_loss < best_valid_loss):  
                    best_valid_loss = this_valid_loss
                    best_iter = cum_iterations-1
                    
                    if save_model:
                        np.savez("output_cv%s_v%s/model.npz" % 
                                 (cross_val, dataver),
                                 LL.get_all_param_values(network_0))
                    
                # Reset evaluation reports
                eval_index += 1
                this_train_loss = 0.0
                this_valid_loss = 0.0
                
        aug_index += 1
            
        end_time = time.time()
        print("Computing time for batch %d: %f" % (batch, end_time-start_time))
        
    print("Best validation loss %f after %d epochs" %
          (best_valid_loss, (best_iter*train_minibatch_size//epoch_size)))
    
    del train_set_x, train_set_y, valid_set_x, valid_set_y
    gc.collect()
    
    return None
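One caveat with the indexed lookahead init_file = sys.argv[i+1] used in both versions: if -i happens to be the last token on the command line, the lookup raises IndexError. A defensive variant of the same scan (a sketch, not the original code):

import sys

init_file = None
for i in range(1, len(sys.argv)):
    if sys.argv[i].startswith('-'):
        option = sys.argv[i][1:]
        if option == 'i':
            # Guard the lookahead so a trailing "-i" fails cleanly
            if i + 1 < len(sys.argv):
                init_file = sys.argv[i + 1]
            else:
                sys.exit("option -i requires a file argument")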

Example 13

View license
def run():
    logger.info("streamondemand.platformcode.launcher run")

    # The start() function is not always executed on old platforms (XBMC versions under 12.0)
    if config.OLD_PLATFORM:
        config.verify_directories_created()

    # Extract item from sys.argv
    if sys.argv[2]:
        item = Item().fromurl(sys.argv[2])

    # If no item, this is mainlist
    else:
        item = Item(action="selectchannel", viewmode="movie")

    logger.info("streamondemand.platformcode.launcher "+item.tostring())

    # Set server filters
    server_white_list = []
    server_black_list = []
    if config.get_setting('filter_servers') == 'true':
        server_white_list, server_black_list = set_server_list()

    try:

        # If item has no action, stops here
        if item.action == "":
            logger.info("streamondemand.platformcode.launcher Item sin accion")
            return

        # Action for main menu in channelselector
        if ( item.action=="selectchannel" ):
            import channelselector
            itemlist = channelselector.getmainlist()

            # Check for updates only on first screen
            if config.get_setting("updatecheck2") == "true":
                logger.info("streamondemand.platformcode.launcher Check for plugin updates enabled")
                from core import updater
                
                try:
                    version = updater.checkforupdates()

                    if version:
                        import xbmcgui
                        advertencia = xbmcgui.Dialog()
                        advertencia.ok("Versione "+version+" disponible","E' possibile fare il download della nuova versione\nselezionare la relativa voce nel menu principale")

                        itemlist.insert(0,Item(title="Download versione "+version, version=version, channel="updater", action="update", thumbnail=channelselector.get_thumbnail_path() + "Crystal_Clear_action_info.png"))
                except:
                    import xbmcgui
                    advertencia = xbmcgui.Dialog()
                    advertencia.ok("Impossibile connettersi","Non è stato possibile verificare","aggiornamenti")
                    logger.info("cstreamondemand.platformcode.launcher Fallo al verificar la actualización")

            else:
                logger.info("streamondemand.platformcode.launcher Check for plugin updates disabled")

            xbmctools.renderItems(itemlist, item)

        # Action for updating plugin
        elif (item.action=="update"):

            from core import updater
            updater.update(item)
            if config.get_system_platform()!="xbox":
                import xbmc
                xbmc.executebuiltin( "Container.Refresh" )

        # Action for channel types on channelselector: movies, series, etc.
        elif (item.action=="channeltypes"):
            import channelselector
            itemlist = channelselector.getchanneltypes()

            xbmctools.renderItems(itemlist, item)

        # Action for channel listing on channelselector
        elif (item.action=="listchannels"):
            import channelselector
            itemlist = channelselector.filterchannels(item.category)

            xbmctools.renderItems(itemlist, item)

        # Action in certain channel specified in "action" and "channel" parameters
        else:

            # Entry point for a channel is the "mainlist" action, so here we check parental control
            if item.action=="mainlist":
                
                # Parental control
                can_open_channel = False

                # If it is an adult channel, and user has configured pin, asks for it
                if channeltools.is_adult(item.channel) and config.get_setting("adult_pin")!="":

                    import xbmc
                    keyboard = xbmc.Keyboard("","PIN para canales de adultos",True)
                    keyboard.doModal()

                    if (keyboard.isConfirmed()):
                        tecleado = keyboard.getText()
                        if tecleado==config.get_setting("adult_pin"):
                            can_open_channel = True

                # All the other cases can open the channel
                else:
                    can_open_channel = True

                if not can_open_channel:
                    return

            # Checks if channel exists
            channel_file = os.path.join(config.get_runtime_path(), 'channels', item.channel+".py")
            logger.info("streamondemand.platformcode.launcher channel_file=%s" % channel_file)

            if item.channel in ["personal","personal2","personal3","personal4","personal5"]:
                import channels.personal as channel

            elif os.path.exists(channel_file):
                try:
                    channel = __import__('channels.%s' % item.channel, fromlist=["channels.%s" % item.channel])
                except:
                    exec "import channels."+item.channel+" as channel"

            logger.info("streamondemand.platformcode.launcher running channel {0} {1}".format(channel.__name__, channel.__file__))

            # Special play action
            if item.action == "play":
                logger.info("streamondemand.platformcode.launcher play")

                # First checks if channel has a "play" function
                if hasattr(channel, 'play'):
                    logger.info("streamondemand.platformcode.launcher executing channel 'play' method")
                    itemlist = channel.play(item)

                    # Play should return a list of playable URLs
                    if len(itemlist) > 0:
                        item = itemlist[0]
                        xbmctools.play_video(item)
                    
                    # If not, show the user an error message
                    else:
                        import xbmcgui
                        ventana_error = xbmcgui.Dialog()
                        ok = ventana_error.ok("plugin", "No hay nada para reproducir")

                # If the channel doesn't have a "play" function, use the standard play from xbmctools
                else:
                    logger.info("streamondemand.platformcode.launcher executing core 'play' method")
                    xbmctools.play_video(item)

            # Special action for findvideos, where the plugin looks for known urls
            elif item.action == "findvideos":

                if item.strm:
                    # Special action for playing a video from the library
                    play_from_library(item, channel, server_white_list, server_black_list)

                # First checks if channel has a "findvideos" function
                if hasattr(channel, 'findvideos'):
                    itemlist = getattr(channel, item.action)(item)

                # If not, uses the generic findvideos function
                else:
                    logger.info("streamondemand.platformcode.launcher no channel 'findvideos' method, "
                                "executing core method")
                    from core import servertools
                    itemlist = servertools.find_video_items(item)
                    if config.get_setting('filter_servers') == 'true':
                        itemlist = filtered_servers(itemlist, server_white_list, server_black_list)


                from platformcode import subtitletools
                subtitletools.saveSubtitleName(item)

                # Show xbmc items as "movies", so plot is visible
                import xbmcplugin

                handle = sys.argv[1]
                xbmcplugin.setContent(int( handle ),"movies")

                # Add everything to XBMC item list
                if type(itemlist) == list and itemlist:
                    xbmctools.renderItems(itemlist, item)

                # If not, it shows an empty list
                # FIXME: Here we should show some explanation like "There are no items, this happens because of bla bla bla"
                else:
                    xbmctools.renderItems([], item)

            # Special action for adding a movie to the library
            elif item.action == "add_pelicula_to_library":
                library.add_pelicula_to_library(item)

            # Special action for adding a serie to the library
            elif item.action == "add_serie_to_library":
                library.add_serie_to_library(item, channel)

            # Special action for downloading all episodes from a serie
            elif item.action == "download_all_episodes":
                downloadtools.download_all_episodes(item, channel)

            # Special action for searching, first asks for the words then call the "search" function
            elif item.action=="search":
                logger.info("streamondemand.platformcode.launcher search")
                
                import xbmc
                keyboard = xbmc.Keyboard("")
                keyboard.doModal()
                
                if (keyboard.isConfirmed()):
                    tecleado = keyboard.getText()
                    tecleado = tecleado.replace(" ", "+")
                    itemlist = channel.search(item,tecleado)
                else:
                    itemlist = []
                
                xbmctools.renderItems(itemlist, item)

            # For all other actions
            else:
                logger.info("streamondemand.platformcode.launcher executing channel '"+item.action+"' method")
                itemlist = getattr(channel, item.action)(item)

                # Enable library mode for all generic channels, so the plot is visible
                import xbmcplugin

                handle = sys.argv[1]
                xbmcplugin.setContent(int( handle ),"movies")

                # Add the items to the XBMC list
                if type(itemlist) == list and itemlist:
                    xbmctools.renderItems(itemlist, item)

                # If not, it shows an empty list
                # FIXME: Here we should show some explanation like "There are no items, this happens because of bla bla bla"
                else:
                    xbmctools.renderItems([], item)

    except urllib2.URLError,e:
        import traceback
        logger.error("streamondemand.platformcode.launcher "+traceback.format_exc())

        import xbmcgui
        ventana_error = xbmcgui.Dialog()

        # Grab inner and third party errors
        if hasattr(e, 'reason'):
            logger.info("streamondemand.platformcode.launcher Razon del error, codigo: {0}, Razon: {1}".format(e.reason[0], e.reason[1]))
            texto = config.get_localized_string(30050) # "Cannot connect to the website"
            ok = ventana_error.ok ("plugin", texto)
        
        # Grab server response errors
        elif hasattr(e,'code'):
            logger.info("streamondemand.platformcode.launcher codigo de error HTTP : %d" %e.code)
            texto = (config.get_localized_string(30051) % e.code) # "The website is not working correctly (http error %d)"
            ok = ventana_error.ok ("plugin", texto)
    
    except:
        import traceback
        import xbmcgui
        logger.error("streamondemand.platformcode.launcher "+traceback.format_exc())
        
        patron = 'File "'+os.path.join(config.get_runtime_path(),"channels","").replace("\\","\\\\")+'([^.]+)\.py"'
        canal = scrapertools.find_single_match(traceback.format_exc(),patron)
        
        if canal:
            xbmcgui.Dialog().ok(
                "Errore inaspettato in " + canal,
                "Protrebbe essere un errore di connessione. Il canale web "
                "potrebbe aver modificato la sua struttura oppure si è "
                "verificato un errore in streamondemand.\nPer dettagli consulta il log.")
        else:
            xbmcgui.Dialog().ok(
                "Si è verificato un errore su streamondemand",
                "Per dettagli consulta il log." )

Example 14

View license
def play_video(item,desdefavoritos=False,desdedescargados=False,desderrordescargas=False,strmfile=False):
    from core import servertools
    
    logger.info("streamondemand.platformcode.xbmctools play_video")
    #logger.info(item.tostring('\n'))

    try:
        item.server = item.server.lower()
    except:
        item.server = ""

    if item.server=="":
        item.server="directo"

    view = False
    # Open the selection dialog
    opciones = []
    default_action = config.get_setting("default_action")
    logger.info("default_action="+default_action)

    # If not in normal player mode, don't show the dialog because it hangs XBMC
    muestra_dialogo = (config.get_setting("player_mode")=="0" and not strmfile)

    # Extract the video URLs; if the video can't be watched, report the reason
    video_urls,puedes,motivo = servertools.resolve_video_urls_for_playing(item.server,item.url,item.password,muestra_dialogo)

    # If the video can be watched, present the options
    if puedes:
        
        for video_url in video_urls:
            opciones.append(config.get_localized_string(30151) + " " + video_url[0])

        if item.server=="local":
            opciones.append(config.get_localized_string(30164))
        else:
            opcion = config.get_localized_string(30153)
            opciones.append(opcion) # "Descargar"
    
            if item.channel=="favoritos": 
                opciones.append(config.get_localized_string(30154)) # "Quitar de favoritos"
            else:
                opciones.append(config.get_localized_string(30155)) # "Añadir a favoritos"
        
            if not strmfile:
                opciones.append(config.get_localized_string(30161)) # "Añadir a Biblioteca"
        
            if item.channel!="descargas":
                opciones.append(config.get_localized_string(30157)) # "Añadir a lista de descargas"
            else:
                if item.category=="errores":
                    opciones.append(config.get_localized_string(30159)) # "Borrar descarga definitivamente"
                    opciones.append(config.get_localized_string(30160)) # "Pasar de nuevo a lista de descargas"
                else:
                    opciones.append(config.get_localized_string(30156)) # "Quitar de lista de descargas"

            if config.get_setting("jdownloader_enabled")=="true":
                opciones.append(config.get_localized_string(30158)) # "Enviar a JDownloader"

        if default_action=="3":
            seleccion = len(opciones)-1
    
        # Search for trailers on YouTube
        if item.channel not in ["Trailer","ecarteleratrailers"]:
            opciones.append(config.get_localized_string(30162)) # "Search Trailer"

    # If the video can't be watched, inform the user
    else:
        if item.server!="":
            advertencia = xbmcgui.Dialog()
            if "<br/>" in motivo:
                resultado = advertencia.ok("Non è possibile guardare il video perché...", motivo.split("<br/>")[0],
                                           motivo.split("<br/>")[1], item.url)
            else:
                resultado = advertencia.ok("Non è possibile guardare il video perché...", motivo, item.url)
        else:
            advertencia = xbmcgui.Dialog()
            resultado = advertencia.ok("Non è possibile guardare il video perché...", "Il server che lo ospita non è",
                                       "ancora supportato da streamondemand", item.url)

        if item.channel=="favoritos": 
            opciones.append(config.get_localized_string(30154)) # "Remove from favorites"

        if item.channel=="descargas":
            if item.category=="errores":
                opciones.append(config.get_localized_string(30159)) # "Delete download permanently"
            else:
                opciones.append(config.get_localized_string(30156)) # "Remove from download list"
        
        if len(opciones)==0:
            return

    # If the default action is "Ask", ask
    if default_action=="0": # and server!="torrent":
        dia = xbmcgui.Dialog()
        seleccion = dia.select(config.get_localized_string(30163), opciones) # "Choose an option"
        #dia.close()
        '''
        elif default_action=="0" and server=="torrent":
            advertencia = xbmcgui.Dialog()
            logger.info("video_urls[0]="+str(video_urls[0][1]))
            if puedes and ('"status":"COMPLETED"' in video_urls[0][1] or '"percent_done":100' in video_urls[0][1]):
                listo  = "y está listo para ver"
            else:
                listo = "y se está descargando"
            resultado = advertencia.ok( "Torrent" , "El torrent ha sido añadido a la lista" , listo )
            seleccion=-1
        '''
    elif default_action=="1":
        seleccion = 0
    elif default_action=="2":
        seleccion = len(video_urls)-1
    elif default_action=="3":
        seleccion = seleccion
    else:
        seleccion=0

    logger.info("seleccion=%d" % seleccion)
    logger.info("seleccion=%s" % opciones[seleccion])

    # Nothing was chosen, most likely because the user pressed ESC
    if seleccion==-1:
        #To avoid the "One or more items failed" error when cancelling the selection from a strm file
        listitem = xbmcgui.ListItem( item.title, iconImage="DefaultVideo.png", thumbnailImage=item.thumbnail)
        xbmcplugin.setResolvedUrl(int(sys.argv[ 1 ]),False,listitem)    # JUR Added
        #if config.get_setting("subtitulo") == "true":
        #    config.set_setting("subtitulo", "false")
        return

    if opciones[seleccion]==config.get_localized_string(30158): # "Send to JDownloader"
        #d = {"web": url}urllib.urlencode(d)
        from core import scrapertools
        
        if item.subtitle!="":
            data = scrapertools.cachePage(config.get_setting("jdownloader")+"/action/add/links/grabber0/start1/web="+item.url+ " " +item.thumbnail + " " + item.subtitle)
        else:
            data = scrapertools.cachePage(config.get_setting("jdownloader")+"/action/add/links/grabber0/start1/web="+item.url+ " " +item.thumbnail)

        return

    if opciones[seleccion]==config.get_localized_string(30158).replace("jDownloader","pyLoad"): # "Send to pyLoad"
        logger.info("Enviando a pyload...")

        if item.show!="":
            package_name = item.show
        else:
            package_name = "streamondemand"

        from core import pyload_client
        pyload_client.download(url=item.url,package_name=package_name)
        return

    elif opciones[seleccion]==config.get_localized_string(30164): # Delete file in downloads
        # "extra" holds the filename in favorites
        os.remove( item.url )
        xbmc.executebuiltin( "Container.Refresh" )
        return

    # One of the videos was chosen
    elif seleccion < len(video_urls):
        mediaurl = video_urls[seleccion][1]
        if len(video_urls[seleccion])>3:
            wait_time = video_urls[seleccion][2]
            item.subtitle = video_urls[seleccion][3]
        elif len(video_urls[seleccion])>2:
            wait_time = video_urls[seleccion][2]
        else:
            wait_time = 0
        view = True

    # Download
    elif opciones[seleccion]==config.get_localized_string(30153): # "Download"

        download_title = item.fulltitle
        if item.hasContentDetails=="true":
            download_title = item.contentTitle

        # The highest-quality video is the last one
        mediaurl = video_urls[len(video_urls)-1][1]

        from core import downloadtools
        keyboard = xbmc.Keyboard(download_title)
        keyboard.doModal()
        if (keyboard.isConfirmed()):
            download_title = keyboard.getText()
            devuelve = downloadtools.downloadbest(video_urls,download_title)
            
            if devuelve==0:
                advertencia = xbmcgui.Dialog()
                resultado = advertencia.ok("Download", "Scaricato con successo")
            elif devuelve==-1:
                advertencia = xbmcgui.Dialog()
                resultado = advertencia.ok("Download", "Download interrotto")
            else:
                advertencia = xbmcgui.Dialog()
                resultado = advertencia.ok("Download", "Errore nel download")
        return

    elif opciones[seleccion]==config.get_localized_string(30154): #"Remove from favorites"
        from channels import favoritos
        # "extra" holds the filename in favorites
        favoritos.deletebookmark(urllib.unquote_plus( item.extra ))

        advertencia = xbmcgui.Dialog()
        resultado = advertencia.ok(config.get_localized_string(30102) , item.title , config.get_localized_string(30105)) # 'Removed from favorites'

        xbmc.executebuiltin( "Container.Refresh" )
        return

    elif opciones[seleccion]==config.get_localized_string(30159): #"Delete download permanently"
        from channels import descargas
        descargas.delete_error_bookmark(urllib.unquote_plus( item.extra ))

        advertencia = xbmcgui.Dialog()
        resultado = advertencia.ok(config.get_localized_string(30101) , item.title , config.get_localized_string(30106)) # 'Removed from the list'
        xbmc.executebuiltin( "Container.Refresh" )
        return

    elif opciones[seleccion]==config.get_localized_string(30160): #"Move back to download list":
        from channels import descargas
        descargas.mover_descarga_error_a_pendiente(urllib.unquote_plus( item.extra ))

        advertencia = xbmcgui.Dialog()
        resultado = advertencia.ok(config.get_localized_string(30101) , item.title , config.get_localized_string(30107)) # 'Moved back to the download list'
        return

    elif opciones[seleccion]==config.get_localized_string(30155): #"Add to favorites":
        from channels import favoritos
        from core import downloadtools

        download_title = item.fulltitle
        download_thumbnail = item.thumbnail
        download_plot = item.plot

        if item.hasContentDetails=="true":
            download_title = item.contentTitle
            download_thumbnail = item.contentThumbnail
            download_plot = item.contentPlot

        keyboard = xbmc.Keyboard(downloadtools.limpia_nombre_excepto_1(download_title)+" ["+item.channel+"]")
        keyboard.doModal()
        if keyboard.isConfirmed():
            title = keyboard.getText()
            favoritos.savebookmark(titulo=title,url=item.url,thumbnail=download_thumbnail,server=item.server,plot=download_plot,fulltitle=title)
            advertencia = xbmcgui.Dialog()
            resultado = advertencia.ok(config.get_localized_string(30102) , title , config.get_localized_string(30108)) # 'added to favorites'
        return

    elif opciones[seleccion]==config.get_localized_string(30156): #"Remove from download list":
        # The category is the filename in the download list
        from channels import descargas
        descargas.deletebookmark((urllib.unquote_plus( item.extra )))

        advertencia = xbmcgui.Dialog()
        resultado = advertencia.ok(config.get_localized_string(30101) , item.title , config.get_localized_string(30106)) # 'Removed from the download list'

        xbmc.executebuiltin( "Container.Refresh" )
        return

    elif opciones[seleccion]==config.get_localized_string(30157): #"Add to download list":
        from core import downloadtools

        download_title = item.fulltitle
        download_thumbnail = item.thumbnail
        download_plot = item.plot

        if item.hasContentDetails=="true":
            download_title = item.contentTitle
            download_thumbnail = item.contentThumbnail
            download_plot = item.contentPlot

        keyboard = xbmc.Keyboard(downloadtools.limpia_nombre_excepto_1(download_title))
        keyboard.doModal()
        if keyboard.isConfirmed():
            download_title = keyboard.getText()

            from channels import descargas
            descargas.savebookmark(titulo=download_title,url=item.url,thumbnail=download_thumbnail,server=item.server,plot=download_plot,fulltitle=download_title)

            advertencia = xbmcgui.Dialog()
            resultado = advertencia.ok(config.get_localized_string(30101) , download_title , config.get_localized_string(30109)) # 'added to the download list'
        return

    elif opciones[seleccion] == config.get_localized_string(30161):  # "Add to Library"

        titulo = item.fulltitle
        if titulo == "":
            titulo = item.title
        #library.savelibrary(titulo,item.url,item.thumbnail,item.server,item.plot,canal=item.channel,category=item.category,Serie=item.show)
        # TODO: ONLY movies?
        #logger.debug(item.tostring('\n'))
        new_item = item.clone(title=titulo, action="play_from_library", category="Cine",
                              fulltitle=item.fulltitle, channel=item.channel)
        #logger.debug(new_item.tostring('\n'))
        insertados, sobreescritos, fallidos = library.save_library_movie(new_item)

        advertencia = xbmcgui.Dialog()
        if fallidos == 0:
            advertencia.ok(config.get_localized_string(30131), titulo,
                           config.get_localized_string(30135))  # 'added to the library'
        return

    elif opciones[seleccion]==config.get_localized_string(30162): #"Search Trailer":
        config.set_setting("subtitulo", "false")
        xbmc.executebuiltin("XBMC.RunPlugin(%s?%s)" % ( sys.argv[ 0 ] , item.clone(channel="trailertools", action="buscartrailer", contextual=True).tourl()))
        return

    # If there is no mediaurl, it's because the video isn't there :)
    logger.info("streamondemand.platformcode.xbmctools mediaurl="+mediaurl)
    if mediaurl=="":
        if item.server == "unknown":
            alertUnsopportedServer()
        else:
            alertnodisponibleserver(item.server)
        return

    # If there is a wait time (as on megaupload), enforce it now
    if wait_time>0:
        continuar = handle_wait(wait_time,item.server,"Cargando vídeo...")
        if not continuar:
            return

    # Fetch Library data (only for strm files that are in the library)
    if strmfile:
        xlistitem = getLibraryInfo(mediaurl)
    else:
        play_title = item.fulltitle
        play_thumbnail = item.thumbnail
        play_plot = item.plot

        if item.hasContentDetails=="true":
            play_title = item.contentTitle
            play_thumbnail = item.contentThumbnail
            play_plot = item.contentPlot

        try:
            xlistitem = xbmcgui.ListItem( play_title, iconImage="DefaultVideo.png", thumbnailImage=play_thumbnail, path=mediaurl)
        except:
            xlistitem = xbmcgui.ListItem( play_title, iconImage="DefaultVideo.png", thumbnailImage=play_thumbnail)

        xlistitem.setInfo( "video", { "Title": play_title, "Plot" : play_plot , "Studio" : item.channel , "Genre" : item.category } )

        #set_infoLabels(listitem,plot) # Change introduced by super_berny to add infoLabels to the ListItem

    # Launch the player

    if strmfile and not item.from_biblioteca: #If it's a strm file, play is not needed
        xbmcplugin.setResolvedUrl(int(sys.argv[1]), True, xlistitem)
        if item.subtitle != "":
            xbmc.sleep(2000)
            xbmc.Player().setSubtitles(item.subtitle)

    #Movido del conector "torrent" aqui
    elif item.server=="torrent":

        #Available options for playing torrents
        torrent_options = []
        torrent_options.append(["Client  (necessario libtorrent)"])
        torrent_options.append(["Client interno MCT (necessario libtorrent)"])

        #External plugins; others can be added
        if xbmc.getCondVisibility('System.HasAddon("plugin.video.xbmctorrent")'):
            torrent_options.append(["Plugin esterno: xbmctorrent","plugin://plugin.video.xbmctorrent/play/%s"])
        if xbmc.getCondVisibility('System.HasAddon("plugin.video.pulsar")'):
            torrent_options.append(["Plugin esterno: pulsar","plugin://plugin.video.pulsar/play?uri=%s"])
        if xbmc.getCondVisibility('System.HasAddon("plugin.video.quasar")'):
            torrent_options.append(["Plugin esterno: quasar","plugin://plugin.video.quasar/play?uri=%s"])
        if xbmc.getCondVisibility('System.HasAddon("plugin.video.stream")'):
            torrent_options.append(["Plugin esterno: stream","plugin://plugin.video.stream/play/%s"])
        if xbmc.getCondVisibility('System.HasAddon("plugin.video.torrenter")'):
            torrent_options.append(["Plugin esterno: torrenter","plugin://plugin.video.torrenter/?action=playSTRM&url=%s"])
        if xbmc.getCondVisibility('System.HasAddon("plugin.video.torrentin")'):
            torrent_options.append(["Plugin esterno: torrentin","plugin://plugin.video.torrentin/?uri=%s&image="])


        if len(torrent_options)>1:
            seleccion = xbmcgui.Dialog().select("Aprire torrent con...", [opcion[0] for opcion in torrent_options])
        else:
            seleccion = 0

        #External plugins
        if seleccion > 1:
            mediaurl = urllib.quote_plus(item.url)
            xbmc.executebuiltin( "PlayMedia(" + torrent_options[seleccion][1] % mediaurl +")" )

        if seleccion == 1:
            from platformcode import mct
            mct.play( mediaurl, xbmcgui.ListItem("", iconImage=item.thumbnail, thumbnailImage=item.thumbnail), subtitle=item.subtitle )

        #Built-in player (libtorrent)
        if seleccion == 0:
            import time
            videourl = None
            played = False
  
            #Import the client
            from btserver import Client

            #Start the client:
            c = Client(url=mediaurl, is_playing_fnc= xbmc.Player().isPlaying, wait_time=None, timeout=5, temp_path =os.path.join(config.get_data_path(),"torrent") )

            #Show the progress dialog
            progreso = xbmcgui.DialogProgress()
            progreso.create( "streamondemand - Torrent" , "Avviando...")


            #While the dialog is not cancelled and the client is not closed
            while not progreso.iscanceled() and not c.closed:
  
                try:
                    #Get the torrent status
                    s = c.status

                    #Build the three lines with the torrent info
                    txt = '%.2f%% de %.1fMB %s | %.1f kB/s' % \
                    (s.progress_file, s.file_size, s.str_state, s._download_rate)
                    txt2 =  'S: %d(%d) P: %d(%d) | DHT:%s (%d) | Trackers: %d' % \
                    (s.num_seeds, s.num_complete, s.num_peers, s.num_incomplete, s.dht_state, s.dht_nodes, s.trackers)
                    txt3 = 'Origen Peers TRK: %d DHT: %d PEX: %d LSD %d ' % \
                    (s.trk_peers,s.dht_peers, s.pex_peers, s.lsd_peers)

                    progreso.update(s.buffer,txt, txt2, txt3)
      
      
                    time.sleep(1)
      
                    #If the buffer has filled and playback has not started yet, start it
                    if s.buffer == 100 and not played:

                        #Close the progress dialog
                        progreso.close()

                        #Get the torrent playlist
                        videourl = c.get_play_list()

                        #Start the player
                        playlist = xbmc.PlayList( xbmc.PLAYLIST_VIDEO )
                        playlist.clear()
                        playlist.add( videourl, xlistitem )
                        xbmcPlayer = xbmc.Player()
                        xbmcPlayer.play(playlist)

                        #Mark as played so it is not started again
                        played = True

                        #And wait for the player to close
                        while xbmc.Player().isPlaying():
                            time.sleep(1)

                        #Once it is closed, show the dialog again
                        progreso.create( "streamondemand - Torrent" , "Avviando...")
      
                except:
                    import traceback
                    logger.info(traceback.format_exc())
                    break

            progreso.update(100,"Terminato, elimina dati"," "," ")

            #Stop the client
            if not c.closed:
                c.stop()

            #And close the progress dialog
            progreso.close()

            return

    else:
        logger.info("player_mode="+config.get_setting("player_mode"))
        logger.info("mediaurl="+mediaurl)
        if config.get_setting("player_mode")=="3" or "megacrypter.com" in mediaurl:
            import download_and_play
            download_and_play.download_and_play( mediaurl , "download_and_play.tmp" , config.get_setting("downloadpath") )
            return

        elif config.get_setting("player_mode")=="0" or (config.get_setting("player_mode")=="3" and mediaurl.startswith("rtmp")):
            # Add the listitem to a playlist
            playlist = xbmc.PlayList( xbmc.PLAYLIST_VIDEO )
            playlist.clear()
            playlist.add( mediaurl, xlistitem )

            # Play
            playersettings = config.get_setting('player_type')
            logger.info("streamondemand.platformcode.xbmctools playersettings="+playersettings)

            if config.get_system_platform()=="xbox":
                player_type = xbmc.PLAYER_CORE_AUTO
                if playersettings == "0":
                    player_type = xbmc.PLAYER_CORE_AUTO
                    logger.info("streamondemand.platformcode.xbmctools PLAYER_CORE_AUTO")
                elif playersettings == "1":
                    player_type = xbmc.PLAYER_CORE_MPLAYER
                    logger.info("streamondemand.platformcode.xbmctools PLAYER_CORE_MPLAYER")
                elif playersettings == "2":
                    player_type = xbmc.PLAYER_CORE_DVDPLAYER
                    logger.info("streamondemand.platformcode.xbmctools PLAYER_CORE_DVDPLAYER")

                xbmcPlayer = xbmc.Player( player_type )
            else:
                xbmcPlayer = xbmc.Player()

            xbmcPlayer.play(playlist)

            if item.channel=="cuevana" and item.subtitle!="":
                logger.info("subtitulo="+subtitle)
                if item.subtitle!="" and (opciones[seleccion].startswith("Ver") or opciones[seleccion].startswith("Watch")):
                    logger.info("streamondemand.platformcode.xbmctools Con subtitulos")
                    setSubtitles()

        elif config.get_setting("player_mode")=="1":
            logger.info("mediaurl :"+ mediaurl)
            logger.info("Tras setResolvedUrl")
            xbmcplugin.setResolvedUrl(int(sys.argv[1]), True, xbmcgui.ListItem(path=mediaurl))

        elif config.get_setting("player_mode")=="2":
            xbmc.executebuiltin( "PlayMedia("+mediaurl+")" )

    if item.subtitle!="" and view:
        logger.info("Subtítulos externos: "+item.subtitle)
        xbmc.Player().setSubtitles(item.subtitle)
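
In Kodi/XBMC add-ons such as the one above, sys.argv is filled in by Kodi itself: sys.argv[0] is the plugin base URL (reused here to build RunPlugin calls), sys.argv[1] is the integer handle that must be handed back to xbmcplugin functions such as setResolvedUrl, and sys.argv[2] is the query string. A minimal sketch of that convention, assuming a hypothetical "play" action (the parameter names are illustrative, not taken from the example above):

import sys
import urlparse

import xbmcgui
import xbmcplugin

# sys.argv[0] = plugin base URL, e.g. plugin://plugin.video.example/
# sys.argv[1] = handle for this invocation, required by xbmcplugin calls
# sys.argv[2] = query string, e.g. ?action=play&url=http%3A%2F%2F...
base_url = sys.argv[0]
addon_handle = int(sys.argv[1])
params = urlparse.parse_qs(sys.argv[2][1:])

if params.get("action", [""])[0] == "play":
    # Hand the final media URL back to Kodi instead of playing it directly
    li = xbmcgui.ListItem(path=params["url"][0])
    xbmcplugin.setResolvedUrl(addon_handle, True, li)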

Example 15

Project: xbmc-addons-chinese
Source File: addon.py
View license
def main():
	if param.startswith("?stream="):
		def fixURL(tmpurl):
			tmpurl = tmpurl.replace("vtime.cntv.cloudcdn.net:8000", "vtime.cntv.cloudcdn.net") #Global (HDS/FLV) - wrong port
			tmpurl = tmpurl.replace("tv.fw.live.cntv.cn", "tvhd.fw.live.cntv.cn") #China - 403 Forbidden
			return tmpurl
		
		def tryHLSStream(jsondata, subkey):
			print("Trying stream {0}".format(subkey))
			
			if jsondata["hls_url"].has_key(subkey) and jsondata["hls_url"][subkey] != "":
				try:
					tmpurl = jsondata["hls_url"][subkey]
					tmpurl = fixURL(tmpurl)
					
					req = urllib2.Request(tmpurl)
					conn = urllib2.urlopen(req, timeout=TIMEOUT_S)
					conn.read(8) #Try reading a few bytes
					
					return tmpurl
				except Exception:
					print("{0} failed.".format(subkey))
					print(traceback.format_exc())
			
			return None
		
		def tryFLVStream(jsondata, streamName):
			if jsondata["hds_url"].has_key(streamName):
				url = jsondata["hds_url"][streamName]
				url = url + "&hdcore=2.11.3"
				
				return url
		
		pDialog = xbmcgui.DialogProgress()
		pDialog.create(addon.getLocalizedString(30009), addon.getLocalizedString(30010))
		pDialog.update(0)
		try:
			#Locate the M3U8 file
			resp = urllib2.urlopen("http://vdn.live.cntv.cn/api2/live.do?channel=pa://cctv_p2p_hd" + param[8:])
			data = resp.read().decode("utf-8")
			
			if pDialog.iscanceled(): return
			
			url = None
			jsondata = jsonimpl.loads(data)
			
			urlsTried = 0
			urlsToTry = 5
			
			if jsondata.has_key("hls_url"):
				if url == None:
					urlsTried += 1
					pDialog.update(urlsTried * 100 / urlsToTry, "{0} {1} (HLS)".format(addon.getLocalizedString(30011), "hls1"))
					url = tryHLSStream(jsondata, "hls1")
				if url == None:
					urlsTried += 1
					pDialog.update(urlsTried * 100 / urlsToTry, "{0} {1} (HLS)".format(addon.getLocalizedString(30011), "hls2"))
					url = tryHLSStream(jsondata, "hls2")
				if url == None:
					urlsTried += 1
					pDialog.update(urlsTried * 100 / urlsToTry, "{0} {1} (HLS)".format(addon.getLocalizedString(30011), "hls3"))
					url = tryHLSStream(jsondata, "hls3")
				if url == None:
					urlsTried += 1
					pDialog.update(urlsTried * 100 / urlsToTry, "{0} {1} (HLS)".format(addon.getLocalizedString(30011), "hls4"))
					url = tryHLSStream(jsondata, "hls4")
				if url == None:
					urlsTried += 1
					pDialog.update(urlsTried * 100 / urlsToTry, "{0} {1} (HLS)".format(addon.getLocalizedString(30011), "hls5"))
					url = tryHLSStream(jsondata, "hls5")
			
			if pDialog.iscanceled(): return
			
			#if url is None and jsondata.has_key("hls_url"):
			#	tryHLSStream(jsondata, "hls4")
			
			if url is None:
				showNotification(30002)
				pDialog.close()
				return
			
			print("Loading URL {0}".format(url))
			
			auth = urlparse.parse_qs(urlparse.urlparse(url)[4])["AUTH"][0]
			print("Got AUTH {0}".format(auth))
			
			url = url + "|" + urllib.urlencode( { "Cookie" : "AUTH=" + auth } )
			
			print("Built URL {0}".format(url))
			
			pDialog.close()
			xbmc.Player().play(url)
			
		except Exception:
			showNotification(30000)
			print(traceback.format_exc())
			pDialog.close()
			return

	elif param.startswith("?city="):
		city = param[6:]
		
		def addStream(channelID, channelName):
			li = xbmcgui.ListItem(channelName, iconImage=addon_path + "/resources/media/" + city + ".png")
			xbmcplugin.addDirectoryItem(handle=addon_handle, url=sys.argv[0] + "?stream=" + channelID, listitem=li)
		
		if city == "anhui":
			addStream("anqingxinwen", "安庆新闻综合")
		if city == "beijing":
			addStream("btv2", "BTV文艺")
			addStream("btv3", "BTV科教")
			addStream("btv4", "BTV影视")
			addStream("btv5", "BTV财经")
			addStream("btv6", "BTV体育")
			addStream("btv7", "BTV生活")
			addStream("btv8", "BTV青少")
			addStream("btv9", "BTV新闻")
			addStream("btvchild", "BTV卡酷少儿")
			addStream("btvjishi", "BTV纪实")
			addStream("btvInternational", "BTV国际")
		if city == "tianjin":
			addStream("tianjin1", "天津1套")
			addStream("tianjin2", "天津2套")
			addStream("tianjinbh", "滨海新闻综合")
			addStream("tianjinbh2", "滨海综艺频道")
		if city == "guangxi":
			addStream("gxzy", "广西综艺")
		if city == "guangdong":
			addStream("cztv1", "潮州综合")
			addStream("cztv2", "潮州公共")
			addStream("foshanxinwen", "佛山新闻综合")
			addStream("guangzhouxinwen", "广州新闻")
			addStream("guangzhoujingji", "广州经济")
			addStream("guangzhoushaoer", "广州少儿")
			addStream("guangzhouzonghe", "广州综合")
			addStream("guangzhouyingyu", "广州英语")
			addStream("shaoguanzonghe", "韶关综合")
			addStream("shaoguangonggong", "韶关公共")
			addStream("shenzhencjsh", "深圳财经")
			addStream("zhuhaiyitao", "珠海一套")
			addStream("zhuhaiertao", "珠海二套")
		if city == "sichuan":
			addStream("cdtv1", "成都新闻综合")
			addStream("cdtv2new", "成都经济资讯服务")
			addStream("cdtv5", "成都公共")
		if city == "liaoning":
			addStream("daliannews", "大连一套")
			addStream("liaoningds", "辽宁都市")
		if city == "jiangxi":
			addStream("ganzhou", "赣州新闻综合")
			addStream("nanchangnews", "南昌新闻")
		if city == "hubei":
			addStream("hubeidst", "湖北电视台综合频道")
			addStream("hubeigonggong", "湖北公共")
			addStream("hubeijiaoyu", "湖北教育")
			addStream("hubeitiyu", "湖北体育")
			addStream("hubeiyingshi", "湖北影视")
			addStream("hubeijingshi", "湖北经视")
			addStream("hubeigouwu", "湖北购物")
			addStream("jznews", "荆州新闻频道")
			addStream("wuhanetv", "武汉教育")
			addStream("jzlongs", "湖北垄上频道")
			addStream("xiangyangtai", "襄阳广播电视台")
		if city == "heilongjiang":
			addStream("haerbinnews", "哈尔滨新闻综合")
		if city == "xinjiang":
			addStream("xjtv2", "维语新闻综合")
			addStream("xjtv3", "哈语新闻综合")
			addStream("xjtv5", "维语综艺")
			addStream("xjtv8", "哈语综艺")
			addStream("xjtv9", "维语经济生活")
		if city == "hebei":
			addStream("hebeinongmin", "河北农民频道")
			addStream("hebeijingji", "河北经济")
			addStream("shijiazhuangyitao", "石家庄一套")
			addStream("shijiazhuangertao", "石家庄二套")
			addStream("shijiazhuangsantao", "石家庄三套")
			addStream("shijiazhuangsitao", "石家庄四套")
			addStream("xingtaizonghe", "邢台综合")
			addStream("xingtaishenghuo", "邢台生活")
			addStream("xingtaigonggong", "邢台公共")
			addStream("xingtaishahe", "邢台沙河")
		if city == "shandong":
			addStream("jinannews", "济南新闻")
			addStream("qingdaonews", "青岛新闻综合")
			addStream("yantaixinwenzonghe", "烟台新闻综合")
			addStream("yantaixinjingjishenghuo", "烟台经济生活")
			addStream("yantaigonggong", "烟台公共频道")
		if city == "gansu":
			addStream("jingcailanzhou", "睛彩兰州")
		if city == "yunnan":
			addStream("lijiangnews", "丽江新闻综合频道")
			addStream("lijiangpublic", "丽江公共频道")
		if city == "neimenggu":
			addStream("neimenggu2", "蒙语频道")
			addStream("neimengwh", "内蒙古文化频道")
		if city == "jiangsu":
			addStream("nanjingnews", "南京新闻")
			addStream("nantongxinwen", "南通新闻频道")
			addStream("nantongshejiao", "南通社教频道")
			addStream("nantongshenghuo", "南通生活频道")
			addStream("wuxixinwenzonghe", "无锡新闻综合")
			addStream("wuxidoushizixun", "无锡都市资讯")
			addStream("wuxiyuele", "无锡娱乐")
			addStream("wuxijingji", "无锡经济")
			addStream("wuxiyidong", "无锡移动")
			addStream("wuxishenghuo", "无锡生活")
		if city == "zhejiang":
			addStream("nbtv1", "宁波一套")
			addStream("nbtv2", "宁波二套")
			addStream("nbtv3", "宁波三套")
			addStream("nbtv4", "宁波四套")
			addStream("nbtv5", "宁波五套")
		if city == "shanghai":
			addStream("shnews", "上海新闻综合")
		if city == "fujian":
			addStream("xiamen1", "厦门一套")
			addStream("xiamen2", "厦门二套")
			addStream("xiamen3", "厦门三套")
			addStream("xiamen4", "厦门四套")
			addStream("xiamenyidong", "厦门移动")
		if city == "shaanxi":
			addStream("xiannews", "西安新闻")
		if city == "xizang":
			addStream("xizang2", "藏语频道")
		if city == "jilin":
			addStream("yanbianguangbo", "延边卫视视频广播")
			addStream("yanbianam", "延边卫视AM")
			addStream("yanbianfm", "延边卫视FM")
		
		xbmcplugin.endOfDirectory(addon_handle)

	elif param.startswith("?category="):
		category = param[10:]
		
		def addStream(channelID, channelName):
			li = xbmcgui.ListItem(channelName, iconImage=addon_path + "/resources/media/" + channelID + ".png")
			xbmcplugin.addDirectoryItem(handle=addon_handle, url=sys.argv[0] + "?stream=" + channelID, listitem=li)
		
		if category == "yangshi":
			addStream("cctv1", "CCTV-1 综合")
			addStream("cctv2", "CCTV-2 财经")
			addStream("cctv3", "CCTV-3 综艺")
			addStream("cctv4", "CCTV-4 (亚洲)")
			addStream("cctveurope", "CCTV-4 (欧洲)")
			addStream("cctvamerica", "CCTV-4 (美洲)")
			addStream("cctv5", "CCTV-5 体育")
			addStream("cctv6", "CCTV-6 电影")
			addStream("cctv7", "CCTV-7 军事 农业")
			addStream("cctv8", "CCTV-8 电视剧")
			addStream("cctvjilu", "CCTV-9 纪录")
			addStream("cctvdoc", "CCTV-9 纪录(英)")
			addStream("cctv10", "CCTV-10 科教")
			addStream("cctv11", "CCTV-11 戏曲")
			addStream("cctv12", "CCTV-12 社会与法")
			addStream("cctv13", "CCTV-13 新闻")
			addStream("cctvchild", "CCTV-14 少儿")
			addStream("cctv15", "CCTV-15 音乐")
			addStream("cctv9", "CCTV-NEWS")
			addStream("cctv5plus", "CCTV体育赛事")
		if category == "weishi":
			addStream("anhui", "安徽卫视")
			addStream("btv1", "北京卫视")
			addStream("bingtuan", "兵团卫视")
			addStream("chongqing", "重庆卫视")
			addStream("dongfang", "东方卫视")
			addStream("dongnan", "东南卫视")
			addStream("gansu", "甘肃卫视")
			addStream("guangdong", "广东卫视")
			addStream("guangxi", "广西卫视")
			addStream("guizhou", "贵州卫视")
			addStream("hebei", "河北卫视")
			addStream("henan", "河南卫视")
			addStream("heilongjiang", "黑龙江卫视")
			addStream("hubei", "湖北卫视")
			addStream("jilin", "吉林卫视")
			addStream("jiangxi", "江西卫视")
			addStream("kangba", "康巴卫视")
			addStream("liaoning", "辽宁卫视")
			addStream("travel", "旅游卫视")
			addStream("neimenggu", "内蒙古卫视")
			addStream("ningxia", "宁夏卫视")
			addStream("qinghai", "青海卫视")
			addStream("shandong", "山东卫视")
			addStream("sdetv", "山东教育台")
			addStream("shenzhen", "深圳卫视")
			addStream("shan1xi", "山西卫视")
			addStream("shan3xi", "陕西卫视")
			addStream("shenzhen", "深圳卫视")
			addStream("sichuan", "四川卫视")
			addStream("tianjin", "天津卫视")
			addStream("xizang", "西藏卫视")
			addStream("xiamen", "厦门卫视")
			addStream("xianggangweishi", "香港卫视")
			addStream("xinjiang", "新疆卫视")
			addStream("yanbian", "延边卫视")
			addStream("yunnan", "云南卫视")
			addStream("zhejiang", "浙江卫视")
		
		if category == "shuzi":
			addStream("zhongxuesheng", "CCTV中学生")
			addStream("xinkedongman", "CCTV新科动漫")
			addStream("zhinan", "CCTV电视指南")
		
		if category == "chengshi":
			def addCity(cityID, cityName):
				li = xbmcgui.ListItem(cityName, iconImage=addon_path + "/resources/media/" + cityID + ".png")
				xbmcplugin.addDirectoryItem(handle=addon_handle, url=sys.argv[0] + "?city=" + cityID, listitem=li, isFolder=True)
			
			addCity("anhui", "Anhui 安徽")
			addCity("beijing", "Beijing 北京")
			addCity("fujian", "Fujian 福建")
			addCity("gansu", "Gansu 甘肃")
			addCity("guangdong", "Guangdong 广东")
			addCity("guangxi", "Guangxi 广西")
			addCity("hebei", "Hebei 河北")
			addCity("heilongjiang", "Heilongjiang 黑龙江")
			addCity("hubei", "Hubei 湖北")
			addCity("jilin", "Jilin 吉林")
			addCity("jiangsu", "Jiangsu 江苏")
			addCity("jiangxi", "Jiangxi 江西")
			addCity("liaoning", "Liaoning 辽宁")
			addCity("neimenggu", "Inner Mongolia 内蒙古")
			addCity("shandong", "Shandong 山东")
			addCity("shaanxi", "Shaanxi 陕西")
			addCity("shanghai", "Shanghai 上海")
			addCity("sichuan", "Sichuan 四川")
			addCity("tianjin", "Tianjin 天津")
			addCity("xizang", "Tibet 西藏")
			addCity("xinjiang", "Xinjiang 新疆")
			addCity("yunnan", "Yunnan 云南")
			addCity("zhejiang", "Zhejiang 浙江")
		
		xbmcplugin.endOfDirectory(addon_handle)
		
	else:
		def addCategory(categoryID, categoryName):
				li = xbmcgui.ListItem(categoryName)
				xbmcplugin.addDirectoryItem(handle=addon_handle, url=sys.argv[0] + "?category=" + categoryID, listitem=li, isFolder=True)
		
		addCategory("yangshi", "National Channels 央视频道")
		addCategory("weishi", "Provincial Channels 卫视频道")
		addCategory("shuzi", "Digital Channels 数字频道")
		addCategory("chengshi", "City-based Channels 城市频道")
		
		xbmcplugin.endOfDirectory(addon_handle)
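
main() above assumes that param, addon_handle and addon_path were already derived from sys.argv at module level. A plausible reconstruction of that boilerplate, following the names used in the example (the setup itself is an assumption, since it is not part of the excerpt):

import sys
import xbmcaddon

addon = xbmcaddon.Addon()
addon_path = addon.getAddonInfo("path")

# Kodi invokes the plugin as: <base url> <handle> <query string>
addon_handle = int(sys.argv[1])
param = sys.argv[2]  # e.g. "?stream=cctv1", "?city=beijing", or "" for the root menu

if __name__ == "__main__":
    main()  # the function shown in the example above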

Example 16

Project: jobTree
Source File: jobTreeSlave.py
View license
def main():
    sys.path.append(sys.argv[1])
    sys.argv.remove(sys.argv[1])
    
    #Now we can import all the stuff..
    from sonLib.bioio import getBasicOptionParser
    from sonLib.bioio import parseBasicOptions
    from sonLib.bioio import logger
    from sonLib.bioio import addLoggingFileHandler, redirectLoggerStreamHandlers
    from sonLib.bioio import setLogLevel
    from sonLib.bioio import getTotalCpuTime, getTotalCpuTimeAndMemoryUsage
    from sonLib.bioio import getTempDirectory
    from sonLib.bioio import makeSubDir
    from jobTree.src.job import Job
    from jobTree.src.master import getEnvironmentFileName, getConfigFileName, listChildDirs, getTempStatsFile, setupJobAfterFailure
    from sonLib.bioio import system
    
    ########################################## 
    #Input args
    ##########################################
    
    jobTreePath = sys.argv[1]
    jobFile = sys.argv[2]
    
    ##########################################
    #Load the environment for the job
    ##########################################
    
    #First load the environment for the job.
    fileHandle = open(getEnvironmentFileName(jobTreePath), 'r')
    environment = cPickle.load(fileHandle)
    fileHandle.close()
    for i in environment:
        if i not in ("TMPDIR", "TMP", "HOSTNAME", "HOSTTYPE"):
            os.environ[i] = environment[i]
    # sys.path is used by __import__ to find modules
    if "PYTHONPATH" in environment:
        for e in environment["PYTHONPATH"].split(':'):
            if e != '':
                sys.path.append(e)
    #os.environ = environment
    #os.putenv(key, value)
        
    ##########################################
    #Setup the temporary directories.
    ##########################################
        
    #Dir to put all the temp files in.
    localSlaveTempDir = getTempDirectory()
    localTempDir = makeSubDir(os.path.join(localSlaveTempDir, "localTempDir"))
    
    ##########################################
    #Setup the logging
    ##########################################
    
    #Setup the logging. This is mildly tricky because we don't just want to
    #redirect stdout and stderr for this Python process; we want to redirect it
    #for this process and all children. Consequently, we can't just replace
    #sys.stdout and sys.stderr; we need to mess with the underlying OS-level
    #file descriptors. See <http://stackoverflow.com/a/11632982/402891>
    
    #When we start, standard input is file descriptor 0, standard output is
    #file descriptor 1, and standard error is file descriptor 2.

    #What file do we want to point FDs 1 and 2 to?    
    tempSlaveLogFile = os.path.join(localSlaveTempDir, "slave_log.txt")
    
    #Save the original stdout and stderr (by opening new file descriptors to the
    #same files)
    origStdOut = os.dup(1)
    origStdErr = os.dup(2)
    
    #Open the file to send stdout/stderr to.
    logDescriptor = os.open(tempSlaveLogFile, os.O_WRONLY | os.O_CREAT | os.O_APPEND)

    #Replace standard output with a descriptor for the log file
    os.dup2(logDescriptor, 1)
    
    #Replace standard error with a descriptor for the log file
    os.dup2(logDescriptor, 2)
    
    #Since we only opened the file once, all the descriptors duped from the
    #original will share offset information, and won't clobber each others'
    #writes. See <http://stackoverflow.com/a/5284108/402891>. This shouldn't
    #matter, since O_APPEND seeks to the end of the file before every write, but
    #maybe there's something odd going on...
    
    #Close the descriptor we used to open the file
    os.close(logDescriptor)
    
    for handler in list(logger.handlers): #Remove old handlers
        logger.removeHandler(handler)
    
    #Add the new handler. The sys.stderr stream has been redirected by swapping
    #the file descriptor out from under it.
    logger.addHandler(logging.StreamHandler(sys.stderr))

    #Put a message at the top of the log, just to make sure it's working.
    print "---JOBTREE SLAVE OUTPUT LOG---"
    sys.stdout.flush()
    
    #Log the number of open file descriptors so we can tell if we're leaking
    #them.
    logger.debug("Next available file descriptor: {}".format(
        nextOpenDescriptor()))
    
    ##########################################
    #Parse input files
    ##########################################
    
    config = ET.parse(getConfigFileName(jobTreePath)).getroot()
    setLogLevel(config.attrib["log_level"])
    job = Job.read(jobFile)
    job.messages = [] #This is the only way to stop messages logging twice, as they are read only in the master
    job.children = [] #Similarly, this is where old children are flushed out.
    job.write() #Update status, to avoid reissuing children after running a follow on below.
    if os.path.exists(job.getLogFileName()): #This cleans the old log file
        os.remove(job.getLogFileName())
    logger.info("Parsed arguments and set up logging")

     #Try loop for slave logging
    ##########################################
    #Setup the stats, if requested
    ##########################################
    
    if config.attrib.has_key("stats"):
        startTime = time.time()
        startClock = getTotalCpuTime()
        stats = ET.Element("slave")
    else:
        stats = None
    
    ##########################################
    #The max time 
    ##########################################
    
    maxTime = float(config.attrib["job_time"])
    assert maxTime > 0.0
    assert maxTime < sys.maxint

    ##########################################
    #Slave log file trapped from here on in
    ##########################################

    slaveFailed = False
    try:
        
        ##########################################
        #The next job
        ##########################################
        
        def globalTempDirName(job, depth):
            return job.getGlobalTempDirName() + str(depth)
        
        command, memoryAvailable, cpuAvailable, depth = job.followOnCommands[-1]
        defaultMemory = int(config.attrib["default_memory"])
        defaultCpu = int(config.attrib["default_cpu"])
        assert len(job.children) == 0
        
        startTime = time.time() 
        while True:
            job.followOnCommands.pop()
            
            ##########################################
            #Global temp dir
            ##########################################
            
            globalTempDir = makeSubDir(globalTempDirName(job, depth))
            i = 1
            while os.path.isdir(globalTempDirName(job, depth+i)):
                system("rm -rf %s" % globalTempDirName(job, depth+i))
                i += 1
                
            ##########################################
            #Old children, not yet deleted
            #
            #These may exist because of the lazy cleanup
            #we do
            ##########################################
        
            for childDir in listChildDirs(job.jobDir):
                logger.debug("Cleaning up old child %s" % childDir)
                system("rm -rf %s" % childDir)
        
            ##########################################
            #Run the job
            ##########################################
        
            if command != "": #Not a stub
                if command[:11] == "scriptTree ":
                    ##########################################
                    #Run the target
                    ##########################################
                    
                    loadStack(command).execute(job=job, stats=stats,
                                    localTempDir=localTempDir, globalTempDir=globalTempDir, 
                                    memoryAvailable=memoryAvailable, cpuAvailable=cpuAvailable, 
                                    defaultMemory=defaultMemory, defaultCpu=defaultCpu, depth=depth)
            
                else: #Is another command
                    system(command) 
            
            ##########################################
            #Cleanup/reset a successful job/checkpoint
            ##########################################
            
            job.remainingRetryCount = int(config.attrib["try_count"])
            system("rm -rf %s/*" % (localTempDir))
            job.update(depth=depth, tryCount=job.remainingRetryCount)
            
            ##########################################
            #Establish if we can run another job
            ##########################################
            
            if time.time() - startTime > maxTime:
                logger.info("We are breaking because the maximum time the job should run for has been exceeded")
                break
            
            #Deal with children
            if len(job.children) >= 1:  #We are going to have to return to the parent
                logger.info("No more jobs can run in series by this slave, its got %i children" % len(job.children))
                break
            
            if len(job.followOnCommands) == 0:
                logger.info("No more jobs can run by this slave as we have exhausted the follow ons")
                break
            
            #Get the next job and see if we have enough cpu and memory to run it..
            command, memory, cpu, depth = job.followOnCommands[-1]
            
            if memory > memoryAvailable:
                logger.info("We need more memory for the next job, so finishing")
                break
            if cpu > cpuAvailable:
                logger.info("We need more cpus for the next job, so finishing")
                break
            
            logger.info("Starting the next job")
        
        ##########################################
        #Finish up the stats
        ##########################################
        
        if stats != None:
            totalCpuTime, totalMemoryUsage = getTotalCpuTimeAndMemoryUsage()
            stats.attrib["time"] = str(time.time() - startTime)
            stats.attrib["clock"] = str(totalCpuTime - startClock)
            stats.attrib["memory"] = str(totalMemoryUsage)
            tempStatsFile = getTempStatsFile(jobTreePath)
            fileHandle = open(tempStatsFile + ".new", "w")
            ET.ElementTree(stats).write(fileHandle)
            fileHandle.close()
            os.rename(tempStatsFile + ".new", tempStatsFile) #This operation is atomic
        
        logger.info("Finished running the chain of jobs on this node, we ran for a total of %f seconds" % (time.time() - startTime))
    
    ##########################################
    #Where slave goes wrong
    ##########################################
    except: #Case that something goes wrong in slave
        traceback.print_exc()
        logger.critical("Exiting the slave because of a failed job on host %s", socket.gethostname())
        job = Job.read(jobFile)
        setupJobAfterFailure(job, config)
        job.write()
        slaveFailed = True

    ##########################################
    #Cleanup
    ##########################################
    
    #Close the slave logging
    #Flush at the Python level
    sys.stdout.flush()
    sys.stderr.flush()
    #Flush at the OS level
    os.fsync(1)
    os.fsync(2)
    
    #Close redirected stdout and replace with the original standard output.
    os.dup2(origStdOut, 1)
    
    #Close redirected stderr and replace with the original standard error.
    os.dup2(origStdErr, 2)
    
    #sys.stdout and sys.stderr don't need to be modified at all. We don't need
    #to call redirectLoggerStreamHandlers since they still log to sys.stderr
    
    #Close our extra handles to the original standard output and standard error
    #streams, so we don't leak file handles.
    os.close(origStdOut)
    os.close(origStdErr)
    
    #Now our file handles are in exactly the state they were in before.
    
    #Copy back the log file to the global dir, if needed
    if slaveFailed:
        truncateFile(tempSlaveLogFile)
        system("mv %s %s" % (tempSlaveLogFile, job.getLogFileName()))
    #Remove the temp dir
    system("rm -rf %s" % localSlaveTempDir)
    
    #This must happen after the log file is done with, else there is no place to put the log
    if (not slaveFailed) and len(job.followOnCommands) == 0 and len(job.children) == 0 and len(job.messages) == 0:
        ##########################################
        #Cleanup global files at the end of the chain
        ##########################################
        job.delete()            
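
Note how the slave consumes its first argument before doing anything else: sys.argv[1] is appended to sys.path so the bundled libraries become importable, then removed from sys.argv, which shifts every later argument one position to the left. The same consume-and-shift pattern in isolation (the example paths are made up):

import sys

# Suppose the slave was started as:
#   python jobTreeSlave.py /opt/lib /data/jobTree /data/jobTree/jobs/t0/job
sys.path.append(sys.argv[1])  # make the bundled libraries importable
sys.argv.remove(sys.argv[1])  # drop it: every later argument shifts left

jobTreePath = sys.argv[1]     # this was sys.argv[2] before the removal
jobFile = sys.argv[2]         # and this was sys.argv[3]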

Example 17

Project: pelisalacarta
Source File: launcher.py
View license
def run():
    logger.info("pelisalacarta.platformcode.launcher run")

    # Extract item from sys.argv
    if sys.argv[2]:
        item = Item().fromurl(sys.argv[2])

    # If no item, this is mainlist
    else:
        item = Item(channel="channelselector", action="getmainlist", viewmode="movie")

    logger.info("pelisalacarta.platformcode.launcher "+item.tostring())
    
    try:

        # If item has no action, stops here
        if item.action == "":
            logger.info("pelisalacarta.platformcode.launcher Item sin accion")
            return

        # Action for main menu in channelselector
        if item.action == "getmainlist":
            import channelselector
            itemlist = channelselector.getmainlist()

            # Check for updates only on first screen
            if config.get_setting("updatecheck2") == "true":
                logger.info("pelisalacarta.platformcode.launcher Check for plugin updates enabled")
                from core import updater
                
                try:
                    version = updater.checkforupdates()

                    if version:
                        platformtools.dialog_ok("Versión "+version+" disponible",
                                                "Ya puedes descargar la nueva versión del plugin\n"
                                                "desde el listado principal")

                        itemlist.insert(0, Item(title="Descargar version "+version, version=version, channel="updater",
                                                action="update", thumbnail=channelselector.get_thumbnail_path() +
                                                "Crystal_Clear_action_info.png"))
                except:
                    platformtools.dialog_ok("No se puede conectar", "No ha sido posible comprobar",
                                            "si hay actualizaciones")
                    logger.info("cpelisalacarta.platformcode.launcher Fallo al verificar la actualización")

            else:
                logger.info("pelisalacarta.platformcode.launcher Check for plugin updates disabled")

            platformtools.render_items(itemlist, item)

        # Action for updating plugin
        elif item.action == "update":

            from core import updater
            updater.update(item)
            if config.get_system_platform() != "xbox":
                import xbmc
                xbmc.executebuiltin("Container.Refresh")

        # Action for channel types on channelselector: movies, series, etc.
        elif item.action == "getchanneltypes":
            import channelselector
            itemlist = channelselector.getchanneltypes()

            platformtools.render_items(itemlist, item)

        # Action for channel listing on channelselector
        elif item.action == "filterchannels":
            import channelselector
            itemlist = channelselector.filterchannels(item.channel_type)

            platformtools.render_items(itemlist, item)

        # Special action for playing a video from the library
        elif item.action == "play_from_library":
            play_from_library(item)
            return

        # Action in certain channel specified in "action" and "channel" parameters
        else:

            # Entry point for a channel is the "mainlist" action, so here we check parental control
            if item.action == "mainlist":
                
                # Parental control
                can_open_channel = False

                # If it is an adult channel, and user has configured pin, asks for it
                if channeltools.is_adult(item.channel) and config.get_setting("adult_pin") != "":

                    tecleado = platformtools.dialog_input("", "PIN para canales de adultos", True)
                    if tecleado is not None:
                        if tecleado == config.get_setting("adult_pin"):
                            can_open_channel = True

                # All the other cases can open the channel
                else:
                    can_open_channel = True

                if not can_open_channel:
                    return

            # Checks if channel exists
            channel_file = os.path.join(config.get_runtime_path(), 'channels', item.channel+".py")
            logger.info("pelisalacarta.platformcode.launcher channel_file=%s" % channel_file)

            channel = None

            if item.channel in ["personal", "personal2", "personal3", "personal4", "personal5"]:
                import channels.personal as channel

            elif os.path.exists(channel_file):
                try:
                    channel = __import__('channels.%s' % item.channel, None, None, ["channels.%s" % item.channel])
                except ImportError:
                    exec "import channels."+item.channel+" as channel"

            logger.info("pelisalacarta.platformcode.launcher running channel "+channel.__name__+" "+channel.__file__)

            # Special play action
            if item.action == "play":
                logger.info("pelisalacarta.platformcode.launcher play")
                # logger.debug("item_toPlay: " + "\n" + item.tostring('\n'))

                # First checks if channel has a "play" function
                if hasattr(channel, 'play'):
                    logger.info("pelisalacarta.platformcode.launcher executing channel 'play' method")
                    itemlist = channel.play(item)
                    b_favourite = item.isFavourite
                    # Play should return a list of playable URLs
                    if len(itemlist) > 0:
                        item = itemlist[0]
                        if b_favourite:
                            item.isFavourite = True
                        platformtools.play_video(item)

                    # If not, shows user an error message
                    else:
                        platformtools.dialog_ok("plugin", "No hay nada para reproducir")

                # If the channel doesn't have a "play" function, use the standard play from platformtools
                else:
                    logger.info("pelisalacarta.platformcode.launcher executing core 'play' method")
                    platformtools.play_video(item)

            # Special action for findvideos, where the plugin looks for known urls
            elif item.action == "findvideos":

                # First checks if channel has a "findvideos" function
                if hasattr(channel, 'findvideos'):
                    itemlist = getattr(channel, item.action)(item)

                # If not, uses the generic findvideos function
                else:
                    logger.info("pelisalacarta.platformcode.launcher no channel 'findvideos' method, "
                                "executing core method")
                    from core import servertools
                    itemlist = servertools.find_video_items(item)

                if config.get_setting('filter_servers') == 'true':
                    itemlist = filtered_servers(itemlist)

                from platformcode import subtitletools
                subtitletools.saveSubtitleName(item)

                platformtools.render_items(itemlist, item)

            # Special action for adding a movie to the library
            elif item.action == "add_pelicula_to_library":
                library.add_pelicula_to_library(item)

            # Special action for adding a serie to the library
            elif item.action == "add_serie_to_library":
                library.add_serie_to_library(item, channel)

            # Special action for downloading all episodes from a serie
            elif item.action == "download_all_episodes":
                downloadtools.download_all_episodes(item, channel)

            # Special action for searching, first asks for the words then call the "search" function
            elif item.action == "search":
                logger.info("pelisalacarta.platformcode.launcher search")
                
                tecleado = platformtools.dialog_input("")
                if tecleado is not None:
                    tecleado = tecleado.replace(" ", "+")
                    # TODO: review 'personal.py' because it has no search function and would cause problems
                    itemlist = channel.search(item, tecleado)
                else:
                    itemlist = []
                
                platformtools.render_items(itemlist, item)

            # For all other actions
            else:
                logger.info("pelisalacarta.platformcode.launcher executing channel '"+item.action+"' method")
                itemlist = getattr(channel, item.action)(item)
                platformtools.render_items(itemlist, item)

    except urllib2.URLError, e:
        import traceback
        logger.error("pelisalacarta.platformcode.launcher "+traceback.format_exc())

        # Grab inner and third party errors
        if hasattr(e, 'reason'):
            logger.info("pelisalacarta.platformcode.launcher Razon del error, codigo: "+str(e.reason[0])+", Razon: " +
                        str(e.reason[1]))
            texto = config.get_localized_string(30050)  # "No se puede conectar con el sitio web"
            platformtools.dialog_ok("plugin", texto)

        # Grab server response errors
        elif hasattr(e, 'code'):
            logger.info("pelisalacarta.platformcode.launcher codigo de error HTTP : %d" % e.code)
            # "El sitio web no funciona correctamente (error http %d)"
            platformtools.dialog_ok("plugin", config.get_localized_string(30051) % e.code)
    
    except:
        import traceback
        logger.error("pelisalacarta.platformcode.launcher "+traceback.format_exc())
        
        patron = 'File "'+os.path.join(config.get_runtime_path(), "channels", "").replace("\\", "\\\\")+'([^.]+)\.py"'
        canal = scrapertools.find_single_match(traceback.format_exc(), patron)
        
        try:
            import xbmc
            xbmc_version = int(xbmc.getInfoLabel("System.BuildVersion").split(".", 1)[0])
            if xbmc_version > 13:
                log_name = "kodi.log"
            else:
                log_name = "xbmc.log"
            log_message = "Ruta: "+xbmc.translatePath("special://logpath")+log_name
        except:
            log_message = ""

        if canal:
            platformtools.dialog_ok(
                "Error inesperado en el canal " + canal,
                "Puede deberse a un fallo de conexión, la web del canal "
                "ha cambiado su estructura, o un error interno de pelisalacarta.",
                "Para saber más detalles, consulta el log.", log_message)
        else:
            platformtools.dialog_ok(
                "Se ha producido un error en pelisalacarta",
                "Comprueba el log para ver mas detalles del error.",
                log_message)
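
Here the entire Item travels through sys.argv[2]: the caller serializes it with tourl() when building the plugin URL, and run() rebuilds it with Item().fromurl(sys.argv[2]). A minimal sketch of such a round-trip; the base64-over-JSON encoding is only an assumption for illustration, since the real Item class defines its own serialization:

import base64
import json
import sys

class Item(object):
    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)

    def tourl(self):
        # Pack all attributes into a string that is safe inside a plugin URL
        return base64.urlsafe_b64encode(json.dumps(self.__dict__))

    def fromurl(self, url):
        self.__dict__.update(json.loads(base64.urlsafe_b64decode(url)))
        return self

# Caller side: embed the serialized item in the plugin invocation
item_url = Item(channel="example", action="play").tourl()

# Receiver side, as in run() above: rebuild the item from sys.argv[2]
if len(sys.argv) > 2 and sys.argv[2]:
    item = Item().fromurl(sys.argv[2])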

Example 18

Project: termite-data-server
Source File: widget.py
View license
def console():
    """ Defines the behavior of the console web2py execution """
    import optparse
    import textwrap

    usage = "python web2py.py"

    description = """\
    web2py Web Framework startup script.
    ATTENTION: unless a password is specified (-a 'passwd') web2py will
    attempt to run a GUI. In this case command line options are ignored."""

    description = textwrap.dedent(description)

    parser = optparse.OptionParser(
        usage, None, optparse.Option, ProgramVersion)

    parser.description = description

    msg = ('IP address of the server (e.g., 127.0.0.1 or ::1); '
           'Note: This value is ignored when using the \'interfaces\' option.')
    parser.add_option('-i',
                      '--ip',
                      default='127.0.0.1',
                      dest='ip',
                      help=msg)

    parser.add_option('-p',
                      '--port',
                      default='8000',
                      dest='port',
                      type='int',
                      help='port of server (8000)')

    msg = ('password to be used for administration '
           '(use -a "<recycle>" to reuse the last password)')
    parser.add_option('-a',
                      '--password',
                      default='<ask>',
                      dest='password',
                      help=msg)

    parser.add_option('-c',
                      '--ssl_certificate',
                      default='',
                      dest='ssl_certificate',
                      help='file that contains ssl certificate')

    parser.add_option('-k',
                      '--ssl_private_key',
                      default='',
                      dest='ssl_private_key',
                      help='file that contains ssl private key')

    msg = ('Use this file containing the CA certificate to validate X509 '
           'certificates from clients')
    parser.add_option('--ca-cert',
                      action='store',
                      dest='ssl_ca_certificate',
                      default=None,
                      help=msg)

    parser.add_option('-d',
                      '--pid_filename',
                      default='httpserver.pid',
                      dest='pid_filename',
                      help='file to store the pid of the server')

    parser.add_option('-l',
                      '--log_filename',
                      default='httpserver.log',
                      dest='log_filename',
                      help='file to log connections')

    parser.add_option('-n',
                      '--numthreads',
                      default=None,
                      type='int',
                      dest='numthreads',
                      help='number of threads (deprecated)')

    parser.add_option('--minthreads',
                      default=None,
                      type='int',
                      dest='minthreads',
                      help='minimum number of server threads')

    parser.add_option('--maxthreads',
                      default=None,
                      type='int',
                      dest='maxthreads',
                      help='maximum number of server threads')

    parser.add_option('-s',
                      '--server_name',
                      default=socket.gethostname(),
                      dest='server_name',
                      help='server name for the web server')

    msg = 'max number of queued requests when server unavailable'
    parser.add_option('-q',
                      '--request_queue_size',
                      default='5',
                      type='int',
                      dest='request_queue_size',
                      help=msg)

    parser.add_option('-o',
                      '--timeout',
                      default='10',
                      type='int',
                      dest='timeout',
                      help='timeout for individual request (10 seconds)')

    parser.add_option('-z',
                      '--shutdown_timeout',
                      default='5',
                      type='int',
                      dest='shutdown_timeout',
                      help='timeout on shutdown of server (5 seconds)')

    parser.add_option('--socket-timeout',
                      default=5,
                      type='int',
                      dest='socket_timeout',
                      help='timeout for socket (5 seconds)')

    parser.add_option('-f',
                      '--folder',
                      default=os.getcwd(),
                      dest='folder',
                      help='folder from which to run web2py')

    parser.add_option('-v',
                      '--verbose',
                      action='store_true',
                      dest='verbose',
                      default=False,
                      help='increase --test verbosity')

    parser.add_option('-Q',
                      '--quiet',
                      action='store_true',
                      dest='quiet',
                      default=False,
                      help='disable all output')

    msg = ('set debug output level (0-100, 0 means all, 100 means none; '
           'default is 30)')
    parser.add_option('-D',
                      '--debug',
                      dest='debuglevel',
                      default=30,
                      type='int',
                      help=msg)

    msg = ('run web2py in interactive shell or IPython (if installed) with '
           'specified appname (if app does not exist it will be created). '
           'APPNAME like a/c/f (c,f optional)')
    parser.add_option('-S',
                      '--shell',
                      dest='shell',
                      metavar='APPNAME',
                      help=msg)

    msg = ('run web2py in interactive shell or bpython (if installed) with '
           'specified appname (if app does not exist it will be created).\n'
           'Use combined with --shell')
    parser.add_option('-B',
                      '--bpython',
                      action='store_true',
                      default=False,
                      dest='bpython',
                      help=msg)

    msg = 'only use plain python shell; should be used with --shell option'
    parser.add_option('-P',
                      '--plain',
                      action='store_true',
                      default=False,
                      dest='plain',
                      help=msg)

    msg = ('auto import model files; default is False; should be used '
           'with --shell option')
    parser.add_option('-M',
                      '--import_models',
                      action='store_true',
                      default=False,
                      dest='import_models',
                      help=msg)

    msg = ('run PYTHON_FILE in web2py environment; '
           'should be used with --shell option')
    parser.add_option('-R',
                      '--run',
                      dest='run',
                      metavar='PYTHON_FILE',
                      default='',
                      help=msg)

    msg = ('run scheduled tasks for the specified apps: expects a list of '
           'app names as -K app1,app2,app3 '
           'or a list of app:groups as -K app1:group1:group2,app2:group1 '
           'to override specific group_names. (only strings, no spaces '
           'allowed). Requires a scheduler defined in the models')
    parser.add_option('-K',
                      '--scheduler',
                      dest='scheduler',
                      default=None,
                      help=msg)

    msg = 'run schedulers alongside webserver, needs -K app1 and -a too'
    parser.add_option('-X',
                      '--with-scheduler',
                      action='store_true',
                      default=False,
                      dest='with_scheduler',
                      help=msg)

    msg = ('run doctests in web2py environment; '
           'TEST_PATH like a/c/f (c,f optional)')
    parser.add_option('-T',
                      '--test',
                      dest='test',
                      metavar='TEST_PATH',
                      default=None,
                      help=msg)

    msg = 'trigger a cron run manually; usually invoked from a system crontab'
    parser.add_option('-C',
                      '--cron',
                      action='store_true',
                      dest='extcron',
                      default=False,
                      help=msg)

    msg = 'triggers the use of softcron'
    parser.add_option('--softcron',
                      action='store_true',
                      dest='softcron',
                      default=False,
                      help=msg)

    parser.add_option('-Y',
                      '--run-cron',
                      action='store_true',
                      dest='runcron',
                      default=False,
                      help='start the background cron process')

    parser.add_option('-J',
                      '--cronjob',
                      action='store_true',
                      dest='cronjob',
                      default=False,
                      help='identify cron-initiated command')

    parser.add_option('-L',
                      '--config',
                      dest='config',
                      default='',
                      help='config file')

    parser.add_option('-F',
                      '--profiler',
                      dest='profiler_dir',
                      default=None,
                      help='profiler dir')

    parser.add_option('-t',
                      '--taskbar',
                      action='store_true',
                      dest='taskbar',
                      default=False,
                      help='use web2py gui and run in taskbar (system tray)')

    parser.add_option('',
                      '--nogui',
                      action='store_true',
                      default=False,
                      dest='nogui',
                      help='text-only, no GUI')

    msg = ('should be followed by a list of arguments to be passed to script, '
           'to be used with -S, -A must be the last option')
    parser.add_option('-A',
                      '--args',
                      action='store',
                      dest='args',
                      default=None,
                      help=msg)

    parser.add_option('--no-banner',
                      action='store_true',
                      default=False,
                      dest='nobanner',
                      help='Do not print header banner')

    msg = ('listen on multiple addresses: '
           '"ip1:port1:key1:cert1:ca_cert1;ip2:port2:key2:cert2:ca_cert2;..." '
           '(:key:cert:ca_cert optional; no spaces; IPv6 addresses must be in '
           'square [] brackets)')
    parser.add_option('--interfaces',
                      action='store',
                      dest='interfaces',
                      default=None,
                      help=msg)

    msg = 'runs web2py tests'
    parser.add_option('--run_system_tests',
                      action='store_true',
                      dest='run_system_tests',
                      default=False,
                      help=msg)

    msg = ('adds coverage reporting (needs --run_system_tests), '
           'python 2.7 and the coverage module installed. '
           'You can alter the default path setting the environmental '
           'var "COVERAGE_PROCESS_START". '
           'By default it takes gluon/tests/coverage.ini')
    parser.add_option('--with_coverage',
                      action='store_true',
                      dest='with_coverage',
                      default=False,
                      help=msg)

    if '-A' in sys.argv:
        k = sys.argv.index('-A')
    elif '--args' in sys.argv:
        k = sys.argv.index('--args')
    else:
        k = len(sys.argv)
    sys.argv, other_args = sys.argv[:k], sys.argv[k + 1:]
    (options, args) = parser.parse_args()
    options.args = [options.run] + other_args
    global_settings.cmd_options = options
    global_settings.cmd_args = args

    try:
        options.ips = list(set( # no duplicates
            [addrinfo[4][0] for addrinfo in getipaddrinfo(socket.getfqdn())
             if not is_loopback_ip_address(addrinfo=addrinfo)]))
    except socket.gaierror:
        options.ips = []

    if options.run_system_tests:
        run_system_tests(options)

    if options.quiet:
        capture = cStringIO.StringIO()
        sys.stdout = capture
        logger.setLevel(logging.CRITICAL + 1)
    else:
        logger.setLevel(options.debuglevel)

    if options.config[-3:] == '.py':
        options.config = options.config[:-3]

    if options.cronjob:
        global_settings.cronjob = True  # tell the world
        options.plain = True    # cronjobs use a plain shell
        options.nobanner = True
        options.nogui = True

    options.folder = os.path.abspath(options.folder)

    #  accept --interfaces in the form
    #  "ip1:port1:key1:cert1:ca_cert1;[ip2]:port2;ip3:port3:key3:cert3"
    #  (no spaces; optional key:cert indicate SSL)
    if isinstance(options.interfaces, str):
        interfaces = options.interfaces.split(';')
        options.interfaces = []
        for interface in interfaces:
            if interface.startswith('['):  # IPv6
                ip, if_remainder = interface.split(']', 1)
                ip = ip[1:]
                if_remainder = if_remainder[1:].split(':')
                if_remainder[0] = int(if_remainder[0])  # numeric port
                options.interfaces.append(tuple([ip] + if_remainder))
            else:  # IPv4
                interface = interface.split(':')
                interface[1] = int(interface[1])  # numeric port
                options.interfaces.append(tuple(interface))

    #  accepts --scheduler in the form
    #  "app:group1,group2,app2:group1"
    scheduler = []
    options.scheduler_groups = None
    if isinstance(options.scheduler, str):
        if ':' in options.scheduler:
            for opt in options.scheduler.split(','):
                scheduler.append(opt.split(':'))
            options.scheduler = ','.join([app[0] for app in scheduler])
            options.scheduler_groups = scheduler

    if options.numthreads is not None and options.minthreads is None:
        options.minthreads = options.numthreads  # legacy

    create_welcome_w2p()

    if not options.cronjob:
        # If the applications package does not exist yet, create it
        if not os.path.exists('applications/__init__.py'):
            write_file('applications/__init__.py', '')

    return options, args
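
console() also shows a useful trick for combining optparse with pass-through arguments: everything after -A/--args is sliced off sys.argv before parse_args() runs, so the option parser never sees it, and it is reattached afterwards as options.args. Reduced to its essentials:

import optparse
import sys

parser = optparse.OptionParser()
parser.add_option('-R', '--run', dest='run', default='')
parser.add_option('-A', '--args', action='store', dest='args', default=None)

# Everything after -A/--args belongs to the target script, not to web2py
if '-A' in sys.argv:
    k = sys.argv.index('-A')
elif '--args' in sys.argv:
    k = sys.argv.index('--args')
else:
    k = len(sys.argv)
sys.argv, other_args = sys.argv[:k], sys.argv[k + 1:]

(options, args) = parser.parse_args()
options.args = [options.run] + other_args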

Example 19

Project: termite-visualizations
Source File: widget.py
View license
def console():
    """ Defines the behavior of the console web2py execution """
    import optparse
    import textwrap

    usage = "python web2py.py"

    description = """\
    web2py Web Framework startup script.
    ATTENTION: unless a password is specified (-a 'passwd') web2py will
    attempt to run a GUI. In this case command line options are ignored."""

    description = textwrap.dedent(description)

    parser = optparse.OptionParser(
        usage, None, optparse.Option, ProgramVersion)

    parser.description = description

    msg = ('IP address of the server (e.g., 127.0.0.1 or ::1); '
           'Note: This value is ignored when using the \'interfaces\' option.')
    parser.add_option('-i',
                      '--ip',
                      default='127.0.0.1',
                      dest='ip',
                      help=msg)

    parser.add_option('-p',
                      '--port',
                      default='8000',
                      dest='port',
                      type='int',
                      help='port of server (8000)')

    msg = ('password to be used for administration '
           '(use -a "<recycle>" to reuse the last password)')
    parser.add_option('-a',
                      '--password',
                      default='<ask>',
                      dest='password',
                      help=msg)

    parser.add_option('-c',
                      '--ssl_certificate',
                      default='',
                      dest='ssl_certificate',
                      help='file that contains ssl certificate')

    parser.add_option('-k',
                      '--ssl_private_key',
                      default='',
                      dest='ssl_private_key',
                      help='file that contains ssl private key')

    msg = ('Use this file containing the CA certificate to validate X509 '
           'certificates from clients')
    parser.add_option('--ca-cert',
                      action='store',
                      dest='ssl_ca_certificate',
                      default=None,
                      help=msg)

    parser.add_option('-d',
                      '--pid_filename',
                      default='httpserver.pid',
                      dest='pid_filename',
                      help='file to store the pid of the server')

    parser.add_option('-l',
                      '--log_filename',
                      default='httpserver.log',
                      dest='log_filename',
                      help='file to log connections')

    parser.add_option('-n',
                      '--numthreads',
                      default=None,
                      type='int',
                      dest='numthreads',
                      help='number of threads (deprecated)')

    parser.add_option('--minthreads',
                      default=None,
                      type='int',
                      dest='minthreads',
                      help='minimum number of server threads')

    parser.add_option('--maxthreads',
                      default=None,
                      type='int',
                      dest='maxthreads',
                      help='maximum number of server threads')

    parser.add_option('-s',
                      '--server_name',
                      default=socket.gethostname(),
                      dest='server_name',
                      help='server name for the web server')

    msg = 'max number of queued requests when server unavailable'
    parser.add_option('-q',
                      '--request_queue_size',
                      default='5',
                      type='int',
                      dest='request_queue_size',
                      help=msg)

    parser.add_option('-o',
                      '--timeout',
                      default='10',
                      type='int',
                      dest='timeout',
                      help='timeout for individual request (10 seconds)')

    parser.add_option('-z',
                      '--shutdown_timeout',
                      default='5',
                      type='int',
                      dest='shutdown_timeout',
                      help='timeout on shutdown of server (5 seconds)')

    parser.add_option('--socket-timeout',
                      default=5,
                      type='int',
                      dest='socket_timeout',
                      help='timeout for socket (5 seconds)')

    parser.add_option('-f',
                      '--folder',
                      default=os.getcwd(),
                      dest='folder',
                      help='folder from which to run web2py')

    parser.add_option('-v',
                      '--verbose',
                      action='store_true',
                      dest='verbose',
                      default=False,
                      help='increase --test verbosity')

    parser.add_option('-Q',
                      '--quiet',
                      action='store_true',
                      dest='quiet',
                      default=False,
                      help='disable all output')

    msg = ('set debug output level (0-100, 0 means all, 100 means none; '
           'default is 30)')
    parser.add_option('-D',
                      '--debug',
                      dest='debuglevel',
                      default=30,
                      type='int',
                      help=msg)

    msg = ('run web2py in interactive shell or IPython (if installed) with '
           'specified appname (if app does not exist it will be created). '
           'APPNAME like a/c/f (c,f optional)')
    parser.add_option('-S',
                      '--shell',
                      dest='shell',
                      metavar='APPNAME',
                      help=msg)

    msg = ('run web2py in interactive shell or bpython (if installed) with '
           'specified appname (if app does not exist it will be created).\n'
           'Use in combination with --shell')
    parser.add_option('-B',
                      '--bpython',
                      action='store_true',
                      default=False,
                      dest='bpython',
                      help=msg)

    msg = 'only use plain python shell; should be used with --shell option'
    parser.add_option('-P',
                      '--plain',
                      action='store_true',
                      default=False,
                      dest='plain',
                      help=msg)

    msg = ('auto import model files; default is False; should be used '
           'with --shell option')
    parser.add_option('-M',
                      '--import_models',
                      action='store_true',
                      default=False,
                      dest='import_models',
                      help=msg)

    msg = ('run PYTHON_FILE in web2py environment; '
           'should be used with --shell option')
    parser.add_option('-R',
                      '--run',
                      dest='run',
                      metavar='PYTHON_FILE',
                      default='',
                      help=msg)

    msg = ('run scheduled tasks for the specified apps: expects a list of '
           'app names as -K app1,app2,app3 '
           'or a list of app:groups as -K app1:group1:group2,app2:group1 '
           'to override specific group_names (only strings, no spaces '
           'allowed). Requires a scheduler defined in the models.')
    parser.add_option('-K',
                      '--scheduler',
                      dest='scheduler',
                      default=None,
                      help=msg)

    msg = 'run schedulers alongside webserver, needs -K app1 and -a too'
    parser.add_option('-X',
                      '--with-scheduler',
                      action='store_true',
                      default=False,
                      dest='with_scheduler',
                      help=msg)

    msg = ('run doctests in web2py environment; '
           'TEST_PATH like a/c/f (c,f optional)')
    parser.add_option('-T',
                      '--test',
                      dest='test',
                      metavar='TEST_PATH',
                      default=None,
                      help=msg)

    msg = 'trigger a cron run manually; usually invoked from a system crontab'
    parser.add_option('-C',
                      '--cron',
                      action='store_true',
                      dest='extcron',
                      default=False,
                      help=msg)

    msg = 'triggers the use of softcron'
    parser.add_option('--softcron',
                      action='store_true',
                      dest='softcron',
                      default=False,
                      help=msg)

    parser.add_option('-Y',
                      '--run-cron',
                      action='store_true',
                      dest='runcron',
                      default=False,
                      help='start the background cron process')

    parser.add_option('-J',
                      '--cronjob',
                      action='store_true',
                      dest='cronjob',
                      default=False,
                      help='identify cron-initiated command')

    parser.add_option('-L',
                      '--config',
                      dest='config',
                      default='',
                      help='config file')

    parser.add_option('-F',
                      '--profiler',
                      dest='profiler_dir',
                      default=None,
                      help='profiler dir')

    parser.add_option('-t',
                      '--taskbar',
                      action='store_true',
                      dest='taskbar',
                      default=False,
                      help='use web2py gui and run in taskbar (system tray)')

    parser.add_option('',
                      '--nogui',
                      action='store_true',
                      default=False,
                      dest='nogui',
                      help='text-only, no GUI')

    msg = ('should be followed by a list of arguments to be passed to script, '
           'to be used with -S, -A must be the last option')
    parser.add_option('-A',
                      '--args',
                      action='store',
                      dest='args',
                      default=None,
                      help=msg)

    parser.add_option('--no-banner',
                      action='store_true',
                      default=False,
                      dest='nobanner',
                      help='Do not print header banner')

    msg = ('listen on multiple addresses: '
           '"ip1:port1:key1:cert1:ca_cert1;ip2:port2:key2:cert2:ca_cert2;..." '
           '(:key:cert:ca_cert optional; no spaces; IPv6 addresses must be in '
           'square [] brackets)')
    parser.add_option('--interfaces',
                      action='store',
                      dest='interfaces',
                      default=None,
                      help=msg)

    msg = 'runs web2py tests'
    parser.add_option('--run_system_tests',
                      action='store_true',
                      dest='run_system_tests',
                      default=False,
                      help=msg)

    msg = ('adds coverage reporting (needs --run_system_tests), '
           'python 2.7 and the coverage module installed. '
           'You can alter the default path setting the environmental '
           'var "COVERAGE_PROCESS_START". '
           'By default it takes gluon/tests/coverage.ini')
    parser.add_option('--with_coverage',
                      action='store_true',
                      dest='with_coverage',
                      default=False,
                      help=msg)

    if '-A' in sys.argv:
        k = sys.argv.index('-A')
    elif '--args' in sys.argv:
        k = sys.argv.index('--args')
    else:
        k = len(sys.argv)
    sys.argv, other_args = sys.argv[:k], sys.argv[k + 1:]
    (options, args) = parser.parse_args()
    options.args = [options.run] + other_args
    global_settings.cmd_options = options
    global_settings.cmd_args = args

    try:
        options.ips = list(set( # no duplicates
            [addrinfo[4][0] for addrinfo in getipaddrinfo(socket.getfqdn())
             if not is_loopback_ip_address(addrinfo=addrinfo)]))
    except socket.gaierror:
        options.ips = []

    if options.run_system_tests:
        run_system_tests(options)

    if options.quiet:
        capture = cStringIO.StringIO()
        sys.stdout = capture
        logger.setLevel(logging.CRITICAL + 1)
    else:
        logger.setLevel(options.debuglevel)

    if options.config[-3:] == '.py':
        options.config = options.config[:-3]

    if options.cronjob:
        global_settings.cronjob = True  # tell the world
        options.plain = True    # cronjobs use a plain shell
        options.nobanner = True
        options.nogui = True

    options.folder = os.path.abspath(options.folder)

    #  accept --interfaces in the form
    #  "ip1:port1:key1:cert1:ca_cert1;[ip2]:port2;ip3:port3:key3:cert3"
    #  (no spaces; optional key:cert indicate SSL)
    if isinstance(options.interfaces, str):
        interfaces = options.interfaces.split(';')
        options.interfaces = []
        for interface in interfaces:
            if interface.startswith('['):  # IPv6
                ip, if_remainder = interface.split(']', 1)
                ip = ip[1:]
                if_remainder = if_remainder[1:].split(':')
                if_remainder[0] = int(if_remainder[0])  # numeric port
                options.interfaces.append(tuple([ip] + if_remainder))
            else:  # IPv4
                interface = interface.split(':')
                interface[1] = int(interface[1])  # numeric port
                options.interfaces.append(tuple(interface))

    #  accepts --scheduler in the form
    #  "app:group1,group2,app2:group1"
    scheduler = []
    options.scheduler_groups = None
    if isinstance(options.scheduler, str):
        if ':' in options.scheduler:
            for opt in options.scheduler.split(','):
                scheduler.append(opt.split(':'))
            options.scheduler = ','.join([app[0] for app in scheduler])
            options.scheduler_groups = scheduler

    if options.numthreads is not None and options.minthreads is None:
        options.minthreads = options.numthreads  # legacy

    create_welcome_w2p()

    if not options.cronjob:
        # create the applications package marker if it is missing
        if not os.path.exists('applications/__init__.py'):
            write_file('applications/__init__.py', '')

    return options, args
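
The --interfaces handling above packs each listener specification into a tuple, with bracketed IPv6 addresses split off first. The same parsing as a self-contained sketch (hypothetical function name):

def parse_interfaces(spec):
    # Parse 'ip:port[:key:cert[:ca_cert]]' items separated by ';'.
    # IPv6 addresses must be wrapped in square brackets.
    result = []
    for item in spec.split(';'):
        if item.startswith('['):            # IPv6: [addr]:port...
            ip, rest = item.split(']', 1)
            parts = [ip[1:]] + rest[1:].split(':')
        else:                               # IPv4: addr:port...
            parts = item.split(':')
        parts[1] = int(parts[1])            # numeric port
        result.append(tuple(parts))
    return result

print(parse_interfaces('127.0.0.1:8000;[::1]:8443:key.pem:cert.pem'))
# [('127.0.0.1', 8000), ('::1', 8443, 'key.pem', 'cert.pem')]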

Example 20

Project: voltdb-client-python
Source File: voter.py
View license
def main():
    # prints required command line arguments if these were not passed in correctly
    if len(sys.argv) < 8:
        print "ClientVoter [number of contestants] [votes per phone number] [transactions per second] [client feedback interval (seconds)] [test duration (seconds)] [lag record delay (seconds)] [server list (comma separated)]"
        exit(1)

    # checks for validity of 1st command line argument
    # NOTE: 0th command line argument is the file name of this python program
    global max_contestant
    max_contestant = int(sys.argv[1])
    if max_contestant < 1 or max_contestant > 12:
        print "Number of contestants must be between 1 and 12"
        exit(1)

    # sets up global variables, including:
    global results_lock             # a lock for manipulating result data
    global params_lock              # a lock for manipulating the parameter list
    global requestPermit            # control rate at which requests are generated
    global availableThreads         # Allow a thread to indicate it is available to service a request
    global shouldContinue
    global min_execution_secs       # minimum number of seconds used to execute stored procedure
    global max_execution_secs       # maximum number of seconds used to execute stored procedure
    global tot_execution_secs       # total number of seconds used to execute stored procedures
    global tot_executions           # total number of executed stored procedures
    global tot_executions_latency   # total number of executed stored procedures as a measure for latency
    global check_latency            # boolean value: if false, latency is not yet being measured; if true, latency is being measured
    global latency_counter          # array used to show how many stored procedures fell into various latency time ranges (e.g. 200 stored procedures had latency between 25 and 50 milliseconds)
    global vote_result_counter      # array used to show how many votes got (0) Accepted, (1) Rejected due to invalid contestant, (2) Rejected due to voter being over the limit (of phone calls)
    global invocation_params
    results_lock = threading.Lock()
    params_lock = threading.Lock()
    requestPermit = threading.Semaphore(0)
    availableThreads = threading.Semaphore(0)
    invocation_params = []
    shouldContinue = True
    min_execution_secs = 999999
    max_execution_secs = -1
    tot_execution_secs = 0
    tot_executions = 0
    tot_executions_latency = 0
    check_latency = False
    latency_counter = [0, 0, 0, 0, 0, 0, 0, 0, 0]
    vote_result_counter = [0, 0, 0]

    # assigns values to other variables using command line arguments and creativity
    max_votes_per_phone_number = long(sys.argv[2])
    transactions_per_sec = long(sys.argv[3])
    transactions_per_milli = transactions_per_sec / float(1000) # uses millis, not secs
    client_feedback_interval_secs = long(sys.argv[4])
    test_duration_secs = long(sys.argv[5])
    lag_latency_secs = long(sys.argv[6])
    server_list = sys.argv[7]
    this_outstanding = 0
    last_outstanding = 0
    contestant_names = "Edwina Burnam,Tabatha Gehling,Kelly Clauss,Jessie Alloway,Alana Bregman,Jessie Eichman,Allie Rogalski,Nita Coster,Kurt Walser,Ericka Dieter,Loraine Nygren,Tania Mattioli"

    print "Allowing %d votes per phone number" % max_votes_per_phone_number
    print "Submitting %d SP calls/sec" % transactions_per_sec
    print "Feedback interval = %d second(s)" % client_feedback_interval_secs
    print "Running for %d second(s)" % test_duration_secs
    print "Latency not recorded for %d second(s)" % lag_latency_secs

    phone_number = None
    contestant_number = None
    transactions_this_sec = 0
    last_milli = time.time() * 1000 # uses millis, not secs
    this_milli = time.time() * 1000 # uses millis, not secs

    # parses the comma-separated list of servers specified on the command line
    volt_servers = server_list.rsplit(",")

    # invokes the stored procedure 'Initialize' to set up database with contestant names/numbers
    # uses quick parse hack to process the response of the invocation
    # contestant names/numbers entered into database if this is the first client to connect; otherwise, existing configuration info retrieved
    client = FastSerializer(volt_servers[0])
    initprocedure = VoltProcedure( client, "Initialize", [ FastSerializer.VOLTTYPE_INTEGER, FastSerializer.VOLTTYPE_STRING ])

    response = initprocedure.call( [max_contestant, contestant_names ] )

    # sets up start and end times of the voting process (and of latency measurements) based on command line-specified duration and delay values
    start_time = time.time()
    end_time = start_time + test_duration_secs
    current_time = start_time
    last_feedback_time = start_time
    num_sp_calls = 0
    start_recording_latency = start_time + lag_latency_secs

    thread_list = []

    for x in range(5):
        thread = doQueries(volt_servers[x % len(volt_servers)])
        thread.setDaemon(True)
        thread.start()
        thread_list.append(thread)  # keep a handle so the join below works

    # main while loop of voter client, used to invoke stored procedure 'Vote' repeatedly
    while end_time > current_time:
        availableThreads.acquire()
        num_sp_calls = num_sp_calls + 1

        # generates random 10-digit 'phone number' and not entirely random contestant number
        # the contestant number (as generated below) is most likely to be 2
        # NOTE: every 100 votes, the contestant number is made to be potentially invalid
        phone_number = random.randint(1000000000, 9999999999)
        contestant_number = (int(random.random() * max_contestant) * int(random.random() * max_contestant)) % max_contestant + 1
        if num_sp_calls % 100 == 0:
            contestant_number = (int(random.random() * max_contestant) + 1) * 2

        params_lock.acquire()
        invocation_params.append([ phone_number, contestant_number ])
        params_lock.release()
        requestPermit.release()

        # if more votes per second are happening than the command line argument allows: waits until enough time has passed to resume voting
        # this block uses millis, not secs
        transactions_this_sec = transactions_this_sec + 1
        if transactions_this_sec >= transactions_per_milli:
            this_milli = time.time() * 1000
            while this_milli <= last_milli:
                this_milli = time.time() * 1000
                time.sleep(0) #yield to other threads
            last_milli = this_milli
            transactions_this_sec = 0

        current_time = time.time()

        if not check_latency and current_time >= start_recording_latency:
            check_latency = True

        # if enough time has passed since last status report: reports current voting status (prints some data to console)
        if current_time >= (last_feedback_time + client_feedback_interval_secs):
            elapsed_time_secs_2 = time.time() - start_time
            last_feedback_time = current_time
            run_time_secs = end_time - start_time
            if tot_executions_latency == 0:
                tot_executions_latency = 1

            percent_complete = (float(elapsed_time_secs_2) / float(run_time_secs)) * 100
            if percent_complete > 100:
                percent_complete = 100

            # lock necessary, because global variables manipulated in this block may also be used by other threads (those responsible for invoking stored procedure 'Vote')
            # execution times are printed in millis, not secs
            results_lock.acquire()
            this_outstanding = num_sp_calls - tot_executions
            avg_latency = float(tot_execution_secs) * 1000 / float(tot_executions_latency)
            print "%f%% Complete | SP Calls: %d at %f SP/sec | outstanding = %d (%d) | min = %d | max = %d | avg = %f" % (percent_complete, num_sp_calls, (float(num_sp_calls) / float(elapsed_time_secs_2)), this_outstanding, (this_outstanding - last_outstanding), (min_execution_secs * 1000), (max_execution_secs * 1000), avg_latency)
            last_outstanding = this_outstanding
            results_lock.release()
    shouldContinue = False
    # joins outstanding threads (those responsible for invoking stored procedure 'Vote')
    for thread in thread_list:
        if thread.isAlive():
            thread.join()

    elapsed_time_secs = time.time() - start_time

    # prints statistics about the numbers of accepted/rejected votes
    print
    print "****************************************************************************"
    print "Voting Results"
    print "****************************************************************************"
    print " - Accepted votes = %d" % vote_result_counter[0]
    print " - Rejected votes (invalid contestant) = %d" % vote_result_counter[1]
    print " - Rejected votes (voter over limit) = %d" % vote_result_counter[2]
    print

    winner_name = "<<UNKNOWN>>"
    winner_votes = -1

    # invokes the stored procedure 'Results' to retrieve all stored tuples in database
    # uses quick parse hack to process the response of the invocation
    # analyzes the processed data to determine number of votes per contestant, winner, and number of votes for winner
    resultsprocedure = VoltProcedure( client, "Results", [])
    response = resultsprocedure.call([])
    table = response.tables[0]
    if len(table.tuples) == 0:
        print " - No results to report."
    else:
        for row in table.tuples:
            result_name = row[0]
            result_votes = row[2]
            print " - Contestant %s received %d vote(s)" % (result_name, result_votes)

            if result_votes > winner_votes:
                winner_votes = result_votes
                winner_name = result_name

    # prints winner data
    # prints statistics about average latency and distribution of stored procedures across ranges in latency
    print
    print " - Contestant %s was the winner with %d vote(s)" % (winner_name, winner_votes)
    print
    print "****************************************************************************"
    print "System Statistics"
    print "****************************************************************************"
    print " - Ran for %f second(s)" % elapsed_time_secs
    print " - Performed %d Stored Procedure call(s)" % num_sp_calls
    print " - At %f call(s) per second" % (num_sp_calls / elapsed_time_secs)
    print " - Average Latency = %f ms" % (float(tot_execution_secs) * 1000 / float(tot_executions_latency))
    print " - Latency   0ms -  25ms = %d" % latency_counter[0]
    print " - Latency  25ms -  50ms = %d" % latency_counter[1]
    print " - Latency  50ms -  75ms = %d" % latency_counter[2]
    print " - Latency  75ms - 100ms = %d" % latency_counter[3]
    print " - Latency 100ms - 125ms = %d" % latency_counter[4]
    print " - Latency 125ms - 150ms = %d" % latency_counter[5]
    print " - Latency 150ms - 175ms = %d" % latency_counter[6]
    print " - Latency 175ms - 200ms = %d" % latency_counter[7]
    print " - Latency 200ms+        = %d" % latency_counter[8]

Example 21

Project: onionbalance
Source File: settings.py
View license
def generate_config():
    """
    Entry point for interactive config file generation.
    """

    # Parse initial command line options
    args = parse_cmd_args().parse_args()

    # Simplify the logging output for the command line tool
    logger = log.get_config_generator_logger()

    logger.info("Beginning OnionBalance config generation.")

    # If CLI options have been provided, don't enter interactive mode
    # Crude check to see if any options beside --verbosity are set.
    verbose = '-v' in sys.argv

    if ((len(sys.argv) > 1 and not verbose) or len(sys.argv) > 3 or
            args.no_interactive):
        interactive = False
        logger.info("Entering non-interactive mode.")
    else:
        interactive = True
        logger.info("No command line arguments found, entering interactive "
                    "mode.")

    logger.setLevel(logging.__dict__[args.verbosity.upper()])

    # Check if the output directory exists; if not, try to create it
    output_path = None
    if interactive:
        output_path = input("Enter path to store generated config "
                            "[{}]: ".format(os.path.abspath(args.output)))
    output_path = output_path or args.output
    try:
        util.try_make_dir(output_path)
    except OSError:
        logger.exception("Problem encountered when trying to create the "
                         "output directory %s.", os.path.abspath(output_path))
    else:
        logger.debug("Created the output directory '%s'.",
                     os.path.abspath(output_path))

    # The output directory should be empty to avoid conflicts with
    # existing keys or config files.
    if not util.is_directory_empty(output_path):
        logger.error("The specified output directory is not empty. Please "
                     "delete any files and folders or specify another output "
                     "directory.")
        sys.exit(1)

    # Load master key if specified
    key_path = None
    if interactive:
        # Read key path from user
        key_path = input("Enter path to master service private key "
                         "(Leave empty to generate a key): ")
    key_path = args.key or key_path
    if key_path:
        if not os.path.isfile(key_path):
            logger.error("The specified master service private key '%s' "
                         "could not be found. Please confirm the path and "
                         "file permissions are correct.", key_path)
            sys.exit(1)
        else:
            # Try to load the specified private key file
            master_key = util.key_decrypt_prompt(key_path)
            if not master_key:
                logger.error("The specified master private key %s could not "
                             "be loaded.", os.path.abspath(master_key))
                sys.exit(1)
            else:
                master_onion_address = util.calc_onion_address(master_key)
                logger.info("Successfully loaded a master key for service "
                            "%s.onion.", master_onion_address)

    else:
        # No key specified, begin generating a new one.
        master_key = Crypto.PublicKey.RSA.generate(1024)
        master_onion_address = util.calc_onion_address(master_key)
        logger.debug("Created a new master key for service %s.onion.",
                     master_onion_address)

    # Finished loading/generating the master key; now try to generate keys
    # for each service instance
    num_instances = None
    if interactive:
        num_instances = input("Number of instance services to create "
                              "[{}]: ".format(args.num_instances))
        # Cast to int if a number was specified
        try:
            num_instances = int(num_instances)
        except ValueError:
            num_instances = None
    num_instances = num_instances or args.num_instances
    logger.debug("Creating %d service instances.", num_instances)

    tag = None
    if interactive:
        tag = input("Provide a tag name to group these instances "
                    "[{}]: ".format(args.tag))
    tag = tag or args.tag

    # Create HiddenServicePort line for instance torrc file
    service_virtual_port = None
    if interactive:
        service_virtual_port = input("Specify the service virtual port (for "
                                     "client connections) [{}]: ".format(
                                         args.service_virtual_port))
    service_virtual_port = service_virtual_port or args.service_virtual_port

    service_target = None
    if interactive:
        # In interactive mode, change default target to match the specified
        # virtual port
        default_service_target = u'127.0.0.1:{}'.format(service_virtual_port)
        service_target = input("Specify the service target IP and port (where "
                               "your service is listening) [{}]: ".format(
                                   default_service_target))
        service_target = service_target or default_service_target
    service_target = service_target or args.service_target
    torrc_port_line = u'HiddenServicePort {} {}'.format(service_virtual_port,
                                                        service_target)

    instances = []
    for i in range(0, num_instances):
        instance_key = Crypto.PublicKey.RSA.generate(1024)
        instance_address = util.calc_onion_address(instance_key)
        logger.debug("Created a key for instance %s.onion.",
                     instance_address)
        instances.append((instance_address, instance_key))

    # Write master service key to directory
    master_passphrase = None
    if interactive:
        master_passphrase = getpass.getpass(
            "Provide an optional password to encrypt the master private "
            "key (Not encrypted if no password is specified): ")
    master_passphrase = master_passphrase or args.password

    # Finished reading input, starting to write config files.
    master_dir = os.path.join(output_path, 'master')
    util.try_make_dir(master_dir)
    master_key_file = os.path.join(master_dir,
                                   '{}.key'.format(master_onion_address))
    with open(master_key_file, "wb") as key_file:
        os.chmod(master_key_file, 384)  # chmod 0600 in decimal
        key_file.write(master_key.exportKey(passphrase=master_passphrase))
        logger.debug("Successfully wrote master key to file %s.",
                     os.path.abspath(master_key_file))

    # Create YAML OnionBalance settings file for these instances
    service_data = {'key': '{}.key'.format(master_onion_address)}
    service_data['instances'] = [{'address': address,
                                  'name': '{}{}'.format(tag, i+1)} for
                                 i, (address, _) in enumerate(instances)]
    settings_data = {'services': [service_data]}
    config_yaml = yaml.dump(settings_data, default_flow_style=False)

    config_file_path = os.path.join(master_dir, 'config.yaml')
    with open(config_file_path, "w") as config_file:
        config_file.write(u"# OnionBalance Config File\n")
        config_file.write(config_yaml)
        logger.info("Wrote master service config file '%s'.",
                    os.path.abspath(config_file_path))

    # Write master service torrc
    master_torrc_path = os.path.join(master_dir, 'torrc-server')
    master_torrc_template = pkg_resources.resource_string(__name__,
                                                          'data/torrc-server')
    with open(master_torrc_path, "w") as master_torrc_file:
        master_torrc_file.write(master_torrc_template.decode('utf-8'))

    # Try to generate config files for each service instance
    for i, (instance_address, instance_key) in enumerate(instances):
        # Create a numbered directory for instance
        instance_dir = os.path.join(output_path, '{}{}'.format(tag, i+1))
        instance_key_dir = os.path.join(instance_dir, instance_address)
        util.try_make_dir(instance_key_dir)
        os.chmod(instance_key_dir, 1472)  # chmod 2700 in decimal

        instance_key_file = os.path.join(instance_key_dir, 'private_key')
        with open(instance_key_file, "wb") as key_file:
            os.chmod(instance_key_file, 384)  # chmod 0600 in decimal
            key_file.write(instance_key.exportKey())
            logger.debug("Successfully wrote key for instance %s.onion to "
                         "file.", instance_address)

        # Write torrc file for each instance
        instance_torrc = os.path.join(instance_dir, 'instance_torrc')
        instance_torrc_template = pkg_resources.resource_string(
            __name__, 'data/torrc-instance')
        with open(instance_torrc, "w") as torrc_file:
            torrc_file.write(instance_torrc_template.decode('utf-8'))
            # The ./ relative path prevents Tor from raising relative
            # path warnings. The relative path may need to be edited
            # manually to work on Windows systems.
            torrc_file.write(u"HiddenServiceDir {}\n".format(
                instance_address))
            torrc_file.write(u"{}\n".format(torrc_port_line))

    # Output final status message
    logger.info("Done! Successfully generated an OnionBalance config and %d "
                "instance keys for service %s.onion.",
                num_instances, master_onion_address)

    sys.exit(0)
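
The interactive-mode check in this example is a crude sniff of sys.argv rather than a parser feature: the tool stays interactive only when nothing beyond an optional -v flag was given. Restated as a small function (a sketch, not onionbalance's API):

import sys

def is_interactive(argv, no_interactive=False):
    # Interactive only if argv carries at most '-v LEVEL' besides the
    # program name, mirroring the heuristic used above.
    verbose = '-v' in argv
    if (len(argv) > 1 and not verbose) or len(argv) > 3 or no_interactive:
        return False
    return True

print(is_interactive(sys.argv))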

Example 22

View license
def main():
  device_sdk_version = get_device_sdk_version()
  if device_sdk_version < 18:
    legacy_script = os.path.join(os.path.dirname(sys.argv[0]), 'systrace-legacy.py')
    os.execv(legacy_script, sys.argv)

  usage = "Usage: %prog [options] [category1 [category2 ...]]"
  desc = "Example: %prog -b 32768 -t 15 gfx input view sched freq"
  parser = optparse.OptionParser(usage=usage, description=desc)
  parser.add_option('-o', dest='output_file', help='write HTML to FILE',
                    default='trace.html', metavar='FILE')
  parser.add_option('-t', '--time', dest='trace_time', type='int',
                    help='trace for N seconds', metavar='N')
  parser.add_option('-b', '--buf-size', dest='trace_buf_size', type='int',
                    help='use a trace buffer size of N KB', metavar='N')
  parser.add_option('-k', '--ktrace', dest='kfuncs', action='store',
                    help='specify a comma-separated list of kernel functions to trace')
  parser.add_option('-l', '--list-categories', dest='list_categories', default=False,
                    action='store_true', help='list the available categories and exit')
  parser.add_option('-a', '--app', dest='app_name', default=None, type='string',
                    action='store', help='enable application-level tracing for comma-separated ' +
                    'list of app cmdlines')
  parser.add_option('--no-fix-threads', dest='fix_threads', default=True,
                    action='store_false', help='don\'t fix missing or truncated thread names')

  parser.add_option('--link-assets', dest='link_assets', default=False,
                    action='store_true', help='link to original CSS or JS resources '
                    'instead of embedding them')
  parser.add_option('--from-file', dest='from_file', action='store',
                    help='read the trace from a file (compressed) rather than running a live trace')
  parser.add_option('--asset-dir', dest='asset_dir', default='trace-viewer',
                    type='string', help='')
  parser.add_option('-e', '--serial', dest='device_serial', type='string',
                    help='adb device serial number')

  options, args = parser.parse_args()

  if options.list_categories:
    atrace_args = ['adb', 'shell', 'atrace', '--list_categories']
    expect_trace = False
  elif options.from_file is not None:
    atrace_args = ['cat', options.from_file]
    expect_trace = True
  else:
    atrace_args = ['adb', 'shell', 'atrace', '-z']
    expect_trace = True

    if options.trace_time is not None:
      if options.trace_time > 0:
        atrace_args.extend(['-t', str(options.trace_time)])
      else:
        parser.error('the trace time must be a positive number')

    if options.trace_buf_size is not None:
      if options.trace_buf_size > 0:
        atrace_args.extend(['-b', str(options.trace_buf_size)])
      else:
        parser.error('the trace buffer size must be a positive number')

    if options.app_name is not None:
      atrace_args.extend(['-a', options.app_name])

    if options.kfuncs is not None:
      atrace_args.extend(['-k', options.kfuncs])

    atrace_args.extend(args)

    if options.fix_threads:
      atrace_args.extend([';', 'ps', '-t'])

  if atrace_args[0] == 'adb':
    add_adb_serial(atrace_args, options.device_serial)

  script_dir = os.path.dirname(os.path.abspath(sys.argv[0]))

  if options.link_assets:
    src_dir = os.path.join(script_dir, options.asset_dir, 'src')
    build_dir = os.path.join(script_dir, options.asset_dir, 'build')

    js_files, js_flattenizer, css_files, templates = get_assets(src_dir, build_dir)

    css = '\n'.join(linked_css_tag % (os.path.join(src_dir, f)) for f in css_files)
    js = '<script language="javascript">\n%s</script>\n' % js_flattenizer
    js += '\n'.join(linked_js_tag % (os.path.join(src_dir, f)) for f in js_files)

  else:
    css_filename = os.path.join(script_dir, flattened_css_file)
    js_filename = os.path.join(script_dir, flattened_js_file)
    css = compiled_css_tag % (open(css_filename).read())
    js = compiled_js_tag % (open(js_filename).read())
    templates = ''

  html_filename = options.output_file

  adb = subprocess.Popen(atrace_args, stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)

  result = None
  data = []

  # Read the text portion of the output and watch for the 'TRACE:' marker that
  # indicates the start of the trace data.
  while result is None:
    ready = select.select([adb.stdout, adb.stderr], [], [adb.stdout, adb.stderr])
    if adb.stderr in ready[0]:
      err = os.read(adb.stderr.fileno(), 4096)
      sys.stderr.write(err)
      sys.stderr.flush()
    if adb.stdout in ready[0]:
      out = os.read(adb.stdout.fileno(), 4096)
      parts = out.split('\nTRACE:', 1)

      txt = parts[0].replace('\r', '')
      if len(parts) == 2:
        # The '\nTRACE:' match stole the last newline from the text, so add it
        # back here.
        txt += '\n'
      sys.stdout.write(txt)
      sys.stdout.flush()

      if len(parts) == 2:
        data.append(parts[1])
        sys.stdout.write("downloading trace...")
        sys.stdout.flush()
        break

    result = adb.poll()

  # Read and buffer the data portion of the output.
  while True:
    ready = select.select([adb.stdout, adb.stderr], [], [adb.stdout, adb.stderr])
    keepReading = False
    if adb.stderr in ready[0]:
      err = os.read(adb.stderr.fileno(), 4096)
      if len(err) > 0:
        keepReading = True
        sys.stderr.write(err)
        sys.stderr.flush()
    if adb.stdout in ready[0]:
      out = os.read(adb.stdout.fileno(), 4096)
      if len(out) > 0:
        keepReading = True
        data.append(out)

    if result is not None and not keepReading:
      break

    result = adb.poll()

  if result == 0:
    if expect_trace:
      data = ''.join(data)

      # Collapse CRLFs that are added by adb shell.
      if data.startswith('\r\n'):
        data = data.replace('\r\n', '\n')

      # Skip the initial newline.
      data = data[1:]

      if not data:
        print >> sys.stderr, ('No data was captured.  Output file was not ' +
          'written.')
        sys.exit(1)
      else:
        # Indicate to the user that the data download is complete.
        print " done\n"

      # Extract the thread list dumped by ps.
      threads = {}
      if options.fix_threads:
        parts = data.split('USER     PID   PPID  VSIZE  RSS     WCHAN    PC        NAME', 1)
        if len(parts) == 2:
          data = parts[0]
          for line in parts[1].splitlines():
            cols = line.split(None, 8)
            if len(cols) == 9:
              tid = int(cols[1])
              name = cols[8]
              threads[tid] = name

      # Decompress and preprocess the data.
      out = zlib.decompress(data)
      if options.fix_threads:
        def repl(m):
          tid = int(m.group(2))
          if tid > 0:
            name = threads.get(tid)
            if name is None:
              name = m.group(1)
              if name == '<...>':
                name = '<' + str(tid) + '>'
              threads[tid] = name
            return name + '-' + m.group(2)
          else:
            return m.group(0)
        out = re.sub(r'^\s*(\S+)-(\d+)', repl, out, flags=re.MULTILINE)

      html_prefix = read_asset(script_dir, 'prefix.html')
      html_suffix = read_asset(script_dir, 'suffix.html')

      html_file = open(html_filename, 'w')
      html_file.write(html_prefix % (css, js, templates))
      html_out = out.replace('\n', '\\n\\\n')
      html_file.write(html_out)
      html_file.write(html_suffix)
      html_file.close()
      print "\n    wrote file://%s\n" % os.path.abspath(options.output_file)

  else: # i.e. result != 0
    print >> sys.stderr, 'adb returned error code %d' % result
    sys.exit(1)
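
Two sys.argv idioms open this example: sys.argv[0] locates the running script on disk, and os.execv replays the entire command line into a legacy fallback script. A minimal sketch of the re-exec pattern (the fallback filename is taken from the example; the wrapper function is hypothetical):

import os
import sys

def reexec_fallback(script_name):
    # Replace the current process with a sibling script, forwarding
    # the original command line unchanged; never returns on success.
    here = os.path.dirname(os.path.abspath(sys.argv[0]))
    os.execv(os.path.join(here, script_name), sys.argv)

# reexec_fallback('systrace-legacy.py')  # as done above for SDK < 18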

Example 23

View license
def main():
  device_sdk_version = get_device_sdk_version()
  if device_sdk_version < 18:
    legacy_script = os.path.join(os.path.dirname(sys.argv[0]), 'systrace-legacy.py')
    os.execv(legacy_script, sys.argv)

  usage = "Usage: %prog [options] [category1 [category2 ...]]"
  desc = "Example: %prog -b 32768 -t 15 gfx input view sched freq"
  parser = optparse.OptionParser(usage=usage, description=desc)
  parser.add_option('-o', dest='output_file', help='write HTML to FILE',
                    default='trace.html', metavar='FILE')
  parser.add_option('-t', '--time', dest='trace_time', type='int',
                    help='trace for N seconds', metavar='N')
  parser.add_option('-b', '--buf-size', dest='trace_buf_size', type='int',
                    help='use a trace buffer size of N KB', metavar='N')
  parser.add_option('-k', '--ktrace', dest='kfuncs', action='store',
                    help='specify a comma-separated list of kernel functions to trace')
  parser.add_option('-l', '--list-categories', dest='list_categories', default=False,
                    action='store_true', help='list the available categories and exit')
  parser.add_option('-a', '--app', dest='app_name', default=None, type='string',
                    action='store', help='enable application-level tracing for comma-separated ' +
                    'list of app cmdlines')
  parser.add_option('--no-fix-threads', dest='fix_threads', default=True,
                    action='store_false', help='don\'t fix missing or truncated thread names')

  parser.add_option('--link-assets', dest='link_assets', default=False,
                    action='store_true', help='link to original CSS or JS resources '
                    'instead of embedding them')
  parser.add_option('--from-file', dest='from_file', action='store',
                    help='read the trace from a file (compressed) rather than running a live trace')
  parser.add_option('--asset-dir', dest='asset_dir', default='trace-viewer',
                    type='string', help='')
  parser.add_option('-e', '--serial', dest='device_serial', type='string',
                    help='adb device serial number')

  options, args = parser.parse_args()

  if options.list_categories:
    atrace_args = ['adb', 'shell', 'atrace', '--list_categories']
    expect_trace = False
  elif options.from_file is not None:
    atrace_args = ['cat', options.from_file]
    expect_trace = True
  else:
    atrace_args = ['adb', 'shell', 'atrace', '-z']
    expect_trace = True

    if options.trace_time is not None:
      if options.trace_time > 0:
        atrace_args.extend(['-t', str(options.trace_time)])
      else:
        parser.error('the trace time must be a positive number')

    if options.trace_buf_size is not None:
      if options.trace_buf_size > 0:
        atrace_args.extend(['-b', str(options.trace_buf_size)])
      else:
        parser.error('the trace buffer size must be a positive number')

    if options.app_name is not None:
      atrace_args.extend(['-a', options.app_name])

    if options.kfuncs is not None:
      atrace_args.extend(['-k', options.kfuncs])

    atrace_args.extend(args)

    if options.fix_threads:
      atrace_args.extend([';', 'ps', '-t'])

  if atrace_args[0] == 'adb':
    add_adb_serial(atrace_args, options.device_serial)

  script_dir = os.path.dirname(os.path.abspath(sys.argv[0]))

  if options.link_assets:
    src_dir = os.path.join(script_dir, options.asset_dir, 'src')
    build_dir = os.path.join(script_dir, options.asset_dir, 'build')

    js_files, js_flattenizer, css_files, templates = get_assets(src_dir, build_dir)

    css = '\n'.join(linked_css_tag % (os.path.join(src_dir, f)) for f in css_files)
    js = '<script language="javascript">\n%s</script>\n' % js_flattenizer
    js += '\n'.join(linked_js_tag % (os.path.join(src_dir, f)) for f in js_files)

  else:
    css_filename = os.path.join(script_dir, flattened_css_file)
    js_filename = os.path.join(script_dir, flattened_js_file)
    css = compiled_css_tag % (open(css_filename).read())
    js = compiled_js_tag % (open(js_filename).read())
    templates = ''

  html_filename = options.output_file

  adb = subprocess.Popen(atrace_args, stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)

  result = None
  data = []

  # Read the text portion of the output and watch for the 'TRACE:' marker that
  # indicates the start of the trace data.
  while result is None:
    ready = select.select([adb.stdout, adb.stderr], [], [adb.stdout, adb.stderr])
    if adb.stderr in ready[0]:
      err = os.read(adb.stderr.fileno(), 4096)
      sys.stderr.write(err)
      sys.stderr.flush()
    if adb.stdout in ready[0]:
      out = os.read(adb.stdout.fileno(), 4096)
      parts = out.split('\nTRACE:', 1)

      txt = parts[0].replace('\r', '')
      if len(parts) == 2:
        # The '\nTRACE:' match stole the last newline from the text, so add it
        # back here.
        txt += '\n'
      sys.stdout.write(txt)
      sys.stdout.flush()

      if len(parts) == 2:
        data.append(parts[1])
        sys.stdout.write("downloading trace...")
        sys.stdout.flush()
        break

    result = adb.poll()

  # Read and buffer the data portion of the output.
  while True:
    ready = select.select([adb.stdout, adb.stderr], [], [adb.stdout, adb.stderr])
    keepReading = False
    if adb.stderr in ready[0]:
      err = os.read(adb.stderr.fileno(), 4096)
      if len(err) > 0:
        keepReading = True
        sys.stderr.write(err)
        sys.stderr.flush()
    if adb.stdout in ready[0]:
      out = os.read(adb.stdout.fileno(), 4096)
      if len(out) > 0:
        keepReading = True
        data.append(out)

    if result is not None and not keepReading:
      break

    result = adb.poll()

  if result == 0:
    if expect_trace:
      data = ''.join(data)

      # Collapse CRLFs that are added by adb shell.
      if data.startswith('\r\n'):
        data = data.replace('\r\n', '\n')

      # Skip the initial newline.
      data = data[1:]

      if not data:
        print >> sys.stderr, ('No data was captured.  Output file was not ' +
          'written.')
        sys.exit(1)
      else:
        # Indicate to the user that the data download is complete.
        print " done\n"

      # Extract the thread list dumped by ps.
      threads = {}
      if options.fix_threads:
        parts = data.split('USER     PID   PPID  VSIZE  RSS     WCHAN    PC        NAME', 1)
        if len(parts) == 2:
          data = parts[0]
          for line in parts[1].splitlines():
            cols = line.split(None, 8)
            if len(cols) == 9:
              tid = int(cols[1])
              name = cols[8]
              threads[tid] = name

      # Decompress and preprocess the data.
      out = zlib.decompress(data)
      if options.fix_threads:
        def repl(m):
          tid = int(m.group(2))
          if tid > 0:
            name = threads.get(tid)
            if name is None:
              name = m.group(1)
              if name == '<...>':
                name = '<' + str(tid) + '>'
              threads[tid] = name
            return name + '-' + m.group(2)
          else:
            return m.group(0)
        out = re.sub(r'^\s*(\S+)-(\d+)', repl, out, flags=re.MULTILINE)

      html_prefix = read_asset(script_dir, 'prefix.html')
      html_suffix = read_asset(script_dir, 'suffix.html')

      html_file = open(html_filename, 'w')
      html_file.write(html_prefix % (css, js, templates))
      html_out = out.replace('\n', '\\n\\\n')
      html_file.write(html_out)
      html_file.write(html_suffix)
      html_file.close()
      print "\n    wrote file://%s\n" % os.path.abspath(options.output_file)

  else: # i.e. result != 0
    print >> sys.stderr, 'adb returned error code %d' % result
    sys.exit(1)

Example 24

View license
def main():
  device_sdk_version = get_device_sdk_version()
  if device_sdk_version < 18:
    legacy_script = os.path.join(os.path.dirname(sys.argv[0]), 'systrace-legacy.py')
    os.execv(legacy_script, sys.argv)

  usage = "Usage: %prog [options] [category1 [category2 ...]]"
  desc = "Example: %prog -b 32768 -t 15 gfx input view sched freq"
  parser = optparse.OptionParser(usage=usage, description=desc)
  parser.add_option('-o', dest='output_file', help='write HTML to FILE',
                    default='trace.html', metavar='FILE')
  parser.add_option('-t', '--time', dest='trace_time', type='int',
                    help='trace for N seconds', metavar='N')
  parser.add_option('-b', '--buf-size', dest='trace_buf_size', type='int',
                    help='use a trace buffer size of N KB', metavar='N')
  parser.add_option('-k', '--ktrace', dest='kfuncs', action='store',
                    help='specify a comma-separated list of kernel functions to trace')
  parser.add_option('-l', '--list-categories', dest='list_categories', default=False,
                    action='store_true', help='list the available categories and exit')
  parser.add_option('-a', '--app', dest='app_name', default=None, type='string',
                    action='store', help='enable application-level tracing for comma-separated ' +
                    'list of app cmdlines')
  parser.add_option('--no-fix-threads', dest='fix_threads', default=True,
                    action='store_false', help='don\'t fix missing or truncated thread names')

  parser.add_option('--link-assets', dest='link_assets', default=False,
                    action='store_true', help='link to original CSS or JS resources '
                    'instead of embedding them')
  parser.add_option('--from-file', dest='from_file', action='store',
                    help='read the trace from a file (compressed) rather than running a live trace')
  parser.add_option('--asset-dir', dest='asset_dir', default='trace-viewer',
                    type='string', help='')
  parser.add_option('-e', '--serial', dest='device_serial', type='string',
                    help='adb device serial number')

  options, args = parser.parse_args()

  if options.list_categories:
    atrace_args = ['adb', 'shell', 'atrace', '--list_categories']
    expect_trace = False
  elif options.from_file is not None:
    atrace_args = ['cat', options.from_file]
    expect_trace = True
  else:
    atrace_args = ['adb', 'shell', 'atrace', '-z']
    expect_trace = True

    if options.trace_time is not None:
      if options.trace_time > 0:
        atrace_args.extend(['-t', str(options.trace_time)])
      else:
        parser.error('the trace time must be a positive number')

    if options.trace_buf_size is not None:
      if options.trace_buf_size > 0:
        atrace_args.extend(['-b', str(options.trace_buf_size)])
      else:
        parser.error('the trace buffer size must be a positive number')

    if options.app_name is not None:
      atrace_args.extend(['-a', options.app_name])

    if options.kfuncs is not None:
      atrace_args.extend(['-k', options.kfuncs])

    atrace_args.extend(args)

    if options.fix_threads:
      atrace_args.extend([';', 'ps', '-t'])

  if atrace_args[0] == 'adb':
    add_adb_serial(atrace_args, options.device_serial)

  script_dir = os.path.dirname(os.path.abspath(sys.argv[0]))

  if options.link_assets:
    src_dir = os.path.join(script_dir, options.asset_dir, 'src')
    build_dir = os.path.join(script_dir, options.asset_dir, 'build')

    js_files, js_flattenizer, css_files, templates = get_assets(src_dir, build_dir)

    css = '\n'.join(linked_css_tag % (os.path.join(src_dir, f)) for f in css_files)
    js = '<script language="javascript">\n%s</script>\n' % js_flattenizer
    js += '\n'.join(linked_js_tag % (os.path.join(src_dir, f)) for f in js_files)

  else:
    css_filename = os.path.join(script_dir, flattened_css_file)
    js_filename = os.path.join(script_dir, flattened_js_file)
    css = compiled_css_tag % (open(css_filename).read())
    js = compiled_js_tag % (open(js_filename).read())
    templates = ''

  html_filename = options.output_file

  adb = subprocess.Popen(atrace_args, stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)

  result = None
  data = []

  # Read the text portion of the output and watch for the 'TRACE:' marker that
  # indicates the start of the trace data.
  while result is None:
    ready = select.select([adb.stdout, adb.stderr], [], [adb.stdout, adb.stderr])
    if adb.stderr in ready[0]:
      err = os.read(adb.stderr.fileno(), 4096)
      sys.stderr.write(err)
      sys.stderr.flush()
    if adb.stdout in ready[0]:
      out = os.read(adb.stdout.fileno(), 4096)
      parts = out.split('\nTRACE:', 1)

      txt = parts[0].replace('\r', '')
      if len(parts) == 2:
        # The '\nTRACE:' match stole the last newline from the text, so add it
        # back here.
        txt += '\n'
      sys.stdout.write(txt)
      sys.stdout.flush()

      if len(parts) == 2:
        data.append(parts[1])
        sys.stdout.write("downloading trace...")
        sys.stdout.flush()
        break

    result = adb.poll()

  # Read and buffer the data portion of the output.
  while True:
    ready = select.select([adb.stdout, adb.stderr], [], [adb.stdout, adb.stderr])
    keepReading = False
    if adb.stderr in ready[0]:
      err = os.read(adb.stderr.fileno(), 4096)
      if len(err) > 0:
        keepReading = True
        sys.stderr.write(err)
        sys.stderr.flush()
    if adb.stdout in ready[0]:
      out = os.read(adb.stdout.fileno(), 4096)
      if len(out) > 0:
        keepReading = True
        data.append(out)

    if result is not None and not keepReading:
      break

    result = adb.poll()

  if result == 0:
    if expect_trace:
      data = ''.join(data)

      # Collapse CRLFs that are added by adb shell.
      if data.startswith('\r\n'):
        data = data.replace('\r\n', '\n')

      # Skip the initial newline.
      data = data[1:]

      if not data:
        print >> sys.stderr, ('No data was captured.  Output file was not ' +
          'written.')
        sys.exit(1)
      else:
        # Indicate to the user that the data download is complete.
        print " done\n"

      # Extract the thread list dumped by ps.
      threads = {}
      if options.fix_threads:
        parts = data.split('USER     PID   PPID  VSIZE  RSS     WCHAN    PC        NAME', 1)
        if len(parts) == 2:
          data = parts[0]
          for line in parts[1].splitlines():
            cols = line.split(None, 8)
            if len(cols) == 9:
              tid = int(cols[1])
              name = cols[8]
              threads[tid] = name

      # Decompress and preprocess the data.
      out = zlib.decompress(data)
      if options.fix_threads:
        def repl(m):
          tid = int(m.group(2))
          if tid > 0:
            name = threads.get(tid)
            if name is None:
              name = m.group(1)
              if name == '<...>':
                name = '<' + str(tid) + '>'
              threads[tid] = name
            return name + '-' + m.group(2)
          else:
            return m.group(0)
        out = re.sub(r'^\s*(\S+)-(\d+)', repl, out, flags=re.MULTILINE)

      html_prefix = read_asset(script_dir, 'prefix.html')
      html_suffix = read_asset(script_dir, 'suffix.html')

      html_file = open(html_filename, 'w')
      html_file.write(html_prefix % (css, js, templates))
      html_out = out.replace('\n', '\\n\\\n')
      html_file.write(html_out)
      html_file.write(html_suffix)
      html_file.close()
      print "\n    wrote file://%s\n" % os.path.abspath(options.output_file)

  else: # i.e. result != 0
    print >> sys.stderr, 'adb returned error code %d' % result
    sys.exit(1)
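
Most of the code above is subprocess plumbing; the sys.argv usage itself reduces to one idiom: resolving asset paths relative to the script's own location via sys.argv[0] rather than the caller's working directory. Below is a minimal sketch of that idiom (the helper name is hypothetical):

import os
import sys

def script_relative(*parts):
    # sys.argv[0] is the path the script was invoked as; abspath + dirname
    # turns it into the script's directory regardless of the caller's CWD.
    script_dir = os.path.dirname(os.path.abspath(sys.argv[0]))
    return os.path.join(script_dir, *parts)

if __name__ == '__main__':
    # e.g. locate an HTML asset shipped next to the script
    print(script_relative('prefix.html'))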

Example 25

Project: stone
Source File: cli.py
View license
def main():
    """The entry point for the program."""

    if '--' in sys.argv:
        cli_args = sys.argv[1:sys.argv.index('--')]
        generator_args = sys.argv[sys.argv.index('--')+1:]
    else:
        cli_args = sys.argv[1:]
        generator_args = []

    args = _cmdline_parser.parse_args(cli_args)
    debug = False
    if args.verbose is None:
        logging_level = logging.WARNING
    elif args.verbose == 1:
        logging_level = logging.INFO
    elif args.verbose == 2:
        logging_level = logging.DEBUG
        debug = True
    else:
        print('error: I can only be so garrulous, try -vv.', file=sys.stderr)
        sys.exit(1)

    logging.basicConfig(level=logging_level)

    if args.spec and args.spec[0].startswith('+') and args.spec[0].endswith('.py'):
        # Hack: Special case for defining a spec in Python for testing purposes
        # Use this if you want to define a Stone spec using a Python module.
        # The module should contain an api variable that references a
        # :class:`stone.api.Api` object.
        try:
            api = imp.load_source('api', args.spec[0]).api
        except ImportError as e:
            print('error: Could not import API description due to:',
                  e, file=sys.stderr)
            sys.exit(1)
    else:
        read_from_stdin = False
        if args.spec:
            specs = []
            for spec_path in args.spec:
                if spec_path == '-':
                    read_from_stdin = True
                elif not spec_path.endswith('.stone'):
                    print("error: Specification '%s' must have a .stone extension."
                          % spec_path,
                          file=sys.stderr)
                    sys.exit(1)
                elif not os.path.exists(spec_path):
                    print("error: Specification '%s' cannot be found." % spec_path,
                          file=sys.stderr)
                    sys.exit(1)
                else:
                    with open(spec_path) as f:
                        specs.append((spec_path, f.read()))
            if read_from_stdin and specs:
                print("error: Do not specify stdin and specification files "
                      "simultaneously.", file=sys.stderr)
                sys.exit(1)

        if not args.spec or read_from_stdin:
            specs = []
            if debug:
                print('Reading specification from stdin.')

            if six.PY2:
                UTF8Reader = codecs.getreader('utf8')
                sys.stdin = UTF8Reader(sys.stdin)
                stdin_text = sys.stdin.read()
            else:
                stdin_text = io.TextIOWrapper(sys.stdin.buffer, encoding='utf-8').read()

            parts = stdin_text.split('namespace')
            if len(parts) == 1:
                specs.append(('stdin.1', parts[0]))
            else:
                specs.append(
                    ('stdin.1', '%snamespace%s' % (parts.pop(0), parts.pop(0))))
                while parts:
                    specs.append(('stdin.%s' % (len(specs) + 1),
                                  'namespace%s' % parts.pop(0)))

        if args.filter_by_route_attr:
            route_filter, route_filter_errors = parse_route_attr_filter(
                args.filter_by_route_attr, debug)
            if route_filter_errors:
                print('Error(s) in route filter:', file=sys.stderr)
                for err in route_filter_errors:
                    print(err, file=sys.stderr)
                sys.exit(1)

        else:
            route_filter = None

        # TODO: Needs version
        tower = TowerOfStone(specs, debug=debug)

        try:
            api = tower.parse()
        except InvalidSpec as e:
            print('%s:%s: error: %s' % (e.path, e.lineno, e.msg), file=sys.stderr)
            if debug:
                print('A traceback is included below in case this is a bug in '
                      'Stone.\n', traceback.format_exc(), file=sys.stderr)
            sys.exit(1)
        if api is None:
            print('You must fix the above parsing errors for generation to '
                  'continue.', file=sys.stderr)
            sys.exit(1)

        if args.whitelist_namespace_routes:
            for namespace_name in args.whitelist_namespace_routes:
                if namespace_name not in api.namespaces:
                    print('error: Whitelisted namespace missing from spec: %s' %
                          namespace_name, file=sys.stderr)
                    sys.exit(1)
            for namespace in api.namespaces.values():
                if namespace.name not in args.whitelist_namespace_routes:
                    namespace.routes = []
                    namespace.route_by_name = {}

        if args.blacklist_namespace_routes:
            for namespace_name in args.blacklist_namespace_routes:
                if namespace_name not in api.namespaces:
                    print('error: Blacklisted namespace missing from spec: %s' %
                          namespace_name, file=sys.stderr)
                    sys.exit(1)
                else:
                    api.namespaces[namespace_name].routes = []
                    api.namespaces[namespace_name].route_by_name = {}

        if route_filter:
            for namespace in api.namespaces.values():
                filtered_routes = []
                for route in namespace.routes:
                    if route_filter.eval(route):
                        filtered_routes.append(route)
                    else:
                        del namespace.route_by_name[route.name]
                namespace.routes = filtered_routes

        if args.attribute:
            attrs = set(args.attribute)
            if ':all' in attrs:
                attrs = {field.name for field in api.route_schema.fields}
        else:
            attrs = set()

        for namespace in api.namespaces.values():
            for route in namespace.routes:
                for k in list(route.attrs.keys()):
                    if k not in attrs:
                        del route.attrs[k]

        # Remove attrs that weren't specified from the route schema
        for field in api.route_schema.fields[:]:
            if field.name not in attrs:
                api.route_schema.fields.remove(field)
                del api.route_schema._fields_by_name[field.name]
            else:
                attrs.remove(field.name)

        # Error if specified attr isn't even a field in the route schema
        if attrs:
            attr = attrs.pop()
            print('error: Attribute not defined in stone_cfg.Route: %s' %
                  attr, file=sys.stderr)
            sys.exit(1)

    if args.generator in _builtin_generators:
        generator_module = __import__(
            'stone.target.%s' % args.generator, fromlist=[''])
    elif not os.path.exists(args.generator):
        print("error: Generator '%s' cannot be found." % args.generator,
              file=sys.stderr)
        sys.exit(1)
    elif not os.path.isfile(args.generator):
        print("error: Generator '%s' must be a file." % args.generator,
              file=sys.stderr)
        sys.exit(1)
    elif not Compiler.is_stone_generator(args.generator):
        print("error: Generator '%s' must have a .stoneg.py extension." %
              args.generator, file=sys.stderr)
        sys.exit(1)
    else:
        # A bit hacky, but we add the folder that the generator is in to our
        # python path to support the case where the generator imports other
        # files in its local directory.
        new_python_path = os.path.dirname(args.generator)
        if new_python_path not in sys.path:
            sys.path.append(new_python_path)
        try:
            generator_module = imp.load_source('user_generator', args.generator)
        except:
            print("error: Importing generator '%s' module raised an exception:" %
                  args.generator, file=sys.stderr)
            raise

    c = Compiler(
        api,
        generator_module,
        generator_args,
        args.output,
        clean_build=args.clean_build,
    )
    try:
        c.build()
    except GeneratorException as e:
        print('%s: error: %s raised an exception:\n%s' %
              (args.generator, e.generator_name, e.traceback),
              file=sys.stderr)
        sys.exit(1)

    if not sys.argv[0].endswith('stone'):
        # If we aren't running from an entry_point, then return api to make it
        # easier to do debugging.
        return api
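
The '--' convention at the top of main() is worth isolating: everything before a literal '--' in sys.argv is parsed by the tool itself, and everything after it is forwarded untouched to the generator. A minimal sketch of that split (function name hypothetical):

import sys

def split_argv_on_double_dash(argv):
    # argv[0] is the program name; a literal '--' separates our own
    # arguments from ones forwarded verbatim to a sub-tool.
    if '--' in argv:
        idx = argv.index('--')
        return argv[1:idx], argv[idx + 1:]
    return argv[1:], []

if __name__ == '__main__':
    cli_args, forwarded_args = split_argv_on_double_dash(sys.argv)
    print(cli_args)
    print(forwarded_args)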

Example 26

Project: PoGoMap-GUI
Source File: utils.py
View license
@memoize
def get_args():
    # fuck PEP8
    defaultconfigpath = os.getenv('POGOMAP_CONFIG', os.path.join(os.path.dirname(__file__), '../config/config.ini'))
    parser = configargparse.ArgParser(default_config_files=[defaultconfigpath], auto_env_var_prefix='POGOMAP_')
    parser.add_argument('-cf', '--config', is_config_file=True, help='Configuration file')
    parser.add_argument('-a', '--auth-service', type=str.lower, action='append', default=[],
                        help='Auth Services, either one for all accounts or one per account: ptc or google. Defaults all to ptc.')
    parser.add_argument('-u', '--username', action='append', default=[],
                        help='Usernames, one per account.')
    parser.add_argument('-p', '--password', action='append', default=[],
                        help='Passwords, either single one for all accounts or one per account.')
    parser.add_argument('-w', '--workers', type=int,
                        help='Number of search worker threads to start. Defaults to the number of accounts specified.')
    parser.add_argument('-asi', '--account-search-interval', type=int, default=0,
                        help='Seconds for accounts to search before switching to a new account. 0 to disable.')
    parser.add_argument('-ari', '--account-rest-interval', type=int, default=7200,
                        help='Seconds for accounts to rest when they fail or are switched out')
    parser.add_argument('-ac', '--accountcsv',
                        help='Load accounts from CSV file containing "auth_service,username,passwd" lines')
    parser.add_argument('-l', '--location', type=parse_unicode,
                        help='Location, can be an address or coordinates')
    parser.add_argument('-j', '--jitter', help='Apply random -9m to +9m jitter to location',
                        action='store_true', default=False)
    parser.add_argument('-st', '--step-limit', help='Steps', type=int,
                        default=12)
    parser.add_argument('-sd', '--scan-delay',
                        help='Time delay between requests in scan threads',
                        type=float, default=10)
    parser.add_argument('-enc', '--encounter',
                        help='Start an encounter to gather IVs and moves',
                        action='store_true', default=False)
    parser.add_argument('-ed', '--encounter-delay',
                        help='Time delay between encounter pokemon in scan threads',
                        type=float, default=1)
    encounter_list = parser.add_mutually_exclusive_group()
    encounter_list.add_argument('-ewht', '--encounter-whitelist', action='append', default=[],
                                help='List of pokemon to encounter for more stats')
    encounter_list.add_argument('-eblk', '--encounter-blacklist', action='append', default=[],
                                help='List of pokemon to NOT encounter for more stats')
    parser.add_argument('-ld', '--login-delay',
                        help='Time delay between each login attempt',
                        type=float, default=5)
    parser.add_argument('-lr', '--login-retries',
                        help='Number of logins attempts before refreshing a thread',
                        type=int, default=3)
    parser.add_argument('-mf', '--max-failures',
                        help='Maximum number of failures to parse locations before an account will go into a two hour sleep',
                        type=int, default=5)
    parser.add_argument('-msl', '--min-seconds-left',
                        help='Time that must be left on a spawn before considering it too late and skipping it. e.g. 600 would skip anything with < 10 minutes remaining. Default 0.',
                        type=int, default=0)
    parser.add_argument('-dc', '--display-in-console',
                        help='Display Found Pokemon in Console',
                        action='store_true', default=False)
    parser.add_argument('-H', '--host', help='Set web server listening host',
                        default='127.0.0.1')
    parser.add_argument('-P', '--port', type=int,
                        help='Set web server listening port', default=5000)
    parser.add_argument('-L', '--locale',
                        help='Locale for Pokemon names (default: {},\
                        check {} for more)'.
                        format(config['LOCALE'], config['LOCALES_DIR']), default='en')
    parser.add_argument('-c', '--china',
                        help='Coordinates transformer for China',
                        action='store_true')
    parser.add_argument('-m', '--mock', type=str,
                        help='Mock mode - point to an fpgo endpoint instead of using the real PogoApi, e.g. http://127.0.0.1:9090',
                        default='')
    parser.add_argument('-ns', '--no-server',
                        help='No-Server Mode. Starts the searcher but not the Webserver.',
                        action='store_true', default=False)
    parser.add_argument('-os', '--only-server',
                        help='Server-Only Mode. Starts only the Webserver without the searcher.',
                        action='store_true', default=False)
    parser.add_argument('-nsc', '--no-search-control',
                        help='Disables search control',
                        action='store_false', dest='search_control', default=True)
    parser.add_argument('-fl', '--fixed-location',
                        help='Hides the search bar for use in shared maps.',
                        action='store_true', default=False)
    parser.add_argument('-k', '--gmaps-key',
                        help='Google Maps Javascript API Key',
                        required=True)
    parser.add_argument('--skip-empty', help='Enables skipping of empty cells  in normal scans - requires previously populated database (not to be used with -ss)',
                        action='store_true', default=False)
    parser.add_argument('-C', '--cors', help='Enable CORS on web server',
                        action='store_true', default=False)
    parser.add_argument('-D', '--db', help='Database filename',
                        default='pogom.db')
    parser.add_argument('-cd', '--clear-db',
                        help='Deletes the existing database before starting the Webserver.',
                        action='store_true', default=False)
    parser.add_argument('-np', '--no-pokemon',
                        help='Disables Pokemon from the map (including parsing them into local db)',
                        action='store_true', default=False)
    parser.add_argument('-ng', '--no-gyms',
                        help='Disables Gyms from the map (including parsing them into local db)',
                        action='store_true', default=False)
    parser.add_argument('-nk', '--no-pokestops',
                        help='Disables PokeStops from the map (including parsing them into local db)',
                        action='store_true', default=False)
    parser.add_argument('-ss', '--spawnpoint-scanning',
                        help='Use spawnpoint scanning (instead of hex grid). Scans in a circle based on step_limit when on DB', nargs='?', const='nofile', default=False)
    parser.add_argument('--dump-spawnpoints', help='dump the spawnpoints from the db to json (only for use with -ss)',
                        action='store_true', default=False)
    parser.add_argument('-pd', '--purge-data',
                        help='Clear pokemon from database this many hours after they disappear \
                        (0 to disable)', type=int, default=0)
    parser.add_argument('-px', '--proxy', help='Proxy url (e.g. socks5://127.0.0.1:9050)', action='append')
    parser.add_argument('-pxsc', '--proxy-skip-check', help='Disable checking of proxies before start', action='store_true', default=False)
    parser.add_argument('-pxt', '--proxy-timeout', help='Timeout settings for proxy checker in seconds ', type=int, default=5)
    parser.add_argument('-pxd', '--proxy-display', help='Display info on which proxy is being used (index or full). To be used with -ps', type=str, default='index')
    parser.add_argument('--db-type', help='Type of database to be used (default: sqlite)',
                        default='sqlite')
    parser.add_argument('--db-name', help='Name of the database to be used')
    parser.add_argument('--db-user', help='Username for the database')
    parser.add_argument('--db-pass', help='Password for the database')
    parser.add_argument('--db-host', help='IP or hostname for the database')
    parser.add_argument('--db-port', help='Port for the database', type=int, default=3306)
    parser.add_argument('--db-max_connections', help='Max connections (per thread) for the database',
                        type=int, default=5)
    parser.add_argument('--db-threads', help='Number of db threads; increase if the db queue falls behind',
                        type=int, default=1)
    parser.add_argument('-wh', '--webhook', help='Define URL(s) to POST webhook information to',
                        nargs='*', default=False, dest='webhooks')
    parser.add_argument('-gi', '--gym-info', help='Get all details about gyms (causes an additional API hit for every gym)',
                        action='store_true', default=False)
    parser.add_argument('--disable-clean', help='Disable clean db loop',
                        action='store_true', default=False)
    parser.add_argument('--webhook-updates-only', help='Only send updates (pokémon & lured pokéstops)',
                        action='store_true', default=False)
    parser.add_argument('--wh-threads', help='Number of webhook threads; increase if the webhook queue falls behind',
                        type=int, default=1)
    parser.add_argument('--ssl-certificate', help='Path to SSL certificate file')
    parser.add_argument('--ssl-privatekey', help='Path to SSL private key file')
    parser.add_argument('-ps', '--print-status', action='store_true',
                        help='Show a status screen instead of log messages. Can switch between status and logs by pressing enter.', default=False)
    parser.add_argument('-sn', '--status-name', default=None,
                        help='Enable status page database update using STATUS_NAME as main worker name')
    parser.add_argument('-spp', '--status-page-password', default=None,
                        help='Set the status page password')
    parser.add_argument('-el', '--encrypt-lib', help='Path to encrypt lib to be used instead of the shipped ones')
    parser.add_argument('-odt', '--on-demand_timeout', help='Pause searching while web UI is inactive for this timeout(in seconds)', type=int, default=0)
    verbosity = parser.add_mutually_exclusive_group()
    verbosity.add_argument('-v', '--verbose', help='Show debug messages from PokemonGo-Map and pgoapi. Optionally specify file to log to.', nargs='?', const='nofile', default=False, metavar='filename.log')
    verbosity.add_argument('-vv', '--very-verbose', help='Like verbose, but show debug messages from all modules as well.  Optionally specify file to log to.', nargs='?', const='nofile', default=False, metavar='filename.log')
    verbosity.add_argument('-d', '--debug', help='Deprecated, use -v or -vv instead.', action='store_true')
    parser.set_defaults(DEBUG=False)

    args = parser.parse_args()

    if args.only_server:
        if args.location is None:
            parser.print_usage()
            print(sys.argv[0] + ": error: argument -l/--location is required")
            sys.exit(1)
    else:
        # If using a CSV file, add the data where needed into the username,password and auth_service arguments.
        # CSV file should have lines like "ptc,username,password", "username,password" or "username".
        if args.accountcsv is not None:
            # Giving num_fields something it would usually not get
            num_fields = -1
            with open(args.accountcsv, 'r') as f:
                for num, line in enumerate(f, 1):

                    fields = []

                    # First time around populate num_fields with current field count.
                    if num_fields < 0:
                        num_fields = line.count(',') + 1

                    csv_input = []
                    csv_input.append('')
                    csv_input.append('<username>')
                    csv_input.append('<username>,<password>')
                    csv_input.append('<ptc/google>,<username>,<password>')

                    # If the number of fields differs from the first line, this is not a valid CSV
                    if num_fields != line.count(',') + 1:
                        print(sys.argv[0] + ": Error parsing CSV file on line " + str(num) + ". Your file started with the following input, '" + csv_input[num_fields] + "' but now you gave us '" + csv_input[line.count(',') + 1] + "'.")
                        sys.exit(1)

                    field_error = ''
                    line = line.strip()

                    # Ignore blank lines and comment lines
                    if len(line) == 0 or line.startswith('#'):
                        continue

                    # If number of fields is more than 1 split the line into fields and strip them
                    if num_fields > 1:
                        fields = line.split(",")
                        fields = map(str.strip, fields)

                    # If the number of fields is one then assume this is "username". As requested.
                    if num_fields == 1:
                        # Empty lines are already ignored.
                        args.username.append(line)

                    # If the number of fields is two then assume this is "username,password". As requested.
                    if num_fields == 2:
                        # If the field is empty, something is wrong!
                        if len(fields[0]) > 0:
                            args.username.append(fields[0])
                        else:
                            field_error = 'username'

                        # If the field is empty, something is wrong!
                        if len(fields[1]) > 0:
                            args.password.append(fields[1])
                        else:
                            field_error = 'password'

                    # If the number of fields is three then assume this is "ptc,username,password". As requested.
                    if num_fields == 3:
                        # If field 0 is not ptc or google something is wrong!
                        if fields[0].lower() == 'ptc' or fields[0].lower() == 'google':
                            args.auth_service.append(fields[0])
                        else:
                            field_error = 'method'

                        # If the field is empty, something is wrong!
                        if len(fields[1]) > 0:
                            args.username.append(fields[1])
                        else:
                            field_error = 'username'

                        # If the field is empty, something is wrong!
                        if len(fields[2]) > 0:
                            args.password.append(fields[2])
                        else:
                            field_error = 'password'

                    if num_fields > 3:
                        print 'Too many fields in accounts file: max supported are 3 fields. Found {} fields'.format(num_fields)
                        sys.exit(1)

                    # If something is wrong display error.
                    if field_error != '':
                        type_error = 'empty!'
                        if field_error == 'method':
                            type_error = 'not ptc or google, instead we got \'' + fields[0] + '\'!'
                        print(sys.argv[0] + ": Error parsing CSV file on line " + str(num) + ". We found " + str(num_fields) + " fields, so your input should have looked like '" + csv_input[num_fields] + "'\nBut you gave us '" + line + "', your " + field_error + " was " + type_error)
                        sys.exit(1)

        errors = []

        num_auths = len(args.auth_service)
        num_usernames = 0
        num_passwords = 0

        if len(args.username) == 0:
            errors.append('Missing `username` either as -u/--username, csv file using -ac, or in config')
        else:
            num_usernames = len(args.username)

        if args.location is None:
            errors.append('Missing `location` either as -l/--location or in config')

        if len(args.password) == 0:
            errors.append('Missing `password` either as -p/--password, csv file, or in config')
        else:
            num_passwords = len(args.password)

        if args.step_limit is None:
            errors.append('Missing `step_limit` either as -st/--step-limit or in config')

        if num_auths == 0:
            args.auth_service = ['ptc']

        num_auths = len(args.auth_service)

        if num_usernames > 1:
            if num_passwords > 1 and num_usernames != num_passwords:
                errors.append('The number of provided passwords ({}) must match the username count ({})'.format(num_passwords, num_usernames))
            if num_auths > 1 and num_usernames != num_auths:
                errors.append('The number of provided auth ({}) must match the username count ({})'.format(num_auths, num_usernames))

        if len(errors) > 0:
            parser.print_usage()
            print(sys.argv[0] + ": errors: \n - " + "\n - ".join(errors))
            sys.exit(1)

        # Fill the pass/auth if set to a single value
        if num_passwords == 1:
            args.password = [args.password[0]] * num_usernames
        if num_auths == 1:
            args.auth_service = [args.auth_service[0]] * num_usernames

        # Build the accounts list from the parallel username/password/auth lists
        args.accounts = []

        for i, username in enumerate(args.username):
            args.accounts.append({'username': username, 'password': args.password[i], 'auth_service': args.auth_service[i]})

        # Make max workers equal number of accounts if unspecified, and disable account switching
        if args.workers is None:
            args.workers = len(args.accounts)
            args.account_search_interval = None

        # Disable search interval if 0 specified
        if args.account_search_interval == 0:
            args.account_search_interval = None

        # Make sure we don't have an empty account list after adding command line and CSV accounts
        if len(args.accounts) == 0:
            print(sys.argv[0] + ": Error: no accounts specified. Use -a, -u, and -p or --accountcsv to add accounts")
            sys.exit(1)

        args.encounter_blacklist = [int(i) for i in args.encounter_blacklist]
        args.encounter_whitelist = [int(i) for i in args.encounter_whitelist]

        # Decide which scanning mode to use
        if args.spawnpoint_scanning:
            args.scheduler = 'SpawnScan'
        elif args.skip_empty:
            args.scheduler = 'HexSearchSpawnpoint'
        else:
            args.scheduler = 'HexSearch'

    return args
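
Two sys.argv touches stand out here: configargparse does the real parsing, while sys.argv[0] is used directly to prefix hand-rolled error messages in argparse's own 'prog: error:' style. A minimal sketch of that reporting pattern (helper name hypothetical):

import sys

def die(message):
    # Mimic argparse's error format using the program name from sys.argv[0],
    # then exit non-zero, as the CSV validation above does by hand.
    print(sys.argv[0] + ": error: " + message)
    sys.exit(1)

if __name__ == '__main__':
    die("argument -l/--location is required")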

Example 27

Project: PyDev.Debugger
Source File: runfiles.py
View license
def main():
    import sys

    # Separate the nose params and the pydev params.
    pydev_params = []
    other_test_framework_params = []
    found_other_test_framework_param = None

    NOSE_PARAMS = '--nose-params'
    PY_TEST_PARAMS = '--py-test-params'

    for arg in sys.argv[1:]:
        if not found_other_test_framework_param and arg != NOSE_PARAMS and arg != PY_TEST_PARAMS:
            pydev_params.append(arg)

        else:
            if not found_other_test_framework_param:
                found_other_test_framework_param = arg
            else:
                other_test_framework_params.append(arg)


    # Here we'll run either with nose or with the pydev_runfiles.
    from _pydev_runfiles import pydev_runfiles
    from _pydev_runfiles import pydev_runfiles_xml_rpc
    from _pydevd_bundle import pydevd_constants
    from pydevd_file_utils import _NormFile

    DEBUG = 0
    if DEBUG:
        sys.stdout.write('Received parameters: %s\n' % (sys.argv,))
        sys.stdout.write('Params for pydev: %s\n' % (pydev_params,))
        if found_other_test_framework_param:
            sys.stdout.write('Params for test framework: %s, %s\n' % (found_other_test_framework_param, other_test_framework_params))

    try:
        configuration = pydev_runfiles.parse_cmdline([sys.argv[0]] + pydev_params)
    except:
        sys.stderr.write('Command line received: %s\n' % (sys.argv,))
        raise
    pydev_runfiles_xml_rpc.initialize_server(configuration.port)  # Note that if the port is None, a Null server will be initialized.

    NOSE_FRAMEWORK = "nose"
    PY_TEST_FRAMEWORK = "py.test"
    test_framework = None  # Default (pydev)
    try:
        if found_other_test_framework_param:
            if found_other_test_framework_param == NOSE_PARAMS:
                test_framework = NOSE_FRAMEWORK
                import nose

            elif found_other_test_framework_param == PY_TEST_PARAMS:
                test_framework = PY_TEST_FRAMEWORK
                import pytest

            else:
                raise ImportError()

        else:
            raise ImportError()

    except ImportError:
        if found_other_test_framework_param:
            sys.stderr.write('Warning: Could not import the test runner: %s. Running with the default pydev unittest runner instead.\n' % (
                test_framework,))
            if DEBUG:
                import traceback
                traceback.print_exc()

        test_framework = None

    # Clear any exception that may be there so that clients don't see it.
    # See: https://sourceforge.net/tracker/?func=detail&aid=3408057&group_id=85796&atid=577329
    if hasattr(sys, 'exc_clear'):
        sys.exc_clear()

    if not test_framework:

        return pydev_runfiles.main(configuration)  # Note: still doesn't return a proper value.

    else:
        # We'll convert the parameters to what nose or py.test expects.
        # The supported parameters are:
        # runfiles.py  --config-file|-t|--tests <Test.test1,Test2>  dirs|files --nose-params xxx yyy zzz
        # (all after --nose-params should be passed directly to nose)

        # In java:
        # --tests = Constants.ATTR_UNITTEST_TESTS
        # --config-file = Constants.ATTR_UNITTEST_CONFIGURATION_FILE


        # The only thing actually handled here are the tests that we want to run, which we'll
        # handle and pass as what the test framework expects.

        py_test_accept_filter = {}
        files_to_tests = configuration.files_to_tests

        if files_to_tests:
            # Handling through the file contents (file where each line is a test)
            files_or_dirs = []
            for file, tests in files_to_tests.items():
                if test_framework == NOSE_FRAMEWORK:
                    for test in tests:
                        files_or_dirs.append(file + ':' + test)

                elif test_framework == PY_TEST_FRAMEWORK:
                    file = _NormFile(file)
                    py_test_accept_filter[file] = tests
                    files_or_dirs.append(file)

                else:
                    raise AssertionError('Cannot handle test framework: %s at this point.' % (test_framework,))

        else:
            if configuration.tests:
                # Tests passed (works together with the files_or_dirs)
                files_or_dirs = []
                for file in configuration.files_or_dirs:
                    if test_framework == NOSE_FRAMEWORK:
                        for t in configuration.tests:
                            files_or_dirs.append(file + ':' + t)

                    elif test_framework == PY_TEST_FRAMEWORK:
                        file = _NormFile(file)
                        py_test_accept_filter[file] = configuration.tests
                        files_or_dirs.append(file)

                    else:
                        raise AssertionError('Cannot handle test framework: %s at this point.' % (test_framework,))
            else:
                # Only files or dirs passed (let it do the test-loading based on those paths)
                files_or_dirs = configuration.files_or_dirs

        argv = other_test_framework_params + files_or_dirs


        if test_framework == NOSE_FRAMEWORK:
            # Nose usage: http://somethingaboutorange.com/mrl/projects/nose/0.11.2/usage.html
            # show_stdout_option = ['-s']
            # processes_option = ['--processes=2']
            argv.insert(0, sys.argv[0])
            if DEBUG:
                sys.stdout.write('Final test framework args: %s\n' % (argv[1:],))

            from _pydev_runfiles import pydev_runfiles_nose
            PYDEV_NOSE_PLUGIN_SINGLETON = pydev_runfiles_nose.start_pydev_nose_plugin_singleton(configuration)
            argv.append('--with-pydevplugin')
            # Return 'not' because it will return 'success' (so, exit == 0 if success)
            return not nose.run(argv=argv, addplugins=[PYDEV_NOSE_PLUGIN_SINGLETON])

        elif test_framework == PY_TEST_FRAMEWORK:
            if DEBUG:
                sys.stdout.write('Final test framework args: %s\n' % (argv,))
                sys.stdout.write('py_test_accept_filter: %s\n' % (py_test_accept_filter,))

            def dotted(p):
                # Helper to convert path to have dots instead of slashes
                return os.path.normpath(p).replace(os.sep, "/").replace('/', '.')

            curr_dir = os.path.realpath('.')
            curr_dotted = dotted(curr_dir) + '.'

            # Overcome limitation on py.test:
            # When searching conftest if we have a structure as:
            # /my_package
            # /my_package/conftest.py
            # /my_package/tests
            # /my_package/tests/test_my_package.py
            # The test_my_package won't have access to the conftest contents from the
            # test_my_package.py file unless the working dir is set to /my_package.
            #
            # See related issue (for which we work-around below):
            # https://bitbucket.org/hpk42/pytest/issue/639/conftest-being-loaded-twice-giving

            for path in sys.path:
                path_dotted = dotted(path)
                if curr_dotted.startswith(path_dotted):
                    os.chdir(path)
                    break

            for i in xrange(len(argv)):
                arg = argv[i]
                # Workaround bug in py.test: if we pass the full path it ends up importing conftest
                # more than once (so, always work with relative paths).
                if os.path.isfile(arg) or os.path.isdir(arg):
                    from _pydev_bundle.pydev_imports import relpath
                    try:
                        # May fail if on different drives
                        arg = relpath(arg)
                    except ValueError:
                        pass
                    else:
                        argv[i] = arg

            # To find our runfile helpers (i.e.: plugin)...
            d = os.path.dirname(__file__)
            if d not in sys.path:
                sys.path.insert(0, d)

            import pickle, zlib, base64

            # Update environment PYTHONPATH so that it finds our plugin if using xdist.
            os.environ['PYTHONPATH'] = os.pathsep.join(sys.path)

            # Set what should be skipped in the plugin through an environment variable
            s = base64.b64encode(zlib.compress(pickle.dumps(py_test_accept_filter)))
            if pydevd_constants.IS_PY3K:
                s = s.decode('ascii')  # Must be str in py3.
            os.environ['PYDEV_PYTEST_SKIP'] = s

            # Identifies the main pid (i.e.: if it's not the main pid it has to connect back to the
            # main pid to give xml-rpc notifications).
            os.environ['PYDEV_MAIN_PID'] = str(os.getpid())
            os.environ['PYDEV_PYTEST_SERVER'] = str(configuration.port)

            argv.append('-p')
            argv.append('_pydev_runfiles.pydev_runfiles_pytest2')
            if 'unittest' in sys.modules or 'unittest2' in sys.modules:
                sys.stderr.write('pydev test runner error: imported unittest before running pytest.main\n')
            return pytest.main(argv)

        else:
            raise AssertionError('Cannot handle test framework: %s at this point.' % (test_framework,))
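
The parameter split at the top of main() is a sentinel scan: arguments before the first '--nose-params'/'--py-test-params' belong to the pydev runner, the sentinel records which framework was requested, and the remainder is forwarded verbatim. A standalone sketch of that scan (function name hypothetical):

import sys

SENTINELS = ('--nose-params', '--py-test-params')

def partition_at_sentinel(argv):
    # Everything before the first sentinel is ours; the sentinel names the
    # requested framework; everything after it is passed through untouched.
    own, forwarded, framework = [], [], None
    for arg in argv:
        if framework is None and arg in SENTINELS:
            framework = arg
        elif framework is None:
            own.append(arg)
        else:
            forwarded.append(arg)
    return own, framework, forwarded

if __name__ == '__main__':
    print(partition_at_sentinel(sys.argv[1:]))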

Example 28

Project: bodhi
Source File: pickledb.py
View license
def load_sqlalchemy_db():
    print "\nLoading pickled database %s" % sys.argv[2]
    db = file(sys.argv[2], 'r')
    data = pickle.load(db)

    import transaction
    from bodhi.server.models import Base
    from bodhi.server.models import Release, Update, Build, Comment, User, Bug, CVE
    from bodhi.server.models import Package, Group
    from bodhi.server.models import UpdateType, UpdateStatus, UpdateRequest
    from sqlalchemy import create_engine
    from sqlalchemy.orm.exc import NoResultFound

    # Caches for quick lookup
    releases = {}
    packages = {}
    users = {}
    critpath = {}

    aliases = []

    engine = create_engine(bodhi.server.config['sqlalchemy.url'])
    Session = scoped_session(sessionmaker(extension=ZopeTransactionExtension()))
    Session.configure(bind=engine)
    db = Session()

    # Allow filtering of releases to load
    whitelist = []
    if '--release' in sys.argv:
        for r in sys.argv[sys.argv.index('--release') + 1].split(','):
            whitelist.append(r)
        print('whitelist = %r' % whitelist)

    # Legacy format was just a list of update dictionaries
    # Now we'll pull things out into an organized dictionary:
    # {'updates': [], 'releases': []}
    if isinstance(data, dict):
        for release in data['releases']:
            try:
                db.query(Release).filter_by(name=release['name']).one()
            except NoResultFound:
                del(release['metrics'])
                del(release['locked'])
                r = Release(**release)
                r.stable_tag = "%s-updates" % r.dist_tag
                r.testing_tag = "%s-testing" % r.stable_tag
                r.candidate_tag = "%s-candidate" % r.stable_tag
                r.pending_testing_tag = "%s-pending" % r.testing_tag
                r.pending_stable_tag = "%s-pending" % r.stable_tag
                r.override_tag = "%s-override" % r.dist_tag
                db.add(r)
        data = data['updates']

    progress = ProgressBar(widgets=[SimpleProgress(), Percentage(), Bar()])

    for u in progress(data):
        try:
            release = releases[u['release'][0]]
        except KeyError:
            try:
                release = db.query(Release).filter_by(name=u['release'][0]).one()
            except NoResultFound:
                release = Release(name=u['release'][0], long_name=u['release'][1],
                                  id_prefix=u['release'][2],
                                  dist_tag=u['release'][3])
                db.add(release)
            releases[u['release'][0]] = release
            if whitelist:
                if release.name in whitelist:
                    critpath[release.name] = get_critpath_pkgs(release.name.lower())
                    print('%s critpath packages for %s' % (len(critpath[release.name]),
                                                           release.name))
            else:
                critpath[release.name] = get_critpath_pkgs(release.name.lower())
                print('%s critpath packages for %s' % (len(critpath[release.name]),
                                                       release.name))

        if whitelist and release.name not in whitelist:
            continue

        ## Backwards compatibility
        request = u['request']
        if u['request'] == 'move':
            u['request'] = 'stable'
        elif u['request'] == 'push':
            u['request'] = 'testing'
        elif u['request'] == 'unpush':
            u['request'] = 'obsolete'
        if u['approved'] not in (True, False):
            u['approved'] = None
        if u.has_key('update_id'):
            u['updateid'] = u['update_id']
            u['alias'] = u['update_id']

            if u['alias']:
                split = u['alias'].split('-')
                year, id = split[-2:]
                aliases.append((int(year), int(id)))

        if not u.has_key('date_modified'):
            u['date_modified'] = None

        # Port to new enum types
        if u['request']:
            if u['request'] == 'stable':
                u['request'] = UpdateRequest.stable
            elif u['request'] == 'testing':
                u['request'] = UpdateRequest.testing
            else:
                raise Exception("Unknown request: %s" % u['request'])

        if u['type'] == 'bugfix':
            u['type'] = UpdateType.bugfix
        elif u['type'] == 'newpackage':
            u['type'] = UpdateType.newpackage
        elif u['type'] == 'enhancement':
            u['type'] = UpdateType.enhancement
        elif u['type'] == 'security':
            u['type'] = UpdateType.security
        else:
            raise Exception("Unknown type: %r" % u['type'])

        if u['status'] == 'pending':
            u['status'] = UpdateStatus.pending
        elif u['status'] == 'testing':
            u['status'] = UpdateStatus.testing
        elif u['status'] == 'obsolete':
            u['status'] = UpdateStatus.obsolete
        elif u['status'] == 'stable':
            u['status'] = UpdateStatus.stable
        elif u['status'] == 'unpushed':
            u['status'] = UpdateStatus.unpushed
        else:
            raise Exception("Unknown status: %r" % u['status'])

        try:
            update = db.query(Update).filter_by(title=u['title']).one()
            continue
        except NoResultFound:
            update = Update(title=u['title'],
                            date_submitted=u['date_submitted'],
                            date_pushed=u['date_pushed'],
                            date_modified=u['date_modified'],
                            release=release,
                            old_updateid=u['updateid'],
                            alias=u['updateid'],
                            pushed=u['pushed'],
                            notes=u['notes'],
                            karma=u['karma'],
                            type=u['type'],
                            status=u['status'],
                            request=u['request'],
                            )
                            #approved=u['approved'])
            db.add(update)
            db.flush()

            try:
                user = users[u['submitter']]
            except KeyError:
                try:
                    user = db.query(User).filter_by(name=u['submitter']).one()
                except NoResultFound:
                    user = User(name=u['submitter'])
                    db.add(user)
                    db.flush()
                users[u['submitter']] = user
            user.updates.append(update)

        ## Create Package and Build objects
        for pkg, nvr in u['builds']:
            try:
                package = packages[pkg]
            except KeyError:
                try:
                    package = db.query(Package).filter_by(name=pkg).one()
                except NoResultFound:
                    package = Package(name=pkg)
                    db.add(package)
                packages[pkg] = package
            if package.name in critpath[update.release.name]:
                update.critpath = True
            try:
                build = db.query(Build).filter_by(nvr=nvr).one()
            except NoResultFound:
                build = Build(nvr=nvr, package=package)
                db.add(build)
                update.builds.append(build)

        ## Create all Bugzilla objects for this update
        for bug_num, bug_title, security, parent in u['bugs']:
            try:
                bug = db.query(Bug).filter_by(bug_id=bug_num).one()
            except NoResultFound:
                bug = Bug(bug_id=bug_num, security=security, parent=parent,
                          title=bug_title)
                db.add(bug)
            update.bugs.append(bug)

        ## Create all CVE objects for this update
        for cve_id in u['cves']:
            try:
                cve = db.query(CVE).filter_by(cve_id=cve_id).one()
            except NoResultFound:
                cve = CVE(cve_id=cve_id)
                db.add(cve)
            update.cves.append(cve)

        ## Create all Comments for this update
        for c in u['comments']:
            try:
                timestamp, author, text, karma, anonymous = c
            except ValueError:
                timestamp, author, text, karma = c
                anonymous = '@' in author

            comment = Comment(timestamp=timestamp, text=text,
                              karma=karma, anonymous=anonymous)
            db.add(comment)
            db.flush()
            update.comments.append(comment)
            if anonymous:
                name = u'anonymous'
            else:
                name = author
            group = None
            if not anonymous and ' (' in name:
                split = name.split(' (')
                name = split[0]
                group = split[1][:-1]
                assert group, name
            try:
                user = users[name]
            except KeyError:
                try:
                    user = db.query(User).filter_by(name=name).one()
                except NoResultFound:
                    user = User(name=name)
                    db.add(user)
                    db.flush()
                users[name] = user

            comment.user = user

            if group:
                try:
                    group = db.query(Group).filter_by(name=group).one()
                except NoResultFound:
                    group = Group(name=group)
                    db.add(group)
                    db.flush()
                user.groups.append(group)

        db.flush()

    # Hack to get the Bodhi2 alias generator working with bodhi1 data.
    # The new generator assumes that the alias is assigned at submission time, as opposed to push time.
    year, id = max(aliases)
    print('Highest alias = %r %r' % (year, id))
    up = db.query(Update).filter_by(alias=u'FEDORA-%s-%s' % (year, id)).one()
    print(up.title)
    up.date_submitted = up.date_pushed
    db.flush()

    transaction.commit()

    print("\nDatabase migration complete!")
    print(" * %d updates" % db.query(Update).count())
    print(" * %d builds" % db.query(Build).count())
    print(" * %d comments" % db.query(Comment).count())
    print(" * %d users" % db.query(User).count())
    print(" * %d bugs" % db.query(Bug).count())
    print(" * %d CVEs" % db.query(CVE).count())

Example 29

Project: mtpy
Source File: calibratefiles.py
View license
def main():

    if len(sys.argv) < 3:
        sys.exit('\nNeed at least 2 arguments:\n <path to files> \n '
                '<config file> \n '
                '[optional:<output dir>] \n [optional:<station>] \n '
                '[optional:<recursive flag -R>] \n '
                '[optional:<re-orientation flag -O]\n\n')

    outdir = None
    stationname = None
    recursive = False
    orientation = False

    if len(sys.argv) > 3:
        optionals = sys.argv[3:]
        for o in optionals:
            o = o.strip()
            if o[0] == '-':
                if o[1].lower() == 'r':
                    recursive = True
                elif o[1].lower() == 'o':
                    orientation = True
                continue
            elif outdir is None:
                outdir = o
                continue
            elif stationname is None:
                stationname = o.upper() 
                continue
    pathname_raw = sys.argv[1] 
    pathname = op.abspath(op.realpath(pathname_raw))

    if not op.isdir(pathname):
        sys.exit('Data file(s) path not existing: {0}'.format(pathname))

    configfilename_raw = sys.argv[2]
    configfile = op.abspath(op.realpath(op.join(os.curdir,configfilename_raw)))

    if not op.isfile(configfile):
        sys.exit('Config file not found: {0}'.format(configfile))

    if recursive is True:
        lo_dirs = [pathname]
        for i,j,k in os.walk(pathname):
            lof = [op.abspath(op.join(i,f)) for f in j]
            lo_dirs.extend(lof)
        pathname = list(set(lo_dirs))
    else:
        pathname = [pathname]

    #config_dict = MTcf.read_survey_configfile(configfile)
    try:
        config_dict = MTcf.read_survey_configfile(configfile)
        #done internally already 
        #MTcf.validate_dict(config_dict)
    except:
        sys.exit( 'Config file invalid or cannot be read: {0}'.format(configfile) )

    #----------------------------------------------------------------------------

    #select files by header entries:
    components = ['ex', 'ey', 'bx', 'by', 'bz']
    lo_allfiles = []
    lo_allheaders = []
    lo_allstations = []
    for folder in pathname:
        wd = op.abspath(op.realpath(folder)) 
        if not op.isdir(wd):
            #print 'Directory not existing: %s' % (wd)
            continue
        dirfiles = [op.abspath(op.join(wd,i)) for i in os.listdir(wd)]
        for tmpfile in dirfiles:
            try:
                header = MTfh.read_ts_header(tmpfile)
                if header['channel'].lower() in components:
                    if stationname is not None:
                        if stationname.upper() != header['station'].upper():
                            continue
                    lo_allstations.append(header['station'].upper())
                    lo_allfiles.append(op.abspath(op.join(wd,tmpfile)))
                    lo_allheaders.append(header)
            except:
                continue

    lo_allstations = list(set(lo_allstations))

    #check, if list of files is empty
    if len(lo_allfiles) == 0:
        sys.exit('Directory(ies) do(es) not contain files to calibrate:'
            ' {0}'.format(pathname)) 

    #-------------------------------------------------------
    # set up the directory structure for the output:
    
    #1. generic calibration output directory
    cal_outdir = op.abspath(op.join(pathname[0],'calibrated'))

    if outdir is not None:
        try:
            cal_outdir = op.abspath(op.join(os.curdir,outdir))
            if not op.isdir(cal_outdir):
                os.makedirs(cal_outdir)
                print 'generated ', cal_outdir
        except:
            print 'Output directory cannot be generated: '\
            '{0} - using generic location'.format(cal_outdir)

            cal_outdir = op.abspath(op.join(pathname[0],'calibrated'))
    try:
        if not op.isdir(cal_outdir):
            os.makedirs(cal_outdir)
    except:
        #this only comes up, if the generic location cannot be generated
        sys.exit('Generic directory cannot be generated: {0}'.format(cal_outdir))

    print '\t Output directory ok: {0}\n'.format(cal_outdir)

    #if re-orientation is required, do it first:
    if orientation is True:
        print '\n\t....re-orient data first...\n'
        ori_outdir = op.abspath(op.join(cal_outdir,'../reoriented_tmp'))
        try:
            if not op.isdir(ori_outdir):
                os.makedirs(ori_outdir)
        except:
            #this only comes up, if the generic location cannot be generated
            sys.exit('Re-orientation directory cannot be generated:'
                ' {0}'.format(ori_outdir))
        
        
        MTfh.reorient_files(lo_allfiles, configfile, lo_stations = lo_allstations,
                             outdir = ori_outdir)

        #change to calibration setup :
        outdir = cal_outdir
        new_inputdir = ori_outdir 

        #file structure has changed, so the re-oriented files have to be read again:
        components = ['ex', 'ey', 'bx', 'by', 'bz']
        lo_allfiles = []
        lo_allheaders = []
        lo_allstations = []
        dirfiles = [op.abspath(op.join(new_inputdir,i)) for i in  os.listdir(new_inputdir) ]
        for tmpfile in dirfiles:
            header = MTfh.read_ts_header(tmpfile)
            lo_allstations.append(header['station'].upper())
            lo_allfiles.append(tmpfile)
            lo_allheaders.append(header)

        lo_allstations = list(set(lo_allstations))

        #check, if list of files is empty
        if len(lo_allfiles) == 0:
            sys.exit('Directory(ies) do(es) not contain files to calibrate:'
                ' {0}'.format(ori_outdir))

    #-------------------------------------------------
    #calibration

    lo_calibrated_files = []
    lo_calibrated_stations = []
    for file_idx, filename in enumerate(lo_allfiles):
        curr_station = lo_allheaders[file_idx]['station'].upper()
        if stationname is not None:
            if stationname.upper() != curr_station.upper():
                continue
        print 'reading file {0}...'.format(filename)

        channel = lo_allheaders[file_idx]['channel']
        lo_calibrated_stations.append(curr_station)

        #get configuration dictionary for this station

        try:
            stationdict = config_dict[curr_station]
        except:
            print 'no entry for station {0} found in configuration file'\
            ' {1} - skipping file'.format(curr_station, configfile)
            continue

        latitude = float(stationdict['latitude'])
        longitude = float(stationdict['longitude'])
        elevation = float(stationdict['elevation'])

        field = channel[0]
        direction = channel[1]

        station_type = stationdict['station_type']

        if field == 'e':
            if station_type == 'b':
                continue
            #check North-South axis orientation
            if direction == 'x':
                #angle = float(stationdict['e_xaxis_azimuth'])
                dipolelength = float(stationdict['e_xaxis_length'])

            #check East-West axis orientation
            if direction == 'y':
                #angle = float(stationdict['e_yaxis_azimuth'])
                dipolelength = float(stationdict['e_yaxis_length'])

            logger = stationdict['e_logger_type']
            gain = float(stationdict['e_logger_gain'])
            instrument = stationdict.get('e_instrument_type','electrodes')
            instrument_amplification = float(stationdict['e_instrument_amplification'])

        elif field == 'b':
            if station_type == 'e':
                continue

            dipolelength = 1.
            logger = stationdict['b_logger_type']
            gain = float(stationdict['b_logger_gain'])
            instrument = stationdict.get('b_instrument_type','coils')
            instrument_amplification = float(stationdict['b_instrument_amplification'])


        MTcb.calibrate_file(filename, outdir, instrument, instrument_amplification,
                            logger, gain, dipolelength, curr_station, channel, 
                            latitude, longitude, elevation,  offset = 0 )
        #print 'calibrated file {0},{1}'.format(outdir, filename)
        lo_calibrated_files.append(filename)
    
    lo_calibrated_stations = list(set(lo_calibrated_stations))
    if len(lo_calibrated_files) == 0:
        if stationname is not None:
            print 'No files found for station {0}'.format(stationname)
            return
        else:
            print 'No files found for stations {0}'.format(lo_allstations)

    print '{0} files calibrated for stations'\
            ' {1}'.format(len(lo_calibrated_files),lo_calibrated_stations)
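
The directory-scan pattern used above (list the files, read each one's header, dedupe the station names via list(set(...))) recurs in the next example as well. Here is a minimal, self-contained sketch of the idea; read_header stands in for MTfh.read_ts_header and is an assumption, not the real API:

import os
import os.path as op

def collect_stations(inputdir, read_header):
    #scan a directory, read each file's header and return the unique station names
    lo_allfiles = [op.abspath(op.join(inputdir, i)) for i in os.listdir(inputdir)
                   if op.isfile(op.join(inputdir, i))]
    lo_allstations = []
    for fn in lo_allfiles:
        header = read_header(fn)
        #skip files without a valid header line
        if not header:
            continue
        lo_allstations.append(header['station'].upper())
    return list(set(lo_allstations)), lo_allfiles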

Example 30

View license
def main():

    if len(sys.argv) < 3:
        raise MTex.MTpyError_inputarguments('Need at least 2 arguments: <path to files> <response file> [<output dir>] [<channel(s)>] ')


    pathname_raw = sys.argv[1] 
    directory = op.abspath(op.realpath(pathname_raw))

    responsefilename_raw = sys.argv[2]
    responsefile = op.abspath(op.realpath(responsefilename_raw))


    if not op.isdir(directory):
        raise MTex.MTpyError_inputarguments('Directory not existing: %s' % (directory))

    if not op.isfile(responsefile):
        raise MTex.MTpyError_inputarguments('Response file not existing: %s' % (responsefile))
    
    #check if the response file is in proper shape (3 columns of real values: freq, re, im):
    try:
        responsedata = np.loadtxt(responsefile)
        s = responsedata.shape
        if s[1] != 3:
            raise #wrong shape - handled by the except clause below
        freq_min = responsedata[0,0]
        freq_max = responsedata[-1,0]

    except: 
        raise MTex.MTpyError_inputarguments('Response file (%s) in wrong format - must be 3 columns: freq,real,imag' % (responsefile))

    #set up output directory: 
    try:
        outdir_raw = sys.argv[3]
        outdir = op.abspath(outdir_raw)
    except:
        outdir = op.join(directory,'instr_resp_corrected')

    try:
        if not op.isdir(outdir):
            os.makedirs(outdir)
    except:
        raise MTex.MTpyError_inputarguments('Output directory cannot be generated: %s' % (outdir))

    #define channels to be considered for correction:
    try:
        lo_channels = list(set([i.upper() if len(i)==2 else 'B'+i.upper() for i in  sys.argv[4].split(',')]))
    except:
        print 'No channel list found - using BX, BY, BZ, HX, HY, HZ'
        lo_channels = ['BX', 'BY', 'HX', 'HY', 'BZ', 'HZ']


    #collect file names  within the folder 
    lo_allfiles = os.listdir(directory)

    lo_allfiles = [op.abspath(op.join(directory,i)) for i in lo_allfiles if op.isfile(op.abspath(op.join(directory,i)))]

    #generate list of list-of-files-for-each-channel:
    lo_lo_files_for_channels = [[] for i in lo_channels  ]

    #check the files for information about the determined channels:
    for fn in lo_allfiles:
        header_dict = MTfh.read_ts_header(fn)
        if len(header_dict.keys()) == 0 :
            continue
        ch = header_dict['channel'].upper()
        if ch not in lo_channels:
            continue    

        try:
            ch_idx = lo_channels.index(ch)
        except ValueError:
            continue

        # use the current file if it contains a header line and signal from the requested channel:
        lo_lo_files_for_channels[ch_idx].append(fn)

    #if no files had header lines or did not contain data from the appropriate channel(s):
    if np.sum([len(i) for i in lo_lo_files_for_channels]) == 0:
        print 'channels: ', lo_channels, ' - directory: ',directory
        raise MTex.MTpyError_inputarguments('No information for channels found in directory {0} - Check header lines!'.format(directory))

    #=============================================
    # start the instrument correction
        
    # looping over all requested channels:
    for ch in lo_channels:
        #skip if no data are available for the current channel:
        if [len(i) for i in lo_lo_files_for_channels][lo_channels.index(ch)] == 0:
            continue

        #set up lists for the infos needed later, esp. for the file handling
        lo_files = lo_lo_files_for_channels[lo_channels.index(ch)]
        lo_t_mins = []
        lo_headers = []

        #read in header lines and sort files by increasing starttimes t_min
        for fn in lo_files:
            header_dict = MTfh.read_ts_header(fn)
            lo_t_mins.append(header_dict['t_min'])
            lo_headers.append(header_dict)

        #sort all the collected lists by t_min
        idxs = np.array(lo_t_mins).argsort()
        lo_t_mins = [lo_t_mins[i] for i in idxs]
        lo_files = [lo_files[i] for i in idxs]
        lo_headers = [lo_headers[i] for i in idxs]
           

        # finding consecutive, continuous time axes:
        lo_timeaxes = []
        ta_old = None

        for idx, header in enumerate(lo_headers):
            ta_cur = np.arange(int(header['nsamples']))/float(header['samplingrate']) + float(header['t_min'])

            #if there is no old ta:
            if ta_old is None:
                ta_old = ta_cur 
                continue

            # if gap between old and new ta is too big:
            if (ta_cur[0] - ta_old[-1]) > (2*1./float(header['samplingrate'])):
                lo_timeaxes.append(np.array(ta_old))
                ta_old = ta_cur 
                continue

            #find index of new ta which is closest to the end of old_ta - most commonly it's '0' !
            overlap = np.abs(ta_cur - ta_old[-1]).argmin()
            ta_cur = ta_cur[overlap:]
            ta_old = np.concatenate([ta_old,ta_cur])
        
        #append last active time axis ta:
        lo_timeaxes.append(np.array(ta_old))

        #determine maximal period from response file and existing time axes.
        #win = get_windowlength() = max([ i[-1]-i[0] for i in lo_timeaxes] ) 
        # the minimum of the maximal resolvable signal period and the longest continuous time axis:
        winmax = 1./freq_min
        #for debugging set large window size:
        #winmax = 5e5
        #later on, if the TS is longer than 3 times this time window, we want to cut out
        #subsections of the time series. These cuts shall consist of triplets of subwindows,
        #each of which shall not be longer than this maximum period.
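        #Schematically (hypothetical numbers): winmax = 100 s on a 350 s long axis
        #gives n_windows = 3 full windows plus a 50 s remainder, split between the ends:
        #   |25s|---s1---|---s2---|---s3---|25s|
        #each step deconvolves a triplet of sections and keeps only the central one;
        #the edge pieces are written out together with the first and last windows.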

        #Now the data set has to be corrected/deconvolved by looping over the collected time axes:
        for ta in lo_timeaxes:
            print '\nhandling time axis: {0} - {1} ({2} samples) '.format(ta[0],ta[-1],len(ta))

            #if the time section is shorter than 3 times the maximum defined by the response function, read in the whole data set at once for this interval
            if (ta[-1] - ta[0]) < (3 * winmax) : 
                print 'time axis short enough ({0} seconds) - reading all at once'.format(ta[-1] - ta[0])

                #collect the appropriate files in a list
                #after the MTpy preprocessing, the start and end of the time series coincide with file starts and endings, so no "half files" are involved.
                cur_time = ta[0]
                data = []
                files = []
                headers = []
                starttimes = []

                while cur_time < ta[-1]:
                    for idx,header in enumerate(lo_headers):
                        ta_cur = np.arange(int(header['nsamples']))/float(header['samplingrate']) + float(header['t_min'])
                        if cur_time in ta_cur:
                            start_idx = np.where(ta_cur == cur_time)[0][0]
                            break
                    fn = lo_files[idx]
                    files.append(fn)
                    headers.append(header)
                    starttimes.append(float(header['t_min']))
                    cur_data = np.loadtxt(fn)

                    print 'current data section length: ',len(cur_data)
                    if ta_cur[-1] <= ta[-1]:
                        data.extend(cur_data[start_idx:].tolist())
                        cur_time = ta_cur[-1] + 1./float(header['samplingrate'])  
                    else:
                        end_idx = np.where(ta_cur == ta[-1])[0][0] 
                        data.extend(cur_data[start_idx:end_idx+1].tolist())
                        cur_time = ta[-1]
                    print 'current data length: ',len(data)

                data = np.array(data)
                data = scipy.signal.detrend(data)
                #at this point, the data set should be set up for the given time axis
                corrected_timeseries = MTin.correct_for_instrument_response(data,float(header['samplingrate']), responsedata)  

                print 'corrected TS starting at {0}, length {1}'.format(ta[0],len(corrected_timeseries))

                #now, save this TS back into the appropriate files, including headers
                for idx,fn in enumerate(files):

                    # output file name: use input file name and append '_true'
                    inbasename = op.basename(fn)
                    outbasename = ''.join([op.splitext(inbasename)[0]+'_true',op.splitext(inbasename)[1]])
                    outfn = op.join(outdir,outbasename)

                    outF = open(outfn,'w')
                    header = headers[idx]
                    unit = header['unit']
                    if unit[-6:].lower() != '(true)':
                        unit +='(true)'
                    header['unit'] = unit
                    headerline = MTfh.get_ts_header_string(header)
                    outF.write(headerline)
                    starttime = starttimes[idx]
                    length = int(float(header['nsamples']))
                    
                    startidx = (np.abs(starttime - ta)).argmin()
                    print startidx,length,len(corrected_timeseries),len(ta)
                    print '\nhandling file {0} - starttime {1}, - nsamples {2}'.format(outfn,starttime,length)
                    print outdir,outfn
                    data = corrected_timeseries[startidx:startidx+length]
                    np.savetxt(outF,data)
                    outF.close()


                #To do so, use the time axis and run over the input files again, determine the filenames. Use them, and put them (slightly modified, perhaps?) into the given output directory
                #return corrected_timeseries

            else:

                #find partition into pieces of length 'winmax'. the remainder is equally split between start and end:

                #assume constant sampling rate, just use the last opened header (see above): 
                samplingrate = float(header['samplingrate'])  

                #total time axis length:
                ta_length = ta[-1] - ta[0] + 1./samplingrate
                
                #partition into winmax long windows 
                n_windows = int(ta_length/winmax)
                remainder = ta_length%winmax
                lo_windowstarts = [ta[0]]
                for i in range(n_windows+1):
                    t0 = ta[0] + remainder/2. + i * winmax
                    lo_windowstarts.append(t0)
                print 'time axis long ({0} seconds) - processing in {1} sections (window: {2})'.format(ta_length,n_windows, winmax)


                # lists of input file(s) containing the data - for all 3 sections of the moving window
                section1_lo_input_files = []
                section2_lo_input_files = []
                section3_lo_input_files = []
                section1_data = []
                section2_data = []
                section3_data = []
                section1_ta = []
                section2_ta = []
                section3_ta = []
                file_open = False

                # loop over the winmax long sections:
                for idx_t0, t0 in enumerate(lo_windowstarts):

                    #the last two window-starts do not get an own window:
                    if idx_t0 > n_windows - 1:
                        # since there are 'n_windows' full intervals for the moving window
                        break

                    print '\n----- section {0} ----\n'.format(idx_t0 + 1)
                    if idx_t0 == n_windows - 1 :
                        endtime = ta[-1]
                    else:
                        endtime = lo_windowstarts[idx_t0+3]-1./samplingrate
                    print 's1 = {0} - {1}, s2 = {2} - {3}, s3 = {4} - {5}\n'.format(t0,lo_windowstarts[idx_t0+1]-1./samplingrate, lo_windowstarts[idx_t0+1],lo_windowstarts[idx_t0+2]-1./samplingrate,lo_windowstarts[idx_t0+2],endtime)

                    #for each step (except the last one, obviously), 3 consecutive parts are read in, concatenated and deconvolved. Then the central part is taken as 'true' data.
                    #the first and the last sections (start and end pieces) are the only ones handled together with the respective following/preceding section

 
                    #the data currently under processing:
                    data = []
                    timeaxis = []

                    #list of current input data files and their starting times - only files, which contain parts of the time interval for the output!
                    lo_infiles = []
                    lo_t0s = []

                    #if old data are present from the step before:
                    if (len(section2_data) > 0) and (len(section3_data) > 0):
                        print 'old data found....moving window'
                        section1_data = section2_data
                        section2_data = section3_data
                        section1_ta = section2_ta
                        section2_ta = section3_ta
                        section1_lo_input_files = section2_lo_input_files
                        section2_lo_input_files = section3_lo_input_files
                        section1_lo_t0s = section2_lo_t0s 
                        section2_lo_t0s = section3_lo_t0s 

                    #otherwise, it's the first step, so all 3 sections have to be read
                    else:
                        print 'first section...initialise data collection'
                        section1_data, section1_lo_input_files,section1_lo_t0s = read_ts_data_from_files(t0,lo_windowstarts[idx_t0 +1],lo_t_mins, lo_files)
                        section2_data, section2_lo_input_files,section2_lo_t0s = read_ts_data_from_files(lo_windowstarts[idx_t0 +1],lo_windowstarts[idx_t0 +2],lo_t_mins, lo_files)

                        section1_ta = np.arange(len(section1_data))/samplingrate + t0
                        section2_ta = np.arange(len(section2_data))/samplingrate + lo_windowstarts[idx_t0 +1]
                        
                    

                    if idx_t0 < n_windows - 1:
                        section3_data, section3_lo_input_files,section3_lo_t0s  = read_ts_data_from_files(lo_windowstarts[idx_t0 +2],lo_windowstarts[idx_t0 +3],lo_t_mins, lo_files)
                    else:
                        #for the last section, there is no lo_windowstarts[idx_t0 +3], so it must be the end of the overall time axis
                        section3_data, section3_lo_input_files,section3_lo_t0s  = read_ts_data_from_files(lo_windowstarts[idx_t0 +2],ta[-1]+1./samplingrate,lo_t_mins, lo_files)


                    section3_ta = np.arange(len(section3_data)) / samplingrate + lo_windowstarts[idx_t0 +2]
                    data = np.concatenate([section1_data, section2_data, section3_data])
                    timeaxis = np.concatenate([section1_ta, section2_ta, section3_ta])  
                    print 'sections ta: ',section1_ta[0],section1_ta[-1],len(section1_ta), section2_ta[0],section2_ta[-1],len(section2_ta),section3_ta[0],section3_ta[-1],len(section3_ta)
                    #continue
                    data = np.array(data)
                    # remove mean and linear trend (precaution, since it should be removed as low frequency 
                    # content anyway)
                    data = scipy.signal.detrend(data)
                    
                    #----------------------------
                    # the actual deconvolution:
                    corrected_data = MTin.correct_for_instrument_response(data, samplingrate, responsedata) 
                    #----------------------------

                    if idx_t0 == 0:
                        # for the first window, the first section is output as well 
                        startidx = 0
                        lo_infiles.extend(section1_lo_input_files)
                        lo_t0s.extend(section1_lo_t0s)

                    else:
                        #otherwise, just the central section, so take the start of this middle section 
                        startidx = (np.abs(timeaxis - lo_windowstarts[idx_t0 + 1])).argmin() 
                    
                    #collect the respective input filenames and their starting times
                    for fn in section2_lo_input_files:
                        if fn not in lo_infiles:
                            lo_infiles.append(fn)
                    for t0 in section2_lo_t0s:
                        if t0 not in lo_t0s:
                            lo_t0s.append(t0)

                    if idx_t0 == n_windows - 1:
                        #for the last window, the last section is added
                        for fn in section3_lo_input_files:
                            if fn not in lo_infiles:
                                lo_infiles.append(fn)
                        for t0 in section3_lo_t0s:
                            if t0 not in lo_t0s:
                                lo_t0s.append(t0)

                        data2write = corrected_data[startidx:]
                        timeaxis2write = timeaxis[startidx:]
                    
                    else:
                        #for all windows but the last, get the middle section end 
                        endidx = (np.abs(timeaxis - lo_windowstarts[idx_t0 + 2])).argmin() 
                        #and cut out the section
                        data2write = corrected_data[startidx:endidx]
                        timeaxis2write = timeaxis[startidx:endidx]

                    #print 'indizes:',len(timeaxis),startidx,len(timeaxis2write),timeaxis2write[0],timeaxis2write[-1]
                    
                    # at this point, we got 
                    # - the corrected data
                    # - the sorted list of the involved input files (and their starting times)
                    #
                    # write data to the output files, whose structure is following the input file(s):

                    # maximum time stamp of the current data set (section)
                    tmax = timeaxis2write[-1]

                    #initialise the current time to the beginning of the section to be written
                    t = timeaxis2write[0]
                    print '\nfiles involved in this section',lo_infiles
                    
                    while t < tmax: 
                        print t

                        if not file_open:
                            print 'no file open...preparing new one'
                            # take the first of the input files in the list:
                            print 'read header of input file {0} '.format(lo_infiles[0])
                            header = MTfh.read_ts_header(lo_infiles[0])
                            ta_tmp = np.arange(float(header['nsamples'])) / float(header['samplingrate']) + float(header['t_min'])
                            unit = header['unit']
                            if unit[-6:].lower() != '(true)':
                                unit +='(true)'
                            header['unit'] = unit
                            headerline = MTfh.get_ts_header_string(header)

                            # output file name: use the name of the file just read (lo_infiles[0]) and append '_true'
                            inbasename = op.basename(lo_infiles[0])
                            outbasename = ''.join([op.splitext(inbasename)[0]+'_true',op.splitext(inbasename)[1]])
                            outfn = op.join(outdir,outbasename)
                            print 'write header to output file {0} '.format(outfn)
                            outF = open(outfn,'w')
                            outF.write(headerline)

                            # if the section exceeds the time axis of the file:
                            if tmax > ta_tmp[-1]:
                                print 'data longer than space left in file...storing first part '
                                #write as many samples to the files as there belong 
                                np.savetxt( outF, data2write[:int(float(header['nsamples']))] )
                                print 'write data to output file {0} '.format(outfn)

                                #close the file
                                outF.close()
                                file_open = False
                                print ' output file closed : {0} '.format(outfn)
                                datalength = 0 
                                #drop out the first elements of the lists
                                dummy = lo_infiles.pop(0)
                                print 'dropped {0}'.format(dummy)
                                dummy = lo_t0s.pop(0)
                                print 'dropped {0}'.format(dummy)

                                #cut the written part of the data
                                print 'data cut from {0} to {1}'.format(len(data2write),len(data2write[int(float(header['nsamples'])):]))
                                data2write = data2write[int(float(header['nsamples'])):]
                                timeaxis2write = timeaxis2write[int(float(header['nsamples'])):]
                                #define the current time as one sample after the end of the file, which was just closed
                                t = ta_tmp[-1] + 1./ float(header['samplingrate'])
                                print 'current time set to {0}'.format(t)
                                # and back to the while condition, since there are unwritten data

                            #if the section is not longer than the time axis of the newly opened file:
                            else: 
                                print 'data fits into open file : {0}'.format(outF.name)
                                #write everything
                                np.savetxt(outF,data2write)
                                print 'wrote data to file'
                                #check if, by chance, this is exactly the correct number of samples for this file:
                                if tmax == ta_tmp[-1]:
                                    #if so, close it
                                    print 'file full....closing...'
                                    outF.close()
                                    file_open = False
                                    datalength = 0 

                                    print 'written data to file and closed it: {0}'.format(outF.name)

                                    dummy = lo_infiles.pop(0)
                                    dummy = lo_t0s.pop(0)
                                    #actually, the infile list should be empty now, since the whole section has been written to a file!!
                                
                                # otherwise, the section is shorter, so the file is (potentially) still missing entries
                                else:
                                    datalength = len(data2write)

                                    print 'file not full...{0} remains open - {1} of max. {2} samples written '.format(outF.name,datalength,int(float(header['nsamples'])))
                                    file_open = True

                                #define the current time as at the end of the time axis of the section, i.e. 'go to next section':
                                t = tmax
                                print 'current time set to ',t
                        
                        #otherwise, a file is already open and the next section has to be appended there:
                        else:
                            print 'open file {0} is waiting for data...'.format(outF.name)
                            header = MTfh.read_ts_header(lo_infiles[0])
                            ta_tmp = np.arange(float(header['nsamples'])) / float(header['samplingrate']) + float(header['t_min'])
                            print tmax,ta_tmp[-1],'{0} of {1} samples used...{2} waiting\n'.format(datalength,len(ta_tmp),len(data2write))

                            #check if the data exceeds the time axis of the open file:
                            if tmax > ta_tmp[-1]:
                                print 'data longer than space left in file...storing first {0} here and cutting off the rest ({1}) for further processing...\n'.format(len(ta_tmp)-datalength,len(data2write)-len(ta_tmp)+datalength)

                                #determine the index of the section time axis, which belongs to the last entry of the currently open file - including the last value!
                                endidx = (np.abs(timeaxis2write - ta_tmp[-1] )).argmin() +1 
                                #write the respective part of the data to the file and close it then
                                np.savetxt(outF,data2write[:endidx])
                                outF.close()
                                file_open = False
                                print 'closed full file {0}'.format(outF.name)
                                #cut out the first bit, which is already written
                                print 'cutting data to remaining bit ({0}->{1})\n'.format(len(data2write),len(data2write)-len(data2write[:endidx]))
                                data2write = data2write[endidx:]
                                timeaxis2write = timeaxis2write[endidx:]
                                # drop the file, which is used and done
                                print 'dropping name of closed file',lo_infiles[0]
                                dummy = lo_infiles.pop(0)
                                dummy = lo_t0s.pop(0)
                                #set the current time to the start of the next file 
                                t = ta_tmp[-1] + 1./ float(header['samplingrate'])
                                print 'current time set to ',t

                            #if the section is not longer than the time axis of the open file:
                            else:
                                print 'data fits into the open file :',outF.name
                                #write everything
                                np.savetxt(outF,data2write)
                                datalength += len(data2write)
                                print 'file contains {0} samples - {1} open '.format(datalength,int(float(header['nsamples']))-datalength)
                                #check if, by chance, this is exactly the correct number of samples for this file:
                                if tmax == ta_tmp[-1]:
                                    #if so, close it
                                    outF.close()
                                    print 'written data to file {0}'.format(outfn)
                                    file_open = False
                                    dummy = lo_infiles.pop(0)
                                    dummy = lo_t0s.pop(0)
                                    #actually, the infile list should be empty now, since the whole section has been written to a file!!
                                
                                # otherwise, the section is shorter, so the file is (potentially) still missing entries
                                else:
                                    file_open = True

                                #define the current time as at the end of the time axis of the section => go to next section:
                                t = tmax

                # after looping over all sections, check if the last file has been closed:
                if file_open:
                    outF.close()
                    print 'written data to file {0}'.format(outF.name)
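
This example reads its two required arguments positionally and its optional third and fourth arguments by indexing sys.argv inside bare try/except blocks. The same pattern with explicit length checks, as a minimal sketch (the function name is a placeholder):

import sys
import os.path as op

def parse_args(argv):
    #two required positional arguments, two optional ones
    if len(argv) < 3:
        sys.exit('Need at least 2 arguments: <path to files> <response file>'
                 ' [<output dir>] [<channel(s)>]')
    directory    = op.abspath(op.realpath(argv[1]))
    responsefile = op.abspath(op.realpath(argv[2]))
    #optional arguments: check the length instead of catching IndexError
    outdir = op.abspath(argv[3]) if len(argv) > 3 else op.join(directory, 'instr_resp_corrected')
    channels = argv[4].split(',') if len(argv) > 4 else ['BX', 'BY', 'BZ', 'HX', 'HY', 'HZ']
    return directory, responsefile, outdir, channels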

Example 31

Project: gkno_launcher
Source File: gkno.py
View license
def main():

  # Define a class for processing the command line.
  command = cl.commandLine()

  # Define a help class.
  gknoHelp = hp.helpInformation(os.getenv('GKNOCOMMITID'), __date__, __version__)

  # Define a plotting class for drawing the graph.
  plot = pg.plotGraph()

  #Define an object to hold information for creating web content.
  web = w.webContent()

  # Define the source paths of all the gkno machinery.
  sourcePath                     = os.path.abspath(sys.argv[0])[0:os.path.abspath(sys.argv[0]).rfind('/src/gkno.py')]
  configurationFilesPath         = sourcePath + '/config_files'
  pipelineConfigurationFilesPath = sourcePath + '/config_files/pipes'
  toolConfigurationFilesPath     = sourcePath + '/config_files/tools'
  resourcesPath                  = sourcePath + '/resources'
  toolsPath                      = sourcePath + '/tools'

  # Define the commit id of this version of gkno.
  commitId = os.getenv('GKNOCOMMITID')

  # Define an admin utilities object. This handles all of the build/update steps
  # along with 'resource' management.
  admin = au.adminUtils(sourcePath)

  # Initialise the gkno specific configuration file.
  gknoConfiguration = gc.gknoConfiguration(configurationFilesPath)

  # Determine if gkno is being run in admin mode and then determine the mode.
  admin.isRequested, admin.mode = command.isAdmin(admin.allModes)
  mode                          = command.determineMode(admin.isRequested, gknoConfiguration)

  # Print gkno title and version to the screen.
  write.printHeader(__version__, __date__, os.getenv('GKNOCOMMITID'))

  # List the gkno arguments if requested.
  #if mode == "gkno help": gknoHelp.gknoArgumentHelp()

  # Check to see if the configuration files are to be found in a directory other than the default.
  path                  = command.getConfigurationFilePath(gknoConfiguration.options)
  userConfigurationPath = path if path else None

  # Define a class for handling files. In the initialisation, determine the names of all available
  # tools and pipelines.
  files = fh.fileHandling(toolConfigurationFilesPath, pipelineConfigurationFilesPath, userConfigurationPath)

  # Define a class for handling arguments whose values are a command to evaluate.
  evalCom = ec.evaluateCommands()

  # If not being run in admin mode, determine the name of the pipeline being run. Note that
  # for the code, a tool is considered a pipeline with a single task, so the terminology
  # 'pipeline' is used throughout for both cases.
  if not admin.isRequested:

    # Check that gkno has been built before proceeding.
    if not admin.isBuilt(): adminErrors.adminErrors().gknoNotBuilt()
    pipeline = command.determinePipeline()

  # Otherwise, handle the admin functions.
  else:
    if admin.run(sys.argv):

      # Check that all of the tools were successfully built. If not, post a warning about which tools cannot
      # be used.
      if admin.allBuilt: exit(0)
      else: adminErrors.adminErrors().failedToolBuilds(admin.builtTools)
    else: adminErrors.adminErrors().errors.terminate(adminErrors.adminErrors().errorCode)

  # Display gkno arguments if requested.
  if mode == 'gkno help': gknoHelp.gknoArgumentHelp(gknoConfiguration.arguments)

  # If the pipeline name has not been supplied, general help must be required.
  if not pipeline and mode != 'web': gknoHelp.generalHelp(mode, command.category, admin, pipelineConfigurationFilesPath)

  # If json files for the web page were requested, set the pipelinesList to all pipelines and loop over
  # them all, generating the required information. Otherwise, set the list to the pipeline to be run.
  pipelinesList = files.pipelines if mode == 'web' else [pipeline]
  for pipeline in pipelinesList:
  
    # Get the path to the pipeline and configuration file.
    filename = files.checkPipeline(toolConfigurationFilesPath, pipelineConfigurationFilesPath, userConfigurationPath, pipeline)

    # Generate a super pipeline class that holds information about the full collection of all nested
    # pipelines.
    superpipeline = sp.superpipelineClass(filename)

    # Dig down into the pipeline configuration files, validate the contents of the configuration files
    # and build the super pipeline tiered structure.
    superpipeline.getNestedPipelineData(files, pipelineConfigurationFilesPath, userConfigurationPath, filename)
  
    # Check that no pipeline arguments conflict with gkno arguments.
    superpipeline.checkForArgumentConflicts(gknoConfiguration.arguments, gknoConfiguration.shortForms)
  
    # If help categories were requested, or a list of all available pipelines, print them here.
    if mode == 'categories' or mode == 'list-all': gknoHelp.generalHelp(mode, command.category, admin, pipelineConfigurationFilesPath)

    # Generate a list of all tasks, tools, unique and shared node IDs from all pipelines.
    superpipeline.setTools()
  
    # Now that all the tasks are known, check that each pipeline only contains valid tasks. If a 
    # task in the pipeline addresses a node in a contained pipeline, knowledge of all pipelines is
    # required to perform this check.
    superpipeline.checkContainedTasks()
    
    # Loop over the list of required tools, open and process their configuration files and store.
    superpipeline.addTools(files, toolConfigurationFilesPath, userConfigurationPath)
  
    # Loop over all of the pipeline configuration files and check that all nodes that define a tool
    # argument have valid arguments for the tool.
    superpipeline.checkArgumentsInPipeline()

    # Create an arguments object. This will be populated with all of the arguments available for this
    # pipeline along with associated functions. Add all of the top level pipeline arguments to this
    # object.
    args = ag.arguments()
    args.addPipelineArguments(superpipeline.pipelineConfigurationData[superpipeline.pipeline].longFormArguments)

    # Define the graph object that will contain the pipeline graph and necessary operations and methods
    # to build and modify it.
    graph = gr.pipelineGraph(superpipeline.pipeline)

    # Loop over the tiers of nested pipelines and build them into the graph.
    for tier in superpipeline.pipelinesByTier.keys():
      for pipelineName in superpipeline.pipelinesByTier[tier]:
        graph.buildPipelineTasks(superpipeline.pipelineConfigurationData[pipelineName], superpipeline)

    # Associate configuration file unique node ids with graph node ids for unique nodes pointing to nodes
    # in nested pipelines.
    graph.findUniqueNodes(superpipeline)

    # Parse the configuration files identifying nodes that are to be connected.
    graph.connectNodes(superpipeline)

    # If any pipeline configuration nodes are given commands to evaluate, check the validity of the instructions
    # and implement them.
    evalCom.checkCommands(graph, superpipeline)

    # Determine which graph nodes are required. A node may be used by multiple tasks and may be optional
    # for some and required by others. For each node, loop over all edges and check if any of the edges
    # are listed as required. If so, the node is required and should be marked as such.
    graph.markRequiredNodes(superpipeline)

    # If the main pipeline lists a tool whose arguments should be imported, check that the listed tool is
    # valid, that none of the arguments conflict with the pipeline and then add the arguments to the
    # allowed arguments.
    args.importArguments(graph, superpipeline)

    # Now that arguments have been imported from tools, check that there are no problems.
    args.checkArguments(superpipeline)

    # Now that the graph is built, parse all of the arguments in the pipelines and associate them with the
    # graph nodes and vice versa.
    args.assignNodesToArguments(graph, superpipeline)

    # If web page json files are being created, update the list of categories and the pipeline information. Also
    # generate a reduced plot of the pipeline.
    if mode == 'web' and not superpipeline.getPipelineData(superpipeline.pipeline).isDevelopment:
      web.updateCategories(superpipeline.pipelineConfigurationData[superpipeline.pipeline])
      web.updatePipelineInformation(superpipeline.pipelineConfigurationData[superpipeline.pipeline], args.arguments)

      # Generate the workflow for the pipeline.
      workflow = graph.generateWorkflow()

      # Check for required arguments. This check will create required nodes that are unset, but not terminate gkno,
      # since we are just trying to create pipeline plots.
      dc.checkRequiredArguments(graph, superpipeline, args, isTerminate = False)

      # Generate the plot.
      plot.plot(superpipeline, graph, str(superpipeline.pipeline), isReduced = True)

  # Get information about individual tools, write out web content and terminate.
  if mode == 'web':
    web.updateTools(files, toolConfigurationFilesPath)
    web.getGknoArguments(gknoConfiguration.arguments)
    web.writeContent(os.getenv('GKNOCOMMITID'), __version__, __date__)

    print('All web content successfully generated.')
    exit(0)

  # Generate the workflow.
  workflow = graph.generateWorkflow()

  # Process the command line arguments.
  command.processArguments(superpipeline, args, gknoConfiguration)

  # If the pipeline is being rerun, determine the random string to use, if possible.
  if gknoConfiguration.getGknoArgument('GKNO-RERUN', command.gknoArguments):
    randomString = files.getRandomString(pipeline)
    if not randomString: command.errors.cannotRerunPipeline(pipeline)
    else: superpipeline.randomString = randomString

  # Check if a parameter set is to be removed.
  removeParameterSet = gknoConfiguration.getGknoArgument('GKNO-REMOVE-PARAMETER-SET', command.gknoArguments)

  # Determine if a parameter set is being exported. If so, there is no need to check that all required
  # arguments are set, since the pipeline is not being executed.
  graph.exportParameterSet = gknoConfiguration.getGknoArgument('GKNO-EXPORT-PARAMETER-SET', command.gknoArguments)

  # Determine the requested parameter set and add the parameters to the graph.
  parSet             = ps.parameterSets()
  graph.parameterSet = command.getParameterSetName(command.gknoArguments, gknoConfiguration)
  if removeParameterSet: parSet.removeParameterSet(graph, superpipeline, removeParameterSet)

  # Step through the workflow and determine the default parameter sets for all of the tasks. Populate
  # the nodes with these task level default parameter sets, creating nodes where necessary.
  command.addGknoArguments(graph.addTaskParameterSets(superpipeline, 'default', gknoConfiguration))

  # Now add the default parameter set for the pipelines.
  graph.addPipelineParameterSets(superpipeline, args, 'default', resourcesPath)

  if graph.parameterSet and graph.parameterSet != 'none' and graph.parameterSet != 'None': 
    graph.addParameterSet(superpipeline, args, superpipeline.pipeline, graph.parameterSet, resourcesPath)

  # If help was requested, print out the relevant help information.
  # TODO ADMIN HELP
  if mode == 'help': gknoHelp.pipelineHelp(superpipeline, graph, args.arguments, False)

  # Write out help on gkno specific (e.g. not associated with a specific pipeline) arguments.
  elif mode == 'gkno help': gknoHelp.pipelineHelp(superpipeline, graph, args.arguments, gknoConfiguration.arguments)

  # Parse the command line arguments and associate the supplied command line argument values with the graph node.
  command.parseTasksAsArguments(superpipeline)
  associatedNodes = command.associateArgumentsWithGraphNodes(graph.graph, superpipeline)

  # Create nodes for all of the defined arguments for which a node does not already exist and add the
  # argument values to the node.
  graph.attachArgumentValuesToNodes(graph, superpipeline, args, command.pipelineArguments, associatedNodes)

  # Loop over all nodes and expand lists of arguments. This is only valid for arguments that are either options,
  # or inputs to a task that are not simultaneously outputs of another task.
  graph.expandLists()

  # Check that all of the values associated with all of the nodes are of the correct type (e.g. integer, flag etc)
  # and also that any files also have the correct extension.
  dc.checkValues(graph, superpipeline)

  # Determine whether or not to output a visual representation of the pipeline graph.
  plot.isPlotRequired(command.gknoArguments, gknoConfiguration)
  if plot.isFullPlot: plot.plot(superpipeline, graph, plot.fullPlotFilename, isReduced = False)
  if plot.isReducedPlot: plot.plot(superpipeline, graph, plot.reducedPlotFilename, isReduced = True)

  # If multiple values have been supplied to linked arguments, determine if they should be reordered.
  if not gknoConfiguration.getGknoArgument('GKNO-DO-NOT-REORDER', command.gknoArguments): command.linkedArguments(graph, superpipeline, args)

  # Loop over all of the nodes in the graph and ensure that all required arguments have been set. Any output files
  # for which construction instructions are provided can be omitted from this check. This will ensure that all required
  # input files are set, ensuring that filename construction can proceed. The check will be performed again after
  # filenames have been constructed, without the omission of constructed files.
  if not graph.exportParameterSet: dc.checkRequiredArguments(graph, superpipeline, args, isTerminate = True)

  # Check for greedy tasks in the pipeline and mark the relevant nodes and edges.
  graph.setGreedyTasks(superpipeline)

  # If multiple outputs have been specified, but only single inputs with multiple options, ensure that there are
  # the same number of input files as there are output files.
  graph.propogateInputs()

  # If the user has requested that a parameter set is to be exported, export the parameter set and terminate.
  if graph.exportParameterSet: parSet.export(graph, superpipeline, args, command.pipelineArguments)

  # Loop over the tasks in the pipeline and construct filenames for arguments that require them, but weren't given
  # any on the command line. In addition, if multiple options are given to a task, this routine will generate new
  # nodes for the task and files and link them together as necessary.
  graph.constructFiles(superpipeline)

  # Check that all of the streams are correctly marked in the superpipeline. This checks to see if a task is
  # marked as accepting a stream, but the task is itself a pipeline, for example. In this case, the first task
  # in the nested pipeline needs to be marked as accepting a stream.
  superpipeline.checkStreams(graph)

  # Determine which files are marked for deletion.
  superpipeline.determineFilesToDelete(graph)

  # Print the workflow to screen.
  write.workflow(superpipeline, workflow)

  # If any input values have been reordered, warn the user.
  write.reordered(graph, command.reorderedLists)

  # Having constructed all of the output file names (which may then be linked to other tasks as outputs), rerun the
  # check of the values to ensure that the data types and the associated extensions are valid. This will provide a
  # check of whether tools can be linked as described in the configuration file. In the previous check, not all of the
  # filenames were present (but the check ensured that the values provided on the command line were valid). If a task
  # outputs a file with an extension 'ext1' and the file is then passed to a task that requires files with the
  # extension 'ext2', the pipeline is invalid. The output filename has been constructed as file.ext1 and so the following
  # routine will flag the file as invalid as input to the next task.
  dc.checkValues(graph, superpipeline)

  # If the pipeline has instructions to terminate based on input conditions, modify the pipeline.
  graph.terminatePipeline(superpipeline)

  # Having reached this point, all of the required values have been checked, are present and have the correct data
  # type. In the construction of the graph, a number of non-required nodes could have been created and, since they
  # are not required, they could be unpopulated. March through the graph and purge any nodes that have no values or
  # are isolated.
  dc.purgeEmptyNodes(graph)

  # Check if any tasks have been listed as outputting to a stream. If so, check that the task can output to a
  # stream and the task it feeds into can accept a stream. If everything is ok, update the graph to reflect
  # the streaming nodes.
  graph.checkStreams(superpipeline)

  # For all files marked as intermediate, determine the latest task in the pipeline that uses them. Ensure that
  # the data structures inside 'struct' only associate the files to delete with this latest task.
  graph.deleteFiles()
  
  # Set the absolute paths of all the files used in the pipeline.
  requiredInputFiles = dc.setFilePaths(graph, command.gknoArguments, gknoConfiguration)

  # Determine the execution structure of the pipeline.
  struct = es.executionStructure()
  struct.determineExecutionStructure(graph)

  # If a task has multiple divisions, ensure that there are the same number of input and output files for the task.
  # It is possible that n input files were specified on the command line, leading to n executions of the task, but
  # m output files were specified. This will lead to problems when constructing the command lines.
  graph.checkNumberOfOutputs()

  # For values that are commands to be executed at runtime, include any values from other graph nodes as necessary.
  evalCom.addValues(graph)

  # Generate a makefiles object and then build all the command lines for the tasks as well as creating a list of each
  # tasks dependencies and output.
  make = mk.makefiles()
  make.generateCommandLines(graph, superpipeline, struct)

  # Determine if multiple makefiles have been requested and whether to add a unique id to the makefiles.
  make.isMultipleMakefiles, make.makefileId = command.checkMakefiles(gknoConfiguration.options)

  # Update the intermediate files.
  make.updateIntermediates(struct)

  # Open the required makefiles. This is either a single makefile that will run all tasks, or a set of makefiles broken
  # up by the phase, subphase and division.
  make.openMakefiles(superpipeline.pipeline, superpipeline.randomString, struct)

  # Add the header text to the file(s).
  make.addHeader(commitId, __date__, __version__, superpipeline.pipeline, sourcePath, toolsPath, resourcesPath)

  # Include the paths of all the required executables.
  make.addUniqueExecutables(graph, struct)

  # Add phony information.
  make.addPhony()

  # Get the intermediate and output files for the whole pipeline.
  outputs = make.getAllOutputs(struct, superpipeline.randomString)

  # Remove the 'ok' file used to indicate successful execution.
  make.removeOk()

  # Add the command lines to the makefiles.
  for phase in struct.phaseInformation:
    for subphase in range(1, struct.phaseInformation[phase].numberSubphases + 1):
      for division in range(1, struct.phaseInformation[phase].numberDivisions + 1):
        make.addCommandLines(graph, struct, phase, subphase, division)

  # Write final information to the makefile, then close the file.
  make.completeFile(outputs)

  # Close the makefiles.
  make.closeFiles()

  # Check that all of the dependent files exist (excluding dependencies that are created by tasks in the pipeline).
  success = files.checkFileExistence(requiredInputFiles, resourcesPath, toolsPath)

  # Having established the mode of operation and checked that the command lines are
  # valid etc., ping the website to log use of gkno.
  if not gknoConfiguration.getGknoArgument('GKNO-DNL', command.gknoArguments): tracking.phoneHome(sourcePath, pipeline)

  # Prior to execution of the makefile, ensure that all required executables exist. This first requires parsing the
  # user settings file.
  exe.executables().checkExecutables(toolsPath, superpipeline, admin.userSettings['compiled tools'])

  # Execute the generated script unless the user has explicitly asked for it not to be run, or if multiple makefiles
  # have been generated.
  if gknoConfiguration.options['GKNO-DO-NOT-EXECUTE'].longFormArgument not in command.gknoArguments and not make.isMultipleMakefiles and success:

    # Get the number of parallel jobs to be requested.
    jobsArgument = gknoConfiguration.options['GKNO-JOBS'].longFormArgument
    numberJobs   = command.gknoArguments[jobsArgument][0] if jobsArgument in command.gknoArguments else 1

    # Generate the execution command.
    execute = 'make -k -j ' + str(numberJobs) + ' --file ' + make.singleFilename
    success = subprocess.call(execute.split())
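
gkno locates its installation root by stripping a known suffix from sys.argv[0] (see the sourcePath line near the top of the example). A generalised sketch of that idiom; the marker string is gkno-specific and the dirname fallback is an assumption, not part of the original:

import os
import sys

def install_root(marker='/src/gkno.py'):
    #sys.argv[0] is the path used to invoke the script; strip the known
    #suffix to recover the installation root
    script = os.path.abspath(sys.argv[0])
    idx = script.rfind(marker)
    #fall back to the script's own directory if the expected layout is absent
    return script[:idx] if idx != -1 else os.path.dirname(script)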

Example 32

Project: sonospy
Source File: widget.py
View license
def console():
    """ Defines the behavior of the console web2py execution """

    usage = "python web2py.py"

    description = """\
    web2py Web Framework startup script.
    ATTENTION: unless a password is specified (-a 'passwd') web2py will
    attempt to run a GUI. In this case command line options are ignored."""

    description = dedent(description)

    parser = OptionParser(usage, None, Option, ProgramVersion)

    parser.description = description

    parser.add_option('-i',
                      '--ip',
                      default='127.0.0.1',
                      dest='ip',
                      help='ip address of the server (127.0.0.1)')

    parser.add_option('-p',
                      '--port',
                      default='8000',
                      dest='port',
                      type='int',
                      help='port of server (8000)')

    msg = 'password to be used for administration'
    msg += ' (use -a "<recycle>" to reuse the last password)'
    parser.add_option('-a',
                      '--password',
                      default='<ask>',
                      dest='password',
                      help=msg)

    parser.add_option('-c',
                      '--ssl_certificate',
                      default='',
                      dest='ssl_certificate',
                      help='file that contains ssl certificate')

    parser.add_option('-k',
                      '--ssl_private_key',
                      default='',
                      dest='ssl_private_key',
                      help='file that contains ssl private key')

    parser.add_option('-d',
                      '--pid_filename',
                      default='httpserver.pid',
                      dest='pid_filename',
                      help='file to store the pid of the server')

    parser.add_option('-l',
                      '--log_filename',
                      default='httpserver.log',
                      dest='log_filename',
                      help='file to log connections')

    parser.add_option('-n',
                      '--numthreads',
                      default='10',
                      type='int',
                      dest='numthreads',
                      help='number of threads')

    parser.add_option('-s',
                      '--server_name',
                      default=socket.gethostname(),
                      dest='server_name',
                      help='server name for the web server')

    msg = 'max number of queued requests when server unavailable'
    parser.add_option('-q',
                      '--request_queue_size',
                      default='5',
                      type='int',
                      dest='request_queue_size',
                      help=msg)

    parser.add_option('-o',
                      '--timeout',
                      default='10',
                      type='int',
                      dest='timeout',
                      help='timeout for individual request (10 seconds)')

    parser.add_option('-z',
                      '--shutdown_timeout',
                      default='5',
                      type='int',
                      dest='shutdown_timeout',
                      help='timeout on shutdown of server (5 seconds)')
    parser.add_option('-f',
                      '--folder',
                      default=os.getcwd(),
                      dest='folder',
                      help='folder from which to run web2py')

    parser.add_option('-v',
                      '--verbose',
                      action='store_true',
                      dest='verbose',
                      default=False,
                      help='increase --test verbosity')

    parser.add_option('-Q',
                      '--quiet',
                      action='store_true',
                      dest='quiet',
                      default=False,
                      help='disable all output')

    msg = 'set debug output level (0-100, 0 means all, 100 means none;'
    msg += ' default is 30)'
    parser.add_option('-D',
                      '--debug',
                      dest='debuglevel',
                      default=30,
                      type='int',
                      help=msg)

    msg = 'run web2py in interactive shell or IPython (if installed) with'
    msg += ' specified appname'
    parser.add_option('-S',
                      '--shell',
                      dest='shell',
                      metavar='APPNAME',
                      help=msg)

    msg = 'only use plain python shell; should be used with --shell option'
    parser.add_option('-P',
                      '--plain',
                      action='store_true',
                      default=False,
                      dest='plain',
                      help=msg)

    msg = 'auto import model files; default is False; should be used'
    msg += ' with --shell option'
    parser.add_option('-M',
                      '--import_models',
                      action='store_true',
                      default=False,
                      dest='import_models',
                      help=msg)

    msg = 'run PYTHON_FILE in web2py environment;'
    msg += ' should be used with --shell option'
    parser.add_option('-R',
                      '--run',
                      dest='run',
                      metavar='PYTHON_FILE',
                      default='',
                      help=msg)

    msg = 'run doctests in web2py environment; ' +\
        'TEST_PATH like a/c/f (c,f optional)'
    parser.add_option('-T',
                      '--test',
                      dest='test',
                      metavar='TEST_PATH',
                      default=None,
                      help=msg)

    parser.add_option('-W',
                      '--winservice',
                      dest='winservice',
                      default='',
                      help='-W install|start|stop as Windows service')

    msg = 'trigger a cron run manually; usually invoked from a system crontab'
    parser.add_option('-C',
                      '--cron',
                      action='store_true',
                      dest='extcron',
                      default=False,
                      help=msg)

    parser.add_option('-N',
                      '--no-cron',
                      action='store_true',
                      dest='nocron',
                      default=False,
                      help='do not start cron automatically')

    parser.add_option('-L',
                      '--config',
                      dest='config',
                      default='',
                      help='config file')

    parser.add_option('-F',
                      '--profiler',
                      dest='profiler_filename',
                      default=None,
                      help='profiler filename')

    parser.add_option('-t',
                      '--taskbar',
                      action='store_true',
                      dest='taskbar',
                      default=False,
                      help='use web2py gui and run in taskbar (system tray)')

    parser.add_option('',
                      '--nogui',
                      action='store_true',
                      default=False,
                      dest='nogui',
                      help='text-only, no GUI')

    parser.add_option('-A',
                      '--args',
                      action='store',
                      dest='args',
                      default='',
                      help='should be followed by a list of arguments to be passed to script, to be used with -S, -A must be the last option')

    if '-A' in sys.argv: k = sys.argv.index('-A')
    elif '--args' in sys.argv: k = sys.argv.index('--args')
    else: k=len(sys.argv)
    sys.argv, other_args = sys.argv[:k], sys.argv[k+1:]
    (options, args) = parser.parse_args()
    options.args = [options.run] + other_args

    if options.quiet:
        capture = cStringIO.StringIO()
        sys.stdout = capture
        logging.getLogger().setLevel(logging.CRITICAL + 1)
    else:
        logging.getLogger().setLevel(options.debuglevel)

    if options.config[-3:] == '.py':
        options.config = options.config[:-3]

    if not os.path.exists('applications'):
        os.mkdir('applications')

    if not os.path.exists('deposit'):
        os.mkdir('deposit')

    if not os.path.exists('site-packages'):
        os.mkdir('site-packages')

    sys.path.append(os.path.join(os.getcwd(),'site-packages'))

    # make sure the applications directory is a Python package
    if not os.path.exists('applications/__init__.py'):
        fp = open('applications/__init__.py', 'w')
        fp.write('')
        fp.close()

    if not os.path.exists('welcome.w2p') or os.path.exists('NEWINSTALL'):
        w2p_pack('welcome.w2p','applications/welcome')
        os.unlink('NEWINSTALL')

    return (options, args)
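
The noteworthy sys.argv handling in this example is the pre-parse split: web2py truncates sys.argv at the first -A/--args marker before optparse ever runs, so everything after the marker is handed to the -S/-R script verbatim instead of being rejected as unknown options. A standalone sketch of the same idiom (parse_with_passthrough and the -v option are illustrative, not part of web2py):

import sys
from optparse import OptionParser

def parse_with_passthrough(argv=None):
    """Split argv at the first -A/--args marker; tokens after it are
    forwarded verbatim instead of being parsed as options."""
    argv = list(sys.argv if argv is None else argv)
    if '-A' in argv:
        k = argv.index('-A')
    elif '--args' in argv:
        k = argv.index('--args')
    else:
        k = len(argv)
    own, passthrough = argv[:k], argv[k + 1:]
    parser = OptionParser()
    parser.add_option('-v', '--verbose', action='store_true', default=False)
    options, args = parser.parse_args(own[1:])  # own[0] is the program name
    return options, args, passthrough

Calling parse_with_passthrough(['prog', '-v', '-A', 'one', '--two']) returns 'one' and '--two' untouched in the passthrough list, which is exactly why -A must be the last option on the web2py command line.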

Example 33

Project: Nagstamon
Source File: Config.py
View license
    def __init__(self):
        """
            read config file and set the appropriate attributes
            supposed to be sensible defaults
        """
        # move from minute interval to seconds
        self.update_interval_seconds = 60
        self.short_display = False
        self.long_display = True
        self.show_tooltips = True
        self.show_grid = True
        self.grid_use_custom_intensity = False
        self.grid_alternation_intensity = 10
        self.highlight_new_events = True
        self.default_sort_field = 'status'
        self.default_sort_order = 'descending'
        self.filter_all_down_hosts = False
        self.filter_all_unreachable_hosts = False
        self.filter_all_flapping_hosts = False
        self.filter_all_unknown_services = False
        self.filter_all_warning_services = False
        self.filter_all_critical_services = False
        self.filter_all_flapping_services = False
        self.filter_acknowledged_hosts_services = False
        self.filter_hosts_services_disabled_notifications = False
        self.filter_hosts_services_disabled_checks = False
        self.filter_hosts_services_maintenance = False
        self.filter_services_on_acknowledged_hosts = False
        self.filter_services_on_down_hosts = False
        self.filter_services_on_hosts_in_maintenance = False
        self.filter_services_on_unreachable_hosts = False
        self.filter_hosts_in_soft_state = False
        self.filter_services_in_soft_state = False
        self.position_x = 30
        self.position_y = 30
        self.popup_details_hover = True
        self.popup_details_clicking = False
        self.close_details_hover = True
        self.close_details_clicking = False
        self.connect_by_host = True
        self.connect_by_dns = False
        self.connect_by_ip = False
        self.use_default_browser = True
        self.use_custom_browser = False
        self.custom_browser = ''
        self.debug_mode = False
        self.debug_to_file = False
        self.debug_file = os.path.expanduser('~') + os.sep + "nagstamon.log"
        self.check_for_new_version = True
        self.notification = True
        self.notification_flashing = True
        self.notification_desktop = False
        self.notification_actions = False
        self.notification_sound = True
        self.notification_sound_repeat = False
        self.notification_default_sound = True
        self.notification_custom_sound = False
        self.notification_custom_sound_warning = ''
        self.notification_custom_sound_critical = ''
        self.notification_custom_sound_down = ''
        self.notification_action_warning = False
        self.notification_action_warning_string = ''
        self.notification_action_critical = False
        self.notification_action_critical_string = ''
        self.notification_action_down = False
        self.notification_action_down_string = ''
        self.notification_action_ok = False
        self.notification_action_ok_string = ''
        self.notification_custom_action = False
        self.notification_custom_action_string = ''
        self.notification_custom_action_separator = ''
        self.notification_custom_action_single = False
        self.notify_if_up = False
        self.notify_if_warning = True
        self.notify_if_critical = True
        self.notify_if_unknown = True
        self.notify_if_unreachable = True
        self.notify_if_down = True
        # Regular expression filters
        self.re_host_enabled = False
        self.re_host_pattern = ''
        self.re_host_reverse = False
        self.re_service_enabled = False
        self.re_service_pattern = ''
        self.re_service_reverse = False
        self.re_status_information_enabled = False
        self.re_status_information_pattern = ''
        self.re_status_information_reverse = False
        self.color_ok_text = self.default_color_ok_text = '#FFFFFF'
        self.color_ok_background = self.default_color_ok_background = '#006400'
        self.color_warning_text = self.default_color_warning_text = "#000000"
        self.color_warning_background = self.default_color_warning_background = '#FFFF00'
        self.color_critical_text = self.default_color_critical_text = '#FFFFFF'
        self.color_critical_background = self.default_color_critical_background = '#FF0000'
        self.color_unknown_text = self.default_color_unknown_text = '#000000'
        self.color_unknown_background = self.default_color_unknown_background = '#FFA500'
        self.color_unreachable_text = self.default_color_unreachable_text = '#FFFFFF'
        self.color_unreachable_background = self.default_color_unreachable_background = '#8B0000'
        self.color_down_text = self.default_color_down_text = '#FFFFFF'
        self.color_down_background = self.default_color_down_background = '#000000'
        self.color_error_text = self.default_color_error_text = '#000000'
        self.color_error_background = self.default_color_error_background = '#D3D3D3'
        self.statusbar_floating = True
        self.icon_in_systray = False
        # ##self.appindicator = False
        self.fullscreen = False
        self.fullscreen_display = 0
        self.font = ''
        self.defaults_acknowledge_sticky = False
        self.defaults_acknowledge_send_notification = False
        self.defaults_acknowledge_persistent_comment = False
        self.defaults_acknowledge_all_services = False
        self.defaults_acknowledge_comment = 'acknowledged'
        self.defaults_submit_check_result_comment = 'check result submitted'
        self.defaults_downtime_duration_hours = 2
        self.defaults_downtime_duration_minutes = 0
        self.defaults_downtime_comment = 'scheduled downtime'
        self.defaults_downtime_type_fixed = True
        self.defaults_downtime_type_flexible = False
        # internal flag to determine if keyring is available at all - defaults to False
        # use_system_keyring is checked and defined some lines later after config file was read
        self.keyring_available = False
        # setting for keyring usage
        self.use_system_keyring = False

        # Special FX
        # Centreon
        self.re_criticality_enabled = False
        self.re_criticality_pattern = ''
        self.re_criticality_reverse = False

        # the app is unconfigured by default and will stay so if it
        # would not find a config file
        self.unconfigured = True

        # adding cli args variable
        self.cli_args = {}

        # Parse the command line
        parser = argparse.ArgumentParser(description='Nagstamon for your CLI')
        # might not be necessary anymore - to be tested
        # ##parser.add_argument('-psn', action='store_true',
        # ##    help='force ~/.nagstamon as config folder (used by launchd in MacOSX)')
        # necessary because otherwise setup.py goes crazy because of argparse

        # separate NagstaCLI from
        if len(sys.argv) > 2 or (len(sys.argv) > 1 and sys.argv[1] in ['--help', '-h']):
            parser.add_argument('--servername', type=str, help="name of the (Nagios)server. Look in nagstamon config")
            parser.add_argument('--hostname', type=str)
            parser.add_argument('--comment', type=str, default="")
            parser.add_argument('--service', type=str, default="", help="specify service, if needed. Mostly the whole host goes to downstate")
            parser.add_argument('--fixed', type=str, choices=['y', 'n'], default="y", help="fixed=n means wait for service/host to go down, then start the downtime")
            parser.add_argument('--start_time', type=str, help="start time for downtime")
            parser.add_argument('--hours', type=int, help="amount of hours for downtime")
            parser.add_argument('--minutes', type=int, help="amount of minutes for downtime")
            parser.add_argument('--config', type=str, help="Path for configuration folder")
            parser.add_argument('--output', type=str, choices=['y', 'n'], default="y", help="lists given parameter (for debugging)")
        else:
            parser.add_argument('config', nargs='?', help='Path for configuration folder')

        self.cli_args, unknown = parser.parse_known_args()

        # try to use a given config file - there must be one given
        # if sys.argv is larger than 1
        # ##if args.psn:
        # ##    # new configdir approach
        # ##    self.configdir = os.path.expanduser('~') + os.sep + '.nagstamon'
        # ##elif args.cfgpath:
        if len(sys.argv) < 3 and self.cli_args.config:
            # allow to give a config file
            self.configdir = self.cli_args.config

        # otherwise, if there exists a config dir in the current working directory, it should be used
        elif os.path.exists(os.getcwd() + os.sep + 'nagstamon.config'):
            self.configdir = os.getcwd() + os.sep + 'nagstamon.config'
        else:
            # ~/.nagstamon/nagstamon.conf is the user conf file
            # os.path.expanduser('~') finds out the user HOME dir where
            # nagstamon expects its conf file to be
            self.configdir = os.path.expanduser('~') + os.sep + '.nagstamon'

        self.configfile = self.configdir + os.sep + 'nagstamon.conf'

        # make path fit for actual os, normcase for letters and normpath for path
        self.configfile = os.path.normpath(os.path.normcase(self.configfile))

        # because the name of the configdir is also stored in the configfile
        # there may be situations where the name gets overwritten by a
        # wrong name so it will be stored here temporarily
        configdir_temp = self.configdir

        # default settings dicts
        self.servers = dict()
        self.actions = dict()

        if os.path.exists(self.configfile):
            # instantiate a configparser to parse the conf file
            # SF.net bug #3304423 could be fixed with allow_no_value argument which
            # is only available since Python 2.7
            # since Python 3 '%' will be interpolated by default which crashes
            # with some URLs
            config = configparser.ConfigParser(allow_no_value=True, interpolation=None)
            config.read(self.configfile)

            # go through all sections of the conf file
            for section in config.sections():
                # go through all items of each section (in fact there is only one
                # section, which has to be there to comply with the .INI file standard)

                for i in config.items(section):
                    # omit config file info as it makes no sense to store its path
                    if not i[0] in ('configfile', 'configdir'):
                        # create a key of every config item with its appropriate value
                        # check first if it is a bool value and convert string if it is
                        if i[1] in BOOLPOOL:
                            object.__setattr__(self, i[0], BOOLPOOL[i[1]])
                        # in case there are numbers intify them to avoid later conversions
                        # treat negative value specially as .isdecimal() will not detect it
                        elif i[1].isdecimal() or \
                                (i[1].startswith('-') and i[1].split('-')[1].isdecimal()):
                            object.__setattr__(self, i[0], int(i[1]))
                        else:
                            object.__setattr__(self, i[0], i[1])

            # the switch from Nagstamon 1.0 to 1.0.1 introduced the use_system_keyring
            # property; the thousands of existing 1.0 installations do not know it yet,
            # so it is more comfortable for most Windows users if it is only defined
            # after it has been checked in the config file
            # if not self.__dict__.has_key("use_system_keyring"):
            if 'use_system_keyring' not in self.__dict__.keys():
                if self.unconfigured is True:
                    # an unconfigured system should start with no keyring to prevent crashes
                    self.use_system_keyring = False
                else:
                    # a configured system seemed to be able to run and thus use system keyring
                    if platform.system() in NON_LINUX:
                        self.use_system_keyring = True
                    else:
                        self.use_system_keyring = self.KeyringAvailable()

            # reset self.configdir to temporarily saved value in case it differs from
            # the one read from configfile and so it would fail to save next time
            self.configdir = configdir_temp

            # Servers configuration...
            self.servers = self._LoadServersMultipleConfig()
            # ... and actions
            self.actions = self.LoadMultipleConfig("actions", "action", "Action")

            # seems like there is a config file so the app is not unconfigured anymore
            self.unconfigured = False

        # Load actions if Nagstamon is not unconfigured, otherwise load defaults
        if self.unconfigured is True:
            self.actions = self._DefaultActions()

        # do some conversion stuff needed because of config changes and code cleanup
        self._LegacyAdjustments()
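
Worth noting here is how Nagstamon sizes up the command line before building the parser: len(sys.argv) > 2 (or an explicit -h/--help) selects the flag-heavy NagstaCLI layout, a bare launch falls back to a single optional positional config path, and parse_known_args() tolerates any leftover tokens. A reduced sketch of that dispatch (build_parser is an illustrative name; the option names are the ones used above):

import sys
import argparse

def build_parser(argv=None):
    """Pick a parser layout from the raw token count, mirroring the
    len(sys.argv) dispatch in the example above."""
    argv = sys.argv if argv is None else argv
    parser = argparse.ArgumentParser(description='token-count dispatch sketch')
    if len(argv) > 2 or (len(argv) > 1 and argv[1] in ('--help', '-h')):
        # CLI mode: several keyword options for scripted use
        parser.add_argument('--servername', type=str)
        parser.add_argument('--hostname', type=str)
        parser.add_argument('--comment', type=str, default='')
    else:
        # GUI mode: at most a single positional config path
        parser.add_argument('config', nargs='?', help='path to config folder')
    return parser

args, unknown = build_parser().parse_known_args()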

Example 34

Project: PySAR
Source File: igram_viewer.py
View license
def main(argv):

  markerSize=16
  markerSize2=16
  markerColor='g'
  markerColor2='red'
  lineWidth=2
  fontSize=22
  unit='cm'
 
  if len(sys.argv)>2:

    try:
      opts, args = getopt.getopt(argv,"f:F:v:a:b:s:m:c:w:u:l:h:")

    except getopt.GetoptError:
      Usage() ; sys.exit(1)
  
    for opt,arg in opts:
      if opt == '-f':
        timeSeriesFile = arg
      elif opt == '-F':
        timeSeriesFile_2 = arg
      elif opt == '-v':
        velocityFile = arg
      elif opt == '-a':
        vmin = float(arg)
      elif opt == '-b':
        vmax = float(arg)
      elif opt == '-s':
        fontSize = int(arg)
      elif opt == '-m':
        lineWidth=int(arg)
      elif opt == '-c':
        markerColor=arg
      elif opt == '-w':
        lineWidth=int(arg)
      elif opt == '-u':
        unit=arg
      elif opt == '-l':
        lbound=float(arg)
      elif opt == '-h':
        hbound=float(arg)


  elif len(sys.argv)==2:
    if argv[0]=='-h':
       Usage(); sys.exit(1)
    elif os.path.isfile(argv[0]):
       timeSeriesFile = argv[0]
       h5timeseries = h5py.File(timeSeriesFile)
       if not 'interferograms' in h5timeseries.keys():
          print 'ERROR'
          Usage(); sys.exit(1)
    else:
       
       Usage(); sys.exit(1)
       
  elif len(sys.argv)<2:
    Usage(); sys.exit(1)



  if unit in ('m','M'):
     unitFac=1
  elif unit in ('cm','Cm','CM'):
     unitFac=100
  elif unit in ('mm','Mm','MM','mM'):
     unitFac=1000
  else:
     print 'Warning:'
     print 'wrong unit input!'
     print 'cm will be used to display the displacement'
############################################

  
  if not os.path.isfile(timeSeriesFile):
       Usage();sys.exit(1)

  h5timeseries = h5py.File(timeSeriesFile)
#  if not 'timeseries' in h5timeseries.keys():
#          Usage(); sys.exit(1)

 
  igramList = h5timeseries['interferograms'].keys()
  dates=range(len(igramList))

#  dateIndex={}
#  for ni in range(len(dateList)):
#    dateIndex[dateList[ni]]=ni
#  tbase=[]
#  d1 = datetime.datetime(*time.strptime(dateList[0],"%Y%m%d")[0:5])

#  for ni in range(len(dateList)):
#    d2 = datetime.datetime(*time.strptime(dateList[ni],"%Y%m%d")[0:5])
#    diff = d2-d1
#    tbase.append(diff.days)

#  dates=[]
#  for ni in range(len(dateList)):
#    d = datetime.datetime(*time.strptime(dateList[ni],"%Y%m%d")[0:5])
#    dates.append(d)
  
#  datevector=[]
#  for i in range(len(dates)):
#    datevector.append(np.float(dates[i].year) + np.float(dates[i].month-1)/12 + np.float(dates[i].day-1)/365)
#  datevector2=[round(i,2) for i in datevector]
###########################################  
#  eventDates=['20041223','20051003']
 # try:
 #   eventDates
 #   events=[]
  #  for ni in range(len(eventDates)):
  #    d = datetime.datetime(*time.strptime(eventDates[ni],"%Y%m%d")[0:5])
  #    events.append(d)
 # except:
  #  print ''
  #print events
###########################################
  try:
     velocityFile
     h5file=h5py.File(velocityFile,'r')
     k=h5file.keys()
     dset= h5file[k[0]].get(k[0])
     print 'The file to display is: ' + k[0]
  except:
     dset = h5timeseries['interferograms'][h5timeseries['interferograms'].keys()[-1]].get(h5timeseries['interferograms'].keys()[-1])
 # timeseries = np.zeros((len(h5timeseries['timeseries'].keys()),np.shape(dset)[0],np.shape(dset)[1]),np.float32)
 # for date in h5timeseries['timeseries'].keys():
 #   timeseries[dateIndex[date]] = h5timeseries['timeseries'].get(date)
  
###########################################
  
  fig = plt.figure()
  ax=fig.add_subplot(111)
  try:
    vmin
    vmax
    ax.imshow(dset,vmin=vmin,vmax=vmax)
  except:
    ax.imshow(dset)

  fig2 = plt.figure(2)
  ax2=fig2.add_subplot(111) 
 # print dates
 # print dateList

  try:
     timeSeriesFile_2
     h5timeseries_2=h5py.File(timeSeriesFile_2)
  except:
     print""   
##########################################  
  def onclick(event):
    if event.button==1:
      print 'click'
      xClick = int(event.xdata)
      yClick = int(event.ydata)
      print xClick
      print yClick
      Dis=[]
      for igram in h5timeseries['interferograms'].keys():
             Dis.append( h5timeseries['interferograms'][igram].get(igram)[yClick][xClick])    
      ax2.cla()
      
      
      try:
         Dis2=[]
         for igram in h5timeseries['interferograms'].keys():
             Dis2.append( h5timeseries_2['interferograms'][igram].get(igram)[yClick][xClick])
         dis2=array(Dis2)
  #       dis2=round(dis2/2./pi)
  #       dis2=dis2*unitFac
         ax2.plot(dates,dis2, '^',ms=markerSize2, alpha=0.7, mfc=markerColor2)

      except:
         Dis2=[]
      
    #  ax2.plot(dates,dis, '-ko',ms=markerSize, lw=lineWidth, alpha=0.7, mfc=markerColor)
      dis=array(Dis)
      print dis
   #   dis=round(dis/2./pi)
     
 #     dis=dis*unitFac
      ax2.plot(dates,dis, '-ko',ms=markerSize, lw=lineWidth, alpha=0.7, mfc=markerColor)
     # print dis
     # print dates
 #     print dset[yClick][xClick]

  #    ax2.fmt_xdata = DateFormatter('%Y-%m-%d %H:%M:%S')
   #   if unitFac==100:
   #     ax2.set_ylabel('Displacement [cm]',fontsize=fontSize)
   #   elif unitFac==1000:
   #     ax2.set_ylabel('Displacement [mm]',fontsize=fontSize)
   #   else:
   #     ax2.set_ylabel('Displacement [m]',fontsize=fontSize)

#      ax2.set_xlabel('Time [years]',fontsize=fontSize)
#      ds=datevector[0]-0.2
#      de=datevector[-1]+0.2
#      ys=int(ds)
#      ye=int(de)
#      ms=int((ds-ys)*12)+1
#      me=int((de-ye)*12)+1
      
      
      
#      dss=datetime.datetime(ys,ms,1,0,0)
#      dee=datetime.datetime(ye,me,1,0,0)
#      ax2.set_xlim(dss,dee)
      
      try:
        lbound
        hbound
        ax2.set_ylim(lbound,hbound)
          
      except: 
        ax2.set_ylim(min(dis)-0.4*abs(min(dis)),max(dis)+0.4*max(dis))

      for tick in ax2.xaxis.get_major_ticks():
                tick.label.set_fontsize(fontSize)
      for tick in ax2.yaxis.get_major_ticks():
                tick.label.set_fontsize(fontSize)
                # specify integer or one of preset strings, e.g.
                #tick.label.set_fontsize('x-small')
               # tick.label.set_rotation('vertical')


      fig2.autofmt_xdate()
 
      plt.show()
      import scipy.io as sio 
      Phase={}
      Phase['phase']=Dis
     # Delay['time']=datevector
      sio.savemat('phase.mat', {'phase': Phase})
      
  cid = fig.canvas.mpl_connect('button_press_event', onclick)
  plt.show()
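
Examples 34 through 36 all follow the same three-way sys.argv dispatch: main() receives sys.argv[1:], parses with getopt when two or more tokens are present, treats a single token as either -h or a bare input file, and prints usage otherwise. A runnable condensation of that skeleton (the -f/-a/-b letters and the Usage text are placeholders):

import os
import sys
import getopt

def Usage():
    print('usage: viewer.py -f FILE [-a VMIN] [-b VMAX]')

def main(argv):
    vmin = vmax = infile = None
    if len(argv) >= 2:                       # several tokens: parse with getopt
        try:
            opts, args = getopt.getopt(argv, 'f:a:b:')
        except getopt.GetoptError:
            Usage(); sys.exit(1)
        for opt, arg in opts:
            if opt == '-f':   infile = arg
            elif opt == '-a': vmin = float(arg)
            elif opt == '-b': vmax = float(arg)
    elif len(argv) == 1:                     # single token: a filename or -h
        if argv[0] == '-h' or not os.path.isfile(argv[0]):
            Usage(); sys.exit(1)
        infile = argv[0]
    else:                                    # no tokens at all
        Usage(); sys.exit(1)
    print(infile, vmin, vmax)

if __name__ == '__main__':
    main(sys.argv[1:])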

Example 35

Project: PySAR
Source File: tsviewer.py
View license
def main(argv):

  #default settings
  markerSize=16
  markerSize2=16
  markerColor='g'
  markerColor2='red'
  lineWidth=2
  fontSize=16
  unit='cm'
  Save_timeseries='no'
  dispTsFig='yes'
  dispVelFig='yes'
  dispContour='only'
  contour_step=200
  smoothContour='no'
  radius=0;
  edgeWidth=1.5
  fig_dpi=300

  if len(sys.argv)>2:
    try:
      opts, args = getopt.getopt(argv,"f:F:v:a:b:s:m:c:w:u:l:h:S:D:C:V:t:T:d:r:x:y:P:p:")
    except getopt.GetoptError:
      Usage() ; sys.exit(1)
 
    for opt,arg in opts:
      if   opt == '-f':     timeSeriesFile = arg
      elif opt == '-F':     timeSeriesFile_2 = arg
      elif opt == '-v':     velocityFile = arg
      elif opt == '-a':     vmin = float(arg)
      elif opt == '-b':     vmax = float(arg)
      elif opt == '-s':     fontSize = int(arg)
      elif opt == '-m':     markerSize=int(arg);       markerSize2=int(arg)
      elif opt == '-S':     Save_timeseries=arg
      elif opt == '-c':     markerColor=arg
      elif opt == '-w':     lineWidth=int(arg)
      elif opt == '-u':     unit=arg
      elif opt == '-l':     lbound=float(arg)
      elif opt == '-h':     hbound=float(arg)
      elif opt == '-D':     demFile=arg
      elif opt == '-C':     dispContour=arg
      elif opt == '-V':     contour_step=float(arg)
      elif opt == '-t':     minDate=arg
      elif opt == '-T':     maxDate=arg
      elif opt == '-d':     datesNot2show = arg.split()
      elif opt == '-r':     radius=abs(int(arg))
      elif opt == '-x':     xsub = [int(i) for i in arg.split(':')];   xsub.sort();   dispVelFig='no'
      elif opt == '-y':     ysub = [int(i) for i in arg.split(':')];   ysub.sort();   dispVelFig='no'
      elif opt == '-P':     dispTsFig=arg
      elif opt == '-p':     dispVelFig=arg


  elif len(sys.argv)==2:
    if argv[0]=='-h':
       Usage(); sys.exit(1)
    elif os.path.isfile(argv[0]):
       timeSeriesFile = argv[0]
       h5timeseries = h5py.File(timeSeriesFile)
       if not 'timeseries' in h5timeseries.keys():
          print 'ERROR'
          Usage(); sys.exit(1)
    else:  Usage(); sys.exit(1)
  elif len(sys.argv)<2:
    Usage(); sys.exit(1)

  if   unit in ('m','M'):              unitFac=1
  elif unit in ('cm','Cm','CM'):       unitFac=100
  elif unit in ('mm','Mm','MM','mM'):  unitFac=1000
  else:
     print 'Warning:'
     print 'wrong unit input!'
     print 'cm will be used to display the displacement'

##############################################################
# Read time series file info

  if not os.path.isfile(timeSeriesFile):
     Usage();sys.exit(1)

  h5timeseries = h5py.File(timeSeriesFile)
  if not 'timeseries' in h5timeseries.keys():
     Usage(); sys.exit(1)
 
  dateList1 = h5timeseries['timeseries'].keys()

##############################################################
# Dates to show time series plot

  import matplotlib.dates as mdates
  years    = mdates.YearLocator()   # every year
  months   = mdates.MonthLocator()  # every month
  yearsFmt = mdates.DateFormatter('%Y')

  print '*******************'
  print 'All existing dates:'
  print dateList1
  print '*******************'

  try:
     datesNot2show
     print 'dates not to show: '+str(datesNot2show)
  except:  datesNot2show=[]

  try:
    minDate
    minDateyy=yyyymmdd2years(minDate)
    print 'minimum date: '+minDate
    for date in dateList1:
       yy=yyyymmdd2years(date)
       if yy < minDateyy:
           datesNot2show.append(date)
  except:  pass
  try:
    maxDate
    maxDateyy=yyyymmdd2years(maxDate)
    print 'maximum date: '+maxDate
    for date in dateList1:
       yy=yyyymmdd2years(date)
       if yy > maxDateyy:
           datesNot2show.append(date)
  except:  pass

  try:
     dateList=[]
     for date in dateList1:
        if date not in datesNot2show:
           dateList.append(date)
     print '--------------------------------------------'
     print 'dates used to show time series displacements:'
     print dateList
     print '--------------------------------------------'
  except:
     dateList=dateList1
     print 'using all dates to show time series displacement'

###################################################################
# Date info

  dateIndex={}
  for ni in range(len(dateList)):
     dateIndex[dateList[ni]]=ni
  tbase=[]
  d1 = datetime.datetime(*time.strptime(dateList[0],"%Y%m%d")[0:5])

  for ni in range(len(dateList)):
     d2 = datetime.datetime(*time.strptime(dateList[ni],"%Y%m%d")[0:5])
     diff = d2-d1
     tbase.append(diff.days)

  dates=[]
  for ni in range(len(dateList)):
     d = datetime.datetime(*time.strptime(dateList[ni],"%Y%m%d")[0:5])
     dates.append(d)
  
  datevector=[]
  for i in range(len(dates)):
     datevector.append(np.float(dates[i].year) + np.float(dates[i].month-1)/12 + np.float(dates[i].day-1)/365)
  datevector2=[round(i,2) for i in datevector]


###########################################
# Plot Fig 1 - Velocity / last epoch of time series / DEM

  import matplotlib.pyplot as plt
  if dispVelFig in ('yes','Yes','y','Y','YES'):
     fig = plt.figure()
     ax=fig.add_subplot(111)

     try:
        velocityFile
        h5file=h5py.File(velocityFile,'r')
        k=h5file.keys()
        dset= h5file[k[0]].get(k[0])
        print 'display: ' + k[0]
     except:
        dset = h5timeseries['timeseries'].get(h5timeseries['timeseries'].keys()[-1])
        print 'display: last epoch of timeseries'

     #DEM/contour option
     try:
        demFile
        import _readfile as readfile
        if   os.path.basename(demFile).split('.')[1]=='hgt':  amp,dem,demRsc = readfile.read_float32(demFile)
        elif os.path.basename(demFile).split('.')[1]=='dem':  dem,demRsc = readfile.read_dem(demFile)

        if dispContour in ('no','No','n','N','NO','yes','Yes','y','Y','YES'):
           print 'show DEM as basemap'
           cmap_dem=plt.get_cmap('gray')
           import _pysar_utilities as ut
           plt.imshow(ut.hillshade(dem,50.0),cmap=cmap_dem)
        if dispContour in ('only','Only','o','O','ONLY','yes','Yes','y','Y','YES'):
           print 'show contour'
           if smoothContour in ('yes','Yes','y','Y','YES'):
              import scipy.ndimage as ndimage
              dem=ndimage.gaussian_filter(dem,sigma=10.0,order=0)
           contour_sequence=np.arange(-6000,9000,contour_step)
           plt.contour(dem,contour_sequence,origin='lower',colors='black',alpha=0.5)
     except: print 'No DEM file' 

     try:     img=ax.imshow(dset,vmin=vmin,vmax=vmax)
     except:  img=ax.imshow(dset)

     import matplotlib.patches as patches      # need for draw rectangle of points selected on VelFig

########################################## 
# Plot Fig 2 - Time series plot
  import scipy.stats as stats
  fig2 = plt.figure(2)
  ax2=fig2.add_subplot(111) 

  try:
     timeSeriesFile_2
     h5timeseries_2=h5py.File(timeSeriesFile_2)
     print 'plot 2nd time series'
  except:  pass   

  ########### Plot Time Series with x/y ##########
  try:
     xsub
     ysub
     try:     xmin=xsub[0];         xmax=xsub[1]+1;         print 'x='+str(xsub[0])+':'+str(xsub[1])
     except:  xmin=xsub[0]-radius;  xmax=xsub[0]+radius+1;  print 'x='+str(xsub[0])+'+/-'+str(radius)
     try:     ymin=ysub[0];         ymax=ysub[1]+1;         print 'y='+str(ysub[0])+':'+str(ysub[1])
     except:  ymin=ysub[0]-radius;  ymax=ysub[0]+radius+1;  print 'y='+str(ysub[0])+'+/-'+str(radius)
     try:
        fig
        rectSelect=patches.Rectangle((xmin,ymin),radius*2+1,radius*2+1,fill=False,lw=edgeWidth)
        ax.add_patch(rectSelect)
     except: pass

     Dis=[]
     for date in dateList:  Dis.append(h5timeseries['timeseries'].get(date)[ymin:ymax,xmin:xmax])
     Dis0=array(Dis)
     dis=Dis0*unitFac
     dis=reshape(dis,(len(dateList),-1))
     dis_mean=stats.nanmean(dis,1)
     if (xmax-xmin)*(ymax-ymin)==1:  dis_std=[0]*len(dateList)
     else:                           dis_std=stats.nanstd(dis,1)
     (_, caps, _)=ax2.errorbar(dates,dis_mean,yerr=dis_std,fmt='-ko',\
                               ms=markerSize, lw=lineWidth, alpha=1, mfc=markerColor,\
                               elinewidth=edgeWidth,ecolor='black',capsize=markerSize*0.5)
     for cap in caps:  cap.set_markeredgewidth(edgeWidth)
     print dis_mean

     # x axis format
     ax2.fmt_xdata = DateFormatter('%Y-%m-%d %H:%M:%S')
     if unitFac==100:     ax2.set_ylabel('Displacement [cm]',fontsize=fontSize)
     elif unitFac==1000:  ax2.set_ylabel('Displacement [mm]',fontsize=fontSize)
     else:                ax2.set_ylabel('Displacement [m]' ,fontsize=fontSize)
     ax2.set_xlabel('Time [years]',fontsize=fontSize)
     ax2.set_title('x='+str(xmin)+':'+str(xmax-1)+', y='+str(ymin)+':'+str(ymax-1))
     ax2.xaxis.set_major_locator(years)
     ax2.xaxis.set_major_formatter(yearsFmt)
     ax2.xaxis.set_minor_locator(months)
     datemin = datetime.date(int(datevector[0]),1,1)
     datemax = datetime.date(int(datevector[-1])+1,1,1)
     ax2.set_xlim(datemin, datemax)

     # y axis format
     try:
        lbound
        hbound
        ax2.set_ylim(lbound,hbound)
     except:
        ax2.set_ylim(nanmin(dis_mean-dis_std)-0.4*abs(nanmin(dis_mean)),\
                     nanmax(dis_mean+dis_std)+0.4*abs(nanmax(dis_mean)))

     for tick in ax2.xaxis.get_major_ticks():  tick.label.set_fontsize(fontSize)
     for tick in ax2.yaxis.get_major_ticks():  tick.label.set_fontsize(fontSize)
     #fig2.autofmt_xdate()     #adjust x overlap by rotating, may enable again

     if Save_timeseries in ('yes','Yes','Y','y','YES'):
        import scipy.io as sio
        Delay={}
        Delay['displacement']=Dis0
        Delay['unit']='m'
        Delay['time']=datevector
        tsNameBase='ts_x'+str(xmin)+'_'+str(xmax-1)+'y'+str(ymin)+'_'+str(ymax-1)
        sio.savemat(tsNameBase+'.mat', {'displacement': Delay})
        print 'saved data to '+tsNameBase+'.mat'
        plt.savefig(tsNameBase+'.pdf',dpi=fig_dpi)
        print 'saved plot to '+tsNameBase+'.pdf'

  except:  print 'No x/y input' ; pass

  ########### Plot Time Series with Click ##########
  def onclick(event):
    if event.button==1:
      xClick = int(event.xdata)
      yClick = int(event.ydata)
      print 'x='+str(xClick)+'+/-'+str(radius)+', y='+str(yClick)+'+/-'+str(radius)
      xmin=xClick-radius;  xmax=xClick+radius+1;
      ymin=yClick-radius;  ymax=yClick+radius+1;
      try:
         fig
         rectSelect=patches.Rectangle((xmin,ymin),radius*2+1,radius*2+1,fill=False,lw=edgeWidth)
         ax.add_patch(rectSelect)
      except: pass

      ax2.cla()

      #plot 1st time series
      Dis=[]
      for date in dateList:  Dis.append(h5timeseries['timeseries'].get(date)[ymin:ymax,xmin:xmax])
      Dis0=array(Dis)
      dis=Dis0*unitFac
      dis=reshape(dis,(len(dateList),-1))
      dis_mean=stats.nanmean(dis,1)
      if (xmax-xmin)*(ymax-ymin)==1:  dis_std=[0]*len(dateList)
      else:                           dis_std=stats.nanstd(dis,1)
      (_, caps, _)=ax2.errorbar(dates,dis_mean,yerr=dis_std,fmt='-ko',\
                                ms=markerSize, lw=lineWidth, alpha=1, mfc=markerColor,\
                                elinewidth=edgeWidth,ecolor='black',capsize=markerSize*0.5)
      for cap in caps:  cap.set_markeredgewidth(edgeWidth)
      print dis_mean

      #plot 2nd time series
      try:
         timeSeriesFile_2
         Dis2=[]
         for date in dateList:  Dis2.append(h5timeseries_2['timeseries'].get(date)[ymin:ymax,xmin:xmax])
         dis2=array(Dis2)
         dis2=dis2*unitFac
         dis2=reshape(dis2,(len(dateList),-1))
         dis2_mean=stats.nanmean(dis2,1)
         if (xmax-xmin)*(ymax-ymin)==1:  dis2_std=[0]*len(dateList)
         else:                           dis2_std=stats.nanstd(dis2,1)
         (_, caps, _)=ax2.errorbar(dates,dis2_mean,yerr=dis2_std,fmt='^',\
                                   ms=markerSize2, lw=lineWidth, alpha=1, mfc=markerColor2,\
                                   elinewidth=edgeWidth,ecolor='black',capsize=markerSize*0.5)
         for cap in caps:  cap.set_markeredgewidth(edgeWidth)
      except:  Dis2=[]

      #axis formating
      ax2.fmt_xdata = DateFormatter('%Y-%m-%d %H:%M:%S')
      if unitFac==100:     ax2.set_ylabel('Displacement [cm]',fontsize=fontSize)
      elif unitFac==1000:  ax2.set_ylabel('Displacement [mm]',fontsize=fontSize)
      else:                ax2.set_ylabel('Displacement [m]' ,fontsize=fontSize)
      ax2.set_xlabel('Time [years]',fontsize=fontSize)
      ax2.set_title('x='+str(xClick)+'+/-'+str(radius)+', y='+str(yClick)+'+/-'+str(radius))
      #ds=datevector[0]-0.2
      #de=datevector[-1]+0.2
      #ys=int(ds)
      #ye=int(de)
      #ms=int((ds-ys)*12)+1
      #me=int((de-ye)*12)+1
      #dss=datetime.datetime(ys,ms,1,0,0)
      #dee=datetime.datetime(ye,me,1,0,0)
      #ax2.set_xlim(dss,dee)
      ax2.xaxis.set_major_locator(years)
      ax2.xaxis.set_major_formatter(yearsFmt)
      ax2.xaxis.set_minor_locator(months)
      datemin = datetime.date(int(datevector[0]),1,1)
      datemax = datetime.date(int(datevector[-1])+1,1,1)
      ax2.set_xlim(datemin, datemax)

      try:
        lbound
        hbound
        ax2.set_ylim(lbound,hbound)
      except:
        ax2.set_ylim(nanmin(dis_mean-dis_std)-0.4*abs(nanmin(dis_mean)),\
                     nanmax(dis_mean+dis_std)+0.4*abs(nanmax(dis_mean)))

      for tick in ax2.xaxis.get_major_ticks():  tick.label.set_fontsize(fontSize)
      for tick in ax2.yaxis.get_major_ticks():  tick.label.set_fontsize(fontSize)
      #fig2.autofmt_xdate()     #adjust x overlap by rotating, may enable again

      if Save_timeseries in ('yes','Yes','Y','y','YES'):
         import scipy.io as sio
         Delay={}
         Delay['displacement']=Dis0
         Delay['unit']='m'
         Delay['time']=datevector
         tsNameBase='ts_x'+str(xmin)+'_'+str(xmax-1)+'y'+str(ymin)+'_'+str(ymax-1)
         sio.savemat(tsNameBase+'.mat', {'displacement': Delay})
         print 'saved data to '+tsNameBase+'.mat'
         plt.savefig(tsNameBase+'.pdf',dpi=fig_dpi)
         print 'saved plot to '+tsNameBase+'.pdf'

      if dispTsFig in ('yes','Yes','Y','y','YES'):  plt.show()
  try:
     cid = fig.canvas.mpl_connect('button_press_event', onclick)       # Click function is available when VelFig is shown
  except: pass

  if dispTsFig in ('yes','Yes','Y','y','YES'):  plt.show()
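
A side effect of the getopt loops in these viewers is that option variables such as lbound and hbound only exist when their flag was passed, which is why the plotting code keeps probing them with bare try/except blocks. A small sketch of an equivalent but more explicit approach, collecting options into a dict with defaults (parse, DEFAULTS, and the option letters are illustrative):

import sys
import getopt

DEFAULTS = {'unit': 'cm', 'fontSize': 16}    # fallbacks when a flag is absent

def parse(argv):
    """Collect getopt results into a dict so 'was this flag given?'
    becomes a membership test instead of a try/NameError probe."""
    opts, _ = getopt.getopt(argv, 'u:s:l:h:')
    cfg = dict(DEFAULTS)
    for opt, arg in opts:
        if opt == '-u':
            cfg['unit'] = arg
        elif opt == '-s':
            cfg['fontSize'] = int(arg)
        elif opt == '-l':
            cfg['lbound'] = float(arg)
        elif opt == '-h':
            cfg['hbound'] = float(arg)
    return cfg

cfg = parse(sys.argv[1:])
if 'lbound' in cfg and 'hbound' in cfg:      # replaces try: lbound ... except:
    print('fixed y-limits:', cfg['lbound'], cfg['hbound'])
else:
    print('auto y-limits')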

Example 36

Project: PySAR
Source File: view.py
View license
def main(argv):

  #################  default values  ################
  flip_lr='no'
  flip_ud='no'
  disp_geo = 'yes'
  #font_size=8
  color_map='jet'
  figs_rows=5
  figs_cols=8
  rewrapping='yes'
  allData2display='yes'
  Wspace = 0.1
  Hspace = 0.1
  title = 'out'
  showRef = 'yes'
  ref_color='k'
  ref_symbol='s'
  ref_size =10
  dip_opposite = 'no'
  saveFig='no'
  dispFig='yes'
  dispContour='only'
  contour_step=200
  contour_sigma=3.0
  fig_dpi=300

  ###################  get input args  ###############
  if len(sys.argv)>2:
     try:
        opts, args = getopt.getopt(argv,"h:D:O:G:S:f:m:M:v:u:s:c:e:d:r:p:w:i:j:t:R:a:b:k:x:y:C:V:P:o:g:l:L:")
     except getopt.GetoptError:
        Usage() ; sys.exit(1)
     if opts==[]: Usage() ; sys.exit(1)

     for opt,arg in opts:
        if opt in ("-h","--help"):
           Usage() ; sys.exit()
        elif opt == '-f': File = arg
        elif opt == '-D': demFile=arg
        elif opt == '-w': rewrapping = arg
        elif opt == '-m': min = float(arg);         rewrapping='no'
        elif opt == '-M': max = float(arg);         rewrapping='no'
        elif opt == '-v': flip_lr = arg
        elif opt == '-u': flip_ud = arg
        elif opt == '-s': font_size = int(arg)
        elif opt == '-c': color_map = arg
        elif opt == '-e': epoch_number = int(arg);  allData2display='no'
        elif opt == '-d': epoch_date = arg;         allData2display='no'
        elif opt == '-r': figs_rows = int(arg)
        elif opt == '-p': figs_cols = int(arg)
        elif opt == '-i': Wspace = float(arg)
        elif opt == '-j': Hspace = float(arg)
        elif opt == '-t': title = arg
        elif opt == '-R': showRef = arg
        elif opt == '-a': ref_color = arg
        elif opt == '-b': ref_symbol = arg
        elif opt == '-k': ref_size=int(arg)
        elif opt == '-x': win_x = [int(i) for i in arg.split(':')];      win_x.sort()
        elif opt == '-y': win_y = [int(i) for i in arg.split(':')];      win_y.sort()
        elif opt == '-G': disp_geo = arg
        elif opt == '-O': dip_opposite=arg
        elif opt == '-S': saveFig=arg 
        elif opt == '-C': dispContour=arg
        elif opt == '-V': contour_step=float(arg)
        elif opt == '-P': dispFig=arg
        elif opt == '-o': figName=arg
        elif opt == '-g': contour_sigma = float(arg)
        elif opt == '-l': win_lat = [float(i) for i in arg.split(':')];  win_lat.sort()
        elif opt == '-L': win_lon = [float(i) for i in arg.split(':')];  win_lon.sort()

  elif len(sys.argv)==2:
     if argv[0]=='-h':              Usage(); sys.exit(1)
     elif os.path.isfile(argv[0]):  File = argv[0]
     else:
        print 'Input file does not exist: '+argv[0];  sys.exit(1)
  elif len(sys.argv)<2:             Usage(); sys.exit(1)

  if color_map == 'hsv':
     ################################################
     cdict1 = {'red':   ((0.0, 0.0, 0.0),
                   (0.5, 0.0, 0.0),
                   (0.6, 1.0, 1.0),
                   (0.8, 1.0, 1.0),
                   (1.0, 0.5, 0.5)),
        
         'green': ((0.0, 0.0, 0.0),
                   (0.2, 0.0, 0.0),
                   (0.4, 1.0, 1.0),
                   (0.6, 1.0, 1.0),
                   (0.8, 0.0, 0.0),
                   (1.0, 0.0, 0.0)),
      
         'blue':  ((0.0, 0.5, .5),
                   (0.2, 1.0, 1.0),
                   (0.4, 1.0, 1.0),
                   (0.5, 0.0, 0.0),
                   (1.0, 0.0, 0.0),)
        }

     from matplotlib.colors import LinearSegmentedColormap
     ccmap = LinearSegmentedColormap('BlueRed1', cdict1)
        
     ################################################
  else:  ccmap=plt.get_cmap(color_map)


  ##################################################
  ext = os.path.splitext(File)[1]
  if ext == '.h5':
     import h5py
     h5file=h5py.File(File,'r')
     k=h5file.keys()
     if   'interferograms' in k: k[0] = 'interferograms'
     elif 'coherence'      in k: k[0] = 'coherence'
     elif 'timeseries'     in k: k[0] = 'timeseries'
     print 'Input: '+str(k)
     if k[0] in ('dem','velocity','mask','temporal_coherence','rmse'):
        allData2display = 'no'

  elif ext in ['.unw','.int','.cor','.hgt','.dem','.trans','.mli','.slc']:
     import pysar._readfile as readfile
     allData2display = 'no'
     k = [ext]
  else: 
      print 'File extension not recognized: '+ext
      print 'Supported file formats:\n\
                 PySAR HDF5 files: velocity.h5, timeseries.h5, LoadedData.h5, ...\n\
                 ROI_PAC    files: .unw .cor .int .hgt .dem .trans .mli'
     sys.exit(1)


####################################################################
########################## Plot One ################################

  if allData2display == 'no':
    try:    font_size
    except: font_size=12

    ################# File Reading ##################
    ##### PySAR HDF5
    if k[0] in ('dem','velocity','mask','temporal_coherence','rmse'):
       atr  = h5file[k[0]].attrs
       dset = h5file[k[0]].get(k[0])
       data = dset[0:dset.shape[0],0:dset.shape[1]]
       # rewrapping
       if rewrapping in ('yes','Yes','Y','y','YES'):
          print 'Rewrapping disabled for '+k[0]

    elif k[0] == 'timeseries':
       dateList=h5file[k[0]].keys()
       try:
          epoch_number
       except:
          try:
             epoch_date
             if len(epoch_date)==6:  epoch_date=yymmdd2yyyymmdd(epoch_date)
             epoch_number=dateList.index(epoch_date)
          except:
             print 'Unrecognized epoch input!';  sys.exit(1)
       print 'Displaying date: '+dateList[epoch_number]
       atr  = h5file[k[0]].attrs
       dset = h5file[k[0]].get(dateList[epoch_number])
       data = dset[0:dset.shape[0],0:dset.shape[1]]
       # rewrapping
       if rewrapping in ('yes','Yes','y','Y','YES'):
          print 'Rewrapping. Set min/max to -pi/pi. Showing phase'
          range2phase=4*np.pi/float(atr['WAVELENGTH'])         #double-way, 2*2*pi/lamda
          data=range2phase*data
          data=rewrap(data)
          min = -np.pi
          max = np.pi
          figUnit='(radian)'
       elif rewrapping in ('no','No','N','n','NO'):
          print 'No rewrapping. Showing displacement.'
          figUnit='(m)'

    elif k[0] in ('interferograms','coherence','wrapped'):
       ifgramList=h5file[k[0]].keys()
       try:
          epoch_number
       except:
          for i in range(len(ifgramList)):
             if epoch_date in ifgramList[i]:   epoch_number = i
       print 'Displaying: '+ifgramList[epoch_number]
       atr  = h5file[k[0]][ifgramList[epoch_number]].attrs
       dset = h5file[k[0]][ifgramList[epoch_number]].get(ifgramList[epoch_number])
       data = dset[0:dset.shape[0],0:dset.shape[1]]

       # rewrapping
       if k[0] in ('coherence','wrapped') and rewrapping in ('yes','Yes','y','Y','YES'):
          print 'No rewrapping for coherence/wrapped files, set to "no"'
          rewrapping='no'
       if rewrapping in ('yes','Yes','y','Y','YES'):
          print 'Rewrapping. Set min/max to -pi/pi.'
          data = np.angle(np.exp(1j*data))
          min  = -np.pi
          max  = np.pi

    ##### ROI_PAC product
    elif k[0] in ['.slc','.mli']:
       data,p,atr = readfile.read_complex64(File)
       data = np.log10(data)
       figUnit = '(dB)'
    elif k[0] == '.int':
       a,data,atr = readfile.read_complex64(File)
       min = -np.pi
       max =  np.pi
       rewrapping = 'no'
       figUnit = '(radian)'
    elif k[0] in ['.unw', '.cor', '.hgt', '.trans']:
       a,data,atr = readfile.read_float32(File)
       if   k[0] == '.unw':   figUnit = '(radian)'
       elif k[0] == '.hgt':   figUnit = '(m)'
       if k[0] in ['.cor','.hgt','.trans']: rewrapping='no'
       if rewrapping in ('yes','Yes','y','Y','True','true'):
          print 'Rewrapping. Set min/max to -pi/pi.'
          data = rewrap(data)
          min = -np.pi
          max =  np.pi
    elif k[0] == '.dem':
       data,atr = readfile.read_dem(File)
       figUnit = '(m)'


    ############## Data Option ##################
    # Opposite Sign
    if dip_opposite in ('yes','Yes','Y','y','YES'):
       data=-1*data
    # Subset
    try:      # y/latitude direction
      win_lat
      try:
        atr['Y_FIRST']
        win_y=[0]*2
        win_y[0]=int((win_lat[1]-float(atr['Y_FIRST']))/float(atr['Y_STEP']))
        win_y[1]=int((win_lat[0]-float(atr['Y_FIRST']))/float(atr['Y_STEP']))
        if win_y[0]<0: win_y[0]=0; print 'input latitude > max latitude! Set to max'
        print 'subset in latitude  - '+str(win_lat[0])+':'+str(win_lat[1])
      except:  print 'Non-geocoded file, cannot use LatLon option';   Usage(); sys.exit(1)
    except:
      try:
        win_y
        print 'subset in y direction - '+str(win_y[0])+':'+str(win_y[1])
      except: win_y = [0,int(atr['FILE_LENGTH'])]
    try:      # x/longitude direction
      win_lon
      try:
        atr['X_FIRST']
        win_x=[0]*2
        win_x[0]=int((win_lon[0]-float(atr['X_FIRST']))/float(atr['X_STEP']))
        win_x[1]=int((win_lon[1]-float(atr['X_FIRST']))/float(atr['X_STEP']))
        if win_x[0]<0: win_x[0]=0; print 'input longitude > max longitude! Set to max'
        print 'subset in longitude - '+str(win_lon[0])+':'+str(win_lon[1])
      except:  print 'Non-geocoded file, cannot use LatLon option';   Usage(); sys.exit(1)
    except:
      try:
        win_x
        print 'subset in x direction - '+str(win_x[0])+':'+str(win_x[1])
      except: win_x = [0,int(atr['WIDTH'])]

    data = data[win_y[0]:win_y[1],win_x[0]:win_x[1]]

    # Reference Point
    try:
       xref=atr['ref_x']-win_x[0]
       yref=atr['ref_y']-win_y[0]
    except:  print 'No reference point'
    try:
       xref=xref-atr['subset_x0']
       yref=yref-atr['subset_y0']
    except:  print 'No subset'
    # Geo coordinate
    try:
       lon_step = float(atr['X_STEP'])
       lat_step = float(atr['Y_STEP'])
       lon_unit = atr['Y_UNIT']
       lat_unit = atr['X_UNIT']
       ullon     = float(atr['X_FIRST'])+win_x[0]*lon_step
       ullat     = float(atr['Y_FIRST'])+win_y[0]*lat_step
       llcrnrlon = ullon
       llcrnrlat = ullat+lat_step*data.shape[0]
       urcrnrlon = ullon+lon_step*data.shape[1]
       urcrnrlat = ullat
       geocoord='yes'
       print 'Input file is Geocoded'
    except:  geocoord='no'
    # Flip
    if flip_lr in ('yes','Yes','Y','y','YES'):  data=np.fliplr(data);  xref=np.shape(data)[1]-xref-1 
    if flip_ud in ('yes','Yes','Y','y','YES'):  data=np.flipud(data);  yref=np.shape(data)[0]-yref-1

    # Colorbar Extend
    data_min = np.nanmin(data)
    data_max = np.nanmax(data)
    try:    min
    except: min = data_min
    try:    max
    except: max = data_max
    if   min <= data_min and max >= data_max: cb_extend='neither'
    elif min >  data_min and max >= data_max: cb_extend='min'
    elif min <= data_min and max <  data_max: cb_extend='max'
    else:                                     cb_extend='both'

    ############## DEM Option ##################
    try:
       demFile
       print 'Show topography'
       import pysar._readfile as readfile
       if   os.path.basename(demFile).split('.')[1]=='hgt':  amp,dem,demRsc = readfile.read_float32(demFile)
       elif os.path.basename(demFile).split('.')[1]=='dem':      dem,demRsc = readfile.read_dem(demFile)

       # Subset
       dem = dem[win_y[0]:win_y[1],win_x[0]:win_x[1]]
       # Flip
       if flip_lr in ('yes','Yes','Y','y','YES'):  dem=np.fliplr(dem)
       if flip_ud in ('yes','Yes','Y','y','YES'):  dem=np.flipud(dem)

       # DEM data preparation
       if dispContour in ('no','No','n','N','NO','yes','Yes','y','Y','YES'):           #DEM basemap
          print 'plot DEM as basemap'
          cmap_dem=plt.get_cmap('gray')
          import pysar._pysar_utilities as ut
       if dispContour in ('only','Only','o','O','ONLY','yes','Yes','y','Y','YES'):     #contour
          print 'plot contour'
          #if smoothContour in ('yes','Yes','y','Y','YES'):
          import scipy.ndimage as ndimage
          dem=ndimage.gaussian_filter(dem,sigma=contour_sigma,order=0)
          contour_sequence=np.arange(-6000,9000,contour_step)
    except:  print 'No DEM file'

    ############## Data Plot and Output  ################
    # Figure Title
    if   k[0]=='velocity':                    figTitle = 'Velocity (m/yr)'
    elif k[0]=='temporal_coherence':          figTitle = 'Temporal coherence'
    elif k[0]=='dem':                         figTitle = 'DEM error'
    elif k[0]=='rmse':                        figTitle = 'RMSE (m/yr)'
    elif k[0]=='mask':                        figTitle = 'Pixels with no valid value.'
    elif k[0]=='coherence':                   figTitle = ifgramList[epoch_number]
    elif k[0]in('interferograms','wrapped'):  figTitle = ifgramList[epoch_number]+' (radian)'
    elif k[0]=='timeseries':
       try:    master_date=atr['ref_date']
       except: master_date=atr['DATE']
       if len(master_date)==6:     master_date=yymmdd2yyyymmdd(master_date)
       if dip_opposite in ('yes','Yes','Y','y','YES'): figTitle = dateList[epoch_number]+'_'+master_date+' '+figUnit
       else:                                           figTitle = master_date+'_'+dateList[epoch_number]+' '+figUnit
    elif k[0] in ['.unw','.cor','.hgt','.dem','.trans','.mli','.slc']:
       try:    figTitle = File+' '+figUnit
       except: figTitle = File

    # Plot in Geo-coordinate: plot in map
    if geocoord == 'yes' and disp_geo in ('yes','Yes','y','Y','YES'):
       print 'display Lat/Lon'
       fig = plt.figure()
       ax = fig.add_axes([0.1,0.1,0.8,0.8])
       try: plt.title(figTitle,fontsize=font_size)
       except: pass

       # Map - DEM - Data
       from mpl_toolkits.basemap import Basemap
       m = Basemap(llcrnrlon=llcrnrlon, llcrnrlat=llcrnrlat, urcrnrlon=urcrnrlon, urcrnrlat=urcrnrlat,
                   resolution='l', area_thresh=1., projection='cyl',suppress_ticks=False,ax=ax)
       try:
          demFile
          if dispContour in ('no','No','n','N','NO','yes','Yes','y','Y','YES'):
             m.imshow(ut.hillshade(np.flipud(dem),50.0),cmap=cmap_dem)
          if dispContour in ('only','Only','o','O','ONLY','yes','Yes','y','Y','YES'):
             import numpy.matlib
             c_x = np.linspace(llcrnrlon,urcrnrlon,num=dem.shape[1],endpoint=False).reshape(1,dem.shape[1])
             c_xx= np.matlib.repmat(c_x,dem.shape[0],1)
             c_y = np.linspace(llcrnrlat,urcrnrlat,num=dem.shape[0],endpoint=False).reshape(dem.shape[0],1)
             c_yy= np.matlib.repmat(c_y,1,dem.shape[1])
             m.contour(c_xx,c_yy,np.flipud(dem),contour_sequence,origin='lower',colors='black',alpha=0.5,latlon=False)
       except:  pass
       try:     im = m.imshow(np.flipud(data),cmap=ccmap,vmin=min,vmax=max)
       except:  im = m.imshow(np.flipud(data),cmap=ccmap)

       # Reference Point
       if showRef in ('yes','Yes','Y','y','YES'):
          try:
             refPoint=ref_color+ref_symbol
             ref_lon = llcrnrlon + xref*lon_step
             ref_lat = urcrnrlat + yref*lat_step
             plt.plot(ref_lon,ref_lat,refPoint,ms=ref_size)
          except:  print 'No reference point'

       # Colorbar
       from mpl_toolkits.axes_grid1 import make_axes_locatable
       divider = make_axes_locatable(ax)
       cax = divider.append_axes("right",size="5%", pad=0.30)
       plt.colorbar(im,cax=cax,extend=cb_extend)
       #plt.colorbar(im,cax=cax)

       # Status bar
       def format_coord(x,y):
         col = int((x-ullon)/lon_step+0.5)
         row = int((y-ullat)/lat_step+0.5)
         if col>=0 and col<=data.shape[1] and row >=0 and row<=data.shape[0]:
            z = data[row,col]
            return 'lat=%.4f,  lon=%.4f,  value=%.4f'%(x,y,z)
         else:
            return 'lat=%.4f,  lon=%.4f'%(x,y)
       ax.format_coord = format_coord


    # Plot in x/y coordinate: row and column
    else:
       fig = plt.figure()
       ax = fig.add_axes([0.1,0.1,0.8,0.8])
       try: plt.title(figTitle,fontsize=font_size)
       except: pass

       # Plot
       try:
          demFile
          if dispContour in ('no','No','n','N','NO','yes','Yes','y','Y','YES'):
             ax.imshow(ut.hillshade(dem,50.0),cmap=cmap_dem)
          if dispContour in ('only','Only','o','O','ONLY','yes','Yes','y','Y','YES'):
             ax.contour(dem,contour_sequence,origin='lower',colors='black',alpha=0.5)
       except:  pass
       try:     im = ax.imshow(data,cmap=ccmap, vmin=min, vmax=max)
       except:  im = ax.imshow(data,cmap=ccmap)
       cbar = plt.colorbar(im,extend=cb_extend)
       #cbar.set_label('m/yr')

       # Reference Point
       if showRef in ('yes','Yes','Y','y','YES'):
          try:
             refPoint=ref_color+ref_symbol
             ax.plot(xref,yref,refPoint,ms=ref_size)
          except:  print 'No reference point'

       plt.xlim(0,np.shape(data)[1])
       plt.ylim(  np.shape(data)[0],0)

       # Status bar
       def format_coord(x,y):
         col = int(x+0.5)
         row = int(y+0.5)
         if col>=0 and col<=data.shape[1] and row >=0 and row<=data.shape[0]:
            z = data[row,col]
            return 'x=%.4f,  y=%.4f,  value=%.4f'%(x,y,z)
         else:
            return 'x=%.4f,  y=%.4f'%(x,y)
       ax.format_coord = format_coord

    # Save Figure
    if saveFig in ('yes','Yes','Y','y','YES'):
       try:  figName
       except:
          if   k[0]=='velocity':            figName='velocity.pdf'
          elif k[0]=='temporal_coherence':  figName='temporal_coherence.pdf'
          elif k[0]=='dem':                 figName='DEM_error.pdf'
          elif k[0]=='rmse':                figName='rmse.pdf'
          elif k[0]=='mask':                figName='mask.pdf'
          elif k[0]=='timeseries':
             figName=os.path.basename(File).split('.')[0]+'_'+dateList[epoch_number]+'.pdf'
          elif k[0] in ('interferograms','coherence','wrapped'):
             figName=ifgramList[epoch_number]+'.pdf'
          elif k[0] in ['.unw','.cor','.hgt','.dem','.trans','.mli','.slc']:
             figName=File+'.pdf'
       plt.savefig(figName,dpi=fig_dpi)
       print 'Saved figure to '+figName

    # Show Figure
    if dispFig in ('yes','Yes','Y','y','YES'):  
       plt.show()
    
####################################################################
########################## Plot All ################################  

  elif allData2display == 'yes':
    try:    font_size
    except: font_size=8

    if k[0] == 'timeseries':
       atr = h5file[k[0]].attrs
       # rewrapping
       if rewrapping in ('yes','Yes','Y','y','YES'):
          print 'Rewrapping. Set min/max to -pi/pi. Showing phase.' 
          range2phase=4*np.pi/float(h5file['timeseries'].attrs['WAVELENGTH'])
          min=-np.pi
          max=np.pi
       else:  print 'No rewrapping. Showing displacement.'

    elif k[0] in ('interferograms','coherence','wrapped'):
       atr = h5file[k[0]][h5file[k[0]].keys()[0]].attrs
       # rewrapping
       if k[0] in ('coherence','wrapped') and rewrapping in ('yes','Yes','y','Y','YES'):
          print 'No rewrapping for coherence/wrapped files, set to "no"'
          rewrapping='no'
       if rewrapping in ('yes','Yes','Y','y','YES'):
          print 'Rewrapping. Set min/max to -pi/pi.'
          min=-np.pi
          max=np.pi
       else:  print 'No rewrapping'

    ### Subset Option ###
    try:      # y/latitude direction
      win_lat
      try:
        atr['Y_FIRST']
        win_y=[0]*2
        win_y[0]=int((win_lat[1]-float(atr['Y_FIRST']))/float(atr['Y_STEP']))
        win_y[1]=int((win_lat[0]-float(atr['Y_FIRST']))/float(atr['Y_STEP']))
        if win_y[0]<0: win_y[0]=0; print 'input latitude > max latitude! Set to max'
        print 'subset in latitude  - '+str(win_lat[0])+':'+str(win_lat[1])
      except:  print 'Non-geocoded file, cannot use LatLon option';   Usage(); sys.exit(1)
    except:
      try:
        win_y
        print 'subset in y direction - '+str(win_y[0])+':'+str(win_y[1])
      except: win_y = [0,int(atr['FILE_LENGTH'])]
    try:      # x/longitude direction
      win_lon
      try:
        atr['X_FIRST']
        win_x=[0]*2
        win_x[0]=int((win_lon[0]-float(atr['X_FIRST']))/float(atr['X_STEP']))
        win_x[1]=int((win_lon[1]-float(atr['X_FIRST']))/float(atr['X_STEP']))
        if win_x[0]<0: win_x[0]=0; print 'input longitude < min longitude! Set to min'
        print 'subset in longitude - '+str(win_lon[0])+':'+str(win_lon[1])
      except:  print 'Non-geocoded file, cannot use LatLon option';   Usage(); sys.exit(1)
    except:
      try:
        win_x
        print 'subset in x direction - '+str(win_x[0])+':'+str(win_x[1])
      except: win_x = [0,int(atr['WIDTH'])]

    # Figure Name
    if saveFig in ('yes','Yes','Y','y','YES'):
       try:
          figName
          figNameBase = os.path.basename(figName).split('.')[0]
          figNameExt  = os.path.basename(figName).split('.')[1]
       except:
          figNameBase = os.path.basename(File).split('.')[0]
          figNameExt  = '.pdf'

    ################## DEM Options ####################
    try:
       demFile
       print 'Show topography'
       import pysar._readfile as readfile
       if   os.path.basename(demFile).split('.')[1]=='hgt':  amp,dem,demRsc = readfile.read_float32(demFile)
       elif os.path.basename(demFile).split('.')[1]=='dem':      dem,demRsc = readfile.read_dem(demFile)

       # Subset
       dem = dem[win_y[0]:win_y[1],win_x[0]:win_x[1]]

       if dispContour in ('no','No','n','N','NO','yes','Yes','y','Y','YES'):           #DEM basemap
          print 'plot DEM as basemap'
          cmap_dem=plt.get_cmap('gray')
          import pysar._pysar_utilities as ut
          hillshade_dem=ut.hillshade(dem,50.0)
       if dispContour in ('only','Only','o','O','ONLY','yes','Yes','y','Y','YES'):     #contour
          print 'plot contour'
          #if smoothContour in ('yes','Yes','y','Y','YES'):
          import scipy.ndimage as ndimage
          dem=ndimage.gaussian_filter(dem,sigma=contour_sigma,order=0)
          contour_sequence=np.arange(-6000,9000,contour_step)
    except:  print 'No DEM file'

    ################## Plot Loop ####################
    ifgramList=h5file[k[0]].keys()
    nfigs   = figs_rows*figs_cols
    lifgram = len(ifgramList)
    print 'number of epochs/interferograms to display: '+str(lifgram)
    kk=int(np.ceil(lifgram/float(nfigs)))   # ceil avoids creating an empty trailing figure
    ii=0

    # plot (1,end-1) figures
    for j in range(1,kk):
       fig = plt.figure(j)
       ii=(j-1)*nfigs+1
       for i in range(ii,ii+nfigs):
           print 'loading '+ifgramList[i-1]
           ax = fig.add_subplot(figs_rows,figs_cols,i-ii+1) 

           # Data option
           if k[0] == 'timeseries':
              figTitle = ifgramList[i-1]
              dset = h5file[k[0]].get(ifgramList[i-1])
              data = dset[0:dset.shape[0],0:dset.shape[1]]
              if rewrapping in ('yes','Yes','Y','y','YES'):
                 data=range2phase*data
                 data=rewrap(data)
           elif k[0] in ('interferograms','coherence','wrapped'):
              figTitle = str(i)+' : '+h5file[k[0]][ifgramList[i-1]].attrs['DATE12']
              dset = h5file[k[0]][ifgramList[i-1]].get(ifgramList[i-1])
              data = dset[0:dset.shape[0],0:dset.shape[1]]
              if rewrapping in ('yes','Yes','Y','y','YES'):
                 data=np.angle(np.exp(1j*data))

           data = data[win_y[0]:win_y[1],win_x[0]:win_x[1]]

           # Plot
           try:
              demFile
              if dispContour in ('no','No','n','N','NO','yes','Yes','y','Y','YES'):
                 plt.imshow(hillshade_dem,cmap=cmap_dem)
              if dispContour in ('only','Only','o','O','ONLY','yes','Yes','y','Y','YES'):
                 plt.contour(dem,contour_sequence,origin='lower',colors='black',alpha=0.5)
           except:  pass
           try:     ax.imshow(data,cmap=ccmap,vmin=min,vmax=max)
           except:  ax.imshow(data,cmap=ccmap)

           ax.set_yticklabels([])
           ax.set_xticklabels([])
           ax.set_xticks([])
           ax.set_yticks([])
           if   title=='out':  ax.set_title(figTitle,fontsize=font_size)
           elif title=='in':   add_inner_title(ax, figTitle, loc=1)
       fig.subplots_adjust(wspace=Wspace,hspace=Hspace)
       if saveFig in ('yes','Yes','Y','y','YES'):   
           figName=figNameBase+'_'+str(j)+figNameExt
           plt.savefig(figName,dpi=fig_dpi)

    # plot the last figure
    fig = plt.figure(kk)
    ii=(kk-1)*nfigs+1
    for i in range(ii,lifgram+1):
           print 'loading '+ifgramList[i-1]
           ax = fig.add_subplot(figs_rows,figs_cols,i-ii+1)

           # Data option
           if k[0] == 'timeseries':
              figTitle = ifgramList[i-1]
              dset = h5file[k[0]].get(ifgramList[i-1])
              data = dset[0:dset.shape[0],0:dset.shape[1]]
              if rewrapping in ('yes','Yes','Y','y','YES'):
                 data=range2phase*data
                 data=rewrap(data)
           elif k[0] in ('interferograms','coherence','wrapped'):
              figTitle = str(i)+' : '+h5file[k[0]][ifgramList[i-1]].attrs['DATE12']
              dset = h5file[k[0]][ifgramList[i-1]].get(ifgramList[i-1])
              data = dset[0:dset.shape[0],0:dset.shape[1]]
              if rewrapping in ('yes','Yes','Y','y','YES'):
                 data=np.angle(np.exp(1j*data))

           data = data[win_y[0]:win_y[1],win_x[0]:win_x[1]]

           # Plot
           try:
              demFile
              if dispContour in ('no','No','n','N','NO','yes','Yes','y','Y','YES'):
                 plt.imshow(hillshade_dem,cmap=cmap_dem)
              if dispContour in ('only','Only','o','O','ONLY','yes','Yes','y','Y','YES'):
                 plt.contour(dem,contour_sequence,origin='lower',colors='black',alpha=0.5)
           except:  pass

           try:     ax.imshow(data,cmap=ccmap,vmin=min,vmax=max)
           except:  ax.imshow(data,cmap=ccmap)

           ax.xaxis.label.set_fontsize(20)
           ax.set_yticklabels([])
           ax.set_xticklabels([])
           ax.set_xticks([])
           ax.set_yticks([])
           if    title=='out':  ax.set_title(figTitle,fontsize=font_size)
           elif title =='in':   add_inner_title(ax, figTitle, loc=1)
    fig.subplots_adjust(wspace=Wspace,hspace=Hspace)
    if saveFig in ('yes','Yes','Y','y','YES'):
       figName=figNameBase+'_'+str(kk)+figNameExt
       plt.savefig(figName,dpi=fig_dpi)
       print 'Saved figure to '+figNameBase+'_*'+figNameExt

    if dispFig in ('yes','Yes','Y','y','YES'):
       plt.show()
   
####################################################################
####################################################################  

  try: h5file.close()
  except: pass
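
A side note on the listing above: the same case-insensitive membership test, in ('yes','Yes','Y','y','YES'), is repeated for every yes/no option (saveFig, dispFig, rewrapping, showRef). A minimal sketch of a normalizing helper that would collapse those checks; the name yesno is hypothetical and not part of the pysar source:

def yesno(value):
    # Treat any case variant of 'yes'/'y' as True, everything else as False.
    return str(value).strip().lower() in ('yes', 'y')

# e.g. "if saveFig in ('yes','Yes','Y','y','YES'):" becomes "if yesno(saveFig):"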

Example 37

Project: ReportLab
Source File: setup.py
View license
def main():
    #test to see if we've a special command
    if 'tests' in sys.argv or 'tests-preinstall' in sys.argv:
        if len(sys.argv)!=2:
            raise ValueError('tests commands may only be used alone')
        cmd = sys.argv[-1]
        PYTHONPATH=[pkgDir]
        if cmd=='tests-preinstall':
            PYTHONPATH.insert(0,pjoin(pkgDir,'src'))
        os.environ['PYTHONPATH']=os.pathsep.join(PYTHONPATH)
        os.chdir(pjoin(pkgDir,'tests'))
        os.system("%s runAll.py" % sys.executable)
        return

    SPECIAL_PACKAGE_DATA = {}
    RL_ACCEL = _find_rl_ccode('rl_accel','_rl_accel.c')
    LIBRARIES=[]
    EXT_MODULES = []
    if not RL_ACCEL:
        infoline( '***************************************************')
        infoline( '*No rl_accel code found, you can obtain it at     *')
        infoline( '*http://www.reportlab.org/downloads.html#_rl_accel*')
        infoline( '***************************************************')
    else:
        infoline( '################################################')
        infoline( '#Attempting install of _rl_accel, sgmlop & pyHnj')
        infoline( '#extensions from %r'%RL_ACCEL)
        infoline( '################################################')
        fn = pjoin(RL_ACCEL,'hyphen.mashed')
        SPECIAL_PACKAGE_DATA = {fn: pjoin('lib','hyphen.mashed')}
        EXT_MODULES += [
                    Extension( '_rl_accel',
                                [pjoin(RL_ACCEL,'_rl_accel.c')],
                                include_dirs=[],
                            define_macros=[],
                            library_dirs=[],
                            libraries=[], # libraries to link against
                            ),
                    Extension( 'sgmlop',
                            [pjoin(RL_ACCEL,'sgmlop.c')],
                            include_dirs=[],
                            define_macros=[],
                            library_dirs=[],
                            libraries=[], # libraries to link against
                            ),
                    Extension( 'pyHnj',
                            [pjoin(RL_ACCEL,'pyHnjmodule.c'),
                             pjoin(RL_ACCEL,'hyphen.c'),
                             pjoin(RL_ACCEL,'hnjalloc.c')],
                            include_dirs=[],
                            define_macros=[],
                            library_dirs=[],
                            libraries=[], # libraries to link against
                            ),
                    ]
    RENDERPM = _find_rl_ccode('renderPM','_renderPM.c')
    if not RENDERPM:
        infoline( '!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
        infoline( '!No renderPM code found, you can obtain it at     !')
        infoline( '!http://www.reportlab.org/downloads.html          !')
        infoline( '!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
    else:
        infoline( '################################################')
        infoline( '#Attempting install of _renderPM')
        infoline( '#extensions from %r'%RENDERPM)
        LIBART_DIR=pjoin(RENDERPM,'libart_lgpl')
        GT1_DIR=pjoin(RENDERPM,'gt1')
        MACROS=[('ROBIN_DEBUG',None)]
        MACROS=[]
        def libart_version():
            K = ('LIBART_MAJOR_VERSION','LIBART_MINOR_VERSION','LIBART_MICRO_VERSION')
            D = {}
            for l in open(pjoin(LIBART_DIR,'configure.in'),'r').readlines():
                l = l.strip().split('=')
                if len(l)>1 and l[0].strip() in K:
                    D[l[0].strip()] = l[1].strip()
                    if len(D)==3: break
            return (sys.platform == 'win32' and '\\"%s\\"' or '"%s"') % '.'.join(map(lambda k,D=D: D.get(k,'?'),K))
        LIBART_VERSION = libart_version()
        SOURCES=[pjoin(RENDERPM,'_renderPM.c'),
                    pjoin(LIBART_DIR,'art_vpath_bpath.c'),
                    pjoin(LIBART_DIR,'art_rgb_pixbuf_affine.c'),
                    pjoin(LIBART_DIR,'art_rgb_svp.c'),
                    pjoin(LIBART_DIR,'art_svp.c'),
                    pjoin(LIBART_DIR,'art_svp_vpath.c'),
                    pjoin(LIBART_DIR,'art_svp_vpath_stroke.c'),
                    pjoin(LIBART_DIR,'art_svp_ops.c'),
                    pjoin(LIBART_DIR,'art_vpath.c'),
                    pjoin(LIBART_DIR,'art_vpath_dash.c'),
                    pjoin(LIBART_DIR,'art_affine.c'),
                    pjoin(LIBART_DIR,'art_rect.c'),
                    pjoin(LIBART_DIR,'art_rgb_affine.c'),
                    pjoin(LIBART_DIR,'art_rgb_affine_private.c'),
                    pjoin(LIBART_DIR,'art_rgb.c'),
                    pjoin(LIBART_DIR,'art_rgb_rgba_affine.c'),
                    pjoin(LIBART_DIR,'art_svp_intersect.c'),
                    pjoin(LIBART_DIR,'art_svp_render_aa.c'),
                    pjoin(LIBART_DIR,'art_misc.c'),
                    pjoin(GT1_DIR,'gt1-parset1.c'),
                    pjoin(GT1_DIR,'gt1-dict.c'),
                    pjoin(GT1_DIR,'gt1-namecontext.c'),
                    pjoin(GT1_DIR,'gt1-region.c'),
                    ]

        if platform=='win32':
            FT_LIB=os.environ.get('FT_LIB','')
            if not FT_LIB: FT_LIB=config('FREETYPE','lib','')
            if FT_LIB and not os.path.isfile(FT_LIB):
                infoline('# freetype lib %r not found' % FT_LIB)
                FT_LIB=[]
            if FT_LIB:
                FT_INC_DIR=os.environ.get('FT_INC','')
                if not FT_INC_DIR: FT_INC_DIR=config('FREETYPE','inc')
                FT_MACROS = [('RENDERPM_FT',None)]
                FT_LIB_DIR = [dirname(FT_LIB)]
                FT_INC_DIR = [FT_INC_DIR or pjoin(dirname(FT_LIB_DIR[0]),'include')]
                FT_LIB_PATH = FT_LIB
                FT_LIB = [os.path.splitext(os.path.basename(FT_LIB))[0]]                
                if isdir(FT_INC_DIR[0]):                   
                    infoline('# installing with freetype %r' % FT_LIB_PATH)
                else:
                    infoline('# freetype2 include folder %r not found' % FT_INC_DIR[0])
                    FT_LIB=FT_LIB_DIR=FT_INC_DIR=FT_MACROS=[]
            else:
                FT_LIB=FT_LIB_DIR=FT_INC_DIR=FT_MACROS=[]
        else:
            FT_LIB_DIR=config('FREETYPE','lib')
            FT_INC_DIR=config('FREETYPE','inc')
            I,L=inc_lib_dirs()
            ftv = None
            for d in I:
                if isfile(pjoin(d, "ft2build.h")):
                    ftv = 21
                    FT_INC_DIR=[d,pjoin(d, "freetype2")]
                    break
                d = pjoin(d, "freetype2")
                if isfile(pjoin(d, "ft2build.h")):
                    ftv = 21
                    FT_INC_DIR=[d]
                    break
                if isdir(pjoin(d, "freetype")):
                    ftv = 20
                    FT_INC_DIR=[d]
                    break
            if ftv:
                FT_LIB=['freetype']
                FT_LIB_DIR=L
                FT_MACROS = [('RENDERPM_FT',None)]
                infoline('# installing with freetype version %d' % ftv)
            else:
                FT_LIB=FT_LIB_DIR=FT_INC_DIR=FT_MACROS=[]
        if not FT_LIB:
            infoline('# installing without freetype no ttf, sorry!')
            infoline('# You need to install a static library version of the freetype2 software')
            infoline('# If you need truetype support in renderPM')
            infoline('# You may need to edit setup.cfg (win32)')
            infoline('# or edit this file to access the library if it is installed')
        EXT_MODULES +=  [Extension( '_renderPM',
                                        SOURCES,
                                        include_dirs=[RENDERPM,LIBART_DIR,GT1_DIR]+FT_INC_DIR,
                                        define_macros=FT_MACROS+[('LIBART_COMPILATION',None)]+MACROS+[('LIBART_VERSION',LIBART_VERSION)],
                                        library_dirs=[]+FT_LIB_DIR,

                                        # libraries to link against
                                        libraries=FT_LIB,
                                        #extra_objects=['gt1.lib','libart.lib',],
                                        #extra_compile_args=['/Z7'],
                                        extra_link_args=[]
                                        ),
                            ]
        infoline('################################################')

    #copy some special case files into place so package_data will treat them properly
    PACKAGE_DIR = {'reportlab': pjoin('src','reportlab')}
    for fn,dst in SPECIAL_PACKAGE_DATA.iteritems():
        shutil.copyfile(fn,pjoin(PACKAGE_DIR['reportlab'],dst))
        reportlab_files.append(dst)
    get_fonts(PACKAGE_DIR, reportlab_files)
    try:
        setup(
            name="reportlab",
            version=get_version(),
            license="BSD license (see license.txt for details), Copyright (c) 2000-2010, ReportLab Inc.",
            description="The Reportlab Toolkit",
            long_description="""The ReportLab Toolkit. An Open Source Python library for generating PDFs and graphics.""",

            author="Andy Robinson, Robin Becker, the ReportLab team and the community",
            author_email="[email protected]",
            url="http://www.reportlab.com/",
            packages=[
                    'reportlab',
                    'reportlab.graphics.charts',
                    'reportlab.graphics.samples',
                    'reportlab.graphics.widgets',
                    'reportlab.graphics.barcode',
                    'reportlab.graphics',
                    'reportlab.lib',
                    'reportlab.pdfbase',
                    'reportlab.pdfgen',
                    'reportlab.platypus',
                    ],
            # Ideally we'd have this but PIL via easy_install doesn't seem stable
            #install_requires=[
            #        'PIL',
            #],
            package_dir = PACKAGE_DIR,
            package_data = {'reportlab': reportlab_files},
            ext_modules =   EXT_MODULES,
            )
        print
        print '########## SUMMARY INFO #########'
        print '\n'.join(INFOLINES)
    finally:
        for dst in SPECIAL_PACKAGE_DATA.itervalues():
            os.remove(pjoin(PACKAGE_DIR['reportlab'],dst))
            reportlab_files.remove(dst)
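
The sys.argv technique this example demonstrates is a pre-setup command intercept: the script looks for a special token anywhere in sys.argv, insists it is the only argument, and short-circuits before distutils ever parses the command line. A minimal standalone sketch of the same pattern; run_tests is a hypothetical stand-in for the real test runner:

import sys

def run_tests():
    print('running tests')

def main():
    # Intercept the special 'tests' command before normal argument handling.
    if 'tests' in sys.argv:
        if len(sys.argv) != 2:
            raise ValueError('tests command may only be used alone')
        run_tests()
        return
    # ...normal setup() path would continue here...

if __name__ == '__main__':
    main()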

Example 39

Project: sftpcloudfs
Source File: main.py
View license
    def __init__(self):
        """Parse configuration and CLI options."""
        global config_file

        # look for an alternative configuration file
        alt_config_file = False
        # used to show errors before we actually start parsing stuff
        parser = OptionParser()
        for arg in sys.argv:
            if arg == '--config':
                try:
                    alt_config_file = sys.argv[sys.argv.index(arg)+1]
                    config_file = alt_config_file
                except IndexError:
                    pass
            elif arg.startswith('--config='):
                _, alt_config_file = arg.split('=', 1)
                if alt_config_file == '':
                    parser.error("--config option requires an argument")
                config_file = alt_config_file

        config = RawConfigParser({'auth-url': None,
                                  'insecure': False,
                                  'host-key-file': None,
                                  'bind-address': "127.0.0.1",
                                  'port': 8022,
                                  'server-ident': 'sftpcloudfs_%s' % version,
                                  'memcache': None,
                                  'max-children': "20",
                                  'auth-timeout': "60",
                                  'negotiation-timeout': "0",
                                  'keepalive': "0",
                                  'ciphers': None,
                                  'digests': None,
                                  'log-file': None,
                                  'syslog': 'no',
                                  'verbose': 'no',
                                  'scp-support': 'yes',
                                  'pid-file': None,
                                  'uid': None,
                                  'gid': None,
                                  'split-large-files': "0",
                                  'hide-part-dir': "no",
                                  # keystone auth 2.0 support
                                  'keystone-auth': False,
                                  'keystone-region-name': None,
                                  'keystone-tenant-separator': default_ks_tenant_separator,
                                  'keystone-service-type': default_ks_service_type,
                                  'keystone-endpoint-type': default_ks_endpoint_type,
                                  })

        try:
            if not config.read(config_file) and alt_config_file:
                # the default conf file is optional
                parser.error("failed to read %s" % config_file)
        except ParsingError as ex:
             parser.error("failed to read %s: %s" % (config_file, ex.message))

        if not config.has_section('sftpcloudfs'):
            config.add_section('sftpcloudfs')

        parser = OptionParser(version="%prog " + version,
                              description="This is a SFTP interface to OpenStack " + \
                                    "Object Storage (Swift).",
                              epilog="Contact and support at: %s" % project_url)

        parser.add_option("-a", "--auth-url", dest="authurl",
                          default=config.get('sftpcloudfs', 'auth-url'),
                          help="Authentication URL")

        parser.add_option("--insecure", dest="insecure",
                          action="store_true",
                          default=config.get('sftpcloudfs', 'insecure'),
                          help="Allow to access servers without checking SSL certs")

        parser.add_option("-k", "--host-key-file", dest="host_key",
                          default=config.get('sftpcloudfs', 'host-key-file'),
                          help="Host RSA key used by the server")

        parser.add_option("-b", "--bind-address", dest="bind_address",
                          default=config.get('sftpcloudfs', 'bind-address'),
                          help="Address to bind (default: 127.0.0.1)")

        parser.add_option("-p", "--port", dest="port",
                          type="int",
                          default=config.get('sftpcloudfs', 'port'),
                          help="Port to bind (default: 8022)")

        parser.add_option("--server-ident", dest="server_ident",
                          type="str",
                          default=config.get('sftpcloudfs', 'server-ident'),
                          help="Server ident to use when sending the SSH banner to the " + \
                                  "client (default: sftpcloudfs_%s)" % version)

        memcache = config.get('sftpcloudfs', 'memcache')
        if memcache:
            memcache = [x.strip() for x in memcache.split(',')]
        parser.add_option('--memcache',
                          type="str",
                          dest="memcache",
                          action="append",
                          default=memcache,
                          help="Memcache server(s) to be used for cache (ip:port)")


        parser.add_option("-l", "--log-file", dest="log_file",
                          default=config.get('sftpcloudfs', 'log-file'),
                          help="Log into provided file")

        parser.add_option("-f", "--foreground", dest="foreground",
                          action="store_true",
                          default=False,
                          help="Run in the foreground (don't detach from terminal)")

        parser.add_option("--disable-scp", dest="no_scp",
                          action="store_true",
                          default=not config.getboolean('sftpcloudfs', 'scp-support'),
                          help="Disable SCP support (default: enabled)")

        parser.add_option("--syslog", dest="syslog",
                          action="store_true",
                          default=config.getboolean('sftpcloudfs', 'syslog'),
                          help="Enable logging to system logger (daemon facility)")

        parser.add_option("-v", "--verbose", dest="verbose",
                          action="store_true",
                          default=config.getboolean('sftpcloudfs', 'verbose'),
                          help="Show detailed information on logging")

        parser.add_option('--pid-file',
                          type="str",
                          dest="pid_file",
                          default=config.get('sftpcloudfs', 'pid-file'),
                          help="Full path to the pid file location")

        parser.add_option('--uid',
                          type="int",
                          dest="uid",
                          default=config.get('sftpcloudfs', 'uid'),
                          help="UID to drop the privileges to when in daemon mode")

        parser.add_option('--gid',
                          type="int",
                          dest="gid",
                          default=config.get('sftpcloudfs', 'gid'),
                          help="GID to drop the privileges to when in daemon mode")

        parser.add_option('--keystone-auth',
                          action="store_true",
                          dest="keystone",
                          default=config.get('sftpcloudfs', 'keystone-auth'),
                          help="Use auth 2.0 (Keystone, requires keystoneclient)")

        parser.add_option('--keystone-region-name',
                          type="str",
                          dest="region_name",
                          default=config.get('sftpcloudfs', 'keystone-region-name'),
                          help="Region name to be used in auth 2.0")

        parser.add_option('--keystone-tenant-separator',
                          type="str",
                          dest="tenant_separator",
                          default=config.get('sftpcloudfs', 'keystone-tenant-separator'),
                          help="Character used to separate tenant_name/username in auth 2.0, " + \
                              "default: TENANT%sUSERNAME" % default_ks_tenant_separator)

        parser.add_option('--keystone-service-type',
                          type="str",
                          dest="service_type",
                          default=config.get('sftpcloudfs', 'keystone-service-type'),
                          help="Service type to be used in auth 2.0, default: %s" % default_ks_service_type)

        parser.add_option('--keystone-endpoint-type',
                          type="str",
                          dest="endpoint_type",
                          default=config.get('sftpcloudfs', 'keystone-endpoint-type'),
                          help="Endpoint type to be used in auth 2.0, default: %s" % default_ks_endpoint_type)

        parser.add_option('--config',
                          type="str",
                          dest="config",
                          default=config_file,
                          help="Use an alternative configuration file")

        (options, args) = parser.parse_args()

        # required parameters
        if not options.authurl:
            parser.error("No auth-url provided")

        if not options.host_key:
            parser.error("No host-key-file provided")

        try:
            self.host_key = paramiko.RSAKey(filename=options.host_key)
        except (IOError, paramiko.SSHException), e:
            parser.error("host-key-file: %s" % e)

        if options.memcache:
            ObjectStorageFS.memcache_hosts = options.memcache
            try:
                ObjectStorageFS(None, None, None)
            except (ValueError, TypeError):
                parser.error("memcache: invalid server address, ip:port expected")

        if options.pid_file:
            self.pidfile = PIDFile(options.pid_file)
            if self.pidfile.is_locked():
                parser.error("pid-file found: %s\nIs the server already running?" % options.pid_file)
        else:
            self.pidfile = None

        try:
            options.max_children = int(config.get('sftpcloudfs', 'max-children'))
        except ValueError:
            parser.error('max-children: invalid value, integer expected')

        try:
            options.auth_timeout = int(config.get('sftpcloudfs', 'auth-timeout'))
        except ValueError:
            parser.error('auth-timeout: invalid value, integer expected')

        if options.auth_timeout <= 0:
            parser.error('auth-timeout: invalid value')

        try:
            options.negotiation_timeout = int(config.get('sftpcloudfs', 'negotiation-timeout'))
        except ValueError:
            parser.error('negotiation-timeout: invalid value, integer expected')

        if options.negotiation_timeout < 0:
            parser.error('negotiation-timeout: invalid value')

        try:
            options.keepalive = int(config.get('sftpcloudfs', 'keepalive'))
        except ValueError:
            parser.error('keepalive: invalid value, integer expected')

        if options.keepalive < 0:
            parser.error('keepalive: invalid value')

        options.secopts = {}
        ciphers = config.get('sftpcloudfs', 'ciphers')
        if ciphers:
            options.secopts["ciphers"] = [x.strip() for x in ciphers.split(',')]

        digests = config.get('sftpcloudfs', 'digests')
        if digests:
            options.secopts["digests"] = [x.strip() for x in digests.split(',')]

        try:
            options.split_size = int(config.get('sftpcloudfs', 'split-large-files'))*10**6
        except ValueError:
            parser.error('split-large-files: invalid size, integer expected')

        options.hide_part_dir = config.getboolean('sftpcloudfs', 'hide-part-dir')

        if options.keystone:
            keystone_keys = ('region_name', 'tenant_separator', 'service_type', 'endpoint_type')
            options.keystone = dict((key, getattr(options, key)) for key in keystone_keys)

        self.options = options
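
The sys.argv technique here is a manual pre-scan for --config before OptionParser runs: the configuration file has to be located and read first, because its values become the defaults handed to the real parser. A minimal standalone sketch of that first pass; the default path is illustrative only:

import sys

def find_config(argv, default='app.conf'):
    # First pass over argv: pick out --config PATH or --config=PATH by hand,
    # so the file can seed parser defaults before argv is parsed for real.
    config = default
    for i, arg in enumerate(argv):
        if arg == '--config':
            try:
                config = argv[i + 1]
            except IndexError:
                pass  # let the real parser report the missing argument
        elif arg.startswith('--config='):
            config = arg.split('=', 1)[1]
    return config

print(find_config(['prog', '--config=alt.conf']))  # prints: alt.conf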

Example 41

Project: fedora-software
Source File: importcomponents.py
View license
    def handle(self, *args, **options):

        # check arguments
        if len(args) == 1:
            xml_file = args[0]
        elif len(args) == 0:
            try:
                # try to find the file in rpm database
                import rpm
                header = rpm.TransactionSet().dbMatch('name', 'appstream-data').next()
                xml_file = filter(lambda f: f[0].endswith('.xml.gz'), header.fiFromHeader())[0][0]
            except:
                if settings.DEBUG:
                    raise
                raise CommandError(
                    'Failed to find xml file provided by appstream-data package. '\
                    'Specify path to the file as an argument.\nType {} help importcomponents'.format(
                        os.path.basename(sys.argv[0])
                    ))
        elif len(args) > 1:
            raise CommandError('Invalid number of arguments.\nType {} help importcomponents'.format(
                os.path.basename(sys.argv[0])))

        logger.info('Reading %s' % xml_file)

        try:
            tree = ElementTree.fromstring(gzip.open(xml_file,'rb').read())
        except Exception as e:
            if settings.DEBUG:
                raise
            raise CommandError('Failed to read content of {xml_file}: {e}\nType {manage} help importcomponents'.format(
                xml_file = xml_file, e = e, manage = os.path.basename(sys.argv[0])
                ))

        component_nodes_count = len(tree)
        logger.info('Parsed {} component nodes'.format(component_nodes_count))

        errors = 0
        component_ids = []
        for c_node in tree:
            c_type      = 'unknown'
            c_type_id   = 'unknown'

            try:
                with transaction.atomic():
                    c_type      = c_node.attrib['type']
                    c_type_id   = c_node.find('id').text
                    c_pkgname   = c_node.find('pkgname').text
                    try:
                        c_project_license = c_node.find('project_license').text
                    except:
                        c_project_license = None

                    logger.info('Importing component {}/{} ({}/{})'.format(
                        c_type, c_type_id, len(component_ids)+1, component_nodes_count,
                    ))

                    # create component
                    c = Component.objects.get_or_create(
                        type            = c_type,
                        type_id         = c_type_id,
                        pkgname         = c_pkgname,
                        project_license = c_project_license,
                    )[0]

                    lang_attr = '{http://www.w3.org/XML/1998/namespace}lang'

                    # create names
                    c.names.all().delete()
                    for name_node in c_node.findall('name'):
                        c.names.add(ComponentName(
                            lang = name_node.attrib.get(lang_attr),
                            name = name_node.text,
                        ))

                    # create summaries
                    c.summaries.all().delete()
                    for summary_node in c_node.findall('summary'):
                        c.summaries.add(ComponentSummary(
                            lang = summary_node.attrib.get(lang_attr),
                            summary = summary_node.text,
                        ))

                    # create descriptions
                    c.descriptions.all().delete()
                    for description_node in c_node.findall('description'):
                        c.descriptions.add(ComponentDescription(
                            lang = description_node.attrib.get(lang_attr),
                            description = ElementTree.tostring(description_node, method="html"),
                        ))

                    # create icons
                    c.icons.all().delete()
                    for icon_node in c_node.findall('icon'):
                        c.icons.add(ComponentIcon(
                            icon    = icon_node.text,
                            type    = icon_node.attrib.get('type'),
                            height  = icon_node.attrib.get('height'),
                            width   = icon_node.attrib.get('width'),
                        ))

                    # create categories
                    c.categories.all().delete()
                    categories_node = c_node.find('categories')
                    if categories_node is not None:
                        for category_node in categories_node.findall('category'):
                            c.categories.add(Category.objects.get_or_create(
                                slug        = slugify(category_node.text),
                                category    = category_node.text,
                            )[0])

                    # create keywords
                    c.keywords.all().delete()
                    keywords_node = c_node.find('keywords')
                    if keywords_node is not None:
                        for keyword_node in keywords_node.findall('keyword'):
                            c.keywords.add(Keyword.objects.get_or_create(
                                lang    = keyword_node.attrib.get(lang_attr),
                                keyword = keyword_node.text,
                            )[0])

                    # create urls
                    c.urls.all().delete()
                    for url_node in c_node.findall('url'):
                        if url_node.text is not None:
                            c.urls.add(ComponentUrl(
                                url     = url_node.text,
                                type    = url_node.attrib.get('type'),
                            ))

                    # create screenshots
                    c.screenshots.all().delete()
                    screenshots_node = c_node.find('screenshots')
                    if screenshots_node is not None:
                        for screenshot_node in screenshots_node.findall('screenshot'):
                            screenshot = ComponentScreenshot(
                                type = screenshot_node.attrib.get('type'),
                            )
                            c.screenshots.add(screenshot)
                            for image_node in screenshot_node.findall('image'):
                                screenshot.images.add(ComponentScreenshotImage(
                                    image   = image_node.text,
                                    type    = image_node.attrib.get('type'),
                                    height  = image_node.attrib.get('height'),
                                    width   = image_node.attrib.get('width'),
                                ))

                    # create releases
                    c.releases.all().delete()
                    releases_node = c_node.find('releases')
                    if releases_node is not None:
                        for release_node in releases_node.findall('release'):
                            c.releases.add(ComponentRelease(
                                version     = release_node.attrib.get('version'),
                                timestamp   = datetime.utcfromtimestamp(
                                    int(release_node.attrib.get('timestamp'))
                                ).replace(tzinfo=utc)
                            ))

                    # create languages
                    c.languages.all().delete()
                    languages_node = c_node.find('languages')
                    if languages_node is not None:
                        for lang_node in languages_node.findall('lang'):
                            c.languages.add(ComponentLanguage(
                                percentage  = lang_node.attrib.get('percentage'),
                                lang        = lang_node.text,
                            ))

                    # create metadata
                    c.metadata.all().delete()
                    metadata_node = c_node.find('metadata')
                    if metadata_node is not None:
                        for value_node in metadata_node.findall('value'):
                            c.metadata.add(ComponentMetadata(
                                key     = value_node.attrib.get('key'),
                                value   = value_node.text,
                            ))

            except Exception as e:
                logger.error('Failed to import node {}/{}: {}'.format(c_type, c_type_id, e))
                if settings.DEBUG:
                    raise
                errors += 1
            else:
                component_ids.append(c.id)

        # check errors
        if errors > 0:
            raise CommandError('Failed to import components: {} error(s)'.format(errors))
        else:
            logger.info('Successfully imported {} components'.format(len(component_ids)))

        # delete stale components
        deleted_components_count = 0
        for c in Component.objects.all():
            if c.id not in component_ids:
                logger.info('Deleting stale component {}/{}'.format(c.type, c.type_id))
                c.delete()
                deleted_components_count += 1

        if deleted_components_count > 0:
            logger.info('Successfully deleted {} stale components'.format(deleted_components_count))
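
A side note on two patterns above, offered as hedged sketches rather than as part of the original source. ElementTree's findtext() already returns None (or a chosen default) when an optional child element is missing, so the try/except around project_license can be avoided:

from xml.etree import ElementTree

c_node = ElementTree.fromstring(
    '<component type="desktop"><id>org.example.App</id></component>'
)

# findtext() returns the default (None here) when the child element is absent,
# which replaces the try/except pattern used for project_license above.
c_project_license = c_node.findtext('project_license')
print(c_project_license)  # prints: None

Likewise, the stale-component sweep at the end could be collapsed into a single query, Component.objects.exclude(id__in=component_ids).delete(), at the cost of the per-component log lines.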

Example 42

View license
def main():

    #Check if some arguments have been passed
    #pass the path of a video
    if(len(sys.argv) > 2):
        file_path = sys.argv[1]
        if(os.path.isfile(file_path)==False): 
            print("ex_pnp_head_pose_estimation: the file specified does not exist.")
            return
        else:
            #Open the video file
            video_capture = cv2.VideoCapture(file_path)
            if(video_capture.isOpened() == True): print("ex_pnp_head_pose_estimation: the video source has been opened correctly...")
            # Define the codec and create VideoWriter object
            #fourcc = cv2.VideoWriter_fourcc(*'XVID')
            output_path = sys.argv[2]
            fourcc = cv2.cv.CV_FOURCC(*'XVID')
            out = cv2.VideoWriter(output_path, fourcc, 20.0, (1280,720))
    else:
        print("You have to pass as argument the path to a video file and the path to the output file to produce, for example: \n python ex_pnp_pose_estimation_video.py /home/video.mpg ./output.avi")
        return


    #Create the main window and move it
    cv2.namedWindow('Video')
    cv2.moveWindow('Video', 20, 20)

    #Obtaining the CAM dimension
    cam_w = int(video_capture.get(3))
    cam_h = int(video_capture.get(4))

    #Defining the camera matrix.
    #To get better results it is necessary to find the focal
    # length of the camera. fx/fy are the focal lengths (in pixels) 
    # and cx/cy are the optical centres. These values can be obtained 
    # roughly by approximation, for example in a 640x480 camera:
    # cx = 640/2 = 320
    # cy = 480/2 = 240
    # fx = fy = cx/tan(60/2 * pi / 180) = 554.26
    c_x = cam_w / 2
    c_y = cam_h / 2
    f_x = c_x / numpy.tan(60/2 * numpy.pi / 180)
    f_y = f_x

    #Estimated camera matrix values.
    camera_matrix = numpy.float32([[f_x, 0.0, c_x],
                                   [0.0, f_y, c_y], 
                                   [0.0, 0.0, 1.0] ])

    print("Estimated camera matrix: \n" + str(camera_matrix) + "\n")

    #These are the camera matrix values estimated on my webcam with
    # the calibration code (see: src/calibration):
    #camera_matrix = numpy.float32([[602.10618226,          0.0, 320.27333589],
                                   #[         0.0, 603.55869786,  229.7537026], 
                                   #[         0.0,          0.0,          1.0] ])

    #Distortion coefficients
    camera_distortion = numpy.float32([0.0, 0.0, 0.0, 0.0, 0.0])

    #Distortion coefficients estimated by calibration
    #camera_distortion = numpy.float32([ 0.06232237, -0.41559805,  0.00125389, -0.00402566,  0.04879263])


    #This matrix contains the 3D points of the
    # 15 landmarks we want to find. It has been
    # obtained from anthropometric measurements
    # on the human head.
    landmarks_3D = numpy.float32([P3D_RIGHT_SIDE,
                                  P3D_GONION_RIGHT,
                                  P3D_MENTON,
                                  P3D_GONION_LEFT,
                                  P3D_LEFT_SIDE,
                                  P3D_FRONTAL_BREADTH_RIGHT,
                                  P3D_FRONTAL_BREADTH_LEFT,
                                  P3D_SELLION,
                                  P3D_NOSE,
                                  P3D_SUB_NOSE,
                                  P3D_RIGHT_EYE,
                                  P3D_RIGHT_TEAR,
                                  P3D_LEFT_TEAR,
                                  P3D_LEFT_EYE,
                                  P3D_STOMION])

    #Declaring the two classifiers
    my_cascade = haarCascade("./etc/haarcascade_frontalface_alt.xml", "./etc/haarcascade_profileface.xml")
    my_detector = faceLandmarkDetection('./etc/shape_predictor_68_face_landmarks.dat')

    #Error counter definition
    no_face_counter = 0

    #Variables that identify the face
    #position in the main frame.
    face_x1 = 0
    face_y1 = 0
    face_x2 = 0
    face_y2 = 0
    face_w = 0
    face_h = 0

    #Variables that identify the ROI
    #position in the main frame.
    roi_x1 = 0
    roi_y1 = 0
    roi_x2 = cam_w
    roi_y2 = cam_h
    roi_w = cam_w
    roi_h = cam_h
    roi_resize_w = int(cam_w/10)
    roi_resize_h = int(cam_h/10)

    while(True):

        # Capture frame-by-frame; stop when the stream ends
        ret, frame = video_capture.read()
        if(ret == False): break
        gray = cv2.cvtColor(frame[roi_y1:roi_y2, roi_x1:roi_x2], cv2.COLOR_BGR2GRAY)

        #Looking for faces with the cascade.
        #The classifier moves over the ROI,
        #starting from a minimum dimension and augmenting
        #slightly based on the scale factor parameter.
        #The scale factor for the frontal face is 1.10 (10%)
        #Scale factor: 1.15=15%, 1.25=25% ...etc.
        #Higher scale factors mean faster classification
        #but lower accuracy.
        #
        #Return code: 1=Frontal, 2=FrontRotLeft, 
        # 3=FrontRotRight, 4=ProfileLeft, 5=ProfileRight.
        my_cascade.findFace(gray, runFrontal=True, runFrontalRotated=True, runLeft=False, runRight=False, frontalScaleFactor=1.2, rotatedFrontalScaleFactor=1.2, leftScaleFactor=1.15, rightScaleFactor=1.15, minSizeX=80, minSizeY=80, rotationAngleCCW=30, rotationAngleCW=-30, lastFaceType=my_cascade.face_type)

        #Accumulate error values in a counter
        if(my_cascade.face_type == 0): 
            no_face_counter += 1

        #If no face is found for a certain
        #number of cycles, then the ROI is reset
        if(no_face_counter == 30):
            no_face_counter = 0
            roi_x1 = 0
            roi_y1 = 0
            roi_x2 = cam_w
            roi_y2 = cam_h
            roi_w = cam_w
            roi_h = cam_h

        #Checking which kind of face was returned
        if(my_cascade.face_type > 0 and my_cascade.face_type < 4):

            #Face found, reset the error counter
            no_face_counter = 0

            #Because the dlib landmark detector wants a precise
            #bounding box of the face, it is necessary to resize
            #the box returned by the OpenCV haar detector.
            #Adjusting the frame for profile left
            if(my_cascade.face_type == 4):
                face_margin_x1 = 20 - 10 #resize_rate + shift_rate
                face_margin_y1 = 20 + 5 #resize_rate + shift_rate
                face_margin_x2 = -20 - 10 #resize_rate + shift_rate
                face_margin_y2 = -20 + 5 #resize_rate + shift_rate
                face_margin_h = -0.7 #resize_factor
                face_margin_w = -0.7 #resize_factor
            #Adjusting the frame for profile right
            elif(my_cascade.face_type == 5):
                face_margin_x1 = 20 + 10
                face_margin_y1 = 20 + 5
                face_margin_x2 = -20 + 10
                face_margin_y2 = -20 + 5
                face_margin_h = -0.7
                face_margin_w = -0.7
            #No adjustments
            else:
                face_margin_x1 = 0
                face_margin_y1 = 0
                face_margin_x2 = 0
                face_margin_y2 = 0
                face_margin_h = 0
                face_margin_w = 0

            #Updating the face position
            face_x1 = my_cascade.face_x + roi_x1 + face_margin_x1
            face_y1 = my_cascade.face_y + roi_y1 + face_margin_y1
            face_x2 = my_cascade.face_x + my_cascade.face_w + roi_x1 + face_margin_x2
            face_y2 = my_cascade.face_y + my_cascade.face_h + roi_y1 + face_margin_y2
            face_w = my_cascade.face_w + int(my_cascade.face_w * face_margin_w)
            face_h = my_cascade.face_h + int(my_cascade.face_h * face_margin_h)

            #Updating the ROI position       
            roi_x1 = face_x1 - roi_resize_w
            if (roi_x1 < 0): roi_x1 = 0
            roi_y1 = face_y1 - roi_resize_h
            if(roi_y1 < 0): roi_y1 = 0
            roi_w = face_w + roi_resize_w + roi_resize_w
            if(roi_w > cam_w): roi_w = cam_w
            roi_h = face_h + roi_resize_h + roi_resize_h
            if(roi_h > cam_h): roi_h = cam_h    
            roi_x2 = face_x2 + roi_resize_w
            if (roi_x2 > cam_w): roi_x2 = cam_w
            roi_y2 = face_y2 + roi_resize_h
            if(roi_y2 > cam_h): roi_y2 = cam_h

            #Debugging printing utilities
            if(DEBUG == True):
                print("FACE: ", face_x1, face_y1, face_x2, face_y2, face_w, face_h)
                print("ROI: ", roi_x1, roi_y1, roi_x2, roi_y2, roi_w, roi_h)
                #Drawing a green rectangle
                # (and text) around the face.
                text_x1 = face_x1
                text_y1 = face_y1 - 3
                if(text_y1 < 0): text_y1 = 0
                cv2.putText(frame, "FACE", (text_x1,text_y1), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,255,0), 1);
                cv2.rectangle(frame, 
                             (face_x1, face_y1), 
                             (face_x2, face_y2), 
                             (0, 255, 0),
                              2)

            #In case of a frontal/rotated face,
            # the landmark detector is called
            if(my_cascade.face_type > 0):
                landmarks_2D = my_detector.returnLandmarks(frame, face_x1, face_y1, face_x2, face_y2, points_to_return=TRACKED_POINTS)

                if(DEBUG == True):
                    #cv2.drawKeypoints(frame, landmarks_2D)

                    for point in landmarks_2D:
                        cv2.circle(frame,( point[0], point[1] ), 2, (0,0,255), -1)


                #Applying the PnP solver to find the 3D pose
                # of the head from the 2D position of the
                # landmarks.
                #retval - bool
                #rvec - Output rotation vector that, together with tvec, brings 
                # points from the model coordinate system to the camera coordinate system.
                #tvec - Output translation vector.
                retval, rvec, tvec = cv2.solvePnP(landmarks_3D, 
                                                  landmarks_2D, 
                                                  camera_matrix, camera_distortion)

                #Now we project the 3D points into the image plane
                #Creating a 3-axis to be used as reference in the image.
                axis = numpy.float32([[50,0,0], 
                                      [0,50,0], 
                                      [0,0,50]])
                imgpts, jac = cv2.projectPoints(axis, rvec, tvec, camera_matrix, camera_distortion)

                #Drawing the three axis on the image frame.
                #The opencv colors are defined as BGR colors such as: 
                # (a, b, c) >> Blue = a, Green = b and Red = c
                #Our axis/color convention is X=R, Y=G, Z=B
                sellion_xy = (landmarks_2D[7][0], landmarks_2D[7][1])
                cv2.line(frame, sellion_xy, tuple(imgpts[1].ravel()), (0,255,0), 3) #GREEN
                cv2.line(frame, sellion_xy, tuple(imgpts[2].ravel()), (255,0,0), 3) #BLUE
                cv2.line(frame, sellion_xy, tuple(imgpts[0].ravel()), (0,0,255), 3) #RED

        #Drawing a yellow rectangle
        # (and text) around the ROI.
        #if(DEBUG == True):
            #text_x1 = roi_x1
            #text_y1 = roi_y1 - 3
            #if(text_y1 < 0): text_y1 = 0
            #cv2.putText(frame, "ROI", (text_x1,text_y1), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,255,255), 1);
            #cv2.rectangle(frame, 
                         #(roi_x1, roi_y1), 
                         #(roi_x2, roi_y2), 
                         #(0, 255, 255),
                         #2)

        #Writing in the output file
        out.write(frame)

        #Showing the frame and waiting
        # for the exit command
        cv2.imshow('Video', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'): break
   
    #Release the camera
    video_capture.release()
    print("Bye...")
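
The camera-matrix approximation used above can be checked in isolation. Below is a minimal sketch (not part of the original example) of the same formula, fx = cx / tan(FOV/2), assuming the 60-degree horizontal field of view the example assumes:

import math

def focal_from_fov(width_px, fov_deg=60.0):
    # fx = cx / tan(FOV/2), with cx at the image centre (width/2)
    cx = width_px / 2.0
    return cx / math.tan(math.radians(fov_deg) / 2.0)

print(round(focal_from_fov(640), 2))  # 554.26, matching the comment in the example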

Example 44

Project: filmkodi
Source File: runfiles.py
View license
def main():
    import sys

    # Separate the nose params and the pydev params.
    pydev_params = []
    other_test_framework_params = []
    found_other_test_framework_param = None

    NOSE_PARAMS = '--nose-params'
    PY_TEST_PARAMS = '--py-test-params'

    for arg in sys.argv[1:]:
        if not found_other_test_framework_param and arg != NOSE_PARAMS and arg != PY_TEST_PARAMS:
            pydev_params.append(arg)

        else:
            if not found_other_test_framework_param:
                found_other_test_framework_param = arg
            else:
                other_test_framework_params.append(arg)


    # Here we'll run either with nose or with the pydev_runfiles.
    from _pydev_runfiles import pydev_runfiles
    from _pydev_runfiles import pydev_runfiles_xml_rpc
    from _pydevd_bundle import pydevd_constants
    from pydevd_file_utils import _NormFile

    DEBUG = 0
    if DEBUG:
        sys.stdout.write('Received parameters: %s\n' % (sys.argv,))
        sys.stdout.write('Params for pydev: %s\n' % (pydev_params,))
        if found_other_test_framework_param:
            sys.stdout.write('Params for test framework: %s, %s\n' % (found_other_test_framework_param, other_test_framework_params))

    try:
        configuration = pydev_runfiles.parse_cmdline([sys.argv[0]] + pydev_params)
    except:
        sys.stderr.write('Command line received: %s\n' % (sys.argv,))
        raise
    pydev_runfiles_xml_rpc.initialize_server(configuration.port)  # Note that if the port is None, a Null server will be initialized.

    NOSE_FRAMEWORK = 1
    PY_TEST_FRAMEWORK = 2
    try:
        if found_other_test_framework_param:
            test_framework = 0  # Default (pydev)
            if found_other_test_framework_param == NOSE_PARAMS:
                import nose
                test_framework = NOSE_FRAMEWORK

            elif found_other_test_framework_param == PY_TEST_PARAMS:
                import pytest
                test_framework = PY_TEST_FRAMEWORK

            else:
                raise ImportError()

        else:
            raise ImportError()

    except ImportError:
        if found_other_test_framework_param:
            sys.stderr.write('Warning: Could not import the test runner: %s. Running with the default pydev unittest runner instead.\n' % (
                found_other_test_framework_param,))

        test_framework = 0

    # Clear any exception that may be there so that clients don't see it.
    # See: https://sourceforge.net/tracker/?func=detail&aid=3408057&group_id=85796&atid=577329
    if hasattr(sys, 'exc_clear'):
        sys.exc_clear()

    if test_framework == 0:

        return pydev_runfiles.main(configuration)  # Note: still doesn't return a proper value.

    else:
        # We'll convert the parameters to what nose or py.test expects.
        # The supported parameters are:
        # runfiles.py  --config-file|-t|--tests <Test.test1,Test2>  dirs|files --nose-params xxx yyy zzz
        # (all after --nose-params should be passed directly to nose)

        # In java:
        # --tests = Constants.ATTR_UNITTEST_TESTS
        # --config-file = Constants.ATTR_UNITTEST_CONFIGURATION_FILE


        # The only thing actually handled here are the tests that we want to run, which we'll
        # handle and pass as what the test framework expects.

        py_test_accept_filter = {}
        files_to_tests = configuration.files_to_tests

        if files_to_tests:
            # Handling through the file contents (file where each line is a test)
            files_or_dirs = []
            for file, tests in files_to_tests.items():
                if test_framework == NOSE_FRAMEWORK:
                    for test in tests:
                        files_or_dirs.append(file + ':' + test)

                elif test_framework == PY_TEST_FRAMEWORK:
                    file = _NormFile(file)
                    py_test_accept_filter[file] = tests
                    files_or_dirs.append(file)

                else:
                    raise AssertionError('Cannot handle test framework: %s at this point.' % (test_framework,))

        else:
            if configuration.tests:
                # Tests passed (works together with the files_or_dirs)
                files_or_dirs = []
                for file in configuration.files_or_dirs:
                    if test_framework == NOSE_FRAMEWORK:
                        for t in configuration.tests:
                            files_or_dirs.append(file + ':' + t)

                    elif test_framework == PY_TEST_FRAMEWORK:
                        file = _NormFile(file)
                        py_test_accept_filter[file] = configuration.tests
                        files_or_dirs.append(file)

                    else:
                        raise AssertionError('Cannot handle test framework: %s at this point.' % (test_framework,))
            else:
                # Only files or dirs passed (let it do the test-loading based on those paths)
                files_or_dirs = configuration.files_or_dirs

        argv = other_test_framework_params + files_or_dirs


        if test_framework == NOSE_FRAMEWORK:
            # Nose usage: http://somethingaboutorange.com/mrl/projects/nose/0.11.2/usage.html
            # show_stdout_option = ['-s']
            # processes_option = ['--processes=2']
            argv.insert(0, sys.argv[0])
            if DEBUG:
                sys.stdout.write('Final test framework args: %s\n' % (argv[1:],))

            from _pydev_runfiles import pydev_runfiles_nose
            PYDEV_NOSE_PLUGIN_SINGLETON = pydev_runfiles_nose.start_pydev_nose_plugin_singleton(configuration)
            argv.append('--with-pydevplugin')
            # Return 'not' because it will return 'success' (so, exit == 0 if success)
            return not nose.run(argv=argv, addplugins=[PYDEV_NOSE_PLUGIN_SINGLETON])

        elif test_framework == PY_TEST_FRAMEWORK:
            if DEBUG:
                sys.stdout.write('Final test framework args: %s\n' % (argv,))
                sys.stdout.write('py_test_accept_filter: %s\n' % (py_test_accept_filter,))

            def dotted(p):
                # Helper to convert path to have dots instead of slashes
                return os.path.normpath(p).replace(os.sep, "/").replace('/', '.')

            curr_dir = os.path.realpath('.')
            curr_dotted = dotted(curr_dir) + '.'

            # Overcome limitation on py.test:
            # When searching conftest if we have a structure as:
            # /my_package
            # /my_package/conftest.py
            # /my_package/tests
            # /my_package/tests/test_my_package.py
            # The test_my_package won't have access to the conftest contents from the
            # test_my_package.py file unless the working dir is set to /my_package.
            #
            # See related issue (for which we work-around below):
            # https://bitbucket.org/hpk42/pytest/issue/639/conftest-being-loaded-twice-giving

            for path in sys.path:
                path_dotted = dotted(path)
                if curr_dotted.startswith(path_dotted):
                    os.chdir(path)
                    break

            for i in xrange(len(argv)):
                arg = argv[i]
                # Workaround bug in py.test: if we pass the full path it ends up importing conftest
                # more than once (so, always work with relative paths).
                if os.path.isfile(arg) or os.path.isdir(arg):
                    from _pydev_bundle.pydev_imports import relpath
                    try:
                        # May fail if on different drives
                        arg = relpath(arg)
                    except ValueError:
                        pass
                    else:
                        argv[i] = arg

            # To find our runfile helpers (i.e.: plugin)...
            d = os.path.dirname(__file__)
            if d not in sys.path:
                sys.path.insert(0, d)

            import pickle, zlib, base64

            # Update environment PYTHONPATH so that it finds our plugin if using xdist.
            os.environ['PYTHONPATH'] = os.pathsep.join(sys.path)

            # Set what should be skipped in the plugin through an environment variable
            s = base64.b64encode(zlib.compress(pickle.dumps(py_test_accept_filter)))
            if pydevd_constants.IS_PY3K:
                s = s.decode('ascii')  # Must be str in py3.
            os.environ['PYDEV_PYTEST_SKIP'] = s

            # Identifies the main pid (i.e.: if it's not the main pid it has to connect back to the
            # main pid to give xml-rpc notifications).
            os.environ['PYDEV_MAIN_PID'] = str(os.getpid())
            os.environ['PYDEV_PYTEST_SERVER'] = str(configuration.port)

            argv.append('-p')
            argv.append('_pydev_runfiles.pydev_runfiles_pytest2')
            if 'unittest' in sys.modules or 'unittest2' in sys.modules:
                sys.stderr.write('pydev test runner error: imported unittest before running pytest.main\n')
            return pytest.main(argv)

        else:
            raise AssertionError('Cannot handle test framework: %s at this point.' % (test_framework,))
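
The PYDEV_PYTEST_SKIP handoff above (pickle -> zlib -> base64 -> environment variable) has a symmetric decode side. Here is a minimal round-trip sketch; the decode half is our assumption about what the receiving plugin does, not code taken from pydev:

import base64, os, pickle, zlib

# Encode, mirroring the runner above (the filter contents are made up)
accept_filter = {'tests/test_app.py': ['TestApp.test_ok']}
s = base64.b64encode(zlib.compress(pickle.dumps(accept_filter))).decode('ascii')
os.environ['PYDEV_PYTEST_SKIP'] = s

# Decode (hypothetical plugin side): reverse each step in order
restored = pickle.loads(zlib.decompress(base64.b64decode(os.environ['PYDEV_PYTEST_SKIP'])))
assert restored == accept_filter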

Example 46

View license
def main():
    variable1='Teste'
    if len(sys.argv) == 1:
        prog = os.path.basename(sys.argv[0])
        print '      '+sys.argv[0]+' [options]'
        print "     Help : ", prog, " --help"
        print "       or : ", prog, " -h"
        print "example (scene): python %s -o scene -d 20151001 -f 20151231 -s 199030 -u usgs.txt"%sys.argv[0]
        print "example (scene): python %s -z unzip -b LT5 -o scene -d 20151001 -f 20151231 -s 203034 -u usgs.txt --output /outputdir/"%sys.argv[0]
        print "example (scene): python %s -z unzip -b LT5 -o scene -d 20151001 -f 20151231 -s 203034 -u usgs.txt --output /outputdir/ -k update --outputcatalogs /outputcatalogsdir/"%sys.argv[0]
        print "example (scene): python %s -b LE7 -o scene -d 20151201 -f 20151231 -s 191025 -u usgs.txt --output . --dir=3373 --station SG1"%sys.argv[0]
        print "example (liste): python %s -o liste -l /home/hagolle/LANDSAT/liste_landsat8_site.txt -u usgs.txt"%sys.argv[0]
        sys.exit(-1)
    else:
        usage = "usage: %prog [options] "
        parser = OptionParser(usage=usage)
        parser.add_option("-o", "--option", dest="option", action="store", type="choice", \
                          help="scene or liste", choices=['scene','liste','catalog'], default=None)
        parser.add_option("-l", "--liste", dest="fic_liste", action="store", type="string", \
                          help="list filename", default=None)
        parser.add_option("-s", "--scene", dest="scene", action="store", type="string", \
                          help="WRS2 coordinates of scene (ex 198030)", default=None)
        parser.add_option("-d", "--start_date", dest="start_date", action="store", type="string", \
                          help="start date, fmt('20131223')")
        parser.add_option("-f", "--end_date", dest="end_date", action="store", type="string", \
                          help="end date, fmt('20131223')")
        parser.add_option("-c", "--cloudcover", dest="clouds", action="store", type="float", \
                          help="Set a limit to the cloud cover of the image", default=None)
        parser.add_option("-u", "--usgs_passwd", dest="usgs", action="store", type="string", \
                          help="USGS earthexplorer account and password file")
        parser.add_option("-p", "--proxy_passwd", dest="proxy", action="store", type="string", \
                          help="Proxy account and password file")
        parser.add_option("-z", "--unzip", dest="unzip", action="store", type="string", \
                          help="Unzip downloaded tgz file", default=None)
        parser.add_option("-b", "--sat", dest="bird", action="store", type="choice", \
                          help="Which satellite are you looking for", choices=['LT5','LE7','LC8'], default='LC8')
        parser.add_option("--output", dest="output", action="store", type="string", \
                          help="Where to download files", default='/tmp/LANDSAT')
        parser.add_option("--outputcatalogs", dest="outputcatalogs", action="store", type="string", \
                          help="Where to download metadata catalog files", default='/tmp/LANDSAT')
        parser.add_option("--dir", dest="dir", action="store", type="string", \
                          help="Dir number where files are stored at USGS", default=None)
        parser.add_option("--station", dest="station", action="store", type="string", \
                          help="Station acronym (3 letters) of the receiving station where the file is downloaded", default=None)
        parser.add_option("-k", "--updatecatalogfiles", dest="updatecatalogfiles", action="store", type="choice", \
                          help="Update catalog metadata files", choices=['update','noupdate'], default='noupdate')



        (options, args) = parser.parse_args()
        parser.check_required("-o")
        if options.option=='scene':
            parser.check_required("-d")
            parser.check_required("-s")
            parser.check_required("-u")

        elif options.option=='liste':
            parser.check_required("-l")
            parser.check_required("-u")

    print options.station, options.dir
    rep=options.output
    if not os.path.exists(rep):
        os.mkdir(rep)
        if options.option=='liste':
            if not os.path.exists(rep+'/LISTE'):
                os.mkdir(rep+'/LISTE')
 
    # read password files
    try:
        f=file(options.usgs)
        (account,passwd)=f.readline().split(' ')
        if passwd.endswith('\n'):
            passwd=passwd[:-1]
        usgs={'account':account,'passwd':passwd}
        f.close()
    except :
        print "error with usgs password file"
        sys.exit(-2)

			

    if options.proxy != None :
        try:
            f=file(options.proxy)
            (user,passwd)=f.readline().split(' ')
            if passwd.endswith('\n'):
                passwd=passwd[:-1]
            host=f.readline()
            if host.endswith('\n'):
                host=host[:-1]
            port=f.readline()
            if port.endswith('\n'):
                port=port[:-1]
            proxy={'user':user,'pass':passwd,'host':host,'port':port}
            f.close()
        except :
            print "error with proxy password file"
            sys.exit(-3)


			
########## Downloading products by scene
    if options.option=='scene':
        produit=options.bird
        path=options.scene[0:3]
        row=options.scene[3:6]
    
        year_start =int(options.start_date[0:4])
        month_start=int(options.start_date[4:6])
        day_start  =int(options.start_date[6:8])
        date_start=datetime.datetime(year_start,month_start, day_start)
        global downloaded_ids		
        downloaded_ids=[]

        if options.end_date!= None:
            year_end =int(options.end_date[0:4])
            month_end=int(options.end_date[4:6])
            day_end  =int(options.end_date[6:8])
            date_end =datetime.datetime(year_end,month_end, day_end)
        else:
            date_end=datetime.datetime.now()
	
        if options.proxy!=None:
            connect_earthexplorer_proxy(proxy,usgs)
        else:
            connect_earthexplorer_no_proxy(usgs)	

        # rep_scene="%s/SCENES/%s_%s/GZ"%(rep,path,row)   #Original
        rep_scene="%s"%(rep)	#Modified vbnunes
        if not(os.path.exists(rep_scene)):
            os.makedirs(rep_scene)
			
        if produit.startswith('LC8'):
            repert='4923'
            stations=['LGN']
        if produit.startswith('LE7'):
            repert='3373'
            #repert='3372'
            stations=['EDC','SGS','AGS','ASN','SG1','CUB','COA']
        if produit.startswith('LT5'):
            repert='3119'
            stations=['GLC','ASA','KIR','MOR','KHC', 'PAC', 'KIS', 'CHM', 'LGS', 'MGR', 'COA', 'MPS', 'CUB']		
        
        if options.station !=None:
            stations=[options.station]
        if options.dir !=None:
            repert=options.dir
            
        check=1
		
        curr_date=next_overpass(date_start,int(path),produit)
 
        while (curr_date < date_end) and check==1:
            date_asc=curr_date.strftime("%Y%j")
            notfound = False		
            print 'Searching for images on (julian date): ' + date_asc + '...'
            curr_date=curr_date+datetime.timedelta(16)
            for station in stations:
                for version in ['00','01','02']:
                    nom_prod=produit+options.scene+date_asc+station+version
                    tgzfile=os.path.join(rep_scene,nom_prod+'.tgz')
                    lsdestdir=os.path.join(rep_scene,nom_prod)
                    url="http://earthexplorer.usgs.gov/download/%s/%s/STANDARD/EE"%(repert,nom_prod)
                    print url
                    if os.path.exists(lsdestdir):
                        print '   product %s already downloaded and unzipped'%nom_prod
                        downloaded_ids.append(nom_prod)
                        check = 0
                    elif os.path.isfile(tgzfile):
                        print '   product %s already downloaded'%nom_prod
                        if options.unzip!= None:
                            p=unzipimage(nom_prod,rep_scene)
                            if p==1 and options.clouds!= None:
                                check=check_cloud_limit(lsdestdir,options.clouds)
                                if check==0:
                                    downloaded_ids.append(nom_prod)
                    else:
                        try:
                            downloadChunks(url,"%s"%rep_scene,nom_prod+'.tgz')
                        except:
                            print '   product %s not found'%nom_prod
                            notfound = True
                        if notfound != True and options.unzip!= None:
                            p=unzipimage(nom_prod,rep_scene)
                            if p==1 and options.clouds!= None:
                                check=check_cloud_limit(lsdestdir,options.clouds)
                                if check==0:
                                    downloaded_ids.append(nom_prod)
        log(rep,downloaded_ids)

########## Downloading products via catalog metadata search
    if options.option=='catalog':
        produit=options.bird
        path=options.scene[0:3]
        row=options.scene[3:6]
    
        year_start =int(options.start_date[0:4])
        month_start=int(options.start_date[4:6])
        day_start  =int(options.start_date[6:8])
        date_start=datetime.datetime(year_start,month_start, day_start)
        downloaded_ids=[]

        if options.end_date!= None:
            year_end =int(options.end_date[0:4])
            month_end=int(options.end_date[4:6])
            day_end  =int(options.end_date[6:8])
            date_end =datetime.datetime(year_end,month_end, day_end)
        else:
            date_end=datetime.datetime.now()
	
        if options.proxy!=None:
            connect_earthexplorer_proxy(proxy,usgs)
        else:
            connect_earthexplorer_no_proxy(usgs)	

        # rep_scene="%s/SCENES/%s_%s/GZ"%(rep,path,row)   #Original
        rep_scene="%s"%(rep)	#Modified vbnunes
        if not(os.path.exists(rep_scene)):
            os.makedirs(rep_scene)

        getmetadatafiles(options.outputcatalogs, options.updatecatalogfiles)			
			
        if produit.startswith('LC8'):
            repert=['4923']
            collection_file=os.path.join(options.outputcatalogs,'LANDSAT_8.csv')
        if produit.startswith('LE7'):
            repert=['3372','3373']
            collection_file=os.path.join(options.outputcatalogs,'LANDSAT_ETM.csv')
        if produit.startswith('LT5'):
            repert=['3119','4345']
            if 2000<=int(year_start)<=2009:
                collection_file=os.path.join(options.outputcatalogs,'LANDSAT_TM-2000-2009.csv')
            if 2010<=int(year_start)<=2012:
                collection_file=os.path.join(options.outputcatalogs,'LANDSAT_TM-2010-2012.csv')

        check=1

        notfound = False

        nom_prod=find_in_collection_metadata(collection_file,options.clouds,date_start,date_end,path,row)
        if nom_prod=='':
            sys.exit('No image was found in the catalog with the given specifications! Exiting...')
        else:
            tgzfile=os.path.join(rep_scene,nom_prod+'.tgz')
            lsdestdir=os.path.join(rep_scene,nom_prod)

        if os.path.exists(lsdestdir):
            print '   product %s already downloaded and unzipped'%nom_prod
            downloaded_ids.append(nom_prod)
            check = 0
        elif os.path.isfile(tgzfile):
            print '   product %s already downloaded'%nom_prod
            if options.unzip != None:
                p=unzipimage(nom_prod,rep_scene)
                if p==1:
                    downloaded_ids.append(nom_prod)
                    check = 0
        else:
            while check == 1:
                for collectionid in repert:
                    url="http://earthexplorer.usgs.gov/download/%s/%s/STANDARD/EE"%(collectionid,nom_prod)
                    try:
                        downloadChunks(url,"%s"%rep_scene,nom_prod+'.tgz')
                    except:
                        print '   product %s not found'%nom_prod
                        notfound = True
                    if notfound != True and options.unzip != None:
                        p=unzipimage(nom_prod,rep_scene)
                        if p==1 and options.clouds != None:
                            check=check_cloud_limit(lsdestdir,options.clouds)
                            if check==0:
                                downloaded_ids.append(nom_prod)
        log(rep,downloaded_ids)

########## Download products from a list
    if options.option=='liste':
        with file(options.fic_liste) as f:
            lignes=f.readlines()
        for ligne in lignes:
            (site,nom_prod)=ligne.split(' ')
            produit=nom_prod.strip()
            print produit
            if produit.startswith('LC8'):
                repert='4923'
                stations=['LGN']
            if produit.startswith('LE7'):
                repert='3373'
                #repert='3372'
                stations=['EDC','SGS','AGS','ASN','SG1']
            if produit.startswith('LT5'):
                repert='3119'
                stations=['GLC','ASA','KIR','MOR','KHC', 'PAC', 'KIS', 'CHM', 'LGS', 'MGR', 'COA', 'MPS']
            if not os.path.exists(rep+'/'+site):
                os.mkdir(rep+'/'+site)
            url="http://earthexplorer.usgs.gov/download/%s/%s/STANDARD/EE"%(repert,produit)
            print 'url=',url
            try:
                if options.proxy != None:
                    connect_earthexplorer_proxy(proxy,usgs)
                else:
                    connect_earthexplorer_no_proxy(usgs)

                downloadChunks(url,rep+'/'+site,produit+'.tgz')
            except TypeError:
                print 'product %s not found'%produit

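The pattern worth lifting out of this example is how each candidate scene identifier is assembled: sensor prefix, WRS-2 path/row, Julian acquisition date, ground-station code, and a two-digit archive version are concatenated and substituted into the EarthExplorer URL template, and every combination is probed until one download succeeds. Below is a minimal sketch of that composition under the same URL convention; the helper name build_scene_urls and the sample inputs are illustrative, not part of the original script.

import datetime

def build_scene_urls(sensor, pathrow, date, stations, repert):
    # Yield candidate EarthExplorer URLs for one acquisition date.
    # sensor: 'LC8', 'LE7' or 'LT5'; pathrow: e.g. '198030';
    # stations: ground-station codes to try; repert: collection id, e.g. '4923'.
    date_asc = date.strftime("%Y%j")  # year + Julian day, as in the script
    for station in stations:
        for version in ['00', '01', '02']:
            scene_id = sensor + pathrow + date_asc + station + version
            url = "http://earthexplorer.usgs.gov/download/%s/%s/STANDARD/EE" % (repert, scene_id)
            yield scene_id, url

for scene_id, url in build_scene_urls('LC8', '198030', datetime.date(2015, 6, 1), ['LGN'], '4923'):
    print(scene_id + ' -> ' + url)
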
Example 47

Project: bumpversion
Source File: __init__.py
View license
def main(original_args=None):

    positionals, args = split_args_in_optional_and_positional(
      sys.argv[1:] if original_args is None else original_args
    )

    if len(positionals[1:]) > 2:
        warnings.warn("Giving multiple files on the command line will be deprecated, please use [bumpversion:file:...] in a config file.", PendingDeprecationWarning)

    parser1 = argparse.ArgumentParser(add_help=False)

    parser1.add_argument(
        '--config-file', metavar='FILE',
        default=argparse.SUPPRESS, required=False,
        help='Config file to read most of the variables from (default: .bumpversion.cfg)')

    parser1.add_argument(
        '--verbose', action='count', default=0,
        help='Print verbose logging to stderr', required=False)

    parser1.add_argument(
        '--list', action='store_true', default=False,
        help='List machine readable information', required=False)

    parser1.add_argument(
        '--allow-dirty', action='store_true', default=False,
        help="Don't abort if working directory is dirty", required=False)

    known_args, remaining_argv = parser1.parse_known_args(args)

    logformatter = logging.Formatter('%(message)s')

    if len(logger.handlers) == 0:
        ch = logging.StreamHandler(sys.stderr)
        ch.setFormatter(logformatter)
        logger.addHandler(ch)

    if len(logger_list.handlers) == 0:
        ch2 = logging.StreamHandler(sys.stdout)
        ch2.setFormatter(logformatter)
        logger_list.addHandler(ch2)

    if known_args.list:
        logger_list.setLevel(1)

    log_level = {
        0: logging.WARNING,
        1: logging.INFO,
        2: logging.DEBUG,
    }.get(known_args.verbose, logging.DEBUG)

    logger.setLevel(log_level)

    logger.debug("Starting {}".format(DESCRIPTION))

    defaults = {}
    vcs_info = {}

    for vcs in VCS:
        if vcs.is_usable():
            vcs_info.update(vcs.latest_tag_info())

    if 'current_version' in vcs_info:
        defaults['current_version'] = vcs_info['current_version']

    config = RawConfigParser('')

    # don't transform keys to lowercase (which would be the default)
    config.optionxform = lambda option: option

    config.add_section('bumpversion')

    explicit_config = hasattr(known_args, 'config_file')

    if explicit_config:
        config_file = known_args.config_file
    elif not os.path.exists('.bumpversion.cfg') and \
            os.path.exists('setup.cfg'):
        config_file = 'setup.cfg'
    else:
        config_file = '.bumpversion.cfg'

    config_file_exists = os.path.exists(config_file)

    part_configs = {}

    files = []

    if config_file_exists:

        logger.info("Reading config file {}:".format(config_file))
        logger.info(io.open(config_file, 'rt', encoding='utf-8').read())

        config.readfp(io.open(config_file, 'rt', encoding='utf-8'))

        log_config = StringIO()
        config.write(log_config)

        if 'files' in dict(config.items("bumpversion")):
            warnings.warn(
                "'files =' configuration is will be deprecated, please use [bumpversion:file:...]",
                PendingDeprecationWarning
            )

        defaults.update(dict(config.items("bumpversion")))

        for listvaluename in ("serialize",):
            try:
                value = config.get("bumpversion", listvaluename)
                defaults[listvaluename] = list(filter(None, (x.strip() for x in value.splitlines())))
            except NoOptionError:
                pass  # no default value then ;)

        for boolvaluename in ("commit", "tag", "dry_run"):
            try:
                defaults[boolvaluename] = config.getboolean(
                    "bumpversion", boolvaluename)
            except NoOptionError:
                pass  # no default value then ;)

        for section_name in config.sections():

            section_name_match = re.compile("^bumpversion:(file|part):(.+)").match(section_name)

            if not section_name_match:
                continue

            section_prefix, section_value = section_name_match.groups()

            section_config = dict(config.items(section_name))

            if section_prefix == "part":

                ThisVersionPartConfiguration = NumericVersionPartConfiguration

                if 'values' in section_config:
                    section_config['values'] = list(filter(None, (x.strip() for x in section_config['values'].splitlines())))
                    ThisVersionPartConfiguration = ConfiguredVersionPartConfiguration

                part_configs[section_value] = ThisVersionPartConfiguration(**section_config)

            elif section_prefix == "file":

                filename = section_value

                if 'serialize' in section_config:
                    section_config['serialize'] = list(filter(None, (x.strip() for x in section_config['serialize'].splitlines())))

                section_config['part_configs'] = part_configs

                if not 'parse' in section_config:
                    section_config['parse'] = defaults.get("parse", '(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)')

                if not 'serialize' in section_config:
                    section_config['serialize'] = defaults.get('serialize', [str('{major}.{minor}.{patch}')])

                if not 'search' in section_config:
                    section_config['search'] = defaults.get("search", '{current_version}')

                if not 'replace' in section_config:
                    section_config['replace'] = defaults.get("replace", '{new_version}')

                files.append(ConfiguredFile(filename, VersionConfig(**section_config)))

    else:
        message = "Could not read config file at {}".format(config_file)
        if explicit_config:
            raise argparse.ArgumentTypeError(message)
        else:
            logger.info(message)

    parser2 = argparse.ArgumentParser(prog='bumpversion', add_help=False, parents=[parser1])
    parser2.set_defaults(**defaults)

    parser2.add_argument('--current-version', metavar='VERSION',
                         help='Version that needs to be updated', required=False)
    parser2.add_argument('--parse', metavar='REGEX',
                         help='Regex parsing the version string',
                         default=defaults.get("parse", '(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)'))
    parser2.add_argument('--serialize', metavar='FORMAT',
                         action=DiscardDefaultIfSpecifiedAppendAction,
                         help='How to format what is parsed back to a version',
                         default=defaults.get("serialize", [str('{major}.{minor}.{patch}')]))
    parser2.add_argument('--search', metavar='SEARCH',
                         help='Template for complete string to search',
                         default=defaults.get("search", '{current_version}'))
    parser2.add_argument('--replace', metavar='REPLACE',
                         help='Template for complete string to replace',
                         default=defaults.get("replace", '{new_version}'))

    known_args, remaining_argv = parser2.parse_known_args(args)

    defaults.update(vars(known_args))

    assert type(known_args.serialize) == list

    context = dict(list(time_context.items()) + list(prefixed_environ().items()) + list(vcs_info.items()))

    try:
        vc = VersionConfig(
            parse=known_args.parse,
            serialize=known_args.serialize,
            search=known_args.search,
            replace=known_args.replace,
            part_configs=part_configs,
        )
    except sre_constants.error as e:
        sys.exit(1)

    current_version = vc.parse(known_args.current_version) if known_args.current_version else None

    new_version = None

    if not 'new_version' in defaults and known_args.current_version:
        try:
            if current_version and len(positionals) > 0:
                logger.info("Attempting to increment part '{}'".format(positionals[0]))
                new_version = current_version.bump(positionals[0], vc.order())
                logger.info("Values are now: " + keyvaluestring(new_version._values))
                defaults['new_version'] = vc.serialize(new_version, context)
        except MissingValueForSerializationException as e:
            logger.info("Opportunistic finding of new_version failed: " + e.message)
        except IncompleteVersionRepresenationException as e:
            logger.info("Opportunistic finding of new_version failed: " + e.message)
        except KeyError as e:
            logger.info("Opportunistic finding of new_version failed")

    parser3 = argparse.ArgumentParser(
        prog='bumpversion',
        description=DESCRIPTION,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        conflict_handler='resolve',
        parents=[parser2],
    )

    parser3.set_defaults(**defaults)

    parser3.add_argument('--current-version', metavar='VERSION',
                         help='Version that needs to be updated',
                         required=not 'current_version' in defaults)
    parser3.add_argument('--dry-run', '-n', action='store_true',
                         default=False, help="Don't write any files, just pretend.")
    parser3.add_argument('--new-version', metavar='VERSION',
                         help='New version that should be in the files',
                         required=not 'new_version' in defaults)

    commitgroup = parser3.add_mutually_exclusive_group()

    commitgroup.add_argument('--commit', action='store_true', dest="commit",
                             help='Commit to version control', default=defaults.get("commit", False))
    commitgroup.add_argument('--no-commit', action='store_false', dest="commit",
                             help='Do not commit to version control', default=argparse.SUPPRESS)

    taggroup = parser3.add_mutually_exclusive_group()

    taggroup.add_argument('--tag', action='store_true', dest="tag", default=defaults.get("tag", False),
                          help='Create a tag in version control')
    taggroup.add_argument('--no-tag', action='store_false', dest="tag",
                          help='Do not create a tag in version control', default=argparse.SUPPRESS)

    parser3.add_argument('--tag-name', metavar='TAG_NAME',
                         help='Tag name (only works with --tag)',
                         default=defaults.get('tag_name', 'v{new_version}'))

    parser3.add_argument('--message', '-m', metavar='COMMIT_MSG',
                         help='Commit message',
                         default=defaults.get('message', 'Bump version: {current_version} → {new_version}'))


    file_names = []
    if 'files' in defaults:
        assert defaults['files'] != None
        file_names = defaults['files'].split(' ')

    parser3.add_argument('part',
                         help='Part of the version to be bumped.')
    parser3.add_argument('files', metavar='file',
                         nargs='*',
                         help='Files to change', default=file_names)

    args = parser3.parse_args(remaining_argv + positionals)

    if args.dry_run:
        logger.info("Dry run active, won't touch any files.")
    
    if args.new_version:
        new_version = vc.parse(args.new_version)

    logger.info("New version will be '{}'".format(args.new_version))

    file_names = file_names or positionals[1:]

    for file_name in file_names:
        files.append(ConfiguredFile(file_name, vc))

    for vcs in VCS:
        if vcs.is_usable():
            try:
                vcs.assert_nondirty()
            except WorkingDirectoryIsDirtyException as e:
                if not defaults['allow_dirty']:
                    logger.warn(
                        "{}\n\nUse --allow-dirty to override this if you know what you're doing.".format(e.message))
                    raise
            break
        else:
            vcs = None

    # make sure files exist and contain version string

    logger.info("Asserting files {} contain the version string:".format(", ".join([str(f) for f in files])))

    for f in files:
        f.should_contain_version(current_version, context)

    # change version string in files
    for f in files:
        f.replace(current_version, new_version, context, args.dry_run)

    commit_files = [f.path for f in files]

    config.set('bumpversion', 'new_version', args.new_version)

    for key, value in config.items('bumpversion'):
        logger_list.info("{}={}".format(key, value))

    config.remove_option('bumpversion', 'new_version')

    config.set('bumpversion', 'current_version', args.new_version)

    new_config = StringIO()

    try:
        write_to_config_file = (not args.dry_run) and config_file_exists

        logger.info("{} to config file {}:".format(
            "Would write" if not write_to_config_file else "Writing",
            config_file,
        ))

        config.write(new_config)
        logger.info(new_config.getvalue())

        if write_to_config_file:
            with io.open(config_file, 'wb') as f:
                f.write(new_config.getvalue().encode('utf-8'))

    except UnicodeEncodeError:
        warnings.warn(
            "Unable to write UTF-8 to config file, because of an old configparser version. "
            "Update with `pip install --upgrade configparser`."
        )

    if config_file_exists:
        commit_files.append(config_file)

    if not vcs:
        return

    assert vcs.is_usable(), "Did find '{}' unusable, unable to commit.".format(vcs.__name__)

    do_commit = (not args.dry_run) and args.commit
    do_tag = (not args.dry_run) and args.tag

    logger.info("{} {} commit".format(
        "Would prepare" if not do_commit else "Preparing",
        vcs.__name__,
    ))

    for path in commit_files:
        logger.info("{} changes in file '{}' to {}".format(
            "Would add" if not do_commit else "Adding",
            path,
            vcs.__name__,
        ))

        if do_commit:
            vcs.add_path(path)

    vcs_context = {
        "current_version": args.current_version,
        "new_version": args.new_version,
    }
    vcs_context.update(time_context)
    vcs_context.update(prefixed_environ())

    commit_message = args.message.format(**vcs_context)

    logger.info("{} to {} with message '{}'".format(
        "Would commit" if not do_commit else "Committing",
        vcs.__name__,
        commit_message,
    ))

    if do_commit:
        vcs.commit(message=commit_message)

    tag_name = args.tag_name.format(**vcs_context)
    logger.info("{} '{}' in {}".format(
        "Would tag" if not do_tag else "Tagging",
        tag_name,
        vcs.__name__
    ))

    if do_tag:
        vcs.tag(tag_name)

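The structure of this example is its most reusable part: three argparse parsers are cascaded, with parse_known_args feeding the leftover argv into the next stage, so values read from a config file become defaults that explicit command-line flags can still override. A minimal sketch of that cascade follows; the option names and the hard-coded defaults dict are stand-ins for bumpversion's actual config reading.

import argparse

def parse(argv):
    # Stage 1: only discover the meta-options (which config file to read).
    pre = argparse.ArgumentParser(add_help=False)
    pre.add_argument('--config-file', default='.bumpversion.cfg')
    known, remaining = pre.parse_known_args(argv)

    # Stand-in for reading defaults out of known.config_file.
    defaults = {'value': 'from-config'}

    # Stage 2: the full parser inherits stage 1's options and takes the
    # config values as defaults, so explicit flags still win over the file.
    full = argparse.ArgumentParser(parents=[pre])
    full.set_defaults(**defaults)
    full.add_argument('--value')
    return full.parse_args(remaining)

print(parse(['--config-file', 'setup.cfg']).value)  # from-config
print(parse(['--value', 'from-cli']).value)         # from-cli
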
Example 48

View license
def main():
	
	import struct, json, time, sys, os, shutil, datetime, base64

	parserversion = "0.9.12.0"
	
	global rawdata, tupledata, data, structures, numoffrags
	global filename_source, filename_target
	global option_server, option_format, option_tanks
	
	filename_source = ""
	option_raw = 0
	option_format = 0
	option_server = 0
	option_frags = 1
	option_tanks = 0
	
	for argument in sys.argv[1:]:
		if argument == "-s":
			option_server = 1
			#print '-- SERVER mode enabled'
		elif argument == "-r":
			option_raw = 1
			#print '-- RAW mode enabled'
		elif argument == "-f":
			option_format = 1
			#print '-- FORMAT mode enabled'
		elif argument == "-k":
			option_frags = 0
			#print '-- FRAGS will be excluded'
		elif argument == "-t":
			option_tanks = 1
			#print '-- TANK info will be included'
		else:
			# dossier file, if more than one get only first
			if filename_source =='' and os.path.isfile(argument):
				filename_source = argument
	
	if filename_source == "":
		usage()
		sys.exit(2)
		
	printmessage('############################################')
	printmessage('###### WoTDC2J ' + parserversion)
	

	printmessage('Processing ' + filename_source)
	

	if not os.path.exists(filename_source) or not os.path.isfile(filename_source) or not os.access(filename_source, os.R_OK):
		catch_fatal('Dossier file does not exist')
		sys.exit(1)

	if os.path.getsize(filename_source) == 0:
		catch_fatal('Dossier file size is zero')
		sys.exit(1)
		
	filename_target = os.path.splitext(filename_source)[0]
	filename_target = filename_target + '.json'

	if os.path.exists(filename_target) and os.path.isfile(filename_target) and os.access(filename_target, os.R_OK):
		try:
			os.remove(filename_target)
		except:
			catch_fatal('Cannot remove target file ' + filename_target)

			
	cachefile = open(filename_source, 'rb')

	try:
		from SafeUnpickler import SafeUnpickler
		dossierversion, dossierCache = SafeUnpickler.load(cachefile)
	except Exception, e:
		exitwitherror('Dossier cannot be read (pickle could not be read) ' + e.message)

	if not 'dossierCache' in locals():
		exitwitherror('Dossier cannot be read (dossierCache does not exist)')

	printmessage("Dossier version " + str(dossierversion))
	
	tankitems = [(k, v) for k, v in dossierCache.items()]

	dossier = dict()
		
	dossierheader = dict()
	dossierheader['dossierversion'] = str(dossierversion)
	dossierheader['parser'] = 'http://www.vbaddict.net'
	dossierheader['parserversion'] = parserversion
	dossierheader['tankcount'] = len(tankitems)
	

	
	base32name = "?;?"
	if option_server == 0:
		filename_base = os.path.splitext(os.path.basename(filename_source))[0]
		try:
			base32name = base64.b32decode(filename_base)
		except Exception, e:
			pass
			#printmessage('cannot decode filename ' + filename_base + ': ' + e.message)


	dossierheader['server'] = base32name.split(';', 1)[0]
	dossierheader['username'] = base32name.split(';', 1)[1]
	
	
	if option_server == 0:
		dossierheader['date'] = time.mktime(time.localtime())
	
	tanksdata = load_tanksdata()
	structures = load_structures()
	
	tanks = dict()
	tanks_v2 = dict()
	
	battleCount_15 = 0
	battleCount_7 = 0
	battleCount_historical = 0
	battleCount_company = 0
	battleCount_clan = 0
	battleCount_fortBattles = 0
	battleCount_fortSorties = 0
	battleCount_rated7x7 = 0
	battleCount_globalMap = 0
	battleCount_fallout = 0
	
	for tankitem in tankitems:
		
		if len(tankitem) < 2:
			printmessage('Invalid tankdata')
			continue

		if len(tankitem[0]) < 2:
			printmessage('Invalid tankdata')
			continue
			
		rawdata = dict()
		
		try:
			data = tankitem[1][1]
		except Exception, e:
			printmessage('Invalid tankitem ' + str(e.message))
			continue
			
		tankstruct = str(len(data)) + 'B'
		tupledata = struct.unpack(tankstruct, data)
		tankversion = getdata("tankversion", 0, 1)
		
		#if tankversion != 87:
		#printmessage("Tankversion " + str(tankversion))
		#	continue
		
		if tankversion not in structures:
			write_to_log('unsupported tankversion ' + str(tankversion))
			continue				

		if not isinstance(tankitem[0][1], (int)):
			printmessage('Invalid tankdata')
			continue
	
		try:
			tankid = tankitem[0][1] >> 8 & 65535
		except Exception, e:
			printmessage('cannot get tankid ' + e.message)
			continue
						
		try:
			countryid = tankitem[0][1] >> 4 & 15
		except Exception, e:
			printmessage('cannot get countryid ' + e.message)
			continue
			
		#For debugging purposes
		#if not (countryid==4 and tankid==19):
		#	continue
		
		for m in xrange(0,len(tupledata)):
			rawdata[m] = tupledata[m]
		
		if len(tupledata) == 0:
			continue

		if option_server == 0:
			tanktitle = get_tank_data(tanksdata, countryid, tankid, "title")
		else:
			tanktitle = str(countryid) + '_' + str(tankid)

		fragslist = []
		if tankversion >= 65:
			tank_v2 = dict()
			
			if tankversion == 65:
				blocks = ('a15x15', 'a15x15_2', 'clan', 'clan2', 'company', 'company2', 'a7x7', 'achievements', 'frags', 'total', 'max15x15', 'max7x7')
				
			if tankversion == 69:
				blocks = ('a15x15', 'a15x15_2', 'clan', 'clan2', 'company', 'company2', 'a7x7', 'achievements', 'frags', 'total', 'max15x15', 'max7x7', 'playerInscriptions', 'playerEmblems', 'camouflages', 'compensation', 'achievements7x7')

			if tankversion == 77:
				blocks = ('a15x15', 'a15x15_2', 'clan', 'clan2', 'company', 'company2', 'a7x7', 'achievements', 'frags', 'total', 'max15x15', 'max7x7', 'playerInscriptions', 'playerEmblems', 'camouflages', 'compensation', 'achievements7x7', 'historical', 'maxHistorical')

			if tankversion == 81:
				blocks = ('a15x15', 'a15x15_2', 'clan', 'clan2', 'company', 'company2', 'a7x7', 'achievements', 'frags', 'total', 'max15x15', 'max7x7', 'playerInscriptions', 'playerEmblems', 'camouflages', 'compensation', 'achievements7x7', 'historical', 'maxHistorical', 'historicalAchievements', 'fortBattles', 'maxFortBattles', 'fortSorties', 'maxFortSorties', 'fortAchievements')

			if tankversion in [85, 87]:
				blocks = ('a15x15', 'a15x15_2', 'clan', 'clan2', 'company', 'company2', 'a7x7', 'achievements', 'frags', 'total', 'max15x15', 'max7x7', 'playerInscriptions', 'playerEmblems', 'camouflages', 'compensation', 'achievements7x7', 'historical', 'maxHistorical', 'historicalAchievements', 'fortBattles', 'maxFortBattles', 'fortSorties', 'maxFortSorties', 'fortAchievements', 'singleAchievements', 'clanAchievements')

			if tankversion in [88,89]:
				blocks = ('a15x15', 'a15x15_2', 'clan', 'clan2', 'company', 'company2', 'a7x7', 'achievements', 'frags', 'total', 'max15x15', 'max7x7', 'playerInscriptions', 'playerEmblems', 'camouflages', 'compensation', 'achievements7x7', 'historical', 'maxHistorical', 'historicalAchievements', 'fortBattles', 'maxFortBattles', 'fortSorties', 'maxFortSorties', 'fortAchievements', 'singleAchievements', 'clanAchievements', 'rated7x7', 'maxRated7x7')

			if tankversion == 92:
				blocks = ('a15x15', 'a15x15_2', 'clan', 'clan2', 'company', 'company2', 'a7x7', 'achievements', 'frags', 'total', 'max15x15', 'max7x7', 'playerInscriptions', 'playerEmblems', 'camouflages', 'compensation', 'achievements7x7', 'historical', 'maxHistorical', 'historicalAchievements', 'fortBattles', 'maxFortBattles', 'fortSorties', 'maxFortSorties', 'fortAchievements', 'singleAchievements', 'clanAchievements', 'rated7x7', 'maxRated7x7', 'globalMapCommon', 'maxGlobalMapCommon')
			
			if tankversion == 94:
				blocks = ('a15x15', 'a15x15_2', 'clan', 'clan2', 'company', 'company2', 'a7x7', 'achievements', 'frags', 'total', 'max15x15', 'max7x7', 'playerInscriptions', 'playerEmblems', 'camouflages', 'compensation', 'achievements7x7', 'historical', 'maxHistorical', 'historicalAchievements', 'fortBattles', 'maxFortBattles', 'fortSorties', 'maxFortSorties', 'fortAchievements', 'singleAchievements', 'clanAchievements', 'rated7x7', 'maxRated7x7', 'globalMapCommon', 'maxGlobalMapCommon', 'fallout', 'maxFallout', 'falloutAchievements')
				
			blockcount = len(list(blocks))+1

			newbaseoffset = (blockcount * 2)
			header = struct.unpack_from('<' + 'H' * blockcount, data)
			blocksizes = list(header[1:])
			blocknumber = 0
			numoffrags_list = 0
			numoffrags_a15x15 = 0
			numoffrags_a7x7 = 0
			numoffrags_historical = 0
			numoffrags_fortBattles = 0
			numoffrags_fortSorties = 0
			numoffrags_rated7x7 = 0
			numoffrags_globalMap = 0
			numoffrags_fallout = 0

			for blockname in blocks:

				if blocksizes[blocknumber] > 0:
					if blockname == 'frags':
						if option_frags == 1:
							fmt = '<' + 'IH' * (blocksizes[blocknumber]/6)
							fragsdata = struct.unpack_from(fmt, data, newbaseoffset)
							index = 0

							for i in xrange((blocksizes[blocknumber]/6)):
								compDescr, amount = (fragsdata[index], fragsdata[index + 1])
								numoffrags_list += amount	
								frag_countryid, frag_tankid, frag_tanktitle = get_tank_details(compDescr, tanksdata)
								tankfrag = [frag_countryid, frag_tankid, amount, frag_tanktitle]
								fragslist.append(tankfrag)
								index += 2							

							for i in xrange((blocksizes[blocknumber])):
								rawdata[newbaseoffset+i] = str(tupledata[newbaseoffset+i]) + " / Frags"
								
							tank_v2['fragslist'] = fragslist
				
						newbaseoffset += blocksizes[blocknumber] 

						
					else:
						oldbaseoffset = newbaseoffset
						structureddata = getstructureddata(blockname, tankversion, newbaseoffset)
						structureddata = keepCompatibility(structureddata)
						newbaseoffset = oldbaseoffset+blocksizes[blocknumber]
						tank_v2[blockname] = structureddata 

				blocknumber +=1
			if contains_block('max15x15', tank_v2):
				if 'maxXP' in tank_v2['max15x15']:
					if tank_v2['max15x15']['maxXP']==0:
						tank_v2['max15x15']['maxXP'] = 1
						
				if 'maxFrags' in tank_v2['max15x15']:
					if tank_v2['max15x15']['maxFrags']==0:
						tank_v2['max15x15']['maxFrags'] = 1

				
			if contains_block('company', tank_v2):
				if 'battlesCount' in tank_v2['company']:
					battleCount_company += tank_v2['company']['battlesCount']
			
			if contains_block('clan', tank_v2):
				if 'battlesCount' in tank_v2['clan']:
					battleCount_clan += tank_v2['clan']['battlesCount']

			if contains_block('a15x15', tank_v2):
				
				if 'battlesCount' in tank_v2['a15x15']:
					battleCount_15 += tank_v2['a15x15']['battlesCount']
					
				if 'frags' in tank_v2['a15x15']:
					numoffrags_a15x15 = int(tank_v2['a15x15']['frags'])

			if contains_block('a7x7', tank_v2):
				
				if 'battlesCount' in tank_v2['a7x7']:
					battleCount_7 += tank_v2['a7x7']['battlesCount']
				
				if 'frags' in tank_v2['a7x7']:
					numoffrags_a7x7 = int(tank_v2['a7x7']['frags'])
			
			if contains_block('historical', tank_v2):
				
				if 'battlesCount' in tank_v2['historical']:
					battleCount_historical += tank_v2['historical']['battlesCount']
				
				if 'frags' in tank_v2['historical']:
					numoffrags_historical = int(tank_v2['historical']['frags'])

			if contains_block('fortBattles', tank_v2):
				
				if 'battlesCount' in tank_v2['fortBattles']:
					battleCount_fortBattles += tank_v2['fortBattles']['battlesCount']
				
				if 'frags' in tank_v2['fortBattles']:
					numoffrags_fortBattles = int(tank_v2['fortBattles']['frags'])
					
			if contains_block('fortSorties', tank_v2):
				
				if 'battlesCount' in tank_v2['fortSorties']:
					battleCount_fortSorties += tank_v2['fortSorties']['battlesCount']
				
				if 'frags' in tank_v2['fortSorties']:
					numoffrags_fortSorties = int(tank_v2['fortSorties']['frags'])

			if contains_block('rated7x7', tank_v2):
				
				if 'battlesCount' in tank_v2['rated7x7']:
					battleCount_rated7x7 += tank_v2['rated7x7']['battlesCount']
				
				if 'frags' in tank_v2['rated7x7']:
					numoffrags_rated7x7 = int(tank_v2['rated7x7']['frags'])
					
			if contains_block('globalMapCommon', tank_v2):
				
				if 'battlesCount' in tank_v2['globalMapCommon']:
					battleCount_globalMap += tank_v2['globalMapCommon']['battlesCount']
				
				if 'frags' in tank_v2['globalMapCommon']:
					numoffrags_globalMap = int(tank_v2['globalMapCommon']['frags'])
				
			if contains_block('fallout', tank_v2):
				
				if 'battlesCount' in tank_v2['fallout']:
					battleCount_fallout += tank_v2['fallout']['battlesCount']
				
				if 'frags' in tank_v2['fallout']:
					numoffrags_fallout = int(tank_v2['fallout']['frags'])
				
			if option_frags == 1:

				try:
					if numoffrags_list <> (numoffrags_a15x15 + numoffrags_a7x7 + numoffrags_historical + numoffrags_fortBattles + numoffrags_fortSorties + numoffrags_rated7x7 + numoffrags_globalMap + numoffrags_fallout):
						pass
						#write_to_log('Wrong number of frags for ' + str(tanktitle) + ', ' + str(tankversion) + ': ' + str(numoffrags_list) + ' = ' + str(numoffrags_a15x15) + ' + ' + str(numoffrags_a7x7) + ' + ' + str(numoffrags_historical) + ' + ' + str(numoffrags_fortBattles) + ' + ' + str(numoffrags_fortSorties) + ' + ' + str(numoffrags_rated7x7))
				except Exception, e:
						write_to_log('Error processing frags: ' + e.message)
		
			
				
			tank_v2['common'] = {"countryid": countryid,
				"tankid": tankid,
				"tanktitle": tanktitle,
				"compactDescr": tankitem[0][1],
				"type": get_tank_data(tanksdata, countryid, tankid, "type"),
				"premium": get_tank_data(tanksdata, countryid, tankid, "premium"),
				"tier": get_tank_data(tanksdata, countryid, tankid, "tier"),
				"updated": tankitem[1][0],
				"updatedR": datetime.datetime.fromtimestamp(int(tankitem[1][0])).strftime('%Y-%m-%d %H:%M:%S'),
				"creationTime": tank_v2['total']['creationTime'],
				"creationTimeR": datetime.datetime.fromtimestamp(int(tank_v2['total']['creationTime'])).strftime('%Y-%m-%d %H:%M:%S'),
				"lastBattleTime": tank_v2['total']['lastBattleTime'],
				"lastBattleTimeR": datetime.datetime.fromtimestamp(int(tank_v2['total']['lastBattleTime'])).strftime('%Y-%m-%d %H:%M:%S'),
				"basedonversion": tankversion,
				"frags":  numoffrags_a15x15,
				"frags_7x7":  numoffrags_a7x7,
				"frags_historical":  numoffrags_historical,
				"frags_fortBattles":  numoffrags_fortBattles,
				"frags_fortSorties":  numoffrags_fortSorties,
				"frags_compare": numoffrags_list,
				"has_15x15": contains_block("a15x15", tank_v2),
				"has_7x7": contains_block("a7x7", tank_v2),
				"has_historical": contains_block("historical", tank_v2),
				"has_clan": contains_block("clan", tank_v2),
				"has_company": contains_block("company", tank_v2),
				"has_fort": contains_block("fortBattles", tank_v2),
				"has_sortie": contains_block("fortSorties", tank_v2)
				
			}
			
			if option_raw == 1:
				tank_v2['rawdata'] = rawdata

			tanks_v2[tanktitle] = tank_v2
			
			
		if tankversion < 65:
			if tankversion >= 20:
				company = getstructureddata("company", tankversion)
				battleCount_company += company['battlesCount']
				clan = getstructureddata("clan", tankversion)
				battleCount_clan += clan['battlesCount']
			
			numoffrags = 0
	
			structure = getstructureddata("structure", tankversion)


			
			if 'fragspos' not in structure:
				write_to_log('tankversion ' + str(tankversion) + ' not in JSON')
				continue
			
			if option_frags == 1 and tankversion >= 17:
				fragslist = getdata_fragslist(tankversion, tanksdata, structure['fragspos'])
	
			tankdata = getstructureddata("tankdata", tankversion)
			battleCount_15 += tankdata['battlesCount']
	
			if not "creationTime" in tankdata:
				tankdata['creationTime'] = 1356998400
	
			common = {"countryid": countryid,
				"tankid": tankid,
				"tanktitle": tanktitle,
				"compactDescr": tankitem[0][1],
				"type": get_tank_data(tanksdata, countryid, tankid, "type"),
				"premium": get_tank_data(tanksdata, countryid, tankid, "premium"),
				"tier": get_tank_data(tanksdata, countryid, tankid, "tier"),
				"updated": tankitem[1][0],
				"updatedR": datetime.datetime.fromtimestamp(int(tankitem[1][0])).strftime('%Y-%m-%d %H:%M:%S'),
				"creationTime": tankdata['creationTime'],
				"creationTimeR": datetime.datetime.fromtimestamp(int(tankdata['creationTime'])).strftime('%Y-%m-%d %H:%M:%S'),
				"lastBattleTime": tankdata['lastBattleTime'],
				"lastBattleTimeR": datetime.datetime.fromtimestamp(int(tankdata['lastBattleTime'])).strftime('%Y-%m-%d %H:%M:%S'),
				"basedonversion": tankversion,
				"frags": tankdata['frags'],
				"frags_compare": numoffrags
			}
	
			if option_frags == 1 and tankversion >= 17:
				try:
					if tankdata['frags'] <> numoffrags:
						printmessage('Wrong number of frags!')
				except Exception, e:
						write_to_log('Error processing frags: ' + e.message)
	
			series = getstructureddata("series", tankversion)
	
			special = getstructureddata("special", tankversion)
	
			battle = getstructureddata("battle", tankversion)
	
			major = getstructureddata("major", tankversion)
	
			epic = getstructureddata("epic", tankversion)
	
	
	
			tank = dict()
			
			tank['tankdata'] = tankdata
			tank['common'] = common
	
			if tankversion >= 20:
				tank['series'] = series
				tank['battle'] = battle
				tank['special'] = special
				tank['epic'] = epic
				tank['major'] = major
				tank['clan'] = clan
				tank['company'] = company
				
			if option_frags == 1:
				tank['kills'] = fragslist
			
			if option_raw == 1:
				tank['rawdata'] = rawdata
			
			tanks[tanktitle] = tank
			#tanks = sorted(tanks.values())

	
	dossierheader['battleCount_15'] = battleCount_15	
	dossierheader['battleCount_7'] = battleCount_7
	dossierheader['battleCount_historical'] = battleCount_historical
	dossierheader['battleCount_company'] = battleCount_company
	dossierheader['battleCount_clan'] = battleCount_clan

	dossierheader['result'] = "ok"
	dossierheader['message'] = "ok"
	
	dossier['header'] = dossierheader
	dossier['tanks'] = tanks
	dossier['tanks_v2'] = tanks_v2

	dumpjson(dossier)

	printmessage('###### Done!')
	printmessage('')
	sys.exit(0)

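The binary layout this parser walks is a simple length-prefixed one: for tankversion >= 65, a run of little-endian uint16 words (one leading word plus one size per named block) forms the header, and the block payloads follow back to back. Here is a minimal sketch of that decoding under the same header convention; the sample bytes and block names are made up for illustration.

import struct

def split_blocks(data, blocknames):
    # Header: one leading uint16, then one uint16 size per block,
    # all little-endian; payloads follow immediately after the header.
    blockcount = len(blocknames) + 1
    header = struct.unpack_from('<' + 'H' * blockcount, data)
    sizes = header[1:]
    offset = blockcount * 2
    blocks = {}
    for name, size in zip(blocknames, sizes):
        blocks[name] = data[offset:offset + size]
        offset += size
    return blocks

# Illustrative record: leading word 0, then blocks of 2 and 3 bytes.
sample = struct.pack('<HHH', 0, 2, 3) + b'ab' + b'xyz'
print(split_blocks(sample, ['first', 'second']))  # {'first': b'ab', 'second': b'xyz'}
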
Example 49

Project: pyicloud
Source File: cmdline.py
View license
def main(args=None):
    """Main commandline entrypoint"""
    if args is None:
        args = sys.argv[1:]

    parser = argparse.ArgumentParser(
        description="Find My iPhone CommandLine Tool")

    parser.add_argument(
        "--username",
        action="store",
        dest="username",
        default="",
        help="Apple ID to Use"
    )
    parser.add_argument(
        "--password",
        action="store",
        dest="password",
        default="",
        help=(
            "Apple ID Password to Use; if unspecified, password will be "
            "fetched from the system keyring."
        )
    )
    parser.add_argument(
        "-n",
        "--non-interactive",
        action="store_false",
        dest="interactive",
        default=True,
        help="Disable interactive prompts."
    )
    parser.add_argument(
        "--delete-from-keyring",
        action="store_true",
        dest="delete_from_keyring",
        default=False,
        help="Delete stored password in system keyring for this username.",
    )
    parser.add_argument(
        "--list",
        action="store_true",
        dest="list",
        default=False,
        help="Short Listings for Device(s) associated with account",
    )
    parser.add_argument(
        "--llist",
        action="store_true",
        dest="longlist",
        default=False,
        help="Detailed Listings for Device(s) associated with account",
    )
    parser.add_argument(
        "--locate",
        action="store_true",
        dest="locate",
        default=False,
        help="Retrieve Location for the iDevice (non-exclusive).",
    )

    #   Restrict actions to a specific devices UID / DID
    parser.add_argument(
        "--device",
        action="store",
        dest="device_id",
        default=False,
        help="Only effect this device",
    )

    #   Trigger Sound Alert
    parser.add_argument(
        "--sound",
        action="store_true",
        dest="sound",
        default=False,
        help="Play a sound on the device",
    )

    #   Trigger Message w/Sound Alert
    parser.add_argument(
        "--message",
        action="store",
        dest="message",
        default=False,
        help="Optional Text Message to display with a sound",
    )

    #   Trigger Message (without Sound) Alert
    parser.add_argument(
        "--silentmessage",
        action="store",
        dest="silentmessage",
        default=False,
        help="Optional Text Message to display with no sounds",
    )

    #   Lost Mode
    parser.add_argument(
        "--lostmode",
        action="store_true",
        dest="lostmode",
        default=False,
        help="Enable Lost mode for the device",
    )
    parser.add_argument(
        "--lostphone",
        action="store",
        dest="lost_phone",
        default=False,
        help="Phone Number allowed to call when lost mode is enabled",
    )
    parser.add_argument(
        "--lostpassword",
        action="store",
        dest="lost_password",
        default=False,
        help="Forcibly active this passcode on the idevice",
    )
    parser.add_argument(
        "--lostmessage",
        action="store",
        dest="lost_message",
        default="",
        help="Forcibly display this message when activating lost mode.",
    )

    #   Output device data to a pickle file
    parser.add_argument(
        "--outputfile",
        action="store_true",
        dest="output_to_file",
        default="",
        help="Save device data to a file in the current directory.",
    )

    command_line = parser.parse_args(args)

    username = command_line.username
    password = command_line.password

    if username and command_line.delete_from_keyring:
        utils.delete_password_in_keyring(username)

    failure_count = 0
    while True:
        # Which password we use is determined by your username, so we
        # do need to check for this first and separately.
        if not username:
            parser.error('No username supplied')

        if not password:
            password = utils.get_password(
                username,
                interactive=command_line.interactive
            )

        if not password:
            parser.error('No password supplied')

        try:
            api = pyicloud.PyiCloudService(
                username.strip(),
                password.strip()
            )
            if (
                not utils.password_exists_in_keyring(username) and
                command_line.interactive and
                confirm("Save password in keyring? ")
            ):
                utils.store_password_in_keyring(username, password)
            break
        except pyicloud.exceptions.PyiCloudFailedLoginException:
            # If they have a stored password; we just used it and
            # it did not work; let's delete it if there is one.
            if utils.password_exists_in_keyring(username):
                utils.delete_password_in_keyring(username)

            message = "Bad username or password for {username}".format(
                username=username,
            )
            password = None

            failure_count += 1
            if failure_count >= 3:
                raise RuntimeError(message)

            print(message, file=sys.stderr)

    for dev in api.devices:
        if (
            not command_line.device_id or
            (
                command_line.device_id.strip().lower() ==
                dev.content["id"].strip().lower()
            )
        ):
            #   List device(s)
            if command_line.locate:
                dev.location()

            if command_line.output_to_file:
                create_pickled_data(
                    dev,
                    filename=(
                        dev.content["name"].strip().lower() + ".fmip_snapshot"
                    )
                )

            contents = dev.content
            if command_line.longlist:
                print("-"*30)
                print(contents["name"])
                for x in contents:
                    print("%20s - %s" % (x, contents[x]))
            elif command_line.list:
                print("-"*30)
                print("Name - %s" % contents["name"])
                print("Display Name  - %s" % contents["deviceDisplayName"])
                print("Location      - %s" % contents["location"])
                print("Battery Level - %s" % contents["batteryLevel"])
                print("Battery Status- %s" % contents["batteryStatus"])
                print("Device Class  - %s" % contents["deviceClass"])
                print("Device Model  - %s" % contents["deviceModel"])

            #   Play a Sound on a device
            if command_line.sound:
                if command_line.device_id:
                    dev.play_sound()
                else:
                    raise RuntimeError(
                        "\n\n\t\t%s %s\n\n" % (
                            "Sounds can only be played on a singular device.",
                            DEVICE_ERROR
                        )
                    )

            #   Display a Message on the device
            if command_line.message:
                if command_line.device_id:
                    dev.display_message(
                        subject='A Message',
                        message=command_line.message,
                        sounds=True
                    )
                else:
                    raise RuntimeError(
                        "%s %s" % (
                            "Messages can only be played "
                            "on a singular device.",
                            DEVICE_ERROR
                        )
                    )

            #   Display a Silent Message on the device
            if command_line.silentmessage:
                if command_line.device_id:
                    dev.display_message(
                        subject='A Silent Message',
                        message=command_line.silentmessage,
                        sounds=False
                    )
                else:
                    raise RuntimeError(
                        "%s %s" % (
                            "Silent Messages can only be played "
                            "on a singular device.",
                            DEVICE_ERROR
                        )
                    )

            #   Enable Lost mode
            if command_line.lostmode:
                if command_line.device_id:
                    dev.lost_device(
                        number=command_line.lost_phone.strip(),
                        text=command_line.lost_message.strip(),
                        newpasscode=command_line.lost_password.strip()
                    )
                else:
                    raise RuntimeError(
                        "%s %s" % (
                            "Lost Mode can only be activated "
                            "on a singular device.",
                            DEVICE_ERROR
                        )
                    )

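A detail worth noting in this example is the login loop: it retries up to three times, deleting a stale keyring entry after each failed login so the next attempt prompts fresh, and only offers to store the password once a login succeeds. Below is a minimal sketch of that retry shape; the injected callables and the ValueError standing in for PyiCloudFailedLoginException are illustrative, not pyicloud's API.

def login_with_retries(username, ask_password, login, get_stored, forget, max_failures=3):
    # Retry login, invalidating the stored password after each failure.
    password = get_stored(username)
    failures = 0
    while True:
        if not password:
            password = ask_password(username)
        try:
            return login(username, password)
        except ValueError:  # stand-in for the real failed-login exception
            forget(username)  # the stored password just failed; drop it
            password = None
            failures += 1
            if failures >= max_failures:
                raise RuntimeError("Bad username or password for %s" % username)

def fake_login(user, password):
    if password != 'hunter2':
        raise ValueError('bad credentials')
    return 'session'

attempts = iter(['wrong', 'hunter2'])
print(login_with_retries('user@example.com',
                         ask_password=lambda u: next(attempts),
                         login=fake_login,
                         get_stored=lambda u: None,
                         forget=lambda u: None))  # prints: session
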
Example 50

Project: python-netsnmpagent
Source File: netsnmpagent.py
View license
	def __init__(self, **args):
		"""Initializes a new netsnmpAgent instance.
		
		"args" is a dictionary that can contain the following
		optional parameters:
		
		- AgentName     : The agent's name used for registration with net-snmp.
		- MasterSocket  : The transport specification of the AgentX socket of
		                  the running snmpd instance to connect to (see the
		                  "LISTENING ADDRESSES" section in the snmpd(8) manpage).
		                  Change this if you want to use eg. a TCP transport or
		                  access a custom snmpd instance, eg. as shown in
		                  run_simple_agent.sh, or for automatic testing.
		- PersistenceDir: The directory to use to store persistence information.
		                  Change this if you want to use a custom snmpd
		                  instance, eg. for automatic testing.
		- MIBFiles      : A list of filenames of MIBs to be loaded. Required if
		                  the OIDs, for which variables will be registered, do
		                  not belong to standard MIBs and the custom MIBs are not
		                  located in net-snmp's default MIB path
		                  (/usr/share/snmp/mibs).
		- UseMIBFiles   : Whether to use MIB files at all or not. When False,
		                  the parser for MIB files will not be initialized, so
		                  neither system-wide MIB files nor the ones provided
		                  in the MIBFiles argument will be in use.
		- LogHandler    : An optional Python function that will be registered
		                  with net-snmp as a custom log handler. If specified,
		                  this function will be called for every log message
		                  net-snmp itself generates, with parameters as follows:
		                  1. a string indicating the message's priority: one of
		                  "Emergency", "Alert", "Critical", "Error", "Warning",
		                  "Notice", "Info" or "Debug".
		                  2. the actual log message. Note that heading strings
		                  such as "Warning: " and "Error: " will be stripped off
		                  since the priority level is explicitly known and can
		                  be used to prefix the log message, if desired.
		                  Trailing linefeeds will also have been stripped off.
		                  If undefined, log messages will be written to stderr
		                  instead. """

		# Default settings
		defaults = {
			"AgentName"     : os.path.splitext(os.path.basename(sys.argv[0]))[0],
			"MasterSocket"  : None,
			"PersistenceDir": None,
			"UseMIBFiles"   : True,
			"MIBFiles"      : None,
			"LogHandler"    : None,
		}
		for key in defaults:
			setattr(self, key, args.get(key, defaults[key]))
		if self.UseMIBFiles and self.MIBFiles is not None and type(self.MIBFiles) not in (list, tuple):
			self.MIBFiles = (self.MIBFiles,)

		# Initialize status attribute -- until start() is called we will accept
		# SNMP object registrations
		self._status = netsnmpAgentStatus.REGISTRATION

		# Unfortunately net-snmp does not give callers of init_snmp() (used
		# in the start() method) any feedback about success or failure of
		# connection establishment. But for AgentX clients this information is
		# quite essential, thus we need to implement some more or less ugly
		# workarounds.

		# For net-snmp 5.7.x, we can derive success and failure from the log
		# messages it generates. Normally these go to stderr, in the absence
		# of other so-called log handlers. Hence we define a callback function
		# that we will register with net-snmp as a custom log handler later on,
		# thereby effectively gaining access to the desired information.
		def _py_log_handler(majorID, minorID, serverarg, clientarg):
			# "majorID" and "minorID" are the callback IDs with which this
			# callback function was registered. They are useful if the same
			# callback was registered multiple times.
			# Both "serverarg" and "clientarg" are pointers that can be used to
			# convey information from the calling context to the callback
			# function: "serverarg" gets passed individually to every call of
			# snmp_call_callbacks() while "clientarg" was initially passed to
			# snmp_register_callback().

			# In this case, "majorID" and "minorID" are always the same (see the
			# registration code below). "serverarg" needs to be cast back to
			# become a pointer to a "snmp_log_message" C structure (passed by
			# net-snmp's log_handler_callback() in snmplib/snmp_logging.c) while
			# "clientarg" will be None (see the registration code below).
			logmsg = ctypes.cast(serverarg, snmp_log_message_p)

			# Generate textual description of priority level
			priorities = {
				LOG_EMERG: "Emergency",
				LOG_ALERT: "Alert",
				LOG_CRIT: "Critical",
				LOG_ERR: "Error",
				LOG_WARNING: "Warning",
				LOG_NOTICE: "Notice",
				LOG_INFO: "Info",
				LOG_DEBUG: "Debug"
			}
			msgprio = priorities[logmsg.contents.priority]

			# Strip trailing linefeeds and in addition "Warning: " and "Error: "
			# from msgtext as these conditions are already indicated through
			# msgprio
			msgtext = re.sub(
				"^(Warning|Error): *",
				"",
				u(logmsg.contents.msg.rstrip(b"\n"))
			)

			# Intercept log messages related to connection establishment and
			# failure to update the status of this netsnmpAgent object. This is
			# really an ugly hack, introducing a dependency on the particular
			# text of log messages -- hopefully the net-snmp guys won't
			# translate them one day.
			if  msgprio == "Warning" \
			or  msgprio == "Error" \
			and re.match("Failed to .* the agentx master agent.*", msgtext):
				# If this was the first connection attempt, we consider the
				# condition fatal: it is more likely that an invalid
				# "MasterSocket" was specified than that we've got concurrency
				# issues with our agent being erroneously started before snmpd.
				if self._status == netsnmpAgentStatus.FIRSTCONNECT:
					self._status = netsnmpAgentStatus.CONNECTFAILED

					# No need to log this message -- we'll generate our own when
					# throwing a netsnmpAgentException as consequence of the
					# ECONNECT
					return 0

				# Otherwise we'll stay at status RECONNECTING and log net-snmp's
				# message like any other. net-snmp code will keep retrying to
				# connect.
			elif msgprio == "Info" \
			and  re.match("AgentX subagent connected", msgtext):
				self._status = netsnmpAgentStatus.CONNECTED
			elif msgprio == "Info" \
			and  re.match("AgentX master disconnected us.*", msgtext):
				self._status = netsnmpAgentStatus.RECONNECTING

			# If "LogHandler" was defined, call it to take care of logging.
			# Otherwise print all log messages to stderr to resemble net-snmp
			# standard behavior (but add log message's associated priority in
			# plain text as well)
			if self.LogHandler:
				self.LogHandler(msgprio, msgtext)
			else:
				print("[{0}] {1}".format(msgprio, msgtext))

			return 0

		# We defined a Python function that needs a ctypes conversion so it can
		# be called by C code such as net-snmp. That's what SNMPCallback() is
		# used for. However we also need to store the reference in "self" as it
		# will otherwise be lost at the exit of this function so that net-snmp's
		# attempt to call it would end in nirvana...
		self._log_handler = SNMPCallback(_py_log_handler)

		# Now register our custom log handler with majorID SNMP_CALLBACK_LIBRARY
		# and minorID SNMP_CALLBACK_LOGGING.
		if libnsa.snmp_register_callback(
			SNMP_CALLBACK_LIBRARY,
			SNMP_CALLBACK_LOGGING,
			self._log_handler,
			None
		) != SNMPERR_SUCCESS:
			raise netsnmpAgentException(
				"snmp_register_callback() failed for _netsnmp_log_handler!"
			)

		# Finally the net-snmp logging system needs to be told to enable
		# logging through callback functions. This will actually register a
		# NETSNMP_LOGHANDLER_CALLBACK log handler that will call out to any
		# callback functions with the majorID and minorID shown above, such as
		# ours.
		libnsa.snmp_enable_calllog()

		# Unfortunately our custom log handler above is still not enough: in
		# net-snmp 5.4.x there were no "AgentX master disconnected" log
		# messages yet. So we need another workaround to be able to detect
		# disconnects for this release. Both net-snmp 5.4.x and 5.7.x support
		# a callback mechanism using the "majorID" SNMP_CALLBACK_APPLICATION and
		# the "minorID" SNMPD_CALLBACK_INDEX_STOP, which we can abuse for our
		# purposes. Again, we start by defining a callback function.
		def _py_index_stop_callback(majorID, minorID, serverarg, clientarg):
			# For "majorID" and "minorID" see our log handler above.
			# "serverarg" is a disguised pointer to a "netsnmp_session"
			# structure (passed by net-snmp's subagent_open_master_session() and
			# agentx_check_session() in agent/mibgroup/agentx/subagent.c). We
			# can ignore it here since we have a single session only anyway.
			# "clientarg" will be None again (see the registration code below).

			# We only care about SNMPD_CALLBACK_INDEX_STOP as our custom log
			# handler above already took care of all other events.
			if minorID == SNMPD_CALLBACK_INDEX_STOP:
				self._status = netsnmpAgentStatus.RECONNECTING

			return 0

		# Convert it to a C callable function and store its reference
		self._index_stop_callback = SNMPCallback(_py_index_stop_callback)

		# Register it with net-snmp
		if libnsa.snmp_register_callback(
			SNMP_CALLBACK_APPLICATION,
			SNMPD_CALLBACK_INDEX_STOP,
			self._index_stop_callback,
			None
		) != SNMPERR_SUCCESS:
			raise netsnmpAgentException(
				"snmp_register_callback() failed for _netsnmp_index_callback!"
			)

		# No enabling necessary here

		# Make us an AgentX client
		if libnsa.netsnmp_ds_set_boolean(
			NETSNMP_DS_APPLICATION_ID,
			NETSNMP_DS_AGENT_ROLE,
			1
		) != SNMPERR_SUCCESS:
			raise netsnmpAgentException(
				"netsnmp_ds_set_boolean() failed for NETSNMP_DS_AGENT_ROLE!"
			)

		# Use an alternative transport specification to connect to the master?
		# Defaults to "/var/run/agentx/master".
		# (See the "LISTENING ADDRESSES" section in the snmpd(8) manpage)
		if self.MasterSocket:
			if libnsa.netsnmp_ds_set_string(
				NETSNMP_DS_APPLICATION_ID,
				NETSNMP_DS_AGENT_X_SOCKET,
				b(self.MasterSocket)
			) != SNMPERR_SUCCESS:
				raise netsnmpAgentException(
					"netsnmp_ds_set_string() failed for NETSNMP_DS_AGENT_X_SOCKET!"
				)

		# Use an alternative persistence directory?
		if self.PersistenceDir:
			if libnsa.netsnmp_ds_set_string(
				NETSNMP_DS_LIBRARY_ID,
				NETSNMP_DS_LIB_PERSISTENT_DIR,
				b(self.PersistenceDir)
			) != SNMPERR_SUCCESS:
				raise netsnmpAgentException(
					"netsnmp_ds_set_string() failed for NETSNMP_DS_LIB_PERSISTENT_DIR!"
				)

		# Initialize net-snmp library (see netsnmp_agent_api(3))
		if libnsa.init_agent(b(self.AgentName)) != 0:
			raise netsnmpAgentException("init_agent() failed!")

		# Initialize MIB parser
		if self.UseMIBFiles:
			libnsa.netsnmp_init_mib()

		# If MIBFiles were specified (ie. MIBs that can not be found in
		# net-snmp's default MIB directory /usr/share/snmp/mibs), read
		# them in so we can translate OID strings to net-snmp's internal OID
		# format.
		if self.UseMIBFiles and self.MIBFiles:
			for mib in self.MIBFiles:
				if libnsa.read_mib(b(mib)) == 0:
					raise netsnmpAgentException("netsnmp_read_module({0}) " +
					                            "failed!".format(mib))

		# Initialize our SNMP object registry
		self._objs = defaultdict(dict)
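
The recurring discipline in this example is keeping a reference to every ctypes-wrapped Python callback (self._log_handler, self._index_stop_callback): if the wrapper object is garbage-collected, the C library is left calling through a dangling function pointer. Below is a minimal self-contained sketch of the same discipline, assuming a POSIX libc is loadable; libc's qsort comparator stands in for net-snmp's callback registration.

import ctypes
import ctypes.util

libc = ctypes.CDLL(ctypes.util.find_library("c"))

# C-callable wrapper type: int (*)(const int *, const int *)
CMPFUNC = ctypes.CFUNCTYPE(ctypes.c_int, ctypes.POINTER(ctypes.c_int),
                           ctypes.POINTER(ctypes.c_int))

class Sorter(object):
    def __init__(self):
        def _compare(a, b):
            return a[0] - b[0]
        # Store the wrapper on self, exactly as netsnmpagent does with
        # self._log_handler: a bare local would be collected, and the C
        # side would then call through a dangling pointer.
        self._compare = CMPFUNC(_compare)

    def sort(self, values):
        arr = (ctypes.c_int * len(values))(*values)
        libc.qsort(arr, len(values), ctypes.sizeof(ctypes.c_int), self._compare)
        return list(arr)

print(Sorter().sort([3, 1, 2]))  # [1, 2, 3]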