subprocess.call

Here are examples of the Python API subprocess.call, taken from open source projects. By voting up you can indicate which examples are most useful and appropriate.
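
Before the project examples, here is a minimal sketch of the API itself: subprocess.call runs a command, waits for it to finish, and returns its exit code. The commands and paths below ('ls', '/tmp', 'echo $HOME') are illustrative only.

import subprocess

# Preferred form: pass the command as an argument list; no shell is involved.
ret = subprocess.call(['ls', '-l', '/tmp'])
if ret != 0:
    print("Command failed with exit code %d" % ret)

# With shell=True the command is a single string interpreted by the shell.
# Only use this with trusted input, as several of the examples below do.
subprocess.call('echo $HOME', shell=True)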

170 Examples

Example 1

Project: mantaray Source File: create_kml_from_exif_mr.py
def create_kml_from_exif_mr(item_to_process, case_number, root_folder_path, evidence):
	print("The item to process is: " + item_to_process)
	print("The case_name is: " + case_number)
	print("The output folder is: " + root_folder_path)
	print("The evidence to process is: " + evidence)

	evidence_no_quotes = evidence
	evidence = '"' + evidence + '"'

	#create output folder path
	folder_path = root_folder_path + "/" + "KML_From_EXIF"
	check_for_folder(folder_path, "NONE")
	

	#open a log file for output
	log_file = folder_path + "/KML_From_EXIF_logfile.txt"
	outfile = open(log_file, 'wt+')

	#initialize variables
	files_of_interest = {}
	files_of_interest_list = []
	mount_point = "NONE"

	log_file3 = folder_path + "/" + case_number + "_files_to_exploit.xls"
	outfile3 = open(log_file3, 'wt+')

	#write out column headers to xls file
	outfile3.write("Name\tMD5\tFile Size (kb)\n")



	if(item_to_process == "Directory"):
		#select folder to process
		folder_process = evidence_no_quotes
	
		#set folder variable to "folder" since this is a folder and not a disk partition
		folder = "Directory"

		#call process subroutine
		process(folder_process, outfile, folder_path, folder, outfile3)

	elif(item_to_process == 'EnCase Logical Evidence File'):
		folder = "LEF"
		file_to_process = evidence
		mount_point = mount_encase_v6_l01(case_number, file_to_process, outfile)
		process(mount_point, outfile, folder_path, folder, outfile3)

		#umount
		if(os.path.exists(mount_point)):
			subprocess.call(['sudo umount -f ' + mount_point], shell=True)
			os.rmdir(mount_point)

	elif(item_to_process == 'Single File'):
		process_single_file(evidence_no_quotes, outfile, folder_path, "Single-File", outfile3)

	elif(item_to_process == 'Bit-Stream Image'):

		#select image to process
		Image_Path = evidence

		#get datetime
		now = datetime.datetime.now()

		#set Mount Point
		mount_point = "/mnt/" + now.strftime("%Y-%m-%d_%H_%M_%S")	

		#check to see if Image file is in Encase format
		if re.search(".E01", Image_Path):
			#strip out single quotes from the quoted path
			no_quotes_path = Image_Path.replace("'","")
			print("The no quotes path is: " + no_quotes_path)
			#call mount_ewf function
			cmd_false = "sudo gsettings set org.gnome.desktop.media-handling automount false && sudo gsettings set org.gnome.desktop.media-handling automount-open false"
			try:
				subprocess.call([cmd_false], shell=True)
			except:
				print("Autmount false failed")
			Image_Path = mount_ewf(Image_Path, outfile, mount_point)

		#call mmls function
		partition_info_dict, temp_time = mmls(outfile, Image_Path)
		#partition_info_dict_temp, temp_time = partition_info_dict

		#get filesize of mmls_output.txt
		file_size = os.path.getsize("/tmp/mmls_output_" + temp_time +".txt") 
		print("The filesize is: " + str(file_size))

		#if filesize of mmls output is 0 then run parted
		if(file_size == 0):
			print("mmls output was empty, running parted")
			outfile.write("mmls output was empty, running parted")
			#call parted function
			partition_info_dict, temp_time = parted(outfile, Image_Path)	

		else:
	
			#read through the mmls output and look for GUID Partition Tables (used on Macs)
			mmls_output_file = open("/tmp/mmls_output_" + temp_time + ".txt", 'r')
			for line in mmls_output_file:
				if re.search("GUID Partition Table", line):
					print("We found a GUID partition table, need to use parted")
					outfile.write("We found a GUID partition table, need to use parted\n")
					#call parted function
					partition_info_dict, temp_time = parted(outfile, Image_Path)

			#close file
			mmls_output_file.close()

		#loop through the dictionary containing the partition info (filesystem is VALUE, offset is KEY)
		#for key,value in partition_info_dict.items():
		for key,value in sorted(partition_info_dict.items()):

			#create output folder for processed files
			if not os.path.exists(folder_path + "/Processed_files_" + str(key)):
				os.mkdir(folder_path + "/Processed_files_" + str(key))

			#disable auto-mount in Nautilus - this stops a Nautilus window from popping up every time the mount command is executed
			cmd_false = "sudo gsettings set org.gnome.desktop.media-handling automount false && sudo gsettings set org.gnome.desktop.media-handling automount-open false"
			try:
				subprocess.call([cmd_false], shell=True)
			except:
				print("Autmount false failed")

			#call mount sub-routine
			success_code, loopback_device_mount = mount(value,key,Image_Path, outfile, mount_point)

			if(success_code):
				print("Could not mount partition with filesystem: " + value + " at offset:" + str(key))
				outfile.write("Could not mount partition with filesystem: " + value + " at offset:" + str(key))
			else:
		
				print("We just mounted filesystem: " + value + " at offset:" + str(key) + ". Scanning for files of interest.....\n")
				outfile.write("We just mounted filesystem: " + value + " at offset:" + str(key) + "\n")

				#call process subroutine
				process(mount_point, outfile, folder_path, key, outfile3)
			

				#unmount and remove mount points
				if(os.path.exists(mount_point)): 
					subprocess.call(['sudo umount -f ' + mount_point], shell=True)
					os.rmdir(mount_point)
				#unmount loopback device if this image was HFS+ - need to run losetup -d <loop_device> before unmounting
				if not (loopback_device_mount == "NONE"):
					losetup_d_command = "losetup -d " + loopback_device_mount
					subprocess.call([losetup_d_command], shell=True)

			#delete /tmp files created for processing bit-stream images
			if (os.path.exists("/tmp/mmls_output_" + temp_time + ".txt")):
				os.remove("/tmp/mmls_output_" + temp_time + ".txt")

	#write out list of filenames to end of output file so that user can create a filter for those filenames in Encase
	outfile3.write("\n\ncuem**** LIST of FILENAMES of INTEREST ******************\n")
	#sort list so that all values are unique
	unique(files_of_interest_list) 
	for files in files_of_interest_list:
		outfile3.write(files + "\n")
	

	#program cleanup
	outfile.close()
	outfile3.close()

	#remove mount points created for this program
	if(os.path.exists(mount_point)):
		subprocess.call(['sudo umount -f ' + mount_point], shell=True)
		os.rmdir(mount_point)
	if(os.path.exists(mount_point+"_ewf")):
		subprocess.call(['sudo umount -f ' + mount_point + "_ewf"], shell=True)
		os.rmdir(mount_point+"_ewf")
	
	#convert outfile using unix2dos	
	#chdir to output folder
	os.chdir(folder_path)

	#run text files through unix2dos
	for root, dirs, files in os.walk(folder_path):
		for filenames in files:
			#get file extension
			fileName, fileExtension = os.path.splitext(filenames)
			if(fileExtension.lower() == ".txt"):
				full_path = os.path.join(root,filenames)
				quoted_full_path = "'" +full_path+"'"
				print("Running Unix2dos against file: " + filenames)
				unix2dos_command = "sudo unix2dos " + filenames
				subprocess.call([unix2dos_command], shell=True)

	#delete empty directories in output folder
	for root, dirs, files in os.walk(folder_path, topdown=False):	
		for directories in dirs:
			files = []
			dir_path = os.path.join(root,directories)
			files = os.listdir(dir_path)	
			if(len(files) == 0):
				os.rmdir(dir_path)

	#unmount and remove mount points
	if(mount_point != "NONE"):
		if(os.path.exists(mount_point+"_ewf")):
			subprocess.call(['sudo umount -f ' + mount_point + "_ewf"], shell=True)
			os.rmdir(mount_point+"_ewf")

Example 2

Project: tlsprober Source File: scan_start.py
@transaction.commit_manually
def main():

	computername =  os.environ.get('COMPUTERNAME',"any").lower()
	if computername == "any":
		computername =  os.environ.get('HOSTNAME',"any").lower()
	if computername == "any":
		raise Exception("Computername was empty")
	
	computername = computername.partition('.')[0]
	
	options_config = OptionParser()
	
	options_config.add_option("--testbase2", action="store_true", dest="use_testbase2")
	options_config.add_option("--verbose", action="store_true", dest="verbose")
	options_config.add_option("--managed", action="store_true", dest="managed")
	options_config.add_option("--performance", action="store_true", dest="register_performance")
	
	(options, args) = options_config.parse_args()
	
	
	master_configuration,created = Scanner.ScannerNode.objects.get_or_create(hostname = "tlsprober-cluster", defaults={
																				"scanner_parameters":"not used",
																				"active_node":True,
																				})
	configuration,created = Scanner.ScannerNode.objects.get_or_create(hostname = computername, defaults={
																				"scanner_parameters":"--processes 4 --iterations 10",
																				"active_node":True,
																				})
	configuration.save()
	transaction.commit()
	
	for run in Scanner.ScannerRun.objects.filter(enabled=True).order_by("-priority", "entered_date").iterator():
		if not run.enabled or run.scannerqueue_set.filter(state=Scanner.ScannerQueue.SCANQ_IDLE).count() == 0:
			run.enabled = False
			run.save()
			continue
		
		if run.branch:
			terminate = True
		
		processes = []
		process_index = 0
		
		check_queue_frequency = 1
		checked_count = 0
		checked_count_git = 0
		last_count =Scanner.ScannerQueue.objects.filter(part_of_run=run,state=Scanner.ScannerQueue.SCANQ_IDLE).count()
	
		while (Scanner.ScannerRun.objects.filter(enabled=True, priority__gt=run.priority).count() == 0 and
			Scanner.ScannerRun.objects.get(id = run.id).enabled):
			cluster_master_configuration,created = Cluster.ClusterNode.objects.get_or_create(hostname = "tlsprober-cluster", defaults={
																						"probe_parameters":"not used",
																						#"result_parameters":"not used",
																						"active_node":True,
																						})
			cluster_configuration,created = Cluster.ClusterNode.objects.get_or_create(hostname = computername, defaults={
																						"probe_parameters":"--processes 40 --iterations 40",
																						#"result_parameters":"--processes 10 --iterations 100",
																						"active_node":True,
																						})
			cluster_configuration.save()
			if (cluster_master_configuration.active_node and cluster_configuration.active_node and 
				Cluster.ClusterRun.objects.filter(enabled=True).count()>0):
				break
	
			master_configuration = Scanner.ScannerNode.objects.all().get(hostname = "tlsprober-cluster")
			configuration= Scanner.ScannerNode.objects.all().get(hostname = computername)
			if not configuration.active_node or not master_configuration.active_node:
				break;
			configuration.save()
			transaction.commit()
	
			if not processes:
				subprocess.call(["git", "pull",])
				subprocess.call(["git", "submodule", "update", "--recursive"], cwd = "..")
	
			checked_count += 1
			if checked_count >= check_queue_frequency:
				qlen =Scanner.ScannerQueue.objects.filter(part_of_run=run.id, state=Scanner.ScannerQueue.SCANQ_IDLE).count()
				if qlen <= 0 :
					break;
				if qlen < 50000:
					check_queue_frequency = 0
				checked_count =0
	
			checked_count_git += 1;
			if checked_count_git>= 10:
				subprocess.call(["git", "pull",])
				subprocess.call(["git", "submodule", "update", "--recursive"], cwd = "..")
				checked_count_git = 0;
			
			run_config = OptionParser()
			
			run_config.add_option("--processes", action="store", type="int", dest="process_count", default=1)
			run_config.add_option("--iterations", action="store", type="int", dest="iteration_count", default=40)
	
			(run_options, args) = run_config.parse_args(configuration.scan_parameters.split())
			if int(run_options.process_count) == 0:
				break
	
			started_proc = 0;
			
			proc_limit = int(run_options.process_count)
	
			while len(processes) < proc_limit and started_proc<max(10,min(30, proc_limit/10)):
				process_index += 1
				new_process = subprocess.Popen(
											(["nice"] if os.name == "posix" else []) +
											["python", "-O", 
												"scan_hostnames.py",
												"-n",  str(process_index),
												"--run-id", str(run.id),
												"--source", '"'+run.source_name+'"',
												"--description", '"'+run.description+'"',
												"--max", str(run_options.iteration_count),
												]+
												(["--performance"] if options.verbose  or options.register_performance else [])+
												(["--verbose"] if options.verbose else [])+
												(["--testbase2"] if options.use_testbase2 else []),
												shell=False)
				
				started_proc += 1
				if new_process:
					processes.append(new_process)
					if options.verbose:
						print "started ", process_index, " count ", len(processes), "/", proc_limit
				time.sleep(0.5)
		
			if len(processes) >400 and len(processes) < proc_limit*0.95:
				time.sleep(120)
			else:
				time.sleep(30 if len(processes) > proc_limit*0.95 else 15)
			
			if options.verbose:
				getPerformance(run, configuration)
			
			next_process_list = []
			
			for proc in processes:
				if proc.poll() == None:
					next_process_list.append(proc)
			
			processes = next_process_list
				
			# Loop back and try the next one
		
		while processes:
			time.sleep(30)
			if options.verbose:
				getPerformance(run, configuration)
	
			next_process_list = []
			
			for proc in processes:
				if proc.poll() == None:
					next_process_list.append(proc)
			
			processes = next_process_list
			
			if options.verbose:
				print "closing down: count ", len(processes), "/",  run_options.process_count
				
			# Loop back and see if all has ended now
		
		
		if options.verbose:
			print "closed down:"
			
		break; 
	transaction.commit()

Example 3

Project: OTPSetup Source File: main.py
def handle(conn, body, message):

    print "begin"

    # write tomcat-users file
    tutemplate = open(os.path.join(deployer_resources_dir, 'tomcat-users.xml'), 'r')
    tuxml = tutemplate.read()
    tutemplate.close()

    tuxml = tuxml.format(password=settings.TOMCAT_ADMIN_PASSWORD)

    tufilepath = os.path.join(tomcat_home, 'conf/tomcat-users.xml')
    tufile = open(tufilepath, 'w')
    tufile.write(tuxml)
    tufile.close()

    print "wrote users"

    # start & wait for tomcat
    subprocess.call(['/etc/init.d/tomcat6', 'start'])        
    tomcat_launched = wait_for_tomcat()

    print "tomcat started"
        
    # download latest wars
    download_otp_wars()

    print "downloaded wars"

    # deploy otp-api-webapp on tomcat
    encodedstring = base64.encodestring("admin:%s" % settings.TOMCAT_ADMIN_PASSWORD)[:-1]
    auth = "Basic %s" % encodedstring

    url ='http://localhost:8080/manager/install?path=/opentripplanner-api-webapp&war=/var/otp/wars/opentripplanner-api-webapp.war'
    req = urllib2.Request(url, None, {"Authorization": auth })
    url_handle = urllib2.urlopen(req)

    print "deployed api"

    # override data-sources.xml and application-context.xml
    dspath_from = os.path.join(deployer_resources_dir, 'data-sources.xml')
    dspath_to = os.path.join(tomcat_home, 'webapps/opentripplanner-api-webapp/WEB-INF/classes/data-sources.xml')
    subprocess.call(['cp', dspath_from, dspath_to])
    
    acpath_from = os.path.join(deployer_resources_dir, 'application-context.xml')
    acpath_to = os.path.join(tomcat_home, 'webapps/opentripplanner-api-webapp/WEB-INF/classes/org/opentripplanner/api/application-context.xml')
    subprocess.call(['cp', acpath_from, acpath_to])

        
    # open security-application-context.xml template
    sactemplate = open(os.path.join(deployer_resources_dir, 'security-application-context.xml'), 'r')
    sacxml = sactemplate.read()
    sactemplate.close()

    # generate password and insert into file
    chars = string.letters + string.digits
    password = ''.join([choice(chars) for i in range(8)])
    sacxml = sacxml.format(password=password)

    # overwrite deployed security-application-context.xml
    sacfilepath = os.path.join(tomcat_home, 'webapps/opentripplanner-api-webapp/WEB-INF/classes/org/opentripplanner/api/security-application-context.xml')
    sacfile = open(sacfilepath, 'w')
    sacfile.write(sacxml)
    sacfile.close()
    
    # restart tomcat
    subprocess.call(['/etc/init.d/tomcat6', 'restart'])

    # init multideployer_ready message params
    instance_id = 'n/a'
    ec2_conn = connect_ec2(settings.AWS_ACCESS_KEY_ID, settings.AWS_SECRET_KEY)
    host_ip = socket.gethostname()[3:].replace('-','.')
    reservations = ec2_conn.get_all_instances()
    for reservation in reservations:
        for instance in reservation.instances:
            private_ip = instance.private_ip_address
            if private_ip == host_ip:
                instance_id = instance.id
                found_instance = True

    pomfile = open(os.path.join(tomcat_home, 'webapps/opentripplanner-api-webapp/META-INF/maven/org.opentripplanner/opentripplanner-api-webapp/pom.properties'), 'r')
    version = 'n/a'
    for line in pomfile:
        if line[:8] == 'version=':
            version = line[8:].rstrip()
            break

    print host_ip
    print instance_id
    print version

    # publish multideployer_ready message
    publisher = conn.Producer(routing_key="multideployer_ready", exchange=exchange)
    publisher.publish({'request_id' : body['request_id'], 'host_ip' : host_ip, 'instance_id' : instance_id, 'otp_version' : version, 'auth_password' : password})
    
    # create init file    
    subprocess.call(['touch', initfile])

    # acknowledge original message and exit
    message.ack()
    sys.exit(0)

Example 4

Project: ARC Source File: assembler.py
    def RunNewbler(self):
        #Code for running newbler
        """
        Expects params keys:
            PE1 and PE2 and/or SE
            target_dir
            -urt
        """
        #Check for necessary params:
        if not (('assembly_PE1' in self.params and 'assembly_PE2' in self.params) or 'assembly_SE' in self.params):
            raise exceptions.FatalError('Missing self.params in RunNewbler.')

        #Check for necessary files:
        if 'assembly_PE1' in self.params and 'assembly_PE2' in self.params and (not os.path.exists(self.params['assembly_PE1']) or not os.path.exists(self.params['assembly_PE2'])):
            raise exceptions.FatalError('Missing PE files in RunNewbler.')

        if 'assembly_SE' in self.params and not(os.path.exists(self.params['assembly_SE'])):
            raise exceptions.FatalError('Missing SE file in RunNewbler.')

        sample = self.params['sample']
        target = self.params['target']
        killed = False
        failed = False

        #determine whether to pipe output to a file or /dev/null
        if self.params['verbose']:
            out = open(os.path.join(self.params['target_dir'], "assembly.log"), 'w')
        else:
            out = open(os.devnull, 'w')

        #Build args for newAssembly:
        args = ['newAssembly', '-force']
        if self.params['last_assembly'] and self.params['cdna']:
            #only run with cdna switch on the final assembly
            args += ['-cdna']
        args += [os.path.join(self.params['target_dir'], 'assembly')]
        logger.debug("Calling newAssembly for sample: %s target %s" % (sample, target))
        logger.info(" ".join(args))
        ret = subprocess.call(args, stdout=out, stderr=out)
        #Build args for addRun:
        if 'assembly_PE1' in self.params and 'assembly_PE2' in self.params:
            args = ['addRun', os.path.join(self.params['target_dir'], 'assembly')]
            args += [self.params['assembly_PE1']]
            logger.debug("Calling addRun for sample: %s target %s" % (sample, target))
            logger.debug(" ".join(args))
            ret = subprocess.call(args, stdout=out, stderr=out)

            args = ['addRun', os.path.join(self.params['target_dir'], 'assembly')]
            args += [self.params['assembly_PE2']]
            logger.debug("Calling addRun for sample: %s target %s" % (sample, target))
            logger.debug(" ".join(args))
            ret = subprocess.call(args, stdout=out, stderr=out)
        if 'assembly_SE' in self.params:
            args = ['addRun', os.path.join(self.params['target_dir'], 'assembly')]
            args += [self.params['assembly_SE']]
            logger.debug("Calling addRun for sample: %s target %s" % (sample, target))
            logger.debug(" ".join(args))
            ret = subprocess.call(args, stdout=out, stderr=out)

        #Build args for runProject
        args = ['runProject']
        args += ['-cpu', '1']
        if self.params['last_assembly'] and self.params['cdna']:
            args += ['-noace']
        else:
            args += ['-nobig']
        if self.params['urt'] and not self.params['last_assembly']:
            #only run with the -urt switch when it isn't the final assembly
            args += ['-urt']
        if self.params['rip']:
            args += ['-rip']
        args += [os.path.join(self.params['target_dir'], 'assembly')]
        try:
            start = time.time()
            logger.debug("Calling runProject for sample: %s target %s" % (sample, target))
            logger.debug(" ".join(args))
            ret = subprocess.Popen(args, stdout=out, stderr=out)
            pid = ret.pid
            while ret.poll() is None:
                if time.time() - start > self.params['assemblytimeout']:
                    self.kill_process_children(pid)
                    logger.warn("Sample: %s target: %s iteration: %s Killing assembly after %s seconds" % (sample, target, self.params['iteration'], time.time() - start))
                    killed = True
                    break
                time.sleep(.5)
        except Exception as exc:
            txt = "Sample: %s, Target: %s: Unhandeled error running Newbler assembly" % (self.params['sample'], self.params['target'])
            txt += '\n\t' + str(exc) + "".join(traceback.format_exception)
            logger.warn(txt)
            failed = True
            pass
        finally:
            out.close()

        #Sometimes newbler doesn't seem to exit completely:
        self.kill_process_children(pid)

        #if ret != 0:
            #raise exceptions.RerunnableError("Newbler assembly failed.")

        if not killed and ret.poll() != 0:
            #raise exceptions.RerunnableError("Newbler assembly failed.")
            failed = True

        if failed:
            logger.info("Sample: %s target: %s iteration: %s Assembly failed after %s seconds" % (sample, target, self.params['iteration'], time.time() - start))
            outf = open(os.path.join(self.params['target_dir'], "finished"), 'w')
            outf.write("assembly_failed\t" + str(time.time() - start))
            outf.close()
        if killed:
            logger.info("Sample: %s target: %s iteration: %s Assembly killed after %s seconds" % (sample, target, self.params['iteration'], time.time() - start))
            outf = open(os.path.join(self.params['target_dir'], "finished"), 'w')
            outf.write("assembly_killed\t" + str(time.time() - start))
            outf.close()
        else:
            #Run finished without error
            logger.info("Sample: %s target: %s iteration: %s Assembly finished in %s seconds" % (sample, target, self.params['iteration'], time.time() - start))
            outf = open(os.path.join(self.params['target_dir'], "finished"), 'w')
            outf.write("assembly_complete\t" + str(time.time() - start))
            outf.close()

Example 5

Project: git-repo Source File: interactive-workflow-test.py
def main():
  usage = "Usage: %prog [options] REPO_GIT"
  parser = OptionParser(usage)
  parser.add_option("-u", "--manifest-url",
                 dest='manifest_url',
                 help='manifest repository location', metavar='URL')
  parser.add_option("-s", "--no-sync",
                 action="store_true", dest="no_sync", default=False,
                 help="do not sync after init")
  parser.add_option("-c", "--clean",
                 action="store_true", dest="clean", default=False,
                 help="clean gits after init")
  parser.add_option("-i", "--interactive",
                 action="store_true", dest="interactive", default=False,
                 help="wait for user input after each step")
  parser.add_option("-q", "--quiet",
                 action="store_false", dest="verbose", default=True,
                 help="don't print status messages to stdout")

  (options, args) = parser.parse_args()

  repo_dir = sys.argv[1]

  env = os.environ
  if len(args) < 1:
    print("Missing REPO_GIT argument")
    exit(1)
  if options.verbose:
    env["REPO_TRACE"] = "1"

  if options.manifest_url.find('Test') < 0:
    print("Warning: aborting due to manifest url has no 'Test' substring. Make sure to create special manifest for this test util since it will not care for any git fetched!")
    exit(1)

  redirect_all('cat')

  repo = "%s/repo" % args[0]
  abs_repo = os.path.abspath(args[0])
  prefix = '# '

  def p(s=""):
    print('\033[47m\033[32m' + (prefix + s).ljust(80) + '\033[0m')

  def check_repository(min_git_count):
    dirs = filter(lambda x: not x == '.repo' and not x.find('Test') > -1, os.listdir('.'))
    if len(dirs) > 0:
      p("Warning: aborting due existing folders without 'Test' in name in repository! (folders: %s)" % (' '.join(dirs)))
      exit(1)

    dirs = filter(lambda x: not x == '.repo', os.listdir('.'))
    if len(dirs) < min_git_count:
      p("Warning: exit since not enough repositories found: required=%s, found=%s" % (min_git_count, len(dirs)))
      exit(1)


  def clean():
    dirs = filter(lambda x: not x.startswith('.') and os.path.isdir(os.path.join('.', x)), os.listdir('.'))
    for d in dirs:
      cmd = "git reset --hard"
      subprocess.call(cmd.split(), cwd="./%s" % d, env=env)
      cmd = "git clean -xfd"
      subprocess.call(cmd.split(), cwd="./%s" % d, env=env)

  def select_folder(folder_index):
    dirs = filter(lambda x: not x.startswith('.') and os.path.isdir(os.path.join('.', x)), os.listdir('.'))
    dirs = sorted(dirs)
    return dirs[folder_index]

  def repo_do(name, cmd, dry_run=False):
    print()
    p(name)

    cmd = ['python', repo] + cmd
    if options.verbose:
      p(' '.join(cmd))

    if not dry_run:
      subprocess.call(cmd, env=env)
    else:
      p("Skipping")

    if options.interactive:
      key = input("{0}{0}{1}Press any key to continue or 'q' to exit".format(os.linesep, prefix))
      if key == 'q':
        p("Exit")
        exit(1)
    else:
      time.sleep(1)

  def changeAnyFile(d):
    files = filter(lambda x: not x.startswith('.') and os.path.isfile(os.path.join(d, x)), os.listdir(d))
    f = files[0]
    fh = open('%s/%s' % (d, f), 'w')
    fh.write(str(random.random()))
    fh.close()
    p("Changing file %s in %s" % (f, d))

  def stageAllIn(d):
    cmd = "git add -u"
    subprocess.call(cmd.split(), cwd="./%s" % d, env=env)

  def commitIn(d):
    cmd = "git commit -m \"%s\"" % (str(random.random()))
    subprocess.call(cmd.split(), cwd="./%s" % d, env=env)

  def startChangeCommit(folder):
    repo_do("Start branch in folder %s" % folder, ["start", "fix", folder])
    changeAnyFile(folder)
    repo_do("Changed file in folder %s" % folder, ["status"])
    stageAllIn(folder)
    repo_do("Stage file in folder %s" % folder, ["status"])
    commitIn(folder)
    repo_do("Commit changes in folder %s" % folder, ["status"])

  repo_do("Test init", ["init", "-u", options.manifest_url, "--no-repo-verify", "--repo-url", abs_repo, "--repo-branch", "stable"])

  if options.clean:
    clean()
    repo_do("Do clean", ["status"])

  repo_do("Test sync", ["sync"], options.no_sync)
  check_repository(2)

  repo_do("Test status", ["status"])
  repo_do("Test info", ["info", "-o"])

  folder_index0 = select_folder(0)
  folder_index1 = select_folder(1)

  startChangeCommit(folder_index0)
  repo_do("Check single pushable branch", ["info", "-o"])
  repo_do("Test single push", ["push"])
  repo_do("Check nothing to push after commit", ["info", "-o"])

  startChangeCommit(folder_index0)
  startChangeCommit(folder_index1)
  repo_do("Check two pushable branches", ["info", "-o"])
  repo_do("Test multiple push with editor", ["push"])
  repo_do("Check nothing to push after commit", ["info", "-o"])
  repo_do("Check already pushed branches", ["status"])
  repo_do("Do prune", ["prune"])
  repo_do("Check prune", ["status"])

  repo_do("Test forall with env varibales", ["forall", "-c", "printenv", "REPO_PROJECT"])

  print()
  p("Done")

  WaitForProcess()

Example 6

Project: pyina Source File: ez_map.py
def ez_map(func, *arglist, **kwds):
    """higher-level map interface for selected mapper and launcher

maps function 'func' across arguments 'arglist'.  arguments and results
are stored and sent as pickled strings, while function 'func' is inspected
and written as a source file to be imported.

Further Input:
    nodes -- the number of parallel nodes
    launcher -- the launcher object
    scheduler -- the scheduler object
    mapper -- the mapper object
    timelimit -- string representation of maximum run time (e.g. '00:02')
    queue -- string name of selected queue (e.g. 'normal')
    """
    import dill as pickle
    import os.path, tempfile, subprocess
    from pyina.tools import which_strategy
    # mapper = None (allow for use of default mapper)
    if kwds.has_key('mapper'):
        mapper = kwds['mapper']
        if mapper() == "mpi_pool": scatter = False
        elif mapper() == "mpi_scatter": scatter = True
        else: raise NotImplementedError, "Mapper '%s' not found." % mapper()
        ezdefaults['program'] = which_strategy(scatter, lazy=True)
    # override the defaults
    if kwds.has_key('nnodes'): ezdefaults['nodes'] = kwds['nnodes']
    if kwds.has_key('nodes'): ezdefaults['nodes'] = kwds['nodes']
    if kwds.has_key('timelimit'): ezdefaults['timelimit'] = kwds['timelimit']
    if kwds.has_key('queue'): ezdefaults['queue'] = kwds['queue']
    # set the scheduler & launcher (or use the given default)
    if kwds.has_key('launcher'): launcher = kwds['launcher']
    else: launcher = mpirun_launcher  #XXX: default = non_mpi?
    if kwds.has_key('scheduler'): scheduler = kwds['scheduler']
    else: scheduler = ''
    # set scratch directory (most often required for queue launcher)
    if kwds.has_key('workdir'): ezdefaults['workdir'] = kwds['workdir']
    else:
        if launcher in [torque_launcher, moab_launcher] \
        or scheduler in [torque_scheduler, moab_scheduler]:
            ezdefaults['workdir'] = os.path.expanduser("~")

    from dill.temp import dump, dump_source
    # write func source to a NamedTemporaryFile (instead of pickle.dump)
    # ezrun requires 'FUNC = <function>' to be included as module.FUNC
    modfile = dump_source(func, alias='FUNC', dir=ezdefaults['workdir'])
    # standard pickle.dump of inputs to a NamedTemporaryFile
    kwd = {'onall':kwds.get('onall',True)}
    argfile = dump((arglist,kwd), suffix='.arg', dir=ezdefaults['workdir'])
    # Keep the above return values for as long as you want the tempfile to exist

    resfilename = tempfile.mktemp(dir=ezdefaults['workdir'])
    modname = os.path.splitext(os.path.basename(modfile.name))[0] 
    ezdefaults['progargs'] = ' '.join([modname, argfile.name, resfilename, \
                                       ezdefaults['workdir']])
    #HOLD.append(modfile)
    #HOLD.append(argfile)

    if launcher in [torque_launcher, moab_launcher] \
    or scheduler in [torque_scheduler, moab_scheduler]:
        jobfilename = tempfile.mktemp(dir=ezdefaults['workdir'])
        outfilename = tempfile.mktemp(dir=ezdefaults['workdir'])
        errfilename = tempfile.mktemp(dir=ezdefaults['workdir'])
        ezdefaults['jobfile'] = jobfilename
        ezdefaults['outfile'] = outfilename
        ezdefaults['errfile'] = errfilename

    # get the appropriate launcher for the scheduler
    if scheduler in [torque_scheduler] and launcher in [mpirun_launcher]:
        launcher = torque_launcher
        ezdefaults['scheduler'] = scheduler().mpirun
    elif scheduler in [moab_scheduler] and launcher in [mpirun_launcher]:
        launcher = moab_launcher
        ezdefaults['scheduler'] = scheduler().mpirun

    elif scheduler in [torque_scheduler] and launcher in [srun_launcher]:
        launcher = torque_launcher
        ezdefaults['scheduler'] = scheduler().srun
    elif scheduler in [moab_scheduler] and launcher in [srun_launcher]:
        launcher = moab_launcher
        ezdefaults['scheduler'] = scheduler().srun

    elif scheduler in [torque_scheduler] and launcher in [aprun_launcher]:
        launcher = torque_launcher
        ezdefaults['scheduler'] = scheduler().aprun
    elif scheduler in [moab_scheduler] and launcher in [aprun_launcher]:
        launcher = moab_launcher
        ezdefaults['scheduler'] = scheduler().aprun

    elif scheduler in [torque_scheduler] and launcher in [serial_launcher]:
        launcher = torque_launcher
        ezdefaults['scheduler'] = scheduler().serial
    elif scheduler in [moab_scheduler] and launcher in [serial_launcher]:
        launcher = moab_launcher
        ezdefaults['scheduler'] = scheduler().serial
    #else: scheduler = None

    # counting on the function below to block until done.
    #print 'executing: ', launcher(ezdefaults)
    launch(launcher(ezdefaults)) #FIXME: use subprocessing

    if launcher in [torque_launcher, moab_launcher] \
    or scheduler in [torque_scheduler, moab_scheduler]:
        import time                              #BLOCKING
        while (not os.path.exists(resfilename)): #XXX: or out* to confirm start
            time.sleep(sleeptime) #XXX: wait for results... may infinite loop?
        subprocess.call('rm -f %s' % jobfilename, shell=True)
        subprocess.call('rm -f %s' % outfilename, shell=True)
        subprocess.call('rm -f %s' % errfilename, shell=True)

    # debuggery... output = function(inputs)
   #subprocess.call('cp -f %s modfile.py' % modfile.name, shell=True) # getsource; FUNC=func
   #subprocess.call('cp -f %s argfile.py' % argfile.name, shell=True) # pickled list of inputs
   #subprocess.call('cp -f %s resfile.py' % resfilename, shell=True)  # pickled list of output

    # read result back
    res = pickle.load(open(resfilename,'r'))
    subprocess.call('rm -f %s' % resfilename, shell=True)
    subprocess.call('rm -f %sc' % modfile.name, shell=True)
    return res

Example 7

Project: unattended-upgrades Source File: test_in_chroot.py
    def _run_upgrade_test_in_real_chroot(self, options, clean_chroot=True):
        """ helper that runs the unattended-upgrade in a chroot
            and does some basic verifications
        """
        if os.getuid() != 0:
            print("Skipping because uid != 0")
            return

        # clear to avoid pollution in the chroot
        apt.apt_pkg.config.clear("Acquire::http::ProxyAutoDetect")

        # create chroot
        target = "./test-chroot.%s" % DISTRO

        # setup chroot if needed
        if clean_chroot:
            self._setup_chroot(target)

        # ensure we have /dev/pts in the chroot
        ret = subprocess.call(["mount", "-t", "devpts", "devptsfs",
                               os.path.join(target, "dev", "pts")])
        if ret != 0:
            raise Exception("Failed to mount %s/proc" % target)
        self.addCleanup(
            lambda: subprocess.call(
                ["umount", os.path.join(target, "dev", "pts")]))

        # and run the upgrade test
        pid = os.fork()
        if pid == 0:
            # chroot
            os.chroot(target)
            os.chdir("/")
            if not os.path.exists("/var/log/unattended-upgrades/"):
                os.makedirs("/var/log/unattended-upgrades/")
            # make sure we are up-to-date
            subprocess.call(["apt-get", "update", "-q", "-q"])
            # run it
            apt.apt_pkg.config.clear("Unattended-Upgrade::Allowed-Origins")
            apt.apt_pkg.config.clear("Unattended-Upgrade::Origins-Pattern")
            apt.apt_pkg.config.set(
                "Unattended-Upgrade::Origins-Pattern::", ORIGINS_PATTERN)
            unattended_upgrade.DISTRO_CODENAME = DISTRO
            unattended_upgrade.main(options)
            os._exit(0)
        else:
            has_progress = False
            all_progress = ""
            last_progress = ""
            progress_log = os.path.join(
                target, "var/run/unattended-upgrades.progress")
            while True:
                time.sleep(0.01)
                if os.path.exists(progress_log):
                    progress = open(progress_log).read()
                    if progress and progress != last_progress:
                        has_progress = progress.startswith("Progress")
                        last_progress = progress
                    all_progress += progress
                # check exit status
                (apid, status) = os.waitpid(pid, os.WNOHANG)
                if pid == apid:
                    ret = os.WEXITSTATUS(status)
                    break
        #print("cuem***************", all_progress)
        self.assertEqual(ret, 0)
        # this number is a bit random, we just want to be sure we have
        # progress data
        self.assertTrue(has_progress, True)
        self.assertTrue(len(all_progress) > 5)
        return target

Example 8

Project: mantaray Source File: plist_processor.py
def plist_processor(item_to_process, case_number, root_folder_path, evidence):
	print("The item to process is: " + item_to_process)
	print("The case_name is: " + case_number)
	print("The output folder is: " + root_folder_path)
	print("The evidence to process is: " + evidence)

	evidence_no_quotes = evidence
	evidence = '"' + evidence + '"'

	#get datetime
	now = datetime.datetime.now()

	#set Mount Point
	mount_point = "/mnt/" + "MantaRay_" + now.strftime("%Y-%m-%d_%H_%M_%S_%f")
	
	#create output folder path
	folder_path = root_folder_path + "/" + "PLIST_Processor"
	check_for_folder(folder_path, "NONE")
	
	#open a log file for output
	log_file = folder_path + "/PLIST_processor_logfile.txt"
	outfile = open(log_file, 'wt+')

	#open an error file for output
	log_file = folder_path + "/PLIST_processor_error_log.txt"
	outfile_error = open(log_file, 'wt+')

	#open file to write output
	exp_file = folder_path + "/" + case_number +"_PLIST_Triage.txt"
	export_file = open(exp_file, 'a')

	if(item_to_process == "Directory"):
		folder_to_process = evidence_no_quotes
		process_folder(folder_to_process, export_file, outfile, outfile_error, now)
	elif(item_to_process =="EnCase Logical Evidence File"):
		file_to_process = evidence
		mount_point = mount_encase_v6_l01(case_number, file_to_process, outfile)
		process_folder(mount_point, export_file, outfile, outfile_error, now)

		#umount
		if(os.path.exists(mount_point)):
			subprocess.call(['sudo umount -f ' + mount_point], shell=True)
			os.rmdir(mount_point)
	elif(item_to_process == "Bit-Stream Image"):

		#set Mount Point
		mount_point = "/mnt/" + now.strftime("%Y-%m-%d_%H_%M_%S_%f")
	
		Image_Path = evidence

		#check if Image file is in Encase format
		if re.search(".E01", Image_Path):
			#set mount point
			#mount_point = "/mnt/"+	case_number+"_ewf"
			Image_Path = mount_ewf(Image_Path, outfile, mount_point)


		#call mmls function
		partition_info_dict, temp_time = mmls(outfile, Image_Path)
		partition_info_dict_temp = partition_info_dict

		#get filesize of mmls_output.txt
		file_size = os.path.getsize("/tmp/mmls_output_" + temp_time +".txt") 

		#if filesize of mmls output is 0 then run parted
		if(file_size == 0):
			print("mmls output was empty, running parted\n")
			outfile.write("mmls output was empty, running parted\n")
			#call parted function
			partition_info_dict, temp_time = parted(outfile, Image_Path)

		else:

			#read through the mmls output and look for GUID Partition Tables (used on Macs)
			mmls_output_file = open("/tmp/mmls_output_" + temp_time + ".txt", 'r')
			for line in mmls_output_file:
				if re.search("GUID Partition Table", line):
					print("We found a GUID partition table, need to use parted")
					outfile.write("We found a GUID partition table, need to use parted\n")
					#call parted function
					partition_info_dict, temp_time = parted(outfile, Image_Path)
			mmls_output_file.close()
	
		#loop through the dictionary containing the partition info (filesystem is VALUE, offset is KEY)
		for key,value in partition_info_dict.items():
			cmd_false = "sudo gsettings set org.gnome.desktop.media-handling automount false && sudo gsettings set org.gnome.desktop.media-handling automount-open false"
			try:
				subprocess.call([cmd_false], shell=True)
			except:
				print("Autmount false failed")

			#process plist files
			if(value =="hfs+"):
				#call mount sub-routine
				success_code, loopback_device_mount = mount(value,str(key),Image_Path, outfile, mount_point)

				if(success_code):
					print("Could not mount partition with filesystem: " + value + " at offset:" + str(key))
					outfile.write("Could not mount partition with filesystem: " + value + " at offset:" + str(key))
				else:
		
					print("We just mounted filesystem: " + value + " at offset:" + str(key) + "\n")
					outfile.write("We just mounted filesystem: " + value + " at offset:" + str(key) + "\n")

					#process 
					process_folder(mount_point, export_file, outfile, outfile_error, now)

					#unmount
					subprocess.call(['umount ' + mount_point], shell=True)
					subprocess.call(['losetup -d ' + loopback_device_mount], shell=True)
					
			else:
				print("This partition is not formatted HFS+")
				outfile.write("This partition is not formatted HFS+\n\n")
		#close export_file
		export_file.close()


		#chdir to output folder
		os.chdir(folder_path)	

		#unmount and remove mount points
		if re.search(".E01", Image_Path):
			if(os.path.exists(mount_point+"_ewf")):
				subprocess.call(['sudo umount -f ' + mount_point + "_ewf"], shell=True)
				os.rmdir(mount_point+"_ewf")

		#remove empty directories
		for root, dirs, files in os.walk(folder_path, topdown = False):
			for directory in dirs:
				dir_path = os.path.join(root, directory)
				if not os.listdir(dir_path):
					outfile.write("Removing empty folder: " + dir_path + "\n")
					os.rmdir(dir_path)

		#close outfiles
		outfile.close()

		#run text files through unix2dos
		for root, dirs, files in os.walk(folder_path):
			for filenames in files:
				#get file extension
				fileName, fileExtension = os.path.splitext(filenames)
				if(fileExtension.lower() == ".txt"):
					full_path = os.path.join(root,filenames)
					quoted_full_path = "'" +full_path+"'"
					print("Running Unix2dos against file: " + filenames)
					unix2dos_command = "sudo unix2dos " + quoted_full_path
					subprocess.call([unix2dos_command], shell=True)

		#delete /tmp/ls_output.txt
		if (os.path.exists("/tmp/mmls_output_" + temp_time + ".txt")):
			os.remove("/tmp/mmls_output_" + temp_time + ".txt")
		if (os.path.exists("/tmp/timeline_partition_info_" + temp_time +".txt")):
			os.remove("/tmp/timeline_partition_info_" + temp_time +".txt")
		if (os.path.exists("/tmp/dump_" + temp_time + ".txt")):
			os.remove("/tmp/dump_" + temp_time + ".txt")
		if (os.path.exists("/tmp/fls_output_" + temp_time + ".txt")):
			os.remove("/tmp/fls_output_" + temp_time + ".txt")
		if (os.path.exists("/tmp/hives_to_rename_" + temp_time)):
			shutil.rmtree("/tmp/hives_to_rename_" + temp_time)

Example 9

Project: ccm Source File: repository.py
def clone_development(git_repo, version, verbose=False, alias=False):
    print_(git_repo, version)
    target_dir = directory_name(version)
    assert target_dir
    if 'github' in version:
        git_repo_name, git_branch = github_username_and_branch_name(version)
    elif 'local:' in version:
        git_repo_name = 'local_{}'.format(git_repo)  # add git repo location to distinguish cache location for differing repos
        git_branch = version.split(':')[-1]  # last token on 'local:...' slugs should always be branch name
    elif alias:
        git_repo_name = 'alias_{}'.format(version.split('/')[0].split(':')[-1])
        git_branch = version.split('/')[-1]
    else:
        git_repo_name = 'apache'
        git_branch = version.split(':', 1)[1]
    local_git_cache = os.path.join(__get_dir(), '_git_cache_' + git_repo_name)
    logfile = lastlogfilename()
    with open(logfile, 'w') as lf:
        try:
            # Checkout/fetch a local repository cache to reduce the number of
            # remote fetches we need to perform:
            if not os.path.exists(local_git_cache):
                common.info("Cloning Cassandra...")
                out = subprocess.call(
                    ['git', 'clone', '--mirror', git_repo, local_git_cache],
                    cwd=__get_dir(), stdout=lf, stderr=lf)
                assert out == 0, "Could not do a git clone"
            else:
                common.info("Fetching Cassandra updates...")
                out = subprocess.call(
                    ['git', 'fetch', '-fup', 'origin', '+refs/*:refs/*'],
                    cwd=local_git_cache, stdout=lf, stderr=lf)

            # Checkout the version we want from the local cache:
            if not os.path.exists(target_dir):
                # development branch doesn't exist. Check it out.
                common.info("Cloning Cassandra (from local cache)")

                # git on cygwin appears to be adding `cwd` to the commands which is breaking clone
                if sys.platform == "cygwin":
                    local_split = local_git_cache.split(os.sep)
                    target_split = target_dir.split(os.sep)
                    subprocess.call(['git', 'clone', local_split[-1], target_split[-1]], cwd=__get_dir(), stdout=lf, stderr=lf)
                else:
                    subprocess.call(['git', 'clone', local_git_cache, target_dir], cwd=__get_dir(), stdout=lf, stderr=lf)

                # determine if the request is for a branch
                is_branch = False
                try:
                    branch_listing = subprocess.check_output(['git', 'branch', '--all'], cwd=target_dir).decode('utf-8')
                    branches = [b.strip() for b in branch_listing.replace('remotes/origin/', '').split()]
                    is_branch = git_branch in branches
                except subprocess.CalledProcessError as cpe:
                    common.error("Error Running Branch Filter: {}\nAssumming request is not for a branch".format(cpe.output))

                # now check out the right version
                branch_or_sha_tag = 'branch' if is_branch else 'SHA/tag'
                common.info("Checking out requested {} ({})".format(branch_or_sha_tag, git_branch))
                if is_branch:
                    # we use checkout -B with --track so we can specify that we want to track a specific branch
                    # otherwise, you get errors on branch names that are also valid SHAs or SHA shortcuts, like 10360
                    # we use -B instead of -b so we reset branches that already exist and create a new one otherwise
                    out = subprocess.call(['git', 'checkout', '-B', git_branch,
                                           '--track', 'origin/{git_branch}'.format(git_branch=git_branch)],
                                          cwd=target_dir, stdout=lf, stderr=lf)
                else:
                    out = subprocess.call(['git', 'checkout', git_branch], cwd=target_dir, stdout=lf, stderr=lf)
                if int(out) != 0:
                    raise CCMError('Could not check out git branch {branch}. '
                                   'Is this a valid branch name? (see {lastlog} or run '
                                   '"ccm showlastlog" for details)'.format(
                                       branch=git_branch, lastlog=logfile
                                   ))
                # now compile
                compile_version(git_branch, target_dir, verbose)
            else:  # branch is already checked out. See if it is behind and recompile if needed.
                out = subprocess.call(['git', 'fetch', 'origin'], cwd=target_dir, stdout=lf, stderr=lf)
                assert out == 0, "Could not do a git fetch"
                status = subprocess.Popen(['git', 'status', '-sb'], cwd=target_dir, stdout=subprocess.PIPE, stderr=lf).communicate()[0]
                if str(status).find('[behind') > -1:
                    common.info("Branch is behind, recompiling")
                    out = subprocess.call(['git', 'pull'], cwd=target_dir, stdout=lf, stderr=lf)
                    assert out == 0, "Could not do a git pull"
                    out = subprocess.call([platform_binary('ant'), 'realclean'], cwd=target_dir, stdout=lf, stderr=lf)
                    assert out == 0, "Could not run 'ant realclean'"

                    # now compile
                    compile_version(git_branch, target_dir, verbose)
        except Exception as e:
            # wipe out the directory if anything goes wrong. Otherwise we will assume it has been compiled the next time it runs.
            try:
                rmdirs(target_dir)
                common.error("Deleted {} due to error".format(target_dir))
            except:
                print_('Building C* version {version} failed. Attempted to delete {target_dir} '
                       'but failed. This will need to be manually deleted'.format(
                           version=version,
                           target_dir=target_dir
                       ))
            finally:
                raise e

Example 10

Project: livecd-tools Source File: mkbiarch.py
def main():


    def usage():
        usage = 'usage: mkbiarch.py <x86 Live ISO File> <x64 Live ISO File> <Target Multi Arch Image File>'
        print >> sys.stdout, usage


    def mount(src, dst, options=None):
        if os.path.exists(src):
            if not os.path.exists(dst):
                os.mkdir(dst)
            if options is None:
                args = ("/bin/mount", src, dst)
            else:
                args = ("/bin/mount", options, src, dst)
            rc = subprocess.call(args)
            return rc
        return


    def umount(src):
        if os.path.exists(src):
                args = ("/bin/umount", src)
                rc = subprocess.call(args)
                return rc
        return


    def copy(src, dst):
        if os.path.exists(src):
            if not os.path.exists(dst):
                if not os.path.isfile(src):
                    mkdir(dst)
            shutil.copy(src, dst)


    def move(src, dst):
        if os.path.exists(src):
            shutil.move(src, dst)

    def mkdir(dir=None):
        if dir is None:
            tmp = tempfile.mkdtemp()
            return tmp
        else:
            args = ("/bin/mkdir", "-p", dir)
            rc = subprocess.call(args)


    def losetup(src, dst, offset=None):
        if os.path.exists(src):
            if os.path.exists(dst):
                if offset is None:
                    args = ("/sbin/losetup", src, dst)
                else:
                    args = ("/sbin/losetup", "-o", str(offset), src, dst)
                rc = subprocess.call(args)
        return rc

    def lounset(device):
        args = ("/sbin/losetup", "-d", device)
        rc = subprocess.call(args) 

    def null():
        fd = open(os.devnull, 'w')
        return fd

    def dd(file, target):
        args = ("/bin/dd", "if=%s"%file, "of=%s"%target)
        rc = subprocess.call(args)

    def lo():
        args = ("/sbin/losetup", "--find")
        rc = subprocess.Popen(args, stdout=subprocess.PIPE).communicate()[0].rstrip()
        return rc

    def lodev(file):
        args = ("/sbin/losetup", "-j", file)
        rc = subprocess.Popen(args, stdout=subprocess.PIPE).communicate()[0].split(":")
        return rc[0]


    def mkimage(bs, count):
        tmp = tempfile.mkstemp()
        image = tmp[1]
        args = ("/bin/dd", "if=/dev/zero",
                 "of=%s"%image, "bs=%s"%bs,
                 "count=%s"%count)
        rc = subprocess.call(args)
        return image


    def size(ent):
        if os.path.exists(ent):
            return os.stat(ent).st_size

    def bs(size):
        return size / 2048

    def partition(device):
        dev = parted.Device(path=device)
        disk = parted.freshDisk(dev, 'msdos')
        constraint = parted.Constraint(device=dev)

        new_geom = parted.Geometry(device=dev,
                                   start=1,
                                   end=(constraint.maxSize - 1))
        filesystem = parted.FileSystem(type="ext2",
                                       geometry=new_geom)
        partition = parted.Partition(disk=disk,
                                     fs=filesystem,
                                     type=parted.PARTITION_NORMAL,
                                     geometry=new_geom)
        constraint = parted.Constraint(exactGeom=new_geom)
        partition.setFlag(parted.PARTITION_BOOT)
        disk.addPartition(partition=partition,
                          constraint=constraint)
        
        disk.commit()

    def format(partition):
        args = ("/sbin/mke2fs", "-j", partition)
        rc = subprocess.call(args)

    def mbr(target):
        mbr = "/usr/share/syslinux/mbr.bin"
        dd(mbr, target)

    def getuuid(device):
        args = ("/sbin/blkid", "-s", "UUID", "-o", "value", device)
        rc = subprocess.Popen(args, stdout=subprocess.PIPE).communicate()[0].rstrip()
        return rc

    def syslinux(multitmp, config, **args):
        arg = ("/sbin/extlinux", "--install", multitmp + "/extlinux/")
        rc = subprocess.call(arg)

        content = """
        default vesamenu.c32
        timeout 100

        menu background splash.jpg
        menu title Welcome to Fedora 13
        menu color border 0 #ffffffff #00000000
        menu color sel 7 #ffffffff #ff000000
        menu color title 0 #ffffffff #00000000
        menu color tabmsg 0 #ffffffff #00000000
        menu color unsel 0 #ffffffff #00000000
        menu color hotsel 0 #ff000000 #ffffffff
        menu color hotkey 7 #ffffffff #ff000000
        menu color timeout_msg 0 #ffffffff #00000000
        menu color timeout 0 #ffffffff #00000000
        menu color cmdline 0 #ffffffff #00000000
        menu hidden
        menu hiddenrow 5

        label Fedora-13-x86
        menu label Fedora-13-x86
        kernel vmlinuz0
        append initrd=initrd0.img root=UUID=%(uuid)s rootfstype=auto ro live_dir=/x86/LiveOS liveimg
        
        label Fedora-13-x64
        menu label Fedora-13-x64
        kernel vmlinuz1
        append initrd=initrd1.img root=UUID=%(uuid)s rootfstype=auto ro live_dir=/x64/LiveOS liveimg
        """ % args
        fd = open(config, 'w')
        fd.write(content)
        fd.close()

    def verify():
        # use md5 module to verify image files
        pass

    def setup(x86, x64, multi):

        sz = size(x86) + size(x64)
        count = bs(sz)
        blsz = str(2048)

        count = count + 102400

        multi = mkimage(blsz, count)    
        losetup(lo(), multi)
 
        mbr(lodev(multi))
        partition(lodev(multi))
 
        lounset(lodev(multi))
     
        losetup(lo(), multi, offset=512)
        format(lodev(multi))

        multitmp = mkdir()
        mount(lodev(multi), multitmp)

        losetup(lo(), x86)
        losetup(lo(), x64)
 
        x86tmp = mkdir()
        x64tmp = mkdir()

        mount(lodev(x86), x86tmp)
        mount(lodev(x64), x64tmp)


        dirs = ("/extlinux/", "/x86/", "/x64/")
        for dir in dirs:
            mkdir(multitmp + dir)
        dirs = ("/x86/", "/x64/")
        for dir in dirs:
            mkdir(multitmp + dir + "/LiveOS/")

        intermediate = tempfile.mkdtemp() # loopdev performance is slow
                                          # copy to here first then back
                                          # to multitmp + dir which is loopback also

        imgs = ("squashfs.img", "osmin.img")
        for img in imgs:
            copy(x86tmp + "/LiveOS/" + img, intermediate)
            copy(intermediate + "/" + img, multitmp + "/x86/LiveOS/")
        for img in imgs:
            copy(x64tmp + "/LiveOS/" + img, intermediate)
            copy(intermediate + "/" + img, multitmp + "/x64/LiveOS/")

        for file in os.listdir(x86tmp + "/isolinux/"):
            copy(x86tmp + "/isolinux/" + file, multitmp + "/extlinux/")

        copy(x64tmp + "/isolinux/vmlinuz0", multitmp + "/extlinux/vmlinuz1")
        copy(x64tmp + "/isolinux/initrd0.img", multitmp + "/extlinux/initrd1.img")
            

       
        uuid = getuuid(lodev(multi))

  
        config = (multitmp + "/extlinux/extlinux.conf")
        syslinux(multitmp,
                 config,
                 uuid=uuid)

        umount(x86tmp)
        umount(x64tmp)
        umount(multitmp)

        lounset(lodev(x86))
        lounset(lodev(x64))
        lounset(lodev(multi))

        shutil.rmtree(x86tmp)
        shutil.rmtree(x64tmp)
        shutil.rmtree(multitmp)
        shutil.rmtree(intermediate)

        if os.path.exists(sys.argv[3]):
            os.unlink(sys.argv[3])
        move(multi, sys.argv[3])
 

    def parse(x86, x64, multi):
        for file in x86, x64:
            if os.path.exists(file):
                pass
            else:
                usage()
        if not multi:
            usage()
        setup(x86, x64, multi)

    try: 
        parse(sys.argv[1], sys.argv[2], sys.argv[3])
    except:
        usage()
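
Note: the format and syslinux helpers above assign the exit status from subprocess.call to rc but never inspect it, so a failed mke2fs or extlinux run goes unnoticed. A minimal sketch of the same call with the status checked (the error handling is an addition, not part of the original script):

import subprocess

def format_partition(partition):
    # subprocess.call returns the child's exit status; non-zero means mke2fs failed
    rc = subprocess.call(("/sbin/mke2fs", "-j", partition))
    if rc != 0:
        raise RuntimeError("mke2fs exited with status %d" % rc)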

Example 11

Project: addons-source Source File: run_dynamicweb.py
Function: main
def main(report_nums):
    # Create results directory
    results_path = os.path.join(plugin_path, "reports")
    results_path = os.path.abspath(results_path)
    if (not os.path.isdir(results_path)): os.mkdir(results_path)
    plugvers = plugin_version(plugin_path)

    # Initialize index pages
    html_index = html_index_0
    html_procedures = html_procedures_0

    for (report_num, report_set) in enumerate(report_list):
        if (report_num not in report_nums): continue
        report_name = "report_%03i" % report_num
        # Build the report title and path
        title = report_set['title']
        print("=" * 80)
        print("%s:" % report_name)
        print("Exporting with options: %s" % title)
        print("=" * 80)
        target = os.path.join(results_path, report_name)

        # Build the report options from the default options + the report set options
        o = copy.deepcopy(default_options)
        o.update(report_set['options'])
        o.update({
            'title': title,
            'target': target,
            'archive_file': os.path.join(target, os.path.basename(o['archive_file'])),
        })
        param = ",".join([
            (key + "=" + (str(value) if isinstance(value, (int, bool)) else value))
            for (key, value) in o.items()
        ])

        # Setup environment variables
        os.environ.update(report_set['environ'])

        # Call Gramps CLI
        if (sys.version_info[0] < 3):
            param = param.encode("UTF-8")
        os.chdir(gramps_path)
        # subprocess.call([sys.executable, os.path.join(gramps_path, "Gramps.py"), "-d", ".DynamicWeb", "-q", "-O", "dynamicweb_example", "-a", "report", "-p", param])
        subprocess.call([sys.executable, os.path.join(gramps_path, "Gramps.py"), "-q", "-O", "dynamicweb_example", "-a", "report", "-p", param])

        # Update index pages
        p = report_name + "/" + report_set['link']
        html_index += "<li><a href='%s'>%s</a></li>" % (p, report_set['title'])
        for procedure in report_set['procedures']:
            p = report_name + "/" + procedure['path']
            html_procedures += "<li>%s<br><a href='%s'>%s</a></li>" % (procedure['what'], p, p)

    for (test_num, test_set) in enumerate(test_list):
        if ((test_num + len(report_list)) not in report_nums): continue
        test_name = "test_%03i" % test_num
        # Build the test title and path
        title = test_set['title']
        print("=" * 80)
        print("%s:" % test_name)
        print("Exporting with options: %s" % title)
        print("=" * 80)
        target = os.path.join(results_path, test_name)
        
        # Build the test options from the default options + the test set options
        o = copy.deepcopy(default_options)
        o.update(test_set['options'])
        o.update({
            'title': title,
            'target': target,
            'archive_file': os.path.join(target, os.path.basename(o['archive_file'])),
        })
        param = ",".join([
            (key + "=" + (str(value) if isinstance(value, (int, bool)) else value))
            for (key, value) in o.items()
        ])

        # Setup environment variables
        os.environ.update(test_set['environ'])

        # Call Gramps CLI
        if (sys.version_info[0] < 3):
            param = param.encode("UTF-8")
        os.chdir(gramps_path)
        subprocess.call([sys.executable, os.path.join(gramps_path, "Gramps.py"), "-q", "-O", "dynamicweb_example", "-a", "report", "-p", param])
            
    # Generate index pages
    html_index += html_index_1 % (default_options['name'], plugvers, VERSION)
    f = codecs.open(os.path.join(results_path, "index.html"), "w", encoding = "UTF-8", errors="xmlcharrefreplace")
    f.write(html_index)
    f.close()
    html_procedures += html_procedures_1 % (default_options['name'], plugvers, VERSION)
    f = codecs.open(os.path.join(results_path, "procedures.html"), "w", encoding = "UTF-8", errors="xmlcharrefreplace")
    f.write(html_procedures)
    f.close()
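
Note: the example calls os.chdir(gramps_path) before each subprocess.call, which changes the working directory of the whole script. subprocess.call also accepts a cwd argument that scopes the directory change to the child process alone; a hedged sketch of the same invocation (gramps_path and param below are hypothetical stand-ins for the variables above):

import os
import subprocess
import sys

gramps_path = "/path/to/gramps"  # hypothetical install location
param = "title=Example report"   # hypothetical options string

# cwd= runs Gramps from gramps_path without touching the caller's directory
rc = subprocess.call(
    [sys.executable, os.path.join(gramps_path, "Gramps.py"),
     "-q", "-O", "dynamicweb_example", "-a", "report", "-p", param],
    cwd=gramps_path)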

Example 12

Project: mwebfp Source File: mwebfp.py
def main():
  global args, errorstring, httpports, httpsports, debug, recovering, csvfile
  arguments()
  if debug:
    print 'DEBUG: RECEIVED ARGUMENTS AFTER PROCESSING:'
    print '  Debug:',args.debug
    print '  Input Range:',args.input_range
    print '  Input File:',args.input_file
    print '  Output Directory (Sanitized):',args.output_dir
    print '  Output Format:',args.output_format
    print '  Recover:',args.recover
    print '  HTTP Ports:',args.http_ports
    print '  HTTPS Ports:',args.https_ports
    print '  VHosts:',args.vhosts
    print '  Web Screenshots:',args.web_screenshots
  if args.recover and not os.path.exists(args.output_dir):
    print errorstring,
    print 'directory for recovery process not found'
    sys.exit(0)
  if os.path.exists(args.output_dir):
    print 'Using existing directory: ',args.output_dir
    if args.recover:
      if not os.path.exists(args.output_dir + '/.status'):
        print errorstring,
        print 'Recovery not possible: Recovery status file not found at \'' + args.output_dir + '\''
        sys.exit(0)
      if not os.path.exists(args.output_dir + '/.allips'):
        print errorstring,
        print 'Recovery not possible: Recovery target file not found at \'' + args.output_dir + '\''
        sys.exit(0)
      statusfile = open(args.output_dir + '/.status', 'r')
      status = statusfile.readline().strip('\n')
      statusfile.close()
      ststage = int(re.findall(r'(\d+)-',status)[0])
      stcode = int(re.findall(r'-(\d+)',status)[0])
      recovering = True
      if debug:
        print 'DEBUG: Recovering: Stage: ' + str(ststage) + ' Code: ' + str(stcode)
  else:
    print 'Creating output directory: ',args.output_dir
    os.makedirs(args.output_dir)  
  csvfile = args.output_dir + '/mwebfp-' + args.output_dir + '.csv'
  createcsv(csvfile)
  cidr_ranges = []
  if args.input_file:
    try:
      rangefile = open(args.input_file, 'r')
    except IndexError:
      print errorstring,
      print 'something is wrong with the provided filename'
      sys.exit(0)
    for line in rangefile.readlines():
      line = line.rstrip('\n')
      if not validate_cidr(line) and not validate_ip(line):
        print errorstring,
        print 'provided CIDR IP range file contains invalid lines. Verify file contents: " ' + line + ' "'
        sys.exit(0)
      else:
        if '/' not in line:
          line = line + '/32'
        cidr_ranges.append(line)
  if args.input_range:
    if '/' not in args.input_range:
       args.input_range = args.input_range + '/32'
    cidr_ranges.append(args.input_range)
  if debug: print 'DEBUG: Initial CIDR ranges list: ',cidr_ranges
  if recovering == False:
    all_ips = []
    for iprange in cidr_ranges:
      nm1 = nmap.PortScanner()
      nm1.scan(iprange, arguments='-sL -vvv -n -P0')
      allhosts = []
      for host in nm1.all_hosts():
        all_ips.append(str(host))
      for i in range(len(all_ips)):
        all_ips[i] = "%3s.%3s.%3s.%3s" % tuple(all_ips[i].split("."))
      all_ips.sort()
      for i in range(len(all_ips)):
        all_ips[i] = all_ips[i].replace(" ", "")
    targetfile = open(args.output_dir + '/.allips', 'w')
    for ip in all_ips:
      targetfile.write("%s\n" % ip)
    targetfile.close()
  else:
    all_ips = []
    with open(args.output_dir + '/.allips') as f:
      all_ips = f.read().splitlines()
    for ip in all_ips:
      if not validate_ip(ip):
        print errorstring,
        print 'Recovery not possible: Recovery target file is corrupted'
        sys.exit(0)
  if debug: print 'ips (All IPs) = ',all_ips
  numips = len(all_ips)
  print 'Loaded ' + str(numips) + ' IP addresses to scan' 
  scanports = args.http_ports + ',' + args.https_ports
  http_ports = args.http_ports.split(',')
  https_ports = args.https_ports.split(',')
  scanports_list = scanports.split(',')
  if debug: print 'TCP Ports to scan (HTTP and HTTPS):',scanports
  for ip in all_ips:
    vhostsip = []
    if args.vhosts == 'yes':
      print 'IP Address = ' + ip + ' (also checking virtual hosts)'
      vhostsip = vhosts(ip)
    else:
      print 'IP Address = ' + ip
    print '   NMap heavylifting ... (please be patient)'
    nm2 = nmap.PortScanner()
    nm2.scan(ip,arguments='-sT -P0 -vvv -n -T4 -oN ' + args.output_dir + '/mwebfp-nmap-' + ip + '.txt --script=http-favicon --script=http-headers --script=http-methods --script=http-title -p' + scanports)
    for port in scanports_list:
      portstate = nm2[ip]['tcp'][int(port)]['state']
      print '   Processing port ' + port + '\t->\t' + portstate
      if portstate == 'open':
        try:
          title = str(nm2[ip]['tcp'][int(port)]['script']['http-title'])
        except:
          title = '<No Title>'
        try:
          favicon = str(nm2[ip]['tcp'][int(port)]['script']['http-favicon'])
        except:
          favicon = '<No Favicon>'
        try:
          methods = str(nm2[ip]['tcp'][int(port)]['script']['http-methods'])
        except:
          methods = '<No Methods>'
        try:
          headers = str(nm2[ip]['tcp'][int(port)]['script']['http-headers']).strip(' ').lstrip('\n').strip('\n').lstrip(' ').replace('\n  ','\n')
          server = re.findall(r'Server: (\S+)',headers)[0]
        except:
          headers = '<No Headers>'
          server = '<No Server>'
        if args.web_screenshots == 'yes':
          print '      Capturing screenshot ...',
          if port in http_ports:
            try:
              fname = 'mwebfp-capture---http-' + ip + '-NoHostname-p' + port + '.png'
              filename = args.output_dir + '/' + fname
              subprocess.call(['cutycapt','--url=http://' + ip + '/','--out=' + filename,'--out-format=png'])
            except:
              pass
          if port in https_ports:
            try:
              fname = 'mwebfp-capture---https-' + ip + '-NoHostname-p' + port + '.png'
              filename = args.output_dir + '/' + fname
              subprocess.call(['cutycapt','--url=https://' + ip + '/','--out=' + filename,'--out-format=png','--insecure'])
            except:
              pass
          writecsv(csvfile,[ip,port,'open','<No Hostname>',title,favicon,methods,headers,server,fname])
        else:
          writecsv(csvfile,[ip,port,'open','<No Hostname>',title,favicon,methods,headers,server])
        if args.vhosts == 'yes' and len(vhostsip) > 0:
          for vhost in vhostsip:
            nm3 = nmap.PortScanner()
            nm3.scan(vhost,arguments='-sT -sV -P0 -vvv -n -T4 -oN ' + args.output_dir + '/mwebfp-nmap-' + ip + '-' + vhost + '-p' + port + '.txt --script=http-favicon --script=http-headers --script=http-methods --script=http-title -p' + port)
            try:
              title = str(nm3[ip]['tcp'][int(port)]['script']['http-title'])
            except:
              title = '<No Title>'
            try:
              favicon = str(nm3[ip]['tcp'][int(port)]['script']['http-favicon'])
            except:
              favicon = '<No Favicon>'
            try:
              methods = str(nm3[ip]['tcp'][int(port)]['script']['http-methods'])
            except:
              methods = '<No Methods>'
            try:
              headers = str(nm3[ip]['tcp'][int(port)]['script']['http-headers']).strip(' ').lstrip('\n').strip('\n').lstrip(' ').replace('\n  ','\n')
              server = re.findall(r'Server: (\S+)',headers)[0]
            except:
              headers = '<No Headers>'
              server = '<No Server>'
            if args.web_screenshots == 'yes':
              if port in http_ports:
                try:
                  fname = 'mwebfp-capture---http-' + ip + '-' + vhost + '-p' + port + '.png' 
                  filename = args.output_dir + '/' + fname
                  subprocess.call(['cutycapt','--url=http://' + vhost + '/','--out=' + filename,'--out-format=png'])
                except:
                  pass
              if port in https_ports:
                try:
                  fname = 'mwebfp-capture---https-' + ip + '-' + vhost + '-p' + port + '.png'
                  filename = args.output_dir + '/' + fname
                  subprocess.call(['cutycapt','--url=https://' + vhost + '/','--out=' + filename,'--out-format=png','--insecure'])
                except:
                  pass
              writecsv(csvfile,[ip,port,'open',vhost,title,favicon,methods,headers,server,fname])
            else:
              writecsv(csvfile,[ip,port,'open',vhost,title,favicon,methods,headers,server])
        print 'Done.'
      else:
        writecsv(csvfile,[ip,port,str(portstate)])
  print 'Done. Go check your report file!'
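
Note: the cutycapt invocations above are wrapped in bare try/except blocks, which hide both a missing binary and a failed capture. subprocess.call only raises (OSError) when the executable cannot be started; otherwise it returns the exit status. A narrower sketch that separates the two cases (the helper and its name are mine, not part of mwebfp):

import subprocess

def capture(url, filename):
    """Best-effort screenshot; returns True only on a successful capture."""
    try:
        rc = subprocess.call(['cutycapt', '--url=' + url,
                              '--out=' + filename, '--out-format=png'])
    except OSError:
        return False  # cutycapt is not installed or not on PATH
    return rc == 0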

Example 13

Project: itermocil Source File: itermocil.py
def main():

    parser = argparse.ArgumentParser(
        description='Process a teamocil file natively in iTerm2 (i.e. without tmux).',
        usage='%(prog)s [options] <layout>'
    )

    parser.add_argument("layout_name",
                        help="the layout name you wish to process",
                        metavar="layout",
                        nargs="*")

    # teamocil compatible flags:

    parser.add_argument("--here",
                        help="run in the current terminal",
                        action="store_true",
                        default=False)

    parser.add_argument("--edit",
                        help="edit file in $EDITOR if set, otherwise open in GUI",
                        action="store_true",
                        default=False)

    parser.add_argument("--show",
                        help="show the layout instead of executing it",
                        action="store_true",
                        default=False)

    parser.add_argument("--layout",
                        help="specify a layout file rather looking in the ~/.teamocil",
                        action="store_true",
                        default=None)

    parser.add_argument("--list",
                        help="show the available layouts in ~/teamocil",
                        action="store_true",
                        default=False)

    parser.add_argument("--version",
                        help="show iTermocil version",
                        action="store_true",
                        default=None)

    parser.add_argument("--debug",
                        help="output the iTerm Applescript instead of executing it",
                        action="store_true",
                        default=None)

    args = parser.parse_args()

    # itermocil files live in a hidden directory in the home directory
    # either in an .itermocil directory or a .teamocil directory
    itermocil_dir = os.path.join(os.path.expanduser("~"), ".itermocil")
    teamocil_dir = os.path.join(os.path.expanduser("~"), ".teamocil")

    # If --version then show the version number
    if args.version:
        print __version__
        sys.exit(0)

    # If --list then show the layout names in ~/.itermocil and ~/.teamocil
    if args.list:
        for d in [itermocil_dir, teamocil_dir]:
            if os.path.isdir(d):
                print d
                for file in os.listdir(d):
                    if file.endswith(".yml"):
                        print("  " + file[:-4])
        sys.exit(0)

    filepath = None
    if not args.layout_name:
        # parser.error('You must supply a layout name, or just the --list option. Use -h for help.')
        filepath = os.path.join(os.getcwd(), 'iTermocil.yml')
        if not os.path.isfile(filepath):
            parser.print_help()
            sys.exit(1)
    else:
        layout = args.layout_name[0]
        # Sanitize input
        layout = re.sub("[\*\?\[\]\'\"\\\$\;\&\(\)\|\^\<\>]", "", layout)

    # Build teamocil file path based on presence of --layout flag.
    if args.layout:
        filepath = os.path.join(os.getcwd(), layout)
    else:
        if not os.path.isdir(itermocil_dir):
            if not os.path.isdir(teamocil_dir):
                print "ERROR: No ~/.itermocil or ~/.teamocil directory"
                sys.exit(1)

        if not filepath:
            filepath = os.path.join(itermocil_dir, layout + ".yml")
            if not os.path.isfile(filepath):
                filepath = os.path.join(teamocil_dir, layout + ".yml")

    # If --edit then try to launch the editor and exit
    if args.edit:
        editor_var = os.getenv('EDITOR')
        if editor_var:
            import shlex
            editor = shlex.split(editor_var)
            editor.append(filepath)
            subprocess.call(editor)
        else:
            if not os.path.isfile(filepath):
                subprocess.call(['touch', filepath])
            subprocess.call(['open', filepath])

        sys.exit(0)

    # Check teamocil file exists
    if not os.path.isfile(filepath):
        print "ERROR: There is no file at: " + filepath
        sys.exit(1)

    # If --show then output and exit()
    if args.show:
        with open(filepath, 'r') as fin:
            print fin.read()
            sys.exit(0)

    # Parse the teamocil file and execute it.
    cwd = os.getcwd()
    instance = Itermocil(filepath, here=args.here, cwd=cwd)

    # If --debug then output the applescript. Do some rough'n'ready
    # formatting on it.
    if args.debug:

        script = instance.script()
        script = re.sub("^(\s*)", "", script, flags=re.MULTILINE)

        indent = ""
        formatted_script = []

        for line in script.split("\n"):
            if line[:8] == "end tell":
                indent = indent[:-1]
            if line[:4] == "tell" and line[:7] != "tell i ":
                formatted_script.append("")

            formatted_script.append(indent + line)

            if line[:4] == "tell" and line[:7] != "tell i ":
                indent += "\t"

        formatted_script.append("")
        print "\n".join(formatted_script)
    else:
        instance.execute()
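
Note: the --edit branch above shows a tidy pattern for launching the user's editor: $EDITOR may contain flags (e.g. "vim -f"), so it is split shell-style with shlex before the filename is appended and the list is handed to subprocess.call. Distilled into a standalone sketch (the 'open' fallback is macOS-specific, as in itermocil):

import os
import shlex
import subprocess

def open_in_editor(filepath):
    editor_var = os.getenv('EDITOR')
    if editor_var:
        # split "vim -f" into ['vim', '-f'] so flags survive as separate argv entries
        subprocess.call(shlex.split(editor_var) + [filepath])
    else:
        subprocess.call(['open', filepath])  # GUI fallback on macOS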

Example 14

Project: LS-BSR Source File: ls_bsr.py
def main(directory,id,filter,processors,genes,cluster_method,blast,length,
         max_plog,min_hlog,f_plog,keep,filter_peps,filter_scaffolds,prefix,temp_dir,debug):
    start_dir = os.getcwd()
    ap=os.path.abspath("%s" % start_dir)
    dir_path=os.path.abspath("%s" % directory)
    logging.logPrint("Testing paths of dependencies")
    if blast=="blastn" or blast=="tblastn":
        ab = subprocess.call(['which', 'blastn'])
        if ab == 0:
            print "citation: Altschul SF, Madden TL, Schaffer AA, Zhang J, Zhang Z, Miller W, and Lipman DJ. 1997. Gapped BLAST and PSI-BLAST: a new generation of protein database search programs. Nucleic Acids Res 25:3389-3402"
        else:
            print "blastn isn't in your path, but needs to be!"
            sys.exit()
    if "NULL" in temp_dir:
        fastadir = tempfile.mkdtemp()
    else:
        fastadir = os.path.abspath("%s" % temp_dir)
        if os.path.exists('%s' % temp_dir):
            print "old run directory exists in your genomes directory (%s).  Delete and run again" % temp_dir
            sys.exit()
        else:
            os.makedirs('%s' % temp_dir)
    for infile in glob.glob(os.path.join(dir_path, '*.fasta')):
        name=get_seq_name(infile)
        os.link("%s" % infile, "%s/%s.new" % (fastadir,name))
    if "null" in genes:
        rc = subprocess.call(['which', 'prodigal'])
        if rc == 0:
            pass
        else:
            print "prodigal is not in your path, but needs to be!"
            sys.exit()
        print "citation: Hyatt D, Chen GL, Locascio PF, Land ML, Larimer FW, and Hauser LJ. 2010. Prodigal: prokaryotic gene recognition and translation initiation site identification. BMC Bioinformatics 11:119"
        if "usearch" in cluster_method:
            print "citation: Edgar RC. 2010. Search and clustering orders of magnitude faster than BLAST. Bioinformatics 26:2460-2461"
        elif "cd-hit" in cluster_method:
            print "citation: Li, W., Godzik, A. 2006. Cd-hit: a fast program for clustering and comparing large sets of protein or nuceltodie sequences. Bioinformatics 22(13):1658-1659"
        elif "vsearch" in cluster_method:
            print "citation: Rognes, T., Flouri, T., Nichols, B., Qunice, C., Mahe, Frederic. 2016. VSEARCH: a versatile open source tool for metagenomics. PeerJ Preprints. DOI: https://doi.org/10.7287/peerj.preprints.2409v1"
        if blast=="blat":
            ac = subprocess.call(['which', 'blat'])
            if ac == 0:
                print "citation: W.James Kent. 2002. BLAT - The BLAST-Like Alignment Tool.  Genome Research 12:656-664"
            else:
                print "You have requested blat, but it is not in your PATH"
                sys.exit()
        logging.logPrint("predicting genes with Prodigal")
        predict_genes(fastadir, processors)
        logging.logPrint("Prodigal done")
        """This function produces locus tags"""
        genbank_hits = process_genbank_files(dir_path)
        if genbank_hits == None or len(genbank_hits) == 0:
            os.system("cat *genes.seqs > all_gene_seqs.out")
            if filter_scaffolds == "T":
                filter_scaffolds("all_gene_seqs.out")
                os.system("mv tmp.out all_gene_seqs.out")
            else:
                pass
        else:
            logging.logPrint("Converting genbank files")
            """First combine all of the prodigal files into one file"""
            os.system("cat *genes.seqs > all_gene_seqs.out")
            if filter_scaffolds == "T":
                filter_scaffolds("all_gene_seqs.out")
                os.system("mv tmp.out all_gene_seqs.out")
            else:
                pass
            """This combines the locus tags with the Prodigal prediction"""
            os.system("cat *locus_tags.fasta all_gene_seqs.out > tmp.out")
            os.system("mv tmp.out all_gene_seqs.out")
            """I also need to convert the GenBank file to a FASTA file"""
            for hit in genbank_hits:
                reduced_hit = hit.replace(".gbk","")
                SeqIO.convert("%s/%s" % (dir_path, hit), "genbank", "%s.fasta.new" % reduced_hit, "fasta")
        if "NULL" in cluster_method:
            print "Clustering chosen, but no method selected...exiting"
            sys.exit()
        elif "usearch" in cluster_method:
            ac = subprocess.call(['which', 'usearch'])
            if ac == 0:
                os.system("mkdir split_files")
                os.system("cp all_gene_seqs.out split_files/all_sorted.txt")
                os.chdir("split_files/")
                logging.logPrint("Splitting FASTA file for use with USEARCH")
                split_files("all_sorted.txt")
                logging.logPrint("clustering with USEARCH at an ID of %s" % id)
                run_usearch(id)
                os.system("cat *.usearch.out > all_sorted.txt")
                os.system("mv all_sorted.txt %s" % fastadir)
                os.chdir("%s" % fastadir)
                uclust_cluster(id)
                logging.logPrint("USEARCH clustering finished")
            else:
                print "usearch must be in your path as usearch...exiting"
                sys.exit()
        elif "vsearch" in cluster_method:
            ac = subprocess.call(['which', 'vsearch'])
            if ac == 0:
                logging.logPrint("clustering with VSEARCH at an ID of %s, using %s processors" % (id,processors))
                run_vsearch(id, processors)
                os.system("mv vsearch.out consensus.fasta")
                logging.logPrint("VSEARCH clustering finished")
            else:
                print "vsearch must be in your path as vsearch...exiting"
                sys.exit()
        elif "cd-hit" in cluster_method:
            ac = subprocess.call(['which', 'cd-hit-est'])
            if ac == 0:
                logging.logPrint("clustering with cd-hit at an ID of %s, using %s processors" % (id,processors))
                subprocess.check_call("cd-hit-est -i all_gene_seqs.out -o consensus.fasta -M 0 -T %s -c %s > /dev/null 2>&1" % (processors, id), shell=True)
            else:
                print "cd-hit must be in your path as cd-hit-est...exiting"
                sys.exit()
        """need to check for dups here"""
        dup_ids = test_duplicate_header_ids("consensus.fasta")
        if dup_ids == "True":
            pass
        elif dup_ids == "False":
            print "duplicate headers identified, renaming.."
            rename_fasta_header("consensus.fasta", "tmp.txt")
            os.system("mv tmp.txt consensus.fasta")
        else:
            pass
        if "tblastn" == blast:
            subprocess.check_call("makeblastdb -in consensus.fasta -dbtype nucl > /dev/null 2>&1", shell=True)
            translate_consensus("consensus.fasta")
            if filter_peps == "T":
                filter_seqs("tmp.pep")
                os.system("rm tmp.pep")
            else:
                os.system("mv tmp.pep consensus.pep")
            clusters = get_cluster_ids("consensus.pep")
            blast_against_self_tblastn("tblastn", "consensus.fasta", "consensus.pep", "tmp_blast.out", processors, filter)
        elif "blastn" == blast:
            subprocess.check_call("makeblastdb -in consensus.fasta -dbtype nucl > /dev/null 2>&1", shell=True)
            blast_against_self_blastn("blastn", "consensus.fasta", "consensus.fasta", "tmp_blast.out", filter, processors)
            clusters = get_cluster_ids("consensus.fasta")
        elif "blat" == blast:
            blat_against_self("consensus.fasta", "consensus.fasta", "tmp_blast.out", processors)
            clusters = get_cluster_ids("consensus.fasta")
        else:
            pass
        subprocess.check_call("sort -u -k 1,1 tmp_blast.out > self_blast.out", shell=True)
        ref_scores=parse_self_blast(open("self_blast.out", "U"))
        subprocess.check_call("rm tmp_blast.out self_blast.out", shell=True)
        os.system("rm *new_genes.*")
        if blast == "tblastn" or blast == "blastn":
            logging.logPrint("starting BLAST")
        else:
            logging.logPrint("starting BLAT")
        if "tblastn" == blast:
            blast_against_each_genome_tblastn(dir_path, processors, "consensus.pep", filter)
        elif "blastn" == blast:
            blast_against_each_genome_blastn(dir_path, processors, filter, "consensus.fasta")
        elif "blat" == blast:
            blat_against_each_genome(dir_path, "consensus.fasta",processors)
        else:
            pass
    else:
        logging.logPrint("Using pre-compiled set of predicted genes")
        files = glob.glob(os.path.join(dir_path, "*.fasta"))
        if len(files)==0:
            print "no usable reference genomes found!"
            sys.exit()
        else:
            pass
        gene_path=os.path.abspath("%s" % genes)
        dup_ids = test_duplicate_header_ids(gene_path)
        if dup_ids == "True":
            pass
        elif dup_ids == "False":
            print "duplicate headers identified, exiting.."
            sys.exit()
        clusters = get_cluster_ids(gene_path)
        os.system("cp %s %s" % (gene_path,fastadir))
        os.chdir("%s" % fastadir)
        if gene_path.endswith(".pep"):
            logging.logPrint("using tblastn on peptides")
            try:
                subprocess.check_call("makeblastdb -in %s -dbtype prot > /dev/null 2>&1" % gene_path, shell=True)
            except:
                logging.logPrint("problem encountered with BLAST database")
                sys.exit()
            blast_against_self_tblastn("blastp", gene_path, gene_path, "tmp_blast.out", processors, filter)
            subprocess.check_call("sort -u -k 1,1 tmp_blast.out > self_blast.out", shell=True)
            ref_scores=parse_self_blast(open("self_blast.out", "U"))
            subprocess.check_call("rm tmp_blast.out self_blast.out", shell=True)
            logging.logPrint("starting BLAST")
            blast_against_each_genome_tblastn(dir_path, processors, gene_path, filter)
        elif gene_path.endswith(".fasta"):
            if "tblastn" == blast:
                logging.logPrint("using tblastn")
                translate_genes(gene_path)
                try:
                    subprocess.check_call("makeblastdb -in %s -dbtype nucl > /dev/null 2>&1" % gene_path, shell=True)
                except:
                    logging.logPrint("problem encountered with BLAST database")
                    sys.exit()
                blast_against_self_tblastn("tblastn", gene_path, "genes.pep", "tmp_blast.out", processors, filter)
                subprocess.check_call("sort -u -k 1,1 tmp_blast.out > self_blast.out", shell=True)
                ref_scores=parse_self_blast(open("self_blast.out", "U"))
                subprocess.check_call("rm tmp_blast.out self_blast.out", shell=True)
                logging.logPrint("starting BLAST")
                blast_against_each_genome_tblastn(dir_path, processors, "genes.pep", filter)
                os.system("cp genes.pep %s" % start_dir)
            elif "blastn" == blast:
                logging.logPrint("using blastn")
                try:
                    subprocess.check_call("makeblastdb -in %s -dbtype nucl > /dev/null 2>&1" % gene_path, shell=True)
                except:
                    logging.logPrint("Database not formatted correctly...exiting")
                    sys.exit()
                try:
                    blast_against_self_blastn("blastn", gene_path, gene_path, "tmp_blast.out", filter, processors)
                except:
                    print "problem with blastn, exiting"
                    sys.exit()
                subprocess.check_call("sort -u -k 1,1 tmp_blast.out > self_blast.out", shell=True)
                os.system("cp self_blast.out tmp.out")
                ref_scores=parse_self_blast(open("self_blast.out", "U"))
                subprocess.check_call("rm tmp_blast.out self_blast.out", shell=True)
                logging.logPrint("starting BLAST")
                try:
                    blast_against_each_genome_blastn(dir_path, processors, filter, gene_path)
                except:
                    print "problem with blastn, exiting"
                    sys.exit()
            elif "blat" == blast:
                logging.logPrint("using blat")
                blat_against_self(gene_path, gene_path, "tmp_blast.out", processors)
                subprocess.check_call("sort -u -k 1,1 tmp_blast.out > self_blast.out", shell=True)
                ref_scores=parse_self_blast(open("self_blast.out", "U"))
                subprocess.check_call("rm tmp_blast.out self_blast.out", shell=True)
                logging.logPrint("starting BLAT")
                blat_against_each_genome(dir_path,gene_path,processors)
            else:
                pass
        else:
            print "input file format not supported"
            sys.exit()
    find_dups_dev(ref_scores, length, max_plog, min_hlog, clusters, processors)
    if blast=="blat":
        logging.logPrint("BLAT done")
    else:
        logging.logPrint("BLAST done")
    parse_blast_report("false")
    get_unique_lines()
    curr_dir=os.getcwd()
    table_files = glob.glob(os.path.join(curr_dir, "*.filtered.unique"))
    files_and_temp_names = [(str(idx), os.path.join(curr_dir, f))
                            for idx, f in enumerate(table_files)]
    names=[]
    table_list = []
    nr_sorted=sorted(clusters)
    centroid_list = []
    centroid_list.append(" ")
    for x in nr_sorted:
        centroid_list.append(x)
    table_list.append(centroid_list)
    logging.logPrint("starting matrix building")
    new_names,new_table = new_loop(files_and_temp_names, processors, clusters, debug)
    new_table_list = table_list+new_table
    logging.logPrint("matrix built")
    open("ref.list", "a").write("\n")
    for x in nr_sorted:
        open("ref.list", "a").write("%s\n" % x)
    names_out = open("names.txt", "w")
    names_redux = [val for subl in new_names for val in subl]
    for x in names_redux: print >> names_out, "".join(x)
    names_out.close()
    create_bsr_matrix_dev(new_table_list)
    divide_values("bsr_matrix", ref_scores)
    subprocess.check_call("paste ref.list BSR_matrix_values.txt > %s/bsr_matrix_values.txt" % start_dir, shell=True)
    if "T" in f_plog:
        filter_paralogs("%s/bsr_matrix_values.txt" % start_dir, "paralog_ids.txt")
        os.system("cp bsr_matrix_values_filtered.txt %s" % start_dir)
    else:
        pass
    try:
        subprocess.check_call("cp dup_matrix.txt names.txt consensus.pep consensus.fasta duplicate_ids.txt paralog_ids.txt %s" % ap, shell=True, stderr=open(os.devnull, 'w'))
    except:
        sys.exc_clear()
    """new code to rename files according to a prefix"""
    import datetime
    timestamp = datetime.datetime.now()
    rename = str(timestamp.year), str(timestamp.month), str(timestamp.day), str(timestamp.hour), str(timestamp.minute), str(timestamp.second)
    os.chdir("%s" % ap)
    if "NULL" in prefix:
        os.system("mv dup_matrix.txt %s_dup_matrix.txt" % "".join(rename))
        os.system("mv names.txt %s_names.txt" % "".join(rename))
        os.system("mv duplicate_ids.txt %s_duplicate_ids.txt" % "".join(rename))
        os.system("mv paralog_ids.txt %s_paralog_ids.txt" % "".join(rename))
        os.system("mv bsr_matrix_values.txt %s_bsr_matrix.txt" % "".join(rename))
        if os.path.isfile("consensus.fasta"):
            os.system("mv consensus.fasta %s_consensus.fasta" % "".join(rename))
        if os.path.isfile("consensus.pep"):
            os.system("mv consensus.pep %s_consensus.pep" % "".join(rename))
    else:
        os.system("mv dup_matrix.txt %s_dup_matrix.txt" % prefix)
        os.system("mv names.txt %s_names.txt" % prefix)
        os.system("mv duplicate_ids.txt %s_duplicate_ids.txt" % prefix)
        os.system("mv paralog_ids.txt %s_paralog_ids.txt" % prefix)
        os.system("mv bsr_matrix_values.txt %s_bsr_matrix.txt" % prefix)
        if os.path.isfile("consensus.fasta"):
            os.system("mv consensus.fasta %s_consensus.fasta" % prefix)
        if os.path.isfile("consensus.pep"):
            os.system("mv consensus.pep %s_consensus.pep" % prefix)
    if "NULL" in prefix:
        outfile = open("%s_run_parameters.txt" % "".join(rename), "w")
    else:
        outfile = open("%s_run_parameters.txt" % prefix, "w")
    print >> outfile, "-d %s \\" % directory
    print >> outfile, "-i %s \\" % id
    print >> outfile, "-f %s \\" % filter
    print >> outfile, "-p %s \\" % processors
    print >> outfile, "-g %s \\" % genes
    print >> outfile, "-c %s \\" % cluster_method
    print >> outfile, "-b %s \\" % blast
    print >> outfile, "-l %s \\" % length
    print >> outfile, "-m %s \\" % max_plog
    print >> outfile, "-n %s \\" % min_hlog
    print >> outfile, "-t %s \\" % f_plog
    print >> outfile, "-k %s \\" % keep
    print >> outfile, "-s %s \\" % filter_peps
    print >> outfile, "-e %s \\" % filter_scaffolds
    print >> outfile, "-x %s \\" % prefix
    print >> outfile, "-z %s" % debug
    print >> outfile, "temp data stored here if kept: %s" % fastadir
    outfile.close()
    logging.logPrint("all Done")
    if "T" == keep:
        pass
    else:
        os.system("rm -rf %s" % fastadir)
    os.chdir("%s" % ap)

Example 15

Project: BookwormDB Source File: wordcounter.py
def sortWordlist(maxDictionaryLength=1000000):
    """
    Sort and curtail the word counts created by the previous function, which leaves an
    unsorted file at `.bookworm/texts/wordlist/raw.txt`. We sort this file by invoking the
    system "sort" program, which is likely to be faster than anything pythonic; then, for
    legacy reasons, a perl program makes the counts, we sort again (to put the most common
    words at the top), and finally take the top 1,000,000 words.
    """
    logging.info("Sorting full word counts\n")
    #Setting LC_COLLATE here is essential: without it, alphabetical order isn't preserved consistently across the different sort invocations.
    subprocess.call(["export LC_COLLATE='C';export LC_ALL='C'; sort -k1 .bookworm/texts/wordlist/raw.txt > .bookworm/texts/wordlist/sorted.txt"], shell=True)
    
    logging.info("Collapsing word counts\n")
    
    #This is in perl, using bignum, because it's possible to get integer overflows on a really huge text set (like Google ngrams).

    subprocess.call(["""
           perl -ne '
           BEGIN {use bignum; $last=""; $count=0} 
           if ($_ =~ m/(.*) (\d+)/) {
            if ($last ne $1 & $last ne "") {
             print "$last $count\n"; $count = 0;
            } 
           $last = $1;
           $count += $2
           } END {print "$last $count\n"}' .bookworm/texts/wordlist/sorted.txt > .bookworm/texts/wordlist/counts.txt"""], shell=True) 

    subprocess.call(["export LC_ALL='C';export LC_COLLATE='C';sort -nrk2 .bookworm/texts/wordlist/counts.txt > .bookworm/texts/wordlist/complete.txt"], shell=True)
    # logfile.write("Including the old words first\n")
    oldids = set()
    oldids.add(0)
    oldwords = dict()

    """
    This following section may be fixed for unicode problems
    """

    try:
        i = 1
        oldFile = open(".bookworm/texts/wordlist/wordlist.txt")
        for line in oldFile:
            line = line.split('\t')
            wid = int(line[0])
            word = line[1]
            oldids.add(wid)
            oldwords[word] = wid
            i = i + 1
            if i > maxDictionaryLength:
                oldFile.close()
                return
        oldFile.close()

    #To work perfectly, this would have to keep track of all the words that have been
    #added, and also update the database with the counts from the old books for each of
    #them. That's hard. Currently, a new word will be added if the new set of texts AND
    #the old one has it in its top 1m words; BUT it will only be added into the database
    #among the new texts, not the old ones. In a few cases that defeats the point of
    #updating the old list at all, since we can't see the origins, but at least new
    #people will show up eventually.
    except:
        # logfile.write(" No original file to work from: moving on...\n")
        pass
    newWords = set()
    # logfile.write("writing new ids\n")
    newlist = open(".bookworm/texts/wordlist/complete.txt","r")
    i = 1
    nextIDtoAssign = max(oldids) + 1
    counts = list()
    for line in newlist:
        line = line.split(" ")
        word = line[0]
        count = line[1]
        try:
            wordid = oldwords[word]
        except KeyError:
            wordid = nextIDtoAssign
            nextIDtoAssign = nextIDtoAssign+1
        counts.append("\t".join([str(wordid), word.replace("\\","\\\\"), count]))
            
        i = i + 1
        if i > maxDictionaryLength:
            break

    output = open(".bookworm/texts/wordlist/newwordlist.txt", "w")
    for count in counts:
        output.write(count) #Should just carry over the newlines from earlier.
    
    #Don't overwrite the new file until the old one is complete
    subprocess.call(["mv", ".bookworm/texts/wordlist/newwordlist.txt", ".bookworm/texts/wordlist/wordlist.txt"])

Example 16

Project: mantaray Source File: jumplist_mr.py
def jumplist_mr(item_to_process, case_number, root_folder_path, evidence):


	print("The item to process is: " + item_to_process)
	print("The case_name is: " + case_number)
	print("The output folder is: " + root_folder_path)
	print("The evidence to process is: " + evidence)

	evidence = '"' + evidence + '"'

	#get datetime
	now = datetime.datetime.now()

	#set Mount Point
	mount_point = "/mnt/" + now.strftime("%Y-%m-%d_%H_%M_%S")
	
	#create output folder path
	folder_path = root_folder_path + "/" + "Jumplist_Parser"
	check_for_folder(folder_path, "NONE")
	

	#open a log file for output
	log_file = folder_path + "/Jumplist_Parser_logfile.txt"
	outfile = open(log_file, 'wt+')

	

	#select image to process
	Image_Path = evidence
	print("The image path is: " + Image_Path)

	#check to see if Image file is in Encase format
	if re.search(".E01", Image_Path):
		#strip out single quotes from the quoted path
		no_quotes_path = Image_Path.replace("'","")
		print("The no quotes path is: " + no_quotes_path)
		#call mount_ewf function
		Image_Path = mount_ewf(no_quotes_path, outfile, mount_point)

	#call mmls function
	partition_info_dict, temp_time = mmls(outfile, Image_Path)
	partition_info_dict_temp = partition_info_dict

	#get filesize of mmls_output.txt
	file_size = os.path.getsize("/tmp/mmls_output_" + temp_time +".txt") 
	print("The filesize is: " + str(file_size))

	#if filesize of mmls output is 0 then run parted
	if(file_size == 0):
		print("mmls output was empty, running parted")
		outfile.write("mmls output was empty, running parted")
		#call parted function
		partition_info_dict, temp_time = parted(outfile, Image_Path)

	else:

		#read through the mmls output and look for GUID Partition Tables (used on MACS)
		mmls_output_file = open("/tmp/mmls_output_" + temp_time + ".txt", 'r')
		for line in mmls_output_file:
			if re.search("GUID Partition Table", line):
				print("We found a GUID partition table, need to use parted")
				outfile.write("We found a GUID partition table, need to use parted\n")
				#call parted function
				partition_info_dict, temp_time = parted(outfile, Image_Path)

	#loop through the dictionary containing the partition info (filesystem is VALUE, offset is KEY)
	#for key,value in partition_info_dict.items():
	for key,value in sorted(partition_info_dict.items()):

		#disable auto-mount in nautilus - this stops a nautilus window from popping up every time the mount command is executed
		cmd_false = "sudo gsettings set org.gnome.desktop.media-handling automount false && sudo gsettings set org.gnome.desktop.media-handling automount-open false"
		try:
			subprocess.call([cmd_false], shell=True)
		except:
			print("Autmount false failed")

		#run mount command
		success_code, loopback_device_mount = mount(value,key,Image_Path, outfile, mount_point)

		if(success_code):
			print("Could not mount partition with filesystem: " + value + " at offset:" + str(key))
			outfile.write("Could not mount partition with filesystem: " + value + " at offset:" + str(key))
		else:
		
			print("We just mounted filesystem: " + value + " at offset:" + str(key) + ".\n")
			outfile.write("We just mounted filesystem: " + value + " at offset:" + str(key) + "\n")
		
			#run jl.pl against every JumpList file found under mount_point if filesystem is fat32 or ntfs
			if(value == "ntfs") or (value=="fat32"):
				for root, dirs, files in os.walk(mount_point):
					for filenames in files:
						#get file extension
						fileName, fileExtension = os.path.splitext(filenames)
						if(fileExtension.lower() == ".automaticdestinations-ms"):
							full_path = os.path.join(root,filenames)
							quoted_full_path = "'" +full_path+"'"
							print("Processing Jump List: " + filenames)
							outfile.write("Processing Jump List: " + filenames + "\n")

							#get profile name
							profile = get_account_profile_names(full_path, outfile)
							print("The profile is: " + profile)
							outfile.write("The profile is: " + profile + "\n")
			
							#process Jumplist files with jl.pl
							#jl_command = "perl /usr/share/windows-perl/jl.pl -u " + "'" + profile + "'" + " -f " + full_path + " >> " + "'" + folder_path + "/jumplist_metadata.txt" + "'"
							jl_command_tln = "perl /usr/share/windows-perl/jl.pl -u " + "'" + profile + "'" + " -t -f " + quoted_full_path + " >> " + "'" + folder_path + "/jumplist_metadata_tln.txt" + "'"
							outfile.write("The jl_command_tln is: " + jl_command_tln + "\n")
							subprocess.call([jl_command_tln], shell=True)
						else:
							print("Scanning file: " + filenames + ".  This file is not a jumplist.")
				#unmount and remove mount points
				if(os.path.exists(mount_point)): 
					subprocess.call(['sudo umount -f ' + mount_point], shell=True)
					os.rmdir(mount_point)
				#unmount loopback device if this image was HFS+ - need to run losetup -d <loop_device> before unmounting
				if not (loopback_device_mount == "NONE"):
					losetup_d_command = "losetup -d " + loopback_device_mount
					subprocess.call([losetup_d_command], shell=True)
			else:
				print("Filesystem: " + value + " at offset:" + str(key) + " is not NTFS or FAT32")
				outfile.write("Filesystem: " + value + " at offset:" + str(key) + " is not NTFS or FAT32\n")

				if(os.path.exists(mount_point)): 
					subprocess.call(['sudo umount -f ' + mount_point], shell=True)
					os.rmdir(mount_point)
				#unmount loopback device if this image was HFS+ - need to run losetup -d <loop_device> before unmounting
				if not (loopback_device_mount == "NONE"):
					losetup_d_command = "losetup -d " + loopback_device_mount
					subprocess.call([losetup_d_command], shell=True)
			#create timeline
			parse_command = "perl /usr/share/windows-perl/parse.pl -f " + "'" + folder_path + "/jumplist_metadata_tln.txt" + "'" + "> " + "'" + folder_path + "/jumplist_timeline.txt" + "'"
			subprocess.call([parse_command], shell=True)

	#unmount and remove mount points
	#if(os.path.exists(mount_point)):
	#	os.rmdir(mount_point)
	if(os.path.exists(mount_point+"_ewf")):
		print("Unmounting mount point for ewf before exiting\n\n")
		subprocess.call(['sudo umount -f ' + mount_point + "_ewf"], shell=True)
		os.rmdir(mount_point+"_ewf")

	#program cleanup
	outfile.close()
	#convert outfile using unix2dos	
	#chdir to output folder
	os.chdir(folder_path)

	#run text files through unix2dos
	for root, dirs, files in os.walk(folder_path):
		for filenames in files:
			#get file extension
			fileName, fileExtension = os.path.splitext(filenames)
			if(fileExtension.lower() == ".txt"):
				full_path = os.path.join(root,filenames)
				quoted_full_path = "'" +full_path+"'"
				print("Running Unix2dos against file: " + quoted_full_path)
				#unix2dos_command = "sudo unix2dos " + "'"+filenames+"'"
				unix2dos_command = "sudo unix2dos " + quoted_full_path
				subprocess.call([unix2dos_command], shell=True)
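
Note: several calls above pass a one-element list together with shell=True, e.g. subprocess.call(['sudo umount -f ' + mount_point], shell=True). On POSIX this works only because the first list element becomes the shell command string (any further elements would become arguments to the shell itself, not to the command); with shell=True a plain string is the clearer form:

import subprocess

mount_point = "/mnt/example"  # hypothetical mount point

# equivalent to the one-element-list form above, but unambiguous
subprocess.call('sudo umount -f ' + mount_point, shell=True)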

Example 17

Project: reprozip Source File: docker.py
@target_must_exist
def docker_run(args):
    """Runs the experiment in the container.
    """
    target = Path(args.target[0])
    unpacked_info = read_dict(target)
    cmdline = args.cmdline

    # Sanity check
    if args.detach and args.x11:
        logging.critical("Error: Can't use X11 forwarding if you're detaching")
        raise UsageError

    # Loads config
    config = load_config(target / 'config.yml', True)
    runs = config.runs

    selected_runs = get_runs(runs, args.run, cmdline)

    # Get current image name
    if 'current_image' in unpacked_info:
        image = unpacked_info['current_image']
        logging.debug("Running from image %s", image.decode('ascii'))
    else:
        logging.critical("Image doesn't exist yet, have you run setup/build?")
        sys.exit(1)

    # Name of new container
    if args.detach:
        container = make_unique_name(b'reprounzip_detached_')
    else:
        container = make_unique_name(b'reprounzip_run_')

    hostname = runs[selected_runs[0]].get('hostname', 'reprounzip')

    # X11 handler
    if args.x11:
        local_ip = get_local_addr()

        docker_host = local_ip
        if os.environ.get('DOCKER_HOST'):
            m = _dockerhost_re.match(os.environ['DOCKER_HOST'])
            if m is not None:
                docker_host = m.group(1)

        if args.tunneled_x11:
            x11 = X11Handler(True, ('internet', docker_host), args.x11_display)
        else:
            x11 = X11Handler(True, ('internet', local_ip), args.x11_display)

            if (docker_host != local_ip and docker_host != 'localhost' and
                    not docker_host.startswith('127.') and
                    not docker_host.startswith('192.168.99.')):
                ssh_cmdline = ' '.join(
                    '-R*:%(p)d:127.0.0.1:%(p)d' % {'p': port}
                    for port, connector in x11.port_forward)
                logging.warning(
                    "You requested X11 forwarding but the Docker container "
                    "appears to be running remotely. It is probable that it "
                    "won't be able to connect to the local display. Creating "
                    "a remote SSH tunnel and running with --tunneled-x11 "
                    "might help (%s).",
                    ssh_cmdline)
    else:
        x11 = X11Handler(False, ('local', hostname), args.x11_display)

    cmds = []
    for run_number in selected_runs:
        run = runs[run_number]
        cmd = 'cd %s && ' % shell_escape(run['workingdir'])
        cmd += '/busybox env -i '
        environ = x11.fix_env(run['environ'])
        environ = fixup_environment(environ, args)
        cmd += ' '.join('%s=%s' % (shell_escape(k), shell_escape(v))
                        for k, v in iteritems(environ))
        cmd += ' '
        # FIXME : Use exec -a or something if binary != argv[0]
        if cmdline is None:
            argv = [run['binary']] + run['argv'][1:]
        else:
            argv = cmdline
        cmd += ' '.join(shell_escape(a) for a in argv)
        uid = run.get('uid', 1000)
        gid = run.get('gid', 1000)
        cmd = '/rpzsudo \'#%d\' \'#%d\' /busybox sh -c %s' % (
            uid, gid,
            shell_escape(cmd))
        cmds.append(cmd)
    cmds = x11.init_cmds + cmds
    cmds = ' && '.join(cmds)

    signals.pre_run(target=target)

    # Creates forwarders
    forwarders = []
    for port, connector in x11.port_forward:
        forwarders.append(LocalForwarder(connector, port))

    if args.detach:
        logging.info("Start container %s (detached)",
                     container.decode('ascii'))
        retcode = interruptible_call(['docker', 'run', b'--name=' + container,
                                      '-h', hostname,
                                      '-d', '-t'] +
                                     args.docker_option +
                                     [image, '/busybox', 'sh', '-c', cmds])
        if retcode != 0:
            logging.critical("docker run failed with code %d", retcode)
            subprocess.call(['docker', 'rm', '-f', container])
            sys.exit(1)
        return

    # Run command in container
    logging.info("Starting container %s", container.decode('ascii'))
    retcode = interruptible_call(['docker', 'run', b'--name=' + container,
                                  '-h', hostname,
                                  '-i', '-t'] +
                                 args.docker_option +
                                 [image, '/busybox', 'sh', '-c', cmds])
    if retcode != 0:
        logging.critical("docker run failed with code %d", retcode)
        subprocess.call(['docker', 'rm', '-f', container])
        sys.exit(1)

    # Get exit status from "docker inspect"
    out = subprocess.check_output(['docker', 'inspect', container])
    outjson = json.loads(out.decode('ascii'))
    if (outjson[0]["State"]["Running"] is not False or
            outjson[0]["State"]["Paused"] is not False):
        logging.error("Invalid container state after execution:\n%s",
                      json.dumps(outjson[0]["State"]))
    retcode = outjson[0]["State"]["ExitCode"]
    stderr.write("\n*** Command finished, status: %d\n" % retcode)

    # Commit to create new image
    new_image = make_unique_name(b'reprounzip_image_')
    logging.info("Committing container %s to image %s",
                 container.decode('ascii'), new_image.decode('ascii'))
    subprocess.check_call(['docker', 'commit', container, new_image])

    # Update image name
    unpacked_info['current_image'] = new_image
    write_dict(target, unpacked_info)

    # Remove the container
    logging.info("Destroying container %s", container.decode('ascii'))
    retcode = subprocess.call(['docker', 'rm', container])
    if retcode != 0:
        logging.error("Error deleting container %s", container.decode('ascii'))

    # Untag previous image, unless it is the initial_image
    if image != unpacked_info['initial_image']:
        logging.info("Untagging previous image %s", image.decode('ascii'))
        subprocess.check_call(['docker', 'rmi', image])

    # Update input file status
    metadata_update_run(config, unpacked_info, selected_runs)
    write_dict(target, unpacked_info)

    signals.post_run(target=target, retcode=retcode)
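
Note: this example draws a useful line between subprocess.call and subprocess.check_call: call is used for best-effort steps such as removing a container (the status is logged and execution continues), while check_call guards steps that must succeed, such as docker commit, by raising CalledProcessError on a non-zero status. A compressed sketch of that division (container and image names are hypothetical):

import subprocess

container = 'reprounzip_run_example'  # hypothetical container name

try:
    # must succeed: raises subprocess.CalledProcessError on non-zero status
    subprocess.check_call(['docker', 'commit', container, 'example_image'])
finally:
    # best-effort cleanup: just report the status instead of raising
    if subprocess.call(['docker', 'rm', container]) != 0:
        print("warning: could not remove container %s" % container)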

Example 18

Project: OTPSetup Source File: handlers.py
Function: process_gtfs
def process_gtfs(conn, body):

    try:
        print 'process_gtfs'
        #print body['config']
        #config = json.loads(body['config'])

        directory = "/mnt/gtfs%s" % body['id']
        bucket = managed_gtfs_bucket()

        print "feeds:"
        i = 0
        agency_groups = { } 
        os.makedirs(os.path.join(directory, 'gtfs'))
        for feed in body['feeds']:
            feedId = feed['feedId']
            print " - %s" % feedId
            
            if 'defaultAgencyId' in feed:
                agencyId = feed['defaultAgencyId']
                if agencyId in agency_groups:
                    agency_groups[agencyId].append(feed)
                else:
                    agency_groups[agencyId] = [ feed ]
 
            else:
                agencyId = "agency%s" % i
                i = i + 1
                agency_groups[agencyId] = [ feedId ]
                
        print agency_groups

        agency_keys = { }
        agency_original_keys = { }

        for agencyId in agency_groups:
            print "%s: %s" % (agencyId, len(agency_groups[agencyId]))
            agencyDir = os.path.join(directory, agencyId)

            keyList = []
            for feed in agency_groups[agencyId]:
                keyList.append(feed['feedId'])
            print "keyList: %s" % keyList
            agency_original_keys[agencyId] = ",".join(keyList)
            
            if len(agency_groups[agencyId]) > 1: # multiple feeds for agency -- shorten & merge required

                # download & shorten feeds
                os.makedirs(agencyDir)

                shortened_paths = []                
                for feed in agency_groups[agencyId]:
                    
                    feedId = feed['feedId']

                    print "downloading %s" % feedId
                    key = Key(bucket)
                    key.key = feedId
                    basename = os.path.basename(feedId)
                    path = os.path.join(agencyDir, "%s.zip" % basename)
                    key.get_contents_to_filename(path)

                    # shorten
                    print " shortening"
                    shortened_path = os.path.join(agencyDir, "%s_shortened.zip" % basename)
                    shorten_date = feed['expireOn'].replace("-","")
                    subprocess.call(['python', '/var/otp/resources/process_gtfs/shortenGtfsFeed.py', shorten_date, path, shortened_path])
                    shortened_paths.append(shortened_path) 
                    print " shortened"
                    
                # merge
                mergejarpath = "/var/otp/resources/process_gtfs/merger.jar"
                #merge_cmd = ['java', '-Xms15G', '-Xmx15G', '-jar', mergejarpath, '--file=agency.txt', '--fuzzyDuplicates', '--file=routes.txt', '--fuzzyDuplicates', '--file=shapes.txt', '--fuzzyDuplicates', '--file=fare_attributes.txt', '--fuzzyDuplicates', '--file=fare_rules.txt', '--fuzzyDuplicates', '--file=transfers.txt', '--fuzzyDuplicates', '--file=calendar.txt', '--renameDuplicates', '--file=trips.txt', '--renameDuplicates'] 
                merge_cmd = ['java', '-Xms15G', '-Xmx15G', '-jar', mergejarpath, '--file=agency.txt', '--fuzzyDuplicates', '--file=stops.txt', '--fuzzyDuplicates', '--file=routes.txt', '--fuzzyDuplicates', '--file=shapes.txt', '--fuzzyDuplicates', '--file=fare_attributes.txt', '--fuzzyDuplicates', '--file=fare_rules.txt', '--fuzzyDuplicates', '--file=transfers.txt', '--fuzzyDuplicates', '--file=calendar.txt', '--duplicateDetection=IDENTITY', '--renameDuplicates', '--file=trips.txt', '--duplicateDetection=IDENTITY', '--renameDuplicates'] 
                merge_cmd.extend(shortened_paths)

                merged_path = os.path.join(agencyDir, "merged.zip")
                merge_cmd.append(merged_path)

                print "merging"
                subprocess.call(merge_cmd)
                print "merged"

                to_transform = merged_path
 
            else: # single feed for agency ("standalone" feed) -- shorten only

                os.makedirs(agencyDir)
                feed = agency_groups[agencyId][0] 
                print "process standalone: %s" % feed['feedId']
                key = Key(bucket)
                key.key = feed['feedId']
                basename = os.path.basename(feed['feedId'])  # this feed's id, not the stale loop variable
                path = os.path.join(agencyDir, "%s.zip" % basename)
                key.get_contents_to_filename(path)

                # shorten
                print " shortening"
                shortened_path = os.path.join(agencyDir, "%s_shortened.zip" % basename)
                shorten_date = feed['expireOn'].replace("-","")
                subprocess.call(['python', '/var/otp/resources/process_gtfs/shortenGtfsFeed.py', shorten_date, path, shortened_path])
                print " shortened"

                to_transform = shortened_path


            # transform

            transformed_path = os.path.join(agencyDir, "transformed.zip")
            transformjarpath = "/var/otp/resources/process_gtfs/transformer.jar"

            transform_json = '{"op":"transform","class":"org.onebusaway.gtfs_transformer.updates.CalendarSimplicationStrategy"}'
            transform_cmd = ['java', '-Xms15G', '-Xmx15G', '-jar', transformjarpath, '--transform=json:%s' % transform_json, to_transform, transformed_path ]

            print "transforming"
            subprocess.call(transform_cmd)
            print "transformed"

            # upload to s3
            print "uploading to s3"
            s3_key = "processed/%s" % uuid.uuid1()
            key = Key(bucket)
            key.key = s3_key
            key.set_contents_from_filename(transformed_path)

            # add key to list
            agency_keys[agencyId] = s3_key

            #else:
            #
            #    # add standalone feed to list 
            #    agency_keys[agencyId] = agency_groups[agencyId][0]

        print agency_keys

        # publish process_gtfs_done message
        publisher = conn.Producer(routing_key="process_gtfs_done", exchange=exchange)
        publisher.publish({ 'id' : body['id'], 'key_map' : agency_keys, 'original_keys_map' : agency_original_keys }) 
        print "published p_g_d msg"



    except:
        now = datetime.now()
        errfile = "/var/otp/gb_err_%s_%s" % (body['id'], now.strftime("%F-%T"))
        traceback.print_exc(file=open(errfile,"a"))
        traceback.print_exc()
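
The example above builds its java merge and transform commands as argument lists, which sidesteps shell quoting, but it never inspects the exit status that subprocess.call returns. A minimal sketch of the same pattern with a return-code check; the jar path and file names are hypothetical placeholders, not taken from the project:

import subprocess

merge_cmd = ['java', '-Xmx1G', '-jar', '/path/to/merger.jar',
             'feed_a.zip', 'feed_b.zip', 'merged.zip']
ret = subprocess.call(merge_cmd)  # blocks until the JVM exits
if ret != 0:
    raise RuntimeError('merge failed with exit code %d' % ret)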

Example 19

Project: pyina Source File: ez_map.py
def ez_map2(func, *arglist, **kwds):
    """higher-level map interface for selected mapper and launcher

maps function 'func' across arguments 'arglist'.  Arguments, results, and
the function 'func' itself are stored and sent as pickled strings.  This
differs from 'ez_map' in that it does not use temporary files to store
the mapped function.

Further Input:
    nodes -- the number of parallel nodes
    launcher -- the launcher object
    scheduler -- the scheduler object
    mapper -- the mapper object
    timelimit -- string representation of maximum run time (e.g. '00:02')
    queue -- string name of selected queue (e.g. 'normal')
"""
    import dill as pickle
    import os.path, tempfile, subprocess
    from pyina.tools import which_strategy
    # mapper = None (allow for use of default mapper)
    if kwds.has_key('mapper'):
        mapper = kwds['mapper']
        if mapper() == "mpi_pool": scatter = False
        elif mapper() == "mpi_scatter": scatter = True
        else: raise NotImplementedError, "Mapper '%s' not found." % mapper()
        ezdefaults['program'] = which_strategy(scatter, lazy=True)
    # override the defaults
    if kwds.has_key('nnodes'): ezdefaults['nodes'] = kwds['nnodes']
    if kwds.has_key('nodes'): ezdefaults['nodes'] = kwds['nodes']
    if kwds.has_key('timelimit'): ezdefaults['timelimit'] = kwds['timelimit']
    if kwds.has_key('queue'): ezdefaults['queue'] = kwds['queue']
    # set the scheduler & launcher (or use the given default)
    if kwds.has_key('launcher'): launcher = kwds['launcher']
    else: launcher = mpirun_launcher  #XXX: default = non_mpi?
    if kwds.has_key('scheduler'): scheduler = kwds['scheduler']
    else: scheduler = ''
    # set scratch directory (most often required for queue launcher)
    if kwds.has_key('workdir'): ezdefaults['workdir'] = kwds['workdir']
    else:
        if launcher in [torque_launcher, moab_launcher] \
        or scheduler in [torque_scheduler, moab_scheduler]:
            ezdefaults['workdir'] = os.path.expanduser("~")

    from dill.temp import dump
    # standard pickle.dump of inputs to a NamedTemporaryFile
    modfile = dump(func, suffix='.pik', dir=ezdefaults['workdir'])
    kwd = {'onall':kwds.get('onall',True)}
    argfile = dump((arglist,kwd), suffix='.arg', dir=ezdefaults['workdir'])
    # Keep the above return values for as long as you want the tempfile to exist

    resfilename = tempfile.mktemp(dir=ezdefaults['workdir'])
    ezdefaults['progargs'] = ' '.join([modfile.name,argfile.name,resfilename, \
                                       ezdefaults['workdir']])
    #HOLD.append(modfile)
    #HOLD.append(argfile)

    if launcher in [torque_launcher, moab_launcher] \
    or scheduler in [torque_scheduler, moab_scheduler]:
        jobfilename = tempfile.mktemp(dir=ezdefaults['workdir'])
        outfilename = tempfile.mktemp(dir=ezdefaults['workdir'])
        errfilename = tempfile.mktemp(dir=ezdefaults['workdir'])
        ezdefaults['jobfile'] = jobfilename
        ezdefaults['outfile'] = outfilename
        ezdefaults['errfile'] = errfilename

    # get the appropriate launcher for the scheduler
    if scheduler in [torque_scheduler] and launcher in [mpirun_launcher]:
        launcher = torque_launcher
        ezdefaults['scheduler'] = scheduler().mpirun
    elif scheduler in [moab_scheduler] and launcher in [mpirun_launcher]:
        launcher = moab_launcher
        ezdefaults['scheduler'] = scheduler().mpirun

    elif scheduler in [torque_scheduler] and launcher in [srun_launcher]:
        launcher = torque_launcher
        ezdefaults['scheduler'] = scheduler().srun
    elif scheduler in [moab_scheduler] and launcher in [srun_launcher]:
        launcher = moab_launcher
        ezdefaults['scheduler'] = scheduler().srun

    elif scheduler in [torque_scheduler] and launcher in [aprun_launcher]:
        launcher = torque_launcher
        ezdefaults['scheduler'] = scheduler().aprun
    elif scheduler in [moab_scheduler] and launcher in [aprun_launcher]:
        launcher = moab_launcher
        ezdefaults['scheduler'] = scheduler().aprun

    elif scheduler in [torque_scheduler] and launcher in [serial_launcher]:
        launcher = torque_launcher
        ezdefaults['scheduler'] = scheduler().serial
    elif scheduler in [moab_scheduler] and launcher in [serial_launcher]:
        launcher = moab_launcher
        ezdefaults['scheduler'] = scheduler().serial
    #else: scheduler = None

    # counting on the function below to block until done.
    #print 'executing: ', launcher(ezdefaults)
    launch(launcher(ezdefaults)) #FIXME: use subprocessing

    if launcher in [torque_launcher, moab_launcher] \
    or scheduler in [torque_scheduler, moab_scheduler]:
        import time                              #BLOCKING
        while (not os.path.exists(resfilename)): #XXX: or out* to confirm start
            time.sleep(sleeptime) #XXX: wait for results... may infinite loop?
        subprocess.call('rm -f %s' % jobfilename, shell=True)
        subprocess.call('rm -f %s' % outfilename, shell=True)
        subprocess.call('rm -f %s' % errfilename, shell=True)

    # read result back
    res = pickle.load(open(resfilename,'r'))
    subprocess.call('rm -f %s' % resfilename, shell=True)
    return res
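
Note that the cleanup above interpolates file names into 'rm -f %s' shell strings, so a path containing spaces or shell metacharacters would break the command. A minimal sketch of the same cleanup using the list form of subprocess.call (no shell), exercised on a throwaway temp file:

import os
import subprocess
import tempfile

tmpname = tempfile.mktemp()             # same helper the example uses
open(tmpname, 'w').close()              # create a file to clean up
subprocess.call(['rm', '-f', tmpname])  # list form: the name is never shell-parsed
assert not os.path.exists(tmpname)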

Example 20

Project: osmc Source File: config_editor.py
Function: on_click
	def onClick(self, controlID):
		log('%s' % controlID)

		if controlID == SAVE:
			log('SAVE')

			if self.changed:

				final_action = DIALOG.yesno(lang(32052), lang(32053), nolabel=lang(32054), yeslabel=lang(32055))

				if final_action:

					log('final action')

					new_config = self.grab_item_strings()

					# temporary location for the config.txt
					tmp_loc = '/var/tmp/config.txt'

					# write the long_string_file to the config.txt
					with open(tmp_loc,'w') as f:
						for line in new_config:
							f.write(line.replace(" = ","=") + '\n')
							log('' + line)

					# backup existing config
					suffix = '_' + str(time.time()).split('.')[0]
					subprocess.call(["sudo", "cp", self.config, '/home/pi/' ])
					subprocess.call(["sudo", "mv", '/home/pi/config.txt', '/home/pi/config' + suffix + '.txt' ])

					# copy over the temp config.txt to /boot/ as superuser
					subprocess.call(["sudo", "mv", tmp_loc, self.config ])

					# THIS IS JUST FOR TESTING; LAPTOP DOESN'T LIKE SUDO HERE
					try:
						subprocess.call(["mv", tmp_loc, self.config ])
					except:
						pass

					log('writing ended')

			self.close()

		else:
			selected_entry = self.list_control.getSelectedPosition()
			item = self.list_control.getSelectedItem()
			currentlabel = item.getLabel()
			
			if selected_entry != 0:

				if self.del_string not in currentlabel:
					action = DIALOG.yesno(lang(32051), lang(32057), nolabel=lang(32058), yeslabel=lang(32059))

					if action:
						# delete
						item.setLabel(currentlabel + self.del_string)
						self.changed = True

					else:
						# edit
						d = DIALOG.input(lang(32060), currentlabel, type=xbmcgui.INPUT_ALPHANUM)

						if d:

							self.check_for_duplicates(d, True)

							item.setLabel(d)
							self.changed = True

				else:
					action = DIALOG.yesno(lang(32051), lang(32061), nolabel=lang(32058), yeslabel=lang(32062))

					if action:
						# delete
						item.setLabel(currentlabel[:len(currentlabel) - len(self.del_string)])
						self.changed = True

					else:
						# edit
						d = DIALOG.input(lang(32063), currentlabel, type=xbmcgui.INPUT_ALPHANUM)

						if d:
							self.check_for_duplicates(d, edit=True)

							item.setLabel(d)
							self.changed = True

			else:
				d = DIALOG.input(lang(32064), type=xbmcgui.INPUT_ALPHANUM)

				if d:

					self.check_for_duplicates(d)

					# add the new item to the list
					tmp = xbmcgui.ListItem(d)#, thumbnailImage=IMAGE)
					self.list_control.addItem(tmp)
					
					self.changed = True

					self.item_count += 1
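
Each sudo call above returns the command's exit code, which the example discards. A small helper in the same spirit that fails loudly instead; this is a sketch, not code from the project:

import subprocess

def sudo(*args):
    # run a command via sudo and raise on a non-zero exit code
    ret = subprocess.call(('sudo',) + args)
    if ret != 0:
        raise RuntimeError('sudo %s exited with %d' % (' '.join(args), ret))

# hypothetical usage, mirroring the backup step above:
# sudo('cp', '/boot/config.txt', '/home/pi/')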

Example 21

Project: pydgin Source File: build.py
def build_target( name, pypy_dir, build_dir, extra_rpython_flags ):

  # use the name to determine the arch, jit, softfloat requirement, and debug

  arch = None
  require_softfloat = False
  if "parc" in name:
    arch = "parc"
  if "arm" in name:
    assert arch is None, "conflicting arch definitions {} and {}" \
                         .format( arch, "arm" )
    arch = "arm"
  if "riscv" in name:
    assert arch is None, "conflicting arch definitions {} and {}" \
                         .format( arch, "riscv" )
    arch = "riscv"
    # risc-v is the only architecture that requires softfloat for now
    require_softfloat = True
  assert arch is not None, "could not determine arch from name"

  # check if we have already built softfloat and if not, build it
  if require_softfloat:
    # check os to find which extension to check for (we only support mac
    # or linux)
    assert sys.platform == "linux" or sys.platform == "linux2" \
          or sys.platform == "darwin"

    softfloat_file = "libsoftfloat.dylib" if sys.platform == "darwin" \
          else "libsoftfloat.so"

    print "softfloat is required, checking if {} exists..." \
          .format( softfloat_file ),
    found_softfloat = os.path.isfile( softfloat_file )

    if not found_softfloat:
      print "no"
      print "calling build-softfloat.py to build it"
      cmd = "../scripts/build-softfloat.py"
      print cmd
      ret = subprocess.call( cmd, shell=True )

      # check for success and if the file exists

      if ret != 0:
        print "softfloat library could not be built, aborting!"
        sys.exit( ret )

      if not os.path.isfile( softfloat_file ):
        print "{} could not be found, aborting!".format( softfloat_file )
        sys.exit( ret )

    else:
      print "yes"

  if "jit" in name and "nojit" not in name:
    jit = True
  elif "nojit" in name:
    jit = False
  else:
    # default behavior if neither jit nor nojit is in the name
    jit = True

  if "debug" in name and "nodebug" not in name:
    debug = True
  elif "nodebug" in name:
    debug = False
  else:
    # default behavior if neither debug nor nodebug is in the name
    debug = False

  print "Building {}\n  arch: {}\n  jit: {}\n  debug: {}\n" \
        .format( name, arch, jit, debug )

  # check for the pypy executable, if it doesn't exist warn

  python_bin = distutils.spawn.find_executable('pypy')
  if not python_bin:
    print ('WARNING: Cannot find a pypy executable!\n'
           '  Proceeding to translate with CPython.\n'
           '  Note that this will be *much* slower than using pypy.\n'
           '  Please install pypy for faster translation times!\n')
    python_bin = 'python'

  # create the translation command and execute it

  os.chdir('../{}'.format( arch ) )
  cmd = ( '{python_bin} {pypy_dir}/rpython/bin/rpython {rpython_opts} '
          '{arch}-sim.py {pydgin_opts}' ) \
          .format( arch=arch, pypy_dir=pypy_dir,
                   rpython_opts=( extra_rpython_flags +
                                  ("--opt=jit" if jit   else "") ),
                   pydgin_opts =( "--debug"   if debug else "" ),
                   python_bin=python_bin )

  print cmd
  ret = subprocess.call( cmd, shell=True )

  # check for success and cleanup

  if ret != 0:
    print "{} failed building, aborting!".format( name )
    sys.exit( ret )

  # for some reason, -rpath to the linker doesn't seem to work on macs?
  # we patch the binary generated to add the exact dir of libsoftfloat.so

  if require_softfloat and sys.platform == "darwin":
    cmd = "install_name_tool -change {short_so} {full_so} {pydgin}" \
          .format( short_so="libsoftfloat.so",
                   full_so="{}/../../../libsoftfloat.so".format( build_dir ),
                   pydgin=name )
    print cmd
    subprocess.call( cmd, shell=True )

  shutil.copy( name, '{}'.format( build_dir ) )
  symlink_name = '{}/../{}'.format( build_dir, name )
  if os.path.lexists( symlink_name ):
    os.remove( symlink_name )
  os.symlink( '{}/{}'.format( build_dir, name ), symlink_name )
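
A condensed sketch of the run-and-abort pattern the build script uses twice: execute a shell command string, then propagate a non-zero exit code to the caller. The command here is a harmless placeholder:

import subprocess
import sys

ret = subprocess.call('echo translating', shell=True)  # placeholder command
if ret != 0:
    print('build failed, aborting!')
    sys.exit(ret)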

Example 22

Project: crunchy-xml-decoder Source File: ultimate.py
def ultimate(page_url, seasonnum, epnum):
    global url1, url2, filen, title, media_id, lang1, lang2, hardcoded, forceusa, page_url2
    #global player_revision

    print '''
--------------------------
---- Start New Export ----
--------------------------

CrunchyRoll Downloader Toolkit DX v0.98

Crunchyroll hasn't changed anything.

If you don't have a premium account, go and sign up for one now. It's well worth it, and it supports the animators.

----------
Booting up...
'''
    if page_url == '':
        page_url = raw_input('Please enter Crunchyroll video URL:\n')
	
    try:
        int(page_url)
        page_url = 'http://www.crunchyroll.com/media-' + page_url
    except ValueError:
        if not page_url.startswith('http://') and not page_url.startswith('https://'):
            page_url = 'http://' + page_url
        try:
            int(page_url[-6:])
        except ValueError:
            if bool(seasonnum) and bool(epnum):
                page_url = altfuncs.vidurl(page_url, seasonnum, epnum)
            elif bool(epnum):
                page_url = altfuncs.vidurl(page_url, 1, epnum)
            else:
                page_url = altfuncs.vidurl(page_url, False, False)

    #subprocess.call('title ' + page_url.replace('http://www.crunchyroll.com/', ''), shell=True)

    # ----------

    #lang1, lang2 = altfuncs.config()
    #lang1, lang2, forcesub = altfuncs.config()
    lang1, lang2, forcesub, forceusa, localizecookies, vquality, onlymainsub = altfuncs.config()
    #player_revision = altfuncs.playerrev(page_url)
    html = altfuncs.gethtml(page_url)

    #h = HTMLParser.HTMLParser()
    title = re.findall('<title>(.+?)</title>', html)[0].replace('Crunchyroll - Watch ', '')
    if len(os.path.join('export', title+'.flv')) > 255:
        title = re.findall('^(.+?) \- ', title)[0]

    # title = h.unescape(unidecode(title)).replace('/', ' - ').replace(':', '-').
    # replace('?', '.').replace('"', "''").replace('|', '-').replace('&quot;',"''").strip()
    
    ### Taken from http://stackoverflow.com/questions/6116978/python-replace-multiple-strings ###
    rep = {' / ': ' - ', '/': ' - ', ':': '-', '?': '.', '"': "''", '|': '-', '&quot;': "''", 'a*G':'a G', '*': '#', u'\u2026': '...'}

    rep = dict((re.escape(k), v) for k, v in rep.iteritems())
    pattern = re.compile("|".join(rep.keys()))
    title = unidecode(pattern.sub(lambda m: rep[re.escape(m.group(0))], title))

    ### End stolen code ###

    #subprocess.call('title ' + title.replace('&', '^&'), shell=True)

    # ----------

    media_id = page_url[-6:]
    xmlconfig = BeautifulSoup(altfuncs.getxml('RpcApiVideoPlayer_GetStandardConfig', media_id), 'xml')

    try:
        if '4' in xmlconfig.find_all('code')[0]:
            print xmlconfig.find_all('msg')[0].text
            sys.exit()
    except IndexError:
        pass

    vid_id = xmlconfig.find('media_id').string

    # ----------

    host = xmlconfig.find('host')
    if host:
        host = host.string

    filen = xmlconfig.find('file')
    if filen:
        filen = filen.string

    if not host and not filen:
        print 'Downloading 2 minute preview.'
        media_id = xmlconfig.find('media_id').string
        xmlconfig = BeautifulSoup(altfuncs.getxml('RpcApiVideoEncode_GetStreamInfo', media_id), 'xml')
        host = xmlconfig.find('host').string


    # ----------
    if 'subs' in sys.argv:
        subtitles(title)
        subs_only = True
        hardcoded = True  # bleh
    else:
        page_url2 = page_url
        if host:
            if re.search('fplive\.net', host):
                url1 = re.findall('.+/c[0-9]+', host).pop()
                url2 = re.findall('c[0-9]+\?.+', host).pop()
            else:
                url1 = re.findall('.+/ondemand/', host).pop()
                url2 = re.findall('ondemand/.+', host).pop()
            video()
            video_input = os.path.join("export", title + '.flv')
        else:
            video_input = os.path.join("export", title + '.ts')
            video_hls(filen, video_input)

        heightp = '360p' if xmlconfig.height.string == '368' else '{0}p'.format(xmlconfig.height.string)  # This is less likely to fail
        subtitles(title)

        print 'Starting mkv merge'
        mkvmerge = os.path.join("video-engine", "mkvmerge.exe")
        filename_output = os.path.join("export", title + '[' + heightp.strip() +'].mkv')
        subtitle_input = []
        if os.path.isfile(mkvmerge):
            with_wine = os.name != 'nt'
        else:
            mkvmerge = "mkvmerge"
            with_wine = False
        cmd = [mkvmerge, "-o", filename_output, '--language', '0:jpn', '--language', '1:jpn', '-a', '1', '-d', '0', video_input, '--title', title]
        if with_wine:
            cmd.insert(0, 'wine')
        if not hardcoded:
            sublang = {u'Español (Espana)': 'spa_spa', u'Français (France)': 'fre', u'Português (Brasil)': 'por',
                       u'English': 'eng', u'Español': 'spa', u'Türkçe': 'tur', u'Italiano': 'ita',
                       u'العربية': 'ara', u'Deutsch': 'deu'}[lang]
            for i in sub_id2:
                sublangc=sub_id5.pop(0)
                sublangn=sub_id6.pop(0)

                if onlymainsub and sublangc != sublang:
                    continue

                filename_subtitle = os.path.join("export", title+'['+sublangc+']'+sublangn+'.ass')
                if not os.path.isfile(filename_subtitle):
                    continue

                cmd.extend(['--language', '0:' + sublangc.replace('spa_spa','spa')])

                if sublangc == sublang:
                    cmd.extend(['--default-track', '0:yes'])
                else:
                    cmd.extend(['--default-track', '0:no'])
                if forcesub:
                    cmd.extend(['--forced-track', '0:yes'])
                else:
                    cmd.extend(['--forced-track', '0:no'])

                cmd.extend(['--track-name', '0:' + sublangn])
                cmd.extend(['-s', '0'])
                cmd.append(filename_subtitle)
                subtitle_input.append(filename_subtitle)
        subprocess.call(cmd)
        print 'Merge process complete'
        subs_only = False

    print
    print '----------'
    print

    print 'Starting Final Cleanup'
    if not subs_only:
        os.remove(video_input)
    if not hardcoded or not subs_only:
        #os.remove(os.path.join(os.getcwd(), 'export', '') + title + '.ass')
        for f in subtitle_input:
            os.remove(f)
    print 'Cleanup Complete'
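
The mkvmerge invocation above shows a handy trick: build the command as a list, then conditionally insert a wrapper executable at the front. A minimal sketch with placeholder file names, assuming the bundled Windows binary and (off Windows) wine are available:

import os
import subprocess

# placeholder paths: a bundled Windows binary plus input/output files
cmd = [os.path.join('video-engine', 'mkvmerge.exe'), '-o', 'out.mkv', 'in.flv']
if os.name != 'nt':
    cmd.insert(0, 'wine')  # off Windows, run the .exe under wine
subprocess.call(cmd)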

Example 23

Project: ProxImaL Source File: halide.py
def gengen(generator_source, builddir='./build',
           target='host', generator_name=[], function_name=[], generator_param=[],
           external_source=[], external_libs=[], compile_flags=[],
           cleansource=True, verbose=True):
    """ Will take .cpp containing one (or more) Generators, compile them,
        link them with libHalide, and run
        the resulting executable to produce a .o/.h expressing the Generator's
        function. Function name is the C function name for the result """

    # Build directory
    if not os.path.exists(builddir):
        os.makedirs(builddir)

    # Generator code is a temporary file
    generator = os.path.join(builddir, 'gengen.XXXX')

    # File definitions
    halide_lib = '${HALIDE_PATH}/bin/libHalide.so'
    halide_incl = '-I${HALIDE_PATH}/include'
    generator_main = '${HALIDE_PATH}/tools/GenGen.cpp'

    # Define output names
    function_name, function_name_c, output_lib = output_names(
        function_name, generator_source, builddir)

    # It's OK for GENERATOR_NAME and FUNCTION_NAME to be empty
    # if the source we're compiling has only one generator registered,
    # we just use that one (and assume that FUNCTION_NAME=GENERATOR_NAME)
    generator_flag = ""
    if generator_name:
        generator_flag = "-g " + generator_name

    # Function flag
    function_flag = "-f " + function_name

    # Target flags
    target_flags = "target=" + target

    launcher_file = ''

    try:

        # Additional flags
        compile_flag_str = ''
        if compile_flags:
            for cf in compile_flags:
                compile_flag_str += cf + ' '

        # Compile
        cmd = ("g++ {0} -g -Wwrite-strings -std=c++11 -fno-rtti {1} {2} {3} {4} "
               " -lz -lpthread -ldl -o {5}").format(
            compile_flag_str, halid_incl, generator_source, generator_main, halide_lib, generator)

        if verbose:
            print('Compiling {0}'.format(generator_source))
            print('\t' + cmd)
        subprocess.call(cmd, shell=True)

        # Run generator
        cmd = '{0} {1} {2} -e o,h -o {3} {4}'.format(generator,
                                                     generator_flag, function_flag,
                                                     builddir, target_flags)
        if verbose:
            print('Calling generator')
            print('\t' + cmd)
        subprocess.call(cmd, shell=True)

        # Find params in output generated by generator
        header_file = os.path.join(builddir, function_name + '.h')
        object_file = os.path.join(builddir, function_name + '.o')
        params = scan_params(header_file, function_name, verbose)
        if verbose:
            print('Found {0} buffers and {1} float params and {2} int params'.format(
                params.count(Params.ImageParam_Float32),
                params.count(Params.Param_Float32),
                params.count(Params.Param_Int32)))

        # Generate launcher cpp and write
        launcher_file = os.path.join(builddir, function_name + '.cpp')
        launcher_body, argument_names = generate_launcher(
            header_file, function_name, function_name_c, params)
        with open(launcher_file, 'w') as fp:
            fp.write(launcher_body)

        # Compile launcher into library file (which will be called later by ctypes)
        if os.path.exists(output_lib):
            os.remove(output_lib)

        # External sources
        external_source_str = ''
        if external_source:
            for sc in external_source:
                external_source_str += find_source(sc) + ' '

        external_libs_str = ''
        if external_libs:
            for el in external_libs:
                external_libs_str += el + ' '

        cmd = ("g++ -fPIC -std=c++11 -Wall -O2 {0} {1} {2} -lpthread "
               "{3} -shared -o {4}").format(launcher_file, external_source_str,
                                            external_libs_str, object_file, output_lib)
        if verbose:
            print('Compiling library')
            print('\t' + cmd)
        subprocess.call(cmd, shell=True)

        return output_lib, function_name_c, argument_names

    except Exception as e:
        print('Error during generator compilation: {0}'.format(e.message), file=sys.stderr)
        exit()

    finally:
        # Cleanup
        if cleansource:
            source = [generator, header_file, object_file, launcher_file]
            for selem in source:
                if os.path.exists(selem):
                    os.remove(selem)
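
A condensed sketch of the compile step above: format a g++ command as a single string, echo it for the verbose log, and hand it to subprocess.call with shell=True. It assumes g++ is on PATH; the source and output names are placeholders:

import subprocess

cmd = 'g++ -std=c++11 gen.cpp -o gen'  # placeholder file names
print('\t' + cmd)                      # echo the command, as the example does
subprocess.call(cmd, shell=True)       # note: the example never checks this result either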

Example 24

Project: silvereye Source File: silvereye.py
Function: download_packages
  def downloadPackages(self):
    # Retrieve the RPMs for CentOS, Eucalyptus, and dependencies
    rpms = set()
    for groupList in self.doGroupLists():
      for x in groupList:
        if x.name in ['Core', 'X Window System', 'Desktop', 'Fonts']:
          rpms.update(x.packages)

    rpms.update(['centos-release', 'epel-release', 'euca2ools-release',
                 'authconfig', 'fuse-libs', 'gpm', 'libsysfs', 'mdadm',
                 'ntp', 'postgresql-libs', 'prelink', 'setools',
                 'system-config-network-tui', 'tzdata', 'tzdata-java',
                 'udftools', 'unzip', 'wireless-tools', 'livecd-tools',
                 'eucalyptus', 'eucalyptus-admin-tools', 'eucalyptus-cc',
                 'eucalyptus-cloud', 'eucalyptus-common-java',
                 'eucalyptus-console', 'eucalyptus-load-balancer-image', 
                 'eucalyptus-gl', 'eucalyptus-nc', 'eucalyptus-sc',
                 'eucalyptus-walrus', 'eucalyptus-release' ])

    # These are specifically for the EMI
    rpms.update(['cloud-init', 'system-config-securitylevel-tui',
                 'system-config-firewall-base', 'acpid'])

    # Add desktop bits.  Do we want a build flag to ignore this?
    rpms.update(['firefox'])

    # Useful tools
    rpms.update(['tcpdump', 'strace', 'man'])

    if self.distroversion == "6":
      rpms.update(['ntpdate', 'libvirt-client', 'elrepo-release', 
                   'iwl6000g2b-firmware', 'sysfsutils'])

    # Monitoring
    rpms.update(['nrpe', 'nagios-plugins-all', 'nagios'])

    # Download the base rpms
    self.logger.info("Retrieving Packages")

    if self.conf.yumvar['basearch'] == 'x86_64':
      self.conf.exclude.append('*.i?86')
    self.conf.assumeyes = 1
    if self.distroversion.startswith("6"):
      self.conf.releasever = self.distroversion[0]
      self.conf.plugins=1
    yumconf = os.path.join(self.builddir, 'yum.conf')
    self.conf.write(open(yumconf, 'w'))

    # TODO: convert this to API?
    if self.distroversion.startswith("6"):
      subprocess.call(['yumdownloader', 'centos-release'],
                      stdout=self.cmdout, stderr=self.cmdout)
      centospkg = glob.glob('centos-release-*')[0]
      subprocess.call(['rpm', '-iv', '--nodeps', '--justdb', '--root',
                           self.builddir, centospkg],
                      stdout=self.cmdout, stderr=self.cmdout)

    yumrepodir = os.path.join(self.builddir, 'etc', 'yum.repos.d')
    mkdir(yumrepodir)
    for repoid in ['base', 'updates', 'epel', 'elrepo', 'eucalyptus', 'euca2ools', 'console']:
      if self.repos.repos.has_key(repoid):
        if hasattr(self.repos.repos[repoid], 'cfg'):
          self.repos.repos[repoid].cfg.set(repoid, 'enabled', '1')
          self.repos.repos[repoid].cfg.write(open(os.path.join(yumrepodir, repoid + '.repo'), 'w'))
        else:
          repo = self.repos.repos[repoid]
          f = open(os.path.join(yumrepodir, repoid + '.repo'), 'w')
          f.write("[%s]\nenabled=%s\ngpgcheck=%s\n" % (repoid, repo.enabled, repo.gpgcheck))
          if repo.mirrorlist:
            f.write("mirrorlist=" + repo.mirrorlist)
          else:
            f.write("baseurl=" + repo.baseurl[0])
          f.close()
      elif repoid == 'console':
          # We don't mind if there's no console repository
          pass
      else:
        raise Exception('repo %s not configured' % repoid)

    self.logger.info("Downloading packages")

    if not os.path.exists(os.path.join(self.imgdir, 'base')):
        os.symlink(self.pkgdir, os.path.join(self.imgdir, 'base'))
    for path in [ 'console', 'euca2ools', 'epel', 'elrepo', 'eucalyptus', 'updates' ]:
        mkdir(os.path.join(self.imgdir, path))

    subprocess.call([os.path.join(self.basedir, 'scripts', 'yumdownloader'), 
                     '-c', yumconf,
                     '--resolve', '--installroot', self.builddir,
                     '--destdir', self.imgdir, '--splitbyrepo',
                     '--releasever', '6' ] + list(rpms),
                      stdout=self.cmdout, stderr=self.cmdout) 

    # Call again to dep close Packages dir from 6.3 base
    subprocess.call([os.path.join(self.basedir, 'scripts', 'yumdownloader'), 
                     '-c', yumconf,
                     '--disablerepo', 'updates',
                     '--resolve', '--installroot', self.builddir,
                     '--destdir', self.imgdir, '--splitbyrepo',
                     '--releasever', '6' ] + list(rpms),
                      stdout=self.cmdout, stderr=self.cmdout) 
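
The downloads above route all child output into self.cmdout via the stdout= and stderr= keyword arguments. A minimal sketch of that redirection, with a generic command and a hypothetical log path:

import subprocess

with open('download.log', 'w') as log:
    subprocess.call(['echo', 'retrieving packages'],
                    stdout=log, stderr=log)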

Example 25

Project: attention-lvcsr Source File: run_tests_in_batch.py
def run(stdout, stderr, argv, theano_nose, batch_size, time_profile,
        display_batch_output):

    # Setting aside current working directory for later saving
    sav_dir = os.getcwd()
    # The first argument is the called script.
    argv = argv[1:]

    # It seems safer to fully regenerate the list of tests on each call.
    if os.path.isfile('.noseids'):
        os.remove('.noseids')

    # Collect test IDs.
    print("""\
####################
# COLLECTING TESTS #
####################""")
    stdout.flush()
    stderr.flush()
    dummy_in = open(os.devnull)
    # We need to call 'python' on Windows, because theano-nose is not a
    # native Windows app; and it does not hurt to call it on Unix.
    # Using sys.executable, so that the same Python version is used.
    python = sys.executable
    rval = subprocess.call(
        ([python, theano_nose, '--collect-only', '--with-id']
         + argv),
        stdin=dummy_in.fileno(),
        stdout=stdout.fileno(),
        stderr=stderr.fileno())
    stdout.flush()
    stderr.flush()
    assert rval == 0
    noseids_file = '.noseids'

    with open(noseids_file, 'rb') as f:
        data = pickle.load(f)

    ids = data['ids']
    n_tests = len(ids)
    if n_tests == 0:
        raise Exception("0 test selected")
    assert n_tests == max(ids)

    # Standard batch testing is called for
    if not time_profile:
        failed = set()
        print("""\
###################################
# RUNNING TESTS IN BATCHES OF %s #
###################################""" % batch_size)
        # When `display_batch_output` is False, we suppress all output because
        # we want the user to focus only on the failed tests, which are re-run
        # (with output) below.
        dummy_out = open(os.devnull, 'w')
        for test_id in xrange(1, n_tests + 1, batch_size):
            stdout.flush()
            stderr.flush()
            test_range = list(range(test_id,
                                    min(test_id + batch_size, n_tests + 1)))
            cmd = ([python, theano_nose, '--with-id'] +
                   list(map(str, test_range)) +
                   argv)
            subprocess_extra_args = dict(stdin=dummy_in.fileno())
            if not display_batch_output:
                # Use quiet mode in nosetests.
                cmd.append('-q')
                # Suppress all output.
                subprocess_extra_args.update(dict(
                    stdout=dummy_out.fileno(),
                    stderr=dummy_out.fileno()))
            t0 = time.time()
            subprocess.call(cmd, **subprocess_extra_args)
            t1 = time.time()
            # Recover failed test indices from the 'failed' field of the
            # '.noseids' file. We need to do it after each batch because
            # otherwise this field may get erased. We use a set because it
            # seems like it is not systematically erased though, and we want
            # to avoid duplicates.
            with open(noseids_file, 'rb') as f:
                failed = failed.union(pickle.load(f)['failed'])

            print('%s%% done in %.3fs (failed: %s)' % (
                (test_range[-1] * 100) // n_tests, t1 - t0, len(failed)))
        # Sort for cosmetic purpose only.
        failed = sorted(failed)
        if failed:
            # Re-run only failed tests
            print("""\
################################
# RE-RUNNING FAILED TESTS ONLY #
################################""")
            stdout.flush()
            stderr.flush()
            subprocess.call(
                ([python, theano_nose, '-v', '--with-id']
                 + failed
                 + argv),
                stdin=dummy_in.fileno(),
                stdout=stdout.fileno(),
                stderr=stderr.fileno())
            stdout.flush()
            stderr.flush()
            return 0
        else:
            print("""\
####################
# ALL TESTS PASSED #
####################""")

    # Time-profiling is called for
    else:
        print("""\
########################################
# RUNNING TESTS IN TIME-PROFILING MODE #
########################################""")

        # finds first word of list l containing string s
        def getIndexOfFirst(l, s):
            for pos, word in enumerate(l):
                if s in word:
                    return pos

        # finds last word of list l containing string s
        def getIndexOfLast(l, s):
            for pos, word in enumerate(reversed(l)):
                if s in word:
                    return (len(l) - pos - 1)

        # iterating through tests
        # initializing master profiling list and raw log
        prof_master_nosort = []
        prof_rawlog = []
        dummy_out = open(os.devnull, 'w')
        path_rawlog = os.path.join(sav_dir, 'timeprof_rawlog')
        stamp = str(datetime.datetime.now()) + '\n\n'
        f_rawlog = open(path_rawlog, 'w')
        f_rawlog.write('TIME-PROFILING OF THEANO\'S NOSETESTS'
                       ' (raw log)\n\n' + stamp)
        f_rawlog.flush()

        stamp = str(datetime.datetime.now()) + '\n\n'
        fields = ('Fields: computation time; nosetests sequential id;'
                  ' test name; parent class (if any); outcome\n\n')
        path_nosort = os.path.join(sav_dir, 'timeprof_nosort')
        # probably this part can be extracted for function with many args
        with open(path_nosort, 'w') as f_nosort:
            # begin of saving nosort
            f_nosort.write('TIME-PROFILING OF THEANO\'S NOSETESTS'
                           ' (by sequential id)\n\n' + stamp + fields)
            f_nosort.flush()
            for test_floor in xrange(1, n_tests + 1, batch_size):
                for test_id in xrange(test_floor, min(test_floor + batch_size,
                                                     n_tests + 1)):
                    # Print the test we will start in the raw log to help
                    # debug tests that are too long.
                    f_rawlog.write("\n%s Will run test #%d %s\n" % (
                        time.ctime(), test_id, data["ids"][test_id]))
                    f_rawlog.flush()

                    p_out = output_subprocess_Popen(
                        ([python, theano_nose, '-v', '--with-id']
                         + [str(test_id)] + argv +
                         ['--disabdocstring']))
                        # the previous option calls a custom Nosetests plugin
                        # precluding automatic substitution of the doc string
                        # for the test name in the display
                        # (see class 'DisabDocString' in file theano-nose)

                    # recovering and processing data from pipe
                    err = p_out[1]
                    # print the raw log
                    f_rawlog.write(err)
                    f_rawlog.flush()

                    # parsing the output
                    l_err = err.split()
                    try:
                        pos_id = getIndexOfFirst(l_err, '#')
                        prof_id = l_err[pos_id]
                        pos_dot = getIndexOfFirst(l_err, '...')
                        prof_test = ''
                        for s in l_err[pos_id + 1: pos_dot]:
                            prof_test += s + ' '
                        if 'OK' in err:
                            pos_ok = getIndexOfLast(l_err, 'OK')
                            if len(l_err) == pos_ok + 1:
                                prof_time = float(l_err[pos_ok - 1][0:-1])
                                prof_pass = 'OK'
                            elif 'SKIP' in l_err[pos_ok + 1]:
                                prof_time = 0.
                                prof_pass = 'SKIPPED TEST'
                            elif 'KNOWNFAIL' in l_err[pos_ok + 1]:
                                prof_time = float(l_err[pos_ok - 1][0:-1])
                                prof_pass = 'OK'
                            else:
                                prof_time = 0.
                                prof_pass = 'FAILED TEST'
                        else:
                            prof_time = 0.
                            prof_pass = 'FAILED TEST'
                    except Exception:
                        prof_time = 0
                        prof_id = '#' + str(test_id)
                        prof_test = ('FAILED PARSING, see raw log for details'
                                     ' on test')
                        prof_pass = ''
                    prof_tuple = (prof_time, prof_id, prof_test, prof_pass)

                    # appending tuple to master list
                    prof_master_nosort.append(prof_tuple)

                    # write the no sort file
                    s_nosort = ((str(prof_tuple[0]) + 's').ljust(10) +
                     " " + prof_tuple[1].ljust(7) + " " +
                     prof_tuple[2] + prof_tuple[3] +
                     "\n")
                    f_nosort.write(s_nosort)
                    f_nosort.flush()

                print('%s%% time-profiled' % ((test_id * 100) // n_tests))
            f_rawlog.close()

            # sorting tests according to running-time
            prof_master_sort = sorted(prof_master_nosort,
                                      key=lambda test: test[0], reverse=True)

            # saving results to readable files
            path_sort = os.path.join(sav_dir, 'timeprof_sort')
            with open(path_sort, 'w') as f_sort:
                f_sort.write('TIME-PROFILING OF THEANO\'S NOSETESTS'
                             ' (sorted by computation time)\n\n' + stamp + fields)
                for i in xrange(len(prof_master_nosort)):
                    s_sort = ((str(prof_master_sort[i][0]) + 's').ljust(10) +
                         " " + prof_master_sort[i][1].ljust(7) + " " +
                         prof_master_sort[i][2] + prof_master_sort[i][3] +
                         "\n")
                    f_sort.write(s_sort)
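
The batch runner above silences nosetests by pointing the child's stdin, stdout, and stderr at os.devnull, passing raw descriptors via fileno(). A minimal sketch of that suppression pattern:

import os
import subprocess
import sys

dummy_in = open(os.devnull)
dummy_out = open(os.devnull, 'w')
subprocess.call([sys.executable, '-c', 'print("hidden")'],
                stdin=dummy_in.fileno(),
                stdout=dummy_out.fileno(),
                stderr=dummy_out.fileno())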

Example 26

Project: nototools Source File: shape_diff.py
    def find_rendered_diffs(self, font_size=256, render_path=None):
        """Find diffs of glyphs as rendered by harfbuzz + image magick."""

        hb_input_generator_a = hb_input.HbInputGenerator(self.font_a)
        hb_input_generator_b = hb_input.HbInputGenerator(self.font_b)

        a_png_file = tempfile.NamedTemporaryFile()
        a_png = a_png_file.name
        b_png_file = tempfile.NamedTemporaryFile()
        b_png = b_png_file.name
        cmp_png_file = tempfile.NamedTemporaryFile()
        cmp_png = cmp_png_file.name
        diffs_file = tempfile.NamedTemporaryFile()
        diffs_filename = diffs_file.name

        self.build_names()
        for name in self.names:
            class_a = self.gdef_a.get(name, GDEF_UNDEF)
            class_b = self.gdef_b.get(name, GDEF_UNDEF)
            if GDEF_MARK in (class_a, class_b) and class_a != class_b:
                self.stats['gdef_mark_mismatch'].append((
                    self.basepath, name, GDEF_LABELS[class_a],
                    GDEF_LABELS[class_b]))
                continue

            width_a = self.glyph_set_a[name].width
            width_b = self.glyph_set_b[name].width
            zwidth_a = width_a == 0
            zwidth_b = width_b == 0
            if zwidth_a != zwidth_b:
                self.stats['zero_width_mismatch'].append((
                    self.basepath, name, width_a, width_b))
                continue

            hb_args_a = hb_input_generator_a.input_from_name(name, pad=zwidth_a)
            hb_args_b = hb_input_generator_b.input_from_name(name, pad=zwidth_b)
            if hb_args_a != hb_args_b:
                self.stats['input_mismatch'].append((
                    self.basepath, name, hb_args_a, hb_args_b))
                continue

            # ignore unreachable characters
            if not hb_args_a:
                self.stats['untested'].append((self.basepath, name))
                continue

            features, text = hb_args_a

            # ignore null character
            if unichr(0) in text:
                continue

            with open(diffs_filename, 'a') as ofile:
                ofile.write('%s\n' % name)

            subprocess.call([
                'hb-view', '--font-size=%d' % font_size,
                '--output-file=%s' % a_png,
                '--features=%s' % ','.join(features), self.path_a, text])
            subprocess.call([
                'hb-view', '--font-size=%d' % font_size,
                '--output-file=%s' % b_png,
                '--features=%s' % ','.join(features), self.path_b, text])

            img_info = subprocess.check_output(['identify', a_png]).split()
            assert img_info[0] == a_png and img_info[1] == 'PNG'
            subprocess.call([
                'convert', '-gravity', 'center', '-background', 'black',
                '-extent', img_info[2], b_png, b_png])

            if render_path:
                glyph_filename = re.sub(r'([A-Z_])', r'\1_', name) + '.png'
                output_png = os.path.join(render_path, glyph_filename)
                # see for a discussion of this rendering technique:
                # https://github.com/googlei18n/nototools/issues/162#issuecomment-175885431
                subprocess.call([
                    'convert',
                    '(', b_png, '-colorspace', 'gray', ')',
                    '(', a_png, '-colorspace', 'gray', ')',
                    '(', '-clone', '0-1', '-compose', 'darken', '-composite', ')',
                    '-channel', 'RGB', '-combine', output_png])

            with open(diffs_filename, 'a') as ofile:
                subprocess.call(
                    ['compare', '-metric', 'AE', a_png, b_png, cmp_png],
                    stderr=ofile)
                ofile.write('\n')

        with open(diffs_filename) as ifile:
            lines = [l.strip() for l in ifile.readlines() if l.strip()]
        diffs = [(lines[i], lines[i + 1]) for i in range(0, len(lines), 2)]

        mismatched = {}
        for name, diff in diffs:
            if int(diff) != 0:
                mismatched[name] = int(diff)

        stats = self.stats['compared']
        for name, diff in mismatched.items():
            stats.append((diff, name, self.basepath))
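
One detail worth noting above: ImageMagick's 'compare -metric' reports its difference count on stderr, which is why the example passes stderr=ofile to collect it into the diffs file. A minimal sketch of capturing a command's stderr this way, with a generic command and a hypothetical file name:

import subprocess

with open('diffs.txt', 'a') as ofile:
    subprocess.call(['ls', 'no_such_file'], stderr=ofile)  # error text lands in diffs.txt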

Example 27

Project: fips Source File: project.py
def run(fips_dir, proj_dir, cfg_name, target_name, target_args, target_cwd) :
    """run a build target executable

    :param fips_dir:    absolute path of fips
    :param proj_dir:    absolute path of project dir
    :param cfg_name:    config name or pattern
    :param target_name: the target name
    :param target_args: command line arguments for build target
    :param target_cwd:  working directory or None
    """

    retcode = 10
    proj_name = util.get_project_name_from_dir(proj_dir)
    util.ensure_valid_project_dir(proj_dir)
    
    # load the config(s)
    configs = config.load(fips_dir, proj_dir, cfg_name)
    if configs :
        for cfg in configs :
            log.colored(log.YELLOW, "=== run '{}' (config: {}, project: {}):".format(target_name, cfg['name'], proj_name))

            # find deploy dir where executables live
            deploy_dir = util.get_deploy_dir(fips_dir, proj_name, cfg)
            if not target_cwd :
                target_cwd = deploy_dir

            if cfg['platform'] in ['emscripten', 'pnacl'] : 
                # special case: emscripten app
                if cfg['platform'] == 'emscripten' :
                    html_name = target_name + '.html'
                else :
                    html_name = target_name + '_pnacl.html'
                if util.get_host_platform() == 'osx' :
                    try :
                        subprocess.call(
                            'open http://localhost:8000/{} ; python {}/mod/httpserver.py'.format(html_name, fips_dir),
                            cwd = target_cwd, shell=True)
                    except KeyboardInterrupt :
                        return 0
                elif util.get_host_platform() == 'win' :
                    try :
                        cmd = 'cmd /c start http://localhost:8000/{} && python {}/mod/httpserver.py'.format(html_name, fips_dir)
                        subprocess.call(cmd, cwd = target_cwd, shell=True)
                    except KeyboardInterrupt :
                        return 0
                elif util.get_host_platform() == 'linux' :
                    try :
                        subprocess.call(
                            'xdg-open http://localhost:8000/{}; python {}/mod/httpserver.py'.format(html_name, fips_dir),
                            cwd = target_cwd, shell=True)
                    except KeyboardInterrupt :
                        return 0
                else :
                    log.error("don't know how to start HTML app on this platform")
            elif cfg['platform'] == 'android' :
                try :
                    adb_path = android.get_adb_path(fips_dir)
                    # Android: first install the apk...
                    cmd = '{} install -r {}/{}-debug.apk'.format(adb_path, deploy_dir, target_name)
                    subprocess.call(cmd, shell=True)
                    # ...then start the apk
                    cmd = '{} shell am start -n com.fips.{}/android.app.NativeActivity'.format(adb_path, target_name)
                    subprocess.call(cmd, shell=True)
                    # ...then run adb logcat
                    cmd = '{} logcat'.format(adb_path)
                    subprocess.call(cmd, shell=True)
                    return 0
                except KeyboardInterrupt :
                    return 0

            elif os.path.isdir('{}/{}.app'.format(deploy_dir, target_name)) :
                # special case: Mac app
                cmd_line = '{}/{}.app/Contents/MacOS/{}'.format(deploy_dir, target_name, target_name)
            else :
                cmd_line = '{}/{}'.format(deploy_dir, target_name) 
            if cmd_line :
                if target_args :
                    cmd_line += ' ' + ' '.join(target_args)
                try:
                    retcode = subprocess.call(args=cmd_line, cwd=target_cwd, shell=True)
                except OSError as e:
                    log.error("Failed to execute '{}' with '{}'".format(target_name, e.strerror))
    else :
        log.error("No valid configs found for '{}'".format(cfg_name))

    return retcode
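
Two details from the example above, sketched with placeholders: cwd= sets the child's working directory, and a KeyboardInterrupt around a long-lived child (here a throwaway HTTP server) is treated as a clean exit:

import subprocess

try:
    retcode = subprocess.call('python -m SimpleHTTPServer 8000',
                              cwd='/tmp', shell=True)
except KeyboardInterrupt:
    retcode = 0  # Ctrl-C on the server counts as success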

Example 28

Project: buildfox Source File: buildfox.py
Function: main
def main(*argv, **kwargs):
	# find out whether the user asked for help on specific flags, and slice off all arguments after the help flag
	arg_help = [sys.argv.index(v) for v in ["-h", "--help"] if v in sys.argv]
	arg_help = sys.argv[min(arg_help) + 1:] if arg_help else None
	if arg_help:
		lines = fox_core.split("\n")
		for arg in arg_help:
			# find stuff
			results = [index for index in range(0, len(lines)) if arg in lines[index]]
			# look behind/ahead
			results = [set([item for item in range(index - 1, index + 2) if item >= 0 and item < len(lines)]) for index in results]
			# merge context groups
			# so if we have [(0,1,2), (1,2,3)] we will have [(0,1,2,3)]
			merged_results = []
			while results:
				head = results[0]
				tail = results[1:]
				last_len = -1
				while len(head) > last_len:
					last_len = len(head)
					new_tail = []
					for rest in tail:
						if head.intersection(rest):
							head |= rest
						else:
							new_tail.append(rest)
					tail = new_tail
				merged_results.append(head)
				results = tail
			results = merged_results
			# merge strings
			results = "\n...\n".join(["\n".join([lines[item] for item in sorted(group)]) for group in results])
			# print results
			if results:
				print("results for %s:" % arg)
				print("...")
				print(results)
				print("...")
			else:
				print("no results for %s" % arg)
		exit(0)

	# parse arguments normally
	title = "buildfox ninja generator %s" % VERSION
	argsparser = argparse.ArgumentParser(description = title, add_help = False)
	argsparser.add_argument("-i", "--in", help = "input file", default = "build.fox")
	argsparser.add_argument("-o", "--out", help = "output file", default = "build.ninja")
	argsparser.add_argument("-w", "--workdir", help = "working directory")
	argsparser.add_argument("variables", metavar = "name=value", type = str, nargs = "*", help = "variables with values to setup", default = [])
	#argsparser.add_argument("-v", "--verbose", action = "store_true", help = "verbose output") # TODO
	argsparser.add_argument("--ide", help = "generate ide solution (vs, vs2012, vs2013, vs2015, xcode, make, qtcreator, cmake)", default = None, dest = "ide")
	argsparser.add_argument("--ide-prj", help = "ide project prefix", default = "build")
	argsparser.add_argument("--ide-env", help = "run provided command to set required environment before calling ninja from the ide, " +
		"use set NAME=VALUE form if you need to modify environment so it will work with all IDE's", default = None)
	argsparser.add_argument("--no-core", action = "store_false",
		help = "disable parsing fox core definitions", default = True, dest = "core")
	argsparser.add_argument("--no-env", action = "store_false",
		help = "disable environment discovery", default = True, dest = "env")
	argsparser.add_argument("-n", "--ninja-ide-gen", action = "store_true",
		help = "enables ninja ide generator mode (equal to --no-core --no-env)", default = False, dest = "ninja_ide_gen")
	# It won't be checked for real. Ninja will be run only if no arguments were passed.
	argsparser.add_argument("--just-generate", action = "store_true",
		help = "skips automatic ninja run", default = False, dest = "just_generate")
	argsparser.add_argument("--selftest", action = "store_true",
		help = "run self test", default = False, dest = "selftest")
	argsparser.add_argument("-v", "--ver", "--version", action = "version", version = title)
	argsparser.add_argument("-h", "--help", metavar = "REQUEST", type = str, nargs = "*",
		default = argparse.SUPPRESS, help = "look for request or show this help message and exit")
	args = vars(argsparser.parse_args())
	if "help" in args:
		argsparser.print_help()
		exit(0)

	if args.get("ninja_ide_gen"):
		args["core"] = False
		args["env"] = False
		args["in"] = "build.ninja" if args.get("in") == "build.fox" else args.get("in")
		args["out"] = ""

	if args.get("workdir"):
		os.chdir(args.get("workdir"))

	engine = Engine()

	if args.get("env"):
		env = discover()
		for name in sorted(env.keys()):
			engine.on_assign((name, env.get(name), "="))

	for var in args.get("variables"):
		parts = var.split("=")
		if len(parts) == 2:
			name, value = parts[0], parts[1]
			engine.on_assign((name, value, "="))
		else:
			raise SyntaxError("unknown argument '%s'. you should use name=value syntax to setup a variable" % var)

	if args.get("core"):
		engine.load_core(fox_core)

	if args.get("selftest"):
		fox_filename, ninja_filename, app_filename = selftest_setup()
		engine.load(fox_filename)
		engine.save(ninja_filename)
		result = not subprocess.call(["ninja", "-f", ninja_filename])
		if result:
			result = not subprocess.call(["./" + app_filename])
		if result:
			print("Selftest - ok")
			selftest_wipe()
		else:
			print("Selftest - failed")
			sys.exit(1)
	else:
		engine.load(args.get("in"))
		if len(args.get("out")):
			engine.save(args.get("out"))

		ide = args.get("ide")

		if ide in ["vs", "vs2012", "vs2013", "vs2015"]:
			if ide == "vs":
				ide = "vs" + engine.variables.get("toolset_msc_ver", "")
			gen_vs(
				engine.context.all_files,
				cxx_defines(engine.variables.get("defines", "")),
				cxx_includedirs(engine.variables.get("includedirs", "")),
				args.get("ide_prj"),
				ide,
				args.get("ide_env"))
		elif ide in ["xcode"]:
			gen_xcode(
				engine.context.all_files,
				cxx_includedirs(engine.variables.get("includedirs", "")),
				args.get("ide_prj"),
				args.get("in"),
				args.get("ide_env"),
				args.get("ninja_ide_gen"))
		elif ide in ["make"]:
			gen_make(
				args.get("in"),
				args.get("ide_env"),
				args.get("ninja_ide_gen"))
		elif ide in ["qtcreator"]:
			gen_qtcreator(
				engine.context.all_files,
				cxx_defines(engine.variables.get("defines", "")),
				cxx_includedirs(engine.variables.get("includedirs", "")),
				args.get("ide_prj"),
				args.get("in"),
				args.get("ide_env"),
				args.get("ninja_ide_gen"))
		elif ide in ["cmake"]:
			gen_cmake(
				engine.context.all_files,
				cxx_includedirs(engine.variables.get("includedirs", "")),
				args.get("ide_prj"),
				args.get("in"),
				args.get("ide_env"))
		elif ide is not None:
			raise ValueError("unknown ide '%s', available ide's : vs, vs2012, vs2013, vs2015, xcode, make, qtcreator, cmake" % ide)
	if len(sys.argv) == 1:
		sys.exit(subprocess.call("ninja" + (" -f " + args["out"] if len(args["out"]) else "")))
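
Two idioms from the example above, sketched assuming ninja is on PATH: 'not subprocess.call(...)' treats a zero exit code as boolean success, and sys.exit(subprocess.call(...)) forwards the child's exit status as the script's own:

import subprocess
import sys

ok = not subprocess.call(['ninja', '--version'])  # True iff the exit code is 0
if not ok:
    sys.exit(1)
sys.exit(subprocess.call('ninja', shell=True))    # forward ninja's exit status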

Example 29

Project: pyExifToolGUI Source File: renaming.py
def run_rename_photos(self, work_on, qApp):
    '''
    Examples
    D:/Datadir/python/exiftool.exe "-FileName<${CreateDate}_pipo%-.3nc.%e"  -d %Y%m%d "D:\Datadir\fototest\testje"
    20121114_pipo-001.jpg
    20121114_pipo-002.jpg
    20121114_pipo-003.jpg
    20121114_pipo-004.jpg

    D:/Datadir/python/exiftool.exe "-FileName<${CreateDate}_pipo.%e"  -d %Y%m%d%%-.3nc "D:\Datadir\fototest\testje"
    20121114-001_pipo.jpg
    20121114-002_pipo.jpg
    20121114-003_pipo.jpg
    20121114-004_pipo.jpg

    D:/Datadir/python/exiftool.exe "-FileName<pipo_${Exif:Model}%-.2c.%e" "D:\Datadir\fototest\testje"
    pipo_DMC-TZ30-00.jpg
    pipo_DMC-TZ30-01.jpg
    pipo_DMC-TZ30-02.jpg
    pipo_DMC-TZ30-03.jpg

    D:/Datadir/python/exiftool.exe "-FileName<pipo_${CreateDate}%-.2nc.%e" -d %Y%m%d "D:\Datadir\fototest\testje"
    pipo_20121114-01.jpg
    pipo_20121114-02.jpg
    pipo_20121114-03.jpg
    pipo_20121114-04.jpg

    D:/Datadir/python/exiftool.exe "-FileName<pipo_${Exif:Model}.%e" -d%%-.2c "D:\Datadir\fototest\testje"
    does not work!
    '''
    # build our exiftoolparams string
    # exiftoolparams = "'-FileName<" + self.prefix + "_" + self.suffix + ".%le' " + self.prefixformat + " " + self.suffixformat + "-." + self.combobox_digits.currenttext() + "nc" + self.sourcefolder + "/*"
    exiftoolparams = "'-FileName<" + self.prefix
    if not self.rename_photos_dialog.radioButton_suffix_donotuse.isChecked():
        exiftoolparams += "_" + self.suffix
    print("self.fulldatetime " + str(self.fulldatetime))
    if self.fulldatetime == True: 
        # This means that the autonumber should only work on images that have the same full datetime
        exiftoolparams += "%-" + self.rename_photos_dialog.comboBox_digits.currentText() + self.startcounting
    else:
        exiftoolparams += "%-." + self.rename_photos_dialog.comboBox_digits.currentText() + self.startcounting
    print("numbering: exiftoolparams " + exiftoolparams)
    # Do everything split for a prefix as date(time) vs. string; no combined actions, which is much simpler
    if self.prefixformat != "":
        # This means that the prefix is a date(time)
        exiftoolparams += self.rename_extension + "' " + self.prefixformat
        # both lines above mean : prefix_suffix_number.extension
    else:
        # this means that we use a string instead of date(time) as prefix
        # if self.prefixformat is empty we need to move the "counter"
        exiftoolparams += self.rename_extension + "'"
        if self.suffixformat != "":
            exiftoolparams += " " + self.suffixformat

    # now start working and detect which images we use
    if work_on == "nothing_to_work_with":
        # This should already have been dealt with earlier, but in case I did something stupid we simply exit this function
        return
    elif work_on == "main_screen_selection":
        # we use the images that were selected from the main screen
        print("we use the images that were selected from the main screen")
        selected_rows = self.MaintableWidget.selectedIndexes()
        #exiftoolparams = "'-FileName<" + self.prefix + "_" + self.suffix + ".%le' " + self.prefixformat + " " + self.suffixformat + "-." + self.combobox_digits.currenttext() + "nc" + self.sourcefolder + "/*"
        rowcounter = 0
        total_rows = len(selected_rows)
        self.progressbar.setRange(0, total_rows)
        self.progressbar.setValue(0)
        self.progressbar.show()
        rows = []
        selected_images = ""
        message_images = ""
        qApp.processEvents()
        for selected_row in selected_rows:
            selected_row = str(selected_row)
            selected_row = selected_row.replace("<PySide.QtCore.QModelIndex(",'')
            selected_row, tail = re.split(',0x0',selected_row)
            #print str(selected_row)
            row, column = re.split(',',selected_row)
            if row not in rows:
                rows.append(row)
                selected_image = "\"" + self.fileNames[int(row)] + "\""
                selected_images += " " + selected_image + " "
                message_images += " " + os.path.basename(selected_image) + " "
                self.progressbar.setValue(rowcounter)
                #   p = subprocess.call(args)
        parameters = ' -fileorder datetimeoriginal# ' + exiftoolparams + ' ' + selected_images
        print('parameters ' + parameters)
        self.statusbar.showMessage("Renaming " + message_images)
        qApp.processEvents()
        if self.OSplatform in ("Windows", "win32"):
            parameters = parameters.replace("/", "\\")
            parameters = parameters.replace("'", "\"")
            args = '"' + self.exiftoolprog + '" ' + parameters
            print(args)
            p = subprocess.call(args, shell=True)
        else:
            #parameters = parameters.replace("'", "\"")
            command_line = self.exiftoolprog + ' -fileorder datetimeoriginal# ' + exiftoolparams + ' ' + selected_images
            args = shlex.split(command_line)
            print("command_line " + command_line)
            #p = subprocess.call(command_line)
            p = subprocess.call(args)
        self.statusbar.showMessage("Finished renaming")
        qApp.processEvents()
        self.progressbar.hide()
        self.statusbar.showMessage("")
    elif work_on == "rename_source_folder":
        # work on all images in the source folder and do it in this function self
        #print "work on all images in the source folder"
        #print self.rename_photos_dialog.LineEdit_rename_source_folder.text()
        self.statusbar.showMessage("Renaming all images in: " + self.rename_photos_dialog.LineEdit_rename_source_folder.text())
        parameters = ' -fileorder datetimeoriginal# ' + exiftoolparams + ' "' + self.rename_photos_dialog.LineEdit_rename_source_folder.text() + '"'
        if self.OSplatform in ("Windows", "win32"):
            parameters = parameters.replace("/", "\\")
            parameters = parameters.replace("'", "\"")
            args = '"' + self.exiftoolprog + '" ' + parameters
            print(args)
            p = subprocess.call(args, shell=True)
        else:
            pathofimages = self.rename_photos_dialog.LineEdit_rename_source_folder.text().replace(" ", "\\ ")
            command_line = self.exiftoolprog + ' ' + exiftoolparams + ' ' + pathofimages
            #print "command_line " + command_line
            p = subprocess.call(command_line, shell=True)
        self.statusbar.showMessage("Finished renaming all images in: " + self.rename_photos_dialog.LineEdit_rename_source_folder.text())
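
Note: the function above builds the exiftool invocation two different ways: on Windows it hands a fully quoted string to the shell, elsewhere it splits the command with shlex so no shell is involved. A hedged sketch of that branch, with "exiftool -ver" as a stand-in command:

import shlex
import subprocess
import sys

command_line = 'exiftool -ver'  # stand-in command for illustration
if sys.platform == 'win32':
    # Windows: pass the whole string through the shell.
    ret = subprocess.call(command_line, shell=True)
else:
    # POSIX: split into an argv list and execute directly.
    ret = subprocess.call(shlex.split(command_line))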

Example 30

Project: cloudpulse Source File: file_check_test.py
    def perform_file_permission_check(self, input_params):
        try:
            print ("Executing the test ", input_params.get('testcase_name'))
            final_result = []
            final_status = []
            final_msg = []
            file_info_dir = input_params['global_data']['file_info_dir']
            is_containerized = input_params['global_data']['is_containerized']
            perform_on = input_params['perform_on']
            if perform_on is None or not perform_on:
                print ("Perform on should be mentioned either at test level" +
                       " or test case level")
                msg = {'message': 'Perform on should be mentioned either at' +
                       ' test level or test case level'}
                return (404, json.dumps([msg]), [])
            os_hostobj_list = input_params['os_host_list']
            base_dir = os.path.dirname(cloudpulse.__file__)
            baseline_file = input_params['baseline_file']
            flist = [base_dir +
                     "/scenario/plugins/security_pulse/testcase/" +
                     "remote_file_check.py",
                     base_dir + "/scenario/plugins/security_pulse/testcase/" +
                     "remote_filecredentials.py",
                     file_info_dir + "dir_list",
                     file_info_dir + "os_baseline"]

            def ConsolidateResults(flist, container_name=None):
                result = ans_runner.execute_cmd(
                    "python " +
                    file_info_dir +
                    "remote_file_check.py ",
                    file_list=flist, container_name=container_name)
                Result = ans_runner.get_parsed_ansible_output(result)
                final_status.append(Result[0])
                final_result.extend(ast.literal_eval(Result[1]))
                final_msg.extend(Result[2])

            for p in perform_on:
                for obj in os_hostobj_list:
                    ans_runner = ansible_runner([obj])
                    if obj.getRole() == p:
                        os_dir = input_params[p + '_dir']
                        all_baseline = ast.literal_eval(
                            open(baseline_file).read())
                        baseline = all_baseline[p]
                        open(
                            file_info_dir +
                            'os_baseline',
                            'w').write(
                            str(baseline))

                        # if container, make dir list and copy to container
                        if is_containerized:
                            for container, os_dir in os_dir.items():
                                self.createDirList(
                                    os_dir,
                                    file_info_dir)
                                ConsolidateResults(
                                    flist,
                                    container_name=container)
                                subprocess.call([
                                    'rm',
                                    file_info_dir +
                                    'dir_list'])

                        else:
                            os_dir_list = []
                            [os_dir_list.extend(d) for d in os_dir.values()]
                            # os_dir = os_dir.values()
                            self.createDirList(os_dir_list, file_info_dir)
                            # flist.append("/tmp/sec_hc/dir_list")
                            ConsolidateResults(flist)
            subprocess.call([
                'rm', '-rf',
                file_info_dir +
                'os_baseline',
                file_info_dir +
                'output'])
            subprocess.call([
                'rm',
                file_info_dir +
                'dir_list'])
            if 404 in final_status:
                return (404, final_result, final_msg)
            else:
                return (200, final_result, final_msg)
        except Exception as e:
            print ("exception in perform_file_permission_check is--", e)
            subprocess.call([
                'rm', '-rf',
                file_info_dir +
                'os_baseline',
                file_info_dir +
                'output'])
            subprocess.call([
                'rm',
                file_info_dir +
                'dir_list'])
            print (
                "Exception occurred in executing" +
                " perform_file_permission_check")
            message = {
                'message': 'Test case execution failed due to some exception'}
            return (404, json.dumps([message]), [])
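
Note: the cleanup above shells out to rm with an argument list. A pure-Python equivalent (a sketch, not what the project does) would use os.remove and tolerate missing files; the paths here are hypothetical:

import os

for path in ('/tmp/sec_hc/os_baseline', '/tmp/sec_hc/dir_list'):  # hypothetical paths
    try:
        os.remove(path)
    except OSError:
        pass  # file already gone; rm would have printed an error instead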

Example 31

Project: myhdl Source File: _verify.py
Function: call
    def __call__(self, func, *args, **kwargs):

        if not self.simulator:
            raise ValueError("No simulator specified")
        if self.simulator not in _simulators:
            raise ValueError("Simulator %s is not registered" % self.simulator)
        hdlsim = _simulators[self.simulator]
        hdl = hdlsim.hdl
        if hdl == 'Verilog' and toVerilog.name is not None:
            name = toVerilog.name
        elif hdl == 'VHDL' and toVHDL.name is not None:
            name = toVHDL.name
        elif isinstance(func, _Block):
            name = func.func.__name__
        else:
            warnings.warn(
                "\n    analyze()/verify(): Deprecated usage: See http://dev.myhdl.org/meps/mep-114.html", stacklevel=2)
            try:
                name = func.__name__
            except:
                raise TypeError(str(type(func)))

        vals = {}
        vals['topname'] = name
        vals['unitname'] = name.lower()
        vals['version'] = _version

        analyze = hdlsim.analyze % vals
        elaborate = hdlsim.elaborate
        if elaborate is not None:
            elaborate = elaborate % vals
        simulate = hdlsim.simulate % vals
        skiplines = hdlsim.skiplines
        skipchars = hdlsim.skipchars
        ignore = hdlsim.ignore

        if isinstance(func, _Block):
            if hdl == "VHDL":
                inst = func.convert(hdl='VHDL')
            else:
                inst = func.convert(hdl='Verilog')
        else:
            if hdl == "VHDL":
                inst = toVHDL(func, *args, **kwargs)
            else:
                inst = toVerilog(func, *args, **kwargs)

        if hdl == "VHDL":
            if not os.path.exists("work"):
                os.mkdir("work")
        if hdlsim.name in ('vlog', 'vcom'):
            if not os.path.exists("work_vsim"):
                try:
                    subprocess.call("vlib work_vlog", shell=True)
                    subprocess.call("vlib work_vcom", shell=True)
                    subprocess.call("vmap work_vlog work_vlog", shell=True)
                    subprocess.call("vmap work_vcom work_vcom", shell=True)
                except:
                    pass

        # print(analyze)
        ret = subprocess.call(analyze, shell=True)
        if ret != 0:
            print("Analysis failed", file=sys.stderr)
            return ret

        if self._analyzeOnly:
            print("Analysis succeeded", file=sys.stderr)
            return 0

        f = tempfile.TemporaryFile(mode='w+t')
        sys.stdout = f
        sim = Simulation(inst)
        sim.run()
        sys.stdout = sys.__stdout__
        f.flush()
        f.seek(0)

        flines = f.readlines()
        f.close()
        if not flines:
            print("No MyHDL simulation output - nothing to verify", file=sys.stderr)
            return 1

        if elaborate is not None:
            # print(elaborate)
            ret = subprocess.call(elaborate, shell=True)
            if ret != 0:
                print("Elaboration failed", file=sys.stderr)
                return ret

        g = tempfile.TemporaryFile(mode='w+t')
        # print(simulate)
        ret = subprocess.call(simulate, stdout=g, shell=True)
    #    if ret != 0:
    #        print "Simulation run failed"
    #        return
        g.flush()
        g.seek(0)

        glines = g.readlines()[skiplines:]
        if ignore:
            for p in ignore:
                glines = [line for line in glines if not line.startswith(p)]
        # limit diff window to the size of the MyHDL output
        # this is a hack to remove an eventual simulator postamble
        if len(glines) > len(flines):
            glines = glines[:len(flines)]
        glines = [line[skipchars:] for line in glines]
        flinesNorm = [line.lower() for line in flines]
        glinesNorm = [line.lower() for line in glines]
        g = difflib.unified_diff(flinesNorm, glinesNorm, fromfile='MyHDL', tofile=hdlsim.name)

        MyHDLLog = "MyHDL.log"
        HDLLog = hdlsim.name + ".log"
        try:
            os.remove(MyHDLLog)
            os.remove(HDLLog)
        except:
            pass

        s = "".join(g)
        f = open(MyHDLLog, 'w')
        g = open(HDLLog, 'w')
        d = open('diff.log', 'w')
        f.writelines(flines)
        g.writelines(glines)
        d.write(s)
        f.close()
        g.close()
        d.close()

        if not s:
            print("Conversion verification succeeded", file=sys.stderr)
        else:
            print("Conversion verification failed", file=sys.stderr)
            # print >> sys.stderr, s ,
            return 1

        return 0
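
Note: the simulate step above captures the simulator's stdout by passing an open file object as the stdout= argument of subprocess.call. A minimal sketch of the same capture, with a POSIX echo command as a stand-in:

import subprocess
import tempfile

g = tempfile.TemporaryFile(mode='w+t')
subprocess.call('echo hello', stdout=g, shell=True)
g.flush()
g.seek(0)
print(g.read())  # -> hello
g.close()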

Example 32

Project: Wallace Source File: command_line.py
def deploy_sandbox_shared_setup(verbose=True, app=None, web_procs=1):
    """Set up Git, push to Heroku, and launch the app."""
    if verbose:
        out = None
    else:
        out = open(os.devnull, 'w')

    (id, tmp) = setup_experiment(debug=False, verbose=verbose, app=app)

    # Log in to Heroku if we aren't already.
    log("Making sure that you are logged in to Heroku.")
    ensure_heroku_logged_in()

    # Change to temporary directory.
    cwd = os.getcwd()
    os.chdir(tmp)

    # Commit Heroku-specific files to tmp folder's git repo.
    cmds = ["git init",
            "git add --all",
            'git commit -m "Experiment ' + id + '"']
    for cmd in cmds:
        subprocess.call(cmd, stdout=out, shell=True)
        time.sleep(0.5)

    # Load psiTurk configuration.
    config = PsiturkConfig()
    config.load_config()

    # Initialize the app on Heroku.
    log("Initializing app on Heroku...")
    subprocess.call(
        "heroku apps:create " + id +
        " --buildpack https://github.com/thenovices/heroku-buildpack-scipy",
        stdout=out,
        shell=True)

    database_size = config.get('Database Parameters', 'database_size')

    try:
        if config.getboolean('Easter eggs', 'whimsical'):
            whimsical = "true"
        else:
            whimsical = "false"
    except:
        whimsical = "false"

    # Set up postgres database and AWS/psiTurk environment variables.
    cmds = [
        "heroku addons:create heroku-postgresql:{}".format(database_size),

        "heroku pg:wait",

        "heroku addons:create rediscloud:250",

        "heroku addons:create papertrail",

        "heroku config:set HOST=" +
        id + ".herokuapp.com",

        "heroku config:set aws_access_key_id=" +
        config.get('AWS Access', 'aws_access_key_id'),

        "heroku config:set aws_secret_access_key=" +
        config.get('AWS Access', 'aws_secret_access_key'),

        "heroku config:set aws_region=" +
        config.get('AWS Access', 'aws_region'),

        "heroku config:set psiturk_access_key_id=" +
        config.get('psiTurk Access', 'psiturk_access_key_id'),

        "heroku config:set psiturk_secret_access_id=" +
        config.get('psiTurk Access', 'psiturk_secret_access_id'),

        "heroku config:set auto_recruit=" +
        config.get('Experiment Configuration', 'auto_recruit'),

        "heroku config:set wallace_email_username=" +
        config.get('Email Access', 'wallace_email_address'),

        "heroku config:set wallace_email_key=" +
        config.get('Email Access', 'wallace_email_password'),

        "heroku config:set heroku_email_address=" +
        config.get('Heroku Access', 'heroku_email_address'),

        "heroku config:set heroku_password=" +
        config.get('Heroku Access', 'heroku_password'),

        "heroku config:set whimsical=" + whimsical,
    ]
    for cmd in cmds:
        subprocess.call(cmd + " --app " + id, stdout=out, shell=True)

    # Set the notification URL in the config file to the notifications URL.
    config.set(
        "Server Parameters",
        "notification_url",
        "http://" + id + ".herokuapp.com/notifications")

    # Set the database URL in the config file to the newly generated one.
    log("Saving the URL of the postgres database...")
    db_url = subprocess.check_output(
        "heroku config:get DATABASE_URL --app " + id, shell=True)
    config.set("Database Parameters", "database_url", db_url.rstrip())
    subprocess.call("git add config.txt", stdout=out, shell=True)
    time.sleep(0.25)
    subprocess.call(
        'git commit -m "Save URLs for database and notifications"',
        stdout=out,
        shell=True)
    time.sleep(0.25)

    # Launch the Heroku app.
    log("Pushing code to Heroku...")
    subprocess.call("git push heroku HEAD:master", stdout=out,
                    stderr=out, shell=True)

    scale_up_dynos(id)

    time.sleep(8)

    # Launch the experiment.
    log("Launching the experiment on MTurk...")
    subprocess.call(
        'curl --data "" http://{}.herokuapp.com/launch'.format(id),
        shell=True)

    time.sleep(8)

    url = subprocess.check_output("heroku logs --app " + id + " | sort | " +
                                  "sed -n 's|.*URL:||p'", shell=True)

    log("URLs:")
    click.echo(url)

    # Return to the branch whence we came.
    os.chdir(cwd)

    log("Completed deployment of experiment " + id + ".")
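
Note: the verbose flag above either lets every subprocess.call inherit the parent's stdout (out=None) or silences it by pointing stdout at os.devnull. A hedged sketch of that toggle:

import os
import subprocess

verbose = False
out = None if verbose else open(os.devnull, 'w')
subprocess.call('echo this line is suppressed', stdout=out, shell=True)
if out is not None:
    out.close()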

Example 33

Project: RPi-Tron-Radio Source File: tron-radio.py
Function: button
def button(number):
        global menu
        if menu == 1:
            if number == 1:
                subprocess.call('mpc play' , shell=True)
                #print "play"

            if number == 2:
                subprocess.call('mpc pause' , shell=True)
                #print "pause"

            if number == 3:
                subprocess.call('mpc volume +5' , shell=True)
                
                #print "vol +x"
                 

            if number == 4:
                subprocess.call('mpc volume 0' , shell=True)
                #print "vol 0"

            if number == 5:
                subprocess.call('mpc prev' , shell=True)
                #print "prev"

            if number == 6:
                subprocess.call('mpc next' , shell=True)
                #print "next"

            if number == 7:
                subprocess.call('mpc volume -5' , shell=True)
                #print "vol -x"

            if number == 8:
                #print "go to menu 2"
                menu = 2
                update_screen()
                return

        if menu == 2:
            if number == 1:
                favorite()
                

            if number == 2:
                #print "switch skin"
                global skin_number
                skin_number = skin_number+1
                
                
                #print skin_number
                update_screen()

            if number == 3:
                #print "run in background"
                
                pygame.quit()
                sys.exit()

            if number == 4:
                #print "quit radio"
                subprocess.call('mpc stop', shell=True)
                pygame.quit()
                sys.exit()

            if number == 5:
                print "power off"
                poweroff()

            if number == 6:
                print "reboot"
                reboot()

            if number == 7:
                #print "update screen"
                update_screen()
                

            if number == 8:
                #print "go to menu 1"
                menu = 1
                update_screen()
                return
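
Note: the if-chain above maps button numbers to fixed mpc invocations. A table-driven alternative (a sketch, not the project's code) keeps the mapping for menu 1 in a single dict:

import subprocess

MPC_COMMANDS = {
    1: 'mpc play', 2: 'mpc pause', 3: 'mpc volume +5', 4: 'mpc volume 0',
    5: 'mpc prev', 6: 'mpc next', 7: 'mpc volume -5',
}

def button(number):
    cmd = MPC_COMMANDS.get(number)
    if cmd is not None:
        subprocess.call(cmd, shell=True)  # requires mpc on PATH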

Example 34

Project: mkmov Source File: twod.py
    def lights(self,minvar=None,maxvar=None):
        """function to do some sanity checks on the files and find out where the time dim is.
        
        """
        _lg.info("Lights! Looking at your netCDF files...")
        var_timedims=[]

        #create bias files
        if self.arguments['--bias']:
            #following example in http://linux.die.net/man/1/ncdiff
            ncout='ncra '+' '.join(self.filelist)+' '+self.workingfolder+'mean.nc'
            _lg.info("Creating mean file: " + ncout)
            subprocess.call(ncout,shell=True)

            ncout='ncwa -O -a '+self.arguments['--bias']+' '+self.workingfolder+'mean.nc '+self.workingfolder+'mean_notime.nc'
            _lg.info("Removing time dimension from mean file: " + ncout)
            subprocess.call(ncout,shell=True)

            difffol=self.workingfolder+'difffiles/'
            mkdir_sub(self.workingfolder+'difffiles/')
            newfilelist=[]
            cnt=0
            for f in self.filelist:
                ncout='ncdiff '+' '+f+' '+self.workingfolder+'mean_notime.nc '+difffol+os.path.basename(f)[:-3]+'_diff_'+str(cnt).zfill(5)+'.nc'
                _lg.info("Creating anomaly file: " + ncout)
                subprocess.call(ncout,shell=True)
                newfilelist.append(difffol+os.path.basename(f)[:-3]+'_diff_'+str(cnt).zfill(5)+'.nc')
                cnt+=1

            self.filelist=newfilelist
                
        #error check the files: are they all similar?
        for f in self.filelist:
            if not os.path.exists(f):
                _lg.error("Input file: " + str(os.path.basename(f))  + " does not exist.")
                sys.exit("Input file: " + str(os.path.basename(f))  + " does not exist.")

            ifile=Dataset(f, 'r')

            if self.variable_name not in ifile.variables.keys():
                _lg.error("Variable: " + str(self.variable_name) + " does not exist in netcdf4 file.")
                _lg.error("Options are: " + str(ifile.variables.keys()) )
                sys.exit("Variable: " + str(self.variable_name) + " does not exist in netcdf4 file.")

            #what shape is the passed variable? Do some error checks
            self.var_len=len(ifile.variables[self.variable_name].shape)
            if self.var_len==2:
                if len(self.arguments['FILE_NAME'])==1:
                    #h'm haven't actually tried this! 
                    _lg.error("Variable: " + str(self.variable_name) + " has only two dimensions and you only fed mkmov one file so I don't know where your time dimension is.")
                    sys.exit()
                elif len(self.arguments['FILE_NAME'])>1: #have tested this on AVISO works okay
                    pass
                    
            #the 'obvious' case; one file with one time dim and two spatial dims
            if self.var_len==3: 
                pass

            #tricky, which dims are time/random_dim/spatial1/spatial2?
            if self.var_len==4:
                if self.arguments['--4dvar']:
                    _lg.debug("Variable: " + str(self.variable_name) + " has four dimensions. Following your argument, we will plot depth level: "+self.arguments['--4dvar'] )
                    self.depthlvl=int(self.arguments['--4dvar'])
                else:
                    _lg.warning("Variable: " + str(self.variable_name) + " has four dimensions. MkMov will assume the second dim is depth/height and plot the first level.")
                    self.depthlvl=0

            ifile_dim_keys=list(dict(ifile.dimensions).keys())

            #find unlimited dimension
            findunlim=[ifile.dimensions[dim].isunlimited() for dim in ifile_dim_keys]
            dim_unlim_num=[i for i, x in enumerate(findunlim) if x]
            if len(dim_unlim_num)==0:
                _lg.warning("Input file: " + str(os.path.basename(f))  + " has no unlimited dimension, which dim is time?")
                # sys.exit("Input file: " + str(os.path.basename(f))  + " has no unlimited dimension, which dim is time?")
            elif len(dim_unlim_num)>1:
                _lg.warning("Input file: " + str(os.path.basename(f))  + " has more than one unlimited dimension.")
                # sys.exit("Input file: " + str(os.path.basename(f))  + " has more than one unlimited dimension.")
            else:
                timename=ifile_dim_keys[dim_unlim_num[0]]
                var_timedim=[i for i, x in enumerate(ifile.variables[self.variable_name].dimensions) if x==timename][0]
                var_timedims.append(var_timedim)
                ifile.close()
                continue #NOTE I'm a continue!

            #okay so we didn't find time as an unlimited dimension, perhaps it has a sensible name?
            if 'time' in ifile_dim_keys:
                timename='time'
            elif 't' in ifile_dim_keys:
                timename='t'
            elif 'Time' in ifile_dim_keys:
                timename='Time'
            else:
                timename=''

            if timename!='':
                if self.var_len>2:
                    _lg.info("Good news, we think we found the time dimension; it's called: " + timename )
                    var_timedim=[i for i, x in enumerate(ifile.variables[self.variable_name].dimensions) if x==timename][0]
                    var_timedims.append(var_timedim)


            # the case where there is only two dimensions assumed to vary across each file (e.g. mwf-ers2 files)
            if self.var_len==2:
                var_timedims=[-1]

            ifile.close()

        #check all time dimensions are in the same place across all files..
        if var_timedims[1:]==var_timedims[:-1]:
            self.timedim=var_timedims[0]
        else:
            _lg.error("(Unlimited) 'time' dimension was not the same across all files, fatal error.")
            sys.exit("(Unlimited) 'time' dimension was not the same across all files, fatal error.")

        #get max and min values for timeseries. This is expensive :(
        if (minvar is None) and (maxvar is None):
            mins=[]
            maxs=[]
            for f in self.filelist:
                ifile=Dataset(f, 'r')
                name_of_array=self.getdata(ifile)

                mins.append(np.min(name_of_array))
                maxs.append(np.max(name_of_array))
                ifile.close()

            self.minvar=np.min(mins)
            self.maxvar=np.max(maxs)

        if (minvar is not None) or (maxvar is not None):
            #user specified the range
            self.minvar=float(minvar)
            self.maxvar=float(maxvar)

        if (self.arguments['--x'] is not None) and (self.arguments['--y'] is not None):
            ifile=Dataset(self.filelist[0], 'r') #they should all be the same.
            xvar=ifile.variables[self.arguments['--x']][:]
            yvar=ifile.variables[self.arguments['--y']][:]
            self.x,self.y=np.meshgrid(xvar,yvar)
            ifile.close()
        elif (self.arguments['--x2d'] is not None) and (self.arguments['--y2d'] is not None):
            ifile=Dataset(self.filelist[0], 'r') #they should all be the same.
            self.x=ifile.variables[self.arguments['--x2d']][:]

            if self.arguments['--fixdateline']:
                #fix the dateline
                for index in np.arange(np.shape(self.x)[0]):
                    if len(np.where(np.sign(self.x[index,:])==-1)[0])==0:
                        _lg.warning("MkMov couldn't find your dateline, skipping the 'fix'.")
                        break

                    start=np.where(np.sign(self.x[index,:])==-1)[0][0]
                    self.x[index,start:]=self.x[index,start:]+360

            self.y=ifile.variables[self.arguments['--y2d']][:]
            ifile.close()
        else:
            ifile=Dataset(self.filelist[0], 'r')
            name_of_array=np.shape(ifile.variables[self.variable_name])
            if self.var_len==4:
                name_of_array=[name_of_array[0]]+[e for e in name_of_array[2:]]

            if self.timedim==-1:
                self.x,self.y=np.meshgrid(np.arange(name_of_array[1]),\
                        np.arange(name_of_array[0]))
            else:
                self.x,self.y=np.meshgrid(np.arange(name_of_array[self.timedim+2]),\
                        np.arange(name_of_array[self.timedim+1]))

            ifile.close()

        return
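
Note: the --bias branch above chains the NCO tools (ncra, ncwa, ncdiff) by concatenating command strings and running them with shell=True. A minimal sketch of that pattern with hypothetical file names; it requires the NCO tools on PATH:

import subprocess

filelist = ['jan.nc', 'feb.nc']        # hypothetical input files
workingfolder = '/tmp/mkmov/'          # hypothetical output folder
ncout = 'ncra ' + ' '.join(filelist) + ' ' + workingfolder + 'mean.nc'
subprocess.call(ncout, shell=True)     # time-average the inputs into mean.nc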

Example 35

Project: mantaray Source File: exifdata_mr.py
def exifdata_mr(item_to_process, case_number, root_folder_path, evidence):
	print("The item to process is: " + item_to_process)
	print("The case_name is: " + case_number)
	print("The output folder is: " + root_folder_path)
	print("The evidence to process is: " + evidence)

	evidence_no_quotes = evidence

	evidence = '"' + evidence + '"'

	#get datetime
	now = datetime.datetime.now()

	#set Mount Point
	mount_point = "/mnt/" + now.strftime("%Y-%m-%d_%H_%M_%S_%f")
	
	#create output folder path
	folder_path = root_folder_path + "/" + "EXIF_Tool"
	check_for_folder(folder_path, "NONE")
	

	#open a log file for output
	log_file = folder_path + "/EXIF_Tool_logfile.txt"
	outfile = open(log_file, 'wt+')

	#set up tuple holding all of the file extensions exiftool can process
	valid_extensions = ('3FR', '3G2', '3GP2', '3GP', '3GPP', 'ACR', 'AFM', 'ACFM', 'AMFM', 'AI', 'AIT', 'AIFF', 'AIF', 'AIFC', 'APE', 'ARW', 'ASF', 'AVI', 'BMP', 'DIB', 'BTF', 'TIFF', 'TIF', 'CHM', 'COS', 'CR2', 'CRW', 'CIFF', 'CS1', 'DCM', 'DC3', 'DIC', 'DICM', 'DCP', 'DCR', 'DFONT', 'DIVX', 'DJVU', 'DJV', 'DNG', 'DOC', 'DOT', 'DOCX', 'DOCM', 'DOTX', 'DOTM', 'DYLIB', 'DV', 'DVB', 'EIP', 'EPS', 'EPSF', 'EXR', 'PS', 'ERF', 'EXE', 'DLL', 'EXIF', 'F4A', 'F4B', 'F4P', 'F4V', 'FFF', 'FLA', 'FLAC', 'FLV', 'FPX', 'GIF', 'GZ', 'GZIP', 'HDP', 'HDR', 'WDP', 'HTML', 'HTM', 'XHTML', 'ICC', 'ICM', 'IIQ', 'IND', 'INDD', 'INDT', 'INX', 'ITC', 'JP2', 'JPF', 'JPM', 'JPX', 'JPEG', 'JPC', 'JPG', 'J2C', 'J2K', 'K25', 'KDC', 'KEY', 'KTH', 'LNK', 'M2TS', 'MTS', 'M2T', 'TS', 'M4A', 'M4B', 'M4P', 'M4V', 'MEF', 'MIE', 'MIFF', 'MIF', 'MKA', 'MKV', 'MKS', 'MOS', 'MOV', 'Q', 'MP3', 'MP4', 'MPC', 'MPEG', 'MPG', 'M2V', 'MPO', 'MQV', 'QT', 'MRW', 'MXF', 'NEF', 'NMBTEMPLATE', 'NRW', 'NUMBERS', 'ODB', 'ODC', 'ODF', 'ODG', 'OGI', 'ODP', 'ODS', 'ODT', 'OGG', 'ORF', 'OTF', 'PAGES', 'PDF', 'PEF', 'PFA', 'PFB', 'PFM', 'PGF', 'PICT', 'PCT', 'PMP', 'PNG', 'JNG', 'MNG', 'PPM', 'PBM', 'PGM', 'PPT', 'PPS', 'POT', 'POTX', 'POTM', 'PPSX', 'PPSM', 'PPTX', 'PPTM', 'PSD', 'PSB', 'PSP', 'PSPIMAGE', 'QTIF', 'QTI', 'QIF', 'RAF', 'RAM', 'RPM', 'RAW', 'RAR', 'RAW', 'RIFF', 'RIF', 'RM', 'RV', 'RMVB', 'RSRC', 'RTF', 'RW2', 'RWL', 'RWZ', 'SO', 'SR2', 'SRF', 'SRW', 'SVG', 'SWF', 'THM', 'THMX', 'TIFF', 'TIF', 'TTF', 'TTC', 'VOB', 'VRD', 'VSD', 'WAV', 'WEBM', 'WEBP', 'WMA', 'WMV', 'X3F', 'XCF', 'XLS', 'XLT', 'XLSX', 'XLSM', 'XLSB', 'XLTX', 'XLTM', 'XMP', 'ZIP')

	if(item_to_process =="EnCase Logical Evidence File"):

		file_to_process = evidence
		mount_point = mount_encase_v6_l01(case_number, file_to_process, outfile)
		process_folder(mount_point, valid_extensions, item_to_process)

		#umount
		if(os.path.exists(mount_point)):
			subprocess.call(['sudo umount -f ' + mount_point], shell=True)
			os.rmdir(mount_point)


	if(item_to_process == "Directory"):

		mount_point = evidence_no_quotes
		process_folder(mount_point, valid_extensions, item_to_process, outfile, folder_path)

	

	elif(item_to_process == "Bit-Stream Image"):

		#get datetime
		now = datetime.datetime.now()

		#set Mount Point
		mount_point = "/mnt/" + now.strftime("%Y-%m-%d_%H_%M_%S_%f")

		#select dd image to process	
		Image_Path = evidence

		#check if Image file is in Encase format
		if re.search(".E01", Image_Path):

			#strip out single quotes from the quoted path
			#no_quotes_path = Image_Path.replace("'","")
			#print("The no quotes path is: " +  no_quotes_path)
			#call mount_ewf function
			Image_Path = mount_ewf(Image_Path, outfile, mount_point)

		#call mmls function
		partition_info_dict, temp_time = mmls(outfile, Image_Path)

		#get filesize of mmls_output.txt
		file_size = os.path.getsize("/tmp/mmls_output_" + temp_time + ".txt") 

		#if filesize of mmls output is 0 then run parted
		if(file_size == 0):
			print("mmls output was empty, running parted")
			outfile.write("mmls output was empty, running parted")
			#call parted function
			partition_info_dict, temp_time = parted(outfile, Image_Path)	

		else:

			#read through the mmls output and look for GUID Partition Tables (used on MACS)
			mmls_output_file = open("/tmp/mmls_output_" + temp_time + ".txt", 'r')
			for line in mmls_output_file:
				if re.search("GUID Partition Table", line):
					print("We found a GUID partition table, need to use parted")
					outfile.write("We found a GUID partition table, need to use parted\n")
					#call parted function
					partition_info_dict, temp_time = parted(outfile, Image_Path)

	
		#loop through the dictionary containing the partition info (filesystem is VALUE, offset is KEY)
		for key,value in partition_info_dict.items():

			#set up file object for output file
			output_file = folder_path + "/Exif_data_partition_offset_" + str(key) +".txt"
			print("The output_file is: " + output_file)
			exif_out = open(output_file, 'wt+')

			#disable auto-mount in nautilus - this stops a nautilus window from popping up every time the mount command is executed
			cmd_false = "sudo gsettings set org.gnome.desktop.media-handling automount false && sudo gsettings set org.gnome.desktop.media-handling automount-open false"
			try:
				subprocess.call([cmd_false], shell=True)
			except:
				print("Automount false failed")

			#call mount sub-routine
			success_code, loopback_device_mount = mount(value,key,Image_Path, outfile, mount_point)

			if(success_code):
				print("Could not mount partition with filesystem: " + value + " at offset:" + str(key))
				outfile.write("Could not mount partition with filesystem: " + value + " at offset:" + str(key))
			else:
		
				print("We just mounted filesystem: " + value + " at offset:" + str(key) + ". Scanning for files of interest.....\n")
				outfile.write("We just mounted filesystem: " + value + " at offset:" + str(key) + "\n")
			

				#get the filename without extension
				for root,dirs,files in os.walk(mount_point):
					for filenames in files:
						fileName, fileExtension = os.path.splitext(filenames)
					
						#replace the . in the file extension with nothing
						file_extension = fileExtension.replace('.','')	
						file_extension = file_extension.upper()				
						file_name = os.path.basename(fileName)
						for extension in valid_extensions:
							if(file_extension == extension):
								print("Running exiftool against file: " + filenames)
								outfile.write("Running exiftool against file: " + filenames)

								#chdir to output folder
								os.chdir(folder_path)
						
								#get absolute path to file
								file_name = os.path.join(root,filenames)
								quoted_file_name = "'" +file_name +"'"

								#enclose strings in quotes
								quoted_root = "'" +root +"'"	
								
	
								#set up exiftool command			
								exif_command = "exiftool -ext " + extension + " -l -sep cuem******* -z " + quoted_file_name + " >> " + "'" +  folder_path + "/Exif_data_partition_offset_" + str(key) +".txt" + "'"
										
								#print("The exif command is: " + exif_command + "\n\n")
								outfile.write("The exif command is: " + exif_command + "\n\n")

								#execute the exif command
								subprocess.call([exif_command], shell=True)
								#exif_out.write("\n\n")
						

				#unmount and remove mount points
				if(os.path.exists(mount_point)): 
					subprocess.call(['sudo umount -f ' + mount_point], shell=True)
					os.rmdir(mount_point)
				#unmount loopback device if this image was HFS+ - need to run losetup -d <loop_device> before unmounting
				if not (loopback_device_mount == "NONE"):
					losetup_d_command = "losetup -d " + loopback_device_mount
					subprocess.call([losetup_d_command], shell=True)

			#close outfile
			exif_out.close()

	#program cleanup
	outfile.close()
	
	#remove mount points created for this program
	if(os.path.exists(mount_point)):
		if not (item_to_process == "Directory"):
			os.rmdir(mount_point)
	if(os.path.exists(mount_point+"_ewf")):
		subprocess.call(['sudo umount -f ' + mount_point + "_ewf"], shell=True)
		os.rmdir(mount_point+"_ewf")

	#delete empty directories in output folder
	for root, dirs, files in os.walk(folder_path, topdown=False):	
		for directories in dirs:
			files = []
			dir_path = os.path.join(root,directories)
			files = os.listdir(dir_path)	
			if(len(files) == 0):
				os.rmdir(dir_path)
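
Note: several calls above pass a one-element list together with shell=True, e.g. subprocess.call(['sudo umount -f ' + mount_point], shell=True). On POSIX only the first list element becomes the shell command string, so this happens to work, but a plain string expresses the same thing directly. A sketch with a hypothetical mount point:

import subprocess

mount_point = '/mnt/2016-01-01_00_00_00'  # hypothetical mount point
subprocess.call('sudo umount -f ' + mount_point, shell=True)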

Example 36

Project: opveclib Source File: test_dynamiclib_addgpu.py
    def test1(self):

        # build the operator libs if needed
        cpulib = os.path.join(cache_directory, "libaddcpu.so")
        gpulib = os.path.join(cache_directory, "libaddgpu.so")
        if not os.path.exists(cpulib):
            this_file_path = os.path.abspath(__file__)
            this_directory = os.path.split(this_file_path)[0]

            cpp_path = os.path.join(this_directory, 'addcpu.cpp')
            subprocess.call([cxx, '-fPIC', '-Wall',
                         '-std=c++11', '-Ofast', '-Wextra',
                         '-g', '-pedantic',
                         '-I'+this_directory+'/..',
                         '-o', cpulib, '-shared',  cpp_path])

        if cuda_enabled:
            if not os.path.exists(gpulib):
                this_file_path = os.path.abspath(__file__)
                this_directory = os.path.split(this_file_path)[0]

                nvcc_path = os.path.join(cuda_directory, 'bin/nvcc')
                cuda_path = os.path.join(this_directory, 'addgpu.cu')
                cuda_o_path = os.path.join(cache_directory, 'addgpu.o')

                subprocess.call([nvcc_path, '-O3', '--use_fast_math', '--relocatable-device-code=true', '--compile', '-Xcompiler',
                                '-fPIC', '-std=c++11', '-I'+this_directory+'/..',
                                 cuda_path, '-o', cuda_o_path])
                subprocess.call([nvcc_path, '-shared', '-o', gpulib, cuda_o_path])
                # clean up .o files
                subprocess.call(['rm', cuda_o_path])

            devices = ['/cpu:0', '/gpu:0']
        else:
            devices = ['/cpu:0']
        for dev_string in devices:
            logger.debug('*** device: {dev}'.format(dev= dev_string))
            test_config=tf.ConfigProto(allow_soft_placement=False)
            # Don't perform optimizations for tests so we don't inadvertently run
            # gpu ops on cpu
            test_config.graph_options.optimizer_options.opt_level = -1
            with tf.Session(config=test_config):
                with tf.device(dev_string):
                    logger.debug('*** add2Int64')
                    in0 = np.random.rand(3,50).astype(np.int64)
                    in1 = np.random.rand(3,50).astype(np.int64)
                    ones = np.ones((3,50), dtype=np.int64)
                    output = _DynamicLibOp.module().dynamic_lib(inputs=[in0, in1],
                                                               out_shapes=[[3,50]],
                                                               out_types=['int64'],
                                                               cpu_lib_path=cpulib,
                                                               cpu_func_name="add2Int64",
                                                               gpu_lib_path=gpulib,
                                                               gpu_func_name="add2Int64",
                                                               serialized_grad_dag='',
                                                               cuda_threads_per_block=_default_cuda_threads_per_block)

                    ref = np.add(in0,in1)
                    if (dev_string == '/gpu:0'):
                        ref = np.add(ref,ones)
                    assert np.allclose(output[0].eval(), ref)

                    logger.debug('*** add2Int32')
                    in0 = np.random.rand(3,50).astype(np.int32)
                    in1 = np.random.rand(3,50).astype(np.int32)
                    ones = np.ones((3,50), dtype=np.int32)
                    output = _DynamicLibOp.module().dynamic_lib(inputs=[in0, in1],
                                                               out_shapes=[[3,50]],
                                                               out_types=['int32'],
                                                               cpu_lib_path=cpulib,
                                                               cpu_func_name="add2Int32",
                                                               gpu_lib_path=gpulib,
                                                               gpu_func_name="add2Int32",
                                                               serialized_grad_dag='',
                                                               cuda_threads_per_block=_default_cuda_threads_per_block)

                    ref = np.add(in0,in1)
                    if (dev_string == '/gpu:0'):
                        ref = np.add(ref,ones)
                    assert np.allclose(output[0].eval(), ref)


                    logger.debug('*** addFloatDoubleFloat')
                    in0 = np.random.rand(3,50).astype(np.float32)
                    in1 = np.random.rand(3,50).astype(np.float32)
                    in2 = np.random.rand(3,50).astype(np.float64)
                    ones = np.ones((3,50), dtype=np.float32)
                    output = _DynamicLibOp.module().dynamic_lib(inputs=[in0, in2, in1],
                                                                       out_shapes=[[3,50]],
                                                                       out_types=['float'],
                                                                       cpu_lib_path= cpulib,
                                                                       cpu_func_name="addFloatDoubleFloat",
                                                                       gpu_lib_path= gpulib,
                                                                       gpu_func_name="addFloatDoubleFloat",
                                                                       serialized_grad_dag='',
                                                                       cuda_threads_per_block=_default_cuda_threads_per_block)
                    ref = (in0 + in2 + in1).astype(np.float32)
                    if (dev_string == '/gpu:0'):
                        ref = ref + ones
                    assert np.allclose(output[0].eval(), ref)

                    logger.debug('*** sumAndSq')
                    output = _DynamicLibOp.module().dynamic_lib(inputs=[in0, in2],
                                                                       out_shapes=[[3,50], [3,50]],
                                                                       out_types=['float', 'float'],
                                                                       cpu_lib_path= cpulib,
                                                                       cpu_func_name="sumAndSq",
                                                                       gpu_lib_path= gpulib,
                                                                       gpu_func_name="sumAndSq",
                                                                       serialized_grad_dag='',
                                                                       cuda_threads_per_block=_default_cuda_threads_per_block)

                    out0 = (in0 + in2).astype(np.float32)
                    if (dev_string == '/gpu:0'):
                        out0 = out0 + ones
                    out1 = np.multiply(out0, out0)
                    if (dev_string == '/gpu:0'):
                        out1 = out1 + ones
                    assert np.allclose(output[0].eval(), out0)
                    assert np.allclose(output[1].eval(), out1)

                    # make sure we can also use a standard TF gpu operator in the same session
                    logger.debug('*** TF numerics op')
                    x_shape = [5, 4]
                    x = np.random.random_sample(x_shape).astype(np.float32)
                    t = tf.constant(x, shape=x_shape, dtype=tf.float32)
                    t_verified = tf.verify_tensor_all_finite(t, "Input is not a number.")
                    assert np.allclose(x, t_verified.eval())
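
Note: the build steps above pass each compiler invocation as an argument list, so no shell is spawned and paths need no quoting. A hedged sketch of the same idiom with hypothetical file names:

import subprocess

ret = subprocess.call(['g++', '-fPIC', '-shared', '-std=c++11',
                       '-o', 'libadd.so', 'add.cpp'])  # hypothetical paths
if ret != 0:
    raise RuntimeError('compilation failed with exit status %d' % ret)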

Example 37

Project: pyomo Source File: driver.py
Function: perform_install
def perform_install(package, config=None, user='hudson', dest='python', virtualenv=True, virtualenv_args=None):
    if os.environ.get('WORKSPACE', None) is None:
        sys.stdout.write(
            "\n(INFO) WORKSPACE environment variable not found."
            "\n       Assuming WORKSPACE==%s\n\n" % (os.getcwd(),) )
        os.environ['WORKSPACE'] = os.getcwd()

    if os.path.exists(dest):
        if os.path.abspath(sys.executable).startswith(os.path.abspath('python')):
            raise Exception(
                "Python executable used to create the virtual environment:"
                "\n\t    %s\n\tfound within the target installation directory:"
                "\n\t    %s\n\tCowardly refusing to continue installation."
                % ( os.path.abspath(sys.executable), os.path.abspath(dest) ) )
        rmtree(dest)

    # Set the user name for windows builds
    if platform == 'win':
        os.environ['USER'] = user

    if 'CONFIGFILE' in os.environ:
        configfile = os.environ['CONFIGFILE']
    else:
        if config is None:
            config = os.path.join( os.environ['WORKSPACE'],"hudson",package+"-vpy","all.ini" )
        elif os.sep not in config:
            config = os.path.join( os.environ['WORKSPACE'],"hudson",package+"-vpy",config )
        configfile = config

    if 'PYPI_URL' in os.environ:
        if os.environ['PYPI_URL']:
            pypi_url = [ '--pypi-url', os.environ['PYPI_URL'] ]
        else:
            pypi_url = []
    else:
        pypi_url = [ '--pypi-url', 'http://giskard.sandia.gov:8888/pypi',
                     '--trust-pypi-url' ]

    if 'PICO' in os.environ and os.environ['PICO'] == 'yes':
        os.environ['PATH'] = os.pathsep.join([
            os.path.join(os.environ['WORKSPACE'], 'build', 'bin'),
            os.environ['PATH'] ])

    python=os.environ.get('PYTHON','')
    if python == '':
        python = sys.executable
    elif python[0] == '"':
        python=eval(python)
    sys.stdout.write("\n")
    sys.stdout.write("Installing with Python version %s\n" % sys.version)
    sys.stdout.write("\n")
    # Install
    if virtualenv:
        # Install using vpy_install
        cmd = [
            python,
            os.path.join( os.environ['WORKSPACE'],'vpy','pyutilib','virtualenv', 'vpy_install.py' ),
            '--debug', '-v', '--system-site-packages', '--config', configfile ]
        if pypi_url:
            cmd.extend( pypi_url )
        if virtualenv_args is None:
            cmd.extend(sys.argv[1:])
        else:
            cmd.extend(virtualenv_args)
        cmd.append( os.path.join(os.environ['WORKSPACE'], dest) )
        sys.stdout.write("Running Command: %s\n" % " ".join(cmd))
        sys.stdout.flush()
        if platform == 'win':
            sys.stdout.write( str(subprocess.call(['cmd','/c']+cmd)) + '\n' )
        else:
            sys.stdout.write( str(subprocess.call(cmd)) + '\n' )
    else:
        # Install into a local directory "$WORKSPACE/python"
        sitedir = os.path.join(os.path.abspath(dest),'lib','python'+'.'.join(map(str,sys.version_info[:2])),'site-packages')
        if 'PYTHONPATH' in os.environ:
            os.environ['PYTHONPATH'] = os.environ['PYTHONPATH']+':'+sitedir
        else:
            os.environ['PYTHONPATH'] = sitedir
        os.makedirs(sitedir)
        os.chdir(os.path.join( os.environ['WORKSPACE'],'src' ))
        #print "HERE", os.environ['WORKSPACE']
        for file in glob.glob(os.path.join( os.environ['WORKSPACE'],'src','*')):
            if os.path.isdir(file) and os.path.exists( os.path.join(file,'setup.py') ):
                cmd = [
                    python,
                    'setup.py',
                    'develop', '--no-deps', '--prefix', os.path.join( os.environ['WORKSPACE'],'python') ]
                sys.stdout.write("Running Command: %s\n" % " ".join(cmd))
                sys.stdout.flush()
                os.chdir(file)
                if platform == 'win':
                    sys.stdout.write( str(subprocess.call(['cmd','/c']+cmd)) + '\n' )
                else:
                    sys.stdout.write( str(subprocess.call(cmd)) + '\n' )
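
Note: on Windows the installer above prefixes the argument list with ['cmd', '/c'] so the console interpreter resolves the command; elsewhere the list is executed directly. A minimal sketch of that branch, with a stand-in command:

import subprocess
import sys

cmd = [sys.executable, '--version']  # stand-in command
if sys.platform.startswith('win'):
    ret = subprocess.call(['cmd', '/c'] + cmd)
else:
    ret = subprocess.call(cmd)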

Example 38

Project: academicmarkdown Source File: tools.py
def addLineNumbersToPDF(inFile, outFile, color='#d3d7cf'):

	"""
	desc:
		Adds line numbers to a PDF file.

	arguments:
		inFile:
			desc:	The name of the input PDF.
			type:	str, unicode
		outFile:
			desc:	The name of the output PDF.
			type:	str, unicode

	keywords:
		color:
			desc:	An HTML-style color name.
			type:	str, unicode
	"""

	import os
	import shutil
	import subprocess
	from scipy import ndimage
	import numpy as np
	from PIL import Image, ImageDraw, ImageFont

	#fontFile = '/usr/share/fonts/truetype/msttcorefonts/Times_New_Roman.ttf'
	fontFile = '/usr/share/fonts/truetype/freefont/FreeSans.ttf'
	fontSize = 20
	tmpFolder = u'line-numbers-tmp'
	pageFolder = u'%s/page' % tmpFolder
	watermarkFolder = u'%s/watermark' % tmpFolder

	try:
		shutil.rmtree(tmpFolder)
	except:
		pass
	os.makedirs(pageFolder)
	os.makedirs(watermarkFolder)

	print(u'Adding line numbers to PDF')
	print(u'Converting ...')
	cmd = u'convert -density 150 %s %s' % (inFile, os.path.join(pageFolder,
		u'%03d.png'))
	subprocess.call(cmd.split())
	print(u'Done!')
	# Create watermarks for all pages
	for path in os.listdir(pageFolder):
		im = ndimage.imread(os.path.join(pageFolder, path), flatten=True)
		# Create a list of indices that have text on them
		nonEmptyRows = np.where(im.mean(axis=1) != 255)[0]
		# Store the rows (i.e.) y coordinates of all to-be-numbered-rows
		numberRows =[]
		firstRow = None
		for row in nonEmptyRows:
			if im[row-1].mean() == 255:
				numberRows.append(row)
		print(u'Found %d lines!' % len(numberRows))
		# Create watermark image
		print(u'Creating watermark ...')
		font = ImageFont.truetype(fontFile, fontSize)
		wm = Image.new('RGBA', (im.shape[1], im.shape[0]))
		dr = ImageDraw.Draw(wm)
		i = 1
		for row in numberRows:
			dr.text((32, row), '%s' % i, font=font, fill=color)
			i += 1
		wm.save(os.path.join(watermarkFolder, path))
		print(u'Done!')

	print(u'Creating watermark pdf ...')
	cmd = 'convert %s/*.png watermark.pdf' % watermarkFolder
	subprocess.call(cmd.split())
	print(u'Done!')

	print(u'Merging watermark and source document ...')
	cmd = u'pdftk %s multibackground watermark.pdf output %s' \
		% (inFile, outFile)
	subprocess.call(cmd.split())
	print(u'Done!')

	print(u'Cleaning up ...')
	shutil.rmtree(tmpFolder)
	print(u'Done')
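
Note: the convert and pdftk commands above are built as single strings and split with str.split(), which breaks if inFile or outFile contains a space. A hedged alternative (file names hypothetical) builds the argv list directly:

import subprocess

inFile, outFile = 'my draft.pdf', 'numbered.pdf'  # hypothetical names, one with a space
subprocess.call(['pdftk', inFile, 'multibackground', 'watermark.pdf',
                 'output', outFile])              # requires pdftk on PATH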

Example 39

Project: grr Source File: osx.py
  def BuildInstallerPkg(self, output_file):
    """Builds a package (.pkg) using PackageMaker."""
    build_files_dir = config_lib.Resource().Filter("install_data/macosx/client")

    pmdoc_dir = os.path.join(build_files_dir, "grr.pmdoc")

    client_name = config_lib.CONFIG.Get("Client.name", context=self.context)
    plist_name = config_lib.CONFIG.Get("Client.plist_filename",
                                       context=self.context)

    out_build_files_dir = build_files_dir.replace(
        config_lib.Resource().Filter("grr"), self.build_dir)
    out_pmdoc_dir = os.path.join(self.build_dir, "%s.pmdoc" % client_name)

    utils.EnsureDirExists(out_build_files_dir)
    utils.EnsureDirExists(out_pmdoc_dir)
    utils.EnsureDirExists(
        config_lib.CONFIG.Get("ClientBuilder.package_dir",
                              context=self.context))

    self.GenerateFile(
        input_filename=os.path.join(build_files_dir, "grr.plist.in"),
        output_filename=os.path.join(self.build_dir, plist_name))
    self.GenerateFile(
        input_filename=os.path.join(pmdoc_dir, "index.xml.in"),
        output_filename=os.path.join(out_pmdoc_dir, "index.xml"))
    self.GenerateFile(
        input_filename=os.path.join(pmdoc_dir, "01grr.xml.in"),
        output_filename=os.path.join(out_pmdoc_dir, "01%s.xml" % client_name))
    self.GenerateFile(
        input_filename=os.path.join(pmdoc_dir, "01grr-contents.xml"),
        output_filename=os.path.join(out_pmdoc_dir,
                                     "01%s-contents.xml" % client_name))
    self.GenerateFile(
        input_filename=os.path.join(pmdoc_dir, "02com.xml.in"),
        output_filename=os.path.join(out_pmdoc_dir, "02com.xml"))
    self.GenerateFile(
        input_filename=os.path.join(pmdoc_dir, "02com-contents.xml"),
        output_filename=os.path.join(out_pmdoc_dir, "02com-contents.xml"))

    self.GenerateFile(
        input_filename=os.path.join(build_files_dir, "preinstall.sh.in"),
        output_filename=os.path.join(self.build_dir, "preinstall.sh"))
    self.GenerateFile(
        input_filename=os.path.join(build_files_dir, "postinstall.sh.in"),
        output_filename=os.path.join(self.build_dir, "postinstall.sh"))

    output_basename = config_lib.CONFIG.Get("ClientBuilder.output_basename",
                                            context=self.context)

    # Rename the generated binaries to the correct name.
    template_binary_dir = os.path.join(
        config_lib.CONFIG.Get("PyInstaller.distpath", context=self.context),
        "grr-client")
    target_binary_dir = os.path.join(self.build_dir, "%s" % output_basename)

    if template_binary_dir != target_binary_dir:
      shutil.move(template_binary_dir, target_binary_dir)

    shutil.move(
        os.path.join(target_binary_dir, "grr-client"),
        os.path.join(
            target_binary_dir,
            config_lib.CONFIG.Get("Client.binary_name", context=self.context)))

    repacker = build.ClientRepacker(context=self.context)
    repacker.context = self.context

    # Generate a config file.
    with open(
        os.path.join(
            target_binary_dir,
            config_lib.CONFIG.Get("ClientBuilder.config_filename",
                                  context=self.context)),
        "wb") as fd:
      fd.write(
          repacker.GetClientConfig(
              ["Client Context"] + self.context, validate=False))

    print "Fixing file ownership and permissions"

    command = ["sudo", "/usr/sbin/chown", "-R", "root:wheel", self.build_dir]
    # Change the owner, group and permissions of the binaries
    print "Running: %s" % " ".join(command)
    subprocess.call(command)

    command = ["sudo", "/bin/chmod", "-R", "755", self.build_dir]

    print "Running: %s" % " ".join(command)
    subprocess.call(command)

    print "Building a package with PackageMaker"
    pkg = "%s-%s.pkg" % (
        config_lib.CONFIG.Get("Client.name", context=self.context),
        config_lib.CONFIG.Get("Source.version_string", context=self.context))

    output_pkg_path = os.path.join(self.pkg_dir, pkg)
    command = [
        config_lib.CONFIG.Get("ClientBuilder.package_maker_path",
                              context=self.context), "--doc", out_pmdoc_dir,
        "--out", output_pkg_path
    ]

    print "Running: %s " % " ".join(command)
    ret = subprocess.call(command)
    if ret != 0:
      msg = "PackageMaker returned an error (%d)." % ret
      print msg
      raise RuntimeError(msg)

    print "Copying output to templates location: %s -> %s" % (output_pkg_path,
                                                              output_file)
    utils.EnsureDirExists(os.path.dirname(output_file))
    shutil.copyfile(output_pkg_path, output_file)

    # Change the owner, group and permissions of the binaries back.
    command = [
        "sudo", "/usr/sbin/chown", "-R", "%s:staff" % getpass.getuser(),
        self.build_dir
    ]
    print "Running: %s" % " ".join(command)
    subprocess.call(command)
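
The builder above relies on subprocess.call returning the child's exit status: a nonzero value from PackageMaker aborts the build. A minimal sketch of that pattern (the command here is hypothetical):

import subprocess

command = ["true"]  # hypothetical command; any argv list works
ret = subprocess.call(command)  # blocks until the child exits
if ret != 0:
    raise RuntimeError("command returned an error (%d)." % ret)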

Example 40

Project: spladder Source File: rproc.py
def rproc_wait(jobinfo, pausetime=120, frac_finished=1.0, resub_on=1, verbosity=2):
    # [jobinfo, num_crashed] = rproc_wait(jobinfo, pausetime, frac_finished, resub_on, verbosity) 

    global rproc_wait_jobinfo
    rproc_wait_jobinfo = jobinfo

    if resub_on == 1:
        print '\n\ncrashed jobs will be resubmitted by rproc_wait'
    elif resub_on == -1:
        print '\n\ncrashed jobs may be resubmitted by rproc_wait'
    else:
        print '\n\ncrashed jobs will not be resubmitted by rproc_wait'

    if not isinstance(jobinfo, list):
        jobinfo = [jobinfo]

    num_jobs = 0
    num_crashed = 0
    for i in range(len(jobinfo)):
        if jobinfo[i].created == 1:
            if jobinfo[i].time is None:
                print >> sys.stderr, 'WARNING: job created but not submitted yet. ignoring'
                jobinfo[i].created = 0
            else:
                num_jobs += 1

    num_finished = 0 
    first_iter = True
    while (num_finished < num_jobs * frac_finished) or (num_crashed > 0):
        if not first_iter:
            time.sleep(pausetime)
        first_iter = False
        num_finished = 0
        num_crashed  = 0
        crashed_files = 'log files of crashed jobs:'
        for id in range(len(jobinfo)):
            cur_finished = rproc_finished(jobinfo[id])
            (still_running, qstat_line, start_time, status) = rproc_still_running(jobinfo[id])
            if status == -1:
                return (jobinfo, num_crashed)

            jobinfo[id].start_time = start_time
            if cur_finished:
                num_finished += 1
            elif not still_running:
                num_finished += 1
                num_crashed += 1
                crashed_files = '%s\n%s' % (crashed_files, jobinfo[id].log_fname)
                if jobinfo[id].crashed_time is None:
                    jobinfo[id].crashed_time = time.time()
                elif 24 * 60 * (time.time() - jobinfo[id].crashed_time) > max(3 * (pausetime/60.0), 0.1)  and (resub_on == 1 or (resub_on == -1 and jobinfo[id].resubmit >= jobinfo[id].retries + 1)):
                    if resub_on == 1:
                        (reachedlimit, jobwalltime) = rproc_reached_timelimit(jobinfo[id])
                        if reachedlimit: # check whether the job has been killed because it reached the time limit
                            if verbosity >= 1:
                                print 'job has been canceled because it used %1.0fs, but time limit was %1.0fs walltime.\nhence, we increase the time limit to %1.0fs.\n' % (jobwalltime, jobinfo[id].time * 60, max(jobinfo[id].time, jobwalltime) * 2)
                            jobinfo[id].time = max(jobinfo[id].time, jobwalltime / 60) * 2
                    elif resub_on == -1:
                        jobinfo[id].time = jobinfo[id].time_req_resubmit[min(jobinfo[id].retries + 1, len(jobinfo[id].time_req_resubmit) - 1)]
                        jobinfo[id].Mem = jobinfo[id].mem_req_resubmit[min(jobinfo[id].retries + 1, len(jobinfo[id].mem_req_resubmit) - 1)] 
                        jobinfo[id].start_time = []
                        if verbosity >= 1:
                            print 'resubmitting job (%i) with new time and memory limitations: %iMb and %i minutes (retry #%i)\n' % (jobinfo[id].jobid, jobinfo[id].Mem, jobinfo[id].time, jobinfo[id].retries + 1)
                    if verbosity >= 2:
                        print 'log file of previous attempt %s\n' % jobinfo[id].log_fname
                    jobinfo[id] = rproc_resubmit(jobinfo[id]) 
                    jobinfo[id].crashed_time = None 
                    num_finished -= 1
            else:
                if verbosity >= 2:
                    print '%s' % qstat_line
            ### hard_time_limit in minutes
            if len(jobinfo[id].start_time) > 0 and 24 * 60 * (time.time() - jobinfo[id].start_time) > jobinfo[id].hard_time_limit:
                print 'delete job (%i) because hard time limit (%imin) was reached\n' % (jobinfo[id].jobid, jobinfo[id].hard_time_limit)
                #SCHED_DELETE_JOB
                subprocess.call(['qdel', str(jobinfo[id].jobid)])
        if verbosity >= 1:
            print '\n%i of %i jobs finished (%i of them crashed) \n' % (num_finished, num_jobs, num_crashed)
        if verbosity >= 2:
            if len(crashed_files.strip().split('\n')) > 0:
                print '%s\n' % crashed_files
        if resub_on == 0 and num_finished == num_jobs * frac_finished:
            break
        if resub_on == -1 and num_finished == num_jobs * frac_finished:
            all_tried = True
            for i in range(len(jobinfo)):
                fin = rproc_finished(jobinfo[i])
                if (jobinfo[i].resubmit >= jobinfo[i].retries + 1) and not fin:
                    all_tried = False
            if all_tried:
                break

    time.sleep(1)
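
The lone subprocess.call above deletes a crashed job with qdel. Every element of the argument list must already be a string, hence the str() conversion around the job id; a minimal sketch (qdel and the id are assumptions about the local scheduler):

import subprocess

jobid = 12345  # hypothetical scheduler job id
# argv elements must be strings, so numeric values are converted explicitly
subprocess.call(["qdel", str(jobid)])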

Example 41

Project: AvsPmod Source File: [7] Optimize Sliders.py
def main():
    import random
    import math
    import subprocess
    import os
    import os.path
    
    app = avsp.GetWindow()
    params = []
    scriptTemplate = ''
    logfilename = 'log.txt'
    avs2avidir = os.path.join(app.toolsfolder, 'avs2avi.exe')
    
    # Simple Genetic Algorithm implementation
    class SGA(object):
        def __init__(self,
                chromosome_length,
                objective_function,
                population_size=100,
                probability_crossover=0.5,
                probability_mutation=0.01,
                selection_pressure=4,
                max_generations=10,
                minimize=True,
                dump_function=None):
            # Define the variables for the key GA parameters
            SGA.length = chromosome_length
            self.objfn = objective_function
            self.n = population_size - population_size % 2
            self.pc = probability_crossover
            self.pm = probability_mutation
            self.s = selection_pressure
            self.maxgen = max_generations
            SGA.minimize = minimize
            self.dump = dump_function
            self.generation = 0
            self.scoreDict = {}
            # Define the individual class
            class Individual(object):
                def __init__(self, chromosome=None):
                    self.length = SGA.length
                    self.minimize = SGA.minimize
                    self.score = None
                    self.chromosome = chromosome
                    if self.chromosome is None:
                        self.chromosome = [random.choice((0,1)) for i in xrange(self.length)]
                        
                def __cmp__(self, other):
                    if self.minimize:
                        return cmp(self.score, other.score)
                    else:
                        return cmp(other.score, self.score)                    
                        
                def copy(self):
                    twin = self.__class__(self.chromosome[:])
                    twin.score = self.score
                    return twin
            self.Individual = Individual
            
        def run(self):
            # Create the initial population (generation 0)
            self.population = [self.Individual() for i in range(self.n)]
            try:
                pb = avsp.ProgressBox(self.n, _('Initial evaluation...'), _('Generation 0 Progress'))
            except NameError:
                pb = None
            try:
                for i, individual in enumerate(self.population):
                    self.evaluate(individual)
                    if pb is not None:
                        if not pb.Update(i)[0]:
                            pb.Destroy()
                            return False
                # Dump the best data from this generation
                best = min(self.population)
                initialscore = best.score
                if self.dump is not None:
                    self.dump(best.chromosome, best.score)
                if pb is not None:
                    pb.Destroy()
                self.generation += 1
                # Run the genetic algorithm
                while self.generation < self.maxgen:
                    # Create a progress bar for this generation
                    if pb is not None:
                        pb = avsp.ProgressBox(
                            self.n,
                            _('Initial best score: %.3f, Current best score: %.3f') % (initialscore, best.score),
                            'Generation %i Progress' % self.generation
                        )
                    newpopulation = [best.copy()]
                    count = len(newpopulation)
                    while count < self.n:
                    #~ for i in xrange(self.n/2):
                        # Selection
                        mate1 = self.selection()
                        mate2 = self.selection()
                        # Crossover
                        children = self.crossover(mate1, mate2)
                        for individual in children:
                            # Mutation
                            self.mutation(individual)
                            # Evaluate the individual and add it to the new population
                            self.evaluate(individual)
                            newpopulation.append(individual)
                        # Update the progress bar
                        count = len(newpopulation)
                        if pb is not None:
                            i = min(count-1, self.n-1)
                            if not pb.Update(i)[0]:
                                pb.Destroy()
                                return False
                    # Update the internally stored population
                    self.population = newpopulation[:self.n]
                    # Dump the best data from this generation
                    best = min(self.population)
                    if self.dump is not None:
                        self.dump(best.chromosome, best.score)
                    # Destroy the progress bar for this generation
                    if pb is not None:
                        pb.Destroy()
                    self.generation += 1
            finally:
                if pb is not None:
                    pb.Destroy()
            return True
            
        def crossover(self, individual1, individual2):
            '''Two point crossover'''
            if random.random() < self.pc:
                # Pick the crossover points randomly
                left = random.randrange(1, self.length-2)
                right = random.randrange(left, self.length-1)
                # Create the children chromosomes
                p1 = individual1.chromosome
                p2 = individual2.chromosome
                c1 = p1[:left] + p2[left:right] + p1[right:]
                c2 = p2[:left] + p1[left:right] + p2[right:]
                # Return the new individuals
                return self.Individual(c1), self.Individual(c2)
            else:
                # Don't perform crossover
                return individual1.copy(), individual2.copy()
            
        def mutation(self, individual):
            '''Bit-flip mutation'''
            # Randomly flip each bit in the chromosome
            chromosome = individual.chromosome
            for gene in xrange(self.length):
                if random.random() < self.pm:
                    chromosome[gene] = int(not chromosome[gene])
                    
        def selection(self):
            '''Tournament selection with replacement'''
            # Return best individual from s randomly selected members
            competitors = [random.choice(self.population) for i in range(self.s)]
            #~ competitors.sort()
            #~ return competitors[0]
            return min(competitors)
            
        def evaluate(self, individual):
            intChromosome = binary2int(individual.chromosome)
            if self.scoreDict.has_key(intChromosome):
                # The chromosome was evaluated previously
                individual.score = self.scoreDict[intChromosome]
            else:
                # Run the objective function to evaluate the chromosome
                individual.score = self.objfn(individual.chromosome)
                self.scoreDict[intChromosome] = individual.score
                
    def binary2int(x):
        '''decode a binary list to a single unsigned integer'''
        return sum(map(lambda z: int(x[z]) and 2**(len(x) - z - 1),  range(len(x)-1, -1, -1)))
        
    def decode_params(bitlist, params):
        '''returns dictionary of values for each param'''
        iA = 0
        paramDict = {}
        for name, valuelist, nbits in params:
            iB = iA + nbits
            sublist = bitlist[iA:iB]
            #~ value = min + binary2int(sublist) * (max-min)/float(2**nbits - 1)
            #~ if type(min) == bool:
                #~ value = bool(value)
            index = int(binary2int(sublist) * (len(valuelist) - 1) / float(2 ** nbits - 1))
            paramDict[name] = valuelist[index]
            iA = iB
        return paramDict    
        
    def evaluate(chromosome):
        # Decode the bit string into the individual parameters
        paramDict = decode_params(chromosome, params)
        # Create the AviSynth script
        script = scriptTemplate % paramDict
        inputavsname = os.path.join(scriptdir, 'ga_evaluate.avs')
        script = app.GetEncodedText(script, bom=True)
        f = open(inputavsname, 'w')
        f.write(script)
        f.close()
        # Encode the video to get the results (dumped to log.txt)
        try:
            os.remove(logfilename)
        except OSError:
            pass
        subprocess.call([avs2avidir, inputavsname, '-q', '-o', 'n', '-c','null'], shell=True)
        # Read the results in log.txt
        if os.path.isfile(logfilename):
            f = open(logfilename, 'r')
            lines = f.readlines()
            f.close()
            score = float(lines[-1].split()[2])
            #~ print 'good!', score
        else:
            score = 0
            #~ print '*** Error, bad script:'
            #~ print script
            #~ print '*** End script'
        return score
        
    def dump(chromosome, score=None):
        '''Write the script to a file'''
        paramDict = decode_params(chromosome, params)
        script = scriptTemplate % paramDict
        script = app.GetEncodedText(script, bom=True)
        f = open(os.path.splitext(filename)[0] + '-optimized.avs', 'w')
        f.write(script)
        f.close()
        if score is not None:
            print _('Best score: %.2f') % score
            
    # MAIN SECTION
    if not avs2avidir or not os.path.isfile(avs2avidir):
        avsp.MsgBox(_('Must configure avs2avi directory to use this macro!'), _('Error'))
        return
    # Save the script
    filename = avsp.SaveScript()
    if not filename:
        return
    if not avsp.UpdateVideo():
        avsp.MsgBox(_('The current Avisynth script contains errors.'), _('Error'))
        return
    scriptdir = os.path.dirname(filename)
    scriptTemplate = avsp.GetText()
    # Parse the script to determine the log filename
    
    # Create the parameters to optimize based on user sliders in the script
    sliderInfoList = avsp.GetSliderInfo()
    if not sliderInfoList:
        avsp.MsgBox(_('No user sliders in the current Avisynth script!'), _('Error'))
        return
    length = 0
    for text, label, valuelist, nDecimal in sliderInfoList:
        if valuelist is None:
            continue
        mantissa, nbits = math.frexp(len(valuelist))
        if mantissa == 0.5:
            nbits -= 1
        params.append([label, valuelist, nbits])
        length += nbits
        scriptTemplate = scriptTemplate.replace(text, '%('+label+').'+str(nDecimal)+'f')
    # Get basic encoder options with a dialog box
    title = _('Enter optimization info    (%i bits, %i possibilities)') % (length, 2**length)
    message = [_('SSIM log filename:'), [_('max generations:'), _('population size:'), 
              _('crossover probability:'), _('mutation probability:'), _('selection pressure:')]]
    dirname, basename = os.path.split(logfilename)
    if not os.path.isdir(dirname):
        logfilename = os.path.join(app.GetProposedPath(only='dir'), basename)
    default = [logfilename, [(10, 1), (30, 1), (0.6, 0, 1, 2, 0.05), (0.03, 0, 1, 2, 0.05), 4]]
    types = ['file_save', ['spin', 'spin', 'spin', 'spin', 'spin']]
    entries = avsp.GetTextEntry(message, default, title, types)
    if not entries:
        return
    # First clear the AVI from memory (to close the log file)
    txt = avsp.GetText()
    avsp.HideVideoWindow()
    avsp.CloseTab()
    avsp.OpenFile(filename)
    avsp.SetText(txt)
    avsp.SaveScript()
    # Run the optimization
    logfilename, maxgen, n, pc, pm, s = entries
    print _('Begin optimization...')
    print 'n=%s, pc=%s, pm=%s, s=%s, maxgen=%s (%i bits)' % (n, pc, pm, s, maxgen, length)
    sga = SGA(length, evaluate, int(n), float(pc), float(pm), int(s), int(maxgen), False, dump)
    sga.run()
    os.remove(os.path.join(scriptdir, 'ga_evaluate.avs'))
    print _('Finished optimization.')
    # Show the optimized results
    avsp.OpenFile(os.path.splitext(filename)[0] + '-optimized.avs')
    avsp.ShowVideoFrame()
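
The evaluate function above passes an argument list together with shell=True, which happens to work on Windows (the list is joined into a single command line) but not on POSIX, where only the first element reaches the shell. The two portable forms are a plain list, or a single string with shell=True:

import subprocess

subprocess.call(["echo", "hello"])         # list form, no shell involved
subprocess.call("echo hello", shell=True)  # string form, run through the shell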

Example 42

Project: pyload Source File: YoutubeCom.py
    def process(self, pyfile):
        pyfile.url = replace_patterns(pyfile.url, self.URL_REPLACEMENTS)
        self.data  = self.load(pyfile.url)

        if re.search(r'<div id="player-unavailable" class="\s*player-width player-height\s*">', self.data):
            self.offline()

        if "We have been receiving a large volume of requests from your network." in self.data:
            self.temp_offline()

        #: Get config
        use3d = self.config.get('3d')

        if use3d:
            quality = {'sd': 82, 'hd': 84, 'fullhd': 85, '240p': 83, '360p': 82,
                       '480p': 82, '720p': 84, '1080p': 85, '3072p': 85}
        else:
            quality = {'sd': 18, 'hd': 22, 'fullhd': 37, '240p': 5, '360p': 18,
                       '480p': 35, '720p': 22, '1080p': 37, '3072p': 38}

        desired_fmt = self.config.get('fmt')

        if not desired_fmt:
            desired_fmt = quality.get(self.config.get('quality'), 18)

        elif desired_fmt not in self.formats:
            self.log_warning(_("FMT %d unknown, using default") % desired_fmt)
            desired_fmt = 0

        #: Parse available streams
        streams = re.search(r'"url_encoded_fmt_stream_map":"(.+?)",', self.data).group(1)
        streams = [x.split('\u0026') for x in streams.split(',')]
        streams = [dict((y.split('=', 1)) for y in x) for x in streams]
        streams = [(int(x['itag']),
                    urllib.unquote(x['url']),
                    x.get('s', x.get('sig', None)),
                    True if 's' in x else False)
                   for x in streams]

        # self.log_debug("Found links: %s" % streams)

        self.log_debug("AVAILABLE STREAMS: %s" % [x[0] for x in streams])

        #: Build dictionary of supported itags (3D/2D)
        allowed = lambda x: self.config.get(self.formats[x][0])
        streams = [x for x in streams if x[0] in self.formats and allowed(x[0])]

        if not streams:
            self.fail(_("No available stream meets your preferences"))

        fmt_dict = dict([(x[0], x[1:]) for x in streams if self.formats[x[0]][4] == use3d] or streams)

        self.log_debug("DESIRED STREAM: ITAG:%d (%s) %sfound, %sallowed" %
                      (desired_fmt, "%s %dx%d Q:%d 3D:%s" % self.formats[desired_fmt],
                       "" if desired_fmt in fmt_dict else "NOT ", "" if allowed(desired_fmt) else "NOT "))

        #: Return fmt nearest to quality index
        if desired_fmt in fmt_dict and allowed(desired_fmt):
            choosen_fmt = desired_fmt
        else:
            sel  = lambda x: self.formats[x][3]  #: Select quality index
            comp = lambda x, y: abs(sel(x) - sel(y))

            self.log_debug("Choosing nearest fmt: %s" % [(x, allowed(x), comp(x, desired_fmt)) for x in fmt_dict.keys()])

            choosen_fmt = reduce(lambda x, y: x if comp(x, desired_fmt) <= comp(y, desired_fmt) and
                                                   sel(x) > sel(y) else y, fmt_dict.keys())

        self.log_debug("Chosen fmt: %s" % choosen_fmt)

        url = fmt_dict[choosen_fmt][0]

        if fmt_dict[choosen_fmt][1]:
            if fmt_dict[choosen_fmt][2]:
                signature = self._decrypt_signature(fmt_dict[choosen_fmt][1])

            else:
                signature = fmt_dict[choosen_fmt][1]

            url += "&signature=" + signature

        if "&ratebypass=" not in url:
            url += "&ratebypass=yes"

        #: Set file name
        file_suffix = self.formats[choosen_fmt][0] if choosen_fmt in self.formats else ".flv"
        file_name_pattern = '<meta name="title" content="(.+?)">'
        name = re.search(file_name_pattern, self.data).group(1).replace("/", "")

        #: Cleaning invalid characters from the file name
        name = name.encode('ascii', 'replace')
        for c in self.invalid_chars:
            name = name.replace(c, '_')

        pyfile.name = html_unescape(name)

        time = re.search(r't=((\d+)m)?(\d+)s', pyfile.url)
        ffmpeg = which("ffmpeg")
        if ffmpeg and time:
            m, s = time.groups()[1:]
            if m is None:
                m = "0"

            pyfile.name += " (starting at %s:%s)" % (m, s)

        pyfile.name += file_suffix
        filename     = self.download(url)

        if ffmpeg and time:
            inputfile = filename + "_"
            os.rename(filename, inputfile)

            subprocess.call([
                ffmpeg,
                "-ss", "00:%s:%s" % (m, s),
                "-i", inputfile,
                "-vcodec", "copy",
                "-acodec", "copy",
                filename])

            self.remove(inputfile, trash=False)
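
The ffmpeg invocation above shows why the list form suits media tools: each flag and path is passed through verbatim, so filenames with spaces need no quoting. A self-contained sketch with hypothetical paths:

import subprocess

subprocess.call([
    "ffmpeg",
    "-ss", "00:01:30",         # hypothetical start offset
    "-i", "input video.flv",   # spaces are safe in list form
    "-vcodec", "copy",         # copy streams instead of re-encoding
    "-acodec", "copy",
    "output.flv",
])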

Example 43

Project: saga-python Source File: ssh.py
    @SYNC_CALL
    def _initialize (self, session) :

        # make sure we can access the key
        api = self.get_api ()

        unexpanded_key = None
        unexpanded_pub = None
        pwd = None

        
        if api.attribute_exists (saga.context.USER_KEY ) :
            unexpanded_key  = api.get_attribute    (saga.context.USER_KEY )
        if api.attribute_exists (saga.context.USER_CERT) :
            unexpanded_pub  = api.get_attribute    (saga.context.USER_CERT)
        if api.attribute_exists (saga.context.USER_PASS) :
            pwd  = api.get_attribute    (saga.context.USER_PASS)

        # Expand any environment variables in the key/pub paths
        if unexpanded_key:
            key = os.path.expandvars(unexpanded_key)
        else:
            key = None
        if unexpanded_pub:
            pub = os.path.expandvars(unexpanded_pub)
        else:
            pub = None

        # if either user_key or user_cert is specified (or both), we
        # complement the missing one, converting between private and
        # public key paths as needed
        if  pub  and not key :
            key  = pub

        if  not key :
            # nothing to do, really.  This likely means that ssh setup is
            # done out-of-band.
            return

        # convert public key into private key
        if  key.endswith ('.pub') :
            if  not pub :
                pub = key
            key = key[:-4]
        elif key.endswith ('.pem') :
            if  not pub :
                pub = key
        else :
            if  not pub :
                pub = key+'.pub'

        # update the context with these settings
        api.set_attribute (saga.context.USER_KEY , key)
        api.set_attribute (saga.context.USER_CERT, pub)


        # the private and public keys must exist
        if  not os.path.exists (key) or \
            not os.path.isfile (key)    :
            raise se.BadParameter ("ssh key inaccessible: %s" % (key))

        if  not os.path.exists (pub) or \
            not os.path.isfile (pub)    :
            raise se.BadParameter ("ssh public key inaccessible: %s" % (pub))


        try :
            fh_key = open (key)
        except Exception as e:
            raise se.PermissionDenied ("ssh key '%s' not readable: %s" % (key, e))
        else :
            fh_key.close ()


        try :
            fh_pub = open (pub)
        except Exception as e:
            raise se.PermissionDenied ("ssh public key '%s' not readable: %s" % (pub, e))
        else :
            fh_pub.close ()


        import subprocess
        if  not subprocess.call (["sh", "-c", "grep ENCRYPTED %s > /dev/null" % key]) :
            if  pwd  :
                if  subprocess.call (["sh", "-c", "ssh-keygen -y -f %s -P '%s' > /dev/null" % (key, pwd)]) :
                    raise se.PermissionDenied ("ssh key '%s' is encrypted, incorrect password" % (key))
            else :
                self._logger.error ("ssh key '%s' is encrypted, unknown password" % (key))


        self._logger.info ("init SSH context for key  at '%s' done" % key)

Example 44

Project: pulp Source File: test_runner.py
Function: run_tests
def run_tests(packages, tests_all_platforms, tests_non_rhel5,
              flake8_paths=None, flake8_exclude=None):
    """
    Method used by each of the pulp projects to execute their unit & coverage tests
    This method ensures that the arguments that are used by all of them are consistent.

    :param packages: List of packages that should have test coverage data collected
    :type packages: list of str
    :param tests_all_platforms: List of test directories to inspect for tests that are run on
                                all platforms
    :type tests_all_platforms: list of str
    :param tests_non_rhel5: List of test directories to inspect for tests that are run on
                            all platforms except rhel 5
    :type tests_non_rhel5: list of str
    :param flake8_paths: paths that should be checked with flake8
    :type flake8_paths: list of str
    :param flake8_exclude: list of paths that should be ignored during the flake8 check
    :type  flake8_exclude: list
    :return: the exit code from nosetests
    :rtype:  integer
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--xunit-file')
    parser.add_argument('--with-xunit', action='store_true')
    parser.add_argument('--enable-coverage', action='store_true', default=False)
    parser.add_argument('--with-xcoverage', action='store_true')
    parser.add_argument('--cover-min-percentage', type=int, nargs=1)
    parser.add_argument('--xcoverage-file')
    parser.add_argument('-x', '--failfast', action='store_true')
    parser.add_argument('-v', '--verbose', action='store_true')

    arguments = parser.parse_args()

    args = [
        'nosetests',
    ]

    if arguments.enable_coverage:
        if arguments.with_xcoverage:
            args.extend(['--with-xcoverage'])
        else:
            args.extend(['--with-coverage'])

        if arguments.xcoverage_file:
            args.extend(['--xcoverage-file', arguments.xcoverage_file])

        if arguments.cover_min_percentage:
            args.extend(['--cover-min-percentage', str(arguments.cover_min_percentage[0])])

        args.extend(['--cover-html',
                     '--cover-erase',
                     '--cover-package',
                     ','.join(packages)])

    # don't run the server or plugins tests in RHEL5.
    flake8_exit_code = 0
    if sys.version_info >= (2, 6):
        # make sure we test everything
        args.extend(tests_non_rhel5)

        # Check the files for coding conventions
        if flake8_paths:
            # Ignore E401: multiple imports on one line
            flake8_default_exclude = '--exclude=.ropeproject,docs,playpen,*/build/*'
            if flake8_exclude:
                flake8_exclude = flake8_default_exclude + ',%s' % ','.join(flake8_exclude)
            else:
                flake8_exclude = flake8_default_exclude
            flake8_command = ['flake8', '--max-line-length=100', '--ignore=E401', flake8_exclude]
            flake8_command.extend(flake8_paths)
            if arguments.verbose:
                print 'Running {flake8}'.format(flake8=' '.join(flake8_command))
            else:
                print 'Running flake8'
            flake8_exit_code = subprocess.call(flake8_command)

    else:
        args.extend(['-e', 'server'])
        args.extend(['-e', 'plugins'])

    args.extend(tests_all_platforms)

    if arguments.failfast:
        args.extend(['-x'])
        if flake8_exit_code:
            return flake8_exit_code
    if arguments.verbose:
        args.extend(['-v'])
    if arguments.with_xunit:
        args.extend(['--with-xunit', '--process-timeout=360'])
    if arguments.xunit_file:
        args.extend(['--xunit-file', '../test/' + arguments.xunit_file])

    if arguments.verbose:
        print 'Running {tests}'.format(tests=' '.join(args))
    else:
        print "Running Unit Tests"
    # Call the test process, and return its exit code
    return subprocess.call(args) or flake8_exit_code
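
The last line combines two exit codes with "or": any nonzero status is truthy, so a test failure takes precedence, while a flake8 failure is still reported when the tests pass. A sketch of the same idea with a hypothetical test command:

import subprocess
import sys

tests_rc = subprocess.call([sys.executable, "-c", "pass"])  # exits 0
flake8_rc = 1  # pretend the style check failed
# nonzero wins, 0 falls through, so both failure modes are propagated
exit_code = tests_rc or flake8_rc
print(exit_code)  # 1 here: the tests passed but flake8 did not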

Example 45

Project: pymo Source File: build.py
def build(iface, directory, commands):

    # Are we doing a Ren'Py build?

    global RENPY
    RENPY = os.path.exists("renpy")

    if not os.path.isdir(directory):
        iface.fail("{} is not a directory.".format(directory))

    if RENPY and not os.path.isdir(os.path.join(directory, "game")):
        iface.fail("{} does not contain a Ren'Py game.".format(directory))

    
    config = configure.Configuration(directory)
    if config.package is None:
        iface.fail("Run configure before attempting to build the app.")


    global blacklist
    global whitelist
    
    blacklist = PatternList("blacklist.txt")
    whitelist = PatternList("whitelist.txt")
        
    if RENPY:
        manifest_extra = None        
        default_icon = "templates/renpy-icon.png"
        default_presplash = "templates/renpy-presplash.jpg"

        public_dir = None
        private_dir = None
        assets_dir = directory
    
    else:
        manifest_extra = ""
        default_icon = "templates/pygame-icon.png"
        default_presplash = "templates/pygame-presplash.jpg"
        
        if config.layout == "internal":
            private_dir = directory
            public_dir = None
            assets_dir = None
        elif config.layout == "external":
            private_dir = None
            public_dir = directory
            assets_dir = None
        elif config.layout == "split":
            private_dir = join_and_check(directory, "internal")
            public_dir = join_and_check(directory, "external")
            assets_dir = join_and_check(directory, "assets")
        
    versioned_name = config.name.replace(" ", "").replace("'", "") + "-" + config.version

    # Annoying fixups.
    config.name = config.name.replace("'", "\\'")
    config.icon_name = config.icon_name.replace("'", "\\'")
    
    # Figure out versions of the private and public data.
    private_version = str(time.time())

    if public_dir:
        public_version = private_version
    else:
        public_version = None
            
    # Render the various templates into control files.
    render(
        "AndroidManifest.tmpl.xml",
        "AndroidManifest.xml", 
        config = config,
        manifest_extra = manifest_extra,
        )

    render(
        "strings.xml",
        "res/values/strings.xml",
        public_version = public_version,
        private_version = private_version,
        config = config)

    try:
        os.unlink("build.xml")
    except:
        pass
        
    iface.info("Updating source code.")
    
    edit_file("src/org/renpy/android/DownloaderActivity.java", r'import .*\.R;', 'import {}.R;'.format(config.package))
    
    iface.info("Updating build files.")
        
    # Update the project to a recent version.
    subprocess.call([plat.android, "update", "project", "-p", '.', '-t', 'android-19', '-n', versioned_name,
        # "--library", "android-sdk/extras/google/play_licensing/library",
        "--library", "android-sdk/extras/google/play_apk_expansion/downloader_library",
        ])


    iface.info("Creating assets directory.")

    if os.path.isdir("assets"):
        shutil.rmtree("assets")
    
    if assets_dir is not None:
        make_tree(assets_dir, "assets")
    else:
        os.mkdir("assets")

    # Copy in the Ren'Py common assets.
    if os.path.exists("renpy/common"):

        if os.path.isdir("assets/common"):
            shutil.rmtree("assets/common")
        
        make_tree("renpy/common", "assets/common")

        # Ren'Py uses a lot of names that don't work as assets. Auto-rename
        # them.
        for dirpath, dirnames, filenames in os.walk("assets", topdown=False):
            
            for fn in filenames + dirnames:
                if fn[0] == ".":
                    continue
                
                old = os.path.join(dirpath, fn)
                new = os.path.join(dirpath, "x-" + fn)
                
                os.rename(old, new)


    if config.expansion:
        iface.info("Creating expansion file.")
        expansion_file = "main.{}.{}.obb".format(config.numeric_version, config.package)

        zf = zipfile.ZipFile(expansion_file, "w", zipfile.ZIP_STORED)
        zip_directory(zf, "assets")
        zf.close()

        # Delete and re-make the assets directory.
        shutil.rmtree("assets")
        os.mkdir("assets")
        
        # Write the file size into DownloaderActivity.
        file_size = os.path.getsize(expansion_file)
        
        edit_file("src/org/renpy/android/DownloaderActivity.java", 
            r'    private int fileVersion =', 
            '    private int fileVersion = {};'.format(config.numeric_version))

        edit_file("src/org/renpy/android/DownloaderActivity.java", 
            r'    private int fileSize =', 
            '    private int fileSize = {};'.format(file_size))
        
    else:
        expansion_file = None

    iface.info("Packaging internal data.")

    private_dirs = [ 'private' ]

    if private_dir is not None:
        private_dirs.append(private_dir)
        
    if os.path.exists("engine-private"):
        private_dirs.append("engine-private")

    make_tar("assets/private.mp3", private_dirs)
    
    if public_dir is not None:
        iface.info("Packaging external data.")
        make_tar("assets/public.mp3", [ public_dir ])

    # Copy over the icon and presplash files.
    shutil.copy(join_and_check(directory, "android-icon.png") or default_icon, "res/drawable/icon.png")
    shutil.copy(join_and_check(directory, "android-presplash.jpg") or default_presplash, "res/drawable/presplash.jpg")

    # Build.
    iface.info("I'm using Ant to build the package.")

    # Clean is required 
    try:   
        subprocess.check_call([plat.ant, "clean"] +  commands)
        iface.success("It looks like the build succeeded.")
    except:
        iface.fail("The build seems to have failed.")


    if (expansion_file is not None) and ("install" in commands):
        iface.info("Uploading expansion file.")
        
        dest = "/mnt/sdcard/{}".format(expansion_file)

        subprocess.check_call([ plat.adb, "push", expansion_file, dest ])
        
        iface.success("Uploaded the expansion file.")

    if expansion_file is not None:
        os.rename(expansion_file, "bin/" + expansion_file)
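
The build above mixes subprocess.call for steps whose failure it inspects itself with subprocess.check_call for steps that must succeed; the latter raises CalledProcessError on a nonzero status instead of returning it. A minimal sketch of the difference:

import subprocess

subprocess.call(["true"])  # returns the exit status, never raises on nonzero
try:
    subprocess.check_call(["false"])  # raises because the status is nonzero
except subprocess.CalledProcessError as e:
    print("step failed with status %d" % e.returncode)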

Example 46

Project: piku Source File: piku.py
def spawn_app(app, deltas={}):
    """Create all workers for an app"""
    
    app_path = join(APP_ROOT, app)
    procfile = join(app_path, 'Procfile')
    workers = parse_procfile(procfile)
    ordinals = defaultdict(lambda:1)
    worker_count = {k:1 for k in workers.keys()}

    # the Python virtualenv
    virtualenv_path = join(ENV_ROOT, app)
    # Settings shipped with the app
    env_file = join(APP_ROOT, app, 'ENV')
    # Custom overrides
    settings = join(ENV_ROOT, app, 'ENV')
    # Live settings
    live = join(ENV_ROOT, app, 'LIVE_ENV')
    # Scaling
    scaling = join(ENV_ROOT, app, 'SCALING')

    # Bootstrap environment
    env = {
        'APP': app,
        'LOG_ROOT': LOG_ROOT,
        'HOME': environ['HOME'],
        'USER': environ['USER'],
        'PATH': environ['PATH'],
        'PWD': dirname(env_file),
        'VIRTUAL_ENV': virtualenv_path,
    }
    
    # Load environment variables shipped with repo (if any)
    if exists(env_file):
        env.update(parse_settings(env_file, env))
    
    # Override with custom settings (if any)
    if exists(settings):
        env.update(parse_settings(settings, env))

    if 'web' in workers or 'wsgi' in workers:
        # Pick a port if none defined and we're not running under nginx
        if 'PORT' not in env and 'NGINX_SERVER_NAME' not in env:
            env['PORT'] = str(get_free_port())

        # Safe default for bind address            
        if 'BIND_ADDRESS' not in env:
            env['BIND_ADDRESS'] = '127.0.0.1'
                
        # Set up nginx if we have NGINX_SERVER_NAME set
        if 'NGINX_SERVER_NAME' in env:
            nginx = command_output("nginx -V")
            nginx_ssl = "443 ssl"
            if "--with-http_v2_module" in nginx:
                nginx_ssl += " http2"
            elif "--with-http_spdy_module" in nginx and "nginx/1.6.2" not in nginx: # avoid Raspbian bug
                nginx_ssl += " spdy"
        
            env.update({ 
                'NGINX_SSL': nginx_ssl,
                'NGINX_ROOT': NGINX_ROOT,
            })
            
            if 'wsgi' in workers:
                sock = join(NGINX_ROOT, "%s.sock" % app)
                env['NGINX_SOCKET'] = env['BIND_ADDRESS'] = "unix://" + sock
                if 'PORT' in env:
                    del env['PORT']
            else:
                env['NGINX_SOCKET'] = "%(BIND_ADDRESS)s:%(PORT)s" % env 
        
            domain = env['NGINX_SERVER_NAME'].split()[0]       
            key, crt = [join(NGINX_ROOT,'%s.%s' % (app,x)) for x in ['key','crt']]
            if not exists(key):
                call('openssl req -new -newkey rsa:4096 -days 365 -nodes -x509 -subj "/C=US/ST=NY/L=New York/O=Piku/OU=Self-Signed/CN=%(domain)s" -keyout %(key)s -out %(crt)s' % locals(), shell=True)
            
            # restrict access to server from CloudFlare IP addresses
            acl = []
            if env.get('NGINX_CLOUDFLARE_ACL', 'false').lower() == 'true':
                try:
                    cf = loads(urlopen('https://api.cloudflare.com/client/v4/ips').read())
                except Exception, e:
                    cf = defaultdict()
                    echo("-----> Could not retrieve CloudFlare IP ranges: %s" % e.text, fg="red")
                if cf['success'] == True:
                    for i in cf['result']['ipv4_cidrs']:
                        acl.append("allow %s;" % i)
                    for i in cf['result']['ipv6_cidrs']:
                        acl.append("allow %s;" % i)
                    # allow access from controlling machine
                    if 'SSH_CLIENT' in environ:
                        remote_ip = environ['SSH_CLIENT'].split()[0]
                        echo("-----> Adding your IP (%s) to nginx ACL" % remote_ip)
                        acl.append("allow %s;" % remote_ip)
                    acl.extend(["allow 127.0.0.1;","deny all;"])
            env['NGINX_ACL'] = " ".join(acl)

            env['NGINX_STATIC_MAPPINGS'] = ''
            
            # Get a mapping of /url:path1,/url2:path2
            static_paths = env.get('NGINX_STATIC_PATHS','')
            if len(static_paths):
                try:
                    items = static_paths.split(',')
                    for item in items:
                        static_url, static_path = item.split(':')
                        if static_path[0] != '/':
                            static_path = join(app_path, static_path)
                        env['NGINX_STATIC_MAPPINGS'] = env['NGINX_STATIC_MAPPINGS'] + NGINX_STATIC_MAPPING % {'url': static_url, 'path': static_path}
                except Exception as e:
                    print "Error %s in static path spec: should be /url1:path1[,/url2:path2], ignoring." % e
                    env['NGINX_STATIC_MAPPINGS'] = ''

            buffer = expandvars(NGINX_TEMPLATE, env)
            echo("-----> Setting up nginx for '%s:%s'" % (app, env['NGINX_SERVER_NAME']))
            with open(join(NGINX_ROOT,"%s.conf" % app), "w") as h:
                h.write(buffer)            

    # Configured worker count
    if exists(scaling):
        worker_count.update({k: int(v) for k,v in parse_procfile(scaling).iteritems()})
    
    to_create = {}
    to_destroy = {}    
    for k, v in worker_count.iteritems():
        to_create[k] = range(1,worker_count[k] + 1)
        if k in deltas and deltas[k]:
            to_create[k] = range(1, worker_count[k] + deltas[k] + 1)
            if deltas[k] < 0:
                to_destroy[k] = range(worker_count[k], worker_count[k] + deltas[k], -1)
            worker_count[k] = worker_count[k]+deltas[k]

    # Save current settings
    write_config(live, env)
    write_config(scaling, worker_count, ':')
    
    # Create new workers
    for k, v in to_create.iteritems():
        for w in v:
            enabled = join(UWSGI_ENABLED, '%s_%s.%d.ini' % (app, k, w))
            if not exists(enabled):
                echo("-----> Spawning '%s:%s.%d'" % (app, k, w), fg='green')
                spawn_worker(app, k, workers[k], env, w)
        
    # Remove unnecessary workers (leave logfiles)
    for k, v in to_destroy.iteritems():
        for w in v:
            enabled = join(UWSGI_ENABLED, '%s_%s.%d.ini' % (app, k, w))
            if exists(enabled):
                echo("-----> Terminating '%s:%s.%d'" % (app, k, w), fg='yellow')
                unlink(enabled)
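
The openssl line above interpolates locals() into one shell string, which requires every substituted value to be shell-safe. An equivalent sketch in list form, where the domain (hypothetical here) is passed through without quoting concerns:

import subprocess

domain = "example.com"  # hypothetical
subprocess.call([
    "openssl", "req", "-new", "-newkey", "rsa:4096", "-days", "365",
    "-nodes", "-x509",
    "-subj", "/C=US/ST=NY/L=New York/O=Piku/OU=Self-Signed/CN=%s" % domain,
    "-keyout", "/tmp/example.key", "-out", "/tmp/example.crt",
])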

Example 47

Project: YCM-Generator Source File: config_gen.py
def fake_build(project_dir, c_build_log_path, cxx_build_log_path, verbose, make_cmd, build_system, cc, cxx, out_of_tree, configure_opts, make_flags, preserve_environment, qt_version):
    '''Builds the project using the fake toolchain, to collect the compiler flags.

    project_dir: the directory containing the source files
    build_log_path: the file to log commands to
    verbose: show the build process output
    make_cmd: the path of the make executable
    cc: the path of the clang executable
    cxx: the path of the clang++ executable
    out_of_tree: perform an out-of-tree build (autotools only)
    configure_opts: additional flags for configure stage
    make_flags: additional flags for make
    preserve_environment: pass environment variables to build processes
    qt_version: The Qt version to use when building with qmake.
    '''

    # TODO: add Windows support
    assert(not sys.platform.startswith("win32"))
    fake_path = os.path.join(ycm_generator_dir, "fake-toolchain", "Unix")

    # environment variables and arguments for build process
    started = time.time()
    FNULL = open(os.devnull, "w")
    proc_opts = {} if verbose else {
        "stdin": FNULL,
        "stdout": FNULL,
        "stderr": FNULL
    }
    proc_opts["cwd"] = project_dir

    if(preserve_environment):
        env = os.environ
    else:
        # Preserve HOME, since Cmake needs it to find some packages and it's
        # normally there anyway. See #26.
        env = dict(map(lambda x: (x, os.environ[x]), ["HOME"]))

    env["PATH"]  = "{}:{}".format(fake_path, os.environ["PATH"])
    env["CC"] = "clang"
    env["CXX"] = "clang++"
    env["YCM_CONFIG_GEN_CC_LOG"] = c_build_log_path
    env["YCM_CONFIG_GEN_CXX_LOG"] = cxx_build_log_path

    # used during configuration stage, so that cmake, etc. can verify what the compiler supports
    env_config = env.copy()
    env_config["YCM_CONFIG_GEN_CC_PASSTHROUGH"] = cc
    env_config["YCM_CONFIG_GEN_CXX_PASSTHROUGH"] = cxx

    # use -i (ignore errors), since the makefile may include scripts which
    # depend upon the existence of various output files
    make_args = [make_cmd] + make_flags

    # Used for the qmake build system below
    pro_files = glob.glob(os.path.join(project_dir, "*.pro"))

    # sanity check - make sure the toolchain is available
    assert os.path.exists(fake_path), "Could not find toolchain at '{}'".format(fake_path)

    # helper function to display exact commands used
    def run(cmd, *args, **kwargs):
        print("$ " + " ".join(cmd))
        subprocess.call(cmd, *args, **kwargs)

    if build_system is None:
        if os.path.exists(os.path.join(project_dir, "CMakeLists.txt")):
            build_system = "cmake"
        elif os.path.exists(os.path.join(project_dir, "configure")):
            build_system = "autotools"
        elif pro_files:
            build_system = "qmake"
        elif any([os.path.exists(os.path.join(project_dir, x)) for x in ["GNUmakefile", "makefile", "Makefile"]]):
            build_system = "make"

    # execute the build system
    if build_system == "cmake":
        # cmake
        # run cmake in a temporary directory, then compile the project as usual
        build_dir = tempfile.mkdtemp()
        proc_opts["cwd"] = build_dir

        # if the project was built in-tree, we need to hide the cache file so that cmake
        # populates the build dir instead of just re-generating the existing files
        cache_path = os.path.join(project_dir, "CMakeCache.txt")

        if(os.path.exists(cache_path)):
            fd, cache_tmp = tempfile.mkstemp()
            os.close(fd)
            shutil.move(cache_path, cache_tmp)
        else:
            cache_tmp = None

        print("Running cmake in '{}'...".format(build_dir))
        sys.stdout.flush()
        run(["cmake", project_dir] + configure_opts, env=env_config, **proc_opts)

        print("\nRunning make...")
        sys.stdout.flush()
        run(make_args, env=env, **proc_opts)

        print("\nCleaning up...")
        print("")
        sys.stdout.flush()
        shutil.rmtree(build_dir)

        if(cache_tmp):
            shutil.move(cache_tmp, cache_path)

    elif build_system == "autotools":
        # autotools
        # perform build in-tree, since not all projects handle out-of-tree builds correctly

        if(out_of_tree):
            build_dir = tempfile.mkdtemp()
            proc_opts["cwd"] = build_dir
            print("Configuring autotools in '{}'...".format(build_dir))
        else:
            print("Configuring autotools...")

        run([os.path.join(project_dir, "configure")] + configure_opts, env=env_config, **proc_opts)

        print("\nRunning make...")
        run(make_args, env=env, **proc_opts)

        print("\nCleaning up...")

        if(out_of_tree):
            print("")
            shutil.rmtree(build_dir)
        else:
            run([make_cmd, "maintainer-clean"], env=env, **proc_opts)

    elif build_system == "qmake":
        # qmake
        # make sure there is only one .pro file
        if len(pro_files) != 1:
            print("ERROR: Found {} .pro files (expected one): {}.".format(
                len(pro_files), ', '.join(pro_files)))
            sys.exit(1)

        # run qmake in a temporary directory, then compile the project as usual
        build_dir = tempfile.mkdtemp()
        proc_opts["cwd"] = build_dir
        env_config["QT_SELECT"] = qt_version

        # QMAKESPEC is platform dependent - valid mkspecs are in
        # /usr/share/qt4/mkspecs, /usr/lib64/qt5/mkspecs
        env_config["QMAKESPEC"] = {
            ("Linux",  True):   "unsupported/linux-clang",
            ("Linux",  False):  "linux-clang",
            ("Darwin", True):   "unsupported/macx-clang",
            ("Darwin", False):  "macx-clang",
            ("FreeBSD", False): "unsupported/freebsd-clang",
        }[(os.uname()[0], qt_version == "4")]

        print("Running qmake in '{}' with Qt {}...".format(build_dir, qt_version))
        run(["qmake"] + configure_opts + [pro_files[0]], env=env_config,
            **proc_opts)

        print("\nRunning make...")
        run(make_args, env=env, **proc_opts)

        print("\nCleaning up...")
        print("")
        shutil.rmtree(build_dir)

    elif build_system == "make":
        # make
        # needs to be handled last, since other build systems can generate Makefiles
        print("Preparing build directory...")
        run([make_cmd, "clean"], env=env, **proc_opts)

        print("\nRunning make...")
        run(make_args, env=env, **proc_opts)

    elif(os.path.exists(os.path.join(project_dir, "Make/options"))):
        print("Found OpenFOAM Make/options")

        # OpenFOAM build system
        make_args = ["wmake"]

        # Since icpc could not find the directory in which g++ resides,
        # set environment variables to gcc so that fake_build operates normally.

        env['WM_COMPILER']='Gcc'
        env['WM_CC']='gcc'
        env['WM_CXX']='g++'

        print("\nRunning wmake...")
        run(make_args, env=env, **proc_opts)

    else:
        print("ERROR: Unknown build system")
        sys.exit(2)

    print("Build completed in {} sec".format(round(time.time() - started, 2)))
    print("")

Example 48

Project: dcloud Source File: dcloud.py
Function: create
def create(clusterConfigFilePath, overrideClusterId):
    '''
    return:
    {
       "dns": "172.17.0.2",
       "hosts": "172.17.0.2 master\n172.17.0.3 slave1\n172.17.04 slave2"
    }
    '''
    
    dnsServerAddress = None
    hosts = ""
    
    with open(clusterConfigFilePath, "r") as conffile:
        conf = conffile.read()
    
    try:
        clusterConfig = json.loads(conf)
    except ValueError as e:
        print "Given cluster config json file " + clusterConfigFilePath + " is invalid "
        print e.message
        return 1
        
    # docker build if Dockerfile is specified
    clusterConfig = _flattenDockerfile(clusterConfig)

    clusterConfig = _flattenHostname(clusterConfig)
    
    if overrideClusterId != None:
        clusterConfig["id"] = overrideClusterId

    # Append DNS
    dnsNode = {
        "hostname" : "dclouddns",
        "imageName" : REPO_DNS_BASE,
        "cmd" : "service sshd start && tail -f /var/log/yum.log"
    }
    clusterConfig["nodes"].insert(0, dnsNode)

    for i in range(len(clusterConfig["nodes"])):
        # The first iteration is for DNS
        node = clusterConfig["nodes"][i]

        container_name = _generateContainerName(clusterConfig["id"], node["hostname"])

        cmd = ["docker", "run"
		    , "-d" # daemon
      		, "--privileged"]

        # DNS
        cmd.append("--dns")
        if i == 0:
            cmd.append("127.0.0.1") # localhost 
        else:
            cmd.append(dnsServerAddress)

        if "dns" in clusterConfig:
            for dnsIp in clusterConfig["dns"]:
                cmd.append("--dns")
                cmd.append(dnsIp)

        cmd.append("--name")
        cmd.append(container_name)

        fqdn = node["hostname"] + "." + clusterConfig["domain"]
        cmd.append("-h")
        cmd.append(fqdn)

        if "volumes" in node:
            for volumn in node["volumes"]:
                cmd.append("-v")
                cmd.append(volumn)

        if "ports" in node:
            for port in node["ports"]:
                cmd.append("-p")
                cmd.append(port)

        cmd.append(node["imageName"])
        cmd.append("bash")
        cmd.append("-c")
        cmd.append(node["cmd"])
        print "executing: " + ' '.join(cmd)
        subprocess.call(cmd)

        ip = docker.getContainerIpAddress(container_name)
        if i == 0:
            dnsServerAddress = ip
        hosts += ip + " " + fqdn + " " + node["hostname"] + "\n"

    print "dnsServerAddress: " + dnsServerAddress
    if(not ssh.connection_check(dnsServerAddress, "root", "changeme")):
        print "cuem ERROR ****"
        print "ssh connection to root@" + dnsServerAddress + " could not be established"
        return 1

    ssh.exec_command2(dnsServerAddress, "root", "changeme", "echo '" + hosts + "' > /etc/dcloud/dnsmasq/hosts")
    ssh.exec_command2(dnsServerAddress, "root", "changeme", "service dnsmasq restart")

    print "hosts:"
    print hosts
    result = RunResult()
    result.dns = dnsServerAddress
    result.hosts = hosts
    return 0
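
Building the argv incrementally, as above, keeps each docker option next to the logic that decides it; the list is joined with ' '.join only for display, never for execution. A reduced sketch (image and name are hypothetical, and a local docker is assumed):

import subprocess

cmd = ["docker", "run", "-d"]
cmd.extend(["--name", "example"])     # options appended as decisions are made
cmd.append("ubuntu")                  # hypothetical image
print("executing: " + " ".join(cmd))  # joined for display only
subprocess.call(cmd)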

Example 49

Project: doit Source File: dependency.py
    def get_status(self, task, tasks_dict, get_log=False):
        """Check if task is up to date. set task.dep_changed

        If the checker class changed since the previous run, the task is
        deleted, to be sure that its state is not re-used.

        @param task: (Task)
        @param tasks_dict: (dict: Task) passed to objects used on uptodate
        @param get_log: (bool) if True, adds to the returned object all
                               the reasons why this task will be rebuilt.
        @return: (DependencyStatus) a status object with possible status
                                    values up-to-date, run or error

        task.dep_changed (list of strings): file-dependencies that are not
        up-to-date. If the task is not up-to-date because of a target, the
        returned value will contain all file-dependencies, regardless of
        whether they are up-to-date or not.
        """
        result = DependencyStatus(get_log)
        task.dep_changed = []

        # check uptodate bool/callables
        uptodate_result_list = []
        for utd, utd_args, utd_kwargs in task.uptodate:
            # if parameter is a callable
            if hasattr(utd, '__call__'):
                # FIXME control verbosity, check error messages
                # 1) setup object with global info all tasks
                if isinstance(utd, UptodateCalculator):
                    utd.setup(self, tasks_dict)
                # 2) add magic positional args for `task` and `values`
                # if present.
                spec_args = list(inspect.signature(utd).parameters.keys())
                magic_args = []
                for i, name in enumerate(spec_args):
                    if i == 0 and name == 'task':
                        magic_args.append(task)
                    elif i == 1 and name == 'values':
                        magic_args.append(self.get_values(task.name))
                args = magic_args + utd_args
                # 3) call it and get result
                uptodate_result = utd(*args, **utd_kwargs)
            elif isinstance(utd, str):
                # TODO py3.3 has subprocess.DEVNULL
                with open(os.devnull, 'wb') as null:
                    uptodate_result = subprocess.call(
                        utd, shell=True, stderr=null, stdout=null) == 0
            # parameter is a value
            else:
                uptodate_result = utd

            # None means uptodate was not really calculated and should be
            # just ignored
            if uptodate_result is None:
                continue
            uptodate_result_list.append(uptodate_result)
            if not uptodate_result:
                result.add_reason('uptodate_false', (utd, utd_args, utd_kwargs))

        # any uptodate check is false
        if not get_log and result.status == 'run':
            return result

        # no dependencies means it is never up to date.
        if not (task.file_dep or uptodate_result_list):
            if result.set_reason('has_no_dependencies', True):
                return result


        # if target file is not there, task is not up to date
        for targ in task.targets:
            if not os.path.exists(targ):
                task.dep_changed = list(task.file_dep)
                if result.add_reason('missing_target', targ):
                    return result

        # check for modified file_dep checker
        previous = self._get(task.name, 'checker:')
        checker_name = self.checker.__class__.__name__
        if previous and previous != checker_name:
            task.dep_changed = list(task.file_dep)
            # remove all saved values otherwise they might be re-used by
            # some optimization on MD5Checker.get_state()
            self.remove(task.name)
            if result.set_reason('checker_changed', (previous, checker_name)):
                return result

        # check for modified file_dep
        previous = self._get(task.name, 'deps:')
        previous_set = set(previous) if previous else None
        if previous_set and previous_set != task.file_dep:
            if get_log:
                added_files = sorted(list(task.file_dep - previous_set))
                removed_files = sorted(list(previous_set - task.file_dep))
                result.set_reason('added_file_dep', added_files)
                result.set_reason('removed_file_dep', removed_files)
            result.status = 'run'

        # list of file_dep that changed
        check_modified = self.checker.check_modified
        changed = []
        for dep in task.file_dep:
            state = self._get(task.name, dep)
            try:
                file_stat = os.stat(dep)
            except OSError:
                error_msg = "Dependent file '{}' does not exist.".format(dep)
                result.error_reason = error_msg
                if result.add_reason('missing_file_dep', dep, 'error'):
                    return result
            else:
                if state is None or check_modified(dep, file_stat, state):
                    changed.append(dep)
        task.dep_changed = changed

        if len(changed) > 0:
            result.set_reason('changed_file_dep', changed)

        return result
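
The string branch above (see the `TODO py3.3` note) opens `os.devnull` by hand because `subprocess.DEVNULL` did not exist before Python 3.3. On a modern interpreter the same "exit status 0 means up to date" check can be written without the file handle. A minimal sketch, with `shell_uptodate` as a hypothetical helper name:

import subprocess

def shell_uptodate(cmd):
    # Task counts as up to date iff the command exits with status 0.
    # subprocess.DEVNULL (Python 3.3+) replaces the open(os.devnull, 'wb') dance.
    return subprocess.call(cmd, shell=True,
                           stdout=subprocess.DEVNULL,
                           stderr=subprocess.DEVNULL) == 0

print(shell_uptodate('git diff --quiet'))  # True when the worktree has no changes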

Example 50

Project: PythonJS Source File: run.py
def translate_js(filename, javascript=False, dart=False, coffee=False, lua=False, luajs=False, go=False, gopherjs=False, multioutput=False, requirejs=True):
    global tmpname
    tmpname = os.path.join(
        tempfile.gettempdir(), 
        #'test-%s-js=%s-dart=%s-lua=%s' %(filename.split('/')[-1], javascript, dart, lua)
        'regtest-%s'%filename.split('/')[-1]
    )

    output_name = "%s.py" % tmpname
    if javascript:
        content = 'pythonjs.configure(javascript=True)\n' + patch_python(filename, backend='JAVASCRIPT')
    elif dart:
        source = [
            'pythonjs.configure(dart=True)',
            open('../pythonjs/runtime/dart_builtins.py', 'rb').read().decode('utf-8'),
            patch_python(filename, dart=True, backend='DART')
        ]
        content = '\n'.join( source )
    elif coffee:
        source = [
            'pythonjs.configure(coffee=True)',
            patch_python(filename, backend='COFFEE')
        ]
        content = '\n'.join( source )
    elif lua or luajs:
        source = [
            'pythonjs.configure(lua=True)',
            read('../pythonjs/runtime/lua_builtins.py'),
            patch_python(filename, backend='LUA')
        ]
        content = '\n'.join( source )

    elif go or gopherjs:
        content = patch_python(filename, backend='GO')

    else:
        content = patch_python(filename)

    code = '\n'.join(
        [
            '# -*- coding: utf-8 -*-',
            'pythonjs.configure(runtime_exceptions=False)',
            content
        ]
    )
    write(output_name, code)
    cmd = [
        os.path.join("..", "pythonjs", "translator.py"),
        output_name,
        '--debug'
    ]
    if dart:
        cmd.append( '--dart' )
    elif coffee:
        cmd.append( '--coffee')
    elif lua:
        cmd.append( '--lua')
    elif luajs:
        cmd.append( '--luajs')
    elif go:
        cmd.append( '--go' )
    elif gopherjs:
        cmd.append( '--gopherjs' )

    if not requirejs:
        cmd.append( '--no-wrapper' )

    stdout, stderr = run_command(' '.join(cmd), returns_stdout_stderr=True)
    if stderr:
        return ''
    else:

        #jsheader = 'if (typeof(process) != "undefined") { var requirejs = require("requirejs"); }'
        jsheader = ''

        if multioutput or (stdout.startswith("{") and stdout.endswith("}")):
            d = json.loads( stdout )
            stdout = d.pop('main')
            #builtins = read(os.path.join("../pythonjs", "pythonjs.js"))
            for jsfile in d:
                if not jsfile.startswith('/'):
                    stdout = stdout.replace('"%s"' %jsfile, '"/tmp/%s"' %jsfile)
                write(
                    os.path.join('/tmp', jsfile), 
                    '\n'.join( [jsheader, d[jsfile]] ) 
                )

        if dart:

            if os.path.isfile('/tmp/dart2js-output.js'):
                os.unlink('/tmp/dart2js-output.js')

            dart_input = '/tmp/dart2js-input.dart'
            open( dart_input, 'wb').write( stdout.encode('utf-8') )

            cmd = [
                dart2js,
                '-o', '/tmp/dart2js-output.js',
                dart_input
            ]
            if show_details:
                subprocess.call( cmd )
            else:
                sout, serr = run_command(' '.join(cmd), returns_stdout_stderr=True)

            if os.path.isfile('/tmp/dart2js-output.js'):
                return open('/tmp/dart2js-output.js', 'rb').read().decode('utf-8')
            else:
                return ''

        elif coffee:

            coffee_input = '/tmp/coffee-input.coffee'
            open( coffee_input, 'wb').write( stdout.encode('utf-8') )

            cmd = [
                'coffee',
                '--print', # print js to stdout
                coffee_input
            ]
            #subprocess.call( cmd )
            sout, serr = run_command(' '.join(cmd), returns_stdout_stderr=True)
            if serr:
                return ''
            elif sout:
                builtins = read(os.path.join("../pythonjs", "pythonjs.js"))
                open('/tmp/coffee-output.js', 'wb').write( (builtins+'\n'+sout).encode('utf-8') )
                return sout
            else:
                return ''

        elif luajs:
            lua2js_input = '/tmp/lua2js-input.lua'
            lua2js_output = '/tmp/lua2js-output.js'
            open( lua2js_input, 'wb').write( stdout.encode('utf-8') )

            cmd = [
                lua2js,
                lua2js_input,
                lua2js_output
            ]
            try:
                subprocess.check_call( cmd )
            except subprocess.CalledProcessError:
                return ''
            return open( lua2js_output, 'rb' ).read().decode('utf-8')

        else:
            return '\n'.join( [jsheader, stdout] )
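
Note the two error-handling styles in this example: the dart branch fires `subprocess.call` and then probes for the output file, while the luajs branch relies on `subprocess.check_call` raising on failure. A minimal side-by-side sketch of the difference, using a harmless `echo` as a stand-in for the real translator commands:

import subprocess

cmd = ["echo", "hello"]  # stand-in for the dart2js / lua2js invocations above

# subprocess.call returns the exit status and never raises on a non-zero
# exit, so the caller must inspect the result (or, as the dart branch does,
# check whether the expected output file appeared):
if subprocess.call(cmd) != 0:
    print("command failed")

# subprocess.check_call raises CalledProcessError on a non-zero status,
# which the luajs branch converts into an empty translation result:
try:
    subprocess.check_call(cmd)
except subprocess.CalledProcessError:
    translated = ""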