os.remove

Here are examples of the Python API os.remove taken from open source projects. By voting up, you can indicate which examples are most useful and appropriate.
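
Before the project examples, here is a minimal sketch (not taken from any project below) of the two cleanup patterns that recur throughout them: checking for a file before deleting it, and best-effort deletion that tolerates a missing file. The temporary file is created only so the calls have something to delete.

import os
import tempfile

# Create a throwaway file so the deletes below have a real target.
fd, tmp_path = tempfile.mkstemp(suffix='.tmp')
os.close(fd)

# Pattern 1: check for the file before deleting it.
if os.path.exists(tmp_path):
    os.remove(tmp_path)

# Pattern 2: best-effort cleanup -- attempt the delete and tolerate
# a missing file (os.remove raises OSError if the path does not exist).
try:
    os.remove(tmp_path)
except OSError:
    pass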

200 Examples

Example 1

Project: nnmware
Source File: ajax.py
    def handle_upload(self, request):
        is_raw = True
        if request.FILES:
            is_raw = False
            if len(request.FILES) == 1:
                upload = request.FILES.values()[0]
            else:
                return dict(success=False, error=_("Bad upload."))
            filename = upload.name
        else:
            # the file is stored raw in the request
            upload = request
            # get file name
            try:
                filename = request.GET['qqfile']
            except KeyError as aerr:
                return dict(success=False, error=_("Can't read file name"))
        self.setup(filename)
        # noinspection PyBroadException
        try:
            if is_raw:
                # File was uploaded via ajax, and is streaming in.
                chunk = upload.read(self.BUFFER_SIZE)
                while len(chunk) > 0:
                    self._destination.write(chunk)
                    if self.max_size():
                        raise IOError
                    chunk = upload.read(self.BUFFER_SIZE)
            else:
                # File was uploaded via a POST, and is here.
                for chunk in upload.chunks():
                    self._destination.write(chunk)
                    if self.max_size():
                        raise IOError
        except:
            # things went badly.
            return dict(success=False, error=_("Upload error"))
        self._destination.close()
        if self._filetype == 'image':
            # noinspection PyBroadException
            try:
                i = Image.open(self._path)
            except:
                os.remove(self._path)
                return dict(success=False, error=_("File is not image format"))
            f_name, f_ext = os.path.splitext(self._filename)
            f_without_ext = os.path.splitext(self._path)[0]
            new_path = ".".join([f_without_ext, self._save_format.lower()])
            if setting('IMAGE_STORE_ORIGINAL', False):
                # TODO: need to change the extension
                orig_path = ".".join([f_without_ext + '_orig', self._save_format.lower()])
                shutil.copy2(self._path, orig_path)
            i.thumbnail((1200, 1200), Image.ANTIALIAS)
            # noinspection PyBroadException
            try:
                if self._path == new_path:
                    i.save(self._path, self._save_format)
                else:
                    i.save(new_path, self._save_format)
                    os.remove(self._path)
                    self._path = new_path
            except:
                # noinspection PyBroadException
                try:
                    os.remove(self._path)
                    os.remove(new_path)
                except:
                    pass
                return dict(success=False, error=_("Error saving image"))
            self._filename = ".".join([f_name, self._save_format.lower()])
        return dict(success=True, fullpath=self._path, path=os.path.relpath(self._path, '/' + settings.MEDIA_ROOT),
                    old_filename=filename, filename=self._filename)

Example 2

def run(test, params, env):
    """
    Test disk attachment of multiple disks.

    1. Prepare test environment, destroy VMs.
    2. Perform 'qemu-img create' operation.
    3. Edit disks xml and start the domains.
    4. Perform test operation.
    5. Recover test environment.
    6. Confirm the test result.
    """

    def set_vm_controller_xml(vmxml):
        """
        Set VM scsi controller xml.

        :param vmxml. Domain xml object.
        """
        # Add disk scsi controller
        scsi_controller = Controller("controller")
        scsi_controller.type = "scsi"
        scsi_controller.index = "0"
        scsi_controller.model = "virtio-scsi"
        vmxml.add_device(scsi_controller)

        # Redefine domain
        vmxml.sync()

    def get_vm_disk_xml(dev_type, dev_name, **options):
        """
        Create a disk xml object and return it.

        :param dev_type. Disk type.
        :param dev_name. Disk device name.
        :param options. Disk options.
        :return: Disk xml object.
        """
        # Create disk xml
        disk_xml = Disk(type_name=dev_type)
        disk_xml.device = options["disk_device"]
        if options.has_key("sgio") and options["sgio"] != "":
            disk_xml.sgio = options["sgio"]
            disk_xml.device = "lun"
            disk_xml.rawio = "no"

        if dev_type == "block":
            disk_attr = "dev"
        else:
            disk_attr = "file"

        disk_xml.target = {'dev': options["target"],
                           'bus': options["bus"]}
        disk_xml.source = disk_xml.new_disk_source(
            **{'attrs': {disk_attr: dev_name}})

        # Add driver options from parameters.
        driver_dict = {"name": "qemu"}
        if options.has_key("driver"):
            for driver_option in options["driver"].split(','):
                if driver_option != "":
                    d = driver_option.split('=')
                    logging.debug("disk driver option: %s=%s", d[0], d[1])
                    driver_dict.update({d[0].strip(): d[1].strip()})

        disk_xml.driver = driver_dict
        if options.has_key("share"):
            if options["share"] == "shareable":
                disk_xml.share = True

        if options.has_key("readonly"):
            if options["readonly"] == "readonly":
                disk_xml.readonly = True

        logging.debug("The disk xml is: %s" % disk_xml.xmltreefile)

        return disk_xml

    vm_names = params.get("vms").split()
    if len(vm_names) < 2:
        raise error.TestNAError("No multi vms provided.")

    # Disk specific attributes.
    vms_sgio = params.get("virt_disk_vms_sgio", "").split()
    vms_share = params.get("virt_disk_vms_share", "").split()
    vms_readonly = params.get("virt_disk_vms_readonly", "").split()
    disk_bus = params.get("virt_disk_bus", "virtio")
    disk_target = params.get("virt_disk_target", "vdb")
    disk_type = params.get("virt_disk_type", "file")
    disk_device = params.get("virt_disk_device", "disk")
    disk_format = params.get("virt_disk_format", "")
    scsi_options = params.get("scsi_options", "")
    disk_driver_options = params.get("disk_driver_options", "")
    hotplug = "yes" == params.get("virt_disk_vms_hotplug", "no")
    status_error = params.get("status_error").split()
    test_error_policy = "yes" == params.get("virt_disk_test_error_policy",
                                            "no")
    test_shareable = "yes" == params.get("virt_disk_test_shareable", "no")
    test_readonly = "yes" == params.get("virt_disk_test_readonly", "no")
    disk_source_path = test.tmpdir
    disk_path = ""
    tmp_filename = "cdrom_te.tmp"
    tmp_readonly_file = ""

    # Backup vm xml files.
    vms_backup = []
    # We just use 2 VMs for testing.
    for i in range(2):
        vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_names[i])
        vms_backup.append(vmxml_backup)

    try:
        # Create disk images if needed.
        disks = []
        if disk_format == "scsi":
            disk_source = libvirt.create_scsi_disk(scsi_options)
            if not disk_source:
                raise error.TestNAError("Get scsi disk failed.")
            disks.append({"format": "scsi", "source": disk_source})

        elif disk_format == "iscsi":
            # Create iscsi device if needed.
            image_size = params.get("image_size", "100M")
            disk_source = libvirt.setup_or_cleanup_iscsi(
                is_setup=True, is_login=True, image_size=image_size)
            logging.debug("iscsi dev name: %s", disk_source)
            # Format the disk and make the file system.
            libvirt.mk_part(disk_source, size="10M")
            libvirt.mkfs("%s1" % disk_source, "ext3")
            disk_source += "1"
            disks.append({"format": disk_format,
                          "source": disk_source})
        elif disk_format in ["raw", "qcow2"]:
            disk_path = "%s/test.%s" % (disk_source_path, disk_format)
            disk_source = libvirt.create_local_disk("file", disk_path, "1",
                                                    disk_format=disk_format)
            libvirt.mkfs(disk_source, "ext3")
            disks.append({"format": disk_format,
                          "source": disk_source})

        if disk_device == "cdrom":
            tmp_readonly_file = "/root/%s" % tmp_filename
            with open(tmp_readonly_file, 'w') as f:
                f.write("teststring\n")
            disk_path = "%s/test.iso" % disk_source_path
            disk_source = libvirt.create_local_disk("iso", disk_path, "1")
            disks.append({"source": disk_source})

        # Compose the new domain xml
        vms_list = []
        for i in range(2):
            vm = env.get_vm(vm_names[i])
            # Destroy domain first.
            if vm.is_alive():
                vm.destroy(gracefully=False)

            # Configure vm disk options and define vm
            vmxml = vm_xml.VMXML.new_from_dumpxml(vm_names[i])
            if disk_bus == "scsi":
                set_vm_controller_xml(vmxml)
            disk_sgio = ""
            if len(vms_sgio) > i:
                disk_sgio = vms_sgio[i]
            shareable = ""
            if len(vms_share) > i:
                shareable = vms_share[i]
            readonly = ""
            if len(vms_readonly) > i:
                readonly = vms_readonly[i]
            disk_xml = get_vm_disk_xml(disk_type, disk_source,
                                       sgio=disk_sgio, share=shareable,
                                       target=disk_target, bus=disk_bus,
                                       driver=disk_driver_options,
                                       disk_device=disk_device,
                                       readonly=readonly)
            if not hotplug:
                # If we are not testing hotplug,
                # add disks to domain xml and sync.
                vmxml.add_device(disk_xml)
                vmxml.sync()
            vms_list.append({"name": vm_names[i], "vm": vm,
                             "status": "yes" == status_error[i],
                             "disk": disk_xml})
            logging.debug("vms_list %s" % vms_list)

        for i in range(len(vms_list)):
            try:
                # Try to start the domain.
                vms_list[i]['vm'].start()
                # Check if VM is started as expected.
                if not vms_list[i]['status']:
                    raise error.TestFail('VM started unexpectedly.')

                session = vms_list[i]['vm'].wait_for_login()
                # if we are testing hotplug, we need to start the domain
                # and then run virsh attach-device command.
                if hotplug:
                    vms_list[i]['disk'].xmltreefile.write()
                    result = virsh.attach_device(vms_list[i]['name'],
                                                 vms_list[i]['disk'].xml).exit_status
                    os.remove(vms_list[i]['disk'].xml)

                    # Check if the return code of attach-device
                    # command is as expected.
                    if 0 != result and vms_list[i]['status']:
                        raise error.TestFail('Failed to hotplug disk device')
                    elif 0 == result and not vms_list[i]['status']:
                        raise error.TestFail('Hotplug disk device unexpectedly.')

                # Check disk error_policy option in VMs.
                if test_error_policy:
                    error_policy = vms_list[i]['disk'].driver["error_policy"]
                    if i == 0:
                        # If testing the enospace error policy, only one VM is used
                        if error_policy == "enospace":
                            cmd = ("mount /dev/%s /mnt && dd if=/dev/zero of=/mnt/test"
                                   " bs=1M count=2000 2>&1 | grep 'No space left'"
                                   % disk_target)
                            s, o = session.cmd_status_output(cmd)
                            logging.debug("error_policy in vm0 exit %s; output: %s", s, o)
                            if 0 != s:
                                raise error.TestFail("Test error_policy %s: cann't see"
                                                     " error messages")
                            session.close()
                            break

                        if session.cmd_status("fdisk -l /dev/%s && mount /dev/%s /mnt; ls /mnt"
                                              % (disk_target, disk_target)):
                            session.close()
                            raise error.TestFail("Test error_policy: "
                                                 "failed to mount disk")
                    if i == 1:
                        try:
                            session0 = vms_list[0]['vm'].wait_for_login(timeout=10)
                            cmd = ("fdisk -l /dev/%s && mkfs.ext3 -F /dev/%s "
                                   % (disk_target, disk_target))
                            s, o = session.cmd_status_output(cmd)
                            logging.debug("error_policy in vm1 exit %s; output: %s", s, o)
                            session.close()
                            cmd = ("dd if=/dev/zero of=/mnt/test bs=1M count=100 && dd if="
                                   "/mnt/test of=/dev/null bs=1M;dmesg | grep 'I/O error'")
                            s, o = session0.cmd_status_output(cmd)
                            logging.debug("session in vm0 exit %s; output: %s", s, o)
                            if error_policy == "report":
                                if s:
                                    raise error.TestFail("Test error_policy %s: cann't report"
                                                         " error" % error_policy)
                            elif error_policy == "ignore":
                                if 0 == s:
                                    raise error.TestFail("Test error_policy %s: error cann't"
                                                         " be ignored" % error_policy)
                            session0.close()
                        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError), e:
                            if error_policy == "stop":
                                if not vms_list[0]['vm'].is_paused():
                                    raise error.TestFail("Test error_policy %s: cann't stop"
                                                         " VM" % error_policy)
                            else:
                                logging.error(str(e))
                                raise error.TestFail("Test error_policy %s: login failed"
                                                     % error_policy)

                if test_shareable:
                    # Check shared file selinux label with type and MCS as
                    # svirt_image_t:s0
                    if disk_path:
                        se_label = utils_selinux.get_context_of_file(disk_path)
                        logging.debug("Context of shared img '%s' is '%s'" %
                                      (disk_path, se_label))
                        if "svirt_image_t:s0" not in se_label:
                            raise error.TestFail("Context of shared img is not"
                                                 " expected.")
                    if i == 1:
                        try:
                            test_str = "teststring"
                            # Try to write on vm0.
                            session0 = vms_list[0]['vm'].wait_for_login(timeout=10)
                            cmd = ("fdisk -l /dev/%s && mount /dev/%s /mnt && echo '%s' "
                                   "> /mnt/test && umount /mnt"
                                   % (disk_target, disk_target, test_str))
                            s, o = session0.cmd_status_output(cmd)
                            logging.debug("session in vm0 exit %s; output: %s", s, o)
                            if s:
                                raise error.TestFail("Test disk shareable on VM0 failed")
                            session0.close()
                            # Try to read on vm1.
                            cmd = ("fdisk -l /dev/%s && mount /dev/%s /mnt && grep %s"
                                   " /mnt/test && umount /mnt"
                                   % (disk_target, disk_target, test_str))
                            s, o = session.cmd_status_output(cmd)
                            logging.debug("session in vm1 exit %s; output: %s", s, o)
                            if s:
                                raise error.TestFail("Test disk shareable on VM1 failed")
                        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError), e:
                            logging.error(str(e))
                            raise error.TestFail("Test disk shareable: login failed")

                if test_readonly:
                    # Check shared file selinux label with type and MCS as
                    # virt_content_t:s0
                    if disk_path:
                        se_label = utils_selinux.get_context_of_file(disk_path)
                        logging.debug("Context of shared iso '%s' is '%s'" %
                                      (disk_path, se_label))
                        if "virt_content_t:s0" not in se_label:
                            raise error.TestFail("Context of shared iso is not"
                                                 " expected.")
                    if i == 1:
                        try:
                            test_str = "teststring"
                            # Try to read on vm0.
                            session0 = vms_list[0]['vm'].wait_for_login(timeout=10)
                            cmd = "mount -o ro /dev/cdrom /mnt && grep "
                            cmd += "%s /mnt/%s" % (test_str, tmp_filename)
                            s, o = session0.cmd_status_output(cmd)
                            logging.debug("session in vm0 exit %s; output: %s", s, o)
                            session0.close()
                            if s:
                                raise error.TestFail("Test file not found in VM0 cdrom")
                            # Try to read on vm1.
                            s, o = session.cmd_status_output(cmd)
                            logging.debug("session in vm1 exit %s; output: %s", s, o)
                            if s:
                                raise error.TestFail("Test file not found in VM1 cdrom")
                        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError), e:
                            logging.error(str(e))
                            raise error.TestFail("Test disk shareable: login failed")
                session.close()
            except virt_vm.VMStartError:
                if vms_list[i]['status']:
                    raise error.TestFail('VM Failed to start'
                                         ' for some reason!')
    finally:
        # Stop VMs.
        for i in range(len(vms_list)):
            if vms_list[i]['vm'].is_alive():
                vms_list[i]['vm'].destroy(gracefully=False)

        # Recover VMs.
        for vmxml_backup in vms_backup:
            vmxml_backup.sync()

        # Remove disks.
        for img in disks:
            if img.has_key('format'):
                if img["format"] == "scsi":
                    libvirt.delete_scsi_disk()
                elif img["format"] == "iscsi":
                    libvirt.setup_or_cleanup_iscsi(is_setup=False)
            elif img.has_key("source"):
                os.remove(img["source"])

        if tmp_readonly_file:
            if os.path.exists(tmp_readonly_file):
                os.remove(tmp_readonly_file)

Example 3

Project: plaso
Source File: sqlite.py
  def Open(self, file_object, wal_file_object=None):
    """Opens a SQLite database file.

    Since pysqlite cannot read directly from a file-like object, a temporary
    copy of the file is made. After creating the copy, this function sets up
    a connection with the database and determines the names of the tables.

    Args:
      file_object (dfvfs.FileIO): file-like object.
      wal_file_object (Optional[dfvfs.FileIO]): file-like object for the
          Write-Ahead Log (WAL) file.

    Raises:
      IOError: if the file-like object cannot be read.
      sqlite3.DatabaseError: if the database cannot be parsed.
      ValueError: if the file-like object is missing.
    """
    if not file_object:
      raise ValueError(u'Missing file object.')

    # TODO: Current design copies the entire file into a buffer
    # that is parsed by each SQLite parser. This is not very efficient,
    # especially when many SQLite parsers are run against a relatively
    # large SQLite database. This temporary file that is created should
    # be usable by all SQLite parsers so the file should only be read
    # once in memory and then deleted when all SQLite parsers have completed.

    # TODO: Change this into a proper implementation using APSW
    # and virtual filesystems when that will be available.
    # Info: http://apidoc.apsw.googlecode.com/hg/vfs.html#vfs and
    # http://apidoc.apsw.googlecode.com/hg/example.html#example-vfs
    # Until then, just copy the file into a tempfile and parse it.

    temporary_file = tempfile.NamedTemporaryFile(
        delete=False, dir=self._temporary_directory)

    try:
      self._CopyFileObjectToTemporaryFile(file_object, temporary_file)
      self._temp_db_file_path = temporary_file.name

    except IOError:
      os.remove(temporary_file.name)
      raise

    finally:
      temporary_file.close()

    if wal_file_object:
      # Create WAL file using same filename so it is available for
      # sqlite3.connect()
      temporary_filename = u'{0:s}-wal'.format(self._temp_db_file_path)
      temporary_file = open(temporary_filename, 'wb')
      try:
        self._CopyFileObjectToTemporaryFile(wal_file_object, temporary_file)
        self._temp_wal_file_path = temporary_filename

      except IOError:
        os.remove(temporary_filename)
        raise

      finally:
        temporary_file.close()

    self._database = sqlite3.connect(self._temp_db_file_path)
    try:
      self._database.row_factory = sqlite3.Row
      cursor = self._database.cursor()

      sql_results = cursor.execute(
          u'SELECT name FROM sqlite_master WHERE type="table"')

      self._table_names = [row[0] for row in sql_results]

    except sqlite3.DatabaseError as exception:
      self._database.close()
      self._database = None

      os.remove(self._temp_db_file_path)
      self._temp_db_file_path = u''
      if self._temp_wal_file_path:
        os.remove(self._temp_wal_file_path)
        self._temp_wal_file_path = u''

      logging.debug(
          u'Unable to parse SQLite database: {0:s} with error: {1:s}'.format(
              self._filename, exception))
      raise

    self._is_open = True

Example 4

def run_subprocess(
    command, tool, stdout=None,
    stderr=None, stdoutlog=False,
        working_dir=None,with_queue=False, stdin=None):
    """ Runs a command on the system shell and forks a new process

        also creates a file for stderr and stdout if needed
        to avoid deadlock.
    """
    # Very dirty hack
    logger.info(tool + ' command = ' + ' '.join(command))
    if (working_dir is None):
        working_dir = '.'
    if(tool == 'selection_pipeline'):
        stderr = working_dir+'/selection_stderr.tmp'
        stdout = working_dir+ '/selection_stdout.tmp'
    if(stderr is None):
        stderr = 'stderr.tmp'
        standard_err = open(stderr, 'w')
    else:
        standard_err = open(stderr, 'w')
    if(stdin is None):
        standard_in = None
    else:
        standard_in = open(working_dir + "/" + stdin, 'r')
    try:
        if(stdout is None):
            standard_out = open('stdout.tmp', 'w')
            exit_code = subprocess.Popen(
                command, stdout=standard_out, stderr=standard_err,cwd=working_dir, stdin=standard_in)
        else:
        # find out what kind of exception to try here
            if(hasattr(stdout, 'read')):
                exit_code = subprocess.Popen(
                    command, stdout=stdout, stderr=standard_err,cwd=working_dir, stdin=standard_in)
            else:
                stdout = open(stdout, 'w')
                exit_code = subprocess.Popen(
                    command, stdout=stdout, stderr=standard_err,cwd=working_dir, stdin=standard_in)
            standard_out = stdout
    except:
        logger.error(tool + " failed to run " + ' '.join(command))
        standard_err = open(stderr, 'r')
        while True:
            line = standard_err.readline()
            if not line:
                break
            logger.info(tool + " STDERR: " + line.strip())
        standard_err.close()
        sys.exit(SUBPROCESS_FAILED_EXIT)
    try:
        while(exit_code.poll() is None):
            sleep(0.2)
            if(STOP == True):
                exit_code.send_signal(signal.SIGINT) 
                if (with_queue) :
                   return
                else:
                    sys.exit(SUBPROCESS_FAILED_EXIT)
    except (KeyboardInterrupt, SystemExit):
        exit_code.send_signal(signal.SIGINT) 
        global STOP
        STOP = True
        if( with_queue) :
            return
        else:
            sys.exit(SUBPROCESS_FAILED_EXIT)
    standard_err.close()
    standard_out.close()
    standard_err = open(stderr, 'r')
    if(exit_code.returncode != 0):
        logger.error(tool + " failed to run " + ' '.join(command))
        while True:
            line = standard_err.readline()
            if not line:
                break
            logger.info(tool + " STDERR: " + line.strip())
        sys.exit(SUBPROCESS_FAILED_EXIT)
    stdout_log = False
    if(stdout is None):
        standard_out = open('stdout.tmp', 'r')
        stdout_log = True
    elif(stdoutlog):
        if(hasattr(stdout, 'write')):
            standard_out = open(stdout.name, 'r')
        else:
            standard_out = open(stdout, 'r')
        stdout_log = True
    if(stdout_log):
        while True:
            line = standard_out.readline()
            if not line:
                break
            logger.info(tool + " STDOUT: " + line.strip())
        standard_out.close()
    while True:
        line = standard_err.readline()
        if not line:
            break
        logger.info(tool + " STDERR: " + line.strip())
    logger.info("Finished tool " + tool)
    logger.debug("command = " + ' '.join(command))
    standard_err.close()
    standard_out.close()
    # Remove stdout if it either was not specified
    # or the log was specified.
    if(stdout is None or stdout == 'selection_stdout.tmp'):
        os.remove('stdout.tmp')
    elif(stdoutlog):
        os.remove(standard_out.name)
    os.remove(stderr)

Example 5

	def __init__(self,filepath):
		print filepath
		self.template =""
		self.totalTime =""
		self.pages =""
		self.words =""
		self.characters =""
		self.application =""
		self.docSecurity =""
		self.lines =""
		self.paragraphs =""
		self.scaleCrop =""
		self.company =""
		self.linksUpToDate =""
		self.charactersWithSpaces =""
		self.shareDoc =""
		self.hyperlinksChanged =""
		self.appVersion =""	
		self.title =""
		self.subject =""
		self.creator =""
		self.keywords =""
		self.lastModifiedBy =""
		self.revision =""
		self.createdDate =""
		self.modifiedDate =""			
		self.thumbnailPath =""	
		
		rnd  = str(random.randrange(0, 1001, 3))
		zip = zipfile.ZipFile(filepath, 'r')
		file('app'+rnd+'.xml', 'w').write(zip.read('docProps/app.xml'))
		file('core'+rnd+'.xml', 'w').write(zip.read('docProps/core.xml'))
		try:
			file('comments'+rnd+'.xml', 'w').write(zip.read('word/comments.xml'))
			self.comments="ok"
		except:
			self.comments="error"
		
		thumbnailPath = ""
		#try:
			#file('thumbnail'+rnd+'.jpeg', 'w').write(zip.read('docProps/thumbnail.jpeg'))
		 	#thumbnailPath = 'thumbnail'+rnd+'.jpeg'
		#except:
		#	pass
			
		zip.close()

		# first, some statistics about the software used for editing and about the document
		
		f = open ('app'+rnd+'.xml','r')
		app = f.read()
		self.cargaApp(app)
		f.close()
		
		if self.comments=="ok":
			f = open ('comments'+rnd+'.xml','r')
			comm = f.read()
			self.cargaComm(comm)
			f.close()

		# data about the author, etc.

		f = open ('core'+rnd+'.xml','r')
		core = f.read()
		self.cargaCore(core)
		self.thumbnailPath = thumbnailPath
		f.close()

		# delete everything except the thumbnail
		
		os.remove('app'+rnd+'.xml')
		os.remove('core'+rnd+'.xml')	
		os.remove('comments'+rnd+'.xml')	

Example 6

Project: cloud-scheduler
Source File: nimbuscluster.py
    def vm_destroy(self, vm, return_resources=True, reason="", shutdown_first=True):
        """
        Shutdown, destroy and return the resources of a VM to its cluster

        Parameters:
        vm -- vm to shutdown and destroy
        return_resources -- if set to false, do not return resources from VM to cluster
        shutdown_first -- if set to false, destroy the VM without first calling shutdown
        """

        # Create an epr for workspace.sh
        vm_epr = nimbus_xml.ws_epr_factory(vm.id, vm.clusteraddr, vm.clusterport)
        if vm.clusteraddr != self.network_address:
            log.error("Attempting to destroy a VM on wrong cluster - vm belongs to %s, but this is %s. Abort" % (vm.clusteraddr, self.networ_address))
            return -1

        if shutdown_first:
            # Create the workspace command with shutdown option
            shutdown_cmd = self.vmshutdown_factory(vm_epr)
            log.verbose("Shutting down VM with command: " + string.join(shutdown_cmd, " "))

            # Execute the workspace shutdown command.
            shutdown_return = self.vm_exec_silent(shutdown_cmd, env=vm.get_env())
            if (shutdown_return != 0):
                log.debug("(vm_destroy) - VM shutdown request failed, moving directly to destroy.")
            else:
                log.verbose("(vm_destroy) - workspace shutdown command executed successfully.")
                # Sleep for a few seconds to allow for proper shutdown
                log.verbose("Waiting %ss for VM to shut down..." % self.VM_SHUTDOWN)
                time.sleep(self.VM_SHUTDOWN)


        # Create the workspace command with destroy option as a list (priv.)
        destroy_cmd = self.vmdestroy_factory(vm_epr)
        log.verbose("Destroying VM with command: " + string.join(destroy_cmd, " "))

        # Execute the workspace destroy command: wait for return, stdout to log.
        (destroy_return, destroy_out, destroy_error) = self.vm_execwait(destroy_cmd, env=vm.get_env())
        destroy_out = destroy_out + destroy_error


        # Check destroy return code. If successful, continue. Otherwise, set VM to
        # error state (wait, and the polling thread will attempt a destroy later)
        if (destroy_return != 0):

            if "Destroyed" == self._extract_state(destroy_error):
                log.debug("VM %s seems to have already been destroyed." % vm.id)
            else:
                if destroy_out == "" or destroy_out == None:
                    destroy_out = "No Output returned."
                if destroy_error == "" or destroy_error == None:
                    destroy_error = "No Error output returned."
                log.warning("VM %s was not correctly destroyed: %s %s %s" % (vm.id, destroy_out, destroy_error, destroy_return))
                vm.status = "Error"
                os.remove(vm_epr)
                return destroy_return

        # Delete VM proxy
        if (vm.get_proxy_file()) :
            log.verbose("Cleaning up proxy for VM %s (%s)" % (vm.id, vm.get_proxy_file()))
            try:
                os.remove(vm.get_proxy_file())
            except:
                log.exception("Problem removing VM proxy file")

        # Return checked out resources And remove VM from the Cluster's 'vms' list
        with self.vms_lock:
            try:
                self.vms.remove(vm)
            except ValueError:
                log.error("Attempted to remove vm from list that was already removed.")
                return_resources = False
        if return_resources and vm.return_resources:
            self.resource_return(vm)

        # Delete EPR
        os.remove(vm_epr)


        log.info("Destroyed VM: %s Name: %s Reason: %s" % (vm.id, vm.hostname, reason))

        return destroy_return

Example 7

Project: mps-youtube
Source File: download.py
@command(r'(dv|da|d|dl|download)\s*(\d{1,4})')
def download(dltype, num):
    """ Download a track or playlist by menu item number. """
    # This function needs refactoring!
    # pylint: disable=R0912
    # pylint: disable=R0914
    if g.browse_mode == "ytpl" and dltype in ("da", "dv"):
        plid = g.ytpls[int(num) - 1]["link"]
        down_plist(dltype, plid)
        return

    elif g.browse_mode == "ytpl":
        g.message = "Use da or dv to specify audio / video playlist download"
        g.message = c.y + g.message + c.w
        g.content = content.generate_songlist_display()
        return

    elif g.browse_mode != "normal":
        g.message = "Download must refer to a specific video item"
        g.message = c.y + g.message + c.w
        g.content = content.generate_songlist_display()
        return

    screen.writestatus("Fetching video info...")
    song = (g.model[int(num) - 1])
    best = dltype.startswith("dv") or dltype.startswith("da")

    if not best:

        try:
            # user prompt for download stream
            url, ext, url_au, ext_au = prompt_dl(song)

        except KeyboardInterrupt:
            g.message = c.r + "Download aborted!" + c.w
            g.content = content.generate_songlist_display()
            return

        if not url or ext_au == "abort":
            # abort on invalid stream selection
            g.content = content.generate_songlist_display()
            g.message = "%sNo download selected / invalid input%s" % (c.y, c.w)
            return

        else:
            # download user selected stream(s)
            filename = _make_fname(song, ext)
            args = (song, filename, url)

            if url_au and ext_au:
                # downloading video and audio stream for muxing
                audio = False
                filename_au = _make_fname(song, ext_au)
                args_au = (song, filename_au, url_au)

            else:
                audio = ext in ("m4a", "ogg")

            kwargs = dict(audio=audio)

    elif best:
        # set up download without prompt
        url_au = None
        av = "audio" if dltype.startswith("da") else "video"
        audio = av == "audio"
        filename = _make_fname(song, None, av=av)
        args = (song, filename)
        kwargs = dict(url=None, audio=audio)

    try:
        # perform download(s)
        dl_filenames = [args[1]]
        f = _download(*args, **kwargs)
        if f:
            g.message = "Saved to " + c.g + f + c.w

        if url_au:
            dl_filenames += [args_au[1]]
            _download(*args_au, allow_transcode=False, **kwargs)

    except KeyboardInterrupt:
        g.message = c.r + "Download halted!" + c.w

        try:
            for downloaded in dl_filenames:
                os.remove(downloaded)

        except IOError:
            pass

    if url_au:
        # multiplex
        name, ext = os.path.splitext(args[1])
        tmpvideoname = name + '.' +str(random.randint(10000, 99999)) + ext
        os.rename(args[1], tmpvideoname)
        mux_cmd = [g.muxapp, "-i", tmpvideoname, "-i", args_au[1], "-c",
                   "copy", name + ".mp4"]

        try:
            subprocess.call(mux_cmd)
            g.message = "Saved to :" + c.g + mux_cmd[7] + c.w
            os.remove(tmpvideoname)
            os.remove(args_au[1])

        except KeyboardInterrupt:
            g.message = "Audio/Video multiplex aborted!"

    g.content = content.generate_songlist_display()

Example 8

Project: scansio-sonar-es
Source File: sonar_ssl.py
def main(argv):
    parser = argparse.ArgumentParser()
    parser.add_argument('--server', default=DEFAULT_SERVER,
                        help=u'Elasticsearch hostname or IP (default {0})'.format(DEFAULT_SERVER))
    parser.add_argument('--port', default=DEFAULT_PORT,
                        help=u'Elasticsearch port (default {0})'.format(DEFAULT_PORT))
    args = parser.parse_args(argv[1:])

    workers = cpu_count()
    process_hosts_queue = Queue(maxsize=20000)
    process_certs_queue = Queue(maxsize=20000)
    update_hosts_queue = Queue(maxsize=20000)

    es = Elasticsearch([{u'host': args.server, u'port': args.port}], timeout=60)

    imported_sonar = es.search(index='scansio-sonar-ssl-imported', body={"size": 3000, "query": {"match_all": {}}})
    imported_files = []

    for f in imported_sonar['hits']['hits']:
        imported_files.append(f['_id'])

    scansio_feed = requests.get('https://scans.io/json')
    if scansio_feed.status_code == 200:
        feed = scansio_feed.json()
        if 'studies' in feed:
            for result in feed['studies']:
                if result['name'] == 'SSL Certificates':
                    for res in result['files']:
                        scans_file = res['name']
                        if scans_file.endswith('certs.gz'):
                            if scans_file.endswith('20131030-20150518_certs.gz'):
                                certfile = '20131030-20150518_certs.gz'
                            else:
                                certfile = scans_file[48:65]
                            if certfile not in imported_files:
                                logger.warning("We don't have {file} imported lets download it".format(file=certfile))
                                phys_file = requests.get(scans_file, stream=True)
                                # Need to do this cause some of the files are rather large
                                with open('{f}'.format(f=certfile), 'wb') as newcerts:
                                    for chunk in phys_file.iter_content(chunk_size=1024):
                                        if chunk:
                                            newcerts.write(chunk)
                                with open('{f}'.format(f=certfile), 'rb') as fh:
                                    h = hashlib.sha1()
                                    while True:
                                        data = fh.read(8192)
                                        if not data:
                                            break
                                        h.update(data)
                                sha1 = h.hexdigest()
                                if sha1 == res['fingerprint']:
                                    for w in xrange(workers):
                                        queue_es = Elasticsearch([{u'host': args.server, u'port': args.port}],
                                                                 timeout=60)
                                        p = Process(target=process_scan_certs, args=(process_certs_queue, queue_es))
                                        p.daemon = True
                                        p.start()
                                    logger.warning("Importing {f} at {d}".format(f=certfile, d=datetime.now()))
                                    parse_certs_file(certfile, process_certs_queue)
                                    for w in xrange(workers):
                                        process_certs_queue.put("DONE")
                                    logger.warning("Importing finished of {f} at {d}".format(f=certfile,
                                                                                             d=datetime.now()))
                                    es.index(index='scansio-sonar-ssl-imported', doc_type='imported-file', id=certfile,
                                             body={'file': certfile, 'imported_date': datetime.now(), 'sha1': sha1})
                                else:
                                    logger.error("SHA1 did not match for {f} it was not imported".format(f=certfile))
                                os.remove(certfile)
                                # Now we should optimize each index to max num segments of 1 to help with
                                # searching/sizing and just overall es happiness
                                refresh_es = Elasticsearch([{u'host': args.server, u'port': args.port}], timeout=60)
                                logger.warning("Optimizing index: {index} at {date}".
                                               format(index='passive-ssl-certs-sonar', date=datetime.now()))
                                refresh_es.indices.optimize(index='passive-ssl-certs-sonar',
                                                            max_num_segments=1, request_timeout=7500)
                        if scans_file.endswith('hosts.gz'):
                            hostsfile = scans_file[48:65]
                            if hostsfile not in imported_files:
                                logger.warning("We don't have {file} imported lets download it".format(file=hostsfile))
                                phys_host_file = requests.get(scans_file)
                                with open('{f}'.format(f=hostsfile), 'wb') as hf:
                                    for chunk in phys_host_file.iter_content(chunk_size=1024):
                                        if chunk:
                                            hf.write(chunk)
                                with open('{f}'.format(f=hostsfile), 'rb') as fh:
                                    h = hashlib.sha1()
                                    while True:
                                        data = fh.read(8192)
                                        if not data:
                                            break
                                        h.update(data)
                                sha1 = h.hexdigest()
                                if sha1 == res['fingerprint']:
                                    for w in xrange(workers):
                                        queue_es = Elasticsearch([{u'host': args.server, u'port': args.port}],
                                                                 timeout=60)
                                        p = Process(target=process_hosts, args=(process_hosts_queue, queue_es))
                                        p.daemon = True
                                        p.start()
                                    logger.warning("Importing {f} at {d}".format(f=hostsfile, d=datetime.now()))
                                    parse_hosts_file(hostsfile, process_hosts_queue)
                                    logger.warning("Hosts updated for {f} now going back and updating first_seen"
                                                   .format(f=hostsfile))
                                    #  this is kinda dirty but without looking up everything at insert time (slow)
                                    #  I don't know of a better way to do
                                    #  this based on the number of documents we will have
                                    update_es = Elasticsearch([{u'host': args.server, u'port': args.port}], timeout=60)
                                    # construct an elasticsearch query where the filter is looking for any entry
                                    # that is missing the field first_seen
                                    # adding a queue processing system here; this should hopefully speed things up.
                                    for work in xrange(workers):
                                        p = Process(target=update_hosts, args=(update_hosts_queue, update_es))
                                        p.daemon = True
                                        p.start()

                                    q = {'size': 500, "query": {"match_all": {}},
                                         "filter": {"missing": {"field": "first_seen"}}}
                                    new_updates = update_es.search(index='passive-ssl-hosts-sonar', body=q)
                                    logger.warning("Numer of hosts to update is {count}"
                                                   .format(count=new_updates['hits']['total']))
                                    # Scan across all the documents missing the first_seen field and bulk update them
                                    missing_first_seen = scan(update_es, query=q, scroll='30m',
                                                              index='passive-ssl-hosts-sonar')
                                    for miss in missing_first_seen:
                                        update_hosts_queue.put(miss)
                                    # for some stupid reason I keep missing some at the end of the scan/scroll
                                    # so going to do them manually
                                    new_updates = update_es.search(index='passive-ssl-hosts-sonar', body=q)
                                    logger.warning("Numer of hosts to update is {count}"
                                                   .format(count=new_updates['hits']['total']))
                                    missing_first_seen_again = scan(update_es, query=q, scroll='30m',
                                                                    index='passive-ssl-hosts-sonar')
                                    bulk_update_missed = []
                                    for m in missing_first_seen_again:
                                        last_seen = m['_source']['last_seen']
                                        first_seen = last_seen
                                        action = {"_op_type": "update", "_index": "passive-ssl-hosts-sonar",
                                                  "_type": "host", "_id": m['_id'], "doc": {'first_seen': first_seen}}
                                        bulk_update_missed.append(action)
                                        if len(bulk_update_missed) == 500:
                                            bulk(update_es, bulk_update_missed)
                                            bulk_update_missed = []
                                    bulk(update_es, bulk_update_missed)
                                    logger.warning("Finished updating hosts at {d}".format(d=datetime.now()))
                                    for w in xrange(workers):
                                        update_hosts_queue.put("DONE")
                                    #  Get the remaining ones that are less than 500 and the loop has ended
                                    logger.warning("Importing finished of {f} at {d}".format(f=hostsfile,
                                                   d=datetime.now()))
                                    es.index(index='scansio-sonar-ssl-imported', doc_type='imported-file', id=hostsfile,
                                             body={'file': hostsfile, 'imported_date': datetime.now(), 'sha1': sha1})
                                    os.remove(hostsfile)
                                    refresh_es = Elasticsearch([{u'host': args.server, u'port': args.port}], timeout=60)
                                    # Now we should optimize each index to max num segments of 1 to help with
                                    # searching/sizing and just overall es happiness
                                    logger.warning("Optimizing index: {index} at {date}".
                                                   format(index='passive-ssl-hosts-sonar', date=datetime.now()))
                                    refresh_es.indices.optimize(index='passive-ssl-hosts-sonar',
                                                                max_num_segments=1, request_timeout=7500)
                                else:
                                    logger.error("SHA1 did not match for {f} it was not imported".format(f=hostsfile))
                                    os.remove(hostsfile)
        else:
            logger.error("The scans.io/json must have changed or is having issues. I didn't see any studies. Exiting")
            sys.exit()
    else:
        logger.error("There was an error connecting to https://scans.io. I did not get a 200 status code. Exiting")
        sys.exit()

Example 9

Project: pyNastran
Source File: test_mesh_utils.py
    def test_eq2(self):
        r"""
          5
        6 *-------* 40
          | \     |
          |   \   |
          |     \ |
          *-------* 3
          1       20
        """
        msg = (
            'CEND\n'
            'BEGIN BULK\n'
            'GRID,1, , 0.,   0.,   0.\n'
            'GRID,20,, 1.,   0.,   0.\n'
            'GRID,3, , 1.01, 0.,   0.\n'
            'GRID,40,, 1.,   1.,   0.\n'
            'GRID,5, , 0.,   1.,   0.\n'
            'GRID,6, , 0.,   1.01, 0.\n'
            'CTRIA3,1, 100,1,20,6\n'
            'CTRIA3,10,100,3,40,5\n'
            'PSHELL,100,1000,0.1\n'
            'MAT1,1000,3.0,, 0.3\n'
            'ENDDATA'
        )
        bdf_filename = 'nonunique.bdf'
        bdf_filename_out = 'unique.bdf'

        with codec_open(bdf_filename, 'w') as bdf_file:
            bdf_file.write(msg)

        tol = 0.2
        # Collapse 5/6 and 20/3; Put a 40 and 20 to test non-sequential IDs
        bdf_equivalence_nodes(bdf_filename, bdf_filename_out, tol,
                              renumber_nodes=False, neq_max=4, xref=True,
                              node_set=None, crash_on_collapse=False, debug=False)

        model = BDF(debug=False)
        model.read_bdf(bdf_filename_out)

        msg = 'nnodes=%s\n' % len(model.nodes)
        for nid, node in sorted(iteritems(model.nodes)):
            msg += 'nid=%s xyz=%s\n' % (nid, node.xyz)

        assert len(model.nodes) == 4, msg
        # os.remove(bdf_filename)
        os.remove(bdf_filename_out)

        tol = 0.009
        # Don't collapse anything because the tolerance is too small
        bdf_equivalence_nodes(bdf_filename, bdf_filename_out, tol,
                              renumber_nodes=False, neq_max=4, xref=True,
                              node_set=None, crash_on_collapse=False, debug=False)
        model = BDF(debug=False)
        model.read_bdf(bdf_filename_out)
        assert len(model.nodes) == 6, len(model.nodes)
        os.remove(bdf_filename_out)

        tol = 0.2
        node_set = [2, 3]
        # Node 2 is not defined, so crash
        with self.assertRaises(RuntimeError):
            # node 2 is not defined because it should be node 20
            bdf_equivalence_nodes(bdf_filename, bdf_filename_out, tol,
                                  renumber_nodes=False, neq_max=4, xref=True,
                                  node_set=node_set, crash_on_collapse=False, debug=False)

        tol = 0.2
        node_set = [20, 3]
        # Only collapse 2 nodes
        bdf_equivalence_nodes(bdf_filename, bdf_filename_out, tol,
                              renumber_nodes=False, neq_max=4, xref=True,
                              node_set=node_set, crash_on_collapse=False, debug=False)
        model = BDF(debug=False)
        model.read_bdf(bdf_filename_out)
        assert len(model.nodes) == 5, len(model.nodes)
        os.remove(bdf_filename_out)

        tol = 0.2
        node_set = set([20, 3])
        # Only collapse 2 nodes
        bdf_equivalence_nodes(bdf_filename, bdf_filename_out, tol,
                              renumber_nodes=False, neq_max=4, xref=True,
                              node_set=node_set, crash_on_collapse=False, debug=False)
        model = BDF(debug=False)
        model.read_bdf(bdf_filename_out)
        assert len(model.nodes) == 5, len(model.nodes)
        os.remove(bdf_filename_out)

        tol = 0.2
        aset = np.array([20, 3, 4], dtype='int32')
        bset = np.array([20, 3], dtype='int32')

        node_set = np.intersect1d(aset, bset)
        assert len(node_set) > 0, node_set
        # Only collapse 2 nodes
        bdf_equivalence_nodes(bdf_filename, bdf_filename_out, tol,
                              renumber_nodes=False, neq_max=4, xref=True,
                              node_set=node_set, crash_on_collapse=False, debug=False)
        model = BDF(debug=False)
        model.read_bdf(bdf_filename_out)
        assert len(model.nodes) == 5, len(model.nodes)
        os.remove(bdf_filename_out)

Example 10

Project: deepTools
Source File: correctGCBias.py
def main(args=None):
    args = process_args(args)
    global F_gc, N_gc, R_gc

    data = np.loadtxt(args.GCbiasFrequenciesFile.name)

    F_gc = data[:, 0]
    N_gc = data[:, 1]
    R_gc = data[:, 2]

    global global_vars
    global_vars = {}
    global_vars['2bit'] = args.genome
    global_vars['bam'] = args.bamfile

    # compute the probability to find more than one read (a redundant read)
    # at a certain position based on the gc of the read fragment
    # the binomial function is used for that
    max_dup_gc = [binom.isf(1e-7, F_gc[x], 1.0 / N_gc[x])
                  if F_gc[x] > 0 and N_gc[x] > 0 else 1
                  for x in range(len(F_gc))]

    global_vars['max_dup_gc'] = max_dup_gc

    tbit = py2bit.open(global_vars['2bit'])
    bam = pysam.Samfile(global_vars['bam'])

    global_vars['genome_size'] = sum(tbit.chroms().values())
    global_vars['total_reads'] = bam.mapped
    global_vars['reads_per_bp'] = \
        float(global_vars['total_reads']) / args.effectiveGenomeSize

    # apply correction
    print("applying correction")
    # divide the genome in fragments containing about 4e5 reads.
    # This amount of reads takes about 20 seconds
    # to process per core (48 cores, 256 Gb memory)
    chunkSize = int(4e5 / global_vars['reads_per_bp'])

    # chromSizes: list of tuples
    chromSizes = [(bam.references[i], bam.lengths[i])
                  for i in range(len(bam.references))]

    regionStart = 0
    if args.region:
        chromSizes, regionStart, regionEnd, chunkSize = \
            mapReduce.getUserRegion(chromSizes, args.region,
                                    max_chunk_size=chunkSize)

    print("genome partition size for multiprocessing: {}".format(chunkSize))
    print("using region {}".format(args.region))
    mp_args = []
    bedGraphStep = args.binSize
    chrNameBitToBam = tbitToBamChrName(list(tbit.chroms().keys()), bam.references)
    chrNameBamToBit = dict([(v, k) for k, v in chrNameBitToBam.items()])
    print(chrNameBitToBam, chrNameBamToBit)
    c = 1
    for chrom, size in chromSizes:
        start = 0 if regionStart == 0 else regionStart
        for i in range(start, size, chunkSize):
            try:
                chrNameBamToBit[chrom]
            except KeyError:
                print("no sequence information for ")
                "chromosome {} in 2bit file".format(chrom)
                print("Reads in this chromosome will be skipped")
                continue
            length = min(size, i + chunkSize)
            mp_args.append((chrom, chrNameBamToBit[chrom], i, length,
                            bedGraphStep))
            c += 1

    pool = multiprocessing.Pool(args.numberOfProcessors)

    if args.correctedFile.name.endswith('bam'):
        if len(mp_args) > 1 and args.numberOfProcessors > 1:
            print(("using {} processors for {} "
                   "number of tasks".format(args.numberOfProcessors,
                                            len(mp_args))))

            res = pool.map_async(
                writeCorrectedSam_wrapper, mp_args).get(9999999)
        else:
            res = list(map(writeCorrectedSam_wrapper, mp_args))

        if len(res) == 1:
            command = "cp {} {}".format(res[0], args.correctedFile.name)
            run_shell_command(command)
        else:
            print("concatenating (sorted) intermediate BAMs")
            header = pysam.Samfile(res[0])
            of = pysam.Samfile(args.correctedFile.name, "wb", template=header)
            header.close()
            for f in res:
                f = pysam.Samfile(f)
                for e in f.fetch(until_eof=True):
                    of.write(e)
                f.close()
            of.close()

        print("indexing BAM")
        pysam.index(args.correctedFile.name)

        for tempFileName in res:
            os.remove(tempFileName)

    if args.correctedFile.name.endswith('bg') or \
            args.correctedFile.name.endswith('bw'):

        _temp_bg_file_name = utilities.getTempFileName(suffix='_all.bg')
        if len(mp_args) > 1 and args.numberOfProcessors > 1:

            res = pool.map_async(writeCorrected_wrapper, mp_args).get(9999999)
        else:
            res = list(map(writeCorrected_wrapper, mp_args))

        # concatenate intermediary bedgraph files
        _temp_bg_file = open(_temp_bg_file_name, 'w')
        for tempFileName in res:
            if tempFileName:
                # concatenate all intermediate tempfiles into one
                # bedgraph file
                shutil.copyfileobj(open(tempFileName, 'rb'), _temp_bg_file)
                os.remove(tempFileName)
        _temp_bg_file.close()
        args.correctedFile.close()

        if args.correctedFile.name.endswith('bg'):
            shutil.move(_temp_bg_file_name, args.correctedFile.name)

        else:
            chromSizes = [(k, v) for k, v in tbit.chroms().items()]
            writeBedGraph.bedGraphToBigWig(chromSizes, _temp_bg_file_name,
                                           args.correctedFile.name)
            os.remove(_temp_bg_file_name)
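
The example above finishes by deleting every intermediate file once it has been merged into the final BAM or bedgraph. A minimal sketch of that cleanup step (the helper name is invented; contextlib.suppress requires Python 3.4+) that tolerates files that are already gone:

import contextlib
import os

def remove_intermediates(paths):
    # Delete each intermediate file; ignore ones that are already gone.
    for p in paths:
        with contextlib.suppress(FileNotFoundError):
            os.remove(p)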

Example 11

Project: PYPOWER
Source File: t_loadcase.py
View license
def t_loadcase(quiet=False):
    """Test that C{loadcase} works with an object as well as case file.

    @author: Ray Zimmerman (PSERC Cornell)
    """
    t_begin(240, quiet)

    ## compare result of loading from M-file to result of using data matrices
    tdir = dirname(__file__)
    casefile = join(tdir, 't_case9_opf')
    matfile  = join(tdir, 't_mat9_opf')
    pfcasefile = join(tdir, 't_case9_pf')
    pfmatfile  = join(tdir, 't_mat9_pf')
    casefilev2 = join(tdir, 't_case9_opfv2')
    matfilev2  = join(tdir, 't_mat9_opfv2')
    pfcasefilev2 = join(tdir, 't_case9_pfv2')
    pfmatfilev2  = join(tdir, 't_mat9_pfv2')

    ## read version 1 OPF data matrices
    baseMVA, bus, gen, branch, areas, gencost = t_case9_opf()
    ## save as .mat file
    savemat(matfile + '.mat', {'baseMVA': baseMVA, 'bus': bus, 'gen': gen,
            'branch': branch, 'areas': areas, 'gencost': gencost}, oned_as='row')

    ## read version 2 OPF data matrices
    ppc = t_case9_opfv2()
    ## save as .mat file
    savemat(matfilev2 + '.mat', {'ppc': ppc}, oned_as='column')

    ## prepare expected matrices for v1 load
    ## (missing gen cap curve & branch ang diff lims)
    tmp1 = (ppc['baseMVA'], ppc['bus'].copy(), ppc['gen'].copy(), ppc['branch'].copy(),
        ppc['areas'].copy(), ppc['gencost'].copy())
    tmp2 = (ppc['baseMVA'], ppc['bus'].copy(), ppc['gen'].copy(), ppc['branch'].copy(),
        ppc['areas'].copy(), ppc['gencost'].copy())
    ## remove capability curves, angle difference limits
    tmp1[2][1:3, [PC1, PC2, QC1MIN, QC1MAX, QC2MIN, QC2MAX]] = zeros((2,6))
    tmp1[3][0, ANGMAX] = 360
    tmp1[3][8, ANGMIN] = -360

    baseMVA, bus, gen, branch, areas, gencost = tmp1

    ##-----  load OPF data into individual matrices  -----
    t = 'loadcase(opf_PY_file_v1) without .py extension : '
    baseMVA1, bus1, gen1, branch1, areas1, gencost1 = \
            loadcase(casefile, False)
    t_is(baseMVA1,  baseMVA,    12, [t, 'baseMVA'])
    t_is(bus1,      bus,        12, [t, 'bus'])
    t_is(gen1,      gen,        12, [t, 'gen'])
    t_is(branch1,   branch,     12, [t, 'branch'])
    t_is(areas1,    areas,      12, [t, 'areas'])
    t_is(gencost1,  gencost,    12, [t, 'gencost'])

    t = 'loadcase(opf_PY_file_v1) with .py extension : '
    baseMVA1, bus1, gen1, branch1, areas1, gencost1 = \
            loadcase(casefile + '.py', False)
    t_is(baseMVA1,  baseMVA,    12, [t, 'baseMVA'])
    t_is(bus1,      bus,        12, [t, 'bus'])
    t_is(gen1,      gen,        12, [t, 'gen'])
    t_is(branch1,   branch,     12, [t, 'branch'])
    t_is(areas1,    areas,      12, [t, 'areas'])
    t_is(gencost1,  gencost,    12, [t, 'gencost'])

    t = 'loadcase(opf_MAT_file_v1) without .mat extension : '
    baseMVA1, bus1, gen1, branch1, areas1, gencost1 = \
            loadcase(matfile, False)
    t_is(baseMVA1,  baseMVA,    12, [t, 'baseMVA'])
    t_is(bus1,      bus,        12, [t, 'bus'])
    t_is(gen1,      gen,        12, [t, 'gen'])
    t_is(branch1,   branch,     12, [t, 'branch'])
    t_is(areas1,    areas,      12, [t, 'areas'])
    t_is(gencost1,  gencost,    12, [t, 'gencost'])

    t = 'loadcase(opf_MAT_file_v1) with .mat extension : '
    baseMVA1, bus1, gen1, branch1, areas1, gencost1 = \
            loadcase(matfile + '.mat', False)
    t_is(baseMVA1,  baseMVA,    12, [t, 'baseMVA'])
    t_is(bus1,      bus,        12, [t, 'bus'])
    t_is(gen1,      gen,        12, [t, 'gen'])
    t_is(branch1,   branch,     12, [t, 'branch'])
    t_is(areas1,    areas,      12, [t, 'areas'])
    t_is(gencost1,  gencost,    12, [t, 'gencost'])

    ## prepare expected matrices for v2 load
    baseMVA, bus, gen, branch, areas, gencost = tmp2

    t = 'loadcase(opf_PY_file_v2) without .py extension : '
    baseMVA1, bus1, gen1, branch1, areas1, gencost1 = \
            loadcase(casefilev2, False)
    t_is(baseMVA1,  baseMVA,    12, [t, 'baseMVA'])
    t_is(bus1,      bus,        12, [t, 'bus'])
    t_is(gen1,      gen,        12, [t, 'gen'])
    t_is(branch1,   branch,     12, [t, 'branch'])
    t_is(areas1,    areas,      12, [t, 'areas'])
    t_is(gencost1,  gencost,    12, [t, 'gencost'])

    t = 'loadcase(opf_PY_file_v2) with .py extension : '
    baseMVA1, bus1, gen1, branch1, areas1, gencost1 = \
            loadcase(casefilev2 + '.py', False)
    t_is(baseMVA1,  baseMVA,    12, [t, 'baseMVA'])
    t_is(bus1,      bus,        12, [t, 'bus'])
    t_is(gen1,      gen,        12, [t, 'gen'])
    t_is(branch1,   branch,     12, [t, 'branch'])
    t_is(areas1,    areas,      12, [t, 'areas'])
    t_is(gencost1,  gencost,    12, [t, 'gencost'])

    t = 'loadcase(opf_MAT_file_v2) without .mat extension : '
    baseMVA1, bus1, gen1, branch1, areas1, gencost1 = \
            loadcase(matfilev2, False)
    t_is(baseMVA1,  baseMVA,    12, [t, 'baseMVA'])
    t_is(bus1,      bus,        12, [t, 'bus'])
    t_is(gen1,      gen,        12, [t, 'gen'])
    t_is(branch1,   branch,     12, [t, 'branch'])
    t_is(areas1,    areas,      12, [t, 'areas'])
    t_is(gencost1,  gencost,    12, [t, 'gencost'])

    t = 'loadcase(opf_MAT_file_v2) with .mat extension : '
    baseMVA1, bus1, gen1, branch1, areas1, gencost1 = \
            loadcase(matfilev2 + '.mat', False)
    t_is(baseMVA1,  baseMVA,    12, [t, 'baseMVA'])
    t_is(bus1,      bus,        12, [t, 'bus'])
    t_is(gen1,      gen,        12, [t, 'gen'])
    t_is(branch1,   branch,     12, [t, 'branch'])
    t_is(areas1,    areas,      12, [t, 'areas'])
    t_is(gencost1,  gencost,    12, [t, 'gencost'])

    ## prepare expected matrices for v1 load
    baseMVA, bus, gen, branch, areas, gencost = tmp1

    t = 'loadcase(opf_struct_v1) (no version): '
    baseMVA1, bus1, gen1, branch1, areas1, gencost1 = t_case9_opf()
    c = {}
    c['baseMVA']   = baseMVA1
    c['bus']       = bus1.copy()
    c['gen']       = gen1.copy()
    c['branch']    = branch1.copy()
    c['areas']     = areas1.copy()
    c['gencost']   = gencost1.copy()
    baseMVA2, bus2, gen2, branch2, areas2, gencost2 = loadcase(c, False)
    t_is(baseMVA2,  baseMVA,    12, [t, 'baseMVA'])
    t_is(bus2,      bus,        12, [t, 'bus'])
    t_is(gen2,      gen,        12, [t, 'gen'])
    t_is(branch2,   branch,     12, [t, 'branch'])
    t_is(areas2,    areas,      12, [t, 'areas'])
    t_is(gencost2,  gencost,    12, [t, 'gencost'])

    t = 'loadcase(opf_struct_v1) (version=\'1\'): '
    c['version']   = '1'
    baseMVA2, bus2, gen2, branch2, areas2, gencost2 = loadcase(c, False)
    t_is(baseMVA2,  baseMVA,    12, [t, 'baseMVA'])
    t_is(bus2,      bus,        12, [t, 'bus'])
    t_is(gen2,      gen,        12, [t, 'gen'])
    t_is(branch2,   branch,     12, [t, 'branch'])
    t_is(areas2,    areas,      12, [t, 'areas'])
    t_is(gencost2,  gencost,    12, [t, 'gencost'])

    ## prepare expected matrices for v2 load
    baseMVA, bus, gen, branch, areas, gencost = tmp2

    t = 'loadcase(opf_struct_v2) (no version): '
    c = {}
    c['baseMVA']   = baseMVA
    c['bus']       = bus.copy()
    c['gen']       = gen.copy()
    c['branch']    = branch.copy()
    c['areas']     = areas.copy()
    c['gencost']   = gencost.copy()
    baseMVA2, bus2, gen2, branch2, areas2, gencost2 = loadcase(c, False)
    t_is(baseMVA2,  baseMVA,    12, [t, 'baseMVA'])
    t_is(bus2,      bus,        12, [t, 'bus'])
    t_is(gen2,      gen,        12, [t, 'gen'])
    t_is(branch2,   branch,     12, [t, 'branch'])
    t_is(areas2,    areas,      12, [t, 'areas'])
    t_is(gencost2,  gencost,    12, [t, 'gencost'])

    t = 'loadcase(opf_struct_v2) (version=\'2\'): '
    c = {}
    c['baseMVA']   = baseMVA
    c['bus']       = bus.copy()
    c['gen']       = gen.copy()
    c['branch']    = branch.copy()
    c['areas']     = areas.copy()
    c['gencost']   = gencost.copy()
    c['version']   = '2'
    baseMVA2, bus2, gen2, branch2, areas2, gencost2 = loadcase(c, False)
    t_is(baseMVA2,  baseMVA,    12, [t, 'baseMVA'])
    t_is(bus2,      bus,        12, [t, 'bus'])
    t_is(gen2,      gen,        12, [t, 'gen'])
    t_is(branch2,   branch,     12, [t, 'branch'])
    t_is(areas2,    areas,      12, [t, 'areas'])
    t_is(gencost2,  gencost,    12, [t, 'gencost'])

    ##-----  load OPF data into struct  -----
    ## prepare expected matrices for v1 load
    baseMVA, bus, gen, branch, areas, gencost = tmp1

    t = 'ppc = loadcase(opf_PY_file_v1) without .py extension : '
    ppc1 = loadcase(casefile)
    t_is(ppc1['baseMVA'],  baseMVA,    12, [t, 'baseMVA'])
    t_is(ppc1['bus'],      bus,        12, [t, 'bus'])
    t_is(ppc1['gen'],      gen,        12, [t, 'gen'])
    t_is(ppc1['branch'],   branch,     12, [t, 'branch'])
    t_is(ppc1['areas'],    areas,      12, [t, 'areas'])
    t_is(ppc1['gencost'],  gencost,    12, [t, 'gencost'])

    t = 'ppc = loadcase(opf_PY_file_v1) with .py extension : '
    ppc1 = loadcase(casefile + '.py')
    t_is(ppc1['baseMVA'],  baseMVA,    12, [t, 'baseMVA'])
    t_is(ppc1['bus'],      bus,        12, [t, 'bus'])
    t_is(ppc1['gen'],      gen,        12, [t, 'gen'])
    t_is(ppc1['branch'],   branch,     12, [t, 'branch'])
    t_is(ppc1['areas'],    areas,      12, [t, 'areas'])
    t_is(ppc1['gencost'],  gencost,    12, [t, 'gencost'])

    t = 'ppc = loadcase(opf_MAT_file_v1) without .mat extension : '
    ppc1 = loadcase(matfile)
    t_is(ppc1['baseMVA'],  baseMVA,    12, [t, 'baseMVA'])
    t_is(ppc1['bus'],      bus,        12, [t, 'bus'])
    t_is(ppc1['gen'],      gen,        12, [t, 'gen'])
    t_is(ppc1['branch'],   branch,     12, [t, 'branch'])
    t_is(ppc1['areas'],    areas,      12, [t, 'areas'])
    t_is(ppc1['gencost'],  gencost,    12, [t, 'gencost'])

    t = 'ppc = loadcase(opf_MAT_file_v1) with .mat extension : '
    ppc1 = loadcase(matfile + '.mat')
    t_is(ppc1['baseMVA'],  baseMVA,    12, [t, 'baseMVA'])
    t_is(ppc1['bus'],      bus,        12, [t, 'bus'])
    t_is(ppc1['gen'],      gen,        12, [t, 'gen'])
    t_is(ppc1['branch'],   branch,     12, [t, 'branch'])
    t_is(ppc1['areas'],    areas,      12, [t, 'areas'])
    t_is(ppc1['gencost'],  gencost,    12, [t, 'gencost'])

    ## prepare expected matrices for v2 load
    baseMVA, bus, gen, branch, areas, gencost = tmp2

    t = 'ppc = loadcase(opf_PY_file_v2) without .py extension : '
    ppc1 = loadcase(casefilev2)
    t_is(ppc1['baseMVA'],  baseMVA,    12, [t, 'baseMVA'])
    t_is(ppc1['bus'],      bus,        12, [t, 'bus'])
    t_is(ppc1['gen'],      gen,        12, [t, 'gen'])
    t_is(ppc1['branch'],   branch,     12, [t, 'branch'])
    t_is(ppc1['areas'],    areas,      12, [t, 'areas'])
    t_is(ppc1['gencost'],  gencost,    12, [t, 'gencost'])

    t = 'ppc = loadcase(opf_PY_file_v2) with .py extension : '
    ppc1 = loadcase(casefilev2 + '.py')
    t_is(ppc1['baseMVA'],  baseMVA,    12, [t, 'baseMVA'])
    t_is(ppc1['bus'],      bus,        12, [t, 'bus'])
    t_is(ppc1['gen'],      gen,        12, [t, 'gen'])
    t_is(ppc1['branch'],   branch,     12, [t, 'branch'])
    t_is(ppc1['areas'],    areas,      12, [t, 'areas'])
    t_is(ppc1['gencost'],  gencost,    12, [t, 'gencost'])

    t = 'ppc = loadcase(opf_MAT_file_v2) without .mat extension : '
    ppc1 = loadcase(matfilev2)
    t_is(ppc1['baseMVA'],  baseMVA,    12, [t, 'baseMVA'])
    t_is(ppc1['bus'],      bus,        12, [t, 'bus'])
    t_is(ppc1['gen'],      gen,        12, [t, 'gen'])
    t_is(ppc1['branch'],   branch,     12, [t, 'branch'])
    t_is(ppc1['areas'],    areas,      12, [t, 'areas'])
    t_is(ppc1['gencost'],  gencost,    12, [t, 'gencost'])

    t = 'ppc = loadcase(opf_MAT_file_v2) with .mat extension : '
    ppc1 = loadcase(matfilev2 + '.mat')
    t_is(ppc1['baseMVA'],  baseMVA,    12, [t, 'baseMVA'])
    t_is(ppc1['bus'],      bus,        12, [t, 'bus'])
    t_is(ppc1['gen'],      gen,        12, [t, 'gen'])
    t_is(ppc1['branch'],   branch,     12, [t, 'branch'])
    t_is(ppc1['areas'],    areas,      12, [t, 'areas'])
    t_is(ppc1['gencost'],  gencost,    12, [t, 'gencost'])

    ## prepare expected matrices for v1 load
    baseMVA, bus, gen, branch, areas, gencost = tmp1

    t = 'ppc = loadcase(opf_struct_v1) (no version): '
    baseMVA1, bus1, gen1, branch1, areas1, gencost1 = t_case9_opf()
    c = {}
    c['baseMVA']   = baseMVA1
    c['bus']       = bus1.copy()
    c['gen']       = gen1.copy()
    c['branch']    = branch1.copy()
    c['areas']     = areas1.copy()
    c['gencost']   = gencost1.copy()
    ppc2 = loadcase(c)
    t_is(ppc2['baseMVA'],  baseMVA,    12, [t, 'baseMVA'])
    t_is(ppc2['bus'],      bus,        12, [t, 'bus'])
    t_is(ppc2['gen'],      gen,        12, [t, 'gen'])
    t_is(ppc2['branch'],   branch,     12, [t, 'branch'])
    t_is(ppc2['areas'],    areas,      12, [t, 'areas'])
    t_is(ppc2['gencost'],  gencost,    12, [t, 'gencost'])

    t = 'ppc = loadcase(opf_struct_v1) (version=\'1\'): '
    c['version']   = '1'
    ppc2 = loadcase(c)
    t_is(ppc2['baseMVA'],  baseMVA,    12, [t, 'baseMVA'])
    t_is(ppc2['bus'],      bus,        12, [t, 'bus'])
    t_is(ppc2['gen'],      gen,        12, [t, 'gen'])
    t_is(ppc2['branch'],   branch,     12, [t, 'branch'])
    t_is(ppc2['areas'],    areas,      12, [t, 'areas'])
    t_is(ppc2['gencost'],  gencost,    12, [t, 'gencost'])

    ## prepare expected matrices for v2 load
    baseMVA, bus, gen, branch, areas, gencost = tmp2

    t = 'ppc = loadcase(opf_struct_v2) (no version): '
    c = {}
    c['baseMVA']   = baseMVA
    c['bus']       = bus.copy()
    c['gen']       = gen.copy()
    c['branch']    = branch.copy()
    c['areas']     = areas.copy()
    c['gencost']   = gencost.copy()
    ppc2 = loadcase(c)
    t_is(ppc2['baseMVA'],  baseMVA,    12, [t, 'baseMVA'])
    t_is(ppc2['bus'],      bus,        12, [t, 'bus'])
    t_is(ppc2['gen'],      gen,        12, [t, 'gen'])
    t_is(ppc2['branch'],   branch,     12, [t, 'branch'])
    t_is(ppc2['areas'],    areas,      12, [t, 'areas'])
    t_is(ppc2['gencost'],  gencost,    12, [t, 'gencost'])
    t_ok(ppc2['version'] == '2', [t, 'version'])

    t = 'ppc = loadcase(opf_struct_v2) (version=\'2\'): '
    c = {}
    c['baseMVA']   = baseMVA
    c['bus']       = bus.copy()
    c['gen']       = gen.copy()
    c['branch']    = branch.copy()
    c['areas']     = areas.copy()
    c['gencost']   = gencost.copy()
    c['version']   = '2'
    ppc2 = loadcase(c)
    t_is(ppc2['baseMVA'],  baseMVA,    12, [t, 'baseMVA'])
    t_is(ppc2['bus'],      bus,        12, [t, 'bus'])
    t_is(ppc2['gen'],      gen,        12, [t, 'gen'])
    t_is(ppc2['branch'],   branch,     12, [t, 'branch'])
    t_is(ppc2['areas'],    areas,      12, [t, 'areas'])
    t_is(ppc2['gencost'],  gencost,    12, [t, 'gencost'])


    ## read version 1 PF data matrices
    baseMVA, bus, gen, branch = t_case9_pf()
    savemat(pfmatfile + '.mat',
        {'baseMVA': baseMVA, 'bus': bus, 'gen': gen, 'branch': branch},
        oned_as='column')

    ## read version 2 PF data matrices
    ppc = t_case9_pfv2()
    tmp = (ppc['baseMVA'], ppc['bus'].copy(),
           ppc['gen'].copy(), ppc['branch'].copy())
    baseMVA, bus, gen, branch = tmp
    ## save as .mat file
    savemat(pfmatfilev2 + '.mat', {'ppc': ppc}, oned_as='column')

    ##-----  load PF data into individual matrices  -----
    t = 'loadcase(pf_PY_file_v1) without .py extension : '
    baseMVA1, bus1, gen1, branch1 = \
            loadcase(pfcasefile, False, False, False)
    t_is(baseMVA1,  baseMVA,    12, [t, 'baseMVA'])
    t_is(bus1,      bus,        12, [t, 'bus'])
    t_is(gen1,      gen,        12, [t, 'gen'])
    t_is(branch1,   branch,     12, [t, 'branch'])

    t = 'loadcase(pf_PY_file_v1) with .py extension : '
    baseMVA1, bus1, gen1, branch1 = \
            loadcase(pfcasefile + '.py', False, False, False)
    t_is(baseMVA1,  baseMVA,    12, [t, 'baseMVA'])
    t_is(bus1,      bus,        12, [t, 'bus'])
    t_is(gen1,      gen,        12, [t, 'gen'])
    t_is(branch1,   branch,     12, [t, 'branch'])

    t = 'loadcase(pf_MAT_file_v1) without .mat extension : '
    baseMVA1, bus1, gen1, branch1 = \
            loadcase(pfmatfile, False, False, False)
    t_is(baseMVA1,  baseMVA,    12, [t, 'baseMVA'])
    t_is(bus1,      bus,        12, [t, 'bus'])
    t_is(gen1,      gen,        12, [t, 'gen'])
    t_is(branch1,   branch,     12, [t, 'branch'])

    t = 'loadcase(pf_MAT_file_v1) with .mat extension : '
    baseMVA1, bus1, gen1, branch1 = \
            loadcase(pfmatfile + '.mat', False, False, False)
    t_is(baseMVA1,  baseMVA,    12, [t, 'baseMVA'])
    t_is(bus1,      bus,        12, [t, 'bus'])
    t_is(gen1,      gen,        12, [t, 'gen'])
    t_is(branch1,   branch,     12, [t, 'branch'])

    t = 'loadcase(pf_PY_file_v2) without .py extension : '
    baseMVA1, bus1, gen1, branch1 = \
            loadcase(pfcasefilev2, False, False, False)
    t_is(baseMVA1,  baseMVA,    12, [t, 'baseMVA'])
    t_is(bus1,      bus,        12, [t, 'bus'])
    t_is(gen1,      gen,        12, [t, 'gen'])
    t_is(branch1,   branch,     12, [t, 'branch'])

    t = 'loadcase(pf_PY_file_v2) with .py extension : '
    baseMVA1, bus1, gen1, branch1 = \
            loadcase(pfcasefilev2 + '.py', False, False, False)
    t_is(baseMVA1,  baseMVA,    12, [t, 'baseMVA'])
    t_is(bus1,      bus,        12, [t, 'bus'])
    t_is(gen1,      gen,        12, [t, 'gen'])
    t_is(branch1,   branch,     12, [t, 'branch'])

    t = 'loadcase(pf_MAT_file_v2) without .mat extension : '
    baseMVA1, bus1, gen1, branch1 = \
            loadcase(pfmatfilev2, False, False, False)
    t_is(baseMVA1,  baseMVA,    12, [t, 'baseMVA'])
    t_is(bus1,      bus,        12, [t, 'bus'])
    t_is(gen1,      gen,        12, [t, 'gen'])
    t_is(branch1,   branch,     12, [t, 'branch'])

    t = 'loadcase(pf_MAT_file_v2) with .mat extension : '
    baseMVA1, bus1, gen1, branch1 = \
            loadcase(pfmatfilev2 + '.mat', False, False, False)
    t_is(baseMVA1,  baseMVA,    12, [t, 'baseMVA'])
    t_is(bus1,      bus,        12, [t, 'bus'])
    t_is(gen1,      gen,        12, [t, 'gen'])
    t_is(branch1,   branch,     12, [t, 'branch'])

    t = 'loadcase(pf_struct_v1) (no version): '
    baseMVA1, bus1, gen1, branch1 = t_case9_pf()
    c = {}
    c['baseMVA']   = baseMVA1
    c['bus']       = bus1.copy()
    c['gen']       = gen1.copy()
    c['branch']    = branch1.copy()
    baseMVA2, bus2, gen2, branch2 = loadcase(c, False, False, False)
    t_is(baseMVA2,  baseMVA,    12, [t, 'baseMVA'])
    t_is(bus2,      bus,        12, [t, 'bus'])
    t_is(gen2,      gen,        12, [t, 'gen'])
    t_is(branch2,   branch,     12, [t, 'branch'])

    t = 'loadcase(pf_struct_v1) (version=\'1\'): '
    c['version']   = '1'
    baseMVA2, bus2, gen2, branch2 = loadcase(c, False, False, False)
    t_is(baseMVA2,  baseMVA,    12, [t, 'baseMVA'])
    t_is(bus2,      bus,        12, [t, 'bus'])
    t_is(gen2,      gen,        12, [t, 'gen'])
    t_is(branch2,   branch,     12, [t, 'branch'])

    t = 'loadcase(pf_struct_v2) : '
    c = {}
    c['baseMVA']   = baseMVA
    c['bus']       = bus.copy()
    c['gen']       = gen.copy()
    c['branch']    = branch.copy()
    c['version']   = '2'
    baseMVA2, bus2, gen2, branch2 = loadcase(c, False, False, False)
    t_is(baseMVA2,  baseMVA,    12, [t, 'baseMVA'])
    t_is(bus2,      bus,        12, [t, 'bus'])
    t_is(gen2,      gen,        12, [t, 'gen'])
    t_is(branch2,   branch,     12, [t, 'branch'])

    ##-----  load PF data into struct  -----
    t = 'ppc = loadcase(pf_PY_file_v1) without .py extension : '
    ppc1 = loadcase(pfcasefile)
    t_is(ppc1['baseMVA'],  baseMVA,    12, [t, 'baseMVA'])
    t_is(ppc1['bus'],      bus,        12, [t, 'bus'])
    t_is(ppc1['gen'],      gen,        12, [t, 'gen'])
    t_is(ppc1['branch'],   branch,     12, [t, 'branch'])

    t = 'ppc = loadcase(pf_PY_file_v1) with .py extension : '
    ppc1 = loadcase(pfcasefile + '.py')
    t_is(ppc1['baseMVA'],  baseMVA,    12, [t, 'baseMVA'])
    t_is(ppc1['bus'],      bus,        12, [t, 'bus'])
    t_is(ppc1['gen'],      gen,        12, [t, 'gen'])
    t_is(ppc1['branch'],   branch,     12, [t, 'branch'])

    t = 'ppc = loadcase(pf_MAT_file_v1) without .mat extension : '
    ppc1 = loadcase(pfmatfile)
    t_is(ppc1['baseMVA'],  baseMVA,    12, [t, 'baseMVA'])
    t_is(ppc1['bus'],      bus,        12, [t, 'bus'])
    t_is(ppc1['gen'],      gen,        12, [t, 'gen'])
    t_is(ppc1['branch'],   branch,     12, [t, 'branch'])

    t = 'ppc = loadcase(pf_MAT_file_v1) with .mat extension : '
    ppc1 = loadcase(pfmatfile + '.mat')
    t_is(ppc1['baseMVA'],  baseMVA,    12, [t, 'baseMVA'])
    t_is(ppc1['bus'],      bus,        12, [t, 'bus'])
    t_is(ppc1['gen'],      gen,        12, [t, 'gen'])
    t_is(ppc1['branch'],   branch,     12, [t, 'branch'])

    t = 'ppc = loadcase(pf_PY_file_v2) without .py extension : '
    ppc1 = loadcase(pfcasefilev2)
    t_is(ppc1['baseMVA'],  baseMVA,    12, [t, 'baseMVA'])
    t_is(ppc1['bus'],      bus,        12, [t, 'bus'])
    t_is(ppc1['gen'],      gen,        12, [t, 'gen'])
    t_is(ppc1['branch'],   branch,     12, [t, 'branch'])

    t = 'ppc = loadcase(pf_PY_file_v2) with .py extension : '
    ppc1 = loadcase(pfcasefilev2 + '.py')
    t_is(ppc1['baseMVA'],  baseMVA,    12, [t, 'baseMVA'])
    t_is(ppc1['bus'],      bus,        12, [t, 'bus'])
    t_is(ppc1['gen'],      gen,        12, [t, 'gen'])
    t_is(ppc1['branch'],   branch,     12, [t, 'branch'])

    t = 'ppc = loadcase(pf_MAT_file_v2) without .mat extension : '
    ppc1 = loadcase(pfmatfilev2)
    t_is(ppc1['baseMVA'],  baseMVA,    12, [t, 'baseMVA'])
    t_is(ppc1['bus'],      bus,        12, [t, 'bus'])
    t_is(ppc1['gen'],      gen,        12, [t, 'gen'])
    t_is(ppc1['branch'],   branch,     12, [t, 'branch'])

    t = 'ppc = loadcase(pf_MAT_file_v2) with .mat extension : '
    ppc1 = loadcase(pfmatfilev2 + '.mat')
    t_is(ppc1['baseMVA'],  baseMVA,    12, [t, 'baseMVA'])
    t_is(ppc1['bus'],      bus,        12, [t, 'bus'])
    t_is(ppc1['gen'],      gen,        12, [t, 'gen'])
    t_is(ppc1['branch'],   branch,     12, [t, 'branch'])

    t = 'ppc = loadcase(pf_struct_v1) (no version): '
    baseMVA1, bus1, gen1, branch1 = t_case9_pf()
    c = {}
    c['baseMVA']   = baseMVA1
    c['bus']       = bus1.copy()
    c['gen']       = gen1.copy()
    c['branch']    = branch1.copy()
    ppc2 = loadcase(c)
    t_is(ppc2['baseMVA'],  baseMVA,    12, [t, 'baseMVA'])
    t_is(ppc2['bus'],      bus,        12, [t, 'bus'])
    t_is(ppc2['gen'],      gen,        12, [t, 'gen'])
    t_is(ppc2['branch'],   branch,     12, [t, 'branch'])

    t = 'ppc = loadcase(pf_struct_v1) (version=\'1\'): '
    c['version']   = '1'
    ppc2 = loadcase(c)
    t_is(ppc2['baseMVA'],  baseMVA,    12, [t, 'baseMVA'])
    t_is(ppc2['bus'],      bus,        12, [t, 'bus'])
    t_is(ppc2['gen'],      gen,        12, [t, 'gen'])
    t_is(ppc2['branch'],   branch,     12, [t, 'branch'])

    t = 'ppc = loadcase(pf_struct_v2) : '
    c = {}
    c['baseMVA']   = baseMVA
    c['bus']       = bus.copy()
    c['gen']       = gen.copy()
    c['branch']    = branch.copy()
    c['version']   = '2'
    ppc2 = loadcase(c)
    t_is(ppc2['baseMVA'],  baseMVA,    12, [t, 'baseMVA'])
    t_is(ppc2['bus'],      bus,        12, [t, 'bus'])
    t_is(ppc2['gen'],      gen,        12, [t, 'gen'])
    t_is(ppc2['branch'],   branch,     12, [t, 'branch'])

    ## cleanup
    os.remove(matfile + '.mat')
    os.remove(pfmatfile + '.mat')
    os.remove(matfilev2 + '.mat')
    os.remove(pfmatfilev2 + '.mat')

    t = 'runpf(my_PY_file)'
    ppopt = ppoption(VERBOSE=0, OUT_ALL=0)
    results3, success = runpf(pfcasefile, ppopt)
    baseMVA3, bus3, gen3, branch3 = results3['baseMVA'], results3['bus'], \
            results3['gen'], results3['branch']
    t_ok( success, t )

    t = 'runpf(my_object)'
    results4, success = runpf(c, ppopt)
    baseMVA4, bus4, gen4, branch4 = results4['baseMVA'], results4['bus'], \
            results4['gen'], results4['branch']
    t_ok( success, t )

    t = 'runpf result comparison : '
    t_is(baseMVA3,  baseMVA4,   12, [t, 'baseMVA'])
    t_is(bus3,      bus4,       12, [t, 'bus'])
    t_is(gen3,      gen4,       12, [t, 'gen'])
    t_is(branch3,   branch4,    12, [t, 'branch'])

    t = 'runpf(modified_struct)'
    c['gen'][2, 1] = c['gen'][2, 1] + 1            ## increase gen 3 output by 1
    results5, success = runpf(c, ppopt)
    gen5 = results5['gen']
    t_is(gen5[0, 1], gen4[0, 1] - 1, 1, t)   ## slack bus output should decrease by 1

    t_end()
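
This test creates several .mat fixtures up front and removes them in a cleanup block near the end. A hedged sketch of the same create-use-remove pattern using tempfile, so the fixture name cannot collide (assumes SciPy is available; the data is purely illustrative):

import os
import tempfile
from scipy.io import loadmat, savemat

fd, matpath = tempfile.mkstemp(suffix='.mat')
os.close(fd)  # savemat reopens the file by path
try:
    savemat(matpath, {'x': [[1.0, 2.0, 3.0]]})
    assert loadmat(matpath)['x'].shape == (1, 3)
finally:
    os.remove(matpath)  # runs even if the assertion fails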

Example 12

Project: qiime
Source File: beta_significance.py
View license
def main():
    option_parser, opts, args = parse_command_line_parameters(**script_info)
    otu_table_fp = opts.input_path

    otu_table = load_table(otu_table_fp)

    sample_ids = otu_table.ids()
    otu_ids = otu_table.ids(axis='observation')

    # This is not memory safe: need to be able to load the otu table as ints
    otu_table_array = array(list(otu_table.iter_data(axis='observation')),
                            dtype='int')

    if opts.type_of_test == 'all_together':
        type_of_test = TEST_ON_TREE
        header_text = "sample\tp value\tp value (Bonferroni corrected)\n"
    elif opts.type_of_test == 'each_pair':
        type_of_test = TEST_ON_PAIRWISE
        header_text = "sample 1\tsample 2\tp value\tp value (Bonferroni corrected)\n"
    elif opts.type_of_test == 'each_sample':
        type_of_test = TEST_ON_ENVS
        header_text = "sample\tp value\tp value (Bonferroni corrected)\n"
        if opts.significance_test == 'p-test':
            raise RuntimeError(
                'significance test type "each_sample" not allowed for p-test')
    else:
        raise RuntimeError('significance test type "%s" not found' %
                           opts.type_of_test)

    # note, uses ugly temp file
    if opts.significance_test == 'unweighted_unifrac':
        tree_in = open(opts.tree_path, 'U')
        output_fp = opts.output_path + '_envs.tmp'

        result = format_unifrac_sample_mapping(
            sample_ids, otu_ids, otu_table_array)
        of = open(output_fp, 'w')
        of.write('\n'.join(result))
        of.close()
        envs_in = open(output_fp, 'U')
        try:
            result = fast_unifrac_permutations_file(tree_in, envs_in,
                                                    weighted=False,
                                                    num_iters=opts.num_iters,
                                                    verbose=opts.verbose,
                                                    test_on=type_of_test)
        except ValueError as e:
            if e.message == ("No valid samples/environments found. Check"
                             " whether tree tips match otus/taxa present in"
                             " samples/environments"):
                raise ValueError(e.message + " and that the otu abundance is"
                                 " not relative.")
            raise e

        envs_in.close()
        os.remove(output_fp)

        of = open(opts.output_path, 'w')
        of.write("#unweighted unifrac significance test\n")
        of.write(header_text)
        for line in result:
            of.write('\t'.join(map(str, line)) + '\n')
        of.close()

    elif opts.significance_test == 'p-test':
        tree_in = open(opts.tree_path, 'U')
        output_fp = opts.output_path + '_envs.tmp'

        result = format_unifrac_sample_mapping(
            sample_ids, otu_ids, otu_table_array)
        of = open(output_fp, 'w')
        of.write('\n'.join(result))
        of.close()
        envs_in = open(output_fp, 'U')

        result = fast_p_test_file(tree_in, envs_in,
                                  num_iters=opts.num_iters, verbose=opts.verbose, test_on=type_of_test)
        envs_in.close()
        os.remove(output_fp)
        of = open(opts.output_path, 'w')
        of.write(
            "#andy martin's p-test significance test\n")
        of.write(header_text)
        for line in result:
            of.write('\t'.join(map(str, line)) + '\n')
        of.close()

    elif opts.significance_test == 'weighted_unifrac':
        tree_in = open(opts.tree_path, 'U')
        output_fp = opts.output_path + '_envs.tmp'

        result = format_unifrac_sample_mapping(
            sample_ids, otu_ids, otu_table_array)
        of = open(output_fp, 'w')
        of.write('\n'.join(result))
        of.close()
        envs_in = open(output_fp, 'U')

        result = fast_unifrac_permutations_file(tree_in, envs_in,
                                                weighted=True, num_iters=opts.num_iters, verbose=opts.verbose, test_on=type_of_test)
        envs_in.close()
        os.remove(output_fp)
        of = open(opts.output_path, 'w')
        of.write(
            "#weighted unifrac significance test\n")
        of.write(header_text)
        for line in result:
            of.write('\t'.join(map(str, line)) + '\n')
        of.close()

    elif opts.significance_test == 'weighted_normalized_unifrac':
        tree_in = open(opts.tree_path, 'U')
        output_fp = opts.output_path + '_envs.tmp'

        result = format_unifrac_sample_mapping(
            sample_ids, otu_ids, otu_table_array)
        of = open(output_fp, 'w')
        of.write('\n'.join(result))
        of.close()
        envs_in = open(output_fp, 'U')

        result = fast_unifrac_permutations_file(tree_in, envs_in,
                                                weighted='correct', num_iters=opts.num_iters, verbose=opts.verbose, test_on=type_of_test)
        envs_in.close()
        os.remove(output_fp)
        of = open(opts.output_path, 'w')
        of.write(
            "#weighted normalized unifrac significance test\n")
        of.write(header_text)
        for line in result:
            of.write('\t'.join(map(str, line)) + '\n')
        of.close()

    else:
        raise RuntimeError('significance test "%s" not found' %
                           opts.significance_test)
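
Each branch above writes the environment mapping to a temporary file, hands it to the UniFrac routine, and then calls os.remove. A small sketch of that write-use-remove pattern, with tempfile generating the name and a placeholder loop standing in for fast_unifrac_permutations_file:

import os
import tempfile

with tempfile.NamedTemporaryFile('w', suffix='_envs.tmp', delete=False) as tf:
    tf.write('\n'.join(['sample1\tenvA', 'sample2\tenvB']))
    tmp_path = tf.name
try:
    with open(tmp_path) as envs_in:
        for line in envs_in:   # stand-in for the real permutation test
            pass
finally:
    os.remove(tmp_path)        # the temp file never outlives the call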

Example 13

Project: git-annex-remote-hubic
Source File: swift.py
View license
    def retrieve(self, key, filename):
        """Retrieve key to filename"""
        md5 = hashlib.md5()
        path = self.get_path(key)

        nb_chunks = None
        chunk_idx = 0
        global_etag = None

        try:
            with ProgressFile(self.remote, filename, "wb") as dst:
                while path is not None:
                    chunk_idx += 1
                    self.remote.debug("Getting chunk %d" % chunk_idx)

                    headers, body = self.conn.get_object(self.container, path, resp_chunk_size=65536)

                    # Read chunk metadata
                    meta_nb_chunks = int(headers.get("x-object-meta-annex-chunks", 1))
                    meta_global_etag = headers.get("x-object-meta-annex-global-md5", headers["etag"])

                    # Check for consistency
                    if nb_chunks is None:
                        nb_chunks = meta_nb_chunks
                    elif nb_chunks != meta_nb_chunks:
                        raise ValueError("Inconsistent number of chunks: %d != %d (%d)"
                                         % (nb_chunks, meta_nb_chunks, chunk_idx))
                    if global_etag is None:
                        global_etag = meta_global_etag
                    elif global_etag != meta_global_etag:
                        raise ValueError("Inconsistent global MD5 checksum: %s != %s (%d)"
                                         % (global_etag, meta_global_etag, chunk_idx))

                    # Path of the next chunk
                    path = headers.get("x-object-meta-annex-next-chunk", None)

                    # Write chunk to file
                    chunk_md5 = hashlib.md5()
                    for chunk in body:
                        dst.write(chunk)
                        md5.update(chunk)
                        chunk_md5.update(chunk)
                    dst.flush()

                    # Check chunk MD5
                    chunk_md5_digest = chunk_md5.hexdigest()
                    if chunk_md5_digest != headers["etag"]:
                        raise ValueError("Checksum mismatch for chunk %d: %s != %s"
                                         % (chunk_idx, chunk_md5_digest, headers["etag"]))

        except KeyboardInterrupt:
            os.remove(filename)
            self.remote.send("TRANSFER-FAILURE RETRIEVE %s Interrupted by user" % key)
            raise
        except Exception as exc:
            os.remove(filename)
            self.remote.send("TRANSFER-FAILURE RETRIEVE %s %s" % (key, str(exc)))
            return

        md5_digest = md5.hexdigest()
        if md5_digest != global_etag:
            os.remove(filename)
            self.remote.send("TRANSFER-FAILURE RETRIEVE %s Checksum mismatch" % key)
        else:
            self.remote.send("TRANSFER-SUCCESS RETRIEVE " + key)

Example 14

Project: pyomo
Source File: test_writers.py
View license
def CreateTestMethod(test_case,
                     modelClass,
                     test_name,
                     symbolic_labels=False):

    # We do not want to test the plugin case on a model
    # class it is not capable of handling
    if not modelClass().validateCapabilities(test_case):
        return None

    # Skip this test if the solver is not available on the system
    if not test_case.available:
        def skipping_test(self):
            return self.skipTest('Solver unavailable: '
                                 +test_case.name+' ('+test_case.io+')')
        return skipping_test

    def writer_test(self):

        # Instantiate the model class
        model_class = modelClass()
        save_filename = join(thisDir,
                             (model_class.descrStr()+"."
                              +test_name+".results"))

        # cleanup possibly existing old test files
        try:
            os.remove(save_filename)
        except OSError:
            pass

        # Make sure we start from a new solver plugin
        # each time. We don't want them to maintain
        # some state that carries over between tests
        opt, io_options = test_case.initialize()

        if test_case.io == 'nl':
            self.assertEqual(opt.problem_format(), ProblemFormat.nl)
        elif test_case.io == 'lp':
            self.assertEqual(opt.problem_format(), ProblemFormat.cpxlp)
        elif test_case.io == 'mps':
            self.assertEqual(opt.problem_format(), ProblemFormat.mps)
        elif test_case.io == 'python':
            self.assertEqual(opt.problem_format(), None)

        # check that the solver plugin is at least as capable as the
        # test_case advertises; otherwise the plugin capabilities need
        # to be changed or the test case should be removed
        if not all(opt.has_capability(tag)
                   for tag in test_case.capabilities):
            self.fail("Actual plugin capabilities are less than "
                      "that of the of test case for the plugin: "
                      +test_case.name+' ('+test_case.io+')')

        # Create the model instance and send to the solver
        model_class.generateModel()
        model_class.warmstartModel()

        model = model_class.model
        self.assertTrue(model is not None)

        test_suffixes = [] if model_class.disableSuffixTests() else \
                        test_case.import_suffixes

        for suffix in test_suffixes:
            setattr(model,suffix,Suffix(direction=Suffix.IMPORT))

        if isinstance(opt, PersistentSolver):
            opt.compile_instance(model,
                                 symbolic_solver_labels=symbolic_labels)

        # solve
        if opt.warm_start_capable():
            results = opt.solve(
                model,
                symbolic_solver_labels=symbolic_labels,
                warmstart=True,
                load_solutions=False,
                **io_options)
        else:
            results = opt.solve(
                model,
                symbolic_solver_labels=symbolic_labels,
                load_solutions=False,
                **io_options)

        model_class.postSolveTestValidation(self, results)

        model.solutions.load_from(
            results,
            default_variable_value=opt.default_variable_value())

        model_class.saveCurrentSolution(save_filename,
                                        suffixes=test_suffixes)

        # There are certain cases where the latest solver version has
        # a bug so this should not cause a pyomo test to fail
        is_expected_failure, failure_msg = \
            check_expected_failures(test_case, modelClass)

        # validate the solution returned by the solver
        rc = model_class.validateCurrentSolution(suffixes=test_suffixes)

        if is_expected_failure:
            if rc[0] is True:
                warnings.warn("\nTest model '%s' was marked as an expected "
                              "failure but no failure occured. The "
                              "reason given for the expected failure "
                              "is:\n\n****\n%s\n****\n\n"
                              "Please remove this case as an expected "
                              "failure if the above issue has been "
                              "corrected in the latest version of the "
                              "solver." % (model_class.descrStr(), failure_msg))
            if _cleanup_expected_failures:
                os.remove(save_filename)

        if not rc[0]:
            try:
                model.solutions.store_to(results)
            except ValueError:
                pass
            self.fail("Solution mismatch for plugin "+test_case.name
                      +' '+str(opt.version())+', '+test_case.io+
                      " interface and problem type "
                      +model_class.descrStr()+"\n"+rc[1]+"\n"
                      +(str(results.Solution(0)) if len(results.solution) else "No Solution"))

        # cleanup if the test passed
        try:
            os.remove(save_filename)
        except OSError:
            pass

    @unittest.expectedFailure
    def failing_writer_test(self):
        return writer_test(self)

    is_expected_failure, failure_msg = \
        check_expected_failures(test_case, modelClass)

    if is_expected_failure is True:
        return failing_writer_test
    else:
        return writer_test
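
The pre- and post-test cleanup above wraps os.remove in try/except OSError so a missing file is not an error. A slightly narrower version of the same idiom, catching only FileNotFoundError so that unrelated problems (such as permissions) still surface:

import os

def remove_if_exists(path):
    # Same effect as the try/except OSError idiom above, but narrower.
    try:
        os.remove(path)
    except FileNotFoundError:
        pass

On Python 3.8+, pathlib.Path(path).unlink(missing_ok=True) expresses the same intent in one call.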

Example 15

Project: automl-phase-2
Source File: learners.py
View license
    def next_action(self):
        # Check mail
        self.read_messages()
        # Collect up scores and predictions - even if paused, children may still be finishing tasks
        min_n_scores = min(len(scores) for scores in self.child_score_values.itervalues())
        while len(self.score_values) < min_n_scores:
            n = len(self.score_values)
            num_scores = 0
            sum_scores = 0
            for child_scores in self.child_score_values.itervalues():
                # noinspection PyUnresolvedReferences
                if not np.isnan(child_scores[n]):
                    num_scores += 1
                    sum_scores += child_scores[n]
            score = sum_scores / num_scores
            # score = sum(scores[n] for scores in self.child_score_values.itervalues()) /\
            #         len(self.child_score_values)
            maxtime = max(times[n] for times in self.child_score_times.itervalues())
            self.score_values.append(score)
            self.score_times.append(maxtime)
            self.send_to_parent(dict(subject='score', sender=self.name,
                                     time=self.score_times[-1],
                                     score=self.score_values[-1]))

        # FIXME - send all of this data at the same time to prevent gotchas

        min_n_valid = min(len(times) for times in self.child_valid_prediction_times.itervalues())
        while len(self.valid_prediction_times) < min_n_valid:
            n = len(self.valid_prediction_times)
            predictions = None
            for child_name in self.child_score_times.iterkeys():
                filename = self.child_valid_prediction_files[child_name][n]
                child_predictions = np.load(filename)
                os.remove(filename)
                if predictions is None:
                    predictions = child_predictions
                else:
                    predictions += child_predictions
            predictions /= len(self.child_score_times)
            tmp_filename = util.random_temp_file_name('.npy')
            np.save(tmp_filename, predictions)
            maxtime = max(times[n] for times in self.child_valid_prediction_times.itervalues())
            self.valid_prediction_files.append(tmp_filename)
            self.valid_prediction_times.append(maxtime)
            self.send_to_parent(dict(subject='predictions', sender=self.name, partition='valid',
                                     time=self.valid_prediction_times[-1],
                                     filename=self.valid_prediction_files[-1]))

        min_n_test = min(len(times) for times in self.child_test_prediction_times.itervalues())
        while len(self.test_prediction_times) < min_n_test:
            n = len(self.test_prediction_times)
            predictions = None
            for child_name in self.child_score_times.iterkeys():
                filename = self.child_test_prediction_files[child_name][n]
                child_predictions = np.load(filename)
                os.remove(filename)
                if predictions is None:
                    predictions = child_predictions
                else:
                    predictions += child_predictions
            predictions /= len(self.child_score_times)
            tmp_filename = util.random_temp_file_name('.npy')
            np.save(tmp_filename, predictions)
            maxtime = max(times[n] for times in self.child_test_prediction_times.itervalues())
            self.test_prediction_files.append(tmp_filename)
            self.test_prediction_times.append(maxtime)
            self.send_to_parent(dict(subject='predictions', sender=self.name, partition='test',
                                     time=self.test_prediction_times[-1],
                                     filename=self.test_prediction_files[-1]))

        min_n_held_out = min(len(times) for times in self.child_held_out_prediction_times.itervalues())
        while len(self.held_out_prediction_times) < min_n_held_out:
            n = len(self.held_out_prediction_times)
            # FIXME - get rid of if else here
            if self.data_info['task'] == 'multiclass.classification':
                predictions = np.zeros(self.data['Y_train_1_of_k'].shape)
                # print('Prediction shape')
                # print(predictions.shape)
            else:
                predictions = np.zeros(self.data['Y_train'].shape)
            for child_name in self.child_score_times.iterkeys():
                filename = self.child_held_out_prediction_files[child_name][n]
                child_predictions = np.load(filename)
                os.remove(filename)
                predictions[self.child_held_out_idx[child_name]] = child_predictions
            # print('Combined predictions')
            # print(predictions[0])
            tmp_filename = util.random_temp_file_name('.npy')
            np.save(tmp_filename, predictions)
            maxtime = max(times[n] for times in self.child_held_out_prediction_times.itervalues())
            self.held_out_prediction_files.append(tmp_filename)
            self.held_out_prediction_times.append(maxtime)
            self.send_to_parent(dict(subject='predictions', sender=self.name, partition='held out',
                                     time=self.held_out_prediction_times[-1],
                                     filename=self.held_out_prediction_files[-1]))

        # Check to see if all children have terminated - if so, terminate this agent
        # immortal child dying is failure
        # mortal child dying without sending results is failure
        # any child failure should kill parent
        if self.immortal_offspring is True and len(self.conns_from_children) != len(self.child_states):
            logger.error("%s: Immortal child has died. Dying of grief", self.name)
            raise TerminationEx
        elif self.immortal_offspring is False:
            dead_kids = [x for x in self.child_states if x not in self.conns_from_children]
            for dk in dead_kids:
                if len(self.child_test_prediction_files[dk]) == 0:
                    logger.error("%s: Mortal child %s has died without sending results", self.name, dk)
                    raise TerminationEx
            if len(self.conns_from_children) == 0:
                logger.info("%s: No children remaining. Terminating.", self.name)
                raise TerminationEx
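
Each loop above loads a child's predictions from a temporary .npy file and immediately removes it, treating the file as a one-shot hand-off between processes. That pattern isolated into a helper (the name is made up):

import os
import numpy as np

def pop_array(filename):
    """Load an array handed over via a temporary .npy file, then delete it."""
    arr = np.load(filename)
    os.remove(filename)  # the file is a one-shot hand-off; reclaim it now
    return arr

Deleting right after np.load keeps the temp directory from filling up as results stream in.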

Example 16

Project: fusioncatcher
Source File: sra2illumina.py
View license
def sra2illumina(input_file,
                 output_file,
                 tag_read = None,
                 tag='',
                 phred_conversion = False,
                 operation = 'change',
                 tmp_dir = None,
                 size_read_buffer = 10**8):
    """
    It converts the FASTQ file (PHRED-33 qualities and SRA read names) downloaded
    from Short Read Archive (SRA) to Illumina FASTQ file (PHRED-64 Illumina v1.5
    and Illumina read names).
    """
    temp_file = None
    if phred_conversion:
        temp_file = give_me_temp_filename(tmp_dir)
    else:
        temp_file = output_file

    read_name = file(input_file,'r').readline().rstrip('\r\n')
    sra = False
    e = read_name.partition(" ")[0]
    if read_name.startswith('@') and ( not(e.endswith('/1') or e.endswith('/2'))):
        sra = True

    if operation == 'change' or sra:
        fid = open(input_file,'r')
        fod = open(temp_file,'w')
        i = 0
        r = 0
        while True:
            gc.disable()
            lines = fid.readlines(size_read_buffer)
            gc.enable()
            if not lines:
                break
            n = len(lines)
            for j in xrange(n):
                r = r + 1
                i = i + 1
                if i == 1:
                    if tag_read:
                        lines[j] = '@%s%s%s\n' % (tag_read ,int2str(r,12) , tag)
                    else: # if there is no tag_read then the original SRA id is left
                        lines[j] = '%s%s\n' % (lines[j][:-1].partition(" ")[0], tag)
                    #lines[j] = lines[j].rstrip('\r\n').upper().split(' ')[1]+tag+'\n'
                elif i == 3:
                    lines[j] = "+\n"
                elif i == 4:
                    i = 0
            fod.writelines(lines)
        fid.close()
        fod.close()
        if phred_conversion == '64':
            phred.fq2fq(temp_file,'sanger',output_file,'illumina-1.5',tmp_dir = tmp_dir)
            os.remove(temp_file)
        elif phred_conversion == '33':
            phred.fq2fq(temp_file,'auto-detect',output_file,'sanger',tmp_dir = tmp_dir)
            os.remove(temp_file)
    else:
        print "No changes are done!"
        if os.path.isfile(output_file):
            os.remove(output_file)
        if operation == 'soft':
            if os.path.islink(input_file):
                linkto = os.readlink(input_file)
                os.symlink(linkto,output_file)
            else:
                os.symlink(input_file,output_file)
        elif operation == 'hard':
            linkto = input_file
            if os.path.islink(input_file):
                linkto = os.readlink(input_file)
            try:
                os.link(linkto,output_file)
            except OSError as er:
                print >>sys.stderr,"WARNING: Cannot do hard links ('%s' and '%s')!" % (linkto,output_file)
                shutil.copyfile(linkto,output_file)
#                if er.errno == errno.EXDEV:
#                    # they are on different partitions
#                    # [Errno 18] Invalid cross-device link
#                    shutil.copyfile(linkto,output_file)
#                else:
#                    print >>sys.stderr,"ERROR: Cannot do hard links ('%s' and '%s')!" % (linkto,output_file)
#                    print >>sys.stderr,er
#                    sys.exit(1)

        elif operation == 'copy':
            shutil.copyfile(input_file, output_file)
        else:
            print >>sys.stderr, "ERROR: unknown operation of linking!", operation
            sys.exit(1)
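
The linking branch above first removes any existing output, then tries a hard link and falls back to copying when os.link fails (for example, across filesystems). The same remove-then-link-or-copy logic as a standalone sketch:

import os
import shutil

def link_or_copy(src, dst):
    # Refresh dst, prefer a hard link, and fall back to a plain copy
    # (os.link raises OSError across filesystems, for example).
    if os.path.isfile(dst):
        os.remove(dst)
    try:
        os.link(src, dst)
    except OSError:
        shutil.copyfile(src, dst)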

Example 17

Project: mps-youtube
Source File: download.py
View license
@command(r'(dv|da|d|dl|download)\s*(\d{1,4})')
def download(dltype, num):
    """ Download a track or playlist by menu item number. """
    # This function needs refactoring!
    # pylint: disable=R0912
    # pylint: disable=R0914
    if g.browse_mode == "ytpl" and dltype in ("da", "dv"):
        plid = g.ytpls[int(num) - 1]["link"]
        down_plist(dltype, plid)
        return

    elif g.browse_mode == "ytpl":
        g.message = "Use da or dv to specify audio / video playlist download"
        g.message = c.y + g.message + c.w
        g.content = content.generate_songlist_display()
        return

    elif g.browse_mode != "normal":
        g.message = "Download must refer to a specific video item"
        g.message = c.y + g.message + c.w
        g.content = content.generate_songlist_display()
        return

    screen.writestatus("Fetching video info...")
    song = (g.model[int(num) - 1])
    best = dltype.startswith("dv") or dltype.startswith("da")

    if not best:

        try:
            # user prompt for download stream
            url, ext, url_au, ext_au = prompt_dl(song)

        except KeyboardInterrupt:
            g.message = c.r + "Download aborted!" + c.w
            g.content = content.generate_songlist_display()
            return

        if not url or ext_au == "abort":
            # abort on invalid stream selection
            g.content = content.generate_songlist_display()
            g.message = "%sNo download selected / invalid input%s" % (c.y, c.w)
            return

        else:
            # download user selected stream(s)
            filename = _make_fname(song, ext)
            args = (song, filename, url)

            if url_au and ext_au:
                # downloading video and audio stream for muxing
                audio = False
                filename_au = _make_fname(song, ext_au)
                args_au = (song, filename_au, url_au)

            else:
                audio = ext in ("m4a", "ogg")

            kwargs = dict(audio=audio)

    elif best:
        # set up download without prompt
        url_au = None
        av = "audio" if dltype.startswith("da") else "video"
        audio = av == "audio"
        filename = _make_fname(song, None, av=av)
        args = (song, filename)
        kwargs = dict(url=None, audio=audio)

    try:
        # perform download(s)
        dl_filenames = [args[1]]
        f = _download(*args, **kwargs)
        if f:
            g.message = "Saved to " + c.g + f + c.w

        if url_au:
            dl_filenames += [args_au[1]]
            _download(*args_au, allow_transcode=False, **kwargs)

    except KeyboardInterrupt:
        g.message = c.r + "Download halted!" + c.w

        try:
            for downloaded in dl_filenames:
                os.remove(downloaded)

        except IOError:
            pass

    if url_au:
        # multiplex
        name, ext = os.path.splitext(args[1])
        tmpvideoname = name + '.' + str(random.randint(10000, 99999)) + ext
        os.rename(args[1], tmpvideoname)
        mux_cmd = [g.muxapp, "-i", tmpvideoname, "-i", args_au[1], "-c",
                   "copy", name + ".mp4"]

        try:
            subprocess.call(mux_cmd)
            g.message = "Saved to :" + c.g + mux_cmd[7] + c.w
            os.remove(tmpvideoname)
            os.remove(args_au[1])

        except KeyboardInterrupt:
            g.message = "Audio/Video multiplex aborted!"

    g.content = content.generate_songlist_display()

Example 18

View license
def run_subprocess(
    command, tool, stdout=None,
    stderr=None, stdoutlog=False,
        working_dir=None,with_queue=False, stdin=None):
    """ Runs a command on the system shell and forks a new process

        also creates a file for stderr and stdout if needed
        to avoid deadlock.
    """
    # Very dirty hack
    global STOP  # declared once up front; STOP is read below before the except clause assigns it
    logger.info(tool + ' command = ' + ' '.join(command))
    if (working_dir is None):
        working_dir = '.'
    if(tool == 'selection_pipeline'):
        stderr = working_dir+'/selection_stderr.tmp'
        stdout = working_dir+ '/selection_stdout.tmp'
    if(stderr is None):
        stderr = 'stderr.tmp'
        standard_err = open(stderr, 'w')
    else:
        standard_err = open(stderr, 'w')
    if(stdin is None):
        standard_in = None
    else:
        standard_in = open(working_dir + "/" + stdin, 'r')
    try:
        if(stdout is None):
            standard_out = open('stdout.tmp', 'w')
            exit_code = subprocess.Popen(
                command, stdout=standard_out, stderr=standard_err,cwd=working_dir, stdin=standard_in)
        else:
        # find out what kind of exception to try here
            if(hasattr(stdout, 'read')):
                exit_code = subprocess.Popen(
                    command, stdout=stdout, stderr=standard_err,cwd=working_dir, stdin=standard_in)
            else:
                stdout = open(stdout, 'w')
                exit_code = subprocess.Popen(
                    command, stdout=stdout, stderr=standard_err,cwd=working_dir, stdin=standard_in)
            standard_out = stdout
    except:
        logger.error(tool + " failed to run " + ' '.join(command))
        standard_err = open(stderr, 'r')
        while True:
            line = standard_err.readline()
            if not line:
                break
            logger.info(tool + " STDERR: " + line.strip())
        standard_err.close()
        sys.exit(SUBPROCESS_FAILED_EXIT)
    try:
        while(exit_code.poll() is None):
            sleep(0.2)
            if STOP:
                exit_code.send_signal(signal.SIGINT)
                if with_queue:
                    return
                else:
                    sys.exit(SUBPROCESS_FAILED_EXIT)
    except (KeyboardInterrupt, SystemExit):
        exit_code.send_signal(signal.SIGINT)
        STOP = True
        if with_queue:
            return
        else:
            sys.exit(SUBPROCESS_FAILED_EXIT)
    standard_err.close()
    standard_out.close()
    standard_err = open(stderr, 'r')
    if(exit_code.returncode != 0):
        logger.error(tool + " failed to run " + ' '.join(command))
        while True:
            line = standard_err.readline()
            if not line:
                break
            logger.info(tool + " STDERR: " + line.strip())
        sys.exit(SUBPROCESS_FAILED_EXIT)
    stdout_log = False
    if(stdout is None):
        standard_out = open('stdout.tmp', 'r')
        stdout_log = True
    elif(stdoutlog):
        if(hasattr(stdout, 'write')):
            standard_out = open(stdout.name, 'r')
        else:
            standard_out = open(stdout, 'r')
        stdout_log = True
    if(stdout_log):
        while True:
            line = standard_out.readline()
            if not line:
                break
            logger.info(tool + " STDOUT: " + line.strip())
        standard_out.close()
    while True:
        line = standard_err.readline()
        if not line:
            break
        logger.info(tool + " STDERR: " + line.strip())
    logger.info("Finished tool " + tool)
    logger.debug("command = " + ' '.join(command))
    standard_err.close()
    standard_out.close()
    # Remove stdout if it either was not specified
    # or the log was specified.
    if stdout is None or stdout == 'selection_stdout.tmp':
        os.remove('stdout.tmp')
    elif(stdoutlog):
        os.remove(standard_out.name)
    os.remove(stderr)
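
On Python 3 the same pattern, spooling stdout and stderr to files to avoid pipe deadlock and deleting them afterwards, compresses considerably with subprocess.run and context managers. A sketch under the assumption that the caller only needs the exit status; run_logged, out_path and err_path are illustrative names:

import os
import subprocess

def run_logged(command, out_path='stdout.tmp', err_path='stderr.tmp', cwd='.'):
    """Run command with output spooled to files, removing the files afterwards."""
    try:
        with open(out_path, 'w') as out, open(err_path, 'w') as err:
            result = subprocess.run(command, stdout=out, stderr=err, cwd=cwd)
        return result.returncode
    finally:
        for path in (out_path, err_path):
            if os.path.exists(path):
                os.remove(path)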

Example 19

View license
	def __init__(self,filepath):
		print filepath
		self.template =""
		self.totalTime =""
		self.pages =""
		self.words =""
		self.characters =""
		self.application =""
		self.docSecurity =""
		self.lines =""
		self.paragraphs =""
		self.scaleCrop =""
		self.company =""
		self.linksUpToDate =""
		self.charactersWithSpaces =""
		self.shareDoc =""
		self.hyperlinksChanged =""
		self.appVersion =""	
		self.title =""
		self.subject =""
		self.creator =""
		self.keywords =""
		self.lastModifiedBy =""
		self.revision =""
		self.createdDate =""
		self.modifiedDate =""			
		self.thumbnailPath =""	
		
		rnd  = str(random.randrange(0, 1001, 3))
		zip = zipfile.ZipFile(filepath, 'r')
		file('app'+rnd+'.xml', 'w').write(zip.read('docProps/app.xml'))
		file('core'+rnd+'.xml', 'w').write(zip.read('docProps/core.xml'))
		try:
			file('comments'+rnd+'.xml', 'w').write(zip.read('word/comments.xml'))
			self.comments="ok"
		except:
			self.comments="error"
		
		thumbnailPath = ""
		#try:
			#file('thumbnail'+rnd+'.jpeg', 'w').write(zip.read('docProps/thumbnail.jpeg'))
		 	#thumbnailPath = 'thumbnail'+rnd+'.jpeg'
		#except:
		#	pass
			
		zip.close()

		# first, some statistics about the software used for editing and about the document
		
		f = open ('app'+rnd+'.xml','r')
		app = f.read()
		self.cargaApp(app)
		f.close()
		
		if self.comments=="ok":
			f = open ('comments'+rnd+'.xml','r')
			comm = f.read()
			self.cargaComm(comm)
			f.close()

		# data about the author, etc.

		f = open ('core'+rnd+'.xml','r')
		core = f.read()
		self.cargaCore(core)
		self.thumbnailPath = thumbnailPath
		f.close()

		# delete everything except the thumbnail
		
		os.remove('app'+rnd+'.xml')
		os.remove('core'+rnd+'.xml')
		if self.comments == "ok":
			os.remove('comments'+rnd+'.xml')
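
The temporary app/core/comments XML files above exist only so the parts can be re-read a few lines later; zipfile can return each member as a string directly, which removes the need for os.remove altogether. A minimal sketch, assuming only that the input is a regular OOXML zip; read_doc_parts is an illustrative name:

import zipfile

def read_doc_parts(filepath):
    """Return the app, core and (optional) comments XML without temp files."""
    with zipfile.ZipFile(filepath, 'r') as z:
        app = z.read('docProps/app.xml')
        core = z.read('docProps/core.xml')
        try:
            comments = z.read('word/comments.xml')
        except KeyError:  # member not present in this document
            comments = None
    return app, core, comments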

Example 20

Project: gkno_launcher
Source File: parameterSets.py
View license
  def removeParameterSet(self, graph, superpipeline, setName):
    pipeline = graph.pipeline

    # Get the configuration file information for the pipeline and the available parameter sets.
    pipelineConfigurationData = superpipeline.pipelineConfigurationData[pipeline]
    sets                      = pipelineConfigurationData.parameterSets.sets

    # Only user-generated parameter sets can be removed, so trim the list.
    externalSets = []
    for pSet in sets.keys():
      if sets[pSet].isExternal: externalSets.append(pSet)

    # If the parameter set doesn't exist, it cannot be removed.
    if setName not in sets: self.errors.removeNonSet(setName, externalSets)
    else:

      # If the defined pipeline is not a user-generated parameter set, it cannot be removed.
      if setName not in externalSets: self.errors.removeNonExternalSet(setName, externalSets)

      # Remove the parameter set.
      else:

        # Define the name of the configuration file that holds the parameter set information.
        filename = str(pipeline + '-parameter-sets.json')

        # Open the configuration file for writing.
        filehandle = fh.fileHandling.openFileForWriting(filename)

        # Put all of the parameter set information in a dictionary that can be dumped to a json file.
        jsonParameterSets                  = OrderedDict()
        jsonParameterSets['parameter sets'] = []

        # Keep count of the number of parameter sets remaining.
        remainingSets = 0

        # Loop over the external parameter sets.
        for parameterSet in externalSets:

          # Do not include the parameter set to be removed.
          if parameterSet != setName:
            parameterSetInformation                = OrderedDict()
            parameterSetInformation['id']          = parameterSet
            parameterSetInformation['description'] = sets[parameterSet].description
            parameterSetInformation['data']        = []
            remainingSets += 1
    
            # Set the nodes.
            for data in sets[parameterSet].data:
              nodeInformation             = OrderedDict()
              nodeInformation['id']       = data.id
              nodeInformation['node']     = data.nodeId
              nodeInformation['values']   = data.values
              parameterSetInformation['data'].append(nodeInformation)
    
            # Store this parameter set's data.
            jsonParameterSets['parameter sets'].append(parameterSetInformation)
    
        # If there are no parameter sets remaining, delete the parameter sets file.
        if remainingSets == 0:
          filehandle.close()
          os.remove(filename)
          os.remove(pipelineConfigurationData.path + '/' + filename)

        # Dump all the parameterSets to file.
        else:
          json.dump(jsonParameterSets, filehandle, indent = 2)
          filehandle.close()
    
          # Move the configuration file.
          shutil.copyfile(filename, str(pipelineConfigurationData.path + '/' + filename))
          os.remove(filename)

    # Terminate gkno once the parameter set has been removed.
    length = len(setName) + 38
    print('=' * length, file = sys.stdout)
    print('Parameter set \'' + setName + '\' successfully removed', file = sys.stdout)
    print('=' * length, file = sys.stdout)
    sys.stdout.flush()
    exit(0)
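
The copy-then-remove step at the end can expose readers to a half-written configuration file. A sketch of an atomic variant built on os.replace, assuming the temporary file and the destination sit on the same filesystem; dump_parameter_sets is an illustrative name:

import json
import os
import tempfile

def dump_parameter_sets(data, destination):
    """Write JSON to a temp file in the target directory, then swap it in."""
    directory = os.path.dirname(destination) or '.'
    fd, tmp_path = tempfile.mkstemp(dir=directory, suffix='.json')
    try:
        with os.fdopen(fd, 'w') as handle:
            json.dump(data, handle, indent=2)
        os.replace(tmp_path, destination)  # atomic on POSIX and Windows
    except Exception:
        os.remove(tmp_path)
        raise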

Example 21

Project: metagoofil
Source File: metadataMSOfficeXML.py
View license
	def __init__(self,filepath):
		self.template =""
		self.totalTime =""
		self.pages =""
		self.words =""
		self.characters =""
		self.application =""
		self.docSecurity =""
		self.lines =""
		self.paragraphs =""
		self.scaleCrop =""
		self.company =""
		self.linksUpToDate =""
		self.charactersWithSpaces =""
		self.shareDoc =""
		self.hyperlinksChanged =""
		self.appVersion =""	
		self.title =""
		self.subject =""
		self.creator =""
		self.keywords =""
		self.lastModifiedBy =""
		self.revision =""
		self.createdDate =""
		self.modifiedDate =""			
		self.thumbnailPath =""	
		rnd  = str(random.randrange(0, 1001, 3))
		zip = zipfile.ZipFile(filepath, 'r')
		file('app'+rnd+'.xml', 'w').write(zip.read('docProps/app.xml'))
		file('core'+rnd+'.xml', 'w').write(zip.read('docProps/core.xml'))
		file('docu'+rnd+'.xml', 'w').write(zip.read('word/document.xml'))
		try:
			file('comments'+rnd+'.xml', 'w').write(zip.read('word/comments.xml'))
			self.comments="ok"
		except:
			self.comments="error"
		thumbnailPath = ""
		#try:
			#file('thumbnail'+rnd+'.jpeg', 'w').write(zip.read('docProps/thumbnail.jpeg'))
		 	#thumbnailPath = 'thumbnail'+rnd+'.jpeg'
		#except:
		#	pass
			
		zip.close()
		# first, some statistics about the software used for editing and about the document
		f = open ('app'+rnd+'.xml','r')
		app = f.read()
		self.cargaApp(app)
		f.close()
		if self.comments=="ok":
			f = open ('comments'+rnd+'.xml','r')
			comm = f.read()
			self.cargaComm(comm)
			f.close()
		
		# document content
		f = open ('docu'+rnd+'.xml','r')
		docu = f.read()
		self.text = docu
		f.close()
		# data about the author, etc.

		f = open ('core'+rnd+'.xml','r')
		core = f.read()
		self.cargaCore(core)
		self.thumbnailPath = thumbnailPath
		f.close()

		# delete everything except the thumbnail
		
		os.remove('app'+rnd+'.xml')
		os.remove('core'+rnd+'.xml')
		if self.comments == "ok":
			os.remove('comments'+rnd+'.xml')
		os.remove('docu'+rnd+'.xml')

Example 22

Project: imagrium
Source File: test_posixpath.py
View license
    def test_isfile(self):
        self.assertIs(posixpath.isfile(test_support.TESTFN), False)
        f = open(test_support.TESTFN, "wb")
        try:
            f.write("foo")
            f.close()
            self.assertIs(posixpath.isfile(test_support.TESTFN), True)
            os.remove(test_support.TESTFN)
            os.mkdir(test_support.TESTFN)
            self.assertIs(posixpath.isfile(test_support.TESTFN), False)
            os.rmdir(test_support.TESTFN)
        finally:
            if not f.close():
                f.close()
            try:
                os.remove(test_support.TESTFN)
            except os.error:
                pass
            try:
                os.rmdir(test_support.TESTFN)
            except os.error:
                pass

        self.assertRaises(TypeError, posixpath.isdir)

    def test_samefile(self):
        f = open(test_support.TESTFN + "1", "wb")
        try:
            f.write("foo")
            f.close()
            self.assertIs(
                posixpath.samefile(
                    test_support.TESTFN + "1",
                    test_support.TESTFN + "1"
                ),
                True
            )
            # If we don't have links, assume that os.stat doesn't return reasonable
            # inode information and thus, that samefile() doesn't work
            if hasattr(os, "symlink"):
                os.symlink(
                    test_support.TESTFN + "1",
                    test_support.TESTFN + "2"
                )
                self.assertIs(
                    posixpath.samefile(
                        test_support.TESTFN + "1",
                        test_support.TESTFN + "2"
                    ),
                    True
                )
                os.remove(test_support.TESTFN + "2")
                f = open(test_support.TESTFN + "2", "wb")
                f.write("bar")
                f.close()
                self.assertIs(
                    posixpath.samefile(
                        test_support.TESTFN + "1",
                        test_support.TESTFN + "2"
                    ),
                    False
                )
        finally:
            if not f.close():
                f.close()
            try:
                os.remove(test_support.TESTFN + "1")
            except os.error:
                pass
            try:
                os.remove(test_support.TESTFN + "2")
            except os.error:
                pass

        self.assertRaises(TypeError, posixpath.samefile)
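
The try/except os.error teardown above predates modern Python; on Python 3 the same idempotent delete is usually written with contextlib.suppress or pathlib (missing_ok needs 3.8+). A short sketch with a hypothetical path:

import contextlib
import os
from pathlib import Path

# Either form removes a file and silently ignores one that is already gone.
with contextlib.suppress(FileNotFoundError):
    os.remove('some-test-file')

Path('some-test-file').unlink(missing_ok=True)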

Example 24

Project: dopey
Source File: document.py
View license
    def save_ora(self, filename, options=None, **kwargs):
        logger.info('save_ora: %r (%r, %r)', filename, options, kwargs)
        t0 = time.time()
        tempdir = tempfile.mkdtemp('mypaint')
        if not isinstance(tempdir, unicode):
            tempdir = tempdir.decode(sys.getfilesystemencoding())
        # use .tmp extension, so we don't overwrite a valid file if there is an exception
        z = zipfile.ZipFile(filename + '.tmpsave', 'w', compression=zipfile.ZIP_STORED)
        # work around a permission bug in the zipfile library: http://bugs.python.org/issue3394
        def write_file_str(filename, data):
            zi = zipfile.ZipInfo(filename)
            zi.external_attr = 0100644 << 16
            z.writestr(zi, data)
        write_file_str('mimetype', 'image/openraster') # must be the first file
        image = ET.Element('image')
        stack = ET.SubElement(image, 'stack')
        x0, y0, w0, h0 = self.get_effective_bbox()
        a = image.attrib
        a['w'] = str(w0)
        a['h'] = str(h0)

        def store_pixbuf(pixbuf, name):
            tmp = join(tempdir, 'tmp.png')
            t1 = time.time()
            pixbuf.savev(tmp, 'png', [], [])
            logger.debug('%.3fs pixbuf saving %s', time.time() - t1, name)
            z.write(tmp, name)
            os.remove(tmp)

        def store_surface(surface, name, rect=[]):
            tmp = join(tempdir, 'tmp.png')
            t1 = time.time()
            surface.save_as_png(tmp, *rect, **kwargs)
            logger.debug('%.3fs surface saving %s', time.time() - t1, name)
            z.write(tmp, name)
            os.remove(tmp)

        def add_layer(x, y, opac, surface, name, layer_name, visible=True,
                      locked=False, selected=False,
                      compositeop=DEFAULT_COMPOSITE_OP, rect=[]):
            layer = ET.Element('layer')
            stack.append(layer)
            store_surface(surface, name, rect)
            a = layer.attrib
            if layer_name:
                a['name'] = layer_name
            a['src'] = name
            a['x'] = str(x)
            a['y'] = str(y)
            a['opacity'] = str(opac)
            if compositeop not in VALID_COMPOSITE_OPS:
                compositeop = DEFAULT_COMPOSITE_OP
            a['composite-op'] = compositeop
            if visible:
                a['visibility'] = 'visible'
            else:
                a['visibility'] = 'hidden'
            if locked:
                a['edit-locked'] = 'true'
            if selected:
                a['selected'] = 'true'
            return layer

        for idx, l in enumerate(reversed(self.layers)):
            if l.is_empty():
                continue
            opac = l.opacity
            x, y, w, h = l.get_bbox()
            sel = (idx == self.layer_idx)
            el = add_layer(x-x0, y-y0, opac, l._surface,
                           'data/layer%03d.png' % idx, l.name, l.visible,
                           locked=l.locked, selected=sel,
                           compositeop=l.compositeop, rect=(x, y, w, h))

            # strokemap
            sio = StringIO()
            l.save_strokemap_to_file(sio, -x, -y)
            data = sio.getvalue(); sio.close()
            name = 'data/layer%03d_strokemap.dat' % idx
            el.attrib['mypaint_strokemap_v2'] = name
            write_file_str(name, data)

        ani_data = self.ani.xsheet_as_str()
        write_file_str('animation.xsheet', ani_data)

        # save background as layer (solid color or tiled)
        bg = self.background
        # save as fully rendered layer
        x, y, w, h = self.get_bbox()
        l = add_layer(x-x0, y-y0, 1.0, bg, 'data/background.png', 'background',
                      locked=True, selected=False,
                      compositeop=DEFAULT_COMPOSITE_OP,
                      rect=(x,y,w,h))
        x, y, w, h = bg.get_bbox()
        # save as single pattern (with corrected origin)
        store_surface(bg, 'data/background_tile.png', rect=(x+x0, y+y0, w, h))
        l.attrib['background_tile'] = 'data/background_tile.png'

        # preview (256x256)
        t2 = time.time()
        logger.debug('starting to render full image for thumbnail...')

        thumbnail_pixbuf = self.render_thumbnail()
        store_pixbuf(thumbnail_pixbuf, 'Thumbnails/thumbnail.png')
        logger.debug('total %.3fs spent on thumbnail', time.time() - t2)

        helpers.indent_etree(image)
        xml = ET.tostring(image, encoding='UTF-8')

        write_file_str('stack.xml', xml)
        z.close()
        os.rmdir(tempdir)
        if os.path.exists(filename):
            os.remove(filename) # windows needs that
        os.rename(filename + '.tmpsave', filename)

        logger.info('%.3fs save_ora total', time.time() - t0)

        return thumbnail_pixbuf
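
The remove-then-rename step at the end exists because os.rename refuses to overwrite an existing file on Windows. Python 3.3+ provides os.replace, which overwrites atomically on both platforms, so the window where neither file exists disappears. A sketch, with commit_save as an illustrative name:

import os

def commit_save(filename):
    """Atomically promote filename + '.tmpsave' to filename."""
    # os.replace overwrites the destination in a single step.
    os.replace(filename + '.tmpsave', filename)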

Example 25

Project: EQcorrscan
Source File: sfile_util_test.py
View license
    def test_read_write(self):
        """
        Function to test the read and write capabilities of sfile_util.
        """
        import os
        from obspy.core.event import Catalog
        import obspy
        if int(obspy.__version__.split('.')[0]) >= 1:
            from obspy.core.event import read_events
        else:
            from obspy.core.event import readEvents as read_events

        # Set-up a test event
        test_event = full_test_event()
        # Add the event to a catalogue which can be used for QuakeML testing
        test_cat = Catalog()
        test_cat += test_event
        # Write the catalog
        test_cat.write("Test_catalog.xml", format='QUAKEML')
        # Read and check
        read_cat = read_events("Test_catalog.xml")
        os.remove("Test_catalog.xml")
        self.assertEqual(read_cat[0].resource_id, test_cat[0].resource_id)
        for i in range(len(read_cat[0].picks)):
            for key in read_cat[0].picks[i].keys():
                # Ignore backazimuth errors and horizontal_slowness_errors
                if key in ['backazimuth_errors', 'horizontal_slowness_errors']:
                    continue
                self.assertEqual(read_cat[0].picks[i][key],
                                 test_cat[0].picks[i][key])
        self.assertEqual(read_cat[0].origins[0].resource_id,
                         test_cat[0].origins[0].resource_id)
        self.assertEqual(read_cat[0].origins[0].time,
                         test_cat[0].origins[0].time)
        # Note that time_residual_RMS is not part of the QuakeML format
        self.assertEqual(read_cat[0].origins[0].longitude,
                         test_cat[0].origins[0].longitude)
        self.assertEqual(read_cat[0].origins[0].latitude,
                         test_cat[0].origins[0].latitude)
        self.assertEqual(read_cat[0].origins[0].depth,
                         test_cat[0].origins[0].depth)
        # Check magnitudes
        self.assertEqual(read_cat[0].magnitudes, test_cat[0].magnitudes)
        self.assertEqual(read_cat[0].event_descriptions,
                         test_cat[0].event_descriptions)
        # Check local magnitude amplitude
        self.assertEqual(read_cat[0].amplitudes[0].resource_id,
                         test_cat[0].amplitudes[0].resource_id)
        self.assertEqual(read_cat[0].amplitudes[0].period,
                         test_cat[0].amplitudes[0].period)
        self.assertEqual(read_cat[0].amplitudes[0].unit,
                         test_cat[0].amplitudes[0].unit)
        self.assertEqual(read_cat[0].amplitudes[0].generic_amplitude,
                         test_cat[0].amplitudes[0].generic_amplitude)
        self.assertEqual(read_cat[0].amplitudes[0].pick_id,
                         test_cat[0].amplitudes[0].pick_id)
        self.assertEqual(read_cat[0].amplitudes[0].waveform_id,
                         test_cat[0].amplitudes[0].waveform_id)
        # Check coda magnitude pick
        self.assertEqual(read_cat[0].amplitudes[1].resource_id,
                         test_cat[0].amplitudes[1].resource_id)
        self.assertEqual(read_cat[0].amplitudes[1].type,
                         test_cat[0].amplitudes[1].type)
        self.assertEqual(read_cat[0].amplitudes[1].unit,
                         test_cat[0].amplitudes[1].unit)
        self.assertEqual(read_cat[0].amplitudes[1].generic_amplitude,
                         test_cat[0].amplitudes[1].generic_amplitude)
        self.assertEqual(read_cat[0].amplitudes[1].pick_id,
                         test_cat[0].amplitudes[1].pick_id)
        self.assertEqual(read_cat[0].amplitudes[1].waveform_id,
                         test_cat[0].amplitudes[1].waveform_id)
        self.assertEqual(read_cat[0].amplitudes[1].magnitude_hint,
                         test_cat[0].amplitudes[1].magnitude_hint)
        self.assertEqual(read_cat[0].amplitudes[1].snr,
                         test_cat[0].amplitudes[1].snr)
        self.assertEqual(read_cat[0].amplitudes[1].category,
                         test_cat[0].amplitudes[1].category)

        # Check the read-write s-file functionality
        sfile = eventtosfile(test_cat[0], userID='TEST',
                             evtype='L', outdir='.',
                             wavefiles='test', explosion=True, overwrite=True)
        del read_cat
        self.assertEqual(readwavename(sfile), ['test'])
        read_cat = Catalog()
        read_cat += readpicks(sfile)
        os.remove(sfile)
        for i in range(len(read_cat[0].picks)):
            self.assertEqual(read_cat[0].picks[i].time,
                             test_cat[0].picks[i].time)
            self.assertEqual(read_cat[0].picks[i].backazimuth,
                             test_cat[0].picks[i].backazimuth)
            self.assertEqual(read_cat[0].picks[i].onset,
                             test_cat[0].picks[i].onset)
            self.assertEqual(read_cat[0].picks[i].phase_hint,
                             test_cat[0].picks[i].phase_hint)
            self.assertEqual(read_cat[0].picks[i].polarity,
                             test_cat[0].picks[i].polarity)
            self.assertEqual(read_cat[0].picks[i].waveform_id.station_code,
                             test_cat[0].picks[i].waveform_id.station_code)
            self.assertEqual(read_cat[0].picks[i].waveform_id.channel_code[-1],
                             test_cat[0].picks[i].waveform_id.channel_code[-1])
        # assert read_cat[0].origins[0].resource_id ==\
        #     test_cat[0].origins[0].resource_id
        self.assertEqual(read_cat[0].origins[0].time,
                         test_cat[0].origins[0].time)
        # Note that time_residual_RMS is not part of the QuakeML format
        self.assertEqual(read_cat[0].origins[0].longitude,
                         test_cat[0].origins[0].longitude)
        self.assertEqual(read_cat[0].origins[0].latitude,
                         test_cat[0].origins[0].latitude)
        self.assertEqual(read_cat[0].origins[0].depth,
                         test_cat[0].origins[0].depth)
        self.assertEqual(read_cat[0].magnitudes[0].mag,
                         test_cat[0].magnitudes[0].mag)
        self.assertEqual(read_cat[0].magnitudes[1].mag,
                         test_cat[0].magnitudes[1].mag)
        self.assertEqual(read_cat[0].magnitudes[2].mag,
                         test_cat[0].magnitudes[2].mag)
        self.assertEqual(read_cat[0].magnitudes[0].creation_info,
                         test_cat[0].magnitudes[0].creation_info)
        self.assertEqual(read_cat[0].magnitudes[1].creation_info,
                         test_cat[0].magnitudes[1].creation_info)
        self.assertEqual(read_cat[0].magnitudes[2].creation_info,
                         test_cat[0].magnitudes[2].creation_info)
        self.assertEqual(read_cat[0].magnitudes[0].magnitude_type,
                         test_cat[0].magnitudes[0].magnitude_type)
        self.assertEqual(read_cat[0].magnitudes[1].magnitude_type,
                         test_cat[0].magnitudes[1].magnitude_type)
        self.assertEqual(read_cat[0].magnitudes[2].magnitude_type,
                         test_cat[0].magnitudes[2].magnitude_type)
        self.assertEqual(read_cat[0].event_descriptions,
                         test_cat[0].event_descriptions)
        # assert read_cat[0].amplitudes[0].resource_id ==\
        #     test_cat[0].amplitudes[0].resource_id
        self.assertEqual(read_cat[0].amplitudes[0].period,
                         test_cat[0].amplitudes[0].period)
        self.assertEqual(read_cat[0].amplitudes[0].snr,
                         test_cat[0].amplitudes[0].snr)
        # Check coda magnitude pick
        # Resource ids get overwritten because you can't have two the same in
        # memory
        # self.assertEqual(read_cat[0].amplitudes[1].resource_id,
        #                  test_cat[0].amplitudes[1].resource_id)
        self.assertEqual(read_cat[0].amplitudes[1].type,
                         test_cat[0].amplitudes[1].type)
        self.assertEqual(read_cat[0].amplitudes[1].unit,
                         test_cat[0].amplitudes[1].unit)
        self.assertEqual(read_cat[0].amplitudes[1].generic_amplitude,
                         test_cat[0].amplitudes[1].generic_amplitude)
        # Resource ids get overwritten because you can't have two the same in
        # memory
        # self.assertEqual(read_cat[0].amplitudes[1].pick_id,
        #                  test_cat[0].amplitudes[1].pick_id)
        self.assertEqual(read_cat[0].amplitudes[1].waveform_id.station_code,
                         test_cat[0].amplitudes[1].waveform_id.station_code)
        self.assertEqual(read_cat[0].amplitudes[1].waveform_id.channel_code,
                         test_cat[0].amplitudes[1].
                         waveform_id.channel_code[0] +
                         test_cat[0].amplitudes[1].
                         waveform_id.channel_code[-1])
        self.assertEqual(read_cat[0].amplitudes[1].magnitude_hint,
                         test_cat[0].amplitudes[1].magnitude_hint)
        # snr is not supported in s-file
        # self.assertEqual(read_cat[0].amplitudes[1].snr,
        #                  test_cat[0].amplitudes[1].snr)
        self.assertEqual(read_cat[0].amplitudes[1].category,
                         test_cat[0].amplitudes[1].category)
        del read_cat

        # Test a deliberate fail
        test_cat.append(full_test_event())
        with self.assertRaises(IOError):
            # Raises error due to multiple events in catalog
            sfile = eventtosfile(test_cat, userID='TEST',
                                 evtype='L', outdir='.',
                                 wavefiles='test', explosion=True,
                                 overwrite=True)
            # Raises error due to too long userID
            sfile = eventtosfile(test_cat[0], userID='TESTICLE',
                                 evtype='L', outdir='.',
                                 wavefiles='test', explosion=True,
                                 overwrite=True)
            # Raises error due to unrecognised event type
            sfile = eventtosfile(test_cat[0], userID='TEST',
                                 evtype='U', outdir='.',
                                 wavefiles='test', explosion=True,
                                 overwrite=True)
            # Raises error due to no output directory
            sfile = eventtosfile(test_cat[0], userID='TEST',
                                 evtype='L', outdir='albatross',
                                 wavefiles='test', explosion=True,
                                 overwrite=True)
            # Raises error due to incorrect wavefile formatting
            sfile = eventtosfile(test_cat[0], userID='TEST',
                                 evtype='L', outdir='.',
                                 wavefiles=1234, explosion=True,
                                 overwrite=True)
        with self.assertRaises(IndexError):
            invalid_origin = test_cat[0].copy()
            invalid_origin.origins = []
            sfile = eventtosfile(invalid_origin, userID='TEST',
                                 evtype='L', outdir='.',
                                 wavefiles='test', explosion=True,
                                 overwrite=True)
        with self.assertRaises(ValueError):
            invalid_origin = test_cat[0].copy()
            invalid_origin.origins[0].time = None
            sfile = eventtosfile(invalid_origin, userID='TEST',
                                 evtype='L', outdir='.',
                                 wavefiles='test', explosion=True,
                                 overwrite=True)
        # Write a near empty origin
        valid_origin = test_cat[0].copy()
        valid_origin.origins[0].latitude = None
        valid_origin.origins[0].longitude = None
        valid_origin.origins[0].depth = None
        sfile = eventtosfile(valid_origin, userID='TEST',
                             evtype='L', outdir='.',
                             wavefiles='test', explosion=True,
                             overwrite=True)
        self.assertTrue(os.path.isfile(sfile))
        os.remove(sfile)
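
Tests like this interleave assertions with os.remove calls, so a failed assertion strands Test_catalog.xml or the s-file in the working directory. A common remedy is to write into a temporary directory that is discarded wholesale; a minimal sketch, not tied to the EQcorrscan API:

import os
import tempfile

with tempfile.TemporaryDirectory() as tmpdir:
    catalog_path = os.path.join(tmpdir, 'Test_catalog.xml')
    with open(catalog_path, 'w') as f:
        f.write('<quakeml/>')  # placeholder payload
    # ... assertions against catalog_path go here ...
# tmpdir and everything inside it is gone, pass or fail.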

Example 26

Project: neuroConstruct
Source File: test_posixpath.py
View license
    def test_isfile(self):
        self.assertIs(posixpath.isfile(test_support.TESTFN), False)
        f = open(test_support.TESTFN, "wb")
        try:
            f.write("foo")
            f.close()
            self.assertIs(posixpath.isfile(test_support.TESTFN), True)
            os.remove(test_support.TESTFN)
            os.mkdir(test_support.TESTFN)
            self.assertIs(posixpath.isfile(test_support.TESTFN), False)
            os.rmdir(test_support.TESTFN)
        finally:
            if not f.close():
                f.close()
            try:
                os.remove(test_support.TESTFN)
            except os.error:
                pass
            try:
                os.rmdir(test_support.TESTFN)
            except os.error:
                pass

        self.assertRaises(TypeError, posixpath.isdir)

    def test_samefile(self):
        f = open(test_support.TESTFN + "1", "wb")
        try:
            f.write("foo")
            f.close()
            self.assertIs(
                posixpath.samefile(
                    test_support.TESTFN + "1",
                    test_support.TESTFN + "1"
                ),
                True
            )
            # If we don't have links, assume that os.stat doesn't return reasonable
            # inode information and thus, that samefile() doesn't work
            if hasattr(os, "symlink"):
                os.symlink(
                    test_support.TESTFN + "1",
                    test_support.TESTFN + "2"
                )
                self.assertIs(
                    posixpath.samefile(
                        test_support.TESTFN + "1",
                        test_support.TESTFN + "2"
                    ),
                    True
                )
                os.remove(test_support.TESTFN + "2")
                f = open(test_support.TESTFN + "2", "wb")
                f.write("bar")
                f.close()
                self.assertIs(
                    posixpath.samefile(
                        test_support.TESTFN + "1",
                        test_support.TESTFN + "2"
                    ),
                    False
                )
        finally:
            if not f.close():
                f.close()
            try:
                os.remove(test_support.TESTFN + "1")
            except os.error:
                pass
            try:
                os.remove(test_support.TESTFN + "2")
            except os.error:
                pass

        self.assertRaises(TypeError, posixpath.samefile)

Example 27

Project: pykickstart
Source File: __init__.py
View license
def testFile(pofile, prefix=None, releaseMode=False, modifyLinguas=True):
    """Run all registered tests against the given .po file.

       If run in release mode, this function will always return True, and if
       the po file does not pass the tests the language will be removed.

       :param str pofile: The .po file name to check
       :param str prefix: An optional directory prefix to strip from error messages
       :param bool releaseMode: whether to run in release mode
       :param bool modifyLinguas: whether to remove translations from LINGUAS in release mode
       :return: whether the checks succeeded or not
       :rtype: bool
    """
    success = True
    for test in _tests:
        # Don't print the tmpdir path in error messages
        if prefix is not None and pofile.startswith(prefix):
            poerror = pofile[len(prefix):]
        else:
            poerror = pofile

        try:
            with warnings.catch_warnings(record=True) as w:
                test(pofile)

                # Print any warnings collected
                for warn in w:
                    print("%s warned on %s: %s" % (test.__name__, poerror, warn.message))
        except Exception as e: # pylint: disable=broad-except
            print("%s failed on %s: %s" % (test.__name__, poerror, str(e)))
            if releaseMode:
                # Remove the po file and the .mo file built from it
                print("Removing %s" % pofile)
                os.remove(pofile)

                # Check for both .mo and .gmo
                mofile = os.path.splitext(pofile)[0] + '.mo'
                if os.path.exists(mofile):
                    print("Removing %s" % mofile)
                    os.remove(mofile)

                gmofile = os.path.splitext(pofile)[0] + '.gmo'
                if os.path.exists(gmofile):
                    print("Removing %s" % gmofile)
                    os.remove(gmofile)

                if modifyLinguas:
                    # If there is a LINGUAS file in the po directory, remove the
                    # language from it
                    linguas = os.path.join(os.path.dirname(mofile), 'LINGUAS')
                    if os.path.exists(linguas):
                        language = os.path.splitext(os.path.basename(pofile))[0]
                        print("Removing %s from LINGUAS" % language)
                        _remove_lingua(linguas, language)

                # No need to run the rest of the tests since we just killed the file
                break
            else:
                success = False

    return success
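
The three removal blocks (the .po file, then .mo, then .gmo) repeat one pattern: delete the file if it exists. A compact sketch of the same sweep; _remove_built_files is an illustrative helper name, not part of pykickstart:

import os

def _remove_built_files(pofile):
    """Remove the .po file and any .mo/.gmo built from it."""
    base = os.path.splitext(pofile)[0]
    for path in [pofile, base + '.mo', base + '.gmo']:
        if os.path.exists(path):
            print("Removing %s" % path)
            os.remove(path)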

Example 28

Project: fusioncatcher
Source File: sra2illumina.py
View license
def sra2illumina(input_file,
                 output_file,
                 tag_read = None,
                 tag='',
                 phred_conversion = False,
                 operation = 'change',
                 tmp_dir = None,
                 size_read_buffer = 10**8):
    """
    It converts the FASTQ file (PHRED-33 qualities and SRA read names) downloaded
    from Short Read Archive (SRA) to Illumina FASTQ file (PHRED-64 Illumina v1.5
    and Illumina read names).
    """
    temp_file = None
    if phred_conversion:
        temp_file = give_me_temp_filename(tmp_dir)
    else:
        temp_file = output_file

    read_name = file(input_file,'r').readline().rstrip('\r\n')
    sra = False
    e = read_name.partition(" ")[0]
    if read_name.startswith('@') and ( not(e.endswith('/1') or e.endswith('/2'))):
        sra = True

    if operation == 'change' or sra:
        fid = open(input_file,'r')
        fod = open(temp_file,'w')
        i = 0
        r = 0
        while True:
            gc.disable()
            lines = fid.readlines(size_read_buffer)
            gc.enable()
            if not lines:
                break
            n = len(lines)
            for j in xrange(n):
                r = r + 1
                i = i + 1
                if i == 1:
                    if tag_read:
                        lines[j] = '@%s%s%s\n' % (tag_read ,int2str(r,12) , tag)
                    else: # if there is no tag_read then the original SRA id is left
                        lines[j] = '%s%s\n' % (lines[j][:-1].partition(" ")[0], tag)
                    #lines[j] = lines[j].rstrip('\r\n').upper().split(' ')[1]+tag+'\n'
                elif i == 3:
                    lines[j] = "+\n"
                elif i == 4:
                    i = 0
            fod.writelines(lines)
        fid.close()
        fod.close()
        if phred_conversion == '64':
            phred.fq2fq(temp_file,'sanger',output_file,'illumina-1.5',tmp_dir = tmp_dir)
            os.remove(temp_file)
        elif phred_conversion == '33':
            phred.fq2fq(temp_file,'auto-detect',output_file,'sanger',tmp_dir = tmp_dir)
            os.remove(temp_file)
    else:
        print "No changes are done!"
        if os.path.isfile(output_file):
            os.remove(output_file)
        if operation == 'soft':
            if os.path.islink(input_file):
                linkto = os.readlink(input_file)
                os.symlink(linkto, output_file)
            else:
                os.symlink(input_file,output_file)
        elif operation == 'hard':
            linkto = input_file
            if os.path.islink(input_file):
                linkto = os.readlink(input_file)
            try:
                os.link(linkto,output_file)
            except OSError as er:
                print >>sys.stderr,"WARNING: Cannot do hard links ('%s' and '%s')!" % (linkto,output_file)
                shutil.copyfile(linkto,output_file)
#                if er.errno == errno.EXDEV:
#                    # they are on different partitions
#                    # [Errno 18] Invalid cross-device link
#                    shutil.copyfile(linkto,output_file)
#                else:
#                    print >>sys.stderr,"ERROR: Cannot do hard links ('%s' and '%s')!" % (linkto,output_file)
#                    print >>sys.stderr,er
#                    sys.exit(1)

        elif operation == 'copy':
            shutil.copyfile(input_file, output_file)
        else:
            print >>sys.stderr, "ERROR: unknown operation of linking!", operation
            sys.exit(1)
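
The hard-link branch above falls back to copying when os.link fails, for instance across filesystems (errno.EXDEV). A condensed sketch of just that fallback; link_or_copy is an illustrative name:

import os
import shutil

def link_or_copy(src, dst):
    """Hard-link src to dst, copying instead when linking is not possible."""
    if os.path.isfile(dst):
        os.remove(dst)  # os.link refuses to overwrite an existing path
    try:
        os.link(src, dst)
    except OSError:  # e.g. cross-device link, or a filesystem without hard links
        shutil.copyfile(src, dst)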

Example 29

Project: spinalcordtoolbox
Source File: process.py
View license
def main():
    div = [3,5,7,9,11,13,15,19,23,25]
    nurbs_ctl_points = param.nurbs_ctl_points
    fitting_method = param.fitting_method
    sigma = param.sigma
    centerline = param.centerline
    s = 0
    d = 0
    exit = 0
    file_name = ''
    warp = param.warp

    try:
        opts, args = getopt.getopt(sys.argv[1:],'hi:M:n:d:s:c:v:w:r:h:')
    except getopt.GetoptError as err:
        print str(err)
        usage()
    for opt, arg in opts:
        if opt == '-h':
            usage()
        elif opt in ('-i'):
            file_name = arg
        elif opt in ('-M'):
            fitting_method = arg
        elif opt in ('-n'):
            nurbs_ctl_points = int(arg)
        elif opt in ('-d'):
            d = 1
            div = int(arg)
        elif opt in ('-s'):
            s = 1
            sigma = str(arg)
        elif opt in ('-c'):
            centerline = str(arg)
            fname_centerline = centerline
        elif opt in ('-v'):
            verbose = int(arg)
        elif opt in ('-w'):
            write = arg
        elif opt in ('-r'):
            remove = arg

    print 'file to be processed: ',file_name
    path, fname, ext_fname = sct.extract_fname(file_name)

    if fitting_method not in ('NURBS', 'polynomial', 'non_parametrique', 'smooth'):
        usage()

    if centerline is None:
        # Generating centerline using propseg (Warning: be sure propseg work well with the input file)
        print 'Applying propseg to get the centerline as a binary image...'
        fname_centerline = fname+'_centerline'
        cmd = 'sct_propseg -i ' + file_name + ' -o . -t t2 -centerline-binary'
        sct.run(cmd)
    else:
        path, fname_centerline, ext_fname = sct.extract_fname(centerline)

    if s != 0:
        print 'centerline smoothing...'
        fname_smooth = fname_centerline+'_smooth'
        print 'Gauss sigma: ', sigma
        cmd = 'fslmaths ' + fname_centerline + ' -s ' + str(sigma) + ' ' + fname_smooth + ext_fname
        sct.run(cmd)
        fname_centerline = fname_smooth+ext_fname
    else:
        fname_centerline = centerline

    if fitting_method == 'NURBS':
        if not d and not nurbs_ctl_points:
            for d in div:
                add = d - 1
                e = 1
                print 'generating the centerline...'

                # This loops stands for checking if nurbs will work with d
                while e == 1:
                    add += 1
                    e = centerline.check_nurbs(add, None, None, fname_smooth+ext_fname)
                    if add > 30:
                        exit = 1
                        break
                if exit == 1:
                    break
                d = add
                size = e

                nurbs_ctl_points = int(size)/d
        elif not nurbs_ctl_points:
            nurbs_ctl_points = int(size)/d

            print 'straightening...  d = ', str(d)

            # STRAIGHTEN USING NURBS
            cmd = 'sct_straighten_spinalcord -i ' + file_name + ' -c ' + fname_centerline + ' -n ' + str(nurbs_ctl_points)
            sct.run(cmd)

            print 'apply warp to segmentation'
            #final_file_name = fname+'_straightttt_seg'+ext_fname
            final_file_name = fname + '_straight_seg' + ext_fname
            #cmd = 'sct_WarpImageMultiTransform 3 '+fname_seg+ext_fname+' '+final_file_name+' warp_curve2straight.nii.gz'
            cmd = 'sct_WarpImageMultiTransform 3 ' + fname_centerline + ' ' + final_file_name + ' warp_curve2straight.nii.gz'

            sct.run(cmd)

            print 'analyzing the straightened file'
            linear_fitting.returnSquaredErrors(final_file_name, d, size)

    elif fitting_method == 'polynomial':
        # STRAIGHTEN USING POLYNOMIAL FITTING
        cmd = 'sct_straighten_spinalcord -i ' + file_name + ' -c ' + fname_smooth + ext_fname + ' -f polynomial -v 2'
        d = 'polynomial'
        size = 13
        # STRAIGHTEN USING 'non_parametric'
        sct.run(cmd)

        print 'apply warp to segmentation'
        #final_file_name = fname+'_straightttt_seg'+ext_fname
        final_file_name = fname+'_straight_seg'+ext_fname
        #cmd = 'sct_WarpImageMultiTransform 3 '+fname_seg+ext_fname+' '+final_file_name+' warp_curve2straight.nii.gz'
        cmd = 'sct_WarpImageMultiTransform 3 '+fname_smooth+ext_fname+' '+final_file_name+' warp_curve2straight.nii.gz'

        sct.run(cmd)

    elif fitting_method == 'smooth':
        # STRAIGHTEN USING POLYNOMIAL FITTING
        cmd = 'sct_straighten_spinalcord -i ' + file_name + ' -c ' + fname_centerline  + ' -f smooth -v 2'
        d = 'smooth'
        size = 13
        # STRAIGHTEN USING 'non_parametric'
        sct.run(cmd)

        if warp == 1:
            print 'apply warp to segmentation'
            #final_file_name = fname+'_straightttt_seg'+ext_fname
            final_file_name = fname+'_straight_seg'+ext_fname
            #cmd = 'sct_WarpImageMultiTransform 3 '+fname_seg+ext_fname+' '+final_file_name+' warp_curve2straight.nii.gz'
            cmd = 'sct_WarpImageMultiTransform 3 '+fname_centerline+' '+final_file_name+' warp_curve2straight.nii.gz'
        else:
            print 'Applying propseg to the straightened volume...'
            fname_straightened = fname+'_straight'
            cmd = 'sct_propseg -i ' + fname_straightened + ext_fname +' -o . -t t2 -centerline-binary'
            final_file_name = fname_straightened + '_centerline' + ext_fname

        sct.run(cmd)

    elif fitting_method == 'non-parametric':
        cmd = 'sct_straighten_spinalcord -i '+file_name+' -c '+fname_smooth+ext_fname+' -f non_parametrique -v 2'
        d = 'polynomial'
        size = 13
        # STRAIGHTEN USING 'non_parametric'
        sct.run(cmd)

        print 'apply warp to segmentation'
        #final_file_name = fname+'_straightttt_seg'+ext_fname
        final_file_name = fname+'_straight_seg'+ext_fname
        #cmd = 'sct_WarpImageMultiTransform 3 '+fname_seg+ext_fname+' '+final_file_name+' warp_curve2straight.nii.gz'
        cmd = 'sct_WarpImageMultiTransform 3 '+fname_smooth+ext_fname+' '+final_file_name+' warp_curve2straight.nii.gz'

        sct.run(cmd)
        

    print 'analyzing the straightened file'
    linear_fitting.returnSquaredErrors(final_file_name, d, size)

    os.remove(fname_centerline+ext_fname)
    os.remove(fname_smooth+ext_fname)
    os.remove('warp_curve2straight.nii.gz')
    os.remove('warp_straight2curve.nii.gz')

Example 30

Project: scansio-sonar-es
Source File: sonar_ssl.py
View license
def main(argv):
    parser = argparse.ArgumentParser()
    parser.add_argument('--server', default=DEFAULT_SERVER,
                        help=u'Elasticsearch hostname or IP (default {0})'.format(DEFAULT_SERVER))
    parser.add_argument('--port', default=DEFAULT_PORT,
                        help=u'Elasticsearch port (default {0})'.format(DEFAULT_PORT))
    args = parser.parse_args(argv[1:])

    workers = cpu_count()
    process_hosts_queue = Queue(maxsize=20000)
    process_certs_queue = Queue(maxsize=20000)
    update_hosts_queue = Queue(maxsize=20000)

    es = Elasticsearch([{u'host': args.server, u'port': args.port}], timeout=60)

    imported_sonar = es.search(index='scansio-sonar-ssl-imported', body={"size": 3000, "query": {"match_all": {}}})
    imported_files = []

    for f in imported_sonar['hits']['hits']:
        imported_files.append(f['_id'])

    scansio_feed = requests.get('https://scans.io/json')
    if scansio_feed.status_code == 200:
        feed = scansio_feed.json()
        if 'studies' in feed:
            for result in feed['studies']:
                if result['name'] == 'SSL Certificates':
                    for res in result['files']:
                        scans_file = res['name']
                        if scans_file.endswith('certs.gz'):
                            if scans_file.endswith('20131030-20150518_certs.gz'):
                                certfile = '20131030-20150518_certs.gz'
                            else:
                                certfile = scans_file[48:65]
                            if certfile not in imported_files:
                                logger.warning("We don't have {file} imported lets download it".format(file=certfile))
                                phys_file = requests.get(scans_file, stream=True)
                                # Need to do this cause some of the files are rather large
                                with open('{f}'.format(f=certfile), 'wb') as newcerts:
                                    for chunk in phys_file.iter_content(chunk_size=1024):
                                        if chunk:
                                            newcerts.write(chunk)
                                with open('{f}'.format(f=certfile), 'rb') as fh:
                                    h = hashlib.sha1()
                                    while True:
                                        data = fh.read(8192)
                                        if not data:
                                            break
                                        h.update(data)
                                sha1 = h.hexdigest()
                                if sha1 == res['fingerprint']:
                                    for w in xrange(workers):
                                        queue_es = Elasticsearch([{u'host': args.server, u'port': args.port}],
                                                                 timeout=60)
                                        p = Process(target=process_scan_certs, args=(process_certs_queue, queue_es))
                                        p.daemon = True
                                        p.start()
                                    logger.warning("Importing {f} at {d}".format(f=certfile, d=datetime.now()))
                                    parse_certs_file(certfile, process_certs_queue)
                                    for w in xrange(workers):
                                        process_certs_queue.put("DONE")
                                    logger.warning("Importing finished of {f} at {d}".format(f=certfile,
                                                                                             d=datetime.now()))
                                    es.index(index='scansio-sonar-ssl-imported', doc_type='imported-file', id=certfile,
                                             body={'file': certfile, 'imported_date': datetime.now(), 'sha1': sha1})
                                else:
                                    logger.error("SHA1 did not match for {f} it was not imported".format(f=certfile))
                                os.remove(certfile)
                                # Now we should optimize each index to max num segments of 1 to help with
                                # searching/sizing and just over all es happiness
                                refresh_es = Elasticsearch([{u'host': args.server, u'port': args.port}], timeout=60)
                                logger.warning("Optimizing index: {index} at {date}".
                                               format(index='passive-ssl-certs-sonar', date=datetime.now()))
                                refresh_es.indices.optimize(index='passive-ssl-certs-sonar',
                                                            max_num_segments=1, request_timeout=7500)
                        if scans_file.endswith('hosts.gz'):
                            hostsfile = scans_file[48:65]
                            if hostsfile not in imported_files:
                                logger.warning("We don't have {file} imported lets download it".format(file=hostsfile))
                                phys_host_file = requests.get(scans_file)
                                with open('{f}'.format(f=hostsfile), 'wb') as hf:
                                    for chunk in phys_host_file.iter_content(chunk_size=1024):
                                        if chunk:
                                            hf.write(chunk)
                                with open('{f}'.format(f=hostsfile), 'rb') as fh:
                                    h = hashlib.sha1()
                                    while True:
                                        data = fh.read(8192)
                                        if not data:
                                            break
                                        h.update(data)
                                sha1 = h.hexdigest()
                                if sha1 == res['fingerprint']:
                                    for w in xrange(workers):
                                        queue_es = Elasticsearch([{u'host': args.server, u'port': args.port}],
                                                                 timeout=60)
                                        p = Process(target=process_hosts, args=(process_hosts_queue, queue_es))
                                        p.daemon = True
                                        p.start()
                                    logger.warning("Importing {f} at {d}".format(f=hostsfile, d=datetime.now()))
                                    parse_hosts_file(hostsfile, process_hosts_queue)
                                    logger.warning("Hosts updated for {f} now going back and updating first_seen"
                                                   .format(f=hostsfile))
                                    #  this is kinda dirty but without looking up everything at insert time (slow)
                                    #  I don't know of a better way to do
                                    #  this based on the number of documents we will have
                                    update_es = Elasticsearch([{u'host': args.server, u'port': args.port}], timeout=60)
                                    # construct an elasticsearch query where the filter is looking for any entry
                                    # that is missing the field first_seen
                                    # adding a queue processing system here this should hopefully speed things up.
                                    for work in xrange(workers):
                                        p = Process(target=update_hosts, args=(update_hosts_queue, update_es))
                                        p.daemon = True
                                        p.start()

                                    q = {'size': 500, "query": {"match_all": {}},
                                         "filter": {"missing": {"field": "first_seen"}}}
                                    new_updates = update_es.search(index='passive-ssl-hosts-sonar', body=q)
                                    logger.warning("Numer of hosts to update is {count}"
                                                   .format(count=new_updates['hits']['total']))
                                    # Scan across all the documents missing the first_seen field and bulk update them
                                    missing_first_seen = scan(update_es, query=q, scroll='30m',
                                                              index='passive-ssl-hosts-sonar')
                                    for miss in missing_first_seen:
                                        update_hosts_queue.put(miss)
                                    # for some stupid reason I keep missing some at the end of the scan/scroll
                                    # so going to do them manually
                                    new_updates = update_es.search(index='passive-ssl-hosts-sonar', body=q)
                                    logger.warning("Numer of hosts to update is {count}"
                                                   .format(count=new_updates['hits']['total']))
                                    missing_first_seen_again = scan(update_es, query=q, scroll='30m',
                                                                    index='passive-ssl-hosts-sonar')
                                    bulk_update_missed = []
                                    for m in missing_first_seen_again:
                                        last_seen = m['_source']['last_seen']
                                        first_seen = last_seen
                                        action = {"_op_type": "update", "_index": "passive-ssl-hosts-sonar",
                                                  "_type": "host", "_id": m['_id'], "doc": {'first_seen': first_seen}}
                                        bulk_update_missed.append(action)
                                        if len(bulk_update_missed) == 500:
                                            bulk(update_es, bulk_update_missed)
                                            bulk_update_missed = []
                                    bulk(update_es, bulk_update_missed)
                                    logger.warning("Finished updating hosts at {d}".format(d=datetime.now()))
                                    for w in xrange(workers):
                                        update_hosts_queue.put("DONE")
                                    #  Get the remaining ones that are less than 500 and the loop has ended
                                    logger.warning("Importing finished of {f} at {d}".format(f=hostsfile,
                                                   d=datetime.now()))
                                    es.index(index='scansio-sonar-ssl-imported', doc_type='imported-file', id=hostsfile,
                                             body={'file': hostsfile, 'imported_date': datetime.now(), 'sha1': sha1})
                                    os.remove(hostsfile)
                                    refresh_es = Elasticsearch([{u'host': args.server, u'port': args.port}], timeout=60)
                                    # Now we should optimize each index to max num segments of 1 to help with
                                    # searching/sizing and just over all es happiness
                                    logger.warning("Optimizing index: {index} at {date}".
                                                   format(index='passive-ssl-hosts-sonar', date=datetime.now()))
                                    refresh_es.indices.optimize(index='passive-ssl-hosts-sonar',
                                                                max_num_segments=1, request_timeout=7500)
                                else:
                                    logger.error("SHA1 did not match for {f} it was not imported".format(f=hostsfile))
                                    os.remove(hostsfile)
        else:
            logger.error("The scans.io/json must have changed or is having issues. I didn't see any studies. Exiting")
            sys.exit()
    else:
        logger.error("There was an error connecting to https://scans.io. I did not get a 200 status code. Exiting")
        sys.exit()
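
Note how the example removes the downloaded hosts file on both paths: after a successful import, and when the SHA1 check fails. A minimal Python 3 sketch of that verify-then-remove pattern (the helper name, import_func, and the digest handling are illustrative, not part of the project):

import hashlib
import os

def import_and_discard(path, expected_sha1, import_func):
    """Verify a download's SHA1, import it on a match, and remove it either way."""
    sha1 = hashlib.sha1()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(1 << 20), b''):
            sha1.update(chunk)
    try:
        if sha1.hexdigest() != expected_sha1:
            return False
        import_func(path)
        return True
    finally:
        os.remove(path)  # the download is temporary in both outcomes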

Example 31

Project: spinalcordtoolbox
Source File: process.py
View license
def main():
    div = [3,5,7,9,11,13,15,19,23,25]
    nurbs_ctl_points = param.nurbs_ctl_points
    fitting_method = param.fitting_method
    sigma = param.sigma
    centerline = param.centerline
    s = 0
    d = 0
    exit = 0
    file_name = ''
    warp = param.warp

    try:
        opts, args = getopt.getopt(sys.argv[1:],'hi:M:n:d:s:c:v:w:r:h:')
    except getopt.GetoptError as err:
        print str(err)
        usage()
    for opt, arg in opts:
        if opt == '-h':
            usage()
        elif opt == '-i':
            file_name = arg
        elif opt == '-M':
            fitting_method = arg
        elif opt == '-n':
            nurbs_ctl_points = int(arg)
        elif opt == '-d':
            d = 1
            div = int(arg)
        elif opt == '-s':
            s = 1
            sigma = str(arg)
        elif opt == '-c':
            centerline = str(arg)
            fname_centerline = centerline
        elif opt == '-v':
            verbose = int(arg)
        elif opt == '-w':
            write = arg
        elif opt == '-r':
            remove = arg

    print 'file to be processed: ',file_name
    path, fname, ext_fname = sct.extract_fname(file_name)

    if fitting_method not in ('NURBS', 'polynomial', 'non_parametrique', 'smooth'):
        usage()

    if centerline is None:
        # Generating centerline using propseg (Warning: be sure propseg works well with the input file)
        print 'Applying propseg to get the centerline as a binary image...'
        fname_centerline = fname+'_centerline'
        cmd = 'sct_propseg -i ' + file_name + ' -o . -t t2 -centerline-binary'
        sct.run(cmd)
    else:
        path, fname_centerline, ext_fname = sct.extract_fname(centerline)

    if s != 0:
        print 'centerline smoothing...'
        fname_smooth = fname_centerline+'_smooth'
        print 'Gauss sigma: ', sigma
        cmd = 'fslmaths ' + fname_centerline + ' -s ' + str(sigma) + ' ' + fname_smooth + ext_fname
        sct.run(cmd)
        fname_centerline = fname_smooth+ext_fname
    else:
        fname_centerline = centerline

    if fitting_method == 'NURBS':
        if not d and not nurbs_ctl_points:
            for d in div:
                add = d - 1
                e = 1
                print 'generating the centerline...'

                # This loop checks whether NURBS will work with this value of d
                while e == 1:
                    add += 1
                    e = centerline.check_nurbs(add, None, None, fname_smooth+ext_fname)
                    if add > 30:
                        exit = 1
                        break
                if exit == 1:
                    break
                d = add
                size = e

                nurbs_ctl_points = int(size)/d
        elif not nurbs_ctl_points:
            nurbs_ctl_points = int(size)/d

            print 'straightening...  d = ', str(d)

            # STRAIGHTEN USING NURBS
            cmd = 'sct_straighten_spinalcord -i ' + file_name + ' -c ' + fname_centerline + ' -n ' + str(nurbs_ctl_points)
            sct.run(cmd)

            print 'apply warp to segmentation'
            #final_file_name = fname+'_straightttt_seg'+ext_fname
            final_file_name = fname + '_straight_seg' + ext_fname
            #cmd = 'sct_WarpImageMultiTransform 3 '+fname_seg+ext_fname+' '+final_file_name+' warp_curve2straight.nii.gz'
            cmd = 'sct_WarpImageMultiTransform 3 ' + fname_centerline + ' ' + final_file_name + ' warp_curve2straight.nii.gz'

            sct.run(cmd)

            print 'analyzing the straightened file'
            linear_fitting.returnSquaredErrors(final_file_name, d, size)

    elif fitting_method == 'polynomial':
        # STRAIGHTEN USING POLYNOMIAL FITTING
        cmd = 'sct_straighten_spinalcord -i ' + file_name + ' -c ' + fname_smooth + ext_fname + ' -f polynomial -v 2'
        d = 'polynomial'
        size = 13
        # STRAIGHTEN USING 'non_parametric'
        sct.run(cmd)

        print 'apply warp to segmentation'
        #final_file_name = fname+'_straightttt_seg'+ext_fname
        final_file_name = fname+'_straight_seg'+ext_fname
        #cmd = 'sct_WarpImageMultiTransform 3 '+fname_seg+ext_fname+' '+final_file_name+' warp_curve2straight.nii.gz'
        cmd = 'sct_WarpImageMultiTransform 3 '+fname_smooth+ext_fname+' '+final_file_name+' warp_curve2straight.nii.gz'

        sct.run(cmd)

    elif fitting_method == 'smooth':
        # STRAIGHTEN USING POLYNOMIAL FITTING
        cmd = 'sct_straighten_spinalcord -i ' + file_name + ' -c ' + fname_centerline  + ' -f smooth -v 2'
        d = 'smooth'
        size = 13
        # STRAIGHTEN USING 'non_parametric'
        sct.run(cmd)

        if warp == 1:
            print 'apply warp to segmentation'
            #final_file_name = fname+'_straightttt_seg'+ext_fname
            final_file_name = fname+'_straight_seg'+ext_fname
            #cmd = 'sct_WarpImageMultiTransform 3 '+fname_seg+ext_fname+' '+final_file_name+' warp_curve2straight.nii.gz'
            cmd = 'sct_WarpImageMultiTransform 3 '+fname_centerline+' '+final_file_name+' warp_curve2straight.nii.gz'
        else:
            print 'Applying propseg to the straightened volume...'
            fname_straightened = fname+'_straight'
            cmd = 'sct_propseg -i ' + fname_straightened + ext_fname +' -o . -t t2 -centerline-binary'
            final_file_name = fname_straightened + '_centerline' + ext_fname

        sct.run(cmd)

    elif fitting_method == 'non-parametric':
        cmd = 'sct_straighten_spinalcord -i '+file_name+' -c '+fname_smooth+ext_fname+' -f non_parametrique -v 2'
        d = 'polynomial'
        size = 13
        # STRAIGHTEN USING 'non_parametric'
        sct.run(cmd)

        print 'apply warp to segmentation'
        #final_file_name = fname+'_straightttt_seg'+ext_fname
        final_file_name = fname+'_straight_seg'+ext_fname
        #cmd = 'sct_WarpImageMultiTransform 3 '+fname_seg+ext_fname+' '+final_file_name+' warp_curve2straight.nii.gz'
        cmd = 'sct_WarpImageMultiTransform 3 '+fname_smooth+ext_fname+' '+final_file_name+' warp_curve2straight.nii.gz'

        sct.run(cmd)
        

    print 'analyzing the straightened file'
    linear_fitting.returnSquaredErrors(final_file_name, d, size)

    os.remove(fname_centerline+ext_fname)
    os.remove(fname_smooth+ext_fname)
    os.remove('warp_curve2straight.nii.gz')
    os.remove('warp_straight2curve.nii.gz')
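
The four os.remove calls at the end assume every intermediate file was actually created; if a step was skipped, cleanup itself raises. A more tolerant variant, sketched in Python 3 with illustrative file names:

import contextlib
import os

def cleanup(*paths):
    """Remove intermediate files, ignoring any that were never created."""
    for path in paths:
        with contextlib.suppress(FileNotFoundError):
            os.remove(path)

cleanup('t2_centerline.nii.gz', 't2_centerline_smooth.nii.gz',
        'warp_curve2straight.nii.gz', 'warp_straight2curve.nii.gz')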

Example 32

Project: plaso
Source File: sqlite.py
View license
  def Open(self, file_object, wal_file_object=None):
    """Opens a SQLite database file.

    Since pysqlite cannot read directly from a file-like object a temporary
    copy of the file is made. After creating a copy of the database file, this
    function sets up a connection with the database and determines the names
    of the tables.

    Args:
      file_object (dfvfs.FileIO): file-like object.
      wal_file_object (Optional[dfvfs.FileIO]): file-like object for the
          Write-Ahead Log (WAL) file.

    Raises:
      IOError: if the file-like object cannot be read.
      sqlite3.DatabaseError: if the database cannot be parsed.
      ValueError: if the file-like object is missing.
    """
    if not file_object:
      raise ValueError(u'Missing file object.')

    # TODO: Current design copies the entire file into a buffer
    # that is parsed by each SQLite parser. This is not very efficient,
    # especially when many SQLite parsers are run against a relatively
    # large SQLite database. This temporary file that is created should
    # be usable by all SQLite parsers so the file should only be read
    # once in memory and then deleted when all SQLite parsers have completed.

    # TODO: Change this into a proper implementation using APSW
    # and virtual filesystems when that will be available.
    # Info: http://apidoc.apsw.googlecode.com/hg/vfs.html#vfs and
    # http://apidoc.apsw.googlecode.com/hg/example.html#example-vfs
    # Until then, just copy the file into a tempfile and parse it.

    temporary_file = tempfile.NamedTemporaryFile(
        delete=False, dir=self._temporary_directory)

    try:
      self._CopyFileObjectToTemporaryFile(file_object, temporary_file)
      self._temp_db_file_path = temporary_file.name

    except IOError:
      os.remove(temporary_file.name)
      raise

    finally:
      temporary_file.close()

    if wal_file_object:
      # Create WAL file using same filename so it is available for
      # sqlite3.connect()
      temporary_filename = u'{0:s}-wal'.format(self._temp_db_file_path)
      temporary_file = open(temporary_filename, 'wb')
      try:
        self._CopyFileObjectToTemporaryFile(wal_file_object, temporary_file)
        self._temp_wal_file_path = temporary_filename

      except IOError:
        os.remove(temporary_filename)
        raise

      finally:
        temporary_file.close()

    self._database = sqlite3.connect(self._temp_db_file_path)
    try:
      self._database.row_factory = sqlite3.Row
      cursor = self._database.cursor()

      sql_results = cursor.execute(
          u'SELECT name FROM sqlite_master WHERE type="table"')

      self._table_names = [row[0] for row in sql_results]

    except sqlite3.DatabaseError as exception:
      self._database.close()
      self._database = None

      os.remove(self._temp_db_file_path)
      self._temp_db_file_path = u''
      if self._temp_wal_file_path:
        os.remove(self._temp_wal_file_path)
        self._temp_wal_file_path = u''

      logging.debug(
          u'Unable to parse SQLite database: {0:s} with error: {1:s}'.format(
              self._filename, exception))
      raise

    self._is_open = True
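
Because sqlite3 needs a real path, the example copies the stream into a NamedTemporaryFile(delete=False) and then takes responsibility for every os.remove on the error paths. The same lifecycle, condensed into a hypothetical helper:

import os
import sqlite3
import tempfile

def open_sqlite_copy(file_object):
    """Copy a file-like object to a named temp file so sqlite3 can open it."""
    tmp = tempfile.NamedTemporaryFile(delete=False, suffix='.db')
    try:
        for chunk in iter(lambda: file_object.read(65536), b''):
            tmp.write(chunk)
    except IOError:
        tmp.close()
        os.remove(tmp.name)  # don't leak the temp copy on a failed read
        raise
    tmp.close()
    conn = sqlite3.connect(tmp.name)
    try:
        conn.execute('SELECT name FROM sqlite_master WHERE type = "table"')
    except sqlite3.DatabaseError:
        conn.close()
        os.remove(tmp.name)  # mirror the example: clean up when parsing fails
        raise
    return conn, tmp.name  # caller must close conn and os.remove the path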

Example 33

Project: flud
Source File: TarfileUtils.py
View license
def concatenate(tarfile1, tarfile2):
    """
    Combines tarfile1 and tarfile2 into tarfile1.  tarfile1 is modified in the
    process, and tarfile2 is deleted.
    """
    gzipped = False
    if tarfile1[-7:] == ".tar.gz":
        gzipped = True
        f1 = gzip.GzipFile(tarfile1, 'r')
        tarfile1 = tarfile1[:-3]
        f1unzip = file(tarfile1, 'w')
        f1unzip.write(f1.read())
        f1unzip.close()
        f1.close()
        os.remove(tarfile1+".gz")

    f = open(tarfile1, "r+")
    done = False
    e = '\0'
    empty = tarfile.BLOCKSIZE*e
    emptyblockcount = 0
    while not done:
        header = f.read(tarfile.BLOCKSIZE)
        if header == "":
            print "error: end of archive not found"
            return
        elif header == empty:
            emptyblockcount += 1
            if emptyblockcount == 2:
                done = True
        else:
            emptyblockcount = 0
            fsize = int(header[124:135], 8)  # size field is octal; avoid eval
            skip = int(round(float(fsize) / float(tarfile.BLOCKSIZE) + 0.5))
            f.seek(skip*tarfile.BLOCKSIZE, 1)

    # truncate the file to the spot before the end-of-tar marker 
    trueend = f.tell() - (tarfile.BLOCKSIZE*2)
    f.seek(trueend)
    f.truncate()

    # now write the contents of the second tarfile into this spot
    if tarfile2[-7:] == ".tar.gz":
        f2 = gzip.GzipFile(tarfile2, 'r')
    else:
        f2 = open(tarfile2, "r")
    done = False
    while not done:
        header = f2.read(tarfile.BLOCKSIZE)
        if header == "":
            print "error: end of archive not found"
            f.seek(trueend)
            f.write(empty*2)
            return
        else:
            f.write(header)
            if header == empty:
                emptyblockcount += 1
                if emptyblockcount == 2:
                    done = True
            else:
                emptyblockcount = 0
                fsize = int(header[124:135], 8)  # size field is octal; avoid eval
                bsize = int(round(float(fsize) / float(tarfile.BLOCKSIZE) 
                    + 0.5))
                # XXX: break this up if large
                data = f2.read(bsize*tarfile.BLOCKSIZE)
                f.write(data)

    f2.close()
    f.close()

    if gzipped:
        f2 = gzip.GzipFile(tarfile1+".gz", 'wb')
        f = file(tarfile1, 'rb')
        f2.write(f.read())
        f2.close()
        f.close()
        os.remove(tarfile1)
    
    # and delete the second tarfile
    os.remove(tarfile2)
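
The decompression at the top and the recompression at the bottom use the same swap: write the converted data to a sibling path, then os.remove the file it replaces. A standalone sketch of that swap, streaming instead of read()-ing everything at once:

import gzip
import os
import shutil

def gunzip_in_place(path_gz):
    """Decompress foo.tar.gz to foo.tar and remove the compressed original."""
    assert path_gz.endswith('.gz')
    path = path_gz[:-3]
    with gzip.open(path_gz, 'rb') as src, open(path, 'wb') as dst:
        shutil.copyfileobj(src, dst)
    os.remove(path_gz)
    return path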

Example 34

Project: dill
Source File: test_file.py
View license
def test(strictio, fmode):
    # file exists, with same contents
    # read

    write_randomness()

    f = open(fname, "r")
    _f = dill.loads(dill.dumps(f, fmode=fmode))#, strictio=strictio))
    assert _f.mode == f.mode
    assert _f.tell() == f.tell()
    assert _f.read() == f.read()
    f.close()
    _f.close()

    # write

    f = open(fname, "w")
    f.write("hello")
    f_dumped = dill.dumps(f, fmode=fmode)#, strictio=strictio)
    f1mode = f.mode
    ftell = f.tell()
    f.close()
    f2 = dill.loads(f_dumped) #FIXME: fails due to pypy/issues/1233
    # TypeError: expected py_object instance instead of str
    f2mode = f2.mode
    f2tell = f2.tell()
    f2name = f2.name
    f2.write(" world!")
    f2.close()

    if fmode == dill.HANDLE_FMODE:
        assert open(fname).read() == " world!"
        assert f2mode == f1mode
        assert f2tell == 0
    elif fmode == dill.CONTENTS_FMODE:
        assert open(fname).read() == "hello world!"
        assert f2mode == f1mode
        assert f2tell == ftell
        assert f2name == fname
    elif fmode == dill.FILE_FMODE:
        assert open(fname).read() == "hello world!"
        assert f2mode == f1mode
        assert f2tell == ftell
    else:
        raise RuntimeError("Unknown file mode '%s'" % fmode)

    # append

    trunc_file()

    f = open(fname, "a")
    f.write("hello")
    f_dumped = dill.dumps(f, fmode=fmode)#, strictio=strictio)
    f1mode = f.mode
    ftell = f.tell()
    f.close()
    f2 = dill.loads(f_dumped)
    f2mode = f2.mode
    f2tell = f2.tell()
    f2.write(" world!")
    f2.close()

    assert f2mode == f1mode
    if fmode == dill.CONTENTS_FMODE:
        assert open(fname).read() == "hello world!"
        assert f2tell == ftell
    elif fmode == dill.HANDLE_FMODE:
        assert open(fname).read() == "hello world!"
        assert f2tell == ftell
    elif fmode == dill.FILE_FMODE:
        assert open(fname).read() == "hello world!"
        assert f2tell == ftell
    else:
        raise RuntimeError("Unknown file mode '%s'" % fmode)

    # file exists, with different contents (smaller size)
    # read

    write_randomness()

    f = open(fname, "r")
    fstr = f.read()
    f_dumped = dill.dumps(f, fmode=fmode)#, strictio=strictio)
    f1mode = f.mode
    ftell = f.tell()
    f.close()
    _flen = 150
    _fstr = write_randomness(number=_flen)

    if strictio:  # throw error if ftell > EOF
        assert throws(dill.loads, (f_dumped,), buffer_error)
    else:
        f2 = dill.loads(f_dumped)
        assert f2.mode == f1mode
        if fmode == dill.CONTENTS_FMODE:
            assert f2.tell() == _flen
            assert f2.read() == ""
            f2.seek(0)
            assert f2.read() == _fstr
            assert f2.tell() == _flen  # 150
        elif fmode == dill.HANDLE_FMODE:
            assert f2.tell() == 0
            assert f2.read() == _fstr
            assert f2.tell() == _flen  # 150
        elif fmode == dill.FILE_FMODE:
            assert f2.tell() == ftell  # 200
            assert f2.read() == ""
            f2.seek(0)
            assert f2.read() == fstr
            assert f2.tell() == ftell  # 200
        else:
            raise RuntimeError("Unknown file mode '%s'" % fmode)
        f2.close()

    # write

    write_randomness()

    f = open(fname, "w")
    f.write("hello")
    f_dumped = dill.dumps(f, fmode=fmode)#, strictio=strictio)
    f1mode = f.mode
    ftell = f.tell()
    f.close()
    fstr = open(fname).read()

    f = open(fname, "w")
    f.write("h")
    _ftell = f.tell()
    f.close()

    if strictio:  # throw error if ftell > EOF
        assert throws(dill.loads, (f_dumped,), buffer_error)
    else:
        f2 = dill.loads(f_dumped)
        f2mode = f2.mode
        f2tell = f2.tell()
        f2.write(" world!")
        f2.close()
        if fmode == dill.CONTENTS_FMODE:
            assert open(fname).read() == "h world!"
            assert f2mode == f1mode
            assert f2tell == _ftell
        elif fmode == dill.HANDLE_FMODE:
            assert open(fname).read() == " world!"
            assert f2mode == f1mode
            assert f2tell == 0
        elif fmode == dill.FILE_FMODE:
            assert open(fname).read() == "hello world!"
            assert f2mode == f1mode
            assert f2tell == ftell
        else:
            raise RuntimeError("Unknown file mode '%s'" % fmode)
        f2.close()

    # append

    trunc_file()

    f = open(fname, "a")
    f.write("hello")
    f_dumped = dill.dumps(f, fmode=fmode)#, strictio=strictio)
    f1mode = f.mode
    ftell = f.tell()
    f.close()
    fstr = open(fname).read()

    f = open(fname, "w")
    f.write("h")
    _ftell = f.tell()
    f.close()

    if strictio:  # throw error if ftell > EOF
        assert throws(dill.loads, (f_dumped,), buffer_error)
    else:
        f2 = dill.loads(f_dumped)
        f2mode = f2.mode
        f2tell = f2.tell()
        f2.write(" world!")
        f2.close()
        assert f2mode == f1mode
        if fmode == dill.CONTENTS_FMODE:
            # position of writes cannot be changed on some OSs
            assert open(fname).read() == "h world!"
            assert f2tell == _ftell
        elif fmode == dill.HANDLE_FMODE:
            assert open(fname).read() == "h world!"
            assert f2tell == _ftell
        elif fmode == dill.FILE_FMODE:
            assert open(fname).read() == "hello world!"
            assert f2tell == ftell
        else:
            raise RuntimeError("Unknown file mode '%s'" % fmode)
        f2.close()

    # file does not exist
    # read

    write_randomness()

    f = open(fname, "r")
    fstr = f.read()
    f_dumped = dill.dumps(f, fmode=fmode)#, strictio=strictio)
    f1mode = f.mode
    ftell = f.tell()
    f.close()

    os.remove(fname)

    if strictio:  # throw error if file DNE
        assert throws(dill.loads, (f_dumped,), dne_error)
    else:
        f2 = dill.loads(f_dumped)
        assert f2.mode == f1mode
        if fmode == dill.CONTENTS_FMODE:
            # FIXME: this fails on systems where f2.tell() always returns 0
            # assert f2.tell() == ftell # 200
            assert f2.read() == ""
            f2.seek(0)
            assert f2.read() == ""
            assert f2.tell() == 0
        elif fmode == dill.FILE_FMODE:
            assert f2.tell() == ftell  # 200
            assert f2.read() == ""
            f2.seek(0)
            assert f2.read() == fstr
            assert f2.tell() == ftell  # 200
        elif fmode == dill.HANDLE_FMODE:
            assert f2.tell() == 0
            assert f2.read() == ""
            assert f2.tell() == 0
        else:
            raise RuntimeError("Unknown file mode '%s'" % fmode)
        f2.close()

    # write

    write_randomness()

    f = open(fname, "w+")
    f.write("hello")
    f_dumped = dill.dumps(f, fmode=fmode)#, strictio=strictio)
    ftell = f.tell()
    f1mode = f.mode
    f.close()

    os.remove(fname)

    if strictio:  # throw error if file DNE
        assert throws(dill.loads, (f_dumped,), dne_error)
    else:
        f2 = dill.loads(f_dumped)
        f2mode = f2.mode
        f2tell = f2.tell()
        f2.write(" world!")
        f2.close()
        if fmode == dill.CONTENTS_FMODE:
            assert open(fname).read() == " world!"
            assert f2mode == 'w+'
            assert f2tell == 0
        elif fmode == dill.HANDLE_FMODE:
            assert open(fname).read() == " world!"
            assert f2mode == f1mode
            assert f2tell == 0
        elif fmode == dill.FILE_FMODE:
            assert open(fname).read() == "hello world!"
            assert f2mode == f1mode
            assert f2tell == ftell
        else:
            raise RuntimeError("Unknown file mode '%s'" % fmode)

    # append

    trunc_file()

    f = open(fname, "a")
    f.write("hello")
    f_dumped = dill.dumps(f, fmode=fmode)#, strictio=strictio)
    ftell = f.tell()
    f1mode = f.mode
    f.close()

    os.remove(fname)

    if strictio:  # throw error if file DNE
        assert throws(dill.loads, (f_dumped,), dne_error)
    else:
        f2 = dill.loads(f_dumped)
        f2mode = f2.mode
        f2tell = f2.tell()
        f2.write(" world!")
        f2.close()
        assert f2mode == f1mode
        if fmode == dill.CONTENTS_FMODE:
            assert open(fname).read() == " world!"
            assert f2tell == 0
        elif fmode == dill.HANDLE_FMODE:
            assert open(fname).read() == " world!"
            assert f2tell == 0
        elif fmode == dill.FILE_FMODE:
            assert open(fname).read() == "hello world!"
            assert f2tell == ftell
        else:
            raise RuntimeError("Unknown file mode '%s'" % fmode)

    # file exists, with different contents (larger size)
    # read

    write_randomness()

    f = open(fname, "r")
    fstr = f.read()
    f_dumped = dill.dumps(f, fmode=fmode)#, strictio=strictio)
    f1mode = f.mode
    ftell = f.tell()
    f.close()
    _flen = 250
    _fstr = write_randomness(number=_flen)

    # XXX: no safe_file: no way to be 'safe'?

    f2 = dill.loads(f_dumped)
    assert f2.mode == f1mode
    if fmode == dill.CONTENTS_FMODE:
        assert f2.tell() == ftell  # 200
        assert f2.read() == _fstr[ftell:]
        f2.seek(0)
        assert f2.read() == _fstr
        assert f2.tell() == _flen  # 250
    elif fmode == dill.HANDLE_FMODE:
        assert f2.tell() == 0
        assert f2.read() == _fstr
        assert f2.tell() == _flen  # 250
    elif fmode == dill.FILE_FMODE:
        assert f2.tell() == ftell  # 200
        assert f2.read() == ""
        f2.seek(0)
        assert f2.read() == fstr
        assert f2.tell() == ftell  # 200
    else:
        raise RuntimeError("Unknown file mode '%s'" % fmode)
    f2.close()  # XXX: other alternatives?

    # write

    f = open(fname, "w")
    f.write("hello")
    f_dumped = dill.dumps(f, fmode=fmode)#, strictio=strictio)
    f1mode = f.mode
    ftell = f.tell()

    fstr = open(fname).read()

    f.write(" and goodbye!")
    _ftell = f.tell()
    f.close()

    # XXX: no safe_file: no way to be 'safe'?

    f2 = dill.loads(f_dumped)
    f2mode = f2.mode
    f2tell = f2.tell()
    f2.write(" world!")
    f2.close()
    if fmode == dill.CONTENTS_FMODE:
        assert open(fname).read() == "hello world!odbye!"
        assert f2mode == f1mode
        assert f2tell == ftell
    elif fmode == dill.HANDLE_FMODE:
        assert open(fname).read() == " world!"
        assert f2mode == f1mode
        assert f2tell == 0
    elif fmode == dill.FILE_FMODE:
        assert open(fname).read() == "hello world!"
        assert f2mode == f1mode
        assert f2tell == ftell
    else:
        raise RuntimeError("Unknown file mode '%s'" % fmode)
    f2.close()

    # append

    trunc_file()

    f = open(fname, "a")
    f.write("hello")
    f_dumped = dill.dumps(f, fmode=fmode)#, strictio=strictio)
    f1mode = f.mode
    ftell = f.tell()
    fstr = open(fname).read()

    f.write(" and goodbye!")
    _ftell = f.tell()
    f.close()

    # XXX: no safe_file: no way to be 'safe'?

    f2 = dill.loads(f_dumped)
    f2mode = f2.mode
    f2tell = f2.tell()
    f2.write(" world!")
    f2.close()
    assert f2mode == f1mode
    if fmode == dill.CONTENTS_FMODE:
        assert open(fname).read() == "hello and goodbye! world!"
        assert f2tell == ftell
    elif fmode == dill.HANDLE_FMODE:
        assert open(fname).read() == "hello and goodbye! world!"
        assert f2tell == _ftell
    elif fmode == dill.FILE_FMODE:
        assert open(fname).read() == "hello world!"
        assert f2tell == ftell
    else:
        raise RuntimeError("Unknown file mode '%s'" % fmode)
    f2.close()
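
The test keeps re-creating and os.remove-ing the same fixture file between scenarios. In a pytest suite that cycle is usually packaged as a fixture; a minimal sketch (the file name and contents are placeholders):

import os
import pytest

@pytest.fixture
def scratch_file(tmp_path):
    """Create a throwaway file and remove it after the test, if it survives."""
    path = tmp_path / 'scratch.txt'
    path.write_text('x' * 200)
    yield str(path)
    if path.exists():  # the test itself may already have removed it
        os.remove(str(path))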

Example 35

Project: babble
Source File: test_posixpath.py
View license
    def test_isfile(self):
        self.assertIs(posixpath.isfile(test_support.TESTFN), False)
        f = open(test_support.TESTFN, "wb")
        try:
            f.write("foo")
            f.close()
            self.assertIs(posixpath.isfile(test_support.TESTFN), True)
            os.remove(test_support.TESTFN)
            os.mkdir(test_support.TESTFN)
            self.assertIs(posixpath.isfile(test_support.TESTFN), False)
            os.rmdir(test_support.TESTFN)
        finally:
            if not f.closed:
                f.close()
            try:
                os.remove(test_support.TESTFN)
            except os.error:
                pass
            try:
                os.rmdir(test_support.TESTFN)
            except os.error:
                pass

        self.assertRaises(TypeError, posixpath.isdir)

    def test_samefile(self):
        f = open(test_support.TESTFN + "1", "wb")
        try:
            f.write("foo")
            f.close()
            self.assertIs(
                posixpath.samefile(
                    test_support.TESTFN + "1",
                    test_support.TESTFN + "1"
                ),
                True
            )
            # If we don't have links, assume that os.stat doesn't return reasonable
            # inode information and thus, that samefile() doesn't work
            if hasattr(os, "symlink"):
                os.symlink(
                    test_support.TESTFN + "1",
                    test_support.TESTFN + "2"
                )
                self.assertIs(
                    posixpath.samefile(
                        test_support.TESTFN + "1",
                        test_support.TESTFN + "2"
                    ),
                    True
                )
                os.remove(test_support.TESTFN + "2")
                f = open(test_support.TESTFN + "2", "wb")
                f.write("bar")
                f.close()
                self.assertIs(
                    posixpath.samefile(
                        test_support.TESTFN + "1",
                        test_support.TESTFN + "2"
                    ),
                    False
                )
        finally:
            if not f.closed:
                f.close()
            try:
                os.remove(test_support.TESTFN + "1")
            except os.error:
                pass
            try:
                os.remove(test_support.TESTFN + "2")
            except os.error:
                pass

        self.assertRaises(TypeError, posixpath.samefile)
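
Each os.remove/os.rmdir in the finally blocks is wrapped in its own try/except os.error so that teardown can never mask the real test failure. The same best-effort helper, extracted (os.error is an alias of OSError):

import os

def remove_quietly(*paths):
    """Best-effort teardown: a failed cleanup must not hide the test result."""
    for path in paths:
        try:
            os.remove(path)
        except OSError:
            pass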

Example 36

Project: pymo
Source File: config.py
View license
def listen(port=DEFAULT_LOGGING_CONFIG_PORT):
    """
    Start up a socket server on the specified port, and listen for new
    configurations.

    These will be sent as a file suitable for processing by fileConfig().
    Returns a Thread object on which you can call start() to start the server,
    and which you can join() when appropriate. To stop the server, call
    stopListening().
    """
    if not thread:
        raise NotImplementedError, "listen() needs threading to work"

    class ConfigStreamHandler(StreamRequestHandler):
        """
        Handler for a logging configuration request.

        It expects a completely new logging configuration and uses fileConfig
        to install it.
        """
        def handle(self):
            """
            Handle a request.

            Each request is expected to be a 4-byte length, packed using
            struct.pack(">L", n), followed by the config file.
            Uses fileConfig() to do the grunt work.
            """
            import tempfile
            try:
                conn = self.connection
                chunk = conn.recv(4)
                if len(chunk) == 4:
                    slen = struct.unpack(">L", chunk)[0]
                    chunk = self.connection.recv(slen)
                    while len(chunk) < slen:
                        chunk = chunk + conn.recv(slen - len(chunk))
                    #Apply new configuration. We'd like to be able to
                    #create a StringIO and pass that in, but unfortunately
                    #1.5.2 ConfigParser does not support reading file
                    #objects, only actual files. So we create a temporary
                    #file and remove it later.
                    file = tempfile.mktemp(".ini")
                    f = open(file, "w")
                    f.write(chunk)
                    f.close()
                    try:
                        fileConfig(file)
                    except (KeyboardInterrupt, SystemExit):
                        raise
                    except:
                        traceback.print_exc()
                    os.remove(file)
            except socket.error, e:
                if type(e.args) != types.TupleType:
                    raise
                else:
                    errcode = e.args[0]
                    if errcode != RESET_ERROR:
                        raise

    class ConfigSocketReceiver(ThreadingTCPServer):
        """
        A simple TCP socket-based logging config receiver.
        """

        allow_reuse_address = 1

        def __init__(self, host='localhost', port=DEFAULT_LOGGING_CONFIG_PORT,
                     handler=None):
            ThreadingTCPServer.__init__(self, (host, port), handler)
            logging._acquireLock()
            self.abort = 0
            logging._releaseLock()
            self.timeout = 1

        def serve_until_stopped(self):
            import select
            abort = 0
            while not abort:
                rd, wr, ex = select.select([self.socket.fileno()],
                                           [], [],
                                           self.timeout)
                if rd:
                    self.handle_request()
                logging._acquireLock()
                abort = self.abort
                logging._releaseLock()

    def serve(rcvr, hdlr, port):
        server = rcvr(port=port, handler=hdlr)
        global _listener
        logging._acquireLock()
        _listener = server
        logging._releaseLock()
        server.serve_until_stopped()

    return threading.Thread(target=serve,
                            args=(ConfigSocketReceiver,
                                  ConfigStreamHandler, port))
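
The handler writes the received config to tempfile.mktemp(), a name-only API that is inherently racy, and removes the file once fileConfig() has read it. A sketch of the same write-parse-remove cycle using mkstemp, which creates the file atomically:

import logging.config
import os
import tempfile

def apply_config_bytes(data):
    """Write config bytes to a secure temp file, load it, then remove it."""
    fd, path = tempfile.mkstemp(suffix='.ini')
    try:
        with os.fdopen(fd, 'wb') as f:
            f.write(data)
        logging.config.fileConfig(path)
    finally:
        os.remove(path)  # remove even if fileConfig raised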

Example 37

Project: script.module.youtube.dl
Source File: f4m.py
View license
    def real_download(self, filename, info_dict):
        man_url = info_dict['url']
        requested_bitrate = info_dict.get('tbr')
        self.to_screen('[%s] Downloading f4m manifest' % self.FD_NAME)
        urlh = self.ydl.urlopen(man_url)
        man_url = urlh.geturl()
        # Some manifests may be malformed, e.g. prosiebensat1 generated manifests
        # (see https://github.com/rg3/youtube-dl/issues/6215#issuecomment-121704244
        # and https://github.com/rg3/youtube-dl/issues/7823)
        manifest = fix_xml_ampersands(urlh.read().decode('utf-8', 'ignore')).strip()

        doc = compat_etree_fromstring(manifest)
        formats = [(int(f.attrib.get('bitrate', -1)), f)
                   for f in self._get_unencrypted_media(doc)]
        if requested_bitrate is None or len(formats) == 1:
            # get the best format
            formats = sorted(formats, key=lambda f: f[0])
            rate, media = formats[-1]
        else:
            rate, media = list(filter(
                lambda f: int(f[0]) == requested_bitrate, formats))[0]

        base_url = compat_urlparse.urljoin(man_url, media.attrib['url'])
        bootstrap_node = doc.find(_add_ns('bootstrapInfo'))
        # From Adobe F4M 3.0 spec:
        # The <baseURL> element SHALL be the base URL for all relative
        # (HTTP-based) URLs in the manifest. If <baseURL> is not present, said
        # URLs should be relative to the location of the containing document.
        boot_info, bootstrap_url = self._parse_bootstrap_node(bootstrap_node, man_url)
        live = boot_info['live']
        metadata_node = media.find(_add_ns('metadata'))
        if metadata_node is not None:
            metadata = base64.b64decode(metadata_node.text.encode('ascii'))
        else:
            metadata = None

        fragments_list = build_fragments_list(boot_info)
        test = self.params.get('test', False)
        if test:
            # We only download the first fragment
            fragments_list = fragments_list[:1]
        total_frags = len(fragments_list)
        # For some akamai manifests we'll need to add a query to the fragment url
        akamai_pv = xpath_text(doc, _add_ns('pv-2.0'))

        ctx = {
            'filename': filename,
            'total_frags': total_frags,
            'live': live,
        }

        self._prepare_frag_download(ctx)

        dest_stream = ctx['dest_stream']

        write_flv_header(dest_stream)
        if not live:
            write_metadata_tag(dest_stream, metadata)

        base_url_parsed = compat_urllib_parse_urlparse(base_url)

        self._start_frag_download(ctx)

        frags_filenames = []
        while fragments_list:
            seg_i, frag_i = fragments_list.pop(0)
            name = 'Seg%d-Frag%d' % (seg_i, frag_i)
            query = []
            if base_url_parsed.query:
                query.append(base_url_parsed.query)
            if akamai_pv:
                query.append(akamai_pv.strip(';'))
            if info_dict.get('extra_param_to_segment_url'):
                query.append(info_dict['extra_param_to_segment_url'])
            url_parsed = base_url_parsed._replace(path=base_url_parsed.path + name, query='&'.join(query))
            frag_filename = '%s-%s' % (ctx['tmpfilename'], name)
            try:
                success = ctx['dl'].download(frag_filename, {'url': url_parsed.geturl()})
                if not success:
                    return False
                (down, frag_sanitized) = sanitize_open(frag_filename, 'rb')
                down_data = down.read()
                down.close()
                reader = FlvReader(down_data)
                while True:
                    try:
                        _, box_type, box_data = reader.read_box_info()
                    except DataTruncatedError:
                        if test:
                            # In tests, segments may be truncated, and thus
                            # FlvReader may not be able to parse the whole
                            # chunk. If so, write the segment as is
                            # See https://github.com/rg3/youtube-dl/issues/9214
                            dest_stream.write(down_data)
                            break
                        raise
                    if box_type == b'mdat':
                        dest_stream.write(box_data)
                        break
                if live:
                    os.remove(encodeFilename(frag_sanitized))
                else:
                    frags_filenames.append(frag_sanitized)
            except (compat_urllib_error.HTTPError, ) as err:
                if live and (err.code == 404 or err.code == 410):
                    # We didn't keep up with the live window. Continue
                    # with the next available fragment.
                    msg = 'Fragment %d unavailable' % frag_i
                    self.report_warning(msg)
                    fragments_list = []
                else:
                    raise

            if not fragments_list and not test and live and bootstrap_url:
                fragments_list = self._update_live_fragments(bootstrap_url, frag_i)
                total_frags += len(fragments_list)
                if fragments_list and (fragments_list[0][1] > frag_i + 1):
                    msg = 'Missed %d fragments' % (fragments_list[0][1] - (frag_i + 1))
                    self.report_warning(msg)

        self._finish_frag_download(ctx)

        for frag_file in frags_filenames:
            os.remove(encodeFilename(frag_file))

        return True
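
Live fragments are removed as soon as their mdat payload has been copied out, while VOD fragments are collected in frags_filenames and removed together after _finish_frag_download. A stripped-down sketch of that merge-then-remove tail (names are illustrative):

import os

def merge_fragments(dest_path, frag_paths, keep_fragments=False):
    """Concatenate downloaded fragments into dest_path, then delete them."""
    with open(dest_path, 'wb') as dest:
        for frag in frag_paths:
            with open(frag, 'rb') as f:
                dest.write(f.read())
    if not keep_fragments:
        for frag in frag_paths:
            os.remove(frag)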

Example 38

Project: entropy
Source File: rescue.py
View license
    @exclusivelock
    def _generate(self, entropy_client, inst_repo):
        """
        Solo Smart Generate command.
        """
        mytxt = "%s: %s"  % (
            brown(_("Attention")),
            darkred(_("the Installed Packages repository "
                      "will be re-generated using the "
                      "Source Package Manager")),
            )
        entropy_client.output(
            mytxt,
            level="warning",
            importance=1)

        mytxt = "%s: %s"  % (
            brown(_("Attention")),
            darkred(_("I am not joking, this is quite disruptive")),
            )
        entropy_client.output(
            mytxt,
            level="warning",
            importance=1)

        rc = entropy_client.ask_question(
            "  %s" % (_("Understood ?"),))
        if rc == _("No"):
            return 1
        rc = entropy_client.ask_question(
            "  %s" % (_("Really ?"),) )
        if rc == _("No"):
            return 1
        rc = entropy_client.ask_question(
            "  %s. %s" % (
                _("This is your last chance"),
                _("Ok?"),)
            )
        if rc == _("No"):
            return 1

        # clean caches
        spm = entropy_client.Spm()
        entropy_client.clear_cache()

        # try to get a list of current package ids, if possible
        try:
            package_ids = inst_repo.listAllPackageIds()
        except Exception as err:
            entropy.tools.print_traceback()
            entropy_client.output(
                "%s: %s" % (
                    darkred(_("Cannot read metadata")),
                    err,
                    ),
                level="warning"
            )
            package_ids = []

        # try to collect the currently installed revisions if possible
        # and do the same for the digest
        revisions_match = {}
        digest_match = {}
        for package_id in package_ids:
            try:
                atom = inst_repo.retrieveAtom(
                    package_id)
                revisions_match[atom] = inst_repo.retrieveRevision(
                    package_id)
                digest_match[atom] = inst_repo.retrieveDigest(
                    package_id)
            except Exception as err:
                entropy.tools.print_traceback()
                entropy_client.output(
                    "%s: %s" % (
                        darkred(_("Cannot read metadata")),
                        err,
                        ),
                    level="warning"
                )

        repo_path = entropy_client.installed_repository_path()
        entropy_client.output(
            darkgreen(_("Creating a backup of the current repository")),
            level="info",
            importance=1,
            header=darkred(" @@ "))
        entropy_client.output(
            repo_path,
            header="  ")

        inst_repo.commit()
        backed_up, msg = self._backup_repository(
            entropy_client, inst_repo, repo_path)
        if not backed_up:
            mytxt = "%s: %s" % (
                darkred(_("Cannot backup the repository")),
                brown("%s" % msg),)
            entropy_client.output(
                mytxt,
                level="error",
                importance=1,
                header=darkred(" @@ "))
            return 1

        entropy_client.close_installed_repository()
        # repository will be re-opened automagically
        # at the next access.
        try:
            os.remove(repo_path)
        except OSError as err:
            if err.errno != errno.ENOENT:
                mytxt = "%s: %s" % (
                    purple(_("Cannot delete old repository")),
                    brown("%s" % err),)
                entropy_client.output(
                    mytxt,
                    level="warning",
                    importance=1,
                    header=darkred(" @@ "))
                return 1

        entropy_client.output(
            purple(_("Initializing a new repository")),
            importance=1,
            header=darkred(" @@ "))
        entropy_client.output(
            brown(repo_path),
            header="  ")

        # open a repository at the old path, if repo_path is
        # not in place, Entropy will forward us to the in-RAM
        # database (for sqlite), which is not what we want.
        inst_repo.initializeRepository()
        inst_repo.commit()

        entropy_client.output(
            purple(_("Repository initialized, generating metadata")),
            importance=1,
            header=darkred(" @@ "))

        spm_packages = spm.get_installed_packages()
        total = len(spm_packages)
        count = 0
        # perf: reuse temp file
        tmp_fd, tmp_path = const_mkstemp(
            prefix="equo.rescue.generate")
        os.close(tmp_fd)

        for spm_package in spm_packages:
            count += 1

            # make sure the file is empty
            with open(tmp_path, "w") as tmp_f:
                tmp_f.flush()

            entropy_client.output(
                teal(spm_package),
                count=(count, total),
                back=True,
                header=brown(" @@ "))

            appended = spm.append_metadata_to_package(
                spm_package, tmp_path)
            if not appended:
                entropy_client.output(
                    "%s: %s" % (
                        purple(_("Invalid package")),
                        teal(spm_package),),
                    importance=1,
                    header=darkred(" @@ "))
                continue

            try:
                data = spm.extract_package_metadata(tmp_path)
            except Exception as err:
                entropy.tools.print_traceback()
                entropy_client.output(
                    "%s, %s: %s" % (
                        teal(spm_package),
                        purple(_("Metadata generation error")),
                        err,
                        ),
                    level="warning",
                    importance=1,
                    header=darkred(" @@ ")
                    )
                continue

            # Try to see if it's possible to use
            # the revision of a possible old db
            data['revision'] = etpConst['spmetprev']
            # create atom string
            atom = entropy.dep.create_package_atom_string(
                data['category'],
                data['name'],
                data['version'],
                data['versiontag'])

            # now see if a revision is available
            saved_rev = revisions_match.get(atom)
            if saved_rev is not None:
                data['revision'] = saved_rev

            # set digest to "0" to disable entropy dependencies
            # calculation check that forces the pkg to
            # be pulled in if digest differs from the one on the repo
            saved_digest = digest_match.get(atom, "0")
            data['digest'] = saved_digest

            package_id = inst_repo.addPackage(data,
                revision = data['revision'])
            inst_repo.storeInstalledPackage(package_id,
                etpConst['spmdbid'])

        try:
            os.remove(tmp_path)
        except OSError:
            pass

        entropy_client.output(
            purple(_("Indexing metadata, please wait...")),
            header=darkgreen(" @@ "), back=True
            )
        inst_repo.createAllIndexes()
        inst_repo.commit()
        entropy_client.output(
            purple(_("Repository metadata generation complete")),
            header=darkgreen(" @@ ")
            )
        return 0
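
Two removal idioms appear above: a bare try/except OSError around os.remove for the reusable temp file, and an errno check so that only a missing file is tolerated when deleting the old repository. The errno-checking form, isolated (on Python 3.3+ the same intent is often spelled except FileNotFoundError):

import errno
import os

def remove_if_exists(path):
    """Delete path; treat 'already gone' as success, re-raise anything else."""
    try:
        os.remove(path)
    except OSError as err:
        if err.errno != errno.ENOENT:
            raise  # permission errors and the like still surface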

Example 39

Project: tp-libvirt
Source File: virsh_desc.py
View license
def run(test, params, env):
    """
    Test command: virsh desc.

    This command allows showing or modifying the description or title of a domain.
    1). For a running domain, get/set description&title with options.
    2). For a shut-off domain, get/set description&title with options.
    3). For a persistent/transient domain, get/set description&title with options.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    options = params.get("desc_option", "")
    persistent_vm = params.get("persistent_vm", "yes")
    domain = params.get("domain", "name")
    if domain == "UUID":
        vm_name = vm.get_uuid()
    elif domain == "invalid_domain":
        vm_name = "domain_" + str(uuid.uuid1())
    elif domain == "invalid_uuid":
        vm_name = uuid.uuid1()

    def run_cmd(name, options, desc_str, status_error):
        """
        Run virsh desc command

        :return: cmd output
        """
        if "--edit" not in options:
            cmd_result = virsh.desc(name, options, desc_str, ignore_status=True,
                                    debug=True)
            output = cmd_result.stdout.strip()
            err = cmd_result.stderr.strip()
            status = cmd_result.exit_status
        else:
            logging.debug("Setting domain desc \"%s\" by --edit", desc_str)
            session = aexpect.ShellSession("sudo -s")
            try:
                session.sendline("virsh -c %s desc %s --edit" %
                                 (vm.connect_uri, name))
                session.sendline("dgg")
                session.sendline("dG")
                session.sendline(":%s/^$/" + desc_str + "/")
                session.send('\x1b')
                session.send('ZZ')
                match, text = session.read_until_any_line_matches(
                    [r"Domain description updated successfully"],
                    timeout=10, internal_timeout=1)
                session.close()
                if match == -1:
                    status = 0
                    output = "Domain description updated successfully"
                else:
                    status = 1
                    err = "virsh desc --edit fails"
            except:
                raise error.TestFail("Fail to create session.")
        if status_error == "no" and status:
            raise error.TestFail(err)
        elif status_error == "yes" and status == 0:
            raise error.TestFail("Expect fail, but run successfully.")
        return output

    def vm_state_switch():
        """
        Switch the vm state
        """
        if vm.is_dead():
            vm.start()
        if vm.is_alive():
            vm.destroy()

    def desc_check(name, desc_str, options):
        """
        Check the domain's description or title
        """
        ret = False
        state_switch = False
        if options.count("--config") and vm.is_persistent():
            state_switch = True
        # If both --live and --config are specified, the --config
        # option takes precedence on getting the current description
        # and both live configuration and config are updated while
        # setting the description.
        # This situation only happens when the vm is alive
        if options.count("--config") and options.count("--live"):
            # Just test options exclude --config (--live [--title])
            desc_check(name, desc_str, options.replace("--config", ""))
            # Just test options exclude --live (--config [--title])
            desc_check(name, desc_str, options.replace("--live", ""))
            ret = True
        else:
            if state_switch:
                vm_state_switch()
            # --new-desc and --edit option should not appear in check
            if options.count("--edit") or options.count("--new-desc"):
                output = run_cmd(name, "", "", "no")
            else:
                output = run_cmd(name, options, "", "no")
            if desc_str == output:
                logging.debug("Domain desc check succeeded.")
                ret = True
            else:
                raise error.TestFail("Domain desc check failed.")

        return ret

    def run_test():
        """
        Get/Set vm desc by running virsh desc command.
        """
        status_error = params.get("status_error", "no")
        desc_str = params.get("desc_str", "")
        # Test 1: get vm desc
        if "--edit" not in options:
            if "--new-desc" in options:
                run_cmd(vm_name, options, "", "yes")
            else:
                run_cmd(vm_name, options, "", status_error)
        # Test 2: set vm desc
        if options.count("--live") and vm.state() == "shut off":
            status_error = "yes"
        if len(desc_str) == 0 and status_error == "no":
            desc_str = "New Description/title for the %s vm" % vm.state()
            logging.debug("Use the default desc message: %s", desc_str)
        run_cmd(vm_name, options, desc_str, status_error)
        if status_error == "no":
            desc_check(vm_name, desc_str, options)

    # Prepare transient/persistent vm
    original_xml = vm.backup_xml()
    if persistent_vm == "no" and vm.is_persistent():
        vm.undefine()
    elif persistent_vm == "yes" and not vm.is_persistent():
        vm.define(original_xml)
    try:
        if vm.is_dead():
            vm.start()
        if domain == "ID":
            vm_name = vm.get_id()
        run_test()
        # Recover the vm and shut it off
        if persistent_vm == "yes" and domain != "ID":
            vm.define(original_xml)
            vm.destroy()
            run_test()
    finally:
        vm.destroy(False)
        virsh.define(original_xml)
        os.remove(original_xml)
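
The finally clause restores the domain from the backup XML and then os.remove's the dump, so a failed test can neither leave the guest misconfigured nor litter the disk. The guard shape, reduced to its essentials (the vm methods are assumed stand-ins for the avocado-vt helpers):

import os

def with_xml_backup(vm, run_test):
    """Snapshot the domain XML, run the test, always restore and clean up."""
    xml_path = vm.backup_xml()  # assumed helper: dumps the XML to a file
    try:
        run_test(vm)
    finally:
        vm.destroy(False)       # assumed helper: stop the domain quietly
        vm.define(xml_path)     # assumed helper: re-register from the dump
        os.remove(xml_path)     # the dump was only needed for recovery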

Example 40

Project: pyomo
Source File: model.py
View license
    def apply(self, *args, **kwds):
        """
        Generate a NL or LP file from Pyomo, and then do subsequent
        conversions.
        """

        import pyomo.scripting.convert

        capabilities = kwds.pop("capabilities", None)

        # all non-consumed keywords are assumed to be options
        # that should be passed to the writer.
        io_options = {}
        for kwd, value in iteritems(kwds):
            io_options[kwd] = value
        kwds.clear()

        # basestring is gone in Python 3.x, merged with str.
        if using_py3:
            compare_type = str
        else:
            compare_type = basestring

        if isinstance(args[2], compare_type):
            instance = None
        else:
            instance = args[2]

        if args[1] == ProblemFormat.cpxlp:
            problem_filename = pyutilib.services.TempfileManager.\
                               create_tempfile(suffix = '.pyomo.lp')
            if instance is not None:
                (problem_filename, symbol_map) = \
                    instance.write(filename=problem_filename,
                                   format=ProblemFormat.cpxlp,
                                   solver_capability=capabilities,
                                   io_options=io_options)
                return (problem_filename,), symbol_map
            else:

                #
                # I'm simply exposing a fatal issue with
                # this code path. How would we convert the
                # collected keywords into command-line
                # arguments that can be sent to the writer?
                #
                if len(io_options):
                    raise ValueError(
                        "The following io_options will be ignored "
                        "(please create a bug report):\n\t" +
                        "\n\t".join("%s = %s" % (k,v)
                                    for k,v in iteritems(io_options)))

                ans = pyomo.scripting.convert.\
                      pyomo2lp(['--output',problem_filename,args[2]])
                if ans.errorcode:
                    raise RuntimeError("pyomo2lp conversion "
                                       "returned nonzero error code "
                                       "(%s)" % ans.errorcode)

                model = ans.retval
                problem_filename = model.filename
                symbol_map = model.symbol_map
                return (problem_filename,),symbol_map

        elif args[1] == ProblemFormat.bar:
            problem_filename = pyutilib.services.TempfileManager.\
                               create_tempfile(suffix = '.pyomo.bar')
            if instance is not None:
                (problem_filename, symbol_map) = \
                    instance.write(filename=problem_filename,
                                   format=ProblemFormat.bar,
                                   solver_capability=capabilities,
                                   io_options=io_options)
                return (problem_filename,), symbol_map
            else:

                #
                # I'm simply exposing a fatal issue with
                # this code path. How would we convert the
                # collected keywords into command-line
                # arguments that can be sent to the writer?
                #
                if len(io_options):
                    raise ValueError(
                        "The following io_options will be ignored "
                        "(please create a bug report):\n\t" +
                        "\n\t".join("%s = %s" % (k,v)
                                    for k,v in iteritems(io_options)))

                ans = pyomo.scripting.convert.\
                      pyomo2bar(['--output',problem_filename,args[2]])
                if ans.errorcode:
                    raise RuntimeError("pyomo2bar conversion "
                                       "returned nonzero error code "
                                       "(%s)" % ans.errorcode)
                model = ans.retval
                problem_filename = model.filename
                symbol_map = model.symbol_map
                return (problem_filename,),symbol_map

        elif args[1] in [ProblemFormat.mps, ProblemFormat.nl]:
            if args[1] == ProblemFormat.nl:
                problem_filename = pyutilib.services.TempfileManager.\
                                   create_tempfile(suffix = '.pyomo.nl')
            else:
                assert args[1] == ProblemFormat.mps
                problem_filename = pyutilib.services.TempfileManager.\
                                   create_tempfile(suffix = '.pyomo.mps')
            if instance is not None:
                (problem_filename, symbol_map) = \
                    instance.write(filename=problem_filename,
                                   format=args[1],
                                   solver_capability=capabilities,
                                   io_options=io_options)
                return (problem_filename,), symbol_map
            else:

                #
                # I'm simply exposing a fatal issue with
                # this code path. How would we convert the
                # collected keywords into command-line
                # arguments that can be sent to the writer?
                #
                if len(io_options):
                    raise ValueError(
                        "The following io_options will be ignored "
                        "(please create a bug report):\n\t" +
                        "\n\t".join("%s = %s" % (k,v)
                                    for k,v in iteritems(io_options)))

                ans = pyomo.scripting.convert.\
                      pyomo2nl(['--output',problem_filename,args[2]])
                if ans.errorcode:
                    raise RuntimeError("pyomo2nl conversion "
                                       "returned nonzero error "
                                       "code (%s)" % ans.errorcode)
                model = ans.retval
                problem_filename = model.filename
                symbol_map = model.symbol_map

                if args[1] == ProblemFormat.nl:
                    return (problem_filename,),symbol_map
                #
                # Convert from NL to MPS
                #
                # TBD: We don't support a variable map file when going
                #      from NL to MPS within the PICO converter.
                # NOTE: this is a problem with the MPS writer that is
                #       provided by COIN-OR
                # NOTE: we should generalize this so it doesn't strictly
                #       depend on the PICO converter utility.
                #
                ans = self.pico_converter.apply(ProblemFormat.nl,
                                                ProblemFormat.mps,
                                                problem_filename)
                os.remove(problem_filename)
                return ans

        elif args[1] == ProblemFormat.osil:
            if False:
                problem_filename = pyutilib.services.TempfileManager.\
                               create_tempfile(suffix='.pyomo.osil')
                if instance:
                    (problem_filename, symbol_map) = \
                        instance.write(filename=problem_filename,
                                    format=ProblemFormat.osil,
                                    solver_capability=capabilities,
                                    io_options=io_options)
                    return (problem_filename,), None
            else:
                raise NotImplementedError(
                    "There is currently no "
                    "script conversion available from "
                    "Pyomo to OSiL format.")

Example 41

Project: entropy
Source File: fetch.py
    def _download_file(self, url, download_path, digest = None,
                       resume = True, package_id = None,
                       repository_id = None):
        """
        Internal method. Try to download the package file.
        """

        def do_stfu_rm(xpath):
            try:
                os.remove(xpath)
            except OSError:
                pass

        def do_get_md5sum(path):
            try:
                return entropy.tools.md5sum(path)
            except IOError:
                return None
            except OSError:
                return None

        download_path_dir = os.path.dirname(download_path)
        try:
            os.makedirs(download_path_dir, 0o755)
        except OSError as err:
            if err.errno != errno.EEXIST:
                const_debug_write(
                    __name__,
                    "_download_file.makedirs, %s, error: %s" % (
                        download_path_dir, err))
                return -1, 0, False

        fetch_abort_function = self._meta.get('fetch_abort_function')
        existed_before = False
        if os.path.isfile(download_path) and os.path.exists(download_path):
            existed_before = True

        avail_data = self._settings['repositories']['available']
        repo_data = avail_data[self._repository_id]

        basic_user = repo_data.get('username')
        basic_pwd = repo_data.get('password')
        https_validate_cert = not repo_data.get('https_validate_cert') == "false"

        fetch_intf = self._entropy._url_fetcher(
            url, download_path, resume = resume,
            abort_check_func = fetch_abort_function,
            http_basic_user = basic_user,
            http_basic_pwd = basic_pwd,
            https_validate_cert = https_validate_cert)

        if (package_id is not None) and (repository_id is not None):
            self._setup_differential_download(
                self._entropy._url_fetcher, url,
                resume, download_path, repository_id, package_id)

        data_transfer = 0
        resumed = False
        try:
            # make sure that we don't need to abort already
            # doing the check here avoids timeouts
            if fetch_abort_function is not None:
                fetch_abort_function()

            fetch_checksum = fetch_intf.download()
            data_transfer = fetch_intf.get_transfer_rate()
            resumed = fetch_intf.is_resumed()
        except (KeyboardInterrupt, InterruptError):
            return -100, data_transfer, resumed

        except NameError:
            raise

        except:
            if const_debug_enabled():
                self._entropy.output(
                    "fetch_file:",
                    importance = 1,
                    level = "warning",
                    header = red("   ## ")
                )
                entropy.tools.print_traceback()
            if (not existed_before) or (not resume):
                do_stfu_rm(download_path)
            return -1, data_transfer, resumed

        if fetch_checksum == UrlFetcher.GENERIC_FETCH_ERROR:
            # !! not found
            # maybe we already have it?
            # this handles the case where network is unavailable
            # but file is already downloaded
            fetch_checksum = do_get_md5sum(download_path)
            if (fetch_checksum != digest) or fetch_checksum is None:
                return -3, data_transfer, resumed

        elif fetch_checksum == UrlFetcher.TIMEOUT_FETCH_ERROR:
            # maybe we already have it?
            # this handles the case where network is unavailable
            # but file is already downloaded
            fetch_checksum = do_get_md5sum(download_path)
            if (fetch_checksum != digest) or fetch_checksum is None:
                return -4, data_transfer, resumed

        if digest and (fetch_checksum != digest):
            # not properly downloaded
            if (not existed_before) or (not resume):
                do_stfu_rm(download_path)
            return -2, data_transfer, resumed

        return 0, data_transfer, resumed
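
The do_stfu_rm() helper above is the classic best-effort delete: remove the file if it exists, stay silent if it does not. On Python 3.4+ the same idea reads as a one-liner, though note that suppressing only FileNotFoundError is narrower than the helper's blanket OSError, which also hides permission errors:

import contextlib
import os

def remove_quietly(path):
    # Best-effort delete: ignore a missing file, but let other
    # OSErrors (e.g. permission problems) propagate, unlike the
    # broader `except OSError: pass` in do_stfu_rm() above.
    with contextlib.suppress(FileNotFoundError):
        os.remove(path)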

Example 42

Project: entropy
Source File: client.py
    def _generic_post_handler(self, function_name, params, file_params,
        timeout):
        """
        Given a function name and the request data (dict format), do the actual
        HTTP request and return the response object to caller.
        WARNING: params and file_params dict keys must be ASCII strings only.

        @param function_name: name of the function that called this method
        @type function_name: string
        @param params: POST parameters
        @type params: dict
        @param file_params: mapping composed by file names as key and tuple
            composed by (file_name, file object) as values
        @type file_params: dict
        @param timeout: socket timeout
        @type timeout: float
        @return: tuple composed by the server response string or None
            (in case of empty response) and the HTTPResponse object (useful
                for checking response status)
        @rtype: tuple
        """
        if timeout is None:
            timeout = self._default_timeout_secs
        multipart_boundary = "---entropy.services,boundary---"
        request_path = self._request_path.rstrip("/") + "/" + function_name
        const_debug_write(__name__,
            "WebService _generic_post_handler, calling: %s at %s -- %s,"
            " tx_callback: %s, timeout: %s" % (self._request_host, request_path,
                params, self._transfer_callback, timeout,))
        connection = None
        try:
            if self._request_protocol == "http":
                connection = httplib.HTTPConnection(self._request_host,
                    timeout = timeout)
            elif self._request_protocol == "https":
                ssl_context = None
                if hasattr(ssl, 'create_default_context'):
                    ssl_context = ssl.create_default_context(
                        purpose = ssl.Purpose.CLIENT_AUTH)
                connection = httplib.HTTPSConnection(
                    self._request_host, timeout = timeout, context = ssl_context)
            else:
                raise WebService.RequestError("invalid request protocol",
                    method = function_name)

            headers = {
                "Accept": "text/plain",
                "User-Agent": self._generate_user_agent(function_name),
            }

            if file_params is None:
                file_params = {}
            # autodetect file parameters in params
            for k in list(params.keys()):
                if isinstance(params[k], (tuple, list)) \
                    and (len(params[k]) == 2):
                    f_name, f_obj = params[k]
                    if isinstance(f_obj, file):
                        file_params[k] = params[k]
                        del params[k]
                elif const_isunicode(params[k]):
                    # convert to raw string
                    params[k] = const_convert_to_rawstring(params[k],
                        from_enctype = "utf-8")
                elif not const_isstring(params[k]):
                    # invalid ?
                    if params[k] is None:
                        # will be converted to ""
                        continue
                    int_types = const_get_int()
                    supported_types = (float, list, tuple) + int_types
                    if not isinstance(params[k], supported_types):
                        raise WebService.UnsupportedParameters(
                            "%s is unsupported type %s" % (k, type(params[k])))
                    list_types = (list, tuple)
                    if isinstance(params[k], list_types):
                        # not supporting nested lists
                        non_str = [x for x in params[k] if not \
                            const_isstring(x)]
                        if non_str:
                            raise WebService.UnsupportedParameters(
                                "%s is unsupported type %s" % (k,
                                    type(params[k])))

            body = None
            if not file_params:
                headers["Content-Type"] = "application/x-www-form-urlencoded"
                encoded_params = urllib_parse.urlencode(params)
                data_size = len(encoded_params)
                if self._transfer_callback is not None:
                    self._transfer_callback(0, data_size, False)

                if data_size < 65536:
                    try:
                        connection.request("POST", request_path, encoded_params,
                            headers)
                    except socket.error as err:
                        raise WebService.RequestError(err,
                            method = function_name)
                else:
                    try:
                        connection.request("POST", request_path, None, headers)
                    except socket.error as err:
                        raise WebService.RequestError(err,
                            method = function_name)
                    sio = StringIO(encoded_params)
                    data_size = len(encoded_params)
                    while True:
                        chunk = sio.read(65535)
                        if not chunk:
                            break
                        try:
                            connection.send(chunk)
                        except socket.error as err:
                            raise WebService.RequestError(err,
                                method = function_name)
                        if self._transfer_callback is not None:
                            self._transfer_callback(sio.tell(),
                                data_size, False)
                # for both ways, send a signal through the callback
                if self._transfer_callback is not None:
                    self._transfer_callback(data_size, data_size, False)

            else:
                headers["Content-Type"] = "multipart/form-data; boundary=" + \
                    multipart_boundary
                body_file, body_fpath = self._encode_multipart_form(params,
                    file_params, multipart_boundary)
                try:
                    data_size = body_file.tell()
                    headers["Content-Length"] = str(data_size)
                    body_file.seek(0)
                    if self._transfer_callback is not None:
                        self._transfer_callback(0, data_size, False)

                    try:
                        connection.request("POST", request_path, None, headers)
                    except socket.error as err:
                        raise WebService.RequestError(err,
                            method = function_name)
                    while True:
                        chunk = body_file.read(65535)
                        if not chunk:
                            break
                        try:
                            connection.send(chunk)
                        except socket.error as err:
                            raise WebService.RequestError(err,
                                method = function_name)
                        if self._transfer_callback is not None:
                            self._transfer_callback(body_file.tell(),
                                data_size, False)
                    if self._transfer_callback is not None:
                        self._transfer_callback(data_size, data_size, False)
                finally:
                    body_file.close()
                    os.remove(body_fpath)

            try:
                response = connection.getresponse()
            except socket.error as err:
                raise WebService.RequestError(err,
                    method = function_name)
            const_debug_write(__name__, "WebService.%s(%s), "
                "response header: %s" % (
                    function_name, params, response.getheaders(),))
            total_length = response.getheader("Content-Length", "-1")
            try:
                total_length = int(total_length)
            except ValueError:
                total_length = -1
            outcome = const_convert_to_rawstring("")
            current_len = 0
            if self._transfer_callback is not None:
                self._transfer_callback(current_len, total_length, True)
            while True:
                try:
                    chunk = response.read(65536)
                except socket.error as err:
                    raise WebService.RequestError(err,
                        method = function_name)
                if not chunk:
                    break
                outcome += chunk
                current_len += len(chunk)
                if self._transfer_callback is not None:
                    self._transfer_callback(current_len, total_length, True)

            if self._transfer_callback is not None:
                self._transfer_callback(total_length, total_length, True)

            if const_is_python3():
                outcome = const_convert_to_unicode(outcome)
            if not outcome:
                return None, response
            return outcome, response

        except httplib.HTTPException as err:
            raise WebService.RequestError(err,
                method = function_name)
        finally:
            if connection is not None:
                connection.close()
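
The multipart branch above spools the encoded request body to a temporary file and relies on a finally block to close and os.remove() it, whatever happens while streaming. A minimal sketch of that pattern, with write_body and send as hypothetical stand-ins for _encode_multipart_form() and the connection logic:

import os
import tempfile

def post_spooled_body(write_body, send):
    # write_body and send are hypothetical stand-ins for the
    # encoding and connection steps in the example above.
    fd, path = tempfile.mkstemp(suffix=".body")
    try:
        with os.fdopen(fd, "wb") as body_file:
            write_body(body_file)
        with open(path, "rb") as body_file:
            send(body_file)
    finally:
        # Mirror of the finally block above: the spool file is
        # removed whether or not sending succeeded.
        os.remove(path)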

Example 43

Project: eden
Source File: filesync.py
    def pull(self, task, onconflict=None):
        """
            Fetch updates from the peer repository and import them
            into the local database (active pull)

            @param task: the synchronization task (sync_task Row)
            @param onconflict: callback for automatic conflict resolution

            @return: tuple (error, mtime), with error=None if successful,
                     else error=message, and mtime=modification timestamp
                     of the youngest record sent
        """

        repository = self.repository
        log = repository.log

        error = None
        result = None

        # Instantiate the target resource
        tablename = task.resource_name
        if tablename == "mixed":
            resource = None
            mixed = True
        else:
            try:
                resource = current.s3db.resource(tablename)
            except SyntaxError:
                result = log.FATAL
                error = msg = sys.exc_info()[1]
            mixed = False

        # Get input files
        if not result:
            input_files = self._input_files(task)
            if not input_files:
                result = log.SUCCESS
                msg = "No files to import"

        # Instantiate back-end
        if not result:
            adapter = None
            backend = repository.backend
            if not backend:
                backend = "eden"
            backend = "s3.sync_adapter.%s" % backend
            try:
                name = "S3SyncAdapter"
                api = getattr(__import__(backend, fromlist=[name]), name)
            except ImportError:
                result = log.FATAL
                error = msg = "Unsupported back-end: %s" % backend
            else:
                adapter = api(repository)

        # If any of the previous actions has produced a non-default result:
        if result:
            # Log the operation and return
            log.write(repository_id = repository.id,
                      resource_name = tablename,
                      transmission = log.OUT,
                      mode = log.PULL,
                      action = None,
                      remote = False,
                      result = result,
                      message = msg,
                      )
            return (error, None)

        # Set strategy and policies
        from ..s3import import S3ImportItem
        strategy = task.strategy
        conflict_policy = task.conflict_policy
        if not conflict_policy:
            conflict_policy = S3ImportItem.POLICY.MASTER
        update_policy = task.update_policy
        if not update_policy:
            update_policy = S3ImportItem.POLICY.NEWER
        if update_policy not in ("THIS", "OTHER"):
            last_sync = task.last_pull
        else:
            last_sync = None

        # Import the files
        error = None
        mtime = None

        for f in input_files:
            current.log.debug("FileSync: importing %s" % f)
            try:
                with open(f, "r") as source:
                    result = adapter.receive([source],
                                             resource,
                                             strategy = strategy,
                                             update_policy = update_policy,
                                             conflict_policy = conflict_policy,
                                             onconflict = onconflict,
                                             last_sync = last_sync,
                                             mixed = mixed,
                                             )
            except IOError:
                msg = sys.exc_info()[1]
                current.log.warning(msg)
                continue

            status = result["status"]

            # Log the operation
            log.write(repository_id = repository.id,
                      resource_name = tablename,
                      transmission = log.OUT,
                      mode = log.PULL,
                      action = "import %s" % f,
                      remote = result["remote"],
                      result = status,
                      message = result["message"],
                      )

            if status in (log.ERROR, log.FATAL):
                error = "Error while importing %s" % f
                current.log.error(error)
                mtime = None

            else:
                if resource:
                    mtime = resource.mtime
                else:
                    mtime = current.request.utcnow
                if task.delete_input_files:
                    try:
                        os.remove(f)
                    except os.error:
                        current.log.warning("FileSync: can not delete %s" % f)
                    else:
                        current.log.debug("FileSync: %s deleted" % f)

        return error, mtime
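
Here os.remove() is wrapped so that a failed delete is logged as a warning rather than raised, and success is logged at debug level; os.error is simply an alias of OSError. A stripped-down sketch of the same cleanup step:

import logging
import os

log = logging.getLogger("filesync")

def delete_input_file(path):
    # os.error is an alias of OSError, so either name works here;
    # a failed delete is logged instead of raised.
    try:
        os.remove(path)
    except OSError:
        log.warning("FileSync: cannot delete %s", path)
    else:
        log.debug("FileSync: %s deleted", path)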

Example 44

Project: eden
Source File: build.sahana.py
def dojs(dogis = False, warnings = True):
    """ Minifies the JavaScript """

    # Do we have local version of the Closure Compiler available?
    use_compressor = "jsmin" # Fallback
    try:
        import closure
        use_compressor = "closure"
        print "using local Closure Compiler"
    except Exception, E:
        print "No closure (%s)" % E
        print "Download from http://closure-compiler.googlecode.com/files/compiler-latest.zip"
        try:
            import closure_ws
            use_compressor = "closure_ws"
            print "Using Closure via Web Service - limited to files < 1Mb!"
        except ImportError:
            print "No closure_ws"

    if use_compressor == "closure":
        if not warnings:
            closure.extra_params = "--warning_level QUIET"
        minimize = closure.minimize
    elif use_compressor == "closure_ws":
        minimize = closure_ws.minimize
    elif use_compressor == "jsmin":
        minimize = jsmin.jsmin

    sourceDirectory = ".."
    configFilename = "sahana.js.cfg"
    outputFilename = "S3.min.js"

    # Merge JS files
    print "Merging Core libraries."
    merged = mergejs.run(sourceDirectory, None, configFilename)

    # Compress JS files
    print "Compressing - JS"
    minimized = minimize(merged)

    # Add license
    print "Adding license file."
    minimized = open("license.txt").read() + minimized

    # Print to output files
    print "Writing to %s." % outputFilename
    open(outputFilename, "w").write(minimized)

    # Remove old JS files
    print "Deleting %s." % outputFilename
    try:
        os.remove("../S3/%s" % outputFilename)
    except:
        pass

    # Move new JS files
    print "Moving new JS files"
    shutil.move(outputFilename, "../S3")

    # Bootstrap
    # print "Compressing Bootstrap"
    # sourceDirectoryBootstrap = ".."
    # configFilenameBootstrap = "sahana.js.bootstrap.cfg"
    # outputFilenameBootstrap = "bootstrap.min.js"
    # mergedBootstrap = mergejs.run(sourceDirectoryBootstrap,
                                  # None,
                                  # configFilenameBootstrap)
    # minimizedBootstrap = minimize(mergedBootstrap)
    # open(outputFilenameBootstrap, "w").write(minimizedBootstrap)
    # try:
        # os.remove("../%s" % outputFilenameBootstrap)
    # except:
        # pass
    # shutil.move(outputFilenameBootstrap, "..")

    # Calendar
    print "Compressing calendar"
    sourceDirectory = ".."
    configFilename = "sahana.js.calendar.cfg"
    outputFilename = "s3.ui.calendar.min.js"
    merged = mergejs.run(sourceDirectory,
                         None,
                         configFilename)
    minimized = minimize(merged)
    open(outputFilename, "w").write(minimized)
    try:
        os.remove("../S3/%s" % outputFilename)
    except:
        pass
    shutil.move(outputFilename, "../S3")

    # dataLists
    print "Compressing dataLists"
    sourceDirectory = ".."
    configFilename = "sahana.js.dataLists.cfg"
    outputFilename = "s3.dataLists.min.js"
    merged = mergejs.run(sourceDirectory,
                         None,
                         configFilename)
    minimized = minimize(merged)
    open(outputFilename, "w").write(minimized)
    try:
        os.remove("../S3/%s" % outputFilename)
    except:
        pass
    shutil.move(outputFilename, "../S3")

    # dataTables
    print "Compressing dataTables"
    sourceDirectory = ".."
    configFilename = "sahana.js.dataTables.cfg"
    outputFilename = "s3.dataTables.min.js"
    merged = mergejs.run(sourceDirectory,
                         None,
                         configFilename)
    minimized = minimize(merged)
    open(outputFilename, "w").write(minimized)
    try:
        os.remove("../S3/%s" % outputFilename)
    except:
        pass
    shutil.move(outputFilename, "../S3")

    configFilename = "sahana.js.dataTables_multi.cfg"
    outputFilename = "s3.dataTables.multi.min.js"
    merged = mergejs.run(sourceDirectory,
                                   None,
                                   configFilename)
    minimized = minimize(merged)
    open(outputFilename, "w").write(minimized)
    try:
        os.remove("../S3/%s" % outputFilename)
    except:
        pass
    shutil.move(outputFilename, "../S3")

    # pivotTables
    print "Compressing pivotTables"
    sourceDirectory = ".."
    configFilename = "sahana.js.pivotTables.cfg"
    outputFilename = "s3.pivotTables.min.js"
    merged = mergejs.run(sourceDirectory,
                         None,
                         configFilename)
    minimized = minimize(merged)
    open(outputFilename, "w").write(minimized)
    try:
        os.remove("../S3/%s" % outputFilename)
    except:
        pass
    shutil.move(outputFilename, "../S3")

    # timeplot
    print "Compressing timeplot"
    sourceDirectory = ".."
    configFilename = "sahana.js.timeplot.cfg"
    outputFilename = "s3.timeplot.min.js"
    merged = mergejs.run(sourceDirectory,
                         None,
                         configFilename)
    minimized = minimize(merged)
    open(outputFilename, "w").write(minimized)
    try:
        os.remove("../S3/%s" % outputFilename)
    except:
        pass
    shutil.move(outputFilename, "../S3")

    # groupedItems
    print "Compressing groupedItems"
    sourceDirectory = ".."
    configFilename = "sahana.js.groupeditems.cfg"
    outputFilename = "s3.groupeditems.min.js"
    merged = mergejs.run(sourceDirectory,
                         None,
                         configFilename)
    minimized = minimize(merged)
    open(outputFilename, "w").write(minimized)
    try:
        os.remove("../S3/%s" % outputFilename)
    except:
        pass
    shutil.move(outputFilename, "../S3")

    # ImageCrop
    print "Compressing ImageCrop"
    sourceDirectory = ".."
    configFilename = "sahana.js.imageCrop.cfg"
    outputFilename = "s3.imagecrop.widget.min.js"
    merged = mergejs.run(sourceDirectory,
                         None,
                         configFilename)
    minimized = minimize(merged)
    open(outputFilename, "w").write(minimized)
    try:
        os.remove("../S3/%s" % outputFilename)
    except:
        pass
    shutil.move(outputFilename, "../S3")

    # JSTree
    print "Compressing JSTree"
    sourceDirectory = ".."
    configFilename = "sahana.js.jstree.cfg"
    outputFilename = "s3.jstree.min.js"
    merged = mergejs.run(sourceDirectory,
                         None,
                         configFilename)
    minimized = minimize(merged)
    open(outputFilename, "w").write(minimized)
    try:
        os.remove("../S3/%s" % outputFilename)
    except:
        pass
    shutil.move(outputFilename, "../S3")

    # Chat
    print "Compressing Chat"
    sourceDirectory = ".."
    configFilename = "sahana.js.chat.cfg"
    outputFilename = "s3.chat.min.js"
    merged = mergejs.run(sourceDirectory,
                         None,
                         configFilename)
    minimized = minimize(merged)
    open(outputFilename, "w").write(minimized)
    try:
        os.remove("../S3/%s" % outputFilename)
    except:
        pass
    shutil.move(outputFilename, "../S3")

    # Guided Tour
    print "Compressing Guided Tour"
    sourceDirectory = ".."
    configFilename = "sahana.js.guidedTour.cfg"
    outputFilename = "s3.guidedtour.min.js"
    merged = mergejs.run(sourceDirectory,
                         None,
                         configFilename)
    minimized = minimize(merged)
    open(outputFilename, "w").write(minimized)
    try:
        os.remove("../S3/%s" % outputFilename)
    except:
        pass
    shutil.move(outputFilename, "../S3")

    # Single scripts
    for filename in ("add_person",
                     "cap",
                     "dvr",
                     "gis",
                     "gis.feature_crud",
                     "gis.fullscreen",
                     "gis.latlon",
                     "gis.loader",
                     "gis.pois",
                     "locationselector.widget",
                     "msg",
                     "popup",
                     "register_validation",
                     "select_person",
                     "sync",
                     "timeline",
                     "ui.contacts",
                     "ui.dashboard",
                     "ui.embeddedcomponent",
                     "ui.locationselector",
                     "work",
                     ):
        print "Compressing s3.%s.js" % filename
        inputFilename = os.path.join("..", "S3", "s3.%s.js" % filename)
        outputFilename = "s3.%s.min.js" % filename
        input = open(inputFilename, "r").read()
        minimized = minimize(input)
        open(outputFilename, "w").write(minimized)
        try:
            os.remove("../S3/%s" % outputFilename)
        except:
            pass
        shutil.move(outputFilename, "../S3")

    # Enable when needed
    full = False
    if full:
        for filename in ("spectrum",
                         "tag-it",
                         ):
            print "Compressing %s.js" % filename
            in_f = os.path.join("..", filename + ".js")
            out_f = os.path.join("..", filename + ".min.js")
            with open(in_f, "r") as inp:
                with open(out_f, "w") as out:
                    out.write(minimize(inp.read()))

        # Vulnerability
        print "Compressing Vulnerability"
        sourceDirectory = "../.."
        configFilename = "sahana.js.vulnerability.cfg"
        outputFilename = "s3.vulnerability.min.js"
        merged = mergejs.run(sourceDirectory,
                             None,
                             configFilename)
        minimized = minimize(merged)
        open(outputFilename, "w").write(minimized)
        try:
            os.remove("../../themes/Vulnerability/js/%s" % outputFilename)
        except:
            pass
        shutil.move(outputFilename, "../../themes/Vulnerability/js")
        print "Compressing Vulnerability GIS"
        sourceDirectory = "../.."
        configFilename = "sahana.js.vulnerability_gis.cfg"
        outputFilename = "OpenLayers.js"
        merged = mergejs.run(sourceDirectory,
                             None,
                             configFilename)
        minimized = minimize(merged)
        open(outputFilename, "w").write(minimized)
        try:
            os.remove("../../themes/Vulnerability/js/%s" % outputFilename)
        except:
            pass
        shutil.move(outputFilename, "../../themes/Vulnerability/js")

    if dogis:
        sourceDirectoryOpenLayers = "../gis/openlayers/lib"
        sourceDirectoryMGRS = "../gis"
        sourceDirectoryGeoExt = "../gis/GeoExt/lib"
        sourceDirectoryGxp = "../gis/gxp"
        configFilenameOpenLayers = "sahana.js.ol.cfg"
        configFilenameMGRS = "sahana.js.mgrs.cfg"
        configFilenameGeoExt = "sahana.js.geoext.cfg"
        configFilenameGxpMin = "sahana.js.gxp.cfg"
        configFilenameGxp2 = "sahana.js.gxp2.cfg"
        configFilenameGxpFull = "sahana.js.gxpfull.cfg"
        outputFilenameOpenLayers = "OpenLayers.js"
        outputFilenameMGRS = "MGRS.min.js"
        outputFilenameGeoExt = "GeoExt.js"
        outputFilenameGxp = "gxp.js"
        outputFilenameGxp2 = "gxp_upload.js"

        # Merge GIS JS Files
        print "Merging OpenLayers libraries."
        mergedOpenLayers = mergejs.run(sourceDirectoryOpenLayers,
                                       None,
                                       configFilenameOpenLayers)

        print "Merging MGRS libraries."
        mergedMGRS = mergejs.run(sourceDirectoryMGRS,
                                 None,
                                 configFilenameMGRS)

        print "Merging GeoExt libraries."
        mergedGeoExt = mergejs.run(sourceDirectoryGeoExt,
                                   None,
                                   configFilenameGeoExt)

        print "Merging gxp libraries."
        mergedGxpMin = mergejs.run(sourceDirectoryGxp,
                                   None,
                                   configFilenameGxpMin)
        mergedGxp2 = mergejs.run(sourceDirectoryGxp,
                                 None,
                                 configFilenameGxp2)
        mergedGxpFull = mergejs.run(sourceDirectoryGxp,
                                    None,
                                    configFilenameGxpFull)

        # Compress JS files
        print "Compressing - OpenLayers JS"
        if use_compressor == "closure_ws":
            # Limited to files < 1Mb!
            minimizedOpenLayers = jsmin.jsmin(mergedOpenLayers)
            #minimizedOpenLayers = jsmin.jsmin("%s\n%s" % (mergedOpenLayers,
            #                                              mergedOpenLayersExten))
        else:
            minimizedOpenLayers = minimize(mergedOpenLayers)
            #minimizedOpenLayers = minimize("%s\n%s" % (mergedOpenLayers,
            #                                           mergedOpenLayersExten))

        # OpenLayers extensions
        for filename in ["OWM.OpenLayers",
                         ]:
            inputFilename = os.path.join("..", "gis", "%s.js" % filename)
            outputFilename = "%s.min.js" % filename
            input = open(inputFilename, "r").read()
            minimized = minimize(input)
            open(outputFilename, "w").write(minimized)
            try:
                os.remove("../gis/%s" % outputFilename)
            except:
                pass
            shutil.move(outputFilename, "../gis")

        print "Compressing - MGRS JS"
        minimizedMGRS = minimize(mergedMGRS)

        print "Compressing - GeoExt JS"
        minimizedGeoExt = minimize("%s\n%s" % (mergedGeoExt,
                                               #mergedGeoExtux,
                                               mergedGxpMin))

        # GeoNamesSearchCombo
        inputFilename = os.path.join("..", "gis", "GeoExt", "ux", "GeoNamesSearchCombo.js")
        outputFilename = "GeoNamesSearchCombo.min.js"
        input = open(inputFilename, "r").read()
        minimized = minimize(input)
        open(outputFilename, "w").write(minimized)
        try:
            os.remove("../gis/GeoExt/ux/%s" % outputFilename)
        except:
            pass
        shutil.move(outputFilename, "../gis/GeoExt/ux")

        print "Compressing - gxp JS"
        minimizedGxp = minimize(mergedGxpFull)
        minimizedGxp2 = minimize(mergedGxp2)

        for filename in ("WMSGetFeatureInfo",
                         ):
            inputFilename = os.path.join("..", "gis", "gxp", "plugins", "%s.js" % filename)
            outputFilename = "%s.min.js" % filename
            input = open(inputFilename, "r").read()
            minimized = minimize(input)
            open(outputFilename, "w").write(minimized)
            try:
                os.remove("../gis/gxp/plugins/%s" % outputFilename)
            except:
                pass
            shutil.move(outputFilename, "../gis/gxp/plugins")

        for filename in ("GoogleEarthPanel",
                         "GoogleStreetViewPanel",
                         ):
            inputFilename = os.path.join("..", "gis", "gxp", "widgets", "%s.js" % filename)
            outputFilename = "%s.min.js" % filename
            input = open(inputFilename, "r").read()
            minimized = minimize(input)
            open(outputFilename, "w").write(minimized)
            try:
                os.remove("../gis/gxp/widgets/%s" % outputFilename)
            except:
                pass
            shutil.move(outputFilename, "../gis/gxp/widgets")

        # Add license
        #minimizedGIS = open("license.gis.txt").read() + minimizedGIS

        # Print to output files
        print "Writing to %s." % outputFilenameOpenLayers
        open(outputFilenameOpenLayers, "w").write(minimizedOpenLayers)

        print "Writing to %s." % outputFilenameMGRS
        open(outputFilenameMGRS, "w").write(minimizedMGRS)

        print "Writing to %s." % outputFilenameGeoExt
        open(outputFilenameGeoExt, "w").write(minimizedGeoExt)

        print "Writing to %s." % outputFilenameGxp
        open(outputFilenameGxp, "w").write(minimizedGxp)

        print "Writing to %s." % outputFilenameGxp2
        open(outputFilenameGxp2, "w").write(minimizedGxp2)

        # Move new JS files
        print "Deleting %s." % outputFilenameOpenLayers
        try:
            os.remove("../gis/%s" % outputFilenameOpenLayers)
        except:
            pass
        print "Moving new OpenLayers JS files"
        shutil.move(outputFilenameOpenLayers, "../gis")

        print "Deleting %s." % outputFilenameMGRS
        try:
            os.remove("../gis/%s" % outputFilenameMGRS)
        except:
            pass
        print "Moving new MGRS JS files"
        shutil.move(outputFilenameMGRS, "../gis")

        print "Deleting %s." % outputFilenameGeoExt
        try:
            os.remove("../gis/%s" % outputFilenameGeoExt)
        except:
            pass
        print "Moving new GeoExt JS files"
        shutil.move(outputFilenameGeoExt, "../gis")

        print "Deleting %s." % outputFilenameGxp
        try:
            os.remove("../gis/%s" % outputFilenameGxp)
        except:
            pass
        print "Moving new gxp JS files"
        shutil.move(outputFilenameGxp, "../gis")

        print "Deleting %s." % outputFilenameGxp2
        try:
            os.remove("../gis/%s" % outputFilenameGxp2)
        except:
            pass
        print "Moving new gxp2 JS files"
        shutil.move(outputFilenameGxp2, "../gis")
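
Nearly every section of this build script repeats the same three steps: write the minified output, os.remove() any stale copy at the destination, then shutil.move() the fresh file into place. A sketch of a helper that would factor that repetition out (the names here are illustrative, not part of the original script):

import os
import shutil

def replace_minified(output_filename, minimized, dest_dir):
    # Write the minified source, drop any stale copy at the
    # destination, then move the fresh file into place.
    with open(output_filename, "w") as out:
        out.write(minimized)
    try:
        os.remove(os.path.join(dest_dir, output_filename))
    except OSError:
        pass
    shutil.move(output_filename, dest_dir)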

Example 45

Project: HPOlib
Source File: test_runsolver_wrapper.py
    def test_get_trial_index_cv(self):
        try:
            os.remove("test_get_trial_index.pkl")
        except OSError:
            pass

        try:
            os.remove("test_get_trial_index.pkl.lock")
        except OSError:
            pass

        experiment = Experiment.Experiment(".", "test_get_trial_index", folds=5)
        params0 = {"x": "1"}
        params1 = {"x": "2"}
        params2 = {"x": "3"}
        params3 = {"x": "4"}
        params4 = {"x": "5"}

        trial_index0 = runsolver_wrapper.get_trial_index(experiment, 0, params0)
        self.assertEqual(trial_index0, 0)
        experiment.set_one_fold_running(trial_index0, 0)
        experiment.set_one_fold_complete(trial_index0, 0, 1, 1)
        self.assertEqual(trial_index0,
                         runsolver_wrapper.get_trial_index(experiment, 1, params0))
        experiment.set_one_fold_running(trial_index0, 1)
        experiment.set_one_fold_complete(trial_index0, 1, 1, 1)
        self.assertEqual(trial_index0,
                         runsolver_wrapper.get_trial_index(experiment, 2, params0))
        experiment.set_one_fold_running(trial_index0, 2)
        experiment.set_one_fold_complete(trial_index0, 2, 1, 1)
        self.assertEqual(trial_index0,
                         runsolver_wrapper.get_trial_index(experiment, 3, params0))
        experiment.set_one_fold_running(trial_index0, 3)
        experiment.set_one_fold_complete(trial_index0, 3, 1, 1)
        self.assertEqual(trial_index0,
                         runsolver_wrapper.get_trial_index(experiment, 4, params0))
        experiment.set_one_fold_running(trial_index0, 4)
        experiment.set_one_fold_complete(trial_index0, 4, 1, 1)

        trial_index1 = runsolver_wrapper.get_trial_index(experiment, 0, params1)
        self.assertEqual(trial_index1, 1)
        experiment.set_one_fold_running(trial_index1, 0)
        experiment.set_one_fold_complete(trial_index1, 0, 1, 1)
        self.assertEqual(trial_index1,
                         runsolver_wrapper.get_trial_index(experiment, 1, params1))
        experiment.set_one_fold_running(trial_index1, 1)
        experiment.set_one_fold_complete(trial_index1, 1, 1, 1)
        self.assertEqual(trial_index1,
                         runsolver_wrapper.get_trial_index(experiment, 2, params1))
        experiment.set_one_fold_running(trial_index1, 2)
        experiment.set_one_fold_complete(trial_index1, 2, 1, 1)
        self.assertEqual(trial_index1,
                         runsolver_wrapper.get_trial_index(experiment, 3, params1))
        experiment.set_one_fold_running(trial_index1, 3)
        experiment.set_one_fold_complete(trial_index1, 3, 1, 1)
        self.assertEqual(trial_index1,
                         runsolver_wrapper.get_trial_index(experiment, 4, params1))
        experiment.set_one_fold_running(trial_index1, 4)
        experiment.set_one_fold_complete(trial_index1, 4, 1, 1)

        trial_index2 = runsolver_wrapper.get_trial_index(experiment, 0, params2)
        self.assertEqual(trial_index2, 2)
        experiment.set_one_fold_running(trial_index2, 0)
        experiment.set_one_fold_complete(trial_index2, 0, 1, 1)

        trial_index3 = runsolver_wrapper.get_trial_index(experiment, 0, params3)
        self.assertEqual(trial_index3, 3)
        experiment.set_one_fold_running(trial_index3, 0)
        experiment.set_one_fold_complete(trial_index3, 0, 1, 1)

        trial_index4 = runsolver_wrapper.get_trial_index(experiment, 0, params4)
        self.assertEqual(trial_index4, 4)
        experiment.set_one_fold_running(trial_index4, 0)
        experiment.set_one_fold_complete(trial_index4, 0, 1, 1)

        self.assertEqual(trial_index2,
                         runsolver_wrapper.get_trial_index(experiment, 3, params2))
        self.assertEqual(trial_index4,
                         runsolver_wrapper.get_trial_index(experiment, 4, params4))

        # Since params1 were already evaluated, this should be a new trial_index
        trial_index_test1 = runsolver_wrapper.get_trial_index(experiment, 0, params1)
        self.assertEqual(trial_index_test1, 5)
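
The test begins by clearing artifacts left over from earlier runs, treating a missing file as success. A compact sketch of that setup idiom; on Python 3.8+, pathlib.Path(path).unlink(missing_ok=True) expresses the same thing:

import os

def remove_stale(*paths):
    # Clear leftovers from earlier runs; a missing file is fine.
    for path in paths:
        try:
            os.remove(path)
        except OSError:
            pass

remove_stale("test_get_trial_index.pkl",
             "test_get_trial_index.pkl.lock")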

Example 46

Project: tp-libvirt
Source File: virsh_domfstrim.py
def run(test, params, env):
    """
    Test domfstrim command, make sure that all supported options work well

    Test scenarios:
    1. fstrim without options
    2. fstrim with --minimum with large options
    3. fstrim with --minimum with small options

    Note: --mountpoint is not supported yet, so it is not tested here
    """
    def recompose_xml(vm_name, scsi_disk):
        """
        Add scsi disk, guest agent and scsi controller for guest
        :param: vm_name: Name of domain
        :param: scsi_disk: scsi_debug disk name
        """

        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        disk_path = scsi_disk
        # Add scsi disk xml
        scsi_disk = Disk(type_name="block")
        scsi_disk.device = "lun"
        scsi_disk.source = scsi_disk.new_disk_source(
            **{'attrs': {'dev': disk_path}})
        scsi_disk.target = {'dev': "sdb", 'bus': "scsi"}
        find_scsi = "no"
        controllers = vmxml.xmltreefile.findall("devices/controller")
        for controller in controllers:
            if controller.get("type") == "scsi":
                find_scsi = "yes"
        vmxml.add_device(scsi_disk)

        # Add scsi disk controller
        if find_scsi == "no":
            scsi_controller = Controller("controller")
            scsi_controller.type = "scsi"
            scsi_controller.index = "0"
            scsi_controller.model = "virtio-scsi"
            vmxml.add_device(scsi_controller)

        # Redefine guest
        vmxml.sync()

    if not virsh.has_help_command('domfstrim'):
        raise error.TestNAError("This version of libvirt does not support "
                                "the domfstrim test")

    try:
        utils_path.find_command("lsscsi")
    except utils_path.CmdNotFoundError:
        raise error.TestNAError("Command 'lsscsi' is missing. You must "
                                "install it.")

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    status_error = ("yes" == params.get("status_error", "no"))
    minimum = params.get("domfstrim_minimum")
    mountpoint = params.get("domfstrim_mountpoint")
    options = params.get("domfstrim_options", "")
    is_fulltrim = ("yes" == params.get("is_fulltrim", "yes"))
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    has_qemu_ga = not ("yes" == params.get("no_qemu_ga", "no"))
    start_qemu_ga = not ("yes" == params.get("no_start_qemu_ga", "no"))
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = 'testacl'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current"
                                    " libvirt version.")

    # Do backup for origin xml
    xml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    try:
        vm = env.get_vm(vm_name)
        if not vm.is_alive():
            vm.start()
        session = vm.wait_for_login()
        bef_list = session.cmd_output("fdisk -l|grep ^/dev|"
                                      "cut -d' ' -f1").split("\n")
        session.close()
        vm.destroy()

        # Load module and get scsi disk name
        utils.load_module("scsi_debug lbpu=1 lbpws=1")
        scsi_disk = utils.run("lsscsi|grep scsi_debug|"
                              "awk '{print $6}'").stdout.strip()
        # Create partition
        open("/tmp/fdisk-cmd", "w").write("n\np\n\n\n\nw\n")
        output = utils.run("fdisk %s < /tmp/fdisk-cmd"
                           % scsi_disk).stdout.strip()
        logging.debug("fdisk output %s", output)
        os.remove("/tmp/fdisk-cmd")
        # Format disk
        output = utils.run("mkfs.ext3 %s1" % scsi_disk).stdout.strip()
        logging.debug("output %s", output)
        # Add scsi disk in guest
        recompose_xml(vm_name, scsi_disk)

        # Prepare guest agent and start guest
        if has_qemu_ga:
            vm.prepare_guest_agent(start=start_qemu_ga)
        else:
            # Remove qemu-ga channel
            vm.prepare_guest_agent(channel=has_qemu_ga, start=False)

        guest_session = vm.wait_for_login()
        # Get new generated disk
        af_list = guest_session.cmd_output("fdisk -l|grep ^/dev|"
                                           "cut -d' ' -f1").split('\n')
        new_disk = "".join(list(set(bef_list) ^ set(af_list)))
        # Mount disk in guest
        guest_session.cmd("mkdir -p /home/test && mount %s /home/test" %
                          new_disk)

        # Do first fstrim before all to get original map for compare
        cmd_result = virsh.domfstrim(vm_name)
        if cmd_result.exit_status != 0:
            if not status_error:
                raise error.TestFail("Fail to do virsh domfstrim, error %s" %
                                     cmd_result.stderr)

        def get_diskmap_size():
            """
            Collect size from disk map
            :return: disk size
            """
            map_cmd = "cat /sys/bus/pseudo/drivers/scsi_debug/map"
            diskmap = utils.run(map_cmd).stdout.strip('\n\x00')
            sum = 0
            for i in diskmap.split(","):
                sum = sum + int(i.split("-")[1]) - int(i.split("-")[0])
            logging.debug("disk map (size:%d) is %s", sum, diskmap)
            return sum

        ori_size = get_diskmap_size()

        # Write data to disk
        dd_cmd = "dd if=/dev/zero of=/home/test/file bs=1048576 count=5; sync"
        guest_session.cmd(dd_cmd)

        def _full_mapped():
            """
            Do full map check
            :return: True or False
            """
            full_size = get_diskmap_size()
            return (ori_size < full_size)

        if not utils_misc.wait_for(_full_mapped, timeout=30):
            raise error.TestError("Scsi map is not updated after dd command.")

        full_size = get_diskmap_size()

        # Remove disk content in guest
        guest_session.cmd("rm -rf /home/test/*; sync")
        guest_session.close()

        def _trim_completed():
            """
            Do empty fstrim check
            :return: True or False
            """
            cmd_result = virsh.domfstrim(vm_name, minimum, mountpoint, options,
                                         unprivileged_user=unprivileged_user,
                                         uri=uri)
            if cmd_result.exit_status != 0:
                if not status_error:
                    raise error.TestFail("Fail to do virsh domfstrim, error %s"
                                         % cmd_result.stderr)
                else:
                    logging.info("Fail to do virsh domfstrim as expected: %s",
                                 cmd_result.stderr)
                    return True

            empty_size = get_diskmap_size()
            logging.info("Trimmed disk to %d", empty_size)

            if is_fulltrim:
                return empty_size <= ori_size
            else:
                # For partly trim will check later
                return False

        if not utils_misc.wait_for(_trim_completed, timeout=30):
            # Get result again to check partly fstrim
            empty_size = get_diskmap_size()
            if not is_fulltrim:
                if ori_size < empty_size <= full_size:
                    logging.info("Success to do fstrim partly")
                    return True
            raise error.TestFail("Fail to do fstrim. (original size: %s), "
                                 "(current size: %s), (full size: %s)" %
                                 (ori_size, empty_size, full_size))
        logging.info("Success to do fstrim")

    finally:
        # Do domain recovery
        vm.shutdown()
        xml_backup.sync()
        utils.unload_module("scsi_debug")
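
The fdisk step above writes its command script to a fixed /tmp path, feeds it to fdisk, then calls os.remove() on it. A Python 3.7+ sketch of an alternative using a named temporary file, so the path cannot collide and the removal is guaranteed by a finally block (the fdisk invocation itself is only illustrative):

import os
import subprocess
import tempfile

def run_fdisk_script(disk, commands="n\np\n\n\n\nw\n"):
    # A unique temp path instead of the fixed /tmp/fdisk-cmd, and a
    # finally block so the os.remove() happens even if fdisk fails.
    tmp = tempfile.NamedTemporaryFile("w", delete=False)
    try:
        tmp.write(commands)
        tmp.close()
        with open(tmp.name) as stdin:
            proc = subprocess.run(["fdisk", disk], stdin=stdin,
                                  capture_output=True, text=True)
        return proc.stdout
    finally:
        os.remove(tmp.name)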

Example 47

Project: AvsPmod
Source File: pyavs_avifile.py
    def __init__(self, script, filename='', fitHeight=None, fitWidth=None, oldFramecount=None, keepRaw=False):
        self.initialized = False
        self.error_message = None
        self.current_frame = -1
        self.pvidstream = LONG() # = PAVISTREAM()
        self.bmih = BITMAPINFOHEADER()
        self.pgf = LONG()
        self.pBits = None
        self.pInfo = None
        psi = AVISTREAMINFO()
        # Avisynth script properties
        self.Width = -1
        self.Height = -1
        self.Framecount = -1
        self.Framerate = -1.0
        self.FramerateNumerator = -1
        self.FramerateDenominator = -1
        self.Audiorate = -1.0
        self.Audiolength = -1
        #~ self.AudiolengthF = None
        self.Audiochannels = -1
        self.Audiobits = -1
        self.IsAudioFloat = None
        self.IsAudioInt = None
        self.IsRGB = None
        self.IsRGB24 = None
        self.IsRGB32 = None
        self.IsYUY2 = None
        self.IsYV12 = None
        self.IsYUV = None
        self.IsPlanar = None
        self.IsInterleaved = None
        self.IsFieldBased = None
        self.IsFrameBased = None
        self.GetParity  = None
        self.HasAudio = None
        self.HasVideo = None
        self.Colorspace = 'RGB32'
        
        # Open the avi file
        previewname = MakePreviewScriptFile(script, filename)
        AVIStreamOpenFromFile = AVIStreamOpenFromFileA
        if type(previewname) == type(u''):
            try:
                AVIStreamOpenFromFile = AVIStreamOpenFromFileW
            except NameError:
                pass
        if (AVIStreamOpenFromFile(ctypes.byref(self.pvidstream), previewname, streamtypeVIDEO, 0, OF_READ, NULL)!=0):
            if __debug__:
                print>>sys.stderr, _("Failed to open the AVI file")
                #~ print>>sys.stderr, filename
            #~ AVIFileExit()
            return
        else:
            if __debug__:
                print "AVI file opened successfully"
            pass
        
        # Read basic data from the avi file
        AVIStreamInfo(self.pvidstream, ctypes.byref(psi), ctypes.sizeof(psi))
        self.Framecount = psi.dwLength
        self.Width = psi.rcFrame.right-psi.rcFrame.left
        self.Height = psi.rcFrame.bottom-psi.rcFrame.top
        self.WidthActual, self.HeightActual = self.Width, self.Height
        self.Framerate = psi.dwRate/(psi.dwScale+0.0)
        
        if fitHeight is not None:
            fitWidthTemp = int(round(fitHeight *  (self.Width/float(self.Height))))
            if fitWidth is None:
                fitWidth = fitWidthTemp
            elif fitWidthTemp > fitWidth:
                fitHeight = int(round(fitWidth *  (self.Height/float(self.Width))))
            else:
                fitWidth = fitWidthTemp
            if fitHeight >= 4 and fitWidth >= 4:
                resizeScript = 'Import("%s").ConvertToRGB().BicubicResize(%i,%i)' % (previewname, fitWidth, fitHeight)
                previewname2 = MakePreviewScriptFile(resizeScript, filename)
                AVIStreamRelease(self.pvidstream)
                if (AVIStreamOpenFromFile(ctypes.byref(self.pvidstream), previewname2, streamtypeVIDEO, 0, OF_READ, NULL)!=0):
                    if __debug__:
                        print>>sys.stderr, _("Failed to open the AVI file")
                    return
                else:
                    if __debug__:
                        print "AVI file opened successfully"
                    pass
                # Set internal width and height variables appropriately
                self.Width, self.Height = fitWidth, fitHeight
                os.remove(previewname2)
            
        # Define the desired image format
        self.bmih.biSize = ctypes.sizeof(BITMAPINFOHEADER)
        self.bmih.biPlanes = 1
        self.bmih.biBitCount = 24
        self.bmih.biWidth = self.Width
        self.bmih.biHeight = self.Height
        self.bmih.biCompression = BI_RGB
        self.bmih.biSizeImage = 0
        self.bmih.biClrUsed = 0
        self.bmih.biClrImportant = 0
        # Change desired format to 32 bit (RGBA) if necessary
        bmihtemp = BITMAPINFOHEADER()
        bmihtemp_size = LONG(ctypes.sizeof(bmihtemp))
        AVIStreamReadFormat(self.pvidstream,0,ctypes.byref(bmihtemp),ctypes.byref(bmihtemp_size))
        if(bmihtemp.biBitCount==32):
            self.bmih.biBitCount = 32
        
        # Open the video stream
        self.pgf = AVIStreamGetFrameOpen(self.pvidstream,ctypes.byref(self.bmih))
        if self.pgf==-1:
            AVIStreamRelease(self.pvidstream)
            if __debug__:
                print>>sys.stderr, _("Failed to open the AVI frame")
            #~ AVIFileExit()
            return
        else:
            if __debug__:
                print "AVI frame opened successfully"
            pass
            
        self.AVIStreamGetFrameClose = AVIStreamGetFrameClose
        self.AVIStreamRelease = AVIStreamRelease
        self.AVIFileRelease = AVIFileRelease
        #~ self.AVIFileExit = AVIFileExit
        self.initialized = True
        os.remove(previewname)
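
Note that the constructor above only reaches os.remove(previewname) on the success path; the early returns leak the preview script file. A sketch of a wrapper that makes the cleanup unconditional, with make_preview and use as hypothetical stand-ins for MakePreviewScriptFile() and the AVIStream setup:

import os

def with_preview_script(make_preview, use):
    # make_preview and use are hypothetical stand-ins for the
    # preview-file creation and AVIStream setup above.
    previewname = make_preview()
    try:
        return use(previewname)
    finally:
        # Unconditional cleanup; the early returns in __init__
        # above skip os.remove(previewname) entirely.
        try:
            os.remove(previewname)
        except OSError:
            pass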

Example 48

Project: easytf2_mapper
Source File: createPrefab.py
def create(name, prefab_name, prefab_text, prefab_icon, rot_enabled, workshop_export,indexLine,index):
  if indexLine == 'END':
    insertBool = False
  else:
    insertBool = True

  py_list = []
  ent_py_list = []
  rot_py_list = []
  rot_ent_py_list = []
  txt_list = []
  ent_list = []
  num_list = []
  id_num_list = []
  id_value_list = []
  value_list = []
  compile_list = [
  """import os
import math

def rotatePoint(centerPoint,point,angle):
    angle = math.radians(angle)
    temp_point = point[0]-centerPoint[0] , point[1]-centerPoint[1]
    temp_point = ( temp_point[0]*math.cos(angle)-temp_point[1]*math.sin(angle) , temp_point[0]*math.sin(angle)+temp_point[1]*math.cos(angle))
    temp_point = temp_point[0]+centerPoint[0] , temp_point[1]+centerPoint[1]
    return temp_point

def createTile(posx, posy, id_num, world_id_num, entity_num, placeholder_list, rotation, level):
    
    looplist = '1'
    values=[]#Values are all of the lines of a prefab that have the vertex coords
""",

  "#INSERT_OPEN_FILE\n",

  """
    lines = f.readlines() #gathers each line of the prefab and numbers them
""",

  "#INSERT_ROT_IF\n",

  "#INSERT_PY_LIST\n",
  
  "#INSERT_ROT_CODE\n",

  "#INSERT_VAR_COUNT\n",

  """
    values = "".join(lines)#converting list to string
    ogvalues = "".join(lines)

    normal_list,axislist,negaxislist,vaxis,uaxis=[],['1 0 0 1','0 1 0 1','0 0 1 1'],['-1 0 0 1','0 -1 0 1','0 0 -1 1'],0,0
    def evaluate(coords):
        dist_x,dist_y,dist_z = abs(coords[0]),abs(coords[1]),abs(coords[2]),
        if dist_x >= dist_y and dist_x >= dist_z:
            return axislist[0]
        if dist_y >= dist_z:
            return axislist[1]
        return axislist[2]

    def get_normal(coord_list):
        vector_a = (coord_list[1][0]-coord_list[0][0],coord_list[1][1]-coord_list[0][1],coord_list[1][2]-coord_list[0][2])
        vector_b = (coord_list[2][0]-coord_list[0][0],coord_list[2][1]-coord_list[0][1],coord_list[2][2]-coord_list[0][2])
        
        normal = (vector_a[1]*vector_b[2]-vector_a[2]*vector_b[1],vector_a[2]*vector_b[0]-vector_a[0]*vector_b[2],vector_a[0]*vector_b[1]-vector_a[1]*vector_b[0])
        return normal
    
    for normal_num in range(1,var_count+1,3):
        normal_list=[]
        for i in range(3):
            normal_list.append([])
            for var in ["x", "y", "z"]:
                normal_list[i].append(eval(var+str(normal_num+i)))
        coords = get_normal(normal_list)  
        response = evaluate(coords)
        if response == axislist[0]:
            uaxis = axislist[1]
        else:
            uaxis = axislist[0]
        if response == axislist[2]:
            vaxis = negaxislist[1]
        else:
            vaxis = negaxislist[2]
        values = values.replace('AXIS_REPLACE_U',uaxis,1)
        values = values.replace('AXIS_REPLACE_V',vaxis,1)
    
    for i in range(ogvalues.count("world_idnum")):
        values = values.replace('world_idnum', str(world_id_num), 1)
        world_id_num += 1
    
    for var in ["x", "y", "z"]:
        for count in range(1,var_count+1):
            string = var + str(count)
            string_var = str(eval(var + str(count)))

            if var == "z":
                values = values.replace(string + ")",string_var + ")") #we need to do this or else it will mess up on 2 digit numbers
            else:
                values = values.replace(string + " ",string_var + " ")

    for i in range(ogvalues.count('id_num')):
        values = values.replace('id_num', str(id_num), 1)
        id_num = id_num+1
        if "ROTATION_RIGHT" in values:
            if rotation == 0:
                values = values.replace("ROTATION_RIGHT","0 0 0",1)
            elif rotation == 1:
                values = values.replace("ROTATION_RIGHT","0 270 0",1)
            elif rotation == 2:
                values = values.replace("ROTATION_RIGHT","0 180 0",1)
            elif rotation == 3:
                values = values.replace("ROTATION_RIGHT","0 90 0",1)
        if "ROTATION_UP" in values:
            if rotation == 0:
                values = values.replace("ROTATION_UP","0 90 0",1)
            elif rotation == 1:
                values = values.replace("ROTATION_UP","0 0 0",1)
            elif rotation == 2:
                values = values.replace("ROTATION_UP","0 270 0",1)
            elif rotation == 3:
                values = values.replace("ROTATION_UP","0 180 0",1)
        if "ROTATION_LEFT" in values:
            if rotation == 0:
                values = values.replace("ROTATION_LEFT","0 180 0",1)
            elif rotation == 1:
                values = values.replace("ROTATION_LEFT","0 90 0",1)
            elif rotation == 2:
                values = values.replace("ROTATION_LEFT","0 0 0",1)
            elif rotation == 3:
                values = values.replace("ROTATION_LEFT","0 270 0",1)
        if "ROTATION_DOWN" in values:
            if rotation == 0:
                values = values.replace("ROTATION_DOWN","0 270 0",1)
            elif rotation == 1:
                values = values.replace("ROTATION_DOWN","0 180 0",1)
            elif rotation == 2:
                values = values.replace("ROTATION_DOWN","0 90 0",1)
            elif rotation == 3:
                values = values.replace("ROTATION_DOWN","0 0 0",1)

    values = values.replace('"[0 0 0 1] 0.25"','"[1 1 1 1] 0.25"')
    values = values.replace('"[0 0 1 0] 0.25"','"[1 1 1 1] 0.25"')
    values = values.replace('"[0 1 0 0] 0.25"','"[1 1 1 1] 0.25"')       
    values = values.replace('"[1 0 0 0] 0.25"','"[1 1 1 1] 0.25"')
        
""",

  "#INSERT_ENT_CODE\n",
  
  ]

  ent_code =["#INSERT_ENT_OPEN_FILE\n",

             """
    lines_ent = g.readlines()
""",

             "#INSERT_ROT_IF\n",

             "#INSERT_ENT_PY_LIST\n",

             "#INSERT_ROT_ENT_CODE\n",
             
             "#INSERT_ENT_VAR_COUNT\n",

"""
    ent_values = "".join(lines_ent)
    ent_values_split = ent_values.split("\\"")
    valcount = "".join(lines_ent)

    for item in ent_values_split:
        if "entity_name" in item or "parent_name" in item or "door_large" in item:
            placeholder_list.append(item)

    for i in range(valcount.count('world_idnum')):
        ent_values = ent_values.replace('world_idnum', str(world_id_num), 1)
        world_id_num += 1

    for var in ["px", "py", "pz"]:
        for count in range(1,ent_var_count+1):
            string = var + str(count)
            string_var = str(eval(var + str(count)))

            if var == "pz":
                ent_values = ent_values.replace(string + "\\"",string_var + "\\"") #we need to do this or else it will mess up on 2 digit numbers
            else:
                ent_values = ent_values.replace(string + " ",string_var + " ")
                
    for var in ["x", "y", "z"]:
        for count in range(1,var_count+1):
            try:
                string = var + str(count)
                string_var = str(eval(var + str(count)))
                if var == "z":
                    ent_values = ent_values.replace(string + ")",string_var + ")") #we need to do this or else it will mess up on 2 digit numbers
                else:
                    ent_values = ent_values.replace(string + " ",string_var + " ")
            except:
                pass

    for i in range(valcount.count('id_num')):
        ent_values = ent_values.replace('id_num', str(id_num), 1)
        id_num = id_num+1

    for i in range(int(valcount.count('laser_target')/2)):
        if "laser_target_plac" in ent_values:
            ent_values = ent_values.replace("laser_target_plac", "laser_target" + str(entity_num), 2)
            entity_num += 1

    for i in range(int(valcount.count('sound'))):
        if "sound_plac" in ent_values:
            ent_values = ent_values.replace("sound_plac", "AmbSound"+str(entity_num), 2)
            ent_values = ent_values.replace("relay_plac", "LogicRelay"+str(entity_num),2)
            entity_num += 1

    for i in range(valcount.count("entity_name")):
        try:
            ent_values = ent_values.replace("entity_name", "entity" + str(entity_num), 1)
            ent_values = ent_values.replace("entity_same", "entity" + str(entity_num), 1)
            if "parent_name" in placeholder_list[entity_num]:
                ent_values = ent_values.replace("parent_name", "entity" + str(entity_num), 1)
                placeholder_list.remove(placeholder_list[entity_num])
            
            if "door_large" in ent_values:
                ent_values = ent_values.replace("door_large", "door_large" + str(entity_num), 4)
            if "\\"respawn_name\\"" in ent_values:
                ent_values = ent_values.replace("\\"respawn_name\\"", "\\"respawn_name" + str(entity_num) + "\\"", 2)
            entity_num += 1
        except Exception as e:
            print(str(e))

    for i in range(valcount.count("ROTATION")):
        if "ROTATION_RIGHT" in ent_values:
            if rotation == 0:
                ent_values = ent_values.replace("ROTATION_RIGHT","0 0 0",1)
            elif rotation == 1:
                ent_values = ent_values.replace("ROTATION_RIGHT","0 270 0",1)
            elif rotation == 2:
                ent_values = ent_values.replace("ROTATION_RIGHT","0 180 0 ",1)
            elif rotation == 3:
                ent_values = ent_values.replace("ROTATION_RIGHT","0 90 0",1)
        if "ROTATION_LEFT" in ent_values:
            if rotation == 0:
                ent_values = ent_values.replace("ROTATION_LEFT","0 180 0",1)
            elif rotation == 1:
                ent_values = ent_values.replace("ROTATION_LEFT","0 90 0",1)
            elif rotation == 2:
                ent_values = ent_values.replace("ROTATION_LEFT","0 0 0",1)
            elif rotation == 3:
                ent_values = ent_values.replace("ROTATION_LEFT","0 270 0",1)
        if "ROTATION_DOWN" in ent_values:
            if rotation == 0:
                ent_values = ent_values.replace("ROTATION_DOWN","0 270 0",1)
            elif rotation == 1:
                ent_values = ent_values.replace("ROTATION_DOWN","0 180 0",1)
            elif rotation == 2:
                ent_values = ent_values.replace("ROTATION_DOWN","0 90 0",1)
            elif rotation == 3:
                ent_values = ent_values.replace("ROTATION_DOWN","0 0 0",1)
        if "ROTATION_UP" in ent_values:
            if rotation == 0:
                ent_values = ent_values.replace("ROTATION_UP","0 90 0",1)
            elif rotation == 1:
                ent_values = ent_values.replace("ROTATION_UP","0 0 0",1)
            elif rotation == 2:
                ent_values = ent_values.replace("ROTATION_UP","0 270 0",1)
            elif rotation == 3:
                ent_values = ent_values.replace("ROTATION_UP","0 180 0",1)

        entity_num += 1
"""]

  rot_code = [["""
    if rotation == 0:
""",
    "#INSERT_ROT_0_PY_LIST\n",
"""
    elif rotation == 1:
""",
    "#INSERT_ROT_1_PY_LIST\n",
"""
    elif rotation == 2:
""",
    "#INSERT_ROT_2_PY_LIST\n",
"""
    elif rotation == 3:
""",
    "#INSERT_ROT_3_PY_LIST\n"],

["""
    if rotation == 0:
""",
    "#INSERT_ROT_0_PY_LIST\n",
"""
    elif rotation == 1:
""",
    "#INSERT_ROT_1_PY_LIST\n",
"""
    elif rotation == 2:
""",
    "#INSERT_ROT_2_PY_LIST\n",
"""
    elif rotation == 3:
""",
    "#INSERT_ROT_3_PY_LIST\n"]]

  var_num = 1
  ent_var_num = 1
  contains_ent = False #True if there are entities in the vmf
  in_solid_block = False #True if in a solid code block
  in_entity_block = False #True if in an entity code block
  in_editor_block = False #True if in an editor code block
  in_connections_block = False #True if in a connections code block
  solid_to_ent = False #True if you want to put the solid block into ent_list
  black_list_var = False #True means it IS on the blacklist, False otherwise
  value_list_history = []
  #name = "prefab_template\godplsno.vmf" #name of the vmf file, changed to allow user to open a file
  file = open(name, "r")

  openlines = file.readlines()

  prefab_icon_list = prefab_icon.split("/")

  if "easytf2_mapper" in prefab_icon_list:
    del prefab_icon_list[ :prefab_icon_list.index("easytf2_mapper")+1]

    for index, item in enumerate(prefab_icon_list): #enumerate allows you to give 2 vars in the for loop
      if index != len(prefab_icon_list) - 1:
       prefab_icon_list[index] = item + "/" # add the "/" back into the filepath
        
  txt_path = "prefab_template/" + prefab_name + ".txt"
  ent_path = "prefab_template/" + prefab_name + "_entities.txt"
  py_path = "prefabs/" + prefab_name + ".py"
  loopernum = 0
  for line in openlines:

    which_list = "txt_list" if not solid_to_ent else "ent_list"
      
    if "\t" in line or "entity" in line:
      
      if in_solid_block and "\t}" not in line or in_solid_block and "\t\t" in line:
        if "(" not in line:

          if "\"id\"" not in line:
            if "\"uaxis\"" in line:
              quote_num = 0
              for letter in line:
                  if letter == "\"":
                    quote_num += 1
                  if quote_num != 3:
                    eval(which_list).append(letter)
                  elif letter == "\"":
                    eval(which_list).append(letter)
                              
              eval(which_list).insert(-2, "[AXIS_REPLACE_U] 0.25")
            elif "\"vaxis\"" in line:
              quote_num = 0
              for letter in line:
                  if letter == "\"":
                    quote_num += 1
                  if quote_num != 3:
                    eval(which_list).append(letter)
                  elif letter == "\"":
                    eval(which_list).append(letter)
                              
              eval(which_list).insert(-2, "[AXIS_REPLACE_V] 0.25")

            else:
              eval(which_list).append(line)

          elif "\t\t\"id\"" in line:
            for letter in line:
              try:
                number = int(letter)
              except ValueError:
                eval(which_list).append(letter)

            if "\t\t\t" in line:
              eval(which_list).insert(-2, "id_num") #need to insert because it creates a \n at the end of the line
            else: 
              eval(which_list).insert(-2, "world_idnum")
        
        elif "(" in line:
          for letter in line:
            try:
              number = int(letter)    
              num_list.append(letter)
            except ValueError:
              if letter != "-" and letter != ".":
                eval(which_list).append(letter)
              if letter == " ":
                num_list.append("SEPARATE")
              elif letter == ".":
                num_list.append(".")
              elif letter == "-":
                num_list.append("-")
              elif letter == ")":
                #print(num_list)
                write_var(num_list, eval(which_list), py_list, var_num, value_list_history, in_solid_block, in_entity_block, rot_py_list, rot_enabled) 
                var_num += 1
                num_list = []

      elif in_solid_block and "\t}" in line and "\t\t" not in line:
        in_solid_block = False

        eval(which_list).append(line)
        if solid_to_ent:
          ent_list.append("}\n")
        solid_to_ent = False


      elif in_entity_block and "\"" in line:

        if "\"id\"" not in line and "\t\"targetname\"" not in line and "\t\"origin\"" not in line and "\t\"associatedmodel\"" not in line and "\t\"parentname\"" not in line and "\t\"respawnroomname\"" not in line and "\"angles\"" not in line and "LaserTarget" not in line:
          ent_list.append(line)
        elif "\"id\"" in line:
          for letter in line:
            try:
              number = int(letter)
            except ValueError:
              ent_list.append(letter)
                  
          ent_list.insert(-2, "world_idnum")

        elif "\"angles\" \"0 0 0\"" in line:
          quote_num = 0
          for letter in line:
              if letter == "\"":
                quote_num += 1
              if quote_num != 3:
                ent_list.append(letter)
              elif letter == "\"":
                ent_list.append(letter)
                        
          ent_list.insert(-2, "ROTATION_RIGHT")
        elif '"angles" "0 90 0"' in line:
          quote_num = 0
          for letter in line:
              if letter == "\"":
                quote_num += 1
              if quote_num != 3:
                ent_list.append(letter)
              elif letter == "\"":
                ent_list.append(letter)
                        
          ent_list.insert(-2, "ROTATION_UP")
        elif '"angles" "0 180 0"' in line:
          quote_num = 0
          for letter in line:
              if letter == "\"":
                quote_num += 1
              if quote_num != 3:
                ent_list.append(letter)
              elif letter == "\"":
                ent_list.append(letter)
                        
          ent_list.insert(-2, "ROTATION_LEFT")
        elif '"angles" "0 270 0"' in line:
          quote_num = 0
          for letter in line:
              if letter == "\"":
                quote_num += 1
              if quote_num != 3:
                ent_list.append(letter)
              elif letter == "\"":
                ent_list.append(letter)
                        
          ent_list.insert(-2, "ROTATION_DOWN")
        elif "\t\"targetname\"" in line and "relay" not in line and "ambient_generic" not in openlines[loopernum-17] and "respawn_trigger" not in line and "\"func_door\"" not in openlines[loopernum-19] and "filter_activator_tfteam" not in openlines[loopernum-2] and "info_target" not in openlines[loopernum-3]:
          quote_num = 0
          for letter in line:
              if letter == "\"":
                quote_num += 1
              if quote_num != 3:
                ent_list.append(letter)
              elif letter == "\"":
                ent_list.append(letter)
                        
          ent_list.insert(-2, "entity_name")
        elif "\t\"targetname\"" in line and "respawn_trigger" in line:
          quote_num = 0
          for letter in line:
              if letter == "\"":
                quote_num += 1
              if quote_num != 3:
                ent_list.append(letter)
              elif letter == "\"":
                ent_list.append(letter)
                        
          ent_list.insert(-2, "respawn_name")
        elif "\t\"targetname\"" in line and "filter_blu" in line:
          quote_num = 0
          for letter in line:
              if letter == "\"":
                quote_num += 1
              if quote_num != 3:
                ent_list.append(letter)
              elif letter == "\"":
                ent_list.append(letter)
                        
          ent_list.insert(-2, "filter_blu")
        elif "\t\"targetname\"" in line and "relay" in line:
          quote_num = 0
          for letter in line:
              if letter == "\"":
                quote_num += 1
              if quote_num != 3:
                ent_list.append(letter)
              elif letter == "\"":
                ent_list.append(letter)
                        
          ent_list.insert(-2, "relay_plac")
        elif "\t\"targetname\"" in line and "ambient_generic" in openlines[loopernum-17]:
          quote_num = 0
          for letter in line:
              if letter == "\"":
                quote_num += 1
              if quote_num != 3:
                ent_list.append(letter)
              elif letter == "\"":
                ent_list.append(letter)
                        
          ent_list.insert(-2, "sound_plac")
        elif "\t\"targetname\"" in line and "filter_red" in line:
          quote_num = 0
          for letter in line:
              if letter == "\"":
                quote_num += 1
              if quote_num != 3:
                ent_list.append(letter)
              elif letter == "\"":
                ent_list.append(letter)
                        
          ent_list.insert(-2, "filter_red")
        elif "\t\"associatedmodel\"" in line:
          quote_num = 0
          for letter in line:
              if letter == "\"":
                quote_num += 1
              if quote_num != 3:
                ent_list.append(letter)
              elif letter == "\"":
                ent_list.append(letter)
                        
          ent_list.insert(-2, "entity_same")

        elif "\t\"parentname\"" in line and "\"func_door\"" not in openlines[loopernum-19] and "door" not in openlines[loopernum-2]: 
          quote_num = 0
          for letter in line:
              if letter == "\"":
                quote_num += 1
              if quote_num != 3:
                ent_list.append(letter)
              elif letter == "\"":
                ent_list.append(letter)
                        
          ent_list.insert(-2, "parent_name")
        elif "LaserTarget" in line:
          quote_num = 0
          for letter in line:
              if letter == "\"":
                quote_num += 1
              if quote_num != 3:
                ent_list.append(letter)
              elif letter == "\"":
                ent_list.append(letter)
                        
          ent_list.insert(-2, "laser_target_plac")

        elif "targetname" in line and "info_target" in openlines[loopernum-3]:
          quote_num = 0
          for letter in line:
              if letter == "\"":
                quote_num += 1
              if quote_num != 3:
                ent_list.append(letter)
              elif letter == "\"":
                ent_list.append(letter)
                        
          ent_list.insert(-2, "laser_target_plac")
          
        elif "\t\"parentname\"" in line and "door" in openlines[loopernum-2]: 
          quote_num = 0
          for letter in line:
              if letter == "\"":
                quote_num += 1
              if quote_num != 3:
                ent_list.append(letter)
              elif letter == "\"":
                ent_list.append(letter)
                        
          ent_list.insert(-2, "door_large")
        elif "\t\"targetname\"" in line and "\"func_door\"" in openlines[loopernum-19]:
          quote_num = 0
          for letter in line:
              if letter == "\"":
                quote_num += 1
              if quote_num != 3:
                ent_list.append(letter)
              elif letter == "\"":
                ent_list.append(letter)
                        
          ent_list.insert(-2, "door_large")
        elif "\t\"parentname\"" in line and "connections" in openlines[loopernum-3]: 
          quote_num = 0
          for letter in line:
              if letter == "\"":
                quote_num += 1
              if quote_num != 3:
                ent_list.append(letter)
              elif letter == "\"":
                ent_list.append(letter)
                        
          ent_list.insert(-2, "door_large")
        elif "\t\"parentname\"" in line and "connections" in openlines[loopernum-2]: 
          quote_num = 0
          for letter in line:
              if letter == "\"":
                quote_num += 1
              if quote_num != 3:
                ent_list.append(letter)
              elif letter == "\"":
                ent_list.append(letter)
                        
          ent_list.insert(-2, "door_large")

        elif "\t\"respawnroomname\"" in line:
          quote_num = 0
          for letter in line:
              if letter == "\"":
                quote_num += 1
              if quote_num != 3:
                ent_list.append(letter)
              elif letter == "\"":
                ent_list.append(letter)
          ent_list.insert(-2, "respawn_name")
                
        elif "\t\"origin\"" in line:
          nums_yet = False #if True then numbers have been received
          for letter in line:
            
            try:
              number = int(letter)    
              num_list.append(letter)
              nums_yet = True
            except ValueError:
              if letter != "-" and letter != ".":
                ent_list.append(letter)
              if letter == " ":
                num_list.append("SEPARATE")
              elif letter == ".":
                num_list.append(".")
              elif letter == "-":
                num_list.append("-")
              elif letter == "\"" and nums_yet:
                write_var(num_list, ent_list, ent_py_list, ent_var_num, value_list_history, in_solid_block, in_entity_block, rot_ent_py_list, rot_enabled) 
                ent_var_num += 1
                num_list = []
          

      elif in_entity_block and "\"" not in line:
        in_entity_block = False
        if "editor" in line:
          ent_list.append(line)
          in_editor_block = True
        elif "connections" in line:
          ent_list.append(line)
          in_connections_block = True
        elif "solid" in line:
          solid_to_ent = True

      elif in_editor_block and "\t}" not in line:
        ent_list.append(line)

      elif in_editor_block and "\t}" in line:
        in_editor_block = False
        ent_list.append(line)
        ent_list.append("}\n")

      elif in_connections_block and "\t}" not in line:
        ent_list.append(line)

      elif in_connections_block and "\t}" in line:
        in_connections_block = False
        ent_list.append(line)
        solid_to_ent = True #IMPORTANT: Might need to change because solid might not always follow connections

      which_list = "txt_list" if not solid_to_ent else "ent_list"
        
                  
      if "solid" in line and "\"" not in line: #or "side" in line:# or "origin" in line: #need to add this because somehow, the solid/side
                        
        eval(which_list).append(line)
        #go until "\t}"
        in_solid_block = True
              
      elif "entity" in line:
        contains_ent = True
        in_entity_block = True
        ent_list.append(line)
        ent_list.append("{\n")

        
    loopernum += 1            
          


  file.close()
  if rot_enabled:
    print(prefab_icon)
    ext_list = ["_right.jpg","_down.jpg","_left.jpg","_up.jpg"]
    icondir = str(prefab_name)
    if not insertBool:
        with open("prefab_template/rot_prefab_list.txt", "a") as f:
          f.write(icondir+"_icon_list.txt\n")
          f.close()
    else:
        tempApp = open("prefab_template/rot_prefab_list.txt", "r")
        tempLines = tempApp.readlines()
        tempApp.close()
        tempLines.insert(indexLine,icondir+"_icon_list.txt\n")
        tempLines = "".join(tempLines)
        tempWrite = open("prefab_template/rot_prefab_list.txt", "w")
        tempWrite.write(tempLines)
        tempWrite.close()

    imageRot = Image.open(prefab_icon)
    imageRot.save("icons/"+ icondir+"_right.jpg")
    imageRot2 = Image.open(prefab_icon)
    imageRot2 = imageRot2.rotate(270)
    imageRot2.save("icons/"+ icondir+"_down.jpg")
    imageRot3 = Image.open(prefab_icon)
    imageRot3 = imageRot3.rotate(180)
    imageRot3.save("icons/"+ icondir+"_left.jpg")
    imageRot4 = Image.open(prefab_icon)
    imageRot4 = imageRot4.rotate(90)
    imageRot4.save("icons/"+ icondir+"_up.jpg")
    f = open("prefab_template/iconlists/"+ icondir+"_icon_list.txt","w+")
    for i in ext_list:
      f.write("icons/"+ icondir+i+"\n")
    f.close()

  else:
    icondir = str(prefab_name)
    if not insertBool:
        with open("prefab_template/rot_prefab_list.txt", "a") as f:
          f.write("NO_ROTATION\n")
          f.close()
    else:
        tempApp = open("prefab_template/rot_prefab_list.txt", "r")
        tempLines = tempApp.readlines()
        tempApp.close()
        tempLines.insert(indexLine,"NO_ROTATION\n")
        tempLines = "".join(tempLines)
        tempWrite = open("prefab_template/rot_prefab_list.txt", "w")
        tempWrite.write(tempLines)
        tempWrite.close()
    f = open("prefab_template/iconlists/"+ icondir+"_icon_list.txt","w+")
    for i in range(4):
      f.write("icons/"+icondir+"\n")
    f.close()


  txtReturn = compileTXT(txt_path, txt_list, prefab_name, prefab_text, prefab_icon, ent_list, ent_path,indexLine)
  pyReturn = compilePY(py_path, py_list, txt_path, compile_list, contains_ent, ent_code, ent_path, ent_py_list, rot_code, rot_py_list, rot_ent_py_list, rot_enabled)

  if workshop_export:
    d = open("info.txt","w")
    d.write(icondir+"\n"+prefab_name+"\n"+prefab_text+"\n"+str(index)+"\n")
    d.close()
    with zipfile.ZipFile(prefab_name + '.zip', 'w') as f:
      f.write(txt_path)
      f.write(py_path)

      if rot_enabled:
        f.write("icons/"+ icondir+"_right.jpg")
        f.write("icons/"+ icondir+"_down.jpg")
        f.write("icons/"+ icondir+"_left.jpg")
        f.write("icons/"+ icondir+"_up.jpg")
      else:
        f.write("icons/"+ icondir+".jpg")
      
      if contains_ent:
        f.write(ent_path)

      f.write("prefab_template/iconlists/"+icondir+"_icon_list.txt")
      f.write("info.txt")
      os.remove("info.txt")

  return txtReturn + pyReturn
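
The workshop_export branch above is another recurring os.remove idiom: info.txt exists only so it can be added to the archive, and it is deleted as soon as ZipFile.write has copied its bytes (removing it inside the with block is safe for that reason). A reduced sketch of the same idea, with illustrative file names:

import os
import zipfile

def pack_with_manifest(zip_path, payload_paths, manifest_text):
    # Write a transient manifest, archive it with the payload, then delete it.
    with open("info.txt", "w") as d:
        d.write(manifest_text)
    try:
        with zipfile.ZipFile(zip_path, "w") as z:
            for path in payload_paths:
                z.write(path)
            z.write("info.txt")   # the bytes are copied into the archive here
    finally:
        os.remove("info.txt")     # the on-disk copy is no longer needed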

Example 49

Project: easytf2_mapper
Source File: createPrefab.py
View license
def create(name, prefab_name, prefab_text, prefab_icon, rot_enabled, workshop_export,indexLine,index):
  if indexLine == 'END':
    insertBool = False
  else:
    insertBool = True

  py_list = []
  ent_py_list = []
  rot_py_list = []
  rot_ent_py_list = []
  txt_list = []
  ent_list = []
  num_list = []
  id_num_list = []
  id_value_list = []
  value_list = []
  compile_list = [
  """import os
import math

def rotatePoint(centerPoint,point,angle):
    angle = math.radians(angle)
    temp_point = point[0]-centerPoint[0] , point[1]-centerPoint[1]
    temp_point = ( temp_point[0]*math.cos(angle)-temp_point[1]*math.sin(angle) , temp_point[0]*math.sin(angle)+temp_point[1]*math.cos(angle))
    temp_point = temp_point[0]+centerPoint[0] , temp_point[1]+centerPoint[1]
    return temp_point

def createTile(posx, posy, id_num, world_id_num, entity_num, placeholder_list, rotation, level):
    
    looplist = '1'
    values=[]#Values are all of the lines of a prefab that have the vertex coords
""",

  "#INSERT_OPEN_FILE\n",

  """
    lines = f.readlines() #gathers each line of the prefab and puts numbers them
""",

  "#INSERT_ROT_IF\n",

  "#INSERT_PY_LIST\n",
  
  "#INSERT_ROT_CODE\n",

  "#INSERT_VAR_COUNT\n",

  """
    values = "".join(lines)#converting list to string
    ogvalues = "".join(lines)

    normal_list,axislist,negaxislist,vaxis,uaxis=[],['1 0 0 1','0 1 0 1','0 0 1 1'],['-1 0 0 1','0 -1 0 1','0 0 -1 1'],0,0
    def evaluate(coords):
        dist_x,dist_y,dist_z = abs(coords[0]),abs(coords[1]),abs(coords[2]),
        if dist_x >= dist_y and dist_x >= dist_z:
            return axislist[0]
        if dist_y >= dist_z:
            return axislist[1]
        return axislist[2]

    def get_normal(coord_list):
        vector_a = (coord_list[1][0]-coord_list[0][0],coord_list[1][1]-coord_list[0][1],coord_list[1][2]-coord_list[0][2])
        vector_b = (coord_list[2][0]-coord_list[0][0],coord_list[2][1]-coord_list[0][1],coord_list[2][2]-coord_list[0][2])
        
        normal = (vector_a[1]*vector_b[2]-vector_a[2]*vector_b[1],vector_a[2]*vector_b[0]-vector_a[0]*vector_b[2],vector_a[0]*vector_b[1]-vector_a[1]*vector_b[0])
        return normal
    
    for normal_num in range(1,var_count+1,3):
        normal_list=[]
        for i in range(3):
            normal_list.append([])
            for var in ["x", "y", "z"]:
                normal_list[i].append(eval(var+str(normal_num+i)))
        coords = get_normal(normal_list)  
        response = evaluate(coords)
        if response == axislist[0]:
            uaxis = axislist[1]
        else:
            uaxis = axislist[0]
        if response == axislist[2]:
            vaxis = negaxislist[1]
        else:
            vaxis = negaxislist[2]
        values = values.replace('AXIS_REPLACE_U',uaxis,1)
        values = values.replace('AXIS_REPLACE_V',vaxis,1)
    
    for i in range(ogvalues.count("world_idnum")):
        values = values.replace('world_idnum', str(world_id_num), 1)
        world_id_num += 1
    
    for var in ["x", "y", "z"]:
        for count in range(1,var_count+1):
            string = var + str(count)
            string_var = str(eval(var + str(count)))

            if var == "z":
                values = values.replace(string + ")",string_var + ")") #we need to do this or else it will mess up on 2 digit numbers
            else:
                values = values.replace(string + " ",string_var + " ")

    for i in range(ogvalues.count('id_num')):
        values = values.replace('id_num', str(id_num), 1)
        id_num = id_num+1
        if "ROTATION_RIGHT" in values:
            if rotation == 0:
                values = values.replace("ROTATION_RIGHT","0 0 0",1)
            elif rotation == 1:
                values = values.replace("ROTATION_RIGHT","0 270 0",1)
            elif rotation == 2:
                values = values.replace("ROTATION_RIGHT","0 180 0",1)
            elif rotation == 3:
                values = values.replace("ROTATION_RIGHT","0 90 0",1)
        if "ROTATION_UP" in values:
            if rotation == 0:
                values = values.replace("ROTATION_UP","0 90 0",1)
            elif rotation == 1:
                values = values.replace("ROTATION_UP","0 0 0",1)
            elif rotation == 2:
                values = values.replace("ROTATION_UP","0 270 0",1)
            elif rotation == 3:
                values = values.replace("ROTATION_UP","0 180 0",1)
        if "ROTATION_LEFT" in values:
            if rotation == 0:
                values = values.replace("ROTATION_LEFT","0 180 0",1)
            elif rotation == 1:
                values = values.replace("ROTATION_LEFT","0 90 0",1)
            elif rotation == 2:
                values = values.replace("ROTATION_LEFT","0 0 0",1)
            elif rotation == 3:
                values = values.replace("ROTATION_LEFT","0 270 0",1)
        if "ROTATION_DOWN" in values:
            if rotation == 0:
                values = values.replace("ROTATION_DOWN","0 270 0",1)
            elif rotation == 1:
                values = values.replace("ROTATION_DOWN","0 180 0",1)
            elif rotation == 2:
                values = values.replace("ROTATION_DOWN","0 90 0",1)
            elif rotation == 3:
                values = values.replace("ROTATION_DOWN","0 0 0",1)

    values = values.replace('"[0 0 0 1] 0.25"','"[1 1 1 1] 0.25"')
    values = values.replace('"[0 0 1 0] 0.25"','"[1 1 1 1] 0.25"')
    values = values.replace('"[0 1 0 0] 0.25"','"[1 1 1 1] 0.25"')       
    values = values.replace('"[1 0 0 0] 0.25"','"[1 1 1 1] 0.25"')
        
""",

  "#INSERT_ENT_CODE\n",
  
  ]

  ent_code =["#INSERT_ENT_OPEN_FILE\n",

             """
    lines_ent = g.readlines()
""",

             "#INSERT_ROT_IF\n",

             "#INSERT_ENT_PY_LIST\n",

             "#INSERT_ROT_ENT_CODE\n",
             
             "#INSERT_ENT_VAR_COUNT\n",

"""
    ent_values = "".join(lines_ent)
    ent_values_split = ent_values.split("\\"")
    valcount = "".join(lines_ent)

    for item in ent_values_split:
        if "entity_name" in item or "parent_name" in item or "door_large" in item:
            placeholder_list.append(item)

    for i in range(valcount.count('world_idnum')):
        ent_values = ent_values.replace('world_idnum', str(world_id_num), 1)
        world_id_num += 1

    for var in ["px", "py", "pz"]:
        for count in range(1,ent_var_count+1):
            string = var + str(count)
            string_var = str(eval(var + str(count)))

            if var == "pz":
                ent_values = ent_values.replace(string + "\\"",string_var + "\\"") #we need to do this or else it will mess up on 2 digit numbers
            else:
                ent_values = ent_values.replace(string + " ",string_var + " ")
                
    for var in ["x", "y", "z"]:
        for count in range(1,var_count+1):
            try:
                string = var + str(count)
                string_var = str(eval(var + str(count)))
                if var == "z":
                    ent_values = ent_values.replace(string + ")",string_var + ")") #we need to do this or else it will mess up on 2 digit numbers
                else:
                    ent_values = ent_values.replace(string + " ",string_var + " ")
            except:
                pass

    for i in range(valcount.count('id_num')):
        ent_values = ent_values.replace('id_num', str(id_num), 1)
        id_num = id_num+1

    for i in range(int(valcount.count('laser_target')/2)):
        if "laser_target_plac" in ent_values:
            ent_values = ent_values.replace("laser_target_plac", "laser_target" + str(entity_num), 2)
            entity_num += 1

    for i in range(int(valcount.count('sound'))):
        if "sound_plac" in ent_values:
            ent_values = ent_values.replace("sound_plac", "AmbSound"+str(entity_num), 2)
            ent_values = ent_values.replace("relay_plac", "LogicRelay"+str(entity_num),2)
            entity_num += 1

    for i in range(valcount.count("entity_name")):
        try:
            ent_values = ent_values.replace("entity_name", "entity" + str(entity_num), 1)
            ent_values = ent_values.replace("entity_same", "entity" + str(entity_num), 1)
            if "parent_name" in placeholder_list[entity_num]:
                ent_values = ent_values.replace("parent_name", "entity" + str(entity_num), 1)
                placeholder_list.remove(placeholder_list[entity_num])
            
            if "door_large" in ent_values:
                ent_values = ent_values.replace("door_large", "door_large" + str(entity_num), 4)
            if "\\"respawn_name\\"" in ent_values:
                ent_values = ent_values.replace("\\"respawn_name\\"", "\\"respawn_name" + str(entity_num) + "\\"", 2)
            entity_num += 1
        except Exception as e:
            print(str(e))

    for i in range(valcount.count("ROTATION")):
        if "ROTATION_RIGHT" in ent_values:
            if rotation == 0:
                ent_values = ent_values.replace("ROTATION_RIGHT","0 0 0",1)
            elif rotation == 1:
                ent_values = ent_values.replace("ROTATION_RIGHT","0 270 0",1)
            elif rotation == 2:
                ent_values = ent_values.replace("ROTATION_RIGHT","0 180 0 ",1)
            elif rotation == 3:
                ent_values = ent_values.replace("ROTATION_RIGHT","0 90 0",1)
        if "ROTATION_LEFT" in ent_values:
            if rotation == 0:
                ent_values = ent_values.replace("ROTATION_LEFT","0 180 0",1)
            elif rotation == 1:
                ent_values = ent_values.replace("ROTATION_LEFT","0 90 0",1)
            elif rotation == 2:
                ent_values = ent_values.replace("ROTATION_LEFT","0 0 0",1)
            elif rotation == 3:
                ent_values = ent_values.replace("ROTATION_LEFT","0 270 0",1)
        if "ROTATION_DOWN" in ent_values:
            if rotation == 0:
                ent_values = ent_values.replace("ROTATION_DOWN","0 270 0",1)
            elif rotation == 1:
                ent_values = ent_values.replace("ROTATION_DOWN","0 180 0",1)
            elif rotation == 2:
                ent_values = ent_values.replace("ROTATION_DOWN","0 90 0",1)
            elif rotation == 3:
                ent_values = ent_values.replace("ROTATION_DOWN","0 0 0",1)
        if "ROTATION_UP" in ent_values:
            if rotation == 0:
                ent_values = ent_values.replace("ROTATION_UP","0 90 0",1)
            elif rotation == 1:
                ent_values = ent_values.replace("ROTATION_UP","0 0 0",1)
            elif rotation == 2:
                ent_values = ent_values.replace("ROTATION_UP","0 270 0",1)
            elif rotation == 3:
                ent_values = ent_values.replace("ROTATION_UP","0 180 0",1)

        entity_num += 1
"""]

  rot_code = [["""
    if rotation == 0:
""",
    "#INSERT_ROT_0_PY_LIST\n",
"""
    elif rotation == 1:
""",
    "#INSERT_ROT_1_PY_LIST\n",
"""
    elif rotation == 2:
""",
    "#INSERT_ROT_2_PY_LIST\n",
"""
    elif rotation == 3:
""",
    "#INSERT_ROT_3_PY_LIST\n"],

["""
    if rotation == 0:
""",
    "#INSERT_ROT_0_PY_LIST\n",
"""
    elif rotation == 1:
""",
    "#INSERT_ROT_1_PY_LIST\n",
"""
    elif rotation == 2:
""",
    "#INSERT_ROT_2_PY_LIST\n",
"""
    elif rotation == 3:
""",
    "#INSERT_ROT_3_PY_LIST\n"]]

  var_num = 1
  ent_var_num = 1
  contains_ent = False #True if there are entities in the vmf
  in_solid_block = False #True if in a solid code block
  in_entity_block = False #True if in an entity code block
  in_editor_block = False #True if in an editor cod (i luv dat gme) block
  in_connections_block = False #True if in a connections code block
  solid_to_ent = False #True if you want to put the solid block into ent_list
  black_list_var = False #True means it IS on the blacklist, False otherwise
  value_list_history = []
  #name = "prefab_template\godplsno.vmf" #name of the vmf file, changed to allow user to open a file
  file = open(name, "r")

  openlines = file.readlines()

  prefab_icon_list = prefab_icon.split("/")

  if "easytf2_mapper" in prefab_icon_list:
    del prefab_icon_list[ :prefab_icon_list.index("easytf2_mapper")+1]

    for index, item in enumerate(prefab_icon_list): #enumerate allows you to give 2 vars in the for loop
      if index != len(prefab_icon_list) - 1:
       prefab_icon_list[index] = item + "/" # add the "/" back into the filepath
        
  txt_path = "prefab_template/" + prefab_name + ".txt"
  ent_path = "prefab_template/" + prefab_name + "_entities.txt"
  py_path = "prefabs/" + prefab_name + ".py"
  loopernum = 0
  for line in openlines:

    which_list = "txt_list" if not solid_to_ent else "ent_list"
      
    if "\t" in line or "entity" in line:
      
      if in_solid_block and "\t}" not in line or in_solid_block and "\t\t" in line:
        if "(" not in line:

          if "\"id\"" not in line:
            if "\"uaxis\"" in line:
              quote_num = 0
              for letter in line:
                  if letter == "\"":
                    quote_num += 1
                  if quote_num != 3:
                    eval(which_list).append(letter)
                  elif letter == "\"":
                    eval(which_list).append(letter)
                              
              eval(which_list).insert(-2, "[AXIS_REPLACE_U] 0.25")
            elif "\"vaxis\"" in line:
              quote_num = 0
              for letter in line:
                  if letter == "\"":
                    quote_num += 1
                  if quote_num != 3:
                    eval(which_list).append(letter)
                  elif letter == "\"":
                    eval(which_list).append(letter)
                              
              eval(which_list).insert(-2, "[AXIS_REPLACE_V] 0.25")

            else:
              eval(which_list).append(line)

          elif "\t\t\"id\"" in line:
            for letter in line:
              try:
                number = int(letter)
              except ValueError:
                eval(which_list).append(letter)

            if "\t\t\t" in line:
              eval(which_list).insert(-2, "id_num") #need to insert because it creates a \n at the end of the line
            else: 
              eval(which_list).insert(-2, "world_idnum")
        
        elif "(" in line:
          for letter in line:
            try:
              number = int(letter)    
              num_list.append(letter)
            except ValueError:
              if letter != "-" and letter != ".":
                eval(which_list).append(letter)
              if letter == " ":
                num_list.append("SEPARATE")
              elif letter == ".":
                num_list.append(".")
              elif letter == "-":
                num_list.append("-")
              elif letter == ")":
                #print(num_list)
                write_var(num_list, eval(which_list), py_list, var_num, value_list_history, in_solid_block, in_entity_block, rot_py_list, rot_enabled) 
                var_num += 1
                num_list = []

      elif in_solid_block and "\t}" in line and "\t\t" not in line:
        in_solid_block = False

        eval(which_list).append(line)
        if solid_to_ent:
          ent_list.append("}\n")
        solid_to_ent = False


      elif in_entity_block and "\"" in line:

        if "\"id\"" not in line and "\t\"targetname\"" not in line and "\t\"origin\"" not in line and "\t\"associatedmodel\"" not in line and "\t\"parentname\"" not in line and "\t\"respawnroomname\"" not in line and "\"angles\"" not in line and "LaserTarget" not in line:
          ent_list.append(line)
        elif "\"id\"" in line:
          for letter in line:
            try:
              number = int(letter)
            except ValueError:
              ent_list.append(letter)
                  
          ent_list.insert(-2, "world_idnum")

        elif "\"angles\" \"0 0 0\"" in line:
          quote_num = 0
          for letter in line:
              if letter == "\"":
                quote_num += 1
              if quote_num != 3:
                ent_list.append(letter)
              elif letter == "\"":
                ent_list.append(letter)
                        
          ent_list.insert(-2, "ROTATION_RIGHT")
        elif '"angles" "0 90 0"' in line:
          quote_num = 0
          for letter in line:
              if letter == "\"":
                quote_num += 1
              if quote_num != 3:
                ent_list.append(letter)
              elif letter == "\"":
                ent_list.append(letter)
                        
          ent_list.insert(-2, "ROTATION_UP")
        elif '"angles" "0 180 0"' in line:
          quote_num = 0
          for letter in line:
              if letter == "\"":
                quote_num += 1
              if quote_num != 3:
                ent_list.append(letter)
              elif letter == "\"":
                ent_list.append(letter)
                        
          ent_list.insert(-2, "ROTATION_LEFT")
        elif '"angles" "0 270 0"' in line:
          quote_num = 0
          for letter in line:
              if letter == "\"":
                quote_num += 1
              if quote_num != 3:
                ent_list.append(letter)
              elif letter == "\"":
                ent_list.append(letter)
                        
          ent_list.insert(-2, "ROTATION_DOWN")
        elif "\t\"targetname\"" in line and "relay" not in line and "ambient_generic" not in openlines[loopernum-17] and "respawn_trigger" not in line and "\"func_door\"" not in openlines[loopernum-19] and "filter_activator_tfteam" not in openlines[loopernum-2] and "info_target" not in openlines[loopernum-3]:
          quote_num = 0
          for letter in line:
              if letter == "\"":
                quote_num += 1
              if quote_num != 3:
                ent_list.append(letter)
              elif letter == "\"":
                ent_list.append(letter)
                        
          ent_list.insert(-2, "entity_name")
        elif "\t\"targetname\"" in line and "respawn_trigger" in line:
          quote_num = 0
          for letter in line:
              if letter == "\"":
                quote_num += 1
              if quote_num != 3:
                ent_list.append(letter)
              elif letter == "\"":
                ent_list.append(letter)
                        
          ent_list.insert(-2, "respawn_name")
        elif "\t\"targetname\"" in line and "filter_blu" in line:
          quote_num = 0
          for letter in line:
              if letter == "\"":
                quote_num += 1
              if quote_num != 3:
                ent_list.append(letter)
              elif letter == "\"":
                ent_list.append(letter)
                        
          ent_list.insert(-2, "filter_blu")
        elif "\t\"targetname\"" in line and "relay" in line:
          quote_num = 0
          for letter in line:
              if letter == "\"":
                quote_num += 1
              if quote_num != 3:
                ent_list.append(letter)
              elif letter == "\"":
                ent_list.append(letter)
                        
          ent_list.insert(-2, "relay_plac")
        elif "\t\"targetname\"" in line and "ambient_generic" in openlines[loopernum-17]:
          quote_num = 0
          for letter in line:
              if letter == "\"":
                quote_num += 1
              if quote_num != 3:
                ent_list.append(letter)
              elif letter == "\"":
                ent_list.append(letter)
                        
          ent_list.insert(-2, "sound_plac")
        elif "\t\"targetname\"" in line and "filter_red" in line:
          quote_num = 0
          for letter in line:
              if letter == "\"":
                quote_num += 1
              if quote_num != 3:
                ent_list.append(letter)
              elif letter == "\"":
                ent_list.append(letter)
                        
          ent_list.insert(-2, "filter_red")
        elif "\t\"associatedmodel\"" in line:
          quote_num = 0
          for letter in line:
              if letter == "\"":
                quote_num += 1
              if quote_num != 3:
                ent_list.append(letter)
              elif letter == "\"":
                ent_list.append(letter)
                        
          ent_list.insert(-2, "entity_same")

        elif "\t\"parentname\"" in line and "\"func_door\"" not in openlines[loopernum-19] and "door" not in openlines[loopernum-2]: 
          quote_num = 0
          for letter in line:
              if letter == "\"":
                quote_num += 1
              if quote_num != 3:
                ent_list.append(letter)
              elif letter == "\"":
                ent_list.append(letter)
                        
          ent_list.insert(-2, "parent_name")
        elif "LaserTarget" in line:
          quote_num = 0
          for letter in line:
              if letter == "\"":
                quote_num += 1
              if quote_num != 3:
                ent_list.append(letter)
              elif letter == "\"":
                ent_list.append(letter)
                        
          ent_list.insert(-2, "laser_target_plac")

        elif "targetname" in line and "info_target" in openlines[loopernum-3]:
          quote_num = 0
          for letter in line:
              if letter == "\"":
                quote_num += 1
              if quote_num != 3:
                ent_list.append(letter)
              elif letter == "\"":
                ent_list.append(letter)
                        
          ent_list.insert(-2, "laser_target_plac")
          
        elif "\t\"parentname\"" in line and "door" in openlines[loopernum-2]: 
          quote_num = 0
          for letter in line:
              if letter == "\"":
                quote_num += 1
              if quote_num != 3:
                ent_list.append(letter)
              elif letter == "\"":
                ent_list.append(letter)
                        
          ent_list.insert(-2, "door_large")
        elif "\t\"targetname\"" in line and "\"func_door\"" in openlines[loopernum-19]:
          quote_num = 0
          for letter in line:
              if letter == "\"":
                quote_num += 1
              if quote_num != 3:
                ent_list.append(letter)
              elif letter == "\"":
                ent_list.append(letter)
                        
          ent_list.insert(-2, "door_large")
        elif "\t\"parentname\"" in line and "connections" in openlines[loopernum-3]: 
          quote_num = 0
          for letter in line:
              if letter == "\"":
                quote_num += 1
              if quote_num != 3:
                ent_list.append(letter)
              elif letter == "\"":
                ent_list.append(letter)
                        
          ent_list.insert(-2, "door_large")
        elif "\t\"parentname\"" in line and "connections" in openlines[loopernum-2]: 
          quote_num = 0
          for letter in line:
              if letter == "\"":
                quote_num += 1
              if quote_num != 3:
                ent_list.append(letter)
              elif letter == "\"":
                ent_list.append(letter)
                        
          ent_list.insert(-2, "door_large")

        elif "\t\"respawnroomname\"" in line:
          quote_num = 0
          for letter in line:
              if letter == "\"":
                quote_num += 1
              if quote_num != 3:
                ent_list.append(letter)
              elif letter == "\"":
                ent_list.append(letter)
          ent_list.insert(-2, "respawn_name")
                
        elif "\t\"origin\"" in line:
          nums_yet = False #if True then numbers have been received
          for letter in line:
            
            try:
              number = int(letter)    
              num_list.append(letter)
              nums_yet = True
            except ValueError:
              if letter != "-" and letter != ".":
                ent_list.append(letter)
              if letter == " ":
                num_list.append("SEPARATE")
              elif letter == ".":
                num_list.append(".")
              elif letter == "-":
                num_list.append("-")
              elif letter == "\"" and nums_yet:
                write_var(num_list, ent_list, ent_py_list, ent_var_num, value_list_history, in_solid_block, in_entity_block, rot_ent_py_list, rot_enabled) 
                ent_var_num += 1
                num_list = []
          

      elif in_entity_block and "\"" not in line:
        in_entity_block = False
        if "editor" in line:
          ent_list.append(line)
          in_editor_block = True
        elif "connections" in line:
          ent_list.append(line)
          in_connections_block = True
        elif "solid" in line:
          solid_to_ent = True

      elif in_editor_block and "\t}" not in line:
        ent_list.append(line)

      elif in_editor_block and "\t}" in line:
        in_editor_block = False
        ent_list.append(line)
        ent_list.append("}\n")

      elif in_connections_block and "\t}" not in line:
        ent_list.append(line)

      elif in_connections_block and "\t}" in line:
        in_connections_block = False
        ent_list.append(line)
        solid_to_ent = True #IMPORTANT: Might need to change because solid might not always follow connections

      which_list = "txt_list" if not solid_to_ent else "ent_list"
        
                  
      if "solid" in line and "\"" not in line: #or "side" in line:# or "origin" in line: #need to add this because somehow, the solid/side
                        
        eval(which_list).append(line)
        #go until "\t}"
        in_solid_block = True
              
      elif "entity" in line:
        contains_ent = True
        in_entity_block = True
        ent_list.append(line)
        ent_list.append("{\n")

        
    loopernum += 1            
          


  file.close()
  global insertBool
  if rot_enabled:
    print(prefab_icon)
    ext_list = ["_right.jpg","_down.jpg","_left.jpg","_up.jpg"]
    icondir = str(prefab_name)
    if not insertBool:
        with open("prefab_template/rot_prefab_list.txt", "a") as f:
          f.write(icondir+"_icon_list.txt\n")
          f.close()
    else:
        tempApp = open("prefab_template/rot_prefab_list.txt", "r")
        tempLines = tempApp.readlines()
        tempApp.close()
        tempLines.insert(indexLine,icondir+"_icon_list.txt\n")
        tempLines = "".join(tempLines)
        tempWrite = open("prefab_template/rot_prefab_list.txt", "w")
        tempWrite.write(tempLines)
        tempWrite.close()

    imageRot = Image.open(prefab_icon)
    imageRot.save("icons/"+ icondir+"_right.jpg")
    imageRot2 = Image.open(prefab_icon)
    imageRot2 = imageRot2.rotate(270)
    imageRot2.save("icons/"+ icondir+"_down.jpg")
    imageRot3 = Image.open(prefab_icon)
    imageRot3 = imageRot3.rotate(180)
    imageRot3.save("icons/"+ icondir+"_left.jpg")
    imageRot4 = Image.open(prefab_icon)
    imageRot4 = imageRot4.rotate(90)
    imageRot4.save("icons/"+ icondir+"_up.jpg")
    f = open("prefab_template/iconlists/"+ icondir+"_icon_list.txt","w+")
    for i in ext_list:
      f.write("icons/"+ icondir+i+"\n")
    f.close()

  else:
    icondir = str(prefab_name)
    if not insertBool:
        with open("prefab_template/rot_prefab_list.txt", "a") as f:
          f.write("NO_ROTATION\n")
          f.close()
    else:
        tempApp = open("prefab_template/rot_prefab_list.txt", "r")
        tempLines = tempApp.readlines()
        tempApp.close()
        tempLines.insert(indexLine,"NO_ROTATION\n")
        tempLines = "".join(tempLines)
        tempWrite = open("prefab_template/rot_prefab_list.txt", "w")
        tempWrite.write(tempLines)
        tempWrite.close()
    f = open("prefab_template/iconlists/"+ icondir+"_icon_list.txt","w+")
    for i in range(4):
      f.write("icons/"+icondir+"\n")
    f.close()


  txtReturn = compileTXT(txt_path, txt_list, prefab_name, prefab_text, prefab_icon, ent_list, ent_path,indexLine)
  pyReturn = compilePY(py_path, py_list, txt_path, compile_list, contains_ent, ent_code, ent_path, ent_py_list, rot_code, rot_py_list, rot_ent_py_list, rot_enabled)

  if workshop_export:
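    # write the upload metadata, zip everything the workshop needs, then delete the temp file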
    with open("info.txt","w") as d:
      d.write(icondir+"\n"+prefab_name+"\n"+prefab_text+"\n"+str(index)+"\n")
    with zipfile.ZipFile(prefab_name + '.zip', 'w') as f:
      f.write(txt_path)
      f.write(py_path)

      if rot_enabled:
        f.write("icons/"+ icondir+"_right.jpg")
        f.write("icons/"+ icondir+"_down.jpg")
        f.write("icons/"+ icondir+"_left.jpg")
        f.write("icons/"+ icondir+"_up.jpg")
      else:
        f.write("icons/"+ icondir+".jpg")
      
      if contains_ent:
        f.write(ent_path)

      f.write("prefab_template/iconlists/"+icondir+"_icon_list.txt")
      f.write("info.txt")
      os.remove("info.txt")

  return txtReturn + pyReturn
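
The workshop-export branch above shows a recurring os.remove pattern: a throw-away metadata file is written, added to the archive, and deleted as soon as it has been zipped. A minimal standalone sketch of the same pattern (function and file names are illustrative, not from the project):

    import os
    import zipfile

    def export_bundle(zip_name, files, metadata):
        # the metadata file exists only long enough to be added to the archive
        with open("info.txt", "w") as f:
            f.write(metadata)
        try:
            with zipfile.ZipFile(zip_name, "w") as archive:
                for path in files:
                    archive.write(path)
                archive.write("info.txt")
        finally:
            # unlike the code above, clean up even if archiving fails
            os.remove("info.txt")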

Example 50

Project: Devede
Source File: devede_avconv_convert.py
View license
	def __init__(self,global_vars,videofile,filename,filefolder,progresbar,proglabel,disctype,title,chapter,threads,seconds,encpass,fix_ac3):

		""" This class converts a video file to MPEG-1 or MPEG-2 format

		VIDEOFILE contains the parameters to convert the video
		FILENAME is the generic file name given by the user
		FILEFOLDER is the path where all the temporary and final files will be created
		PROGRESBAR is the progress bar where the class will show the progress
		PROGLABEL is the label where the class will show what it is doing
		DISCTYPE can be dvd, vcd, svcd, cvd or divx
		TITLE and CHAPTER identify the title and chapter this file belongs to
		THREADS is the number of threads to use
		SECONDS is the number of seconds we want to convert (for previews)
		ENCPASS is the encoding pass number"""
		
		devede_executor.executor.__init__(self,filename,filefolder,progresbar)
		self.printout=False

		self.percent2=120
		self.film_length=float(videofile["olength"])
		if seconds==0:
			self.divide=float(videofile["olength"])
			if (videofile["cutting"]==1) or (videofile["cutting"]==2): # if we want only one half of the file
				self.divide/=2
		else:
			self.divide=float(seconds)

		if self.divide==0:
			self.divide=1

		self.error=""
		progresbar.set_fraction(0)
		progresbar.set_text("")
		
		if videofile["ismpeg"]: # if the file doesn't have to be converted, we simply copy or link it
			self.pulse=True
			self.print_error=_("File copy failed\nMaybe you ran out of disk space?")
			if seconds==0:
				texto=_("Copying the file")+"\n"
			else:
				texto=_("Creating preview")+"\n"
			proglabel.set_text(texto+videofile["filename"])
			currentfile=self.create_filename(filefolder+filename,title,chapter,disctype=="divx")
		
			print "\ncurrentfile is:", currentfile, "\n"

			try:
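				# remove any leftover output file from a previous run; a missing file is fine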
				os.remove(currentfile)
			except:
				pass

			if (sys.platform=="win32") or (sys.platform=="win64"):
				# links do not work on windows, so just copy the file
				# self.launch_shell('copy "'+videofile["path"].replace('"','""')+'" "'+currentfile+'"',output=False)
				# Only hardlinks are available on 2000 and XP, reparse points are available from vista onwards.
				win32file.CreateHardLink(currentfile, videofile["path"].replace('"','""'))
			else:
				if len(videofile["sub_list"])==0:
					self.launch_shell('ln -s "'+videofile["path"].replace('"','\\"')+'" "'+currentfile+'"',output=False)
				else:
					self.launch_shell('cp "'+videofile["path"].replace('"','\\"')+'" "'+currentfile+'"',output=False)
			return

		isvob=videofile["isvob"]

		self.pulse=False
		if seconds==0:
			texto=(_("Converting files from title %(title_number)s (pass %(pass_number)s)\n\n%(file_name)s") % {"title_number":str(title),"pass_number":str(encpass),"file_name":videofile["filename"]} )
			proglabel.set_text(texto) #+" "+str(title)+" Pass: "+ str(encpass) +"\n\n"+videofile["filename"] )
		else:
			texto=_("Creating preview")
			proglabel.set_text(texto+"\n"+videofile["filename"])

		addbars=False
		framerate=int(videofile["ofps"])
		videorate=int(videofile["vrate"])
		audiorate=self.adjust_audiorate(int(videofile["arate"]),disctype=="dvd")
		
		audio_final_rate=int(videofile["arateunc"])
		audiodelay=float(videofile["adelay"])
		final_framerate=float(videofile["fps"])
		aspect_ratio_original=videofile["oaspect"]
		aspect_ratio_final=videofile["aspect"]
		resx_final=videofile["width"]
		resy_final=videofile["height"]
		resx_original=videofile["owidth"]
		resy_original=videofile["oheight"]
		copy_audio=videofile["copy_audio"]
		sound51=videofile["sound51"]
		gop12=videofile["gop12"]
		audiostream=videofile["audio_stream"]
		swap_fields=videofile["swap_fields"]
		volume=videofile["volume"]
		audio_tracks=len(videofile["audio_list"])

		if (videofile["resolution"]==0) and (disctype=="divx"):
			default_res=True
		else:
			default_res=False
		
		speed1,speed2=devede_other.get_speedup(videofile)
		if speed1==speed2:
			speedup=None
		else:
			speedup=str(speed1)+":"+str(speed2)
	
		if aspect_ratio_original<1.3:
			aspect_ratio_original=float(videofile["owidth"])/(float(videofile["oheight"]))
		if aspect_ratio_original<1.33333333:
			aspect_ratio_original=1.33333333
	
		max_videorate=int(videorate*2)
		min_videorate=int(videorate*0.75)
		
		dsize,minvid,maxvid=devede_other.get_dvd_size(None,disctype)
		
		if max_videorate>maxvid:
			max_videorate=maxvid
		if min_videorate<minvid:
			min_videorate=minvid
			
		if videofile["blackbars"]==0: # black bars requested: letterbox/pillarbox instead of stretching
			addbars=True
			if (videofile["rotate"]==90) or (videofile["rotate"]==270):
				resx_original2=resy_original
				resy_original2=resx_original
				aratio=1/aspect_ratio_original
			else:
				resx_original2=resx_original
				resy_original2=resy_original
				aratio=aspect_ratio_original

			if (resx_original2%2)==1:
				resx_original2+=1
			if (resy_original2%2)==1:
				resy_original2+=1
			
			resy_tmp = int(resy_final*aspect_ratio_final/aratio)
			resx_tmp = int(resx_final*aratio/aspect_ratio_final)
			
			
			if (resx_tmp>resx_final):
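				# source is wider than the target frame: span the full width and letterbox top/bottom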
				resx_inter=resx_final
				resy_inter=resy_tmp
			else:
				resx_inter=resx_tmp
				resy_inter=resy_final
			
			#resx_inter=resx_original2
			#resy_inter=int((resy_original2*aspect_ratio_original)/aspect_ratio_final)
			if (resx_inter%2)==1:
				resx_inter-=1
			if (resy_inter%2)==1:
				resy_inter-=1
			
			#if ((resy_inter<resy_original) or (resy_original+5>resy_inter)):
			#	addbars=False

		if addbars==False:
			resx_inter=resx_final
			resy_inter=resy_final
		else:
			if (resx_inter==resx_final):
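				# the picture spans the full width, so split the spare height between top and bottom bars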
				addx=0
				addy=int((resy_final-resy_inter)/2)
				if(addy%2)==1:
					addy+=1
			else:
				addy=0
				addx=int((resx_final-resx_inter)/2)
				if(addx%2)==1:
					addx+=1
					
		
		command_var=["avconv"]

		command_var.append("-i")
		command_var.append(videofile["path"])
		
		if (volume!=100):
			command_var.append("-vol")
			command_var.append(str((256*volume)/100))
		
		if (audiodelay!=0.0) and (copy_audio==False) and (isvob==False):
			command_var.append("-itsoffset")
			command_var.append(str(audiodelay))
			command_var.append("-i")
			command_var.append(videofile["path"])
			command_var.append("-map")
			command_var.append("1:0")
			for l in range(audio_tracks):
				command_var.append("-map")
				command_var.append("0"+":"+str(l+1))
		
		if (isvob==False):
			cmd_line=""
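			# build the -vf filter chain; consecutive filters are joined with ",fifo," buffers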
			
			extra_params=videofile["params_vf"] # take the VF extra params
			while (extra_params!=""):
				extra_params,new_param=devede_other.get_new_param(extra_params)
				if (new_param!="") and (new_param!=','):
					while (len(new_param)>1) and (new_param[0]==','):
						new_param=new_param[1:]
					while (len(new_param)>1) and (new_param[-1]==','):
						new_param=new_param[:-1]
					if new_param=="fifo":
						continue
					if cmd_line!="":
						cmd_line+=",fifo,"
					cmd_line+=new_param
			
			if videofile["deinterlace"]=="yadif":
				if (cmd_line!=""):
					cmd_line+=",fifo,"
				cmd_line+="yadif"
			
			vflip=0
			hflip=0
	
			if (videofile["rotate"]==90):
				if (cmd_line!=""):
					cmd_line+=",fifo,"
				cmd_line+="transpose=1"
			elif (videofile["rotate"]==270):
				if (cmd_line!=""):
					cmd_line+=",fifo,"
				cmd_line+="transpose=2"
			elif (videofile["rotate"]==180):
				vflip=1
				hflip=1
			
			if (videofile["vmirror"]):
				vflip=1-vflip
			if (videofile["hmirror"]):
				hflip=1-hflip
	
			if (vflip==1):
				if (cmd_line!=""):
					cmd_line+=",fifo,"
				cmd_line+="vflip"
			if (hflip==1):
				if (cmd_line!=""):
					cmd_line+=",fifo,"
				cmd_line+="hflip"
			
			if addbars and ((resx_inter!=resx_original) or (resy_inter!=resy_original)) and (default_res==False):
				if (cmd_line!=""):
					cmd_line+=",fifo,"
				cmd_line+="scale="+str(resx_inter)+":"+str(resy_inter)+",fifo,pad="+str(resx_final)+":"+str(resy_final)+":"+str(addx)+":"+str(addy)+":0x000000"
			
			if cmd_line!="":
				command_var.append("-vf")
				command_var.append(cmd_line)
			
		
		command_var.append("-y")

		vcd=False
		
		if (disctype!="divx"):
			command_var.append("-target")
			if (disctype=="dvd"):
				if final_framerate==30:
					command_var.append("ntsc-dvd")
				elif (framerate==24):
					command_var.append("film-dvd")
				else:
					command_var.append("pal-dvd")
				if (copy_audio==False):
					command_var.append("-acodec")
					if fix_ac3:
						command_var.append("ac3_fixed")
					else:
						command_var.append("ac3")
				#command_var.append("-maxrate")
				#command_var.append("7000k")
				#command_var.append("-minrate")
				#command_var.append("2200k")
			elif (disctype=="vcd"):
				vcd=True
				if final_framerate==30:
					command_var.append("ntsc-vcd")
				else:
					command_var.append("pal-vcd")
			elif (disctype=="svcd"):
				if final_framerate==30:
					command_var.append("ntsc-svcd")
				else:
					command_var.append("pal-svcd")
			elif (disctype=="cvd"):
				if final_framerate==30:
					command_var.append("ntsc-svcd")
				else:
					command_var.append("pal-svcd")
		else: # DivX
			command_var.append("-vcodec")
			command_var.append("mpeg4")
			command_var.append("-acodec")
			command_var.append("libmp3lame")
			command_var.append("-f")
			command_var.append("avi")
		
		if  (not isvob):
			command_var.append("-sn") # no subtitles

		if copy_audio or isvob:
			command_var.append("-acodec")
			command_var.append("copy")
		#else:
		#	if (disctype=="divx"):
		#		command_var.append("-acodec")
		#		command_var.append("mp3")

		#if (audiostream!=10000):
		#	command_var.append("-aid")
		#	command_var.append(str(audiostream))

		if isvob:
			command_var.append("-vcodec")
			command_var.append("copy")
		
		if (vcd==False):
			if final_framerate==30:
				if (framerate==24) and ((disctype=="dvd") or (disctype=="divx")):
					str_final_framerate="24000/1001"
					keyintv=15
					telecine=True
				else:
					str_final_framerate="30000/1001"
					keyintv=18
			else:
				str_final_framerate=str(int(final_framerate))
				keyintv=15
		
		if (disctype=="divx"):
			command_var.append("-g")
			command_var.append("300")
		elif gop12 and (isvob==False):
			command_var.append("-g")
			command_var.append("12")
		
		command_var.append("-bf")
		command_var.append("2")
		command_var.append("-strict")
		command_var.append("1")
		
		if seconds!=0:
			command_var.append("-t")
			command_var.append(str(seconds))
		else:
			if videofile["cutting"]==1: # first half only
				command_var.append("-t")
				command_var.append(str(videofile["olength"]/2))
			elif videofile["cutting"]==2: # second half only
				command_var.append("-ss")
				command_var.append(str((videofile["olength"]/2)-5)) # start 5 seconds before

		#if (audiodelay!=0.0) and (copy_audio==False) and (isvob==False):
		#	command_var.append("-delay")
		#	command_var.append(str(audiodelay))

		command_var.append("-ac")
		if (sound51) and ((disctype=="dvd") or (disctype=="divx")):
			command_var.append("6")
		else:
			command_var.append("2")

		#if (isvob==False) and (default_res==False):
		#	command_var.append("-ofps")
		#	command_var.append(str_final_framerate)

		if disctype=="divx":
			command_var.append("-vtag")
			command_var.append("DX50")

		lineatemp=""
		acoma=False
		
		#if swap_fields:
		#	lineatemp+="phase=a"
		#	acoma=True
		
		passlog_var = None
		
		if (videofile["deinterlace"]!="none") and (videofile["deinterlace"]!="yadif") and (isvob==False):
			command_var.append("-deinterlace")
			
		print "Addbars "+str(addbars)+" resx_o "+str(resx_original)+" resy_o "+str(resy_original)
		print "resx_i "+str(resx_inter)+" resy_i "+str(resy_inter)
 
		if (isvob==False) and (vcd==False):
			command_var.append("-s")
			command_var.append(str(resx_final)+"x"+str(resy_final))

		# mencoder historically supported up to 8 threads; pass the requested count through
		if isvob==False:
			if threads>1:
				command_var.append("-threads")
				command_var.append(str(threads))

			command_var.append("-trellis")
			if videofile["trellis"]:
				command_var.append("1")
			else:
				command_var.append("0")
		
			if videofile["mbd"] in (0, 1, 2):
				command_var.append("-mbd")
				command_var.append(str(videofile["mbd"]))
		
			#if disctype!="divx":
			#	lavcopts+=":keyint="+str(keyintv)
			if (copy_audio==False) and (vcd==False):
#					lavcopts+=":acodec="
#					if disctype=="dvd":
#						if fix_ac3:
#							lavcopts+="ac3_fixed"
#						else:
#							lavcopts+="ac3"
#					else:
#						lavcopts+="mp2"
					#lavcopts+=":abitrate="+str(audiorate)
				command_var.append("-b:a")
				command_var.append(str(audiorate)+"k")

			if (default_res==False):
				command_var.append("-aspect")
				if aspect_ratio_final>1.4:
					command_var.append("16:9")
				else:
					command_var.append("4:3")
			
			passlog_var=None
			if (encpass>0)  and (isvob==False):
				command_var.append("-pass")
				command_var.append(str(encpass))
				passlog_var=os.path.join(filefolder,filename)+".log"
				if encpass==1:
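					# the first pass must start from a clean log; a stale one would skew the second pass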
					try:
						os.remove(passlog_var)
					except:
						pass
	
			if (vcd==False):
				command_var.append("-b")
				command_var.append(str(videorate)+"k")
	
		
		
		at=audio_tracks
		while (at>1):
			if (volume!=100):
				command_var.append("-vol")
				command_var.append(str((256*volume)/100))
			command_var.append("-newaudio")
			at-=1
			

		extra_params=videofile["params"] # take the extra params
		while (extra_params!=""):
			extra_params,new_param=devede_other.get_new_param(extra_params)
			if new_param!="":
				command_var.append(new_param)

		currentfile=self.create_filename(filefolder+filename,title,chapter,disctype=="divx")

		if (passlog_var != None) and (isvob==False):
			command_var.append("-passlogfile")
			command_var.append(passlog_var)

		if (encpass==1) and (isvob==False):
			command_var.append("-y")
			command_var.append("/dev/null")
		else:
			command_var.append(currentfile)

		self.print_error=_("Conversion failed.\nIt seems to be a bug in Mencoder.")
		if (videofile["params"]!="") or (videofile["params_vf"]!="") or (videofile["params_lame"]!=""):
			self.print_error+="\n"+_("Also check the extra params passed to Mencoder for syntax errors.")
		self.error_not_done=True
		self.launch_program(command_var,read_chars=300)
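
The two-pass branch above is another recurring os.remove idiom: the statistics log is deleted before the first pass so a stale log cannot leak into the new encode, and the second pass then reads the log the first one wrote. A condensed sketch of that flow, mirroring the avconv invocation built by this class (paths and options are illustrative):

    import os
    import subprocess

    def two_pass_encode(source, target, passlog):
        for encpass in (1, 2):
            if encpass == 1:
                # drop any stale statistics log before the analysis pass
                try:
                    os.remove(passlog)
                except OSError:
                    pass
            cmd = ["avconv", "-i", source,
                   "-pass", str(encpass), "-passlogfile", passlog,
                   "-y", "/dev/null" if encpass == 1 else target]
            subprocess.check_call(cmd)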