os.remove

Here are examples of the Python API os.remove, taken from open source projects. By voting up, you can indicate which examples are most useful and appropriate.
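
A minimal sketch of the pattern that recurs throughout these examples: removing a file that may no longer exist and suppressing the resulting OSError. The file name below is hypothetical.

import os
import contextlib

# Classic idiom, used in many of the examples on this page: attempt the
# removal and ignore the error raised when the file is already gone.
try:
    os.remove("stale_output.tmp")  # hypothetical file name
except OSError:
    pass

# Equivalent, more compact form on Python 3.4+:
with contextlib.suppress(FileNotFoundError):
    os.remove("stale_output.tmp")

Note that FileNotFoundError is a subclass of OSError, so the broader except OSError also swallows permission errors; catch the narrower type when that distinction matters.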

200 Examples

Example 151

Project: pdbtools
Source File: interface.py
def charmmWash(input_structures,calc_type="single",keep_temp=False,
               hbond=None,fix_atoms=True,num_steps=500):
    """
    Wash a structure through CHARMM, adding polar hydrogens and missing heavy
    atoms.
    
    Input:
        input_structures = [(file_contents,n_terminus,c_terminus),...]
            file_contents are the coordinates of a pdb file
            n_terminus and c_terminus are booleans
        calc_type = type of hydrogen minimization to do
        keep_temp = whether or not to keep temporary files.
        hbond = file to write out hbonds to.  (If hbonds are calculated, the
            structure changes...)
        fix_atoms = whether or not to fix atoms that were in the original
            file.
        num_steps = number of minimization steps.
    """
       
    # Convert all structures in input_structures to charmm-readable files
    struct_input = []    
    for index, structure in enumerate(input_structures):
    
        pdb = structure[0]
        n_terminus = structure[1]
        c_terminus = structure[2]
        
        tmp_file = "%i_tmp.pdb" % index
        g = open(tmp_file,'w')
        g.writelines(pdb)
        g.close()
        
        struct_input.append((tmp_file,n_terminus,c_terminus))
  
    # Make sure that there are not too many input files for CHARMM.  If too 
    # many files are passed, CHARMM doesn't raise an error; it just idles. 
    # To prevent the stall, I've made it a hard error.
    if len(struct_input) > 25:
        err = "There are too many input files (%i) " % len(struct_input)
        err += "for CHARMM.  Try splitting the input pdb into parts."
        raise CharmmInterfaceError(err)
 
    # Create CHARMM
    input = gen_input.createCharmmFile(struct_input,calc_type,hbond,
                                       fix_atoms=fix_atoms,
                                       num_steps=num_steps)
    
    # Run CHARMM
    charmm_out = runCharmm(input)
    
    # Try to read CHARMM output coordinates
    try:
        f = open("out.cor","r")
        coord_out = f.readlines()
        f.close()
    except IOError:
        
        f = open("charmm.inp","w")
        f.write(input)
        f.close()
        
        f = open("charmm.out","w")
        f.writelines(charmm_out)
        f.close()
        
        err = ["It appears that CHARMM has failed.\n"]
        err.append("   Input written to: charmm.inp\n")
        err.append("   Output writen to: charmm.out")
        raise CharmmInterfaceError("".join(err))
    
    # Convert CHARMM coordinates to a standard pdb format
    pdb_out = charmm2pdb(coord_out)
    
    # Delete temporary files
    if not keep_temp:
        tmp_files = [s[0] for s in struct_input]
        tmp_files.append("out.cor")
        for f in tmp_files:
            try:
                os.remove(f)
            except OSError:
                pass
    else:
        f = open("charmm.inp","w")
        f.write(input)
        f.close()
        
        f = open("charmm.out","w")
        f.writelines(charmm_out)
        f.close()
    
    return pdb_out

Example 152

Project: HPOlib
Source File: test_runsolver_wrapper.py
    def test_get_trial_index_cv(self):
        try:
            os.remove("test_get_trial_index.pkl")
        except OSError:
            pass

        try:
            os.remove("test_get_trial_index.pkl.lock")
        except OSError:
            pass

        experiment = Experiment.Experiment(".", "test_get_trial_index", folds=5)
        params0 = {"x": "1"}
        params1 = {"x": "2"}
        params2 = {"x": "3"}
        params3 = {"x": "4"}
        params4 = {"x": "5"}

        trial_index0 = runsolver_wrapper.get_trial_index(experiment, 0, params0)
        self.assertEqual(trial_index0, 0)
        experiment.set_one_fold_running(trial_index0, 0)
        experiment.set_one_fold_complete(trial_index0, 0, 1, 1)
        self.assertEqual(trial_index0,
                         runsolver_wrapper.get_trial_index(experiment, 1, params0))
        experiment.set_one_fold_running(trial_index0, 1)
        experiment.set_one_fold_complete(trial_index0, 1, 1, 1)
        self.assertEqual(trial_index0,
                         runsolver_wrapper.get_trial_index(experiment, 2, params0))
        experiment.set_one_fold_running(trial_index0, 2)
        experiment.set_one_fold_complete(trial_index0, 2, 1, 1)
        self.assertEqual(trial_index0,
                         runsolver_wrapper.get_trial_index(experiment, 3, params0))
        experiment.set_one_fold_running(trial_index0, 3)
        experiment.set_one_fold_complete(trial_index0, 3, 1, 1)
        self.assertEqual(trial_index0,
                         runsolver_wrapper.get_trial_index(experiment, 4, params0))
        experiment.set_one_fold_running(trial_index0, 4)
        experiment.set_one_fold_complete(trial_index0, 4, 1, 1)

        trial_index1 = runsolver_wrapper.get_trial_index(experiment, 0, params1)
        self.assertEqual(trial_index1, 1)
        experiment.set_one_fold_running(trial_index1, 0)
        experiment.set_one_fold_complete(trial_index1, 0, 1, 1)
        self.assertEqual(trial_index1,
                         runsolver_wrapper.get_trial_index(experiment, 1, params1))
        experiment.set_one_fold_running(trial_index1, 1)
        experiment.set_one_fold_complete(trial_index1, 1, 1, 1)
        self.assertEqual(trial_index1,
                         runsolver_wrapper.get_trial_index(experiment, 2, params1))
        experiment.set_one_fold_running(trial_index1, 2)
        experiment.set_one_fold_complete(trial_index1, 2, 1, 1)
        self.assertEqual(trial_index1,
                         runsolver_wrapper.get_trial_index(experiment, 3, params1))
        experiment.set_one_fold_running(trial_index1, 3)
        experiment.set_one_fold_complete(trial_index1, 3, 1, 1)
        self.assertEqual(trial_index1,
                         runsolver_wrapper.get_trial_index(experiment, 4, params1))
        experiment.set_one_fold_running(trial_index1, 4)
        experiment.set_one_fold_complete(trial_index1, 4, 1, 1)

        trial_index2 = runsolver_wrapper.get_trial_index(experiment, 0, params2)
        self.assertEqual(trial_index2, 2)
        experiment.set_one_fold_running(trial_index2, 0)
        experiment.set_one_fold_complete(trial_index2, 0, 1, 1)

        trial_index3 = runsolver_wrapper.get_trial_index(experiment, 0, params3)
        self.assertEqual(trial_index3, 3)
        experiment.set_one_fold_running(trial_index3, 0)
        experiment.set_one_fold_complete(trial_index3, 0, 1, 1)

        trial_index4 = runsolver_wrapper.get_trial_index(experiment, 0, params4)
        self.assertEqual(trial_index4, 4)
        experiment.set_one_fold_running(trial_index4, 0)
        experiment.set_one_fold_complete(trial_index4, 0, 1, 1)

        self.assertEqual(trial_index2,
                         runsolver_wrapper.get_trial_index(experiment, 3, params2))
        self.assertEqual(trial_index4,
                         runsolver_wrapper.get_trial_index(experiment, 4, params4))

        # Since params1 were already evaluated, this should be a new trial_index
        trial_index_test1 = runsolver_wrapper.get_trial_index(experiment, 0, params1)
        self.assertEqual(trial_index_test1, 5)

Example 153

Project: mne-python
Source File: utils.py
def _data_path(path=None, force_update=False, update_path=True, download=True,
               name=None, check_version=False, return_version=False,
               archive_name=None):
    """Aux function."""
    key = {
        'fake': 'MNE_DATASETS_FAKE_PATH',
        'misc': 'MNE_DATASETS_MISC_PATH',
        'sample': 'MNE_DATASETS_SAMPLE_PATH',
        'spm': 'MNE_DATASETS_SPM_FACE_PATH',
        'somato': 'MNE_DATASETS_SOMATO_PATH',
        'brainstorm': 'MNE_DATASETS_BRAINSTORM_PATH',
        'testing': 'MNE_DATASETS_TESTING_PATH',
        'multimodal': 'MNE_DATASETS_MULTIMODAL_PATH',
    }[name]

    path = _get_path(path, key, name)
    # To update the testing or misc dataset, push commits, then make a new
    # release on GitHub. Then update the "releases" variable:
    releases = dict(testing='0.26', misc='0.3')
    # And also update the "hashes['testing']" variable below.

    # To update any other dataset, update the data archive itself (upload
    # an updated version) and update the hash.
    archive_names = dict(
        misc='mne-misc-data-%s.tar.gz' % releases['misc'],
        sample='MNE-sample-data-processed.tar.gz',
        somato='MNE-somato-data.tar.gz',
        spm='MNE-spm-face.tar.gz',
        testing='mne-testing-data-%s.tar.gz' % releases['testing'],
        multimodal='MNE-multimodal-data.tar.gz',
        fake='foo.tgz',
    )
    if archive_name is not None:
        archive_names.update(archive_name)
    folder_names = dict(
        brainstorm='MNE-brainstorm-data',
        fake='foo',
        misc='MNE-misc-data',
        sample='MNE-sample-data',
        somato='MNE-somato-data',
        multimodal='MNE-multimodal-data',
        spm='MNE-spm-face',
        testing='MNE-testing-data',
    )
    urls = dict(
        brainstorm='https://mne-tools.s3.amazonaws.com/datasets/'
                   'MNE-brainstorm-data/%s',
        fake='https://github.com/mne-tools/mne-testing-data/raw/master/'
             'datasets/%s',
        misc='https://codeload.github.com/mne-tools/mne-misc-data/'
             'tar.gz/%s' % releases['misc'],
        sample="https://mne-tools.s3.amazonaws.com/datasets/%s",
        somato='https://mne-tools.s3.amazonaws.com/datasets/%s',
        spm='https://mne-tools.s3.amazonaws.com/datasets/%s',
        testing='https://codeload.github.com/mne-tools/mne-testing-data/'
                'tar.gz/%s' % releases['testing'],
        multimodal='https://ndownloader.figshare.com/files/5999598',
    )
    hashes = dict(
        brainstorm=None,
        fake='3194e9f7b46039bb050a74f3e1ae9908',
        misc='d822a720ef94302467cb6ad1d320b669',
        sample='1d5da3a809fded1ef5734444ab5bf857',
        somato='f3e3a8441477bb5bacae1d0c6e0964fb',
        spm='f61041e3f3f2ba0def8a2ca71592cc41',
        testing='4e0d069249081135076daebf57043a54',
        multimodal='26ec847ae9ab80f58f204d09e2c08367',
    )
    folder_origs = dict(  # not listed means None
        misc='mne-misc-data-%s' % releases['misc'],
        testing='mne-testing-data-%s' % releases['testing'],
    )
    folder_name = folder_names[name]
    archive_name = archive_names[name]
    hash_ = hashes[name]
    url = urls[name]
    folder_orig = folder_origs.get(name, None)
    if '%s' in url:
        url = url % archive_name

    folder_path = op.join(path, folder_name)
    if name == 'brainstorm':
        extract_path = folder_path
        folder_path = op.join(folder_path, archive_names[name].split('.')[0])

    rm_archive = False
    martinos_path = '/cluster/fusion/sample_data/' + archive_name
    neurospin_path = '/neurospin/tmp/gramfort/' + archive_name

    if not op.exists(folder_path) and not download:
        return ''
    if not op.exists(folder_path) or force_update:
        if name == 'brainstorm':
            if '--accept-brainstorm-license' in sys.argv:
                answer = 'y'
            else:
                answer = input('%sAgree (y/[n])? ' % _bst_license_text)
            if answer.lower() != 'y':
                raise RuntimeError('You must agree to the license to use this '
                                   'dataset')
        logger.info('Downloading or reinstalling '
                    'data archive %s at location %s' % (archive_name, path))

        if op.exists(martinos_path):
            archive_name = martinos_path
        elif op.exists(neurospin_path):
            archive_name = neurospin_path
        else:
            archive_name = op.join(path, archive_name)
            rm_archive = True
            fetch_archive = True
            if op.exists(archive_name):
                msg = ('Archive already exists. Overwrite it (y/[n])? ')
                answer = input(msg)
                if answer.lower() == 'y':
                    os.remove(archive_name)
                else:
                    fetch_archive = False

            if fetch_archive:
                _fetch_file(url, archive_name, print_destination=False,
                            hash_=hash_)

        if op.exists(folder_path):
            def onerror(func, path, exc_info):
                """Deal with access errors (e.g. testing dataset read-only)."""
                # Is the error an access error?
                do = False
                if not os.access(path, os.W_OK):
                    perm = os.stat(path).st_mode | stat.S_IWUSR
                    os.chmod(path, perm)
                    do = True
                if not os.access(op.dirname(path), os.W_OK):
                    dir_perm = (os.stat(op.dirname(path)).st_mode |
                                stat.S_IWUSR)
                    os.chmod(op.dirname(path), dir_perm)
                    do = True
                if do:
                    func(path)
                else:
                    raise
            shutil.rmtree(folder_path, onerror=onerror)

        logger.info('Decompressing the archive: %s' % archive_name)
        logger.info('(please be patient, this can take some time)')
        for ext in ['gz', 'bz2']:  # informed guess (and the only 2 options).
            try:
                if name != 'brainstorm':
                    extract_path = path
                tf = tarfile.open(archive_name, 'r:%s' % ext)
                tf.extractall(path=extract_path)
                tf.close()
                break
            except tarfile.ReadError as err:
                logger.info('%s is %s trying "bz2"' % (archive_name, err))
        if folder_orig is not None:
            shutil.move(op.join(path, folder_orig), folder_path)

        if rm_archive:
            os.remove(archive_name)

    path = _do_path_update(path, update_path, key, name)
    path = op.join(path, folder_name)

    # compare the version of the dataset and mne
    data_version = _dataset_version(path, name)
    try:
        from distutils.version import LooseVersion as LV
    except:
        warn('Could not determine %s dataset version; dataset could '
             'be out of date. Please install the "distutils" package.'
             % name)
    else:  # 0.7 < 0.7.git should be False, therefore strip
        if check_version and LV(data_version) < LV(mne_version.strip('.git')):
            warn('The {name} dataset (version {current}) is older than '
                 'mne-python (version {newest}). If the examples fail, '
                 'you may need to update the {name} dataset by using '
                 'mne.datasets.{name}.data_path(force_update=True)'.format(
                     name=name, current=data_version, newest=mne_version))
    return (path, data_version) if return_version else path

Example 154

Project: FriendlyTorrent
Source File: tftornado.py
def run(autoDie, shareKill, userName, params):

    try:

        h = HeadlessDisplayer()
        h.autoShutdown = autoDie
        h.shareKill = shareKill
        h.user = userName

        while 1:
            try:
                config = parse_params(params)
            except ValueError, e:
                print 'error: ' + str(e) + '\nrun with no args for parameter explanations'
                break
            if not config:
                print get_usage()
                break

            # log what we are starting up
            transferLog("tornado starting up :\n", True)
            transferLog(" - torrentfile : " + config['responsefile'] + "\n", True)
            transferLog(" - userName : " + userName + "\n", True)
            transferLog(" - transferStatFile : " + transferStatFile + "\n", True)
            transferLog(" - transferCommandFile : " + transferCommandFile + "\n", True)
            transferLog(" - transferLogFile : " + transferLogFile + "\n", True)
            transferLog(" - transferPidFile : " + transferPidFile + "\n", True)
            transferLog(" - autoDie : " + autoDie + "\n", True)
            transferLog(" - shareKill : " + shareKill + "\n", True)
            transferLog(" - minport : " + str(config['minport']) + "\n", True)
            transferLog(" - maxport : " + str(config['maxport']) + "\n", True)
            transferLog(" - max_upload_rate : " + str(config['max_upload_rate']) + "\n", True)
            transferLog(" - max_download_rate : " + str(config['max_download_rate']) + "\n", True)
            transferLog(" - min_uploads : " + str(config['min_uploads']) + "\n", True)
            transferLog(" - max_uploads : " + str(config['max_uploads']) + "\n", True)
            transferLog(" - min_peers : " + str(config['min_peers']) + "\n", True)
            transferLog(" - max_initiate : " + str(config['max_initiate']) + "\n", True)
            transferLog(" - max_connections : " + str(config['max_connections']) + "\n", True)
            transferLog(" - super_seeder : " + str(config['super_seeder']) + "\n", True)
            transferLog(" - security : " + str(config['security']) + "\n", True)
            transferLog(" - auto_kick : " + str(config['auto_kick']) + "\n", True)
            if 'crypto_allowed' in config:
                transferLog(" - crypto_allowed : " + str(config['crypto_allowed']) + "\n", True)
            if 'crypto_only' in config:
                transferLog(" - crypto_only : " + str(config['crypto_only']) + "\n", True)
            if 'crypto_stealth' in config:
                transferLog(" - crypto_stealth : " + str(config['crypto_stealth']) + "\n", True)
            transferLog(" - priority : " + str(config['priority']) + "\n", True)
            transferLog(" - alloc_type : " + str(config['alloc_type']) + "\n", True)
            transferLog(" - alloc_rate : " + str(config['alloc_rate']) + "\n", True)
            transferLog(" - buffer_reads : " + str(config['buffer_reads']) + "\n", True)
            transferLog(" - write_buffer_size : " + str(config['write_buffer_size']) + "\n", True)
            transferLog(" - check_hashes : " + str(config['check_hashes']) + "\n", True)
            transferLog(" - max_files_open : " + str(config['max_files_open']) + "\n", True)
            transferLog(" - upnp_nat_access : " + str(config['upnp_nat_access']) + "\n", True)

            # remove command-file if exists
            if isfile(transferCommandFile):
                try:
                    transferLog("removing command-file " + transferCommandFile + "...\n", True)
                    remove(transferCommandFile)
                except:
                    pass

            # write pid-file
            currentPid = (str(getpid())).strip()
            transferLog("writing pid-file : " + transferPidFile + " (" + currentPid + ")\n", True)
            try:
                pidFile = open(transferPidFile, 'w')
                pidFile.write(currentPid + "\n")
                pidFile.flush()
                pidFile.close()
            except Exception, e:
                transferLog("Failed to write pid-file, shutting down : " + transferPidFile + " (" + currentPid + ")" + "\n", True)
                break

            myid = createPeerID()
            seed(myid)

            doneflag = Event()
            def disp_exception(text):
                print text
            rawserver = RawServer(doneflag, config['timeout_check_interval'],
                              config['timeout'], ipv6_enable = config['ipv6_enabled'],
                              failfunc = h.failed, errorfunc = disp_exception)
            upnp_type = UPnP_test(config['upnp_nat_access'])
            while True:
                try:
                    listen_port = rawserver.find_and_bind(config['minport'], config['maxport'],
                                config['bind'], ipv6_socket_style = config['ipv6_binds_v4'],
                                upnp = upnp_type, randomizer = config['random_port'])
                    break
                except socketerror, e:
                    if upnp_type and e == UPnP_ERROR:
                        print 'WARNING: COULD NOT FORWARD VIA UPnP'
                        upnp_type = 0
                        continue
                    print "error: Couldn't listen - " + str(e)
                    h.failed()
                    return

            response = get_response(config['responsefile'], config['url'], h.error)
            if not response:
                break

            infohash = sha1(bencode(response['info'])).digest()

            h.dow = BT1Download(h.display, h.finished, h.error, disp_exception, doneflag,
                        config, response, infohash, myid, rawserver, listen_port)

            if not h.dow.saveAs(h.chooseFile, h.newpath):
                break

            if not h.dow.initFiles(old_style = True):
                break

            if not h.dow.startEngine():
                h.dow.shutdown()
                break
            h.dow.startRerequester()
            h.dow.autoStats()
		
            if not h.dow.am_I_finished():
                h.display(activity = 'connecting to peers')

            # log that we are done with startup
            transferLog("tornado up and running.\n", True)

            # listen forever
            rawserver.listen_forever(h.dow.getPortHandler())

            # shutdown
            h.display(activity = 'shutting down')
            h.dow.shutdown()
            break

        try:
            rawserver.shutdown()
        except:
            pass

        if not h.done:
            h.failed()

    finally:
        transferLog("removing pid-file : " + transferPidFile + "\n", True)
        try:
            remove(transferPidFile)
        except:
            transferLog("Failed to remove pid-file : " + transferPidFile + "\n", True)
            pass

Example 155

Project: mne-python
Source File: test_ica.py
@slow_test
@requires_sklearn
def test_ica_additional():
    """Test additional ICA functionality."""
    import matplotlib.pyplot as plt
    tempdir = _TempDir()
    stop2 = 500
    raw = read_raw_fif(raw_fname).crop(1.5, stop).load_data()
    # XXX This breaks the tests :(
    # raw.info['bads'] = [raw.ch_names[1]]
    test_cov = read_cov(test_cov_name)
    events = read_events(event_name)
    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
                       eog=False, exclude='bads')
    epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0), preload=True)
    # test if n_components=None works
    with warnings.catch_warnings(record=True):
        ica = ICA(n_components=None,
                  max_pca_components=None,
                  n_pca_components=None, random_state=0)
        ica.fit(epochs, picks=picks, decim=3)
    # for testing eog functionality
    picks2 = pick_types(raw.info, meg=True, stim=False, ecg=False,
                        eog=True, exclude='bads')
    epochs_eog = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks2,
                        baseline=(None, 0), preload=True)

    test_cov2 = test_cov.copy()
    ica = ICA(noise_cov=test_cov2, n_components=3, max_pca_components=4,
              n_pca_components=4)
    assert_true(ica.info is None)
    with warnings.catch_warnings(record=True):
        ica.fit(raw, picks[:5])
    assert_true(isinstance(ica.info, Info))
    assert_true(ica.n_components_ < 5)

    ica = ICA(n_components=3, max_pca_components=4,
              n_pca_components=4)
    assert_raises(RuntimeError, ica.save, '')
    with warnings.catch_warnings(record=True):
        ica.fit(raw, picks=[1, 2, 3, 4, 5], start=start, stop=stop2)

    # test corrmap
    ica2 = ica.copy()
    ica3 = ica.copy()
    corrmap([ica, ica2], (0, 0), threshold='auto', label='blinks', plot=True,
            ch_type="mag")
    corrmap([ica, ica2], (0, 0), threshold=2, plot=False, show=False)
    assert_true(ica.labels_["blinks"] == ica2.labels_["blinks"])
    assert_true(0 in ica.labels_["blinks"])
    template = _get_ica_map(ica)[0]
    corrmap([ica, ica3], template, threshold='auto', label='blinks', plot=True,
            ch_type="mag")
    assert_true(ica2.labels_["blinks"] == ica3.labels_["blinks"])
    plt.close('all')

    # test warnings on bad filenames
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        ica_badname = op.join(op.dirname(tempdir), 'test-bad-name.fif.gz')
        ica.save(ica_badname)
        read_ica(ica_badname)
    assert_naming(w, 'test_ica.py', 2)

    # test decim
    ica = ICA(n_components=3, max_pca_components=4,
              n_pca_components=4)
    raw_ = raw.copy()
    for _ in range(3):
        raw_.append(raw_)
    n_samples = raw_._data.shape[1]
    with warnings.catch_warnings(record=True):
        ica.fit(raw, picks=None, decim=3)
    assert_equal(raw_._data.shape[1], n_samples)

    # test expl var
    ica = ICA(n_components=1.0, max_pca_components=4,
              n_pca_components=4)
    with warnings.catch_warnings(record=True):
        ica.fit(raw, picks=None, decim=3)
    assert_true(ica.n_components_ == 4)
    ica_var = _ica_explained_variance(ica, raw, normalize=True)
    assert_true(np.all(ica_var[:-1] >= ica_var[1:]))

    # test ica sorting
    ica.exclude = [0]
    ica.labels_ = dict(blink=[0], think=[1])
    ica_sorted = _sort_components(ica, [3, 2, 1, 0], copy=True)
    assert_equal(ica_sorted.exclude, [3])
    assert_equal(ica_sorted.labels_, dict(blink=[3], think=[2]))

    # epochs extraction from raw fit
    assert_raises(RuntimeError, ica.get_sources, epochs)
    # test reading and writing
    test_ica_fname = op.join(op.dirname(tempdir), 'test-ica.fif')
    for cov in (None, test_cov):
        ica = ICA(noise_cov=cov, n_components=2, max_pca_components=4,
                  n_pca_components=4)
        with warnings.catch_warnings(record=True):  # ICA does not converge
            ica.fit(raw, picks=picks, start=start, stop=stop2)
        sources = ica.get_sources(epochs).get_data()
        assert_true(ica.mixing_matrix_.shape == (2, 2))
        assert_true(ica.unmixing_matrix_.shape == (2, 2))
        assert_true(ica.pca_components_.shape == (4, len(picks)))
        assert_true(sources.shape[1] == ica.n_components_)

        for exclude in [[], [0]]:
            ica.exclude = exclude
            ica.labels_ = {'foo': [0]}
            ica.save(test_ica_fname)
            ica_read = read_ica(test_ica_fname)
            assert_true(ica.exclude == ica_read.exclude)
            assert_equal(ica.labels_, ica_read.labels_)
            ica.exclude = []
            ica.apply(raw, exclude=[1])
            assert_true(ica.exclude == [])

            ica.exclude = [0, 1]
            ica.apply(raw, exclude=[1])
            assert_true(ica.exclude == [0, 1])

            ica_raw = ica.get_sources(raw)
            assert_true(ica.exclude == [ica_raw.ch_names.index(e) for e in
                                        ica_raw.info['bads']])

        # test filtering
        d1 = ica_raw._data[0].copy()
        ica_raw.filter(4, 20, l_trans_bandwidth='auto',
                       h_trans_bandwidth='auto', filter_length='auto',
                       phase='zero', fir_window='hamming')
        assert_equal(ica_raw.info['lowpass'], 20.)
        assert_equal(ica_raw.info['highpass'], 4.)
        assert_true((d1 != ica_raw._data[0]).any())
        d1 = ica_raw._data[0].copy()
        ica_raw.notch_filter([10], filter_length='auto', trans_bandwidth=10,
                             phase='zero', fir_window='hamming')
        assert_true((d1 != ica_raw._data[0]).any())

        ica.n_pca_components = 2
        ica.method = 'fake'
        ica.save(test_ica_fname)
        ica_read = read_ica(test_ica_fname)
        assert_true(ica.n_pca_components == ica_read.n_pca_components)
        assert_equal(ica.method, ica_read.method)
        assert_equal(ica.labels_, ica_read.labels_)

        # check type consistency
        attrs = ('mixing_matrix_ unmixing_matrix_ pca_components_ '
                 'pca_explained_variance_ _pre_whitener')

        def f(x, y):
            return getattr(x, y).dtype

        for attr in attrs.split():
            assert_equal(f(ica_read, attr), f(ica, attr))

        ica.n_pca_components = 4
        ica_read.n_pca_components = 4

        ica.exclude = []
        ica.save(test_ica_fname)
        ica_read = read_ica(test_ica_fname)
        for attr in ['mixing_matrix_', 'unmixing_matrix_', 'pca_components_',
                     'pca_mean_', 'pca_explained_variance_',
                     '_pre_whitener']:
            assert_array_almost_equal(getattr(ica, attr),
                                      getattr(ica_read, attr))

        assert_true(ica.ch_names == ica_read.ch_names)
        assert_true(isinstance(ica_read.info, Info))

        sources = ica.get_sources(raw)[:, :][0]
        sources2 = ica_read.get_sources(raw)[:, :][0]
        assert_array_almost_equal(sources, sources2)

        _raw1 = ica.apply(raw, exclude=[1])
        _raw2 = ica_read.apply(raw, exclude=[1])
        assert_array_almost_equal(_raw1[:, :][0], _raw2[:, :][0])

    os.remove(test_ica_fname)
    # check score funcs
    for name, func in get_score_funcs().items():
        if name in score_funcs_unsuited:
            continue
        scores = ica.score_sources(raw, target='EOG 061', score_func=func,
                                   start=0, stop=10)
        assert_true(ica.n_components_ == len(scores))

    # check univariate stats
    scores = ica.score_sources(raw, score_func=stats.skew)
    # check exception handling
    assert_raises(ValueError, ica.score_sources, raw,
                  target=np.arange(1))

    params = []
    params += [(None, -1, slice(2), [0, 1])]  # variance, kurtosis idx params
    params += [(None, 'MEG 1531')]  # ECG / EOG channel params
    for idx, ch_name in product(*params):
        ica.detect_artifacts(raw, start_find=0, stop_find=50, ecg_ch=ch_name,
                             eog_ch=ch_name, skew_criterion=idx,
                             var_criterion=idx, kurt_criterion=idx)
    with warnings.catch_warnings(record=True):
        idx, scores = ica.find_bads_ecg(raw, method='ctps')
        assert_equal(len(scores), ica.n_components_)
        idx, scores = ica.find_bads_ecg(raw, method='correlation')
        assert_equal(len(scores), ica.n_components_)

        idx, scores = ica.find_bads_eog(raw)
        assert_equal(len(scores), ica.n_components_)

        ica.labels_ = None
        idx, scores = ica.find_bads_ecg(epochs, method='ctps')
        assert_equal(len(scores), ica.n_components_)
        assert_raises(ValueError, ica.find_bads_ecg, epochs.average(),
                      method='ctps')
        assert_raises(ValueError, ica.find_bads_ecg, raw,
                      method='crazy-coupling')

        raw.info['chs'][raw.ch_names.index('EOG 061') - 1]['kind'] = 202
        idx, scores = ica.find_bads_eog(raw)
        assert_true(isinstance(scores, list))
        assert_equal(len(scores[0]), ica.n_components_)

    # check score funcs
    for name, func in get_score_funcs().items():
        if name in score_funcs_unsuited:
            continue
        scores = ica.score_sources(epochs_eog, target='EOG 061',
                                   score_func=func)
        assert_true(ica.n_components_ == len(scores))

    # check univariate stats
    scores = ica.score_sources(epochs, score_func=stats.skew)

    # check exception handling
    assert_raises(ValueError, ica.score_sources, epochs,
                  target=np.arange(1))

    # ecg functionality
    ecg_scores = ica.score_sources(raw, target='MEG 1531',
                                   score_func='pearsonr')

    with warnings.catch_warnings(record=True):  # filter attenuation warning
        ecg_events = ica_find_ecg_events(raw,
                                         sources[np.abs(ecg_scores).argmax()])

    assert_true(ecg_events.ndim == 2)

    # eog functionality
    eog_scores = ica.score_sources(raw, target='EOG 061',
                                   score_func='pearsonr')
    with warnings.catch_warnings(record=True):  # filter attenuation warning
        eog_events = ica_find_eog_events(raw,
                                         sources[np.abs(eog_scores).argmax()])

    assert_true(eog_events.ndim == 2)

    # Test ica fiff export
    ica_raw = ica.get_sources(raw, start=0, stop=100)
    assert_true(ica_raw.last_samp - ica_raw.first_samp == 100)
    assert_true(len(ica_raw._filenames) == 0)  # API consistency
    ica_chans = [ch for ch in ica_raw.ch_names if 'ICA' in ch]
    assert_true(ica.n_components_ == len(ica_chans))
    test_ica_fname = op.join(op.abspath(op.curdir), 'test-ica_raw.fif')
    ica.n_components = np.int32(ica.n_components)
    ica_raw.save(test_ica_fname, overwrite=True)
    ica_raw2 = read_raw_fif(test_ica_fname, preload=True)
    assert_allclose(ica_raw._data, ica_raw2._data, rtol=1e-5, atol=1e-4)
    ica_raw2.close()
    os.remove(test_ica_fname)

    # Test ica epochs export
    ica_epochs = ica.get_sources(epochs)
    assert_true(ica_epochs.events.shape == epochs.events.shape)
    ica_chans = [ch for ch in ica_epochs.ch_names if 'ICA' in ch]
    assert_true(ica.n_components_ == len(ica_chans))
    assert_true(ica.n_components_ == ica_epochs.get_data().shape[1])
    assert_true(ica_epochs._raw is None)
    assert_true(ica_epochs.preload is True)

    # test float n pca components
    ica.pca_explained_variance_ = np.array([0.2] * 5)
    ica.n_components_ = 0
    for ncomps, expected in [[0.3, 1], [0.9, 4], [1, 1]]:
        ncomps_ = ica._check_n_pca_components(ncomps)
        assert_true(ncomps_ == expected)

Example 156

Project: easytf2_mapper
Source File: createPrefab.py
def create(name, prefab_name, prefab_text, prefab_icon, rot_enabled, workshop_export,indexLine,index):
  if indexLine == 'END':
    insertBool = False
  else:
    insertBool = True

  py_list = []
  ent_py_list = []
  rot_py_list = []
  rot_ent_py_list = []
  txt_list = []
  ent_list = []
  num_list = []
  id_num_list = []
  id_value_list = []
  value_list = []
  compile_list = [
  """import os
import math

def rotatePoint(centerPoint,point,angle):
    angle = math.radians(angle)
    temp_point = point[0]-centerPoint[0] , point[1]-centerPoint[1]
    temp_point = ( temp_point[0]*math.cos(angle)-temp_point[1]*math.sin(angle) , temp_point[0]*math.sin(angle)+temp_point[1]*math.cos(angle))
    temp_point = temp_point[0]+centerPoint[0] , temp_point[1]+centerPoint[1]
    return temp_point

def createTile(posx, posy, id_num, world_id_num, entity_num, placeholder_list, rotation, level):
    
    looplist = '1'
    values=[]#Values are all of the lines of a prefab that have the vertex coords
""",

  "#INSERT_OPEN_FILE\n",

  """
    lines = f.readlines() #gathers each line of the prefab and numbers them
""",

  "#INSERT_ROT_IF\n",

  "#INSERT_PY_LIST\n",
  
  "#INSERT_ROT_CODE\n",

  "#INSERT_VAR_COUNT\n",

  """
    values = "".join(lines)#converting list to string
    ogvalues = "".join(lines)

    normal_list,axislist,negaxislist,vaxis,uaxis=[],['1 0 0 1','0 1 0 1','0 0 1 1'],['-1 0 0 1','0 -1 0 1','0 0 -1 1'],0,0
    def evaluate(coords):
        dist_x,dist_y,dist_z = abs(coords[0]),abs(coords[1]),abs(coords[2]),
        if dist_x >= dist_y and dist_x >= dist_z:
            return axislist[0]
        if dist_y >= dist_z:
            return axislist[1]
        return axislist[2]

    def get_normal(coord_list):
        vector_a = (coord_list[1][0]-coord_list[0][0],coord_list[1][1]-coord_list[0][1],coord_list[1][2]-coord_list[0][2])
        vector_b = (coord_list[2][0]-coord_list[0][0],coord_list[2][1]-coord_list[0][1],coord_list[2][2]-coord_list[0][2])
        
        normal = (vector_a[1]*vector_b[2]-vector_a[2]*vector_b[1],vector_a[2]*vector_b[0]-vector_a[0]*vector_b[2],vector_a[0]*vector_b[1]-vector_a[1]*vector_b[0])
        return normal
    
    for normal_num in range(1,var_count+1,3):
        normal_list=[]
        for i in range(3):
            normal_list.append([])
            for var in ["x", "y", "z"]:
                normal_list[i].append(eval(var+str(normal_num+i)))
        coords = get_normal(normal_list)  
        response = evaluate(coords)
        if response == axislist[0]:
            uaxis = axislist[1]
        else:
            uaxis = axislist[0]
        if response == axislist[2]:
            vaxis = negaxislist[1]
        else:
            vaxis = negaxislist[2]
        values = values.replace('AXIS_REPLACE_U',uaxis,1)
        values = values.replace('AXIS_REPLACE_V',vaxis,1)
    
    for i in range(ogvalues.count("world_idnum")):
        values = values.replace('world_idnum', str(world_id_num), 1)
        world_id_num += 1
    
    for var in ["x", "y", "z"]:
        for count in range(1,var_count+1):
            string = var + str(count)
            string_var = str(eval(var + str(count)))

            if var == "z":
                values = values.replace(string + ")",string_var + ")") #we need to do this or else it will mess up on 2 digit numbers
            else:
                values = values.replace(string + " ",string_var + " ")

    for i in range(ogvalues.count('id_num')):
        values = values.replace('id_num', str(id_num), 1)
        id_num = id_num+1
        if "ROTATION_RIGHT" in values:
            if rotation == 0:
                values = values.replace("ROTATION_RIGHT","0 0 0",1)
            elif rotation == 1:
                values = values.replace("ROTATION_RIGHT","0 270 0",1)
            elif rotation == 2:
                values = values.replace("ROTATION_RIGHT","0 180 0",1)
            elif rotation == 3:
                values = values.replace("ROTATION_RIGHT","0 90 0",1)
        if "ROTATION_UP" in values:
            if rotation == 0:
                values = values.replace("ROTATION_UP","0 90 0",1)
            elif rotation == 1:
                values = values.replace("ROTATION_UP","0 0 0",1)
            elif rotation == 2:
                values = values.replace("ROTATION_UP","0 270 0",1)
            elif rotation == 3:
                values = values.replace("ROTATION_UP","0 180 0",1)
        if "ROTATION_LEFT" in values:
            if rotation == 0:
                values = values.replace("ROTATION_LEFT","0 180 0",1)
            elif rotation == 1:
                values = values.replace("ROTATION_LEFT","0 90 0",1)
            elif rotation == 2:
                values = values.replace("ROTATION_LEFT","0 0 0",1)
            elif rotation == 3:
                values = values.replace("ROTATION_LEFT","0 270 0",1)
        if "ROTATION_DOWN" in values:
            if rotation == 0:
                values = values.replace("ROTATION_DOWN","0 270 0",1)
            elif rotation == 1:
                values = values.replace("ROTATION_DOWN","0 180 0",1)
            elif rotation == 2:
                values = values.replace("ROTATION_DOWN","0 90 0",1)
            elif rotation == 3:
                values = values.replace("ROTATION_DOWN","0 0 0",1)

    values = values.replace('"[0 0 0 1] 0.25"','"[1 1 1 1] 0.25"')
    values = values.replace('"[0 0 1 0] 0.25"','"[1 1 1 1] 0.25"')
    values = values.replace('"[0 1 0 0] 0.25"','"[1 1 1 1] 0.25"')       
    values = values.replace('"[1 0 0 0] 0.25"','"[1 1 1 1] 0.25"')
        
""",

  "#INSERT_ENT_CODE\n",
  
  ]

  ent_code =["#INSERT_ENT_OPEN_FILE\n",

             """
    lines_ent = g.readlines()
""",

             "#INSERT_ROT_IF\n",

             "#INSERT_ENT_PY_LIST\n",

             "#INSERT_ROT_ENT_CODE\n",
             
             "#INSERT_ENT_VAR_COUNT\n",

"""
    ent_values = "".join(lines_ent)
    ent_values_split = ent_values.split("\\"")
    valcount = "".join(lines_ent)

    for item in ent_values_split:
        if "entity_name" in item or "parent_name" in item or "door_large" in item:
            placeholder_list.append(item)

    for i in range(valcount.count('world_idnum')):
        ent_values = ent_values.replace('world_idnum', str(world_id_num), 1)
        world_id_num += 1

    for var in ["px", "py", "pz"]:
        for count in range(1,ent_var_count+1):
            string = var + str(count)
            string_var = str(eval(var + str(count)))

            if var == "pz":
                ent_values = ent_values.replace(string + "\\"",string_var + "\\"") #we need to do this or else it will mess up on 2 digit numbers
            else:
                ent_values = ent_values.replace(string + " ",string_var + " ")
                
    for var in ["x", "y", "z"]:
        for count in range(1,var_count+1):
            try:
                string = var + str(count)
                string_var = str(eval(var + str(count)))
                if var == "z":
                    ent_values = ent_values.replace(string + ")",string_var + ")") #we need to do this or else it will mess up on 2 digit numbers
                else:
                    ent_values = ent_values.replace(string + " ",string_var + " ")
            except:
                pass

    for i in range(valcount.count('id_num')):
        ent_values = ent_values.replace('id_num', str(id_num), 1)
        id_num = id_num+1

    for i in range(int(valcount.count('laser_target')/2)):
        if "laser_target_plac" in ent_values:
            ent_values = ent_values.replace("laser_target_plac", "laser_target" + str(entity_num), 2)
            entity_num += 1

    for i in range(int(valcount.count('sound'))):
        if "sound_plac" in ent_values:
            ent_values = ent_values.replace("sound_plac", "AmbSound"+str(entity_num), 2)
            ent_values = ent_values.replace("relay_plac", "LogicRelay"+str(entity_num),2)
            entity_num += 1

    for i in range(valcount.count("entity_name")):
        try:
            ent_values = ent_values.replace("entity_name", "entity" + str(entity_num), 1)
            ent_values = ent_values.replace("entity_same", "entity" + str(entity_num), 1)
            if "parent_name" in placeholder_list[entity_num]:
                ent_values = ent_values.replace("parent_name", "entity" + str(entity_num), 1)
                placeholder_list.remove(placeholder_list[entity_num])
            
            if "door_large" in ent_values:
                ent_values = ent_values.replace("door_large", "door_large" + str(entity_num), 4)
            if "\\"respawn_name\\"" in ent_values:
                ent_values = ent_values.replace("\\"respawn_name\\"", "\\"respawn_name" + str(entity_num) + "\\"", 2)
            entity_num += 1
        except Exception as e:
            print(str(e))

    for i in range(valcount.count("ROTATION")):
        if "ROTATION_RIGHT" in ent_values:
            if rotation == 0:
                ent_values = ent_values.replace("ROTATION_RIGHT","0 0 0",1)
            elif rotation == 1:
                ent_values = ent_values.replace("ROTATION_RIGHT","0 270 0",1)
            elif rotation == 2:
                ent_values = ent_values.replace("ROTATION_RIGHT","0 180 0 ",1)
            elif rotation == 3:
                ent_values = ent_values.replace("ROTATION_RIGHT","0 90 0",1)
        if "ROTATION_LEFT" in ent_values:
            if rotation == 0:
                ent_values = ent_values.replace("ROTATION_LEFT","0 180 0",1)
            elif rotation == 1:
                ent_values = ent_values.replace("ROTATION_LEFT","0 90 0",1)
            elif rotation == 2:
                ent_values = ent_values.replace("ROTATION_LEFT","0 0 0",1)
            elif rotation == 3:
                ent_values = ent_values.replace("ROTATION_LEFT","0 270 0",1)
        if "ROTATION_DOWN" in ent_values:
            if rotation == 0:
                ent_values = ent_values.replace("ROTATION_DOWN","0 270 0",1)
            elif rotation == 1:
                ent_values = ent_values.replace("ROTATION_DOWN","0 180 0",1)
            elif rotation == 2:
                ent_values = ent_values.replace("ROTATION_DOWN","0 90 0",1)
            elif rotation == 3:
                ent_values = ent_values.replace("ROTATION_DOWN","0 0 0",1)
        if "ROTATION_UP" in ent_values:
            if rotation == 0:
                ent_values = ent_values.replace("ROTATION_UP","0 90 0",1)
            elif rotation == 1:
                ent_values = ent_values.replace("ROTATION_UP","0 0 0",1)
            elif rotation == 2:
                ent_values = ent_values.replace("ROTATION_UP","0 270 0",1)
            elif rotation == 3:
                ent_values = ent_values.replace("ROTATION_UP","0 180 0",1)

        entity_num += 1
"""]

  rot_code = [["""
    if rotation == 0:
""",
    "#INSERT_ROT_0_PY_LIST\n",
"""
    elif rotation == 1:
""",
    "#INSERT_ROT_1_PY_LIST\n",
"""
    elif rotation == 2:
""",
    "#INSERT_ROT_2_PY_LIST\n",
"""
    elif rotation == 3:
""",
    "#INSERT_ROT_3_PY_LIST\n"],

["""
    if rotation == 0:
""",
    "#INSERT_ROT_0_PY_LIST\n",
"""
    elif rotation == 1:
""",
    "#INSERT_ROT_1_PY_LIST\n",
"""
    elif rotation == 2:
""",
    "#INSERT_ROT_2_PY_LIST\n",
"""
    elif rotation == 3:
""",
    "#INSERT_ROT_3_PY_LIST\n"]]

  var_num = 1
  ent_var_num = 1
  contains_ent = False #True if there are entities in the vmf
  in_solid_block = False #True if in a solid code block
  in_entity_block = False #True if in an entity code block
  in_editor_block = False #True if in an editor code block
  in_connections_block = False #True if in a connections code block
  solid_to_ent = False #True if you want to put the solid block into ent_list
  black_list_var = False #True means it IS on the blacklist, False otherwise
  value_list_history = []
  #name = "prefab_template\godplsno.vmf" #name of the vmf file, changed to allow user to open a file
  file = open(name, "r")

  openlines = file.readlines()

  prefab_icon_list = prefab_icon.split("/")

  if "easytf2_mapper" in prefab_icon_list:
    del prefab_icon_list[ :prefab_icon_list.index("easytf2_mapper")+1]

    for index, item in enumerate(prefab_icon_list): #enumerate allows you to give 2 vars in the for loop
      if index != len(prefab_icon_list) - 1:
       prefab_icon_list[index] = item + "/" # add the "/" back into the filepath
        
  txt_path = "prefab_template/" + prefab_name + ".txt"
  ent_path = "prefab_template/" + prefab_name + "_entities.txt"
  py_path = "prefabs/" + prefab_name + ".py"
  loopernum = 0
  for line in openlines:

    which_list = "txt_list" if not solid_to_ent else "ent_list"
      
    if "\t" in line or "entity" in line:
      
      if in_solid_block and "\t}" not in line or in_solid_block and "\t\t" in line:
        if "(" not in line:

          if "\"id\"" not in line:
            if "\"uaxis\"" in line:
              quote_num = 0
              for letter in line:
                  if letter == "\"":
                    quote_num += 1
                  if quote_num != 3:
                    eval(which_list).append(letter)
                  elif letter == "\"":
                    eval(which_list).append(letter)
                              
              eval(which_list).insert(-2, "[AXIS_REPLACE_U] 0.25")
            elif "\"vaxis\"" in line:
              quote_num = 0
              for letter in line:
                  if letter == "\"":
                    quote_num += 1
                  if quote_num != 3:
                    eval(which_list).append(letter)
                  elif letter == "\"":
                    eval(which_list).append(letter)
                              
              eval(which_list).insert(-2, "[AXIS_REPLACE_V] 0.25")

            else:
              eval(which_list).append(line)

          elif "\t\t\"id\"" in line:
            for letter in line:
              try:
                number = int(letter)
              except ValueError:
                eval(which_list).append(letter)

            if "\t\t\t" in line:
              eval(which_list).insert(-2, "id_num") #need to insert because it creates a \n at the end of the line
            else: 
              eval(which_list).insert(-2, "world_idnum")
        
        elif "(" in line:
          for letter in line:
            try:
              number = int(letter)    
              num_list.append(letter)
            except ValueError:
              if letter != "-" and letter != ".":
                eval(which_list).append(letter)
              if letter == " ":
                num_list.append("SEPARATE")
              elif letter == ".":
                num_list.append(".")
              elif letter == "-":
                num_list.append("-")
              elif letter == ")":
                #print(num_list)
                write_var(num_list, eval(which_list), py_list, var_num, value_list_history, in_solid_block, in_entity_block, rot_py_list, rot_enabled) 
                var_num += 1
                num_list = []

      elif in_solid_block and "\t}" in line and "\t\t" not in line:
        in_solid_block = False

        eval(which_list).append(line)
        if solid_to_ent:
          ent_list.append("}\n")
        solid_to_ent = False


      elif in_entity_block and "\"" in line:

        if "\"id\"" not in line and "\t\"targetname\"" not in line and "\t\"origin\"" not in line and "\t\"associatedmodel\"" not in line and "\t\"parentname\"" not in line and "\t\"respawnroomname\"" not in line and "\"angles\"" not in line and "LaserTarget" not in line:
          ent_list.append(line)
        elif "\"id\"" in line:
          for letter in line:
            try:
              number = int(letter)
            except ValueError:
              ent_list.append(letter)
                  
          ent_list.insert(-2, "world_idnum")

        elif "\"angles\" \"0 0 0\"" in line:
          quote_num = 0
          for letter in line:
              if letter == "\"":
                quote_num += 1
              if quote_num != 3:
                ent_list.append(letter)
              elif letter == "\"":
                ent_list.append(letter)
                        
          ent_list.insert(-2, "ROTATION_RIGHT")
        elif '"angles" "0 90 0"' in line:
          quote_num = 0
          for letter in line:
              if letter == "\"":
                quote_num += 1
              if quote_num != 3:
                ent_list.append(letter)
              elif letter == "\"":
                ent_list.append(letter)
                        
          ent_list.insert(-2, "ROTATION_UP")
        elif '"angles" "0 180 0"' in line:
          quote_num = 0
          for letter in line:
              if letter == "\"":
                quote_num += 1
              if quote_num != 3:
                ent_list.append(letter)
              elif letter == "\"":
                ent_list.append(letter)
                        
          ent_list.insert(-2, "ROTATION_LEFT")
        elif '"angles" "0 270 0"' in line:
          quote_num = 0
          for letter in line:
              if letter == "\"":
                quote_num += 1
              if quote_num != 3:
                ent_list.append(letter)
              elif letter == "\"":
                ent_list.append(letter)
                        
          ent_list.insert(-2, "ROTATION_DOWN")
        elif "\t\"targetname\"" in line and "relay" not in line and "ambient_generic" not in openlines[loopernum-17] and "respawn_trigger" not in line and "\"func_door\"" not in openlines[loopernum-19] and "filter_activator_tfteam" not in openlines[loopernum-2] and "info_target" not in openlines[loopernum-3]:
          quote_num = 0
          for letter in line:
              if letter == "\"":
                quote_num += 1
              if quote_num != 3:
                ent_list.append(letter)
              elif letter == "\"":
                ent_list.append(letter)
                        
          ent_list.insert(-2, "entity_name")
        elif "\t\"targetname\"" in line and "respawn_trigger" in line:
          quote_num = 0
          for letter in line:
              if letter == "\"":
                quote_num += 1
              if quote_num != 3:
                ent_list.append(letter)
              elif letter == "\"":
                ent_list.append(letter)
                        
          ent_list.insert(-2, "respawn_name")
        elif "\t\"targetname\"" in line and "filter_blu" in line:
          quote_num = 0
          for letter in line:
              if letter == "\"":
                quote_num += 1
              if quote_num != 3:
                ent_list.append(letter)
              elif letter == "\"":
                ent_list.append(letter)
                        
          ent_list.insert(-2, "filter_blu")
        elif "\t\"targetname\"" in line and "relay" in line:
          quote_num = 0
          for letter in line:
              if letter == "\"":
                quote_num += 1
              if quote_num != 3:
                ent_list.append(letter)
              elif letter == "\"":
                ent_list.append(letter)
                        
          ent_list.insert(-2, "relay_plac")
        elif "\t\"targetname\"" in line and "ambient_generic" in openlines[loopernum-17]:
          quote_num = 0
          for letter in line:
              if letter == "\"":
                quote_num += 1
              if quote_num != 3:
                ent_list.append(letter)
              elif letter == "\"":
                ent_list.append(letter)
                        
          ent_list.insert(-2, "sound_plac")
        elif "\t\"targetname\"" in line and "filter_red" in line:
          quote_num = 0
          for letter in line:
              if letter == "\"":
                quote_num += 1
              if quote_num != 3:
                ent_list.append(letter)
              elif letter == "\"":
                ent_list.append(letter)
                        
          ent_list.insert(-2, "filter_red")
        elif "\t\"associatedmodel\"" in line:
          quote_num = 0
          for letter in line:
              if letter == "\"":
                quote_num += 1
              if quote_num != 3:
                ent_list.append(letter)
              elif letter == "\"":
                ent_list.append(letter)
                        
          ent_list.insert(-2, "entity_same")

        elif "\t\"parentname\"" in line and "\"func_door\"" not in openlines[loopernum-19] and "door" not in openlines[loopernum-2]: 
          quote_num = 0
          for letter in line:
              if letter == "\"":
                quote_num += 1
              if quote_num != 3:
                ent_list.append(letter)
              elif letter == "\"":
                ent_list.append(letter)
                        
          ent_list.insert(-2, "parent_name")
        elif "LaserTarget" in line:
          quote_num = 0
          for letter in line:
              if letter == "\"":
                quote_num += 1
              if quote_num != 3:
                ent_list.append(letter)
              elif letter == "\"":
                ent_list.append(letter)
                        
          ent_list.insert(-2, "laser_target_plac")

        elif "targetname" in line and "info_target" in openlines[loopernum-3]:
          quote_num = 0
          for letter in line:
              if letter == "\"":
                quote_num += 1
              if quote_num != 3:
                ent_list.append(letter)
              elif letter == "\"":
                ent_list.append(letter)
                        
          ent_list.insert(-2, "laser_target_plac")
          
        elif "\t\"parentname\"" in line and "door" in openlines[loopernum-2]: 
          quote_num = 0
          for letter in line:
              if letter == "\"":
                quote_num += 1
              if quote_num != 3:
                ent_list.append(letter)
              elif letter == "\"":
                ent_list.append(letter)
                        
          ent_list.insert(-2, "door_large")
        elif "\t\"targetname\"" in line and "\"func_door\"" in openlines[loopernum-19]:
          quote_num = 0
          for letter in line:
              if letter == "\"":
                quote_num += 1
              if quote_num != 3:
                ent_list.append(letter)
              elif letter == "\"":
                ent_list.append(letter)
                        
          ent_list.insert(-2, "door_large")
        elif "\t\"parentname\"" in line and "connections" in openlines[loopernum-3]: 
          quote_num = 0
          for letter in line:
              if letter == "\"":
                quote_num += 1
              if quote_num != 3:
                ent_list.append(letter)
              elif letter == "\"":
                ent_list.append(letter)
                        
          ent_list.insert(-2, "door_large")
        elif "\t\"parentname\"" in line and "connections" in openlines[loopernum-2]: 
          quote_num = 0
          for letter in line:
              if letter == "\"":
                quote_num += 1
              if quote_num != 3:
                ent_list.append(letter)
              elif letter == "\"":
                ent_list.append(letter)
                        
          ent_list.insert(-2, "door_large")

        elif "\t\"respawnroomname\"" in line:
          quote_num = 0
          for letter in line:
              if letter == "\"":
                quote_num += 1
              if quote_num != 3:
                ent_list.append(letter)
              elif letter == "\"":
                ent_list.append(letter)
          ent_list.insert(-2, "respawn_name")
                
        elif "\t\"origin\"" in line:
          nums_yet = False #if True then numbers have been received
          for letter in line:
            
            try:
              number = int(letter)    
              num_list.append(letter)
              nums_yet = True
            except ValueError:
              if letter != "-" and letter != ".":
                ent_list.append(letter)
              if letter == " ":
                num_list.append("SEPARATE")
              elif letter == ".":
                num_list.append(".")
              elif letter == "-":
                num_list.append("-")
              elif letter == "\"" and nums_yet:
                write_var(num_list, ent_list, ent_py_list, ent_var_num, value_list_history, in_solid_block, in_entity_block, rot_ent_py_list, rot_enabled) 
                ent_var_num += 1
                num_list = []
          

      elif in_entity_block and "\"" not in line:
        in_entity_block = False
        if "editor" in line:
          ent_list.append(line)
          in_editor_block = True
        elif "connections" in line:
          ent_list.append(line)
          in_connections_block = True
        elif "solid" in line:
          solid_to_ent = True

      elif in_editor_block and "\t}" not in line:
        ent_list.append(line)

      elif in_editor_block and "\t}" in line:
        in_editor_block = False
        ent_list.append(line)
        ent_list.append("}\n")

      elif in_connections_block and "\t}" not in line:
        ent_list.append(line)

      elif in_connections_block and "\t}" in line:
        in_connections_block = False
        ent_list.append(line)
        solid_to_ent = True #IMPORTANT: Might need to change because solid might not always follow connections

      which_list = txt_list if not solid_to_ent else ent_list
        
                  
      if "solid" in line and "\"" not in line: #or "side" in line:# or "origin" in line: #need to add this because somehow, the solid/side
                        
        which_list.append(line)
        #go until "\t}"
        in_solid_block = True
              
      elif "entity" in line:
        contains_ent = True
        in_entity_block = True
        ent_list.append(line)
        ent_list.append("{\n")

        
    loopernum += 1            
          


  file.close()
  global insertBool
  if rot_enabled:
    print(prefab_icon)
    ext_list = ["_right.jpg","_down.jpg","_left.jpg","_up.jpg"]
    icondir = str(prefab_name)
    if not insertBool:
        with open("prefab_template/rot_prefab_list.txt", "a") as f:
          f.write(icondir+"_icon_list.txt\n")
          f.close()
    else:
        tempApp = open("prefab_template/rot_prefab_list.txt", "r")
        tempLines = tempApp.readlines()
        tempApp.close()
        tempLines.insert(indexLine,icondir+"_icon_list.txt\n")
        tempLines = "".join(tempLines)
        tempWrite = open("prefab_template/rot_prefab_list.txt", "w")
        tempWrite.write(tempLines)
        tempWrite.close()

    imageRot = Image.open(prefab_icon)
    imageRot.save("icons/"+ icondir+"_right.jpg")
    imageRot2 = Image.open(prefab_icon)
    imageRot2 = imageRot2.rotate(270)
    imageRot2.save("icons/"+ icondir+"_down.jpg")
    imageRot3 = Image.open(prefab_icon)
    imageRot3 = imageRot3.rotate(180)
    imageRot3.save("icons/"+ icondir+"_left.jpg")
    imageRot4 = Image.open(prefab_icon)
    imageRot4 = imageRot4.rotate(90)
    imageRot4.save("icons/"+ icondir+"_up.jpg")
    f = open("prefab_template/iconlists/"+ icondir+"_icon_list.txt","w+")
    for i in ext_list:
      f.write("icons/"+ icondir+i+"\n")
    f.close()

  else:
    icondir = str(prefab_name)
    if not insertBool:
        with open("prefab_template/rot_prefab_list.txt", "a") as f:
          f.write("NO_ROTATION\n")
          f.close()
    else:
        tempApp = open("prefab_template/rot_prefab_list.txt", "r")
        tempLines = tempApp.readlines()
        tempApp.close()
        tempLines.insert(indexLine,"NO_ROTATION\n")
        tempLines = "".join(tempLines)
        tempWrite = open("prefab_template/rot_prefab_list.txt", "w")
        tempWrite.write(tempLines)
        tempWrite.close()
    f = open("prefab_template/iconlists/"+ icondir+"_icon_list.txt","w+")
    for i in range(4):
      f.write("icons/"+icondir+"\n")
    f.close()


  txtReturn = compileTXT(txt_path, txt_list, prefab_name, prefab_text, prefab_icon, ent_list, ent_path,indexLine)
  pyReturn = compilePY(py_path, py_list, txt_path, compile_list, contains_ent, ent_code, ent_path, ent_py_list, rot_code, rot_py_list, rot_ent_py_list, rot_enabled)

  if workshop_export:
    d = open("info.txt","w")
    d.write(icondir+"\n"+prefab_name+"\n"+prefab_text+"\n"+str(index)+"\n")
    d.close()
    with zipfile.ZipFile(prefab_name + '.zip', 'w') as f:
      f.write(txt_path)
      f.write(py_path)

      if rot_enabled:
        f.write("icons/"+ icondir+"_right.jpg")
        f.write("icons/"+ icondir+"_down.jpg")
        f.write("icons/"+ icondir+"_left.jpg")
        f.write("icons/"+ icondir+"_up.jpg")
      else:
        f.write("icons/"+ icondir+".jpg")
      
      if contains_ent:
        f.write(ent_path)

      f.write("prefab_template/iconlists/"+icondir+"_icon_list.txt")
      f.write("info.txt")
      os.remove("info.txt")

  return txtReturn + pyReturn
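
The workshop export above writes a throwaway info.txt, adds it to the archive, and only then calls os.remove on it. A minimal sketch of the same pattern, with the deletion moved into a finally block so the metadata file is cleaned up even if archiving fails (export_with_info and its arguments are illustrative names, not part of the project):

import os
import zipfile

def export_with_info(zip_path, info_lines, extra_files=()):
    # Write the throwaway metadata file first.
    with open("info.txt", "w") as d:
        d.write("\n".join(info_lines) + "\n")
    try:
        with zipfile.ZipFile(zip_path, "w") as z:
            for path in extra_files:
                z.write(path)
            z.write("info.txt")
    finally:
        # Remove the scratch file whether or not the archive succeeded.
        os.remove("info.txt")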

Example 157

Project: genmod
Source File: annotate_models.py
View license
@click.command()
@variant_file
@family_file
@family_type
@click.option('-r', '--reduced_penetrance',
                    nargs=1, 
                    type=click.File('r'),
                    metavar='<tsv_file>',
                    help='File with gene ids that have reduced penetrance.'
)
@click.option('--vep', 
                    is_flag=True,
                    help='If variants are annotated with the Variant Effect Predictor.'
)
@click.option('--phased', 
                    is_flag=True,
                    help='If data is phased use this flag.'
)
@click.option('-s' ,'--strict', 
                    is_flag=True,
                    help='If strict model annotations should be used (see documentation).'
)
@processes
@silent
@click.option('-w', '--whole_gene',
                    is_flag=True,
                    help='If compounds should be checked over the whole gene.'
)
@click.option('-k' ,'--keyword', 
                    default="Annotation",
                    help="""What annotation keyword that should be used when 
                    searching for features."""
)
@outfile
@temp_dir
def models(variant_file, family_file, family_type, reduced_penetrance, vep,
keyword, phased, strict, silent, processes, whole_gene, outfile, temp_dir):
    """
    Annotate genetic models for vcf variants. 
    
    Checks which patterns of inheritance are followed in a VCF file.
    The analysis is family based, so each family that is specified in the family
    file and exists in the variant file gets its own annotation.
    """
    logger = logging.getLogger(__name__)
    
    ######### This is for logging the command line string #########
    frame = inspect.currentframe()
    args, _, _, values = inspect.getargvalues(frame)
    argument_list = [
        i+'='+str(values[i]) for i in values if values[i] and 
        i not in ['frame']
    ]
    
    variant_file = get_file_handle(variant_file)
    ###########################################################################
    
    logger.info("Running GENMOD annotate version {0}".format(__version__))
    logger.debug("Arguments: {0}".format(', '.join(argument_list)))
    
    reduced_penetrance_genes = set()
    nr_reduced_penetrance_genes = 0
    if reduced_penetrance:
        logger.info("Found file with genes that have reduced penetrance")
        for line in reduced_penetrance:
            if not line.startswith('#'):
                nr_reduced_penetrance_genes += 1
                gene_id = line.rstrip().split()[0]
                logger.debug("Adding gene {0} to reduced penetrance genes".format(
                    gene_id
                ))
                reduced_penetrance_genes.add(
                    gene_id
                )
    
        logger.info("Found {0} genes with reduced penetrance".format(
            nr_reduced_penetrance_genes))
    
    
    if not family_file:
        logger.warning("Please provide a family file with -f/--family_file")
        logger.info("Exiting")
        sys.exit(1)
    
    logger.info("Setting up a family parser")
    family_parser = FamilyParser(family_file, family_type)
    logger.debug("Family parser done")
    
    families = {}
    logger.info("Check if the familys have any affected")
    for family_id in family_parser.families:
        found_affected = False
        family_obj = family_parser.families[family_id]
        for ind_id in family_obj.individuals:
            ind_obj = family_obj.individuals[ind_id]
            if ind_obj.affected:
                found_affected = True
        
        if found_affected:
            families[family_id] = family_obj
        else:
            logger.warning("No affected individuals found for family {0}."\
                           " Skipping family.".format(family_id))
    
    if not families:
        logger.warning("Please provide at least one family with affected individuals")
        sys.exit(0)
    # The individuals in the ped file must be present in the variant file:
    logger.info("Families used in analysis: {0}".format(
                    ','.join(list(families.keys()))))
    logger.info("Individuals included in analysis: {0}".format(
                    ','.join(list(family_parser.individuals.keys()))))
    
    
    head = HeaderParser()
    
    for line in variant_file:
        line = line.rstrip()
        if line.startswith('#'):
            if line.startswith('##'):
                head.parse_meta_data(line)
            else:
                head.parse_header_line(line)
        else:
            break
    
    #Add the first variant to the iterator
    variant_file = itertools.chain([line], variant_file)
    
    if vep:
        if not "CSQ" in head.info_dict:
            logger.warning("vep flag is used but there is no CSQ field specified in header")
            logger.info("Please check VCF file")
            logger.info("Exiting...")
            sys.exit(1)
        else:
            logger.info("Using VEP annotation")
    else:
        if not keyword in head.info_dict:
            logger.warning("Annotation key {0} could not be found in VCF header".format(keyword))
            logger.info("Please check VCF file")
            logger.info("Exiting...")
            sys.exit(1)
        else:
            logger.info("Using {0} annotation".format(keyword))
        
    
    if "GeneticModels" in head.info_dict:
        logger.warning("Genetic models are already annotated according to vcf"\
        " header.")
        logger.info("Exiting...")
        sys.exit(1)
    
    logger.info("Adding genmod version to vcf header")
    head.add_version_tracking(
                    info_id='genmod',
                    version=__version__,
                    date=datetime.now().strftime("%Y-%m-%d %H:%M"),
                    command_line=' '.join(argument_list)
                )
    
    logger.debug("Version added")
    logger.info("Adding genetic models to vcf header")
    add_metadata(
        head,
        'info',
        'GeneticModels',
        annotation_number='.',
        entry_type='String',
        description="':'-separated list of genetic models for this variant."
    )
    
    logger.debug("Genetic models added")
    logger.info("Adding model score to vcf header")
    add_metadata(
        head,
        'info',
        'ModelScore',
        annotation_number='.',
        entry_type='String',
        description="PHRED score for genotype models."
    )
    logger.debug("Model score added")
    
    logger.info("Adding Compounds to vcf header")
    add_metadata(
        head,
        'info',
        'Compounds',
        annotation_number='.',
        entry_type='String',
        description=("List of compound pairs for this variant."
        "The list is splitted on ',' family id is separated with compounds"
        "with ':'. Compounds are separated with '|'.")
    )
    logger.debug("Compounds added")
    
    vcf_individuals = head.individuals
    logger.debug("Individuals found in vcf file: {}".format(', '.join(vcf_individuals)))
    

    start_time_analysis = datetime.now()
    
    try:
        check_individuals(family_parser.individuals, vcf_individuals)
    except IOError as e:
        logger.error(e)
        logger.info("Individuals in PED file: {0}".format(
                        ', '.join(family_parser.individuals)))
        logger.info("Individuals in VCF file: {0}".format(', '.join(vcf_individuals)))
        logger.info("Exiting...")
        sys.exit(1)

    analysis_individuals = list(family_parser.individuals.keys())
    
    logger.info("Individuals used in analysis: {0}".format(
        ', '.join(analysis_individuals)))
    
    ###################################################################
    ### The task queue is where all jobs(in this case batches that  ###
    ### represents variants in a region) is put. The consumers will ###
    ### then pick their jobs from this queue.                       ###
    ###################################################################

    logger.debug("Setting up a JoinableQueue for storing variant batches")
    variant_queue = JoinableQueue(maxsize=1000)
    logger.debug("Setting up a Queue for storing results from workers")
    results = Manager().Queue()

    num_model_checkers = processes
    #Adapt the number of processes to the machine that runs the analysis
    logger.info('Number of CPU:s {}'.format(cpu_count()))
    logger.info('Number of model checkers: {}'.format(num_model_checkers))


    # These are the workers that do the heavy part of the analysis
    logger.info('Setting up the workers')
    model_checkers = [
        VariantAnnotator(
            task_queue=variant_queue,
            results_queue=results,
            families=families,
            individuals=analysis_individuals,
            phased=phased,
            strict=strict,
            whole_gene=whole_gene,
            vep=vep,
            reduced_penetrance_genes = reduced_penetrance_genes
        )
        for i in range(num_model_checkers)
    ]
    logger.info('Starting the workers')
    for worker in model_checkers:
        logger.debug('Starting worker {0}'.format(worker))
        worker.start()

    # This process prints the variants to temporary files
    logger.info('Setting up the variant printer')
    if len(model_checkers) == 1:
        print_headers(head=head, outfile=outfile, silent=silent)
        variant_printer = VariantPrinter(
                task_queue=results,
                head=head,
                mode='normal',
                outfile = outfile
        )
    else:
        # We use a temp file to store the processed variants
        logger.debug("Build a tempfile for printing the variants")
        if temp_dir:
            temp_file = NamedTemporaryFile(delete=False, dir=temp_dir)
        else:
            temp_file = NamedTemporaryFile(delete=False)
        temp_file.close()
        
        variant_printer = VariantPrinter(
                task_queue=results,
                head=head,
                mode='chromosome',
                outfile = temp_file.name
        )
    
    logger.info('Starting the variant printer process')
    variant_printer.start()

    start_time_variant_parsing = datetime.now()
    
    # This process parses the original vcf and create batches to put in the variant queue:
    logger.info('Start parsing the variants')
    chromosome_list = get_batches(
                                variants = variant_file,
                                batch_queue = variant_queue,
                                header = head,
                                vep = vep,
                                annotation_keyword = keyword
                            )
    
    logger.debug("Put stop signs in the variant queue")
    for i in range(num_model_checkers):
        variant_queue.put(None)
    
    variant_queue.join()
    results.put(None)
    variant_printer.join()
    
    if len(model_checkers) > 1:
        sort_variants(infile=temp_file.name, mode='chromosome')

        print_headers(head=head, outfile=outfile, silent=silent)

        with open(temp_file.name, 'r', encoding='utf-8') as f:
            for line in f:
                print_variant(
                    variant_line=line,
                    outfile=outfile,
                    mode='modified',
                    silent=silent
                )
    
        logger.debug("Removing temp file")
        os.remove(temp_file.name)
        logger.debug("Temp file removed")

    logger.info('Time for whole analysis: {0}'.format(
        str(datetime.now() - start_time_analysis)))
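
When more than one worker is used, the command above funnels results through a NamedTemporaryFile created with delete=False, sorts and reprints it, and finally removes it by name. A condensed sketch of that lifecycle; the write and read bodies below are placeholders for the sorting and printing steps:

import os
from tempfile import NamedTemporaryFile

temp_file = NamedTemporaryFile(delete=False)
temp_file.close()  # keep only the path; the workers reopen it themselves
try:
    with open(temp_file.name, "w") as out:
        out.write("processed variant lines go here\n")
    with open(temp_file.name, "r") as f:
        for line in f:
            pass  # print/merge each sorted line
finally:
    os.remove(temp_file.name)  # delete=False means cleanup is on us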

Example 158

Project: weka
Source File: arff.py
View license
    def test_sparse_stream(self):
        
        s0 = """% 
@relation test-abalone
@attribute 'Diameter' numeric
@attribute 'Length' numeric
@attribute 'Sex' {F,M}
@attribute 'Whole_weight' numeric
@attribute 'Class_Rings' integer
@data
{0 0.286, 1 0.35, 2 M, 4 15}
{0 0.86, 2 F, 3 0.98, 4 7}
"""
        
        rows = [
            dict(
                Sex=Nom('M'),
                Length=Num(0.35),
                Diameter=Num(0.286),
                Class_Rings=Int(15, cls=True)),
            dict(
                Sex=Nom('F'),
                Diameter=Num(0.86),
                Whole_weight=Num(0.98),
                Class_Rings=Int(7, cls=True)),
        ]
        rows_extra = [
            dict(
                Sex=Nom('N'),
                Length=Num(0.35),
                Diameter=Num(0.286),
                Class_Rings=Int(15, cls=True)),
            dict(
                Sex=Nom('B'),
                Diameter=Num(0.86),
                Whole_weight=Num(0.98),
                Class_Rings=Int(7, cls=True)),
        ]
        
        a1 = ArffFile(relation='test-abalone')
        for row in rows:
            a1.append(row)
        self.assertEqual(a1.class_attr_name, 'Class_Rings')
        s1 = a1.write()
#        print s0
#        print s1
        self.assertEqual(s1, s0)
        
        a2 = ArffFile.parse(s1)
        for i, line in enumerate(a2.data):
            self.assertEqual(line, a1.data[i])
        s2 = a2.write()
        self.assertEqual(s1, s2)
        
        a3 = ArffFile(relation='test-abalone')
        self.assertEqual(len(a3.data), 0)
        #a3.open_stream(class_attr_name='Class_Rings')
        
        # When streaming, you have to provide your schema ahead of time,
        # since otherwise we'd have to update the indexes on all rows
        # previously written to the file.
        for row in rows:
            a3.append(row, schema_only=True)
            self.assertEqual(len(a3.data), 0)
            
        a3.open_stream(class_attr_name='Class_Rings')
        for row in (rows+rows_extra):
            a3.append(row)
            self.assertEqual(len(a3.data), 0)
        
        fn = a3.close_stream()
        s3 = open(fn, 'r').read()
        #print s3
        os.remove(fn)
        # Note the rows that have features violating the schema are
        # automatically omitted when in streaming mode.
        self.assertEqual(s3, """% 
@relation test-abalone
@attribute 'Diameter' numeric
@attribute 'Length' numeric
@attribute 'Sex' {F,M}
@attribute 'Whole_weight' numeric
@attribute 'Class_Rings' integer
@data
{0 0.286, 1 0.35, 2 M, 4 15}
{0 0.86, 2 F, 3 0.98, 4 7}
{0 0.286, 1 0.35, 4 15}
{0 0.86, 3 0.98, 4 7}
""")

Example 159

Project: genmod
Source File: annotate_variant.py
View license
@click.command()
@variant_file
@click.option('-r', '--annotate_regions', 
                is_flag=True,
                help='Annotate the regions/features that the variants belong to.'
)
@click.option('-c', '--cadd_file', 
                    multiple = True,
                    type=click.Path(exists=True), 
                    help="Specify the path to a bgzipped cadd file"\
                    " (with index) with variant scores. This command can be"\
                    " used multiple times if multiple cadd files."
)
@click.option('--thousand_g',
                    type=click.Path(exists=True), 
                    help="Specify the path to a bgzipped vcf file"\
                            " (with index) with 1000g variants"
)
@click.option('--exac',
                    type=click.Path(exists=True), 
                    help="Specify the path to a bgzipped vcf file"\
                            " (with index) with exac variants."
)
@click.option('--cosmic',
                    type=click.Path(exists=True), 
                    help="Specify the path to a bgzipped vcf file"\
                            " (with index) with COSMIC variants."
)
@click.option('--max_af',
                    is_flag=True,
                    help="If the MAX AF should be annotated"
)
@click.option('--spidex',
                    type=click.Path(exists=True), 
                    help="Specify the path to a bgzipped tsv file"\
                            " (with index) with spidex information."
)
@click.option('-a' ,'--annotation_dir',
                    type=click.Path(exists=True),
                    default=pkg_resources.resource_filename('genmod', 'annotations'),
                    help="""Specify the path to the directory where the annotation 
                    databases are. 
                    Default is the gene pred files that comes with the distribution."""
)
@click.option('--cadd_raw', 
                    is_flag=True,
                    help="""If the raw cadd scores should be annotated."""
)
@processes
@outfile
@silent
@temp_dir
def annotate(variant_file, annotate_regions, cadd_file, thousand_g, exac, 
spidex,annotation_dir, outfile, silent, cadd_raw, cosmic, max_af, processes,
temp_dir):
    """
    Annotate vcf variants.
    
    Annotate variants with a number of different sources.
    Please use --help for more info.
    """

    logger.info("Running genmod annotate_variant version {0}".format(__version__))
    
    start_time_analysis = datetime.now()
    annotator_arguments = {}
    
    variant_file = get_file_handle(variant_file)
    
    logger.info("Initializing a Header Parser")
    head = HeaderParser()
    
    line = None
    for line in variant_file:
        line = line.rstrip()

        if line.startswith('#'):
            if line.startswith('##'):
                head.parse_meta_data(line)
            else:
                head.parse_header_line(line)
        else:
            break
    
    #Add the first variant to the iterator
    if line:
        variant_file = itertools.chain([line], variant_file)
    
    header_line = head.header
    annotator_arguments['header_line'] = header_line
    
    if annotate_regions:
        logger.info("Loading annotations")
        gene_trees, exon_trees = load_annotations(annotation_dir)
        annotator_arguments['gene_trees'] = gene_trees
        annotator_arguments['exon_trees'] = exon_trees
        
        add_metadata(
            head,
            'info',
            'Annotation',
            annotation_number='.',
            entry_type='String',
            description='Annotates what feature(s) this variant belongs to.'
        )
        add_metadata(
            head,
            'info',
            'Exonic',
            annotation_number='0',
            entry_type='Flag',
            description='Indicates if the variant is exonic.'
        )

    if exac:
        logger.info("Annotating ExAC frequencies")
        logger.debug("Using ExAC file: {0}".format(exac))
        annotator_arguments['exac'] = exac
        add_metadata(
            head,
            'info',
            'ExACAF',
            annotation_number='1',
            entry_type='Float',
            description="Frequency in the ExAC database."
        )
        
    if thousand_g:
        logger.info("Annotating 1000G frequencies")
        logger.debug("Using 1000G file: {0}".format(thousand_g))
        annotator_arguments['thousand_g'] = thousand_g
        add_metadata(
            head,
            'info',
            '1000GAF',
            annotation_number='1',
            entry_type='Float',
            description="Frequency in the 1000G database."
        )

    if spidex:
        logger.info("Annotating Spidex z scores")
        logger.debug("Using Spidex file: {0}".format(spidex))
        annotator_arguments['spidex'] = spidex
        add_metadata(
            head,
            'info',
            'SPIDEX',
            annotation_number='1',
            entry_type='Float',
            description="Z score from the spidex database."
        )
    
    if cadd_file:
        logger.info("Annotating CADD scores")
        logger.debug("Using CADD file(s): {0}".format(', '.join(cadd_file)))
        annotator_arguments['cadd_files'] = cadd_file
        any_cadd_file = True

        add_metadata(
            head,
            'info',
            'CADD',
            annotation_number='1',
            entry_type='Integer',
            description="The CADD relative score for this alternative."
        )
        if cadd_raw:
            annotator_arguments['cadd_raw'] = cadd_raw
            logger.debug("Adding vcf metadata for CADD raw score")
            add_metadata(
                head,
                'info',
                'CADD_raw',
                annotation_number='1',
                entry_type='Float',
                description="The CADD raw score(s) for this alternative(s)."
            )

    if max_af:
        annotator_arguments['max_af'] = max_af
        if thousand_g:
            add_metadata(
                head,
                'info',
                '1000G_MAX_AF',
                annotation_number='1',
                entry_type='Float',
                description="The max af for thousand genomes populations."
            )
        if exac:
            add_metadata(
                head,
                'info',
                'ExAC_MAX_AF',
                annotation_number='1',
                entry_type='Float',
                description="The max af for ExAC populations."
            )

    if cosmic:
        logger.info("Annotating if variant is in COSMIC")
        logger.debug("Using COSMOC file: {0}".format(cosmic))
        annotator_arguments['cosmic'] = cosmic
        add_metadata(
            head,
            'info',
            'COSMIC',
            annotation_number='0',
            entry_type='Flag',
            description="If variant is in COSMIC database."
        )
    
    ###################################################################
    ### The task queue is where all jobs(in this case batches that  ###
    ### represents variants in a region) is put. The consumers will ###
    ### then pick their jobs from this queue.                       ###
    ###################################################################

    logger.debug("Setting up a JoinableQueue for storing variant batches")
    variant_queue = JoinableQueue(maxsize=1000)
    logger.debug("Setting up a Queue for storing results from workers")
    results = Manager().Queue()

    num_annotators = processes
    #Adapt the number of processes to the machine that runs the analysis
    if cadd_file or spidex:
        # We need more power when annotating cadd scores:
        # But if flag is used that overrides
        if num_annotators == min(4, cpu_count()):
            num_annotators = min(8, cpu_count())

    logger.info('Number of CPU:s {}'.format(cpu_count()))
    logger.info('Number of annotators: {}'.format(num_annotators))


    # These are the workers that do the heavy part of the analysis
    logger.info('Setting up the workers')
    annotators = [
        VariantAnnotator(
            variant_queue, 
            results, 
            **annotator_arguments
        )
        for i in range(num_annotators)
    ]

    logger.info('Starting the workers')
    for worker in annotators:
        logger.debug('Starting worker {0}'.format(worker))
        worker.start()

    # This process prints the variants to temporary files
    # If there is only one annotation process we can print the results as soon
    # as they are done
    logger.info('Setting up the variant printer')
    if len(annotators) == 1:
        print_headers(head, outfile, silent)
        var_printer = VariantPrinter(
                        task_queue = results, 
                        head = head, 
                        mode='normal', 
                        outfile = outfile
                        )
    else:
        # We use a temp file to store the processed variants
        logger.debug("Build a tempfile for printing the variants")
        if temp_dir:
            temp_file = NamedTemporaryFile(delete=False, dir=temp_dir)
        else:
            temp_file = NamedTemporaryFile(delete=False)
            
        temp_file.close()
        
        var_printer = VariantPrinter(
                        task_queue = results, 
                        head = head, 
                        mode='chromosome', 
                        outfile = temp_file.name
                        )
    
    logger.info('Starting the variant printer process')
    var_printer.start()

    start_time_variant_parsing = datetime.now()
    start_time_twenty = datetime.now()
    nr_of_lines = 0
    # This process parses the original vcf and create batches to put in the variant queue:
    logger.info('Start parsing the variants')
    
    for line in variant_file:
        line = line.rstrip()
        
        if not line.startswith('#'):
            variant_queue.put(line)
            
            nr_of_lines += 1
            
            if nr_of_lines % 20000 == 0:
                logger.info('{0} variants parsed'.format(nr_of_lines))
                logger.info('Last 20000 took {0} to parse'.format(
                    datetime.now()-start_time_twenty))
                start_time_twenty = datetime.now()
    
    logger.info('Put stop signs in the variant queue')
    
    for i in range(num_annotators):
        variant_queue.put(None)

    variant_queue.join()
    results.put(None)
    var_printer.join()

    if len(annotators) > 1:
        logger.info("Start sorting the variants")
        sort_variants(temp_file.name, mode='chromosome')

        logger.info("Print the headers")
        print_headers(head, outfile, silent)

        with open(temp_file.name, 'r', encoding='utf-8') as f:
            for line in f:
                print_variant(
                    variant_line=line,
                    outfile=outfile,
                    mode='modified',
                    silent=silent
                )

        logger.info("Removing temp file")
        os.remove(temp_file.name)
        logger.debug("Temp file removed")

    logger.info('Time for whole analysis: {0}'.format(
        str(datetime.now() - start_time_analysis)))
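
Both genmod commands rely on the same header-peeking idiom: iterate over the VCF handle until the first non-'#' line, then push that line back with itertools.chain so downstream parsing still sees every variant. A standalone sketch of the idiom (split_header is a hypothetical name):

import itertools

def split_header(handle):
    header = []
    line = None
    for line in handle:
        line = line.rstrip()
        if line.startswith('#'):
            header.append(line)
        else:
            break
    body = handle
    if line is not None and not line.startswith('#'):
        # Re-attach the first data line that the loop consumed.
        body = itertools.chain([line], handle)
    return header, body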

Example 160

Project: Devede
Source File: devede_avconv_convert.py
View license
	def __init__(self,global_vars,videofile,filename,filefolder,progresbar,proglabel,disctype,title,chapter,threads,seconds,encpass,fix_ac3):

		""" This class converts a video file to MPEG-1 or MPEG-2 format

		VIDEOFILE contains the parameters to convert the video
		FILENAME is the generic file name given by the user
		FILEFOLDER is the path where all the temporary and final files will be created
		PROGRESBAR is the progress bar where the class will show the progress
		PROGLABEL is the label where the class will show what is it doing
		DISCTYPE can be dvd, vcd, svcd, cvd or divx
		TITLE and CHAPTER are the numbers used to identify the TITLE and CHAPTER number for this file
		THREADS is the number of threads to use
		SECONDS is the number of seconds we want to convert (for previews) 
		ENCPASS is the encoding pass number"""
		
		devede_executor.executor.__init__(self,filename,filefolder,progresbar)
		self.printout=False

		self.percent2=120
		self.film_length=float(videofile["olength"])
		if seconds==0:
			self.divide=float(videofile["olength"])
			if (videofile["cutting"]==1) or (videofile["cutting"]==2): # if we want only one half of the file
				self.divide/=2
		else:
			self.divide=float(seconds)

		if self.divide==0:
			self.divide=1

		self.error=""
		progresbar.set_fraction(0)
		progresbar.set_text("")
		
		if videofile["ismpeg"]: # if the file hasn't to be converted, we simply copy or link it
			self.pulse=True
			self.print_error=_("File copy failed\nMaybe you ran out of disk space?")
			if seconds==0:
				texto=_("Copying the file")+"\n"
			else:
				texto=_("Creating preview")+"\n"
			proglabel.set_text(texto+videofile["filename"])
			currentfile=self.create_filename(filefolder+filename,title,chapter,disctype=="divx")
		
			print "\ncurrentfile is: ", currentfile , "\n" 

			try:
				os.remove(currentfile)
			except:
				pass

			if (sys.platform=="win32") or (sys.platform=="win64"):
				# links do not work on windows, so just copy the file
				# self.launch_shell('copy "'+videofile["path"].replace('"','""')+'" "'+currentfile+'"',output=False)
				# Only hardlinks are available on 2000 and XP; reparse points are available from Vista onwards.
				win32file.CreateHardLink(currentfile, videofile["path"].replace('"','""'))
			else:
				if len(videofile["sub_list"])==0:
					self.launch_shell('ln -s "'+videofile["path"].replace('"','\\"')+'" "'+currentfile+'"',output=False)
				else:
					self.launch_shell('cp "'+videofile["path"].replace('"','\\"')+'" "'+currentfile+'"',output=False)
			return

		isvob=videofile["isvob"]

		self.pulse=False
		if seconds==0:
			texto=(_("Converting files from title %(title_number)s (pass %(pass_number)s)\n\n%(file_name)s") % {"title_number":str(title),"pass_number":str(encpass),"file_name":videofile["filename"]} )
			proglabel.set_text(texto) #+" "+str(title)+" Pass: "+ str(encpass) +"\n\n"+videofile["filename"] )
		else:
			texto=_("Creating preview")
			proglabel.set_text(texto+"\n"+videofile["filename"])

		addbars=False
		framerate=int(videofile["ofps"])
		videorate=int(videofile["vrate"])
		audiorate=self.adjust_audiorate(int(videofile["arate"]),disctype=="dvd")
		
		audio_final_rate=int(videofile["arateunc"])
		audiodelay=float(videofile["adelay"])
		final_framerate=float(videofile["fps"])
		aspect_ratio_original=videofile["oaspect"]
		aspect_ratio_final=videofile["aspect"]
		resx_final=videofile["width"]
		resy_final=videofile["height"]
		resx_original=videofile["owidth"]
		resy_original=videofile["oheight"]
		copy_audio=videofile["copy_audio"]
		sound51=videofile["sound51"]
		gop12=videofile["gop12"]
		audiostream=videofile["audio_stream"]
		swap_fields=videofile["swap_fields"]
		volume=videofile["volume"]
		audio_tracks=len(videofile["audio_list"])

		if (videofile["resolution"]==0) and (disctype=="divx"):
			default_res=True
		else:
			default_res=False
		
		speed1,speed2=devede_other.get_speedup(videofile)
		if speed1==speed2:
			speedup=None
		else:
			speedup=str(speed1)+":"+str(speed2)
	
		if aspect_ratio_original<1.3:
			aspect_ratio_original=float(videofile["owidth"])/(float(videofile["oheight"]))
		if aspect_ratio_original<1.33333333:
			aspect_ratio_original=1.33333333
	
		max_videorate=int(videorate*2)
		min_videorate=int(videorate*0.75)
		
		dsize,minvid,maxvid=devede_other.get_dvd_size(None,disctype)
		
		if max_videorate>maxvid:
			max_videorate=maxvid
		if min_videorate<minvid:
			min_videorate=minvid
			
		if videofile["blackbars"]==0: # check if has to add black bars
			addbars=True
			if (videofile["rotate"]==90) or (videofile["rotate"]==270):
				resx_original2=resy_original
				resy_original2=resx_original
				aratio=1/aspect_ratio_original
			else:
				resx_original2=resx_original
				resy_original2=resy_original
				aratio=aspect_ratio_original

			if (resx_original2%2)==1:
				resx_original2+=1
			if (resy_original2%2)==1:
				resy_original2+=1
			
			resy_tmp = int(resy_final*aspect_ratio_final/aratio)
			resx_tmp = int(resx_final*aratio/aspect_ratio_final)
			
			
			if (resx_tmp>resx_final):
				resx_inter=resx_final
				resy_inter=resy_tmp
			else:
				resx_inter=resx_tmp
				resy_inter=resy_final
			
			#resx_inter=resx_original2
			#resy_inter=int((resy_original2*aspect_ratio_original)/aspect_ratio_final)
			if (resx_inter%2)==1:
				resx_inter-=1
			if (resy_inter%2)==1:
				resy_inter-=1
			
			#if ((resy_inter<resy_original) or (resy_original+5>resy_inter)):
			#	addbars=False

		if addbars==False:
			resx_inter=resx_final
			resy_inter=resy_final
		else:
			if (resx_inter==resx_final):
				addx=0
				addy=int((resy_final-resy_inter)/2)
				if(addy%2)==1:
					addy+=1
			else:
				addy=0
				addx=int((resx_final-resx_inter)/2)
				if(addx%2)==1:
					addx+=1
					
		
		command_var=["avconv"]

		command_var.append("-i")
		command_var.append(videofile["path"])
		
		if (volume!=100):
			command_var.append("-vol")
			command_var.append(str((256*volume)/100))
		
		if (audiodelay!=0.0) and (copy_audio==False) and (isvob==False):
			command_var.append("-itsoffset")
			command_var.append(str(audiodelay))
			command_var.append("-i")
			command_var.append(videofile["path"])
			command_var.append("-map")
			command_var.append("1:0")
			for l in range(audio_tracks):
				command_var.append("-map")
				command_var.append("0"+":"+str(l+1))
		
		if (isvob==False):
			cmd_line=""
			
			extra_params=videofile["params_vf"] # take the VF extra params
			while (extra_params!=""):
				extra_params,new_param=devede_other.get_new_param(extra_params)
				if (new_param!="") and (new_param!=','):
					while (len(new_param)>1) and (new_param[0]==','):
						new_param=new_param[1:]
					while (len(new_param)>1) and (new_param[-1]==','):
						new_param=new_param[:-1]
					if new_param=="fifo":
						continue
					if cmd_line!="":
						cmd_line+=",fifo,"
					cmd_line+=new_param
			
			if videofile["deinterlace"]=="yadif":
				if (cmd_line!=""):
					cmd_line+=",fifo,"
				cmd_line+="yadif"
			
			vflip=0
			hflip=0
	
			if (videofile["rotate"]==90):
				if (cmd_line!=""):
					cmd_line+=",fifo,"
				cmd_line+="transpose=1"
			elif (videofile["rotate"]==270):
				if (cmd_line!=""):
					cmd_line+=",fifo,"
				cmd_line+="transpose=2"
			elif (videofile["rotate"]==180):
				vflip=1
				hflip=1
			
			if (videofile["vmirror"]):
				vflip=1-vflip
			if (videofile["hmirror"]):
				hflip=1-hflip
	
			if (vflip==1):
				if (cmd_line!=""):
					cmd_line+=",fifo,"
				cmd_line+="vflip"
			if (hflip==1):
				if (cmd_line!=""):
					cmd_line+=",fifo,"
				cmd_line+="hflip"
			
			if addbars and ((resx_inter!=resx_original) or (resy_inter!=resy_original)) and (default_res==False):
				if (cmd_line!=""):
					cmd_line+=",fifo,"
				cmd_line+="scale="+str(resx_inter)+":"+str(resy_inter)+",fifo,pad="+str(resx_final)+":"+str(resy_final)+":"+str(addx)+":"+str(addy)+":0x000000"
			
			if cmd_line!="":
				command_var.append("-vf")
				command_var.append(cmd_line)
			
		
		command_var.append("-y")

		vcd=False
		
		if (disctype!="divx"):
			command_var.append("-target")
			if (disctype=="dvd"):
				if final_framerate==30:
					command_var.append("ntsc-dvd")
				elif (framerate==24):
					command_var.append("film-dvd")
				else:
					command_var.append("pal-dvd")
				if (copy_audio==False):
					command_var.append("-acodec")
					if fix_ac3:
						command_var.append("ac3_fixed")
					else:
						command_var.append("ac3")
				#command_var.append("-maxrate")
				#command_var.append("7000k")
				#command_var.append("-minrate")
				#command_var.append("2200k")
			elif (disctype=="vcd"):
				vcd=True
				if final_framerate==30:
					command_var.append("ntsc-vcd")
				else:
					command_var.append("pal-vcd")
			elif (disctype=="svcd"):
				if final_framerate==30:
					command_var.append("ntsc-svcd")
				else:
					command_var.append("pal-svcd")
			elif (disctype=="cvd"):
				if final_framerate==30:
					command_var.append("ntsc-svcd")
				else:
					command_var.append("pal-svcd")
		else: # DivX
			command_var.append("-vcodec")
			command_var.append("mpeg4")
			command_var.append("-acodec")
			command_var.append("libmp3lame")
			command_var.append("-f")
			command_var.append("avi")
		
		if  (not isvob):
			command_var.append("-sn") # no subtitles

		if copy_audio or isvob:
			command_var.append("-acodec")
			command_var.append("copy")
		#else:
		#	if (disctype=="divx"):
		#		command_var.append("-acodec")
		#		command_var.append("mp3")

		#if (audiostream!=10000):
		#	command_var.append("-aid")
		#	command_var.append(str(audiostream))

		if isvob:
			command_var.append("-vcodec")
			command_var.append("copy")
		
		if (vcd==False):
			if final_framerate==30:
				if (framerate==24) and ((disctype=="dvd") or (disctype=="divx")):
					str_final_framerate="24000/1001"
					keyintv=15
					telecine=True
				else:
					str_final_framerate="30000/1001"
					keyintv=18
			else:
				str_final_framerate=str(int(final_framerate))
				keyintv=15
		
		if (disctype=="divx"):
			command_var.append("-g")
			command_var.append("300")
		elif gop12 and (isvob==False):
			command_var.append("-g")
			command_var.append("12")
		
		command_var.append("-bf")
		command_var.append("2")
		command_var.append("-strict")
		command_var.append("1")
		
		if seconds!=0:
			command_var.append("-t")
			command_var.append(str(seconds))
		else:
			if videofile["cutting"]==1: # first half only
				command_var.append("-t")
				command_var.append(str(videofile["olength"]/2))
			elif videofile["cutting"]==2: # second half only
				command_var.append("-ss")
				command_var.append(str((videofile["olength"]/2)-5)) # start 5 seconds before

		#if (audiodelay!=0.0) and (copy_audio==False) and (isvob==False):
		#	command_var.append("-delay")
		#	command_var.append(str(audiodelay))

		command_var.append("-ac")
		if (sound51) and ((disctype=="dvd") or (disctype=="divx")):
			command_var.append("6")
		else:
			command_var.append("2")

		#if (isvob==False) and (default_res==False):
		#	command_var.append("-ofps")
		#	command_var.append(str_final_framerate)

		if disctype=="divx":
			command_var.append("-vtag")
			command_var.append("DX50")

		lineatemp=""
		acoma=False;
		
		#if swap_fields:
		#	lineatemp+="phase=a"
		#	acoma=True
		
		passlog_var = None
		
		if (videofile["deinterlace"]!="none") and (videofile["deinterlace"]!="yadif") and (isvob==False):
			command_var.append("-deinterlace")
			
		print "Addbars "+str(addbars)+" resx_o "+str(resx_original)+" resy_o "+str(resy_original)
		print "resx_i "+str(resx_inter)+" resy_i "+str(resy_inter)
 
		if (isvob==False) and (vcd==False):
			command_var.append("-s")
			command_var.append(str(resx_final)+"x"+str(resy_final))

		# Currently Mencoder supports up to 8 threads
		if isvob==False:
			
			if threads>1:
				command_var.append("-threads")
				command_var.append(str(threads))

			command_var.append("-trellis")
			if videofile["trellis"]:
				command_var.append("1")
			else:
				command_var.append("0")
		
			if videofile["mbd"]==0:
				command_var.append("-mbd")
				command_var.append("0")
			elif videofile["mbd"]==1:
				command_var.append("-mbd")
				command_var.append("1")
			elif videofile["mbd"]==2:
				command_var.append("-mbd")
				command_var.append("2")
		
			#if disctype!="divx":
			#	lavcopts+=":keyint="+str(keyintv)
			if(copy_audio==False) and (vcd==False):
#					lavcopts+=":acodec="
#					if disctype=="dvd":
#						if fix_ac3:
#							lavcopts+="ac3_fixed"
#						else:
#							lavcopts+="ac3"
#					else:
#						lavcopts+="mp2"
					#lavcopts+=":abitrate="+str(audiorate)
				command_var.append("-b:a")
				command_var.append(str(audiorate)+"k")

			if (default_res==False):
				command_var.append("-aspect")
				if aspect_ratio_final>1.4:
					command_var.append("16:9")
				else:
					command_var.append("4:3")
			
			passlog_var=None
			if (encpass>0)  and (isvob==False):
				command_var.append("-pass")
				command_var.append(str(encpass))
				passlog_var=os.path.join(filefolder,filename)+".log"
				if encpass==1:
					try:
						os.remove(passlog_var)
					except:
						 pass
	
			if (vcd==False):
				command_var.append("-b")
				command_var.append(str(videorate)+"k")
	
		
		
		at=audio_tracks
		while (at>1):
			if (volume!=100):
				command_var.append("-vol")
				command_var.append(str((256*volume)/100))
			command_var.append("-newaudio")
			at-=1
			

		extra_params=videofile["params"] # take the extra params
		while (extra_params!=""):
			extra_params,new_param=devede_other.get_new_param(extra_params)
			if new_param!="":
				command_var.append(new_param)

		currentfile=self.create_filename(filefolder+filename,title,chapter,disctype=="divx")

		if (passlog_var != None) and (isvob==False):
			command_var.append("-passlogfile")
			command_var.append(passlog_var)

		if (encpass==1) and (isvob==False):
			command_var.append("-y")
			command_var.append("/dev/null")
		else:
			command_var.append(currentfile)

		self.print_error=_("Conversion failed.\nIt seems a bug of Mencoder.")
		if (videofile["params"]!="") or (videofile["params_vf"]!="") or (videofile["params_lame"]!=""):
			self.print_error+="\n"+_("Also check the extra params passed to Mencoder for syntax errors.")
		self.error_not_done=True
		self.launch_program(command_var,read_chars=300)
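
Both os.remove calls in this example are best-effort: the target may not exist yet, so the deletion is wrapped in try/except. The bare except: used here also swallows KeyboardInterrupt and SystemExit; a slightly safer sketch catches OSError only and re-raises anything other than "file not found" (remove_if_exists is an illustrative name):

import errno
import os

def remove_if_exists(path):
    try:
        os.remove(path)
    except OSError as e:
        # A missing file is fine; any other failure should surface.
        if e.errno != errno.ENOENT:
            raise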

Example 161

Project: scansio-sonar-es
Source File: sonar_non443.py
View license
def main(argv):
    parser = argparse.ArgumentParser()
    parser.add_argument('--server', default=DEFAULT_SERVER,
                        help=u'Elasticsearch hostname or IP (default {0})'.format(DEFAULT_SERVER))
    parser.add_argument('--port', default=DEFAULT_PORT,
                        help=u'Elasticsearch port (default {0})'.format(DEFAULT_PORT))
    args = parser.parse_args(argv[1:])

    workers = cpu_count()
    process_hosts_queue = Queue(maxsize=20000)
    process_certs_queue = Queue(maxsize=20000)
    update_hosts_queue = Queue(maxsize=20000)

    es = Elasticsearch([{u'host': args.server, u'port': args.port}], timeout=60)

    imported_sonar = es.search(index='scansio-sonar-ssl-non443-imported', body={"size": 3000, "query": {"match_all": {}}
                                                                                })
    imported_files = []
    for f in imported_sonar['hits']['hits']:
        imported_files.append(f['_id'])

    scansio_feed = requests.get('https://scans.io/json')
    if scansio_feed.status_code == 200:
        feed = scansio_feed.json()
        if 'studies' in feed:
            for result in feed['studies']:
                if result['name'] == "More SSL Certificates (non-443)":
                    for res in result['files']:
                        scans_file = res['name']
                        if scans_file.endswith('certs.gz'):
                            if 'smtp_25' in scans_file:
                                certfile = scans_file[52:86]
                                port = 25
                            if 'smtp_465' in scans_file:
                                certfile = scans_file[52:82]
                                port = 465
                            if 'imap_993' in scans_file:
                                certfile = scans_file[52:82]
                                port = 993
                            if 'imap_143' in scans_file:
                                certfile = scans_file[52:87]
                                port = 143
                            if 'pop3_995' in scans_file:
                                certfile = scans_file[52:82]
                                port = 995
                            if certfile not in imported_files:
                                logger.warning("We don't have {file} imported lets download it".format(file=certfile))
                                phys_file = requests.get(scans_file, stream=True)
                                with open('{f}'.format(f=certfile), 'wb') as newcerts:
                                    for chunk in phys_file.iter_content(chunk_size=1024):
                                        if chunk:
                                            newcerts.write(chunk)
                                with open('{f}'.format(f=certfile), 'rb') as fh:
                                    h = hashlib.sha1()
                                    while True:
                                        data = fh.read(8192)
                                        if not data:
                                            break
                                        h.update(data)
                                sha1 = h.hexdigest()
                                if sha1 == res['fingerprint']:
                                    for w in xrange(workers):
                                        queue_es = Elasticsearch([{u'host': args.server, u'port': args.port}],
                                                                 timeout=60)
                                        p = Process(target=process_scan_certs, args=(process_certs_queue, queue_es,
                                                                                     port))
                                        p.daemon = True
                                        p.start()
                                    logger.warning("Importing {f} at {d}".format(f=certfile, d=datetime.now()))
                                    parse_certs_file(certfile, process_certs_queue)
                                    for w in xrange(workers):
                                        process_certs_queue.put("DONE")
                                    logger.warning("Importing finished of {f} at {d}".format(f=certfile,
                                                                                             d=datetime.now()))
                                    es.index(index='scansio-sonar-ssl-non443-imported', doc_type='imported-file',
                                             id=certfile, body={'file': certfile, 'imported_date': datetime.now(),
                                                                'sha1': sha1})
                                else:
                                    logger.error("SHA1 did not match for {f} it was not imported".format(f=certfile))
                                os.remove(certfile)
                        if scans_file.endswith('endpoints.gz'):
                            if 'smtp_25' in scans_file:
                                hostsfile = scans_file[52:90]
                                port = 25
                            if 'smtp_465' in scans_file:
                                hostsfile = scans_file[52:86]
                                port = 465
                            if 'imap_993' in scans_file:
                                hostsfile = scans_file[52:86]
                                port = 993
                            if 'imap_143' in scans_file:
                                hostsfile = scans_file[52:91]
                                port = 143
                            if 'pop3_995' in scans_file:
                                hostsfile = scans_file[52:86]
                                port = 995
                            if hostsfile not in imported_files:
                                logger.warning("We don't have {file} imported lets download it".format(file=hostsfile))
                                phys_host_file = requests.get(scans_file)
                                with open('{f}'.format(f=hostsfile), 'wb') as hf:
                                    for chunk in phys_host_file.iter_content(chunk_size=1024):
                                        if chunk:
                                            hf.write(chunk)
                                with open('{f}'.format(f=hostsfile), 'rb') as fh:
                                    h = hashlib.sha1()
                                    while True:
                                        data = fh.read(8192)
                                        if not data:
                                            break
                                        h.update(data)
                                sha1 = h.hexdigest()
                                if sha1 == res['fingerprint']:
                                    for w in xrange(workers):
                                        queue_es = Elasticsearch([{u'host': args.server, u'port': args.port}],
                                                                 timeout=60)
                                        p = Process(target=process_hosts, args=(process_hosts_queue, queue_es))
                                        p.daemon = True
                                        p.start()
                                    logger.warning("Importing {f} at {d}".format(f=hostsfile, d=datetime.now()))
                                    parse_hosts_file(hostsfile, process_hosts_queue)
                                    logger.warning("Hosts updated for {f} now going back and updating first_seen"
                                                   .format(f=hostsfile))
                                    update_es = Elasticsearch([{u'host': args.server, u'port': args.port}], timeout=60)
                                    # Construct an elasticsearch query whose filter looks for any entry
                                    # that is missing the field first_seen.
                                    # Adding a queue processing system here should hopefully speed things up.
                                    for work in xrange(workers):
                                        p = Process(target=update_hosts, args=(update_hosts_queue, update_es))
                                        p.daemon = True
                                        p.start()
                                    q = {'size': 500, "query": {"match_all": {}},
                                         "filter": {"missing": {"field": "first_seen"}}}
                                    new_updates = update_es.search(index='passive-ssl-non443-hosts-sonar', body=q)
                                    logger.warning("Numer of hosts to update is {count}"
                                                   .format(count=new_updates['hits']['total']))
                                    # Scan across all the documents missing the first_seen field and bulk update them
                                    missing_first_seen = scan(update_es, query=q, scroll='30m',
                                                              index='passive-ssl-non443-hosts-sonar')
                                    for miss in missing_first_seen:
                                        update_hosts_queue.put(miss)

                                    # for some stupid reason I keep missing some at the end of the scan/scroll
                                    # so going to do them manually
                                    new_updates = update_es.search(index='passive-ssl-non443-hosts-sonar', body=q)
                                    logger.warning("Numer of hosts to update is {count}"
                                                   .format(count=new_updates['hits']['total']))
                                    missing_first_seen_again = scan(update_es, query=q, scroll='30m',
                                                                    index='passive-ssl-non443-hosts-sonar')
                                    bulk_update_missed = []
                                    for m in missing_first_seen_again:
                                        last_seen = m['_source']['last_seen']
                                        first_seen = last_seen
                                        action = {"_op_type": "update", "_index": "passive-ssl-non443-hosts-sonar",
                                                  "_type": "host", "_id": m['_id'], "doc": {'first_seen': first_seen}}
                                        bulk_update_missed.append(action)
                                        if len(bulk_update_missed) == 500:
                                            bulk(update_es, bulk_update_missed)
                                            bulk_update_missed = []
                                    bulk(update_es, bulk_update_missed)
                                    for w in xrange(workers):
                                        update_hosts_queue.put("DONE")
                                    logger.warning("Finished updating hosts at {d}".format(d=datetime.now()))

                                     #  The final bulk() call above flushed the remaining batch of fewer than 500 updates
                                    logger.warning("Importing finished of {f} at {d}".format(f=hostsfile,
                                                   d=datetime.now()))
                                    es.index(index='scansio-sonar-ssl-non443-imported', doc_type='imported-file',
                                             id=hostsfile, body={'file': hostsfile, 'imported_date': datetime.now(),
                                                                 'sha1': sha1})
                                    os.remove(hostsfile)
        else:
            logger.error("The scans.io/json must have changed or is having issues. I didn't see any studies. Exiting")
            sys.exit()
    else:
        logger.error("There was an error connecting to https://scans.io. I did not get a 200 status code. Exiting")
        sys.exit()
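
Note that the example above only deletes the downloaded hosts file after the import has been recorded in Elasticsearch, so a failed index call leaves the file on disk for a retry. A minimal sketch of that remove-after-success pattern, with a hypothetical record_import callable standing in for the es.index() call:

import os
import logging

logger = logging.getLogger(__name__)

def remove_after_import(path, record_import):
    # record_import should raise on failure; the file then survives
    # for a later retry instead of being lost.
    record_import(path)
    try:
        os.remove(path)
    except OSError:
        logger.warning("Imported but could not remove %s", path)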

Example 162

Project: gtfslib-python
Source File: export.py
View license
    def run(self, context, skip_shape_dist=False, bundle=None, **kwargs):

        with PrettyCsv("agency.txt", ["agency_id", "agency_name", "agency_url", "agency_timezone", "agency_lang", "agency_phone", "agency_fare_url", "agency_email" ], **kwargs) as csvout:
            nagencies = 0
            for agency in context.dao().agencies(fltr=context.args.filter):
                nagencies += 1
                csvout.writerow([ agency.agency_id, agency.agency_name, agency.agency_url, agency.agency_timezone, agency.agency_lang, agency.agency_phone, agency.agency_fare_url, agency.agency_email ])
            print("Exported %d agencies" % (nagencies))

        with PrettyCsv("stops.txt", ["stop_id", "stop_code", "stop_name", "stop_desc", "stop_lat", "stop_lon", "zone_id", "stop_url", "location_type", "parent_station", "stop_timezone", "wheelchair_boarding" ], **kwargs) as csvout:
            nstops = 0
            for stop in context.dao().stops(fltr=context.args.filter, prefetch_parent=False, prefetch_substops=False):
                nstops += 1
                csvout.writerow([ stop.stop_id, stop.stop_code, stop.stop_name, stop.stop_desc, stop.stop_lat, stop.stop_lon, stop.zone_id, stop.stop_url, stop.location_type, stop.parent_station_id, stop.stop_timezone, stop.wheelchair_boarding ])
            print("Exported %d stops" % (nstops))

        with PrettyCsv("routes.txt", ["route_id", "agency_id", "route_short_name", "route_long_name", "route_desc", "route_type", "route_url", "route_color", "route_text_color" ], **kwargs) as csvout:
            nroutes = 0
            for route in context.dao().routes(fltr=context.args.filter):
                nroutes += 1
                csvout.writerow([ route.route_id, route.agency_id, route.route_short_name, route.route_long_name, route.route_desc, route.route_type, route.route_url, route.route_color, route.route_text_color ])
            print("Exported %d routes" % (nroutes))

        stop_times_columns = ["trip_id", "arrival_time", "departure_time", "stop_id", "stop_sequence", "stop_headsign", "pickup_type", "drop_off_type", "timepoint"]
        if not skip_shape_dist:
            stop_times_columns.append("shape_dist_traveled")
        with PrettyCsv("trips.txt", ["route_id", "service_id", "trip_id", "trip_headsign", "trip_short_name", "direction_id", "block_id", "shape_id", "wheelchair_accessible", "bikes_allowed" ], **kwargs) as csvout1:
            with PrettyCsv("stop_times.txt", stop_times_columns, **kwargs) as csvout2:
                ntrips = 0
                nstoptimes = 0
                for trip in context.dao().trips(fltr=context.args.filter, prefetch_stops=False, prefetch_stop_times=True, prefetch_calendars=False, prefetch_routes=False):
                    ntrips += 1
                    if ntrips % 1000 == 0:
                        print("%d trips..." % (ntrips))
                    csvout1.writerow([ trip.route_id, trip.service_id, trip.trip_id, trip.trip_headsign, trip.trip_short_name, trip.direction_id, trip.block_id, trip.shape_id, trip.wheelchair_accessible, trip.bikes_allowed])
                    for stoptime in trip.stop_times:
                        nstoptimes += 1
                        row = [ trip.trip_id,
                                fmttime(stoptime.arrival_time if stoptime.arrival_time else stoptime.departure_time),
                                fmttime(stoptime.departure_time if stoptime.departure_time else stoptime.arrival_time),
                                stoptime.stop_id,
                                stoptime.stop_sequence,
                                stoptime.stop_headsign,
                                stoptime.pickup_type,
                                stoptime.drop_off_type,
                                stoptime.timepoint ]
                        if not skip_shape_dist:
                            row.append(stoptime.shape_dist_traveled)
                        csvout2.writerow(row)
                print("Exported %d trips with %d stop times" % (ntrips, nstoptimes))

        # Note: GTFS' model does not have calendars objects to export,
        # since a calendar is renormalized/expanded to a list of dates.

        with PrettyCsv("calendar_dates.txt", ["service_id", "date", "exception_type"], **kwargs) as csvout:
            ncals = ndates = 0
            for calendar in context.dao().calendars(fltr=context.args.filter, prefetch_dates=True):
                ncals += 1
                if ncals % 1000 == 0:
                    print("%d calendars, %d dates..." % (ncals, ndates))
                for date in calendar.dates:
                    ndates += 1
                    csvout.writerow([calendar.service_id, date.toYYYYMMDD(), 1])
            print("Exported %d calendars with %d dates" % (ncals, ndates))

        with PrettyCsv("fare_attributes.txt", ["fare_id", "price", "currency_type", "payment_method", "transfers", "transfer_duration"], **kwargs) as csvout:
            nfareattrs = 0
            for fareattr in context.dao().fare_attributes(fltr=context.args.filter, prefetch_fare_rules=False):
                nfareattrs += 1
                csvout.writerow([ fareattr.fare_id, fareattr.price, fareattr.currency_type, fareattr.payment_method, fareattr.transfers, fareattr.transfer_duration ])
            print("Exported %d fare attributes" % (nfareattrs))

        with PrettyCsv("fare_rules.txt", ["fare_id", "route_id", "origin_id", "destination_id", "contains_id"], **kwargs) as csvout:
            nfarerules = 0
            for farerule in context.dao().fare_rules(fltr=context.args.filter, prefetch_fare_attributes=False):
                nfarerules += 1
                csvout.writerow([ farerule.fare_id, farerule.route_id, farerule.origin_id, farerule.destination_id, farerule.contains_id ])
            print("Exported %d fare rules" % (nfarerules))

        shapes_columns = ["shape_id", "shape_pt_lat", "shape_pt_lon", "shape_pt_sequence"]
        if not skip_shape_dist:
            shapes_columns.append("shape_dist_traveled")
        with PrettyCsv("shapes.txt", shapes_columns, **kwargs) as csvout:
            nshapes = nshapepoints = 0
            for shape in context.dao().shapes(fltr=context.args.filter, prefetch_points=True):
                nshapes += 1
                if nshapes % 100 == 0:
                    print("%d shapes, %d points..." % (nshapes, nshapepoints))
                for point in shape.points:
                    nshapepoints += 1
                    row = [shape.shape_id, point.shape_pt_lat, point.shape_pt_lon, point.shape_pt_sequence]
                    if not skip_shape_dist:
                        row.append(point.shape_dist_traveled)
                    csvout.writerow(row)
            print("Exported %d shapes with %d points" % (nshapes, nshapepoints))

        with PrettyCsv("transfers.txt", ["from_stop_id", "to_stop_id", "transfer_type", "min_transfer_time"], **kwargs) as csvout:
            ntransfers = 0
            for transfer in context.dao().transfers(fltr=context.args.filter, prefetch_stops=False):
                ntransfers += 1
                csvout.writerow([ transfer.from_stop_id, transfer.to_stop_id, transfer.transfer_type, transfer.min_transfer_time ])
            print("Exported %d transfers" % (ntransfers))

        if bundle:
            if not isinstance(bundle, six.string_types):
                # Allow the use of "--bundle" option only
                bundle = "gtfs.zip"
            if not bundle.endswith('.zip'):
                bundle = bundle + '.zip'
            print("Zipping result to %s (removing .txt files)" % (bundle))
            with zipfile.ZipFile(bundle, 'w', zipfile.ZIP_DEFLATED) as zipf:
                for f in [ "agency.txt", "stops.txt", "routes.txt", "trips.txt", "stop_times.txt", "calendar_dates.txt", "fare_rules.txt", "fare_attributes.txt", "shapes.txt", "transfers.txt" ]:
                    zipf.write(f)
                    os.remove(f)
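
The bundling step above writes each exported .txt file into the archive and immediately deletes the original. A standalone sketch of that zip-then-remove loop (the filenames are whatever exports the caller produced):

import os
import zipfile

def bundle_and_clean(bundle_path, filenames):
    # Add each file to the archive, then remove the loose copy so
    # only the .zip bundle remains on disk.
    with zipfile.ZipFile(bundle_path, 'w', zipfile.ZIP_DEFLATED) as zipf:
        for name in filenames:
            zipf.write(name)
            os.remove(name)

Deleting inside the with block is safe because zipf.write() has already copied the file's contents into the archive.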

Example 163

View license
    def test_doctest_issue4197(self):
        # To avoid having to keep two copies of the doctest module's
        # unit tests in sync, this test works by taking the source of
        # test_doctest itself, rewriting it a bit to cope with a new
        # location, and then throwing it in a zip file to make sure
        # everything still works correctly
        test_src = inspect.getsource(test_doctest)
        test_src = test_src.replace(
                         "from test import test_doctest",
                         "import test_zipped_doctest as test_doctest")
        test_src = test_src.replace("test.test_doctest",
                                    "test_zipped_doctest")
        test_src = test_src.replace("test.sample_doctest",
                                    "sample_zipped_doctest")
        sample_src = inspect.getsource(sample_doctest)
        sample_src = sample_src.replace("test.test_doctest",
                                        "test_zipped_doctest")
        with temp_dir() as d:
            script_name = make_script(d, 'test_zipped_doctest',
                                            test_src)
            zip_name, run_name = make_zip_script(d, 'test_zip',
                                                script_name)
            z = zipfile.ZipFile(zip_name, 'a')
            z.writestr("sample_zipped_doctest.py", sample_src)
            z.close()
            if verbose:
                zip_file = zipfile.ZipFile(zip_name, 'r')
                print ('Contents of %r:' % zip_name)
                zip_file.printdir()
                zip_file.close()
            os.remove(script_name)
            sys.path.insert(0, zip_name)
            import test_zipped_doctest
            try:
                # Some of the doc tests depend on the colocated text files
                # which aren't available to the zipped version (the doctest
                # module currently requires real filenames for non-embedded
                # tests). So we're forced to be selective about which tests
                # to run.
                # doctest could really use some APIs which take a text
                # string or a file object instead of a filename...
                known_good_tests = [
                    test_zipped_doctest.SampleClass,
                    test_zipped_doctest.SampleClass.NestedClass,
                    test_zipped_doctest.SampleClass.NestedClass.__init__,
                    test_zipped_doctest.SampleClass.__init__,
                    test_zipped_doctest.SampleClass.a_classmethod,
                    test_zipped_doctest.SampleClass.a_property,
                    test_zipped_doctest.SampleClass.a_staticmethod,
                    test_zipped_doctest.SampleClass.double,
                    test_zipped_doctest.SampleClass.get,
                    test_zipped_doctest.SampleNewStyleClass,
                    test_zipped_doctest.SampleNewStyleClass.__init__,
                    test_zipped_doctest.SampleNewStyleClass.double,
                    test_zipped_doctest.SampleNewStyleClass.get,
                    test_zipped_doctest.sample_func,
                    test_zipped_doctest.test_DocTest,
                    test_zipped_doctest.test_DocTestParser,
                    test_zipped_doctest.test_DocTestRunner.basics,
                    test_zipped_doctest.test_DocTestRunner.exceptions,
                    test_zipped_doctest.test_DocTestRunner.option_directives,
                    test_zipped_doctest.test_DocTestRunner.optionflags,
                    test_zipped_doctest.test_DocTestRunner.verbose_flag,
                    test_zipped_doctest.test_Example,
                    test_zipped_doctest.test_debug,
                    test_zipped_doctest.test_pdb_set_trace,
                    test_zipped_doctest.test_pdb_set_trace_nested,
                    test_zipped_doctest.test_testsource,
                    test_zipped_doctest.test_trailing_space_in_test,
                    test_zipped_doctest.test_DocTestSuite,
                    test_zipped_doctest.test_DocTestFinder,
                ]
                # These remaining tests are the ones which need access
                # to the data files, so we don't run them
                fail_due_to_missing_data_files = [
                    test_zipped_doctest.test_DocFileSuite,
                    test_zipped_doctest.test_testfile,
                    test_zipped_doctest.test_unittest_reportflags,
                ]
                for obj in known_good_tests:
                    _run_object_doctest(obj, test_zipped_doctest)
            finally:
                del sys.modules["test_zipped_doctest"]

Example 164

Project: cloud-scheduler
Source File: nimbuscluster.py
View license
    def vm_create(self, vm_name, vm_type, vm_user, vm_networkassoc, vm_cpuarch,
            vm_image, vm_mem, vm_cores, vm_storage, customization=None, vm_keepalive=0,
            job_proxy_file_path=None, myproxy_creds_name=None, myproxy_server=None, 
            myproxy_server_port=None, job_per_core=False, proxy_non_boot=False,
            vmimage_proxy_file=None, vmimage_proxy_file_path=None):
        """Attempt to boot up a new VM on the cluster."""
        def _remove_files(files):
            """Private function to clean up temporary files created during the create process."""
            for file in files:
                try:
                    if file:
                        log.verbose("Deleting %s" % file)
                        os.remove(file)
                except:
                    log.exception("Couldn't delete %s" % file)

        log.verbose("Nimbus cloud create command")

        if vm_networkassoc == "":
            # No network specified, so just pick the first available one
            try:
                for netpool in self.net_slots.keys():
                    if self.net_slots[netpool] > 0:
                        vm_networkassoc = netpool
                        break
                if vm_networkassoc == "":
                    vm_networkassoc = self.network_pools[0]
            except:
                log.exception("No network pool available? Aborting vm creation.")
                return self.ERROR

        # Create a workspace metadata xml file
        if not self.temp_lease_storage:
            vm_metadata = nimbus_xml.ws_metadata_factory(vm_name, vm_networkassoc, \
                self.cpu_archs[0], vm_image, vm_storage > 0, self.image_attach_device,
                self.scratch_attach_device,)
        else:
            vm_metadata = nimbus_xml.ws_metadata_factory(vm_name, vm_networkassoc, \
                self.cpu_archs[0], vm_image, False, self.image_attach_device,
                self.scratch_attach_device,)


        # Create a deployment request file
        if not self.temp_lease_storage:
            vm_deploymentrequest = nimbus_xml.ws_deployment_factory(vm_duration = self.vm_lifetime, \
                vm_targetstate = self.VM_TARGETSTATE, vm_mem = vm_mem, vm_storage = vm_storage, vm_nodes = self.VM_NODES, vm_cores=vm_cores)
        else:
            vm_deploymentrequest = nimbus_xml.ws_deployment_factory(vm_duration = self.vm_lifetime, \
                vm_targetstate = self.VM_TARGETSTATE, vm_mem = vm_mem, vm_storage = None, vm_nodes = self.VM_NODES, vm_cores=vm_cores)

        job_proxy = None
        try:
            with open(job_proxy_file_path) as proxy:
                job_proxy = proxy.read()
        except:
            if job_proxy_file_path:
                log.exception("Couldn't open '%s', Backing out of VM Creation." % (job_proxy_file_path))
                return -1 # Temp Ban job

        if customization or job_proxy or vmimage_proxy_file:
            image_scheme = urlparse(vm_image).scheme
            if image_scheme == "https":
                if vmimage_proxy_file:
                    try:
                        with open(vmimage_proxy_file_path) as proxy:
                            vmimage_proxy = proxy.read()
                    except:
                        if vmimage_proxy_file:
                            log.exception("Couldn't open '%s' path for %s, Backing out of VM Creation." % (vmimage_proxy_file_path, vmimage_proxy_file))
                            return -1 # Temp Ban job
                    _job_proxy = vmimage_proxy
                else:
                    _job_proxy = job_proxy
            else:
                _job_proxy = None
            vm_optional = nimbus_xml.ws_optional_factory(custom_tasks=customization, credential=_job_proxy)
        else:
            vm_optional = None


        # Set a timestamp for VM creation
        #now = datetime.datetime.now()

        # Create an EPR file name (unique with timestamp)
        (epr_handle, vm_epr) = tempfile.mkstemp(suffix=".vm_epr")
        os.close(epr_handle)

        nimbus_files = [vm_epr, vm_metadata, vm_deploymentrequest, vm_optional]

        # Create cached copy of job proxy to be used by VM for startup and shutdown.
        vm_proxy_file_path = None
        if job_proxy_file_path and not proxy_non_boot:
            try:
                vm_proxy_file_path = self._cache_proxy(job_proxy_file_path)
                log.verbose("Cached proxy to '%s'" % vm_proxy_file_path)
            except:
                log.exception("Problem caching proxy.")
                _remove_files(nimbus_files)
                return -1

        # Create the workspace command as a list (private method)
        ws_cmd = self.vmcreate_factory(vm_epr, vm_metadata, vm_deploymentrequest, optional_file=vm_optional)
        

        log.debug("Command: " + string.join(ws_cmd, " "))

        # Execute the workspace create command: returns immediately.
        env = None
        if vm_proxy_file_path is not None and not proxy_non_boot:
            env = {'X509_USER_PROXY':vm_proxy_file_path}
            log.debug("VM creation environment will contain:\n\tX509_USER_PROXY = %s" % (vm_proxy_file_path))

        (create_return, create_out, create_err) = self.vm_execwait(ws_cmd, env)

        if (create_return != 0):
            if create_out == "" or create_out == None:
                create_out = "No Output returned."
            if create_err == "" or create_err == None:
                create_err = "No Error output returned."
            log.warning("Error creating VM %s: %s %s %s" % (vm_name, create_out, create_err, create_return))
            _remove_files(nimbus_files + [vm_proxy_file_path])
            err_type = self._extract_create_error(create_err)
            ## TODO Figure out some error codes to return then handle the codes in the scheduler vm creation code
            if err_type == 'NoProxy' or err_type == 'ExpiredProxy':
                create_return = -1
            elif err_type == 'NoSlotsInNetwork' and config.adjust_insufficient_resources:
                with self.res_lock:
                    if vm_networkassoc in self.net_slots.keys():
                        self.vm_slots -= self.net_slots[vm_networkassoc]
                        self.net_slots[vm_networkassoc] = 0 # no slots remaining
                create_return = -2
            elif err_type =='NotEnoughMemory' and config.adjust_insufficient_resources:
                with self.res_lock:
                    index = self.find_mementry(vm_mem)
                    self.memory[index] = vm_mem - 1 # may still be memory, but just not enough for this vm
                create_return = -2
            elif err_type == 'ExceedMaximumWorkspaces' or err_type == 'NotAuthorized':
                create_return = -3

            return create_return

        log.verbose("Nimbus create command executed.")

        log.verbose("Deleting temporary Nimbus Metadata files")
        _remove_files(nimbus_files)

        # Find the memory entry in the Cluster 'memory' list which _create will be
        # subtracted from
        vm_mementry = self.find_mementry(vm_mem)
        if (vm_mementry < 0):
            # At this point, there should always be a valid mementry, as the ResourcePool
            # get_resource methods have selected this cluster based on having an open
            # memory entry that fits VM requirements.
            log.error("Cluster memory list has no sufficient memory " +\
              "entries (Not supposed to happen). Returning error.")
        log.verbose("Memory entry found in given cluster: %d" % vm_mementry)

        # Get the id of the VM from the output of workspace.sh
        try:
            vm_id = re.search("Workspace created: id (\d*)", create_out).group(1)
        except:
            log.error("Couldn't find workspace id for new VM")
            create_return = -3
            return create_return
        try:
            vm_ip = re.search("IP address: (\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})", create_out).group(1)
        except:
            log.error("Couldn't find the ip address for new VM")
            create_return = -3
            return create_return

        # Get the first part of the hostname given to the VM
        vm_hostname = self._extract_hostname(create_out)
        if vm_hostname:
            log.verbose("Hostname for vm_id %s is %s" % (vm_id, vm_hostname))
        else:
            log.warning("Unable to get the VM hostname, for vm_id %s" % vm_id)

        if not vm_keepalive and self.keep_alive: #if job didn't set a keep_alive use the clouds default
            vm_keepalive = self.keep_alive
        # Create a VM object to represent the newly created VM
        new_vm = cluster_tools.VM(name = vm_name, id = vm_id, vmtype = vm_type, user = vm_user,
            hostname = vm_hostname, ipaddress = vm_ip, 
            clusteraddr = self.network_address, clusterport = self.port,
            cloudtype = self.cloud_type,network = vm_networkassoc,
            cpuarch = vm_cpuarch, image = vm_image,
            memory = vm_mem, mementry = vm_mementry, cpucores = vm_cores,
            storage = vm_storage, keep_alive = vm_keepalive, 
            proxy_file = vm_proxy_file_path, 
            myproxy_creds_name = myproxy_creds_name, myproxy_server = myproxy_server, 
            myproxy_server_port = myproxy_server_port, job_per_core = job_per_core)

        # Add the new VM object to the cluster's vms list And check out required resources
        try:
            self.resource_checkout(new_vm)
        except:
            log.exception("Unexpected error checking out resources when creating a VM. Programming error?")
            return self.ERROR
        self.vms.append(new_vm)
        log.info("Started vm %s on %s using image at %s" % (new_vm.id, new_vm.clusteraddr, new_vm.image))
        return create_return
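
The _remove_files helper above swallows every failure with a bare except, which also hides programming errors. A tightened variant of the same best-effort cleanup, narrowing the catch to OSError (what os.remove normally raises) while keeping the skip-empty-entries behaviour:

import os
import logging

log = logging.getLogger(__name__)

def remove_files(paths):
    # Best-effort cleanup: skip empty entries and log, rather than
    # propagate, a file that is missing or cannot be deleted.
    for path in paths:
        if not path:
            continue
        try:
            os.remove(path)
        except OSError:
            log.exception("Couldn't delete %s", path)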

Example 165

Project: pyload
Source File: pyLoadCore.py
View license
    def start(self, rpc=True, web=True):
        """ starts the fun :D """

        self.version = CURRENT_VERSION

        if not exists("pyload.conf"):
            from module.setup import Setup

            print "This is your first start, running configuration assistent now."
            self.config = ConfigParser()
            s = Setup(pypath, self.config)
            res = False
            try:
                res = s.start()
            except SystemExit:
                pass
            except KeyboardInterrupt:
                print "\nSetup interrupted"
            except:
                res = False
                print_exc()
                print "Setup failed"
            if not res:
                remove("pyload.conf")

            exit()

        try: signal.signal(signal.SIGQUIT, self.quit)
        except: pass

        self.config = ConfigParser()

        gettext.setpaths([join(os.sep, "usr", "share", "pyload", "locale"), None])
        translation = gettext.translation("pyLoad", self.path("locale"),
                                          languages=[self.config['general']['language'],"en"],fallback=True)
        translation.install(True)

        self.debug = self.doDebug or self.config['general']['debug_mode']
        self.remote &= self.config['remote']['activated']

        pid = self.isAlreadyRunning()
        if pid:
            print _("pyLoad already running with pid %s") % pid
            exit()

        if os.name != "nt" and self.config["general"]["renice"]:
            os.system("renice %d %d" % (self.config["general"]["renice"], os.getpid()))

        if self.config["permission"]["change_group"]:
            if os.name != "nt":
                try:
                    from grp import getgrnam

                    group = getgrnam(self.config["permission"]["group"])
                    os.setgid(group[2])
                except Exception, e:
                    print _("Failed changing group: %s") % e

        if self.config["permission"]["change_user"]:
            if os.name != "nt":
                try:
                    from pwd import getpwnam

                    user = getpwnam(self.config["permission"]["user"])
                    os.setuid(user[2])
                except Exception, e:
                    print _("Failed changing user: %s") % e

        self.check_file(self.config['log']['log_folder'], _("folder for logs"), True)

        if self.debug:
            self.init_logger(logging.DEBUG) # logging level
        else:
            self.init_logger(logging.INFO) # logging level

        self.do_kill = False
        self.do_restart = False
        self.shuttedDown = False

        self.log.info(_("Starting") + " pyLoad %s" % CURRENT_VERSION)
        self.log.info(_("Using home directory: %s") % getcwd())

        self.writePidFile()

        #@TODO refactor

        remote.activated = self.remote
        self.log.debug("Remote activated: %s" % self.remote)

        self.check_install("Crypto", _("pycrypto to decode container files"))
        #img = self.check_install("Image", _("Python Image Libary (PIL) for captcha reading"))
        #self.check_install("pycurl", _("pycurl to download any files"), True, True)
        self.check_file("tmp", _("folder for temporary files"), True)
        #tesser = self.check_install("tesseract", _("tesseract for captcha reading"), False) if os.name != "nt" else True

        self.captcha = True # checks seem to fail, although tesseract is available

        self.check_file(self.config['general']['download_folder'], _("folder for downloads"), True)

        if self.config['ssl']['activated']:
            self.check_install("OpenSSL", _("OpenSSL for secure connection"))

        self.setupDB()
        if self.config.oldRemoteData:
            self.log.info(_("Moving old user config to DB"))
            self.db.addUser(self.config.oldRemoteData["username"], self.config.oldRemoteData["password"])

            self.log.info(_("Please check your logindata with ./pyLoadCore.py -u"))

        if self.deleteLinks:
            self.log.info(_("All links removed"))
            self.db.purgeLinks()

        self.requestFactory = RequestFactory(self)
        __builtin__.pyreq = self.requestFactory

        self.lastClientConnected = 0

        # later imported because they would trigger api import, and remote value not set correctly
        from module import Api
        from module.HookManager import HookManager
        from module.ThreadManager import ThreadManager

        if Api.activated != self.remote:
            self.log.warning("Import error: API remote status not correct.")

        self.api = Api.Api(self)

        self.scheduler = Scheduler(self)

        #hell yeah, so many important managers :D
        self.pluginManager = PluginManager(self)
        self.pullManager = PullManager(self)
        self.accountManager = AccountManager(self)
        self.threadManager = ThreadManager(self)
        self.captchaManager = CaptchaManager(self)
        self.hookManager = HookManager(self)
        self.remoteManager = RemoteManager(self)

        self.js = JsEngine()

        self.log.info(_("Downloadtime: %s") % self.api.isTimeDownload())

        if rpc:
            self.remoteManager.startBackends()

        if web:
            self.init_webserver()

        spaceLeft = freeSpace(self.config["general"]["download_folder"])

        self.log.info(_("Free space: %s") % formatSize(spaceLeft))

        self.config.save() # save so the config file gets filled

        link_file = join(pypath, "links.txt")

        if exists(link_file):
            f = open(link_file, "rb")
            if f.read().strip():
                self.api.addPackage("links.txt", [link_file], 1)
            f.close()

        link_file = "links.txt"
        if exists(link_file):
            f = open(link_file, "rb")
            if f.read().strip():
                self.api.addPackage("links.txt", [link_file], 1)
            f.close()

        #self.scheduler.addJob(0, self.accountManager.getAccountInfos)
        self.log.info(_("Activating Accounts..."))
        self.accountManager.getAccountInfos()

        self.threadManager.pause = False
        self.running = True

        self.log.info(_("Activating Plugins..."))
        self.hookManager.coreReady()

        self.log.info(_("pyLoad is up and running"))

        #test api
#        from module.common.APIExerciser import startApiExerciser
#        startApiExerciser(self, 3)

        #some memory stats
#        from guppy import hpy
#        hp=hpy()
#        import objgraph
#        objgraph.show_most_common_types(limit=20)
#        import memdebug
#        memdebug.start(8002)

        locals().clear()

        while True:
            sleep(2)
            if self.do_restart:
                self.log.info(_("restarting pyLoad"))
                self.restart()
            if self.do_kill:
                self.shutdown()
                self.log.info(_("pyLoad quits"))
                self.removeLogger()
                _exit(0) #@TODO thrift blocks shutdown

            self.threadManager.work()
            self.scheduler.work()
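
Early in start(), the example deletes pyload.conf when the first-run setup fails or is interrupted, so the next launch re-enters the configuration assistant. A small sketch of that discard-on-failure pattern, with setup as a hypothetical callable standing in for Setup.start():

import os

def run_setup_or_discard(config_path, setup):
    # setup is assumed to return True on success and may raise
    # SystemExit or KeyboardInterrupt if the user bails out.
    ok = False
    try:
        ok = setup()
    except (SystemExit, KeyboardInterrupt):
        pass
    if not ok and os.path.exists(config_path):
        # Discard the half-written config so the next launch re-runs setup.
        os.remove(config_path)
    return ok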

Example 166

Project: attention-lvcsr
Source File: check_whitespace.py
View license
def main(argv=None):
    if argv is None:
        argv = sys.argv[1:]

    parser = argparse.ArgumentParser(description="Pretxncommit hook for Mercurial to check for whitespace issues")
    parser.add_argument("-n", "--no-indentation",
                        action="store_const",
                        default=False,
                        const=True,
                        help="don't check indentation, just basic parsing"
                       )
    parser.add_argument("-i", "--incremental",
                        action="store_const",
                        default=False,
                        const=True,
                        help="only block on newly introduced indentation problems; ignore all others"
                       )
    parser.add_argument("-p", "--incremental-with-patch",
                        action="store_const",
                        default=False,
                        const=True,
                        help="only block on newly introduced indentation problems; propose a patch for all others"
                       )
    parser.add_argument("-s", "--skip-after-failure",
                        action="store_const",
                        default=False,
                        const=True,
                        help="when this pre-commit hook fails, don't run it on the next commit; "
                             "this lets you check in your changes and then check in "
                             "any necessary whitespace changes in the subsequent commit"
                       )
    args = parser.parse_args(argv)

    # -i and -s are incompatible; if you skip checking, you end up with a not-correctly-indented
    # file, which -i then causes you to ignore!
    if args.skip_after_failure and args.incremental:
        print("*** check whitespace hook misconfigured! -i and -s are incompatible.", file=sys.stderr)
        return 1

    if is_merge():
        # don't inspect merges: (a) they're complex and (b) they don't really introduce new code
        return 0

    if args.skip_after_failure and should_skip_commit():
        # we're set up to skip this one, so skip it, but
        # first, make sure we don't skip the next one as well :)
        os.remove(SKIP_WHITESPACE_CHECK_FILENAME)
        return 0

    block_commit = False

    diffs = []

    added_filenames = added_files()
    changed_filenames = changed_files()

    for filename in filter(is_python_file, added_filenames + changed_filenames):
        code = get_file_contents(filename)
        parse_error = get_parse_error(code)
        if parse_error is not None:
            print("*** %s has parse error: %s" % (filename, parse_error), file=sys.stderr)
            block_commit = True
        else:
            # parsing succeeded, it is safe to check indentation
            if not args.no_indentation:
                was_clean = None  # unknown
                # only calculate was_clean if it will matter to us
                if args.incremental or args.incremental_with_patch:
                    if filename in changed_filenames:
                        old_file_contents = get_file_contents(filename, revision=parent_commit())
                        was_clean = get_correct_indentation_diff(old_file_contents, "") is None
                    else:
                        was_clean = True  # by default -- it was newly added and thus had no prior problems

                check_indentation = was_clean or not args.incremental
                if check_indentation:
                    indentation_diff = get_correct_indentation_diff(code, filename)
                    if indentation_diff is not None:
                        if was_clean or not args.incremental_with_patch:
                            block_commit = True
                        diffs.append(indentation_diff)
                        print("%s is not correctly indented" % filename, file=sys.stderr)

    if len(diffs) > 0:
        diffs_filename = ".hg/indentation_fixes.patch"
        save_diffs(diffs, diffs_filename)
        print("*** To fix all indentation issues, run: cd `hg root` && patch -p0 < %s" % diffs_filename, file=sys.stderr)

    if block_commit:
        save_filename = ".hg/commit_message.saved"
        save_commit_message(save_filename)
        print("*** Commit message saved to %s" % save_filename, file=sys.stderr)

        if args.skip_after_failure:
            save_skip_next_commit()
            print("*** Next commit attempt will not be checked. To change this, rm %s" % SKIP_WHITESPACE_CHECK_FILENAME, file=sys.stderr)

    return int(block_commit)
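
The hook above implements its --skip-after-failure option with a one-shot sentinel file: os.remove() consumes the flag so only a single commit is exempted. A minimal sketch of that idea (the sentinel name here is hypothetical):

import os

SKIP_FLAG = ".skip_next_check"  # hypothetical sentinel file name

def consume_skip_flag():
    # One-shot flag: if the sentinel exists, delete it and report that
    # this run should be skipped, so the next run is checked again.
    if os.path.exists(SKIP_FLAG):
        os.remove(SKIP_FLAG)
        return True
    return False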

Example 167

Project: turbulenz_tools
Source File: maketzjs.py
View license
def tzjs_generate(env, options, input_js):

    # The set of files to be injected

    Profiler.start('find_inject_code')
    inject_js = inject_js_from_options(options)
    Profiler.stop('find_inject_code')

    if 0 < len(inject_js):
        LOG.info("Files to inject:")
        for i in inject_js:
            LOG.info(" - '%s'", i)

    # Create a context and render the template

    Profiler.start('load_templates')
    context = context_from_options(options, input_js[0])
    templates_js = env_load_templates(env, input_js)
    Profiler.stop('load_templates')

    Profiler.start('render_js')
    (rendered_js, inc_js) = render_js(context, options, templates_js,
                                      inject_js)
    rendered_js = rendered_js.encode('utf-8')
    Profiler.stop('render_js')

    if 0 != len(inc_js):
        raise ToolsException("internal error")

    # If required, remove all calls to 'debug.*' methods BEFORE
    # compacting

    # TODO: We write and read the files too many times.  Better to
    # write once to a temporary, keep track of the name and invoke
    # each external command on files, creating subsequent temporaries
    # as required.

    if options.stripdebug:

        strip_path = "strip-debug"
        if options.stripdebugpath:
            strip_path = normpath(abspath(options.stripdebugpath))

        LOG.info("Stripping debug method calls ...")

        # Check we can actually run strip debug, with the given path
        p = subprocess.Popen('%s -h' % strip_path,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT,
                             shell=True)
        p.communicate()
        if p.returncode != 0:
            raise ToolsException( \
                "\n\tstrip-debug tool could not be found, check it's on your path\n"
                "\tor supply the path with --strip-debug <path>. To run maketzjs\n"
                "\twithout stripping debug code run with --no-strip-debug." )

        Profiler.start('strip_debug')

        strip_debug_flags = "-Ddebug=false"

        # Add the default flags first, in case the custom flags
        # override them.

        if options.verbose:
            strip_debug_flags += " -v"
        for s in options.stripnamespaces:
            strip_debug_flags += " --namespace %s" % s
        for v in options.stripvars:
            strip_debug_flags += " -D %s" % v
        if options.ignoreerrors:
            strip_debug_flags += " --ignore-errors"

        # Launch the strip command and pass in the full script via
        # streams.

        with NamedTemporaryFile(delete = False) as t:
            LOG.info("Writing temp JS to '%s'", t.name)
            t.write(rendered_js)

        with NamedTemporaryFile(delete = False) as tmp_out:
            pass

        strip_cmd = "%s %s -o %s %s" % (strip_path, strip_debug_flags,
                                        tmp_out.name, t.name)
        LOG.info("Strip cmd: %s", strip_cmd)
        strip_retval = subprocess.call(strip_cmd, shell=True)

        if 0 != strip_retval:
            raise ToolsException( \
                "strip-debug tool exited with code %d\n"
                "The (merged) input probably contains a syntax error:\n"
                "  %s" % (strip_retval, t.name))

        rendered_js = read_file_utf8(tmp_out.name).encode('utf-8')
        remove(tmp_out.name)
        remove(t.name)

        Profiler.stop('strip_debug')

    # If required, compact the JS via a temporary file, otherwise just
    # write out directly to the output file.

    if options.mode != 'webworker-debug' and (options.yui or options.closure or options.uglifyjs):

        Profiler.start('compact')

        with NamedTemporaryFile(delete = False) as t:
            LOG.info("Writing temp JS to '%s'", t.name)
            t.write(rendered_js)

        LOG.info("Compacting temp JS to '%s'", options.output)
        tzjs_compact(options, t.name, options.output)
        remove(t.name)
        Profiler.stop('compact')

    else:

        LOG.info("Writing JS to '%s'", options.output)
        Profiler.start('write_out')
        try:
            with open(options.output, 'wb') as f:
                f.write(rendered_js)
                LOG.info("Succeeded")
        except IOError:
            raise ToolsException("failed to write file: %s" % options.output)
        Profiler.stop('write_out')

    return 0
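
The strip and compact steps above both use NamedTemporaryFile(delete=False) so the file outlives the with block and can be handed to an external command, with os.remove() doing the cleanup afterwards. A generic sketch of that pipeline; cmd_template and its two %s placeholders (input path, output path) are assumptions, not any real tool's interface:

import os
import subprocess
from tempfile import NamedTemporaryFile

def run_tool_via_tempfiles(data, cmd_template):
    # Write the input (bytes) to a temp file that survives the with
    # block, reserve a second temp file for the output, run the
    # external tool, then clean both files up ourselves.
    with NamedTemporaryFile(delete=False) as src:
        src.write(data)
    with NamedTemporaryFile(delete=False) as dst:
        pass
    try:
        subprocess.check_call(cmd_template % (src.name, dst.name), shell=True)
        with open(dst.name, 'rb') as f:
            return f.read()
    finally:
        os.remove(src.name)
        os.remove(dst.name)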

Example 168

View license
@task_method
def parse_strace_deps(self, path, cwd):
	# uncomment the following line to disable the dependencies and force a file scan
	# return
	try:
		cnt = Utils.readf(path)
	finally:
		try:
			os.remove(path)
		except OSError:
			pass

	if not isinstance(cwd, str):
		cwd = cwd.abspath()

	nodes = []
	bld = self.generator.bld
	try:
		cache = bld.strace_cache
	except AttributeError:
		cache = bld.strace_cache = {}

	# chdir and relative paths
	pid_to_cwd = {}

	global BANNED
	done = set()
	for m in re.finditer(re_lines, cnt):
		# scraping the output of strace
		pid = m.group('pid')
		if m.group('npid'):
			npid = m.group('npid')
			pid_to_cwd[npid] = pid_to_cwd.get(pid, cwd)
			continue

		p = m.group('path').replace('\\"', '"')

		if p == '.' or m.group().find('= -1 ENOENT') > -1:
			# just to speed it up a bit
			continue

		if not os.path.isabs(p):
			p = os.path.join(pid_to_cwd.get(pid, cwd), p)

		call = m.group('call')
		if call == 'chdir':
			pid_to_cwd[pid] = p
			continue

		if p in done:
			continue
		done.add(p)

		for x in BANNED:
			if p.startswith(x):
				break
		else:
			if p.endswith('/') or os.path.isdir(p):
				continue

			try:
				node = cache[p]
			except KeyError:
				strace_lock.acquire()
				try:
					cache[p] = node = bld.root.find_node(p)
					if not node:
						continue
				finally:
					strace_lock.release()
			nodes.append(node)

	# record the dependencies then force the task signature recalculation for next time
	if Logs.verbose:
		Logs.debug('deps: real scanner for %r returned %r', self, nodes)
	bld = self.generator.bld
	bld.node_deps[self.uid()] = nodes
	bld.raw_deps[self.uid()] = []
	try:
		del self.cache_sig
	except AttributeError:
		pass
	self.signature()
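
parse_strace_deps consumes the trace file: the os.remove() sits in a finally clause so the file is deleted even when reading fails, and a file that is already gone during cleanup is not treated as an error. That pattern in isolation:

import os

def read_and_consume(path):
    # Return the file's contents and delete it afterwards, even if
    # the read raises; tolerate the file already being gone.
    try:
        with open(path) as f:
            return f.read()
    finally:
        try:
            os.remove(path)
        except OSError:
            pass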

Example 169

View license
    def test_doctest_issue4197(self):
        # To avoid having to keep two copies of the doctest module's
        # unit tests in sync, this test works by taking the source of
        # test_doctest itself, rewriting it a bit to cope with a new
        # location, and then throwing it in a zip file to make sure
        # everything still works correctly
        test_src = inspect.getsource(test_doctest)
        test_src = test_src.replace(
                         "from test import test_doctest",
                         "import test_zipped_doctest as test_doctest")
        test_src = test_src.replace("test.test_doctest",
                                    "test_zipped_doctest")
        test_src = test_src.replace("test.sample_doctest",
                                    "sample_zipped_doctest")
        # The sample doctest files rewritten to include in the zipped version.
        sample_sources = {}
        for mod in [sample_doctest, sample_doctest_no_doctests,
                    sample_doctest_no_docstrings]:
            src = inspect.getsource(mod)
            src = src.replace("test.test_doctest", "test_zipped_doctest")
            # Rewrite the module name so that, for example,
            # "test.sample_doctest" becomes "sample_zipped_doctest".
            mod_name = mod.__name__.split(".")[-1]
            mod_name = mod_name.replace("sample_", "sample_zipped_")
            sample_sources[mod_name] = src

        with test.support.temp_dir() as d:
            script_name = make_script(d, 'test_zipped_doctest',
                                            test_src)
            zip_name, run_name = make_zip_script(d, 'test_zip',
                                                script_name)
            z = zipfile.ZipFile(zip_name, 'a')
            for mod_name, src in sample_sources.items():
                z.writestr(mod_name + ".py", src)
            z.close()
            if verbose:
                zip_file = zipfile.ZipFile(zip_name, 'r')
                print ('Contents of %r:' % zip_name)
                zip_file.printdir()
                zip_file.close()
            os.remove(script_name)
            sys.path.insert(0, zip_name)
            import test_zipped_doctest
            try:
                # Some of the doc tests depend on the colocated text files
                # which aren't available to the zipped version (the doctest
                # module currently requires real filenames for non-embedded
                # tests). So we're forced to be selective about which tests
                # to run.
                # doctest could really use some APIs which take a text
                # string or a file object instead of a filename...
                known_good_tests = [
                    test_zipped_doctest.SampleClass,
                    test_zipped_doctest.SampleClass.NestedClass,
                    test_zipped_doctest.SampleClass.NestedClass.__init__,
                    test_zipped_doctest.SampleClass.__init__,
                    test_zipped_doctest.SampleClass.a_classmethod,
                    test_zipped_doctest.SampleClass.a_property,
                    test_zipped_doctest.SampleClass.a_staticmethod,
                    test_zipped_doctest.SampleClass.double,
                    test_zipped_doctest.SampleClass.get,
                    test_zipped_doctest.SampleNewStyleClass,
                    test_zipped_doctest.SampleNewStyleClass.__init__,
                    test_zipped_doctest.SampleNewStyleClass.double,
                    test_zipped_doctest.SampleNewStyleClass.get,
                    test_zipped_doctest.sample_func,
                    test_zipped_doctest.test_DocTest,
                    test_zipped_doctest.test_DocTestParser,
                    test_zipped_doctest.test_DocTestRunner.basics,
                    test_zipped_doctest.test_DocTestRunner.exceptions,
                    test_zipped_doctest.test_DocTestRunner.option_directives,
                    test_zipped_doctest.test_DocTestRunner.optionflags,
                    test_zipped_doctest.test_DocTestRunner.verbose_flag,
                    test_zipped_doctest.test_Example,
                    test_zipped_doctest.test_debug,
                    test_zipped_doctest.test_testsource,
                    test_zipped_doctest.test_trailing_space_in_test,
                    test_zipped_doctest.test_DocTestSuite,
                    test_zipped_doctest.test_DocTestFinder,
                ]
                # These tests are the ones which need access
                # to the data files, so we don't run them
                fail_due_to_missing_data_files = [
                    test_zipped_doctest.test_DocFileSuite,
                    test_zipped_doctest.test_testfile,
                    test_zipped_doctest.test_unittest_reportflags,
                ]

                for obj in known_good_tests:
                    _run_object_doctest(obj, test_zipped_doctest)
            finally:
                del sys.modules["test_zipped_doctest"]

Example 170

View license
def run(test, params, env):
    """
    Test virsh migrate-setmaxdowntime command.

    1) Prepare migration environment
    2) Start migration and set migrate-maxdowntime
    3) Cleanup environment(migrated vm on destination)
    4) Check result
    """
    dest_uri = params.get(
        "virsh_migrate_dest_uri", "qemu+ssh://MIGRATE_EXAMPLE/system")
    src_uri = params.get(
        "virsh_migrate_src_uri", "qemu+ssh://MIGRATE_EXAMPLE/system")
    if dest_uri.count('///') or dest_uri.count('MIGRATE_EXAMPLE'):
        raise error.TestNAError("Set your destination uri first.")
    if src_uri.count('MIGRATE_EXAMPLE'):
        raise error.TestNAError("Set your source uri first.")
    if src_uri == dest_uri:
        raise error.TestNAError("You should not set dest uri same as local.")
    vm_ref = params.get("setmmdt_vm_ref", "domname")
    pre_vm_state = params.get("pre_vm_state", "running")
    status_error = "yes" == params.get("status_error", "no")
    do_migrate = "yes" == params.get("do_migrate", "yes")
    migrate_maxdowntime = params.get("migrate_maxdowntime", 1.000)
    if (migrate_maxdowntime == ""):
        downtime = ""
    else:
        downtime = int(float(migrate_maxdowntime)) * 1000
    extra = params.get("setmmdt_extra")

    # A delay between threads
    delay_time = int(params.get("delay_time", 1))
    # timeout of threads
    thread_timeout = 180

    vm_name = params.get("migrate_main_vm")
    vm = env.get_vm(vm_name)
    domuuid = vm.get_uuid()

    grep_str_local = params.get("grep_str_from_local_libvirt_log", "")

    # For safety reasons, we'd better back up original guest xml
    orig_config_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    if not orig_config_xml:
        raise error.TestError("Backing up xmlfile failed.")

    # Params to configure libvirtd.conf
    log_file = "/var/log/libvirt/libvirtd.log"
    log_level = "1"
    log_filters = '"1:json 1:libvirt 1:qemu 1:monitor 3:remote 4:event"'
    libvirtd_conf_dict = {"log_level": log_level,
                          "log_filters": log_filters,
                          "log_outputs": '"%s:file:%s"' % (log_level, log_file)}

    # Update libvirtd config with new parameters
    libvirtd = utils_libvirtd.Libvirtd()
    libvirtd_conf = config_libvirt(libvirtd_conf_dict)
    libvirtd.restart()

    # Params to update disk using shared storage
    params["disk_type"] = "file"
    params["disk_source_protocol"] = "netfs"
    params["mnt_path_name"] = params.get("nfs_mount_dir")

    # Params to setup SSH connection
    params["server_ip"] = params.get("migrate_dest_host")
    params["server_pwd"] = params.get("migrate_dest_pwd")
    params["client_ip"] = params.get("migrate_source_host")
    params["client_pwd"] = params.get("migrate_source_pwd")
    params["nfs_client_ip"] = params.get("migrate_dest_host")
    params["nfs_server_ip"] = params.get("migrate_source_host")

    # Params to enable SELinux boolean on remote host
    params["remote_boolean_varible"] = "virt_use_nfs"
    params["remote_boolean_value"] = "on"
    params["set_sebool_remote"] = "yes"

    remote_host = params.get("migrate_dest_host")
    username = params.get("migrate_dest_user", "root")
    password = params.get("migrate_dest_pwd")
    # Config ssh autologin for remote host
    ssh_key.setup_ssh_key(remote_host, username, password, port=22)

    setmmdt_dargs = {'debug': True, 'ignore_status': True, 'uri': src_uri}
    migrate_dargs = {'debug': True, 'ignore_status': True}

    seLinuxBool = None
    nfs_client = None
    local_selinux_bak = ""

    try:
        # Update the disk using shared storage
        libvirt.set_vm_disk(vm, params)

        # Backup the SELinux status on local host for recovering
        local_selinux_bak = params.get("selinux_status_bak", "")

        # Configure NFS client on remote host
        nfs_client = nfs.NFSClient(params)
        nfs_client.setup()

        logging.info("Enable virt NFS SELinux boolean on target host")
        seLinuxBool = utils_misc.SELinuxBoolean(params)
        seLinuxBool.setup()

        if not vm.is_alive():
            vm.start()
        vm.wait_for_login()
        domid = vm.get_id()

        # Confirm how to reference a VM.
        if vm_ref == "domname":
            vm_ref = vm_name
        elif vm_ref == "domid":
            vm_ref = domid
        elif vm_ref == "domuuid":
            vm_ref = domuuid

        # Prepare vm state
        if pre_vm_state == "paused":
            vm.pause()
        elif pre_vm_state == "shutoff":
            vm.destroy(gracefully=False)
            # Ensure VM in 'shut off' status
            utils_misc.wait_for(lambda: vm.state() == "shut off", 30)

        # Set max migration downtime must be during migration
        # Using threads for synchronization
        threads = []
        if do_migrate:
            threads.append(threading.Thread(target=thread_func_live_migration,
                                            args=(vm, dest_uri,
                                                  migrate_dargs)))

        threads.append(threading.Thread(target=thread_func_setmmdt,
                                        args=(vm_ref, downtime, extra,
                                              setmmdt_dargs)))
        for thread in threads:
            thread.start()
            # Migration must be executing before setting maxdowntime
            time.sleep(delay_time)
        # Wait until thread is over
        for thread in threads:
            thread.join(thread_timeout)

        if (status_error is False or do_migrate is False):
            logging.debug("To match the expected pattern '%s' ...",
                          grep_str_local)
            cmd = "grep -E '%s' %s" % (grep_str_local, log_file)
            cmdResult = process.run(cmd, shell=True, verbose=False)
            logging.debug(cmdResult)

    finally:
        # Clean up.
        if do_migrate:
            logging.debug("Cleanup VM on remote host...")
            cleanup_dest(vm, src_uri, dest_uri)

        if orig_config_xml:
            logging.debug("Recover VM XML...")
            orig_config_xml.sync()

        if seLinuxBool:
            logging.info("Recover NFS SELinux boolean on remote host...")
            seLinuxBool.cleanup(True)

        if nfs_client:
            logging.info("Cleanup NFS client environment...")
            nfs_client.cleanup()

        logging.info("Remove the NFS image...")
        source_file = params.get("source_file")
        libvirt.delete_local_disk("file", path=source_file)

        logging.info("Cleanup NFS server environment...")
        exp_dir = params.get("export_dir")
        mount_dir = params.get("mnt_path_name")
        libvirt.setup_or_cleanup_nfs(False, export_dir=exp_dir,
                                     mount_dir=mount_dir,
                                     restore_selinux=local_selinux_bak)

        # Recover libvirtd service configuration on local
        if libvirtd_conf:
            logging.debug("Recover local libvirtd configuration...")
            libvirtd_conf.restore()
            libvirtd.restart()
            os.remove(log_file)

    # Check results.
    if status_error:
        if ret_setmmdt:
            if not do_migrate and libvirt_version.version_compare(1, 2, 9):
                # https://bugzilla.redhat.com/show_bug.cgi?id=1146618
                # Commit fe808d9 fix it and allow setting migration
                # max downtime any time since libvirt-1.2.9
                logging.info("Libvirt version is newer than 1.2.9,"
                             "Allow set maxdowntime while VM isn't migrating")
            else:
                raise error.TestFail("virsh migrate-setmaxdowntime succeed "
                                     "but not expected.")
    else:
        if do_migrate and not ret_migration:
            raise error.TestFail("Migration failed.")

        if not ret_setmmdt:
            raise error.TestFail("virsh migrate-setmaxdowntime failed.")

Example 171

Project: TYOS
Source File: app.py
View license
    def display(self):
        while True:
            if self.mode == 'gallery':                        
                self.screen.blit(self.image_in_view, (0,0))
                self.screen.blit(self.left, (20, 410))
                self.screen.blit(self.right, (240, 410))
                self.screen.blit(self.home, (125, 400))
                self.screen.blit(self.delete, (5, 5))
                self.screen.blit(self.upload, (40, 5))
                if self.deleted:
                    self.screen.blit(self.deleted_image, (79, 200))
                    if time.time() - self.delete_time > 3:
                        self.deleted = False
                if self.uploading:
                    self.screen.blit(self.uploading_image, (79, 200))
                    if time.time() - self.uploading_time > 6:
                        self.uploading = False
                
            if self.mode == 'capture':
                #Get camera stream
                self.stream = io.BytesIO() # Capture into in-memory stream
                self.camera.capture(self.stream, use_video_port=True, format='raw')
                self.stream.seek(0)
                self.stream.readinto(self.yuv)  # stream -> YUV buffer
                self.stream.close()
                yuv2rgb.convert(self.yuv, self.rgb, 320, 480)
            
                #Create pygame image from screen and blit it
                img = pygame.image.frombuffer(self.rgb[0:(320 * 480 * 3)], (320, 480), 'RGB')
                self.screen.blit(img, (0,0))
            
                #Blit buttons
                self.screen.blit(self.capture, (125, 400))
                self.screen.blit(self.gallery, (20, 415))
                self.screen.blit(self.door, (240, 410))

                if self.no_files:
                    self.screen.blit(self.no_files_image, (79, 200))
                    if time.time() - self.files_time > 3:
                        self.no_files = False
            
            pygame.display.update()
            
            #Handle events
            for event in pygame.event.get():
                if event.type == pygame.MOUSEBUTTONUP:
                    if self.mode == 'gallery':
                        if event.pos[1] < 40 and event.pos[0] < 35:
                            self.deleted = True
                            self.delete_time = time.time()
                            os.remove('/home/pi/Photos/' + self.images[self.current_image])
                            self.current_image = 0
                            self.images = os.listdir('/home/pi/Photos/')
                            if len(self.images) == 0:
                                self.mode = 'capture'
                                self.no_files = True
                                self.files_time = time.time()
                            
                        if event.pos[1] < 40 and event.pos[0] > 35 and event.pos[0] < 75:
                            self.uploading = True
                            self.uploading_time = time.time()
                            cam = Popen(['/home/pi/Dropbox-Uploader/./dropbox_uploader.sh', 'upload', '/home/pi/Photos/' +
                                         self.images[self.current_image], self.images[self.current_image]])
                            
                    if event.pos[1] > 400 and event.pos[1] < 470:
                        if event.pos[0] > 125 and event.pos[0] < 195:
                            if self.mode == 'capture':
                                self.camera.capture('/home/pi/Photos/' + str(self.index) + '.jpg')
                                self.index += 1
                            if self.mode == 'gallery':
                                self.mode = 'capture'
            
                        if event.pos[0] < 70:
                            if self.mode == 'capture':
                                self.mode = 'gallery'
                                self.current_image = 0
                                self.images = os.listdir('/home/pi/Photos/')
                                if len(self.images) == 0:
                                    self.mode = 'capture'
                                    self.no_files = True
                                    self.files_time = time.time()
                                else:
                                    self.image_in_view = pygame.image.load('/home/pi/Photos/' + self.images[self.current_image])
                                
                            elif self.mode == 'gallery':  # elif: don't fall through right after switching modes
                                self.current_image -= 1
                                if self.current_image == -1:
                                    self.current_image = len(self.images) - 1
                                self.image_in_view = pygame.image.load('/home/pi/Photos/' + self.images[self.current_image])
                                
                        if event.pos[0] > 255:
                            if self.mode == 'capture':
                                print 'exiting...'
                                os.remove('/home/pi/index.dat')
                                new = open('/home/pi/index.dat', 'w+')
                                new.write(str(self.index))
                                new.close()
                                cam = Popen(['sudo', 'python', '/home/pi/tyos/src/main.py'])
                                pygame.quit()
                                sys.exit()


                            if self.mode == 'gallery':
                                if self.current_image == len(self.images) - 1:
                                    self.current_image = 0
                                else:
                                    self.current_image += 1
                                self.image_in_view = pygame.image.load('/home/pi/Photos/' + self.images[self.current_image])
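
Example 171 persists the photo counter on exit by deleting index.dat and rewriting it. A short sketch of that remove-and-rewrite step (paths are illustrative); a more robust variant would write to a temporary file and rename it into place:

import os

def save_index(path, index):
    """Replace the index file with the current counter value."""
    if os.path.exists(path):
        os.remove(path)
    new = open(path, 'w')
    new.write(str(index))
    new.close()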

Example 172

Project: holland
Source File: config.py
View license
def listen(port=DEFAULT_LOGGING_CONFIG_PORT):
    """
    Start up a socket server on the specified port, and listen for new
    configurations.

    These will be sent as a file suitable for processing by fileConfig().
    Returns a Thread object on which you can call start() to start the server,
    and which you can join() when appropriate. To stop the server, call
    stopListening().
    """
    if not thread:
        raise NotImplementedError, "listen() needs threading to work"

    class ConfigStreamHandler(StreamRequestHandler):
        """
        Handler for a logging configuration request.

        It expects a completely new logging configuration and uses fileConfig
        to install it.
        """
        def handle(self):
            """
            Handle a request.

            Each request is expected to be a 4-byte length,
            followed by the config file. Uses fileConfig() to do the
            grunt work.
            """
            import tempfile
            try:
                conn = self.connection
                chunk = conn.recv(4)
                if len(chunk) == 4:
                    slen = struct.unpack(">L", chunk)[0]
                    chunk = self.connection.recv(slen)
                    while len(chunk) < slen:
                        chunk = chunk + conn.recv(slen - len(chunk))
                    #Apply new configuration. We'd like to be able to
                    #create a StringIO and pass that in, but unfortunately
                    #1.5.2 ConfigParser does not support reading file
                    #objects, only actual files. So we create a temporary
                    #file and remove it later.
                    file = tempfile.mktemp(".ini")
                    f = open(file, "w")
                    f.write(chunk)
                    f.close()
                    fileConfig(file)
                    os.remove(file)
            except socket.error, e:
                if type(e.args) != types.TupleType:
                    raise
                else:
                    errcode = e.args[0]
                    if errcode != RESET_ERROR:
                        raise

    class ConfigSocketReceiver(ThreadingTCPServer):
        """
        A simple TCP socket-based logging config receiver.
        """

        allow_reuse_address = 1

        def __init__(self, host='localhost', port=DEFAULT_LOGGING_CONFIG_PORT,
                     handler=None):
            ThreadingTCPServer.__init__(self, (host, port), handler)
            logging._acquireLock()
            self.abort = 0
            logging._releaseLock()
            self.timeout = 1

        def serve_until_stopped(self):
            import select
            abort = 0
            while not abort:
                rd, wr, ex = select.select([self.socket.fileno()],
                                           [], [],
                                           self.timeout)
                if rd:
                    self.handle_request()
                logging._acquireLock()
                abort = self.abort
                logging._releaseLock()

    def serve(rcvr, hdlr, port):
        server = rcvr(port=port, handler=hdlr)
        global _listener
        logging._acquireLock()
        _listener = server
        logging._releaseLock()
        server.serve_until_stopped()

    return threading.Thread(target=serve,
                            args=(ConfigSocketReceiver,
                                  ConfigStreamHandler, port))
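
The handler in Example 172 reads a 4-byte big-endian length (struct.unpack(">L", ...)) followed by that many bytes of configuration. A hedged client sketch for that wire format; the default port mirrors logging.config's DEFAULT_LOGGING_CONFIG_PORT (9030) and the function name is illustrative:

import socket
import struct

def send_logging_config(config_text, host='localhost', port=9030):
    """Send a fileConfig()-style payload to the listener above:
    a 4-byte big-endian length prefix, then the payload itself."""
    payload = config_text.encode('utf-8')
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect((host, port))
    s.sendall(struct.pack(">L", len(payload)) + payload)
    s.close()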

Example 173

Project: dopey
Source File: document.py
View license
    def load_ora(self, filename, feedback_cb=None):
        """Loads from an OpenRaster file"""
        logger.info('load_ora: %r', filename)
        t0 = time.time()
        tempdir = tempfile.mkdtemp('mypaint')
        if not isinstance(tempdir, unicode):
            tempdir = tempdir.decode(sys.getfilesystemencoding())
        z = zipfile.ZipFile(filename)
        logger.debug('mimetype: %r', z.read('mimetype').strip())
        xml = z.read('stack.xml')
        image = ET.fromstring(xml)
        stack = image.find('stack')

        image_w = int(image.attrib['w'])
        image_h = int(image.attrib['h'])

        def get_pixbuf(filename):
            t1 = time.time()

            try:
                fp = z.open(filename, mode='r')
            except KeyError:
                # support for bad zip files (saved by old versions of the GIMP ORA plugin)
                fp = z.open(filename.encode('utf-8'), mode='r')
                logger.warning('Bad OpenRaster ZIP file. There is a utf-8 '
                               'encoded filename that does not have the '
                               'utf-8 flag set: %r', filename)

            res = self._pixbuf_from_stream(fp, feedback_cb)
            fp.close()
            logger.debug('%.3fs loading pixbuf %s', time.time() - t1, filename)
            return res

        def get_layers_list(root, x=0,y=0):
            res = []
            for item in root:
                if item.tag == 'layer':
                    if 'x' in item.attrib:
                        item.attrib['x'] = int(item.attrib['x']) + x
                    if 'y' in item.attrib:
                        item.attrib['y'] = int(item.attrib['y']) + y
                    res.append(item)
                elif item.tag == 'stack':
                    stack_x = int( item.attrib.get('x', 0) )
                    stack_y = int( item.attrib.get('y', 0) )
                    res += get_layers_list(item, stack_x, stack_y)
                else:
                    logger.warning('ignoring unsupported tag %r', item.tag)
            return res

        self.clear() # this leaves one empty layer
        no_background = True

        selected_layer = None
        for layer in get_layers_list(stack):
            a = layer.attrib

            if 'background_tile' in a:
                assert no_background
                try:
                    logger.debug("background tile: %r", a['background_tile'])
                    self.set_background(get_pixbuf(a['background_tile']))
                    no_background = False
                    continue
                except tiledsurface.BackgroundError, e:
                    logger.warning('ORA background tile not usable: %r', e)

            src = a.get('src', '')
            if not src.lower().endswith('.png'):
                logger.warning('Ignoring non-png layer %r', src)
                continue
            name = a.get('name', '')
            x = int(a.get('x', '0'))
            y = int(a.get('y', '0'))
            opac = float(a.get('opacity', '1.0'))
            compositeop = str(a.get('composite-op', DEFAULT_COMPOSITE_OP))
            if compositeop not in VALID_COMPOSITE_OPS:
                compositeop = DEFAULT_COMPOSITE_OP
            selected = self.__xsd2bool(a.get("selected", 'false'))
            locked = self.__xsd2bool(a.get("edit-locked", 'false'))

            visible = not 'hidden' in a.get('visibility', 'visible')
            self.add_layer(insert_idx=0, name=name)
            t1 = time.time()

            # extract the png from the zip into a file first
            # the overhead for doing so seems to be negligible (around 5%)
            z.extract(src, tempdir)
            tmp_filename = join(tempdir, src)
            self.load_layer_from_png(tmp_filename, x, y, feedback_cb)
            os.remove(tmp_filename)

            layer = self.layers[0]

            self.set_layer_opacity(helpers.clamp(opac, 0.0, 1.0), layer)
            self.set_layer_compositeop(compositeop, layer)
            self.set_layer_visibility(visible, layer)
            self.set_layer_locked(locked, layer)
            if selected:
                selected_layer = layer
            logger.debug('%.3fs loading and converting layer png',
                         time.time() - t1)
            # strokemap
            fname = a.get('mypaint_strokemap_v2', None)
            if fname:
                sio = StringIO(z.read(fname))
                layer.load_strokemap_from_file(sio, x, y)
                sio.close()

        if len(self.layers) == 1:
            # no assertion (allow empty documents)
            logger.error('Could not load any layer, document is empty.')

        if len(self.layers) > 1:
            # remove the still present initial empty top layer
            self.select_layer(len(self.layers)-1)
            self.remove_layer()
            # this leaves the topmost layer selected

        try:
            ani_data = z.read('animation.xsheet')
            self.ani.str_to_xsheet(ani_data)
        except KeyError:
            self.ani.load_xsheet(filename)

        if selected_layer is not None:
            for i, layer in zip(range(len(self.layers)), self.layers):
                if layer is selected_layer:
                    self.select_layer(i)
                    break

        # Set the frame size to that saved in the image.
        self.update_frame(x=0, y=0, width=image_w, height=image_h,
                          user_initiated=False)

        # Enable frame if the saved image size is something other than the
        # calculated bounding box. Goal: if the user saves an "infinite
        # canvas", it loads as an infinite canvas.
        bbox_c = helpers.Rect(x=0, y=0, w=image_w, h=image_h)
        bbox = self.get_bbox()
        frame_enab = not (bbox_c==bbox or bbox.empty() or bbox_c.empty())
        self.set_frame_enabled(frame_enab, user_initiated=False)

        z.close()

        # remove empty directories created by zipfile's extract()
        for root, dirs, files in os.walk(tempdir, topdown=False):
            for name in dirs:
                os.rmdir(os.path.join(root, name))
        os.rmdir(tempdir)

        logger.info('%.3fs load_ora total', time.time() - t0)
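
load_ora() above extracts each layer PNG from the archive into a temporary directory, loads it, removes it immediately, and finally sweeps away the directories zipfile's extract() created. A condensed sketch of that extract-use-remove pattern (member handling is simplified):

import os
import tempfile
import zipfile

def extract_use_remove(zip_path, member):
    """Extract one zip member to a temp dir, use it, then clean up
    the file and every directory extract() created along the way."""
    tempdir = tempfile.mkdtemp('example')
    z = zipfile.ZipFile(zip_path)
    try:
        z.extract(member, tempdir)
        tmp_path = os.path.join(tempdir, member)
        # ... load or copy tmp_path here ...
        os.remove(tmp_path)
    finally:
        z.close()
        # remove empty directories, deepest first, then the root
        for root, dirs, files in os.walk(tempdir, topdown=False):
            for name in dirs:
                os.rmdir(os.path.join(root, name))
        os.rmdir(tempdir)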

Example 174

Project: QGIS-CKAN-Browser
Source File: ckanconnector.py
View license
    def download_resource(self, url, resource_format, dest_file, delete):
        try:
#             if resource_format is not None:
#                 if resource_format.lower() == 'georss':
#                     dest_file += '.xml'
            if delete is True:
                os.remove(dest_file)
            #urls might have line breaks
            url = self.util.remove_newline(url)
            response = requests.get(
                url
                , headers=self.ua_chrome
                , verify=False
                , stream=True
                , proxies=self.settings.get_proxies()[1]
                , timeout=self.settings.request_timeout
            )
            if not response.ok:
                return False, self.util.tr(u'cc_download_error').format(response.reason), None

            # TODO remove after testing
            # doesn't work: headers is an object of type 'requests.structures.CaseInsensitiveDict'
            # self.util.msg_log(u'{0}'.format(json.dumps(response.headers, indent=2, sort_keys=True)))
            for k, v in response.headers.iteritems():
                self.util.msg_log(u"['{0}']: \t{1}".format(k, v))

            # Content-Disposition:
            # http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html
            # http://www.iana.org/assignments/cont-disp/cont-disp.xhtml
            file_name_from_service = self.__file_name_from_service(
                url
                , response.headers.get('content-disposition')
                , response.headers.get('content-type')
            )
            self.util.msg_log(u'file name from service: {0}'.format(file_name_from_service))
            if file_name_from_service:
                # set new dest_file name
                dest_file = os.path.join(os.path.dirname(dest_file), file_name_from_service)

            self.util.msg_log(u'dest_file: {0}'.format(dest_file))
            # hack for WFS/WM(T)S Services, that don't specify the format as wms, wmts or wfs
            url_low = url.lower()
            if 'wfs' in url_low and 'getcapabilities' in url_low and False is dest_file.endswith('.wfs'):
                if string.find(dest_file, '?') > -1: dest_file = dest_file[:string.find(dest_file, '?')]
                dest_file += '.wfs'
            if 'wmts' in url_low and 'getcapabilities' in url_low and False is dest_file.endswith('.wmts'):
                if string.find(dest_file, '?') > -1: dest_file = dest_file[:string.find(dest_file, '?')]
                dest_file += '.wmts'
            # we use extension wmts for wms too
            if 'wms' in url_low and 'getcapabilities' in url_low and False is dest_file.endswith('.wmts'):
                if string.find(dest_file, '?') > -1: dest_file = dest_file[:string.find(dest_file, '?')]
                dest_file += '.wmts'

            self.util.msg_log(u'dest_file: {0}'.format(dest_file))

            # if file name has been set from service, set again after above changes for wfs/wm(t)s
            if file_name_from_service:
                # set return value to full path
                file_name_from_service = dest_file

            #chunk_size = 1024
            chunk_size = None
            #http://docs.python-requests.org/en/latest/user/advanced/#chunk-encoded-requests
            if self.__is_chunked(response.headers.get('transfer-encoding')):
                self.util.msg_log('response is chunked')
                chunk_size = None

            with open(dest_file, 'wb') as handle:
                for chunk in response.iter_content(chunk_size):
                    if chunk:
                        handle.write(chunk)

            return True, '', file_name_from_service
        except requests.exceptions.ConnectTimeout as cte:
            #self.util.msg_log(u'{0}\n{1}\n\n\n{2}'.format(cte, dir(cte), cte.message))
            return False, self.util.tr(u'cc_connection_timeout').format(cte.message), None
        except IOError, e:
            self.util.msg_log("Can't retrieve {0} to {1}: {2}".format(url, dest_file, e))
            return False, self.util.tr(u'cc_download_error').format(e.strerror), None
        except NameError as ne:
            self.util.msg_log(u'{0}'.format(ne))
            return False, ne.message, None
        except:
            return False, self.util.tr(u'cc_download_error').format(sys.exc_info()[0]), None
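
download_resource() above optionally deletes the destination first, then streams the HTTP response to disk chunk by chunk. A stripped-down sketch of that delete-then-stream pattern (chunk size and timeout are illustrative):

import os
import requests

def download(url, dest_file, overwrite=False):
    """Stream url to dest_file, optionally removing an existing copy."""
    if overwrite and os.path.exists(dest_file):
        os.remove(dest_file)
    response = requests.get(url, stream=True, timeout=30)
    response.raise_for_status()
    with open(dest_file, 'wb') as handle:
        for chunk in response.iter_content(chunk_size=8192):
            if chunk:
                handle.write(chunk)
    return dest_file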

Example 175

View license
    def test_doctest_issue4197(self):
        # To avoid having to keep two copies of the doctest module's
        # unit tests in sync, this test works by taking the source of
        # test_doctest itself, rewriting it a bit to cope with a new
        # location, and then throwing it in a zip file to make sure
        # everything still works correctly
        test_src = inspect.getsource(test_doctest)
        test_src = test_src.replace(
                         "from test import test_doctest",
                         "import test_zipped_doctest as test_doctest")
        test_src = test_src.replace("test.test_doctest",
                                    "test_zipped_doctest")
        test_src = test_src.replace("test.sample_doctest",
                                    "sample_zipped_doctest")
        sample_src = inspect.getsource(sample_doctest)
        sample_src = sample_src.replace("test.test_doctest",
                                        "test_zipped_doctest")
        with temp_dir() as d:
            script_name = make_script(d, 'test_zipped_doctest',
                                            test_src)
            zip_name, run_name = make_zip_script(d, 'test_zip',
                                                script_name)
            z = zipfile.ZipFile(zip_name, 'a')
            z.writestr("sample_zipped_doctest.py", sample_src)
            z.close()
            if verbose:
                zip_file = zipfile.ZipFile(zip_name, 'r')
                print 'Contents of %r:' % zip_name
                zip_file.printdir()
                zip_file.close()
            os.remove(script_name)
            sys.path.insert(0, zip_name)
            import test_zipped_doctest
            # Some of the doc tests depend on the colocated text files
            # which aren't available to the zipped version (the doctest
            # module currently requires real filenames for non-embedded
            # tests). So we're forced to be selective about which tests
            # to run.
            # doctest could really use some APIs which take a text
            # string or a file object instead of a filename...
            known_good_tests = [
                test_zipped_doctest.SampleClass,
                test_zipped_doctest.SampleClass.NestedClass,
                test_zipped_doctest.SampleClass.NestedClass.__init__,
                test_zipped_doctest.SampleClass.__init__,
                test_zipped_doctest.SampleClass.a_classmethod,
                test_zipped_doctest.SampleClass.a_property,
                test_zipped_doctest.SampleClass.a_staticmethod,
                test_zipped_doctest.SampleClass.double,
                test_zipped_doctest.SampleClass.get,
                test_zipped_doctest.SampleNewStyleClass,
                test_zipped_doctest.SampleNewStyleClass.__init__,
                test_zipped_doctest.SampleNewStyleClass.double,
                test_zipped_doctest.SampleNewStyleClass.get,
                test_zipped_doctest.old_test1,
                test_zipped_doctest.old_test2,
                test_zipped_doctest.old_test3,
                test_zipped_doctest.old_test4,
                test_zipped_doctest.sample_func,
                test_zipped_doctest.test_DocTest,
                test_zipped_doctest.test_DocTestParser,
                test_zipped_doctest.test_DocTestRunner.basics,
                test_zipped_doctest.test_DocTestRunner.exceptions,
                test_zipped_doctest.test_DocTestRunner.option_directives,
                test_zipped_doctest.test_DocTestRunner.optionflags,
                test_zipped_doctest.test_DocTestRunner.verbose_flag,
                test_zipped_doctest.test_Example,
                test_zipped_doctest.test_debug,
                test_zipped_doctest.test_pdb_set_trace,
                test_zipped_doctest.test_pdb_set_trace_nested,
                test_zipped_doctest.test_testsource,
                test_zipped_doctest.test_trailing_space_in_test,
                test_zipped_doctest.test_DocTestSuite,
                test_zipped_doctest.test_DocTestFinder,
            ]
            # These remaining tests are the ones which need access
            # to the data files, so we don't run them
            fail_due_to_missing_data_files = [
                test_zipped_doctest.test_DocFileSuite,
                test_zipped_doctest.test_testfile,
                test_zipped_doctest.test_unittest_reportflags,
            ]
            # Needed for test_DocTestParser and test_debug
            deprecations = [
                # Ignore all warnings about the use of class Tester in this module.
                ("class Tester is deprecated", DeprecationWarning)]
            if sys.py3kwarning:
                deprecations += [
                    ("backquote not supported", SyntaxWarning),
                    ("execfile.. not supported", DeprecationWarning)]
            with test.test_support.check_warnings(*deprecations):
                for obj in known_good_tests:
                    _run_object_doctest(obj, test_zipped_doctest)

Example 176

Project: Devede
Source File: devede_ffmpeg_convert.py
View license
	def __init__(self,global_vars,videofile,filename,filefolder,progresbar,proglabel,disctype,title,chapter,threads,seconds,encpass,fix_ac3):

		""" This class converts a video file to MPEG-1 or MPEG-2 format

		VIDEOFILE contains the parameters to convert the video
		FILENAME is the generic file name given by the user
		FILEFOLDER is the path where all the temporary and final files will be created
		PROGRESBAR is the progress bar where the class will show the progress
		PROGLABEL is the label where the class will show what is it doing
		DISCTYPE can be dvd, vcd, svcd, cvd or divx
		TITLE and CHAPTER are the numbers used to identify the TITLE and CHAPTER number for this file
		THREADS is the number of threads to use
		SECONDS is the number of seconds we want to convert (for previews) 
		ENCPASS is the number of the encoding pass"""
		
		devede_executor.executor.__init__(self,filename,filefolder,progresbar)
		self.printout=False

		self.percent2=120
		self.film_length=float(videofile["olength"])
		if seconds==0:
			self.divide=float(videofile["olength"])
			if (videofile["cutting"]==1) or (videofile["cutting"]==2): # if we want only one half of the file
				self.divide/=2
		else:
			self.divide=float(seconds)

		if self.divide==0:
			self.divide=1

		self.error=""
		progresbar.set_fraction(0)
		progresbar.set_text("")
		
		if videofile["ismpeg"]: # if the file hasn't to be converted, we simply copy or link it
			self.pulse=True
			self.print_error=_("File copy failed\nMaybe you ran out of disk space?")
			if seconds==0:
				texto=_("Copying the file")+"\n"
			else:
				texto=_("Creating preview")+"\n"
			proglabel.set_text(texto+videofile["filename"])
			currentfile=self.create_filename(filefolder+filename,title,chapter,disctype=="divx")
		
			print "\ncurrentfile is: ", currentfile , "\n" 

			try:
				os.remove(currentfile)
			except:
				pass

			if (sys.platform=="win32") or (sys.platform=="win64"):
				# links do not work on windows, so just copy the file
				# self.launch_shell('copy "'+videofile["path"].replace('"','""')+'" "'+currentfile+'"',output=False)
				# Only hardlinks are available on 2000 and XP, reparse points are available from vista onwards.
				win32file.CreateHardLink(currentfile, videofile["path"].replace('"','""'))
			else:
				if len(videofile["sub_list"])==0:
					self.launch_shell('ln -s "'+videofile["path"].replace('"','\\"')+'" "'+currentfile+'"',output=False)
				else:
					self.launch_shell('cp "'+videofile["path"].replace('"','\\"')+'" "'+currentfile+'"',output=False)
			return

		isvob=videofile["isvob"]

		self.pulse=False
		if seconds==0:
			texto=(_("Converting files from title %(title_number)s (pass %(pass_number)s)\n\n%(file_name)s") % {"title_number":str(title),"pass_number":str(encpass),"file_name":videofile["filename"]} )
			proglabel.set_text(texto) #+" "+str(title)+" Pass: "+ str(encpass) +"\n\n"+videofile["filename"] )
		else:
			texto=_("Creating preview")
			proglabel.set_text(texto+"\n"+videofile["filename"])

		addbars=False
		framerate=int(videofile["ofps"])
		videorate=int(videofile["vrate"])
		audiorate=self.adjust_audiorate(int(videofile["arate"]),disctype=="dvd")
		
		audio_final_rate=int(videofile["arateunc"])
		audiodelay=float(videofile["adelay"])
		final_framerate=float(videofile["fps"])
		aspect_ratio_original=videofile["oaspect"]
		aspect_ratio_final=videofile["aspect"]
		resx_final=videofile["width"]
		resy_final=videofile["height"]
		resx_original=videofile["owidth"]
		resy_original=videofile["oheight"]
		copy_audio=videofile["copy_audio"]
		sound51=videofile["sound51"]
		gop12=videofile["gop12"]
		audiostream=videofile["audio_stream"]
		swap_fields=videofile["swap_fields"]
		volume=videofile["volume"]
		audio_tracks=len(videofile["audio_list"])

		if (videofile["resolution"]==0) and (disctype=="divx"):
			default_res=True
		else:
			default_res=False
		
		speed1,speed2=devede_other.get_speedup(videofile)
		if speed1==speed2:
			speedup=None
		else:
			speedup=str(speed1)+":"+str(speed2)
	
		if aspect_ratio_original<1.3:
			aspect_ratio_original=float(videofile["owidth"])/(float(videofile["oheight"]))
		if aspect_ratio_original<1.33333333:
			aspect_ratio_original=1.33333333
	
		max_videorate=int(videorate*2)
		min_videorate=int(videorate*0.75)
		
		dsize,minvid,maxvid=devede_other.get_dvd_size(None,disctype)
		
		if max_videorate>maxvid:
			max_videorate=maxvid
		if min_videorate<minvid:
			min_videorate=minvid
			
		if videofile["blackbars"]==0: # check if has to add black bars
			addbars=True
			if (videofile["rotate"]==90) or (videofile["rotate"]==270):
				resx_original2=resy_original
				resy_original2=resx_original
				aratio=1/aspect_ratio_original
			else:
				resx_original2=resx_original
				resy_original2=resy_original
				aratio=aspect_ratio_original

			if (resx_original2%2)==1:
				resx_original2+=1
			if (resy_original2%2)==1:
				resy_original2+=1
			
			resy_tmp = int(resy_final*aspect_ratio_final/aratio)
			resx_tmp = int(resx_final*aratio/aspect_ratio_final)
			
			
			if (resx_tmp>resx_final):
				resx_inter=resx_final
				resy_inter=resy_tmp
			else:
				resx_inter=resx_tmp
				resy_inter=resy_final
			
			#resx_inter=resx_original2
			#resy_inter=int((resy_original2*aspect_ratio_original)/aspect_ratio_final)
			if (resx_inter%2)==1:
				resx_inter-=1
			if (resy_inter%2)==1:
				resy_inter-=1
			
			#if ((resy_inter<resy_original) or (resy_original+5>resy_inter)):
			#	addbars=False

		if addbars==False:
			resx_inter=resx_final
			resy_inter=resy_final
		else:
			if (resx_inter==resx_final):
				addx=0
				addy=int((resy_final-resy_inter)/2)
				if(addy%2)==1:
					addy+=1
			else:
				addy=0
				addx=int((resx_final-resx_inter)/2)
				if(addx%2)==1:
					addx+=1
					
		
		command_var=["ffmpeg"]
		#command_var=["avconv"]

		command_var.append("-i")
		command_var.append(videofile["path"])
		
		if (volume!=100):
			command_var.append("-vol")
			command_var.append(str((256*volume)/100))
		
		if (audiodelay!=0.0) and (copy_audio==False) and (isvob==False):
			command_var.append("-itsoffset")
			command_var.append(str(audiodelay))
			command_var.append("-i")
			command_var.append(videofile["path"])
			command_var.append("-map")
			command_var.append("1:0")
			for l in range(audio_tracks):
				command_var.append("-map")
				command_var.append("0"+":"+str(l+1))
		
		if (isvob==False):
			cmd_line=""
			
			extra_params=videofile["params_vf"] # take the VF extra params
			while (extra_params!=""):
				extra_params,new_param=devede_other.get_new_param(extra_params)
				if (new_param!="") and (new_param!=','):
					while (len(new_param)>1) and (new_param[0]==','):
						new_param=new_param[1:]
					while (len(new_param)>1) and (new_param[-1]==','):
						new_param=new_param[:-1]
					if new_param=="fifo":
						continue
					if cmd_line!="":
						cmd_line+=",fifo,"
					cmd_line+=new_param
			
			if videofile["deinterlace"]=="yadif":
				if (cmd_line!=""):
					cmd_line+=",fifo,"
				cmd_line+="yadif"
			
			vflip=0
			hflip=0
	
			if (videofile["rotate"]==90):
				if (cmd_line!=""):
					cmd_line+=",fifo,"
				cmd_line+="transpose=1"
			elif (videofile["rotate"]==270):
				if (cmd_line!=""):
					cmd_line+=",fifo,"
				cmd_line+="transpose=2"
			elif (videofile["rotate"]==180):
				vflip=1
				hflip=1
			
			if (videofile["vmirror"]):
				vflip=1-vflip
			if (videofile["hmirror"]):
				hflip=1-hflip
	
			if (vflip==1):
				if (cmd_line!=""):
					cmd_line+=",fifo,"
				cmd_line+="vflip"
			if (hflip==1):
				if (cmd_line!=""):
					cmd_line+=",fifo,"
				cmd_line+="hflip"
			
			if addbars and ((resx_inter!=resx_original) or (resy_inter!=resy_original)) and (default_res==False):
				if (cmd_line!=""):
					cmd_line+=",fifo,"
				cmd_line+="scale="+str(resx_inter)+":"+str(resy_inter)+",fifo,pad="+str(resx_final)+":"+str(resy_final)+":"+str(addx)+":"+str(addy)+":0x000000"
			
			if cmd_line!="":
				command_var.append("-vf")
				command_var.append(cmd_line)
			
		
		command_var.append("-y")

		vcd=False
		
		if (disctype!="divx"):
			command_var.append("-target")
			if (disctype=="dvd"):
				if final_framerate==30:
					command_var.append("ntsc-dvd")
				elif (framerate==24):
					command_var.append("film-dvd")
				else:
					command_var.append("pal-dvd")
				if (copy_audio==False):
					command_var.append("-acodec")
					if fix_ac3:
						command_var.append("ac3_fixed")
					else:
						command_var.append("ac3")
				#command_var.append("-maxrate")
				#command_var.append("7000k")
				#command_var.append("-minrate")
				#command_var.append("2200k")
			elif (disctype=="vcd"):
				vcd=True
				if final_framerate==30:
					command_var.append("ntsc-vcd")
				else:
					command_var.append("pal-vcd")
			elif (disctype=="svcd"):
				if final_framerate==30:
					command_var.append("ntsc-svcd")
				else:
					command_var.append("pal-svcd")
			elif (disctype=="cvd"):
				if final_framerate==30:
					command_var.append("ntsc-svcd")
				else:
					command_var.append("pal-svcd")
		else: # DivX
			command_var.append("-vcodec")
			command_var.append("mpeg4")
			command_var.append("-acodec")
			command_var.append("libmp3lame")
			command_var.append("-f")
			command_var.append("avi")
		
		if  (not isvob):
			command_var.append("-sn") # no subtitles

		if copy_audio or isvob:
			command_var.append("-acodec")
			command_var.append("copy")
		#else:
		#	if (disctype=="divx"):
		#		command_var.append("-acodec")
		#		command_var.append("mp3")

		#if (audiostream!=10000):
		#	command_var.append("-aid")
		#	command_var.append(str(audiostream))

		if isvob:
			command_var.append("-vcodec")
			command_var.append("copy")
		
		if (vcd==False):
			if final_framerate==30:
				if (framerate==24) and ((disctype=="dvd") or (disctype=="divx")):
					str_final_framerate="24000/1001"
					keyintv=15
					telecine=True
				else:
					str_final_framerate="30000/1001"
					keyintv=18
			else:
				str_final_framerate=str(int(final_framerate))
				keyintv=15
		
		if (disctype=="divx"):
			command_var.append("-g")
			command_var.append("300")
		elif gop12 and (isvob==False):
			command_var.append("-g")
			command_var.append("12")
		
		command_var.append("-bf")
		command_var.append("2")
		command_var.append("-strict")
		command_var.append("1")
		
		if seconds!=0:
			command_var.append("-t")
			command_var.append(str(seconds))
		else:
			if videofile["cutting"]==1: # first half only
				command_var.append("-t")
				command_var.append(str(videofile["olength"]/2))
			elif videofile["cutting"]==2: # second half only
				command_var.append("-ss")
				command_var.append(str((videofile["olength"]/2)-5)) # start 5 seconds before

		#if (audiodelay!=0.0) and (copy_audio==False) and (isvob==False):
		#	command_var.append("-delay")
		#	command_var.append(str(audiodelay))

		command_var.append("-ac")
		if (sound51) and ((disctype=="dvd") or (disctype=="divx")):
			command_var.append("6")
		else:
			command_var.append("2")

		#if (isvob==False) and (default_res==False):
		#	command_var.append("-ofps")
		#	command_var.append(str_final_framerate)

		if disctype=="divx":
			command_var.append("-vtag")
			command_var.append("DX50")

		lineatemp=""
		acoma=False;
		
		#if swap_fields:
		#	lineatemp+="phase=a"
		#	acoma=True
		
		passlog_var = None
		
		if (videofile["deinterlace"]!="none") and (videofile["deinterlace"]!="yadif") and (isvob==False):
			command_var.append("-deinterlace")
			
		print "Addbars "+str(addbars)+" resx_o "+str(resx_original)+" resy_o "+str(resy_original)
		print "resx_i "+str(resx_inter)+" resy_i "+str(resy_inter)
 
		if (isvob==False) and (vcd==False):
			command_var.append("-s")
			command_var.append(str(resx_final)+"x"+str(resy_final))

		# Currently Mencoder supports up to 8 threads
		if isvob==False:
			if threads>1:
				command_var.append("-threads")
				command_var.append(str(threads))

			command_var.append("-trellis")
			if videofile["trellis"]:
				command_var.append("1")
			else:
				command_var.append("0")
		
			if videofile["mbd"]==0:
				command_var.append("-mbd")
				command_var.append("0")
			elif videofile["mbd"]==1:
				command_var.append("-mbd")
				command_var.append("1")
			elif videofile["mbd"]==2:
				command_var.append("-mbd")
				command_var.append("2")
	
			if (vcd==False):
				command_var.append("-b")
				command_var.append(str(videorate)+"000")
		
			#if disctype!="divx":
			#	lavcopts+=":keyint="+str(keyintv)
			if(copy_audio==False) and (vcd==False):
#					lavcopts+=":acodec="
#					if disctype=="dvd":
#						if fix_ac3:
#							lavcopts+="ac3_fixed"
#						else:
#							lavcopts+="ac3"
#					else:
#						lavcopts+="mp2"
					#lavcopts+=":abitrate="+str(audiorate)
				command_var.append("-ab")
				command_var.append(str(audiorate)+"000")

			if (default_res==False):
				command_var.append("-aspect")
				if aspect_ratio_final>1.4:
					command_var.append("16:9")
				else:
					command_var.append("4:3")
			
			passlog_var=None
			if (encpass>0)  and (isvob==False):
				command_var.append("-pass")
				command_var.append(str(encpass))
				passlog_var=os.path.join(filefolder,filename)+".log"
				if encpass==1:
					try:
						os.remove(passlog_var)
					except:
						 pass

		at=audio_tracks
		while (at>1):
			if (volume!=100):
				command_var.append("-vol")
				command_var.append(str((256*volume)/100))
			command_var.append("-newaudio")
			at-=1
			

		extra_params=videofile["params"] # take the extra params
		while (extra_params!=""):
			extra_params,new_param=devede_other.get_new_param(extra_params)
			if new_param!="":
				command_var.append(new_param)
	
		currentfile=self.create_filename(filefolder+filename,title,chapter,disctype=="divx")

		if (passlog_var != None) and (isvob==False):
			command_var.append("-passlogfile")
			command_var.append(passlog_var)

		if (encpass==1) and (isvob==False):
			command_var.append("-y")
			command_var.append("/dev/null")
		else:
			command_var.append(currentfile)
		

		self.print_error=_("Conversion failed.\nIt seems a bug of Mencoder.")
		if (videofile["params"]!="") or (videofile["params_vf"]!="") or (videofile["params_lame"]!=""):
			self.print_error+="\n"+_("Also check the extra params passed to Mencoder for syntax errors.")
		self.error_not_done=True
		self.launch_program(command_var,read_chars=300)
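
Example 176 builds the whole encoder invocation as an argument list (command_var) rather than one shell string, which sidesteps quoting problems with paths and parameters. A tiny sketch of launching such a list (file names are illustrative; the class itself uses its launch_program() wrapper):

import subprocess

command_var = ["ffmpeg", "-i", "input.avi", "-y", "output.mpg"]
# No shell is involved, so spaces in arguments need no escaping.
returncode = subprocess.call(command_var)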

Example 177

Project: deluge-updatorr
Source File: core.py
View license
    def walk(self, force=False):
        """Implemets automatic torrent updates process.
        Automatic update is available for torrents selected by user
        and having tracker's page URL in torrent's `comment` field.

        Besides that torrent a tracker handler class should be
        associated with domain from the URL mentioned above.

        If `force` set to a list of torrent IDs, only those
        torrents will be checked for updates.
        If `force` is False every torrent scheduled to updates
        by used will be checked.

        """

        # To prevent possible concurrent runs.
        self.walking = True
        try:
            log.info('Updatorr walking...')
            component.get('EventManager').emit(UpdatorrUpdatesCheckStartedEvent())
    
            allow_last_walk_update = False
    
            if isinstance(force, list):
                torrents_list = force
            else:
                torrents_list = self.torrents_to_update
    
            for torrent_id in torrents_list:
                try:
                    torrent_data = self.core.get_torrent_status(torrent_id, [])
                    log.info('Updatorr Processing %s ...' % torrent_data['name'])
                except KeyError:
                    log.debug('Updatorr \tSKIPPED No torrent with id %s listed [yet]' % torrent_id)
                    continue
                # Remove non-URL data from the comment
                torrent_data['comment'] = RE_LINK.search(torrent_data['comment']).group('url')
                if not is_url(torrent_data['comment']):
                    log.info('Updatorr \tSKIPPED No URL found in torrent comment')
                    continue
                # From now on we consider that update took its place.
                # If only this update is not forced.
                if not force:
                    allow_last_walk_update = True
                tracker_handler = get_tracker_handler(torrent_data, log)
                if tracker_handler is None:
                    self.dump_error(torrent_id, 'Unable to find tracker handler for %s' % torrent_data['comment'])
                    continue
                tracker_handler.set_settings(self.trackers_settings.get(tracker_handler.tracker_host))
                new_torrent_filepath = tracker_handler.get_torrent_file()
                if new_torrent_filepath is None:
                    self.dump_error(torrent_id, 'Error in tracker handling: %s' % tracker_handler.get_error_text())
                    continue
    
                # Store cookies from that tracker so future sessions can skip the login step.
                self.trackers_settings[tracker_handler.tracker_host]['cookies'] = tracker_handler.get_cookies(as_dict=True)
    
                new_torrent_contents = read_torrent_file(new_torrent_filepath)
                new_torrent_info = read_torrent_info(new_torrent_contents)
                if torrent_data['hash'] == new_torrent_info['hash']:
                    log.info('Updatorr \tSKIPPED Torrent is up-to-date')
                    continue
                log.info('Updatorr \tTorrent update is available')
    
                new_torrent_prefs = get_new_prefs(torrent_data, new_torrent_info)
                added_torrent_id = self.core.add_torrent_file(None, base64.encodestring(new_torrent_contents), new_torrent_prefs)
    
                if added_torrent_id is not None:
                    self.core.remove_torrent(torrent_id, False)
                    log.info('Updatorr \tTorrent is updated')
                    # Fire up update finished event.
                    component.get('EventManager').emit(UpdatorrUpdateDoneEvent(new_torrent_info['hash']))
                    # Add new torrent hash to continue autoupdates.
                    self.set_items_to_update(new_torrent_info['hash'], True)
                    # Remove old torrent from autoupdates list.
                    self.set_items_to_update(torrent_id, False)
                else:
                    self.dump_error(torrent_id, 'Unable to replace current torrent with a new one')
    
                # No littering, remove temporary .torrent file.
                os.remove(new_torrent_filepath)
    
            if allow_last_walk_update:
                # Remember lastrun time.
                self.last_walk = time.time()
    
            log.info('Updatorr walk is finished')
            component.get('EventManager').emit(UpdatorrUpdatesCheckFinishedEvent())
        except:
            log.error(traceback.format_exc())    
        finally:    
            self.walking = False
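
walk() above fetches a fresh .torrent into a temporary file, compares its info hash with the running torrent's, and removes the temporary file once it is no longer needed. A hedged sketch of that compare-then-clean step; hashing the raw bytes stands in for the real bencoded info-hash computation:

import hashlib
import os

def torrent_changed(old_hash, new_torrent_path):
    """Hash the downloaded file, drop the temporary copy, report change."""
    with open(new_torrent_path, 'rb') as f:
        new_hash = hashlib.sha1(f.read()).hexdigest()
    os.remove(new_torrent_path)  # no littering
    return new_hash != old_hash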

Example 178

Project: babble
Source File: config.py
View license
def listen(port=DEFAULT_LOGGING_CONFIG_PORT):
    """
    Start up a socket server on the specified port, and listen for new
    configurations.

    These will be sent as a file suitable for processing by fileConfig().
    Returns a Thread object on which you can call start() to start the server,
    and which you can join() when appropriate. To stop the server, call
    stopListening().
    """
    if not thread:
        raise NotImplementedError, "listen() needs threading to work"

    class ConfigStreamHandler(StreamRequestHandler):
        """
        Handler for a logging configuration request.

        It expects a completely new logging configuration and uses fileConfig
        to install it.
        """
        def handle(self):
            """
            Handle a request.

            Each request is expected to be a 4-byte length, packed using
            struct.pack(">L", n), followed by the config file.
            Uses fileConfig() to do the grunt work.
            """
            import tempfile
            try:
                conn = self.connection
                chunk = conn.recv(4)
                if len(chunk) == 4:
                    slen = struct.unpack(">L", chunk)[0]
                    chunk = self.connection.recv(slen)
                    while len(chunk) < slen:
                        chunk = chunk + conn.recv(slen - len(chunk))
                    #Apply new configuration. We'd like to be able to
                    #create a StringIO and pass that in, but unfortunately
                    #1.5.2 ConfigParser does not support reading file
                    #objects, only actual files. So we create a temporary
                    #file and remove it later.
                    file = tempfile.mktemp(".ini")
                    f = open(file, "w")
                    f.write(chunk)
                    f.close()
                    try:
                        fileConfig(file)
                    except (KeyboardInterrupt, SystemExit):
                        raise
                    except:
                        traceback.print_exc()
                    os.remove(file)
            except socket.error, e:
                if type(e.args) != types.TupleType:
                    raise
                else:
                    errcode = e.args[0]
                    if errcode != RESET_ERROR:
                        raise

    class ConfigSocketReceiver(ThreadingTCPServer):
        """
        A simple TCP socket-based logging config receiver.
        """

        allow_reuse_address = 1

        def __init__(self, host='localhost', port=DEFAULT_LOGGING_CONFIG_PORT,
                     handler=None):
            ThreadingTCPServer.__init__(self, (host, port), handler)
            logging._acquireLock()
            self.abort = 0
            logging._releaseLock()
            self.timeout = 1

        def serve_until_stopped(self):
            import select
            abort = 0
            while not abort:
                rd, wr, ex = select.select([self.socket.fileno()],
                                           [], [],
                                           self.timeout)
                if rd:
                    self.handle_request()
                logging._acquireLock()
                abort = self.abort
                logging._releaseLock()

    def serve(rcvr, hdlr, port):
        server = rcvr(port=port, handler=hdlr)
        global _listener
        logging._acquireLock()
        _listener = server
        logging._releaseLock()
        server.serve_until_stopped()

    return threading.Thread(target=serve,
                            args=(ConfigSocketReceiver,
                                  ConfigStreamHandler, port))

Example 179

Project: galah
Source File: create_gradebook_csv.py
View license
def _create_gradebook_csv(csv_id, requester, class_id, fill=0):
    csv_id = ObjectId(csv_id)

    csv_file = temp_directory = ""

    # Find any expired archives and remove them
    deleted_files = []
    for i in CSV.objects(expires__lt = datetime.datetime.today()):
        deleted_files.append(i.file_location)

        if i.file_location:
            try:
                os.remove(i.file_location)
            except OSError as e:
                logger.warning(
                    "Could not remove expired csv file at %s: %s.",
                    i.file_location, str(e)
                )

        i.delete()

    if deleted_files:
        logger.info("Deleted csv files %s.", str(deleted_files))

    # This is the CSV object that will be added to the database
    new_csv = CSV(
        id = csv_id,
        requester = requester
    )

    temp_directory = csv_file = None
    try:
        # Create the actual csv file.
        csv_file = open(os.path.join(config["CSV_DIRECTORY"], str(csv_id)), "w")

        the_class = Class.objects.get(id = ObjectId(class_id))

        # Grab all assignments in this class
        assns = list(
            Assignment.objects(for_class = the_class.id)
        )

        print >> csv_file, "%s,%s" % \
            ("Username", ",".join('"{0}"'.format(i.name) for i in assns))

        # Grab all student users for this class.
        users = list(
            User.objects(
                account_type = "student",
                classes = the_class.id
            )
        )

        assn_ids = [i.id for i in assns]
        for user in users:
            # Query for user's most recent submissions in the known assignments
            query = {
                "assignment__in": assn_ids,
                "most_recent": True,
                "user": user.id
            }

            submissions = list(Submission.objects(**query))

            # Initialize each assignment score to empty at first.
            assn_to_score = OrderedDict((i, str(fill)) for i in assn_ids)

            # Go through submissions, associating scores with assignment
            for sub in submissions:
                if sub.test_results:
                    test_result = TestResult.objects.get(id = sub.test_results)
                    if test_result.score is not None:
                        assn_to_score[sub.assignment] = str(test_result.score)

            # Write gradebook results to csv file.
            print >> csv_file, "%s,%s" % \
                (user.email, ",".join(assn_to_score.values()))

        csv_file.close()

        new_csv.file_location = os.path.join(config["CSV_DIRECTORY"], str(csv_id))

        new_csv.expires = \
            datetime.datetime.today() + config["TEACHER_CSV_LIFETIME"]

        new_csv.save(force_insert = True)
    except Exception as e:
        new_csv.file_location = None
        os.remove(os.path.join(config["CSV_DIRECTORY"], str(csv_id)))

        new_csv.error_string = str(e)
        new_csv.save(force_insert = True)

        raise
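
If anything fails mid-write, Example 179 removes the partially written CSV before recording the error and re-raising, so no truncated gradebook is left behind. A compact sketch of that cleanup-on-failure pattern (names are illustrative):

import os

def write_report(path, rows):
    """Write rows to path; on failure, remove the partial file."""
    f = open(path, 'w')
    try:
        for row in rows:
            f.write(row + '\n')
        f.close()
    except Exception:
        f.close()
        os.remove(path)  # don't leave a half-written file behind
        raise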

Example 180

Project: ilastik-0.5
Source File: testModule.py
View license
def test_InteractiveSegmentationItemModuleMgr():
    def h5equal(filename, a):
        f = h5py.File(filename, 'r')
        d = f['volume/data'].value.squeeze()
        a = a.squeeze()        
        assert a.shape == d.shape
        if a.dtype != d.dtype:
            print a.dtype, '!=', d.dtype
            assert a.dtype == d.dtype
        assert numpy.array_equal(d, a)
        return True
        
    def arrayEqual(a,b):
        assert a.shape == b.shape
        assert a.dtype == b.dtype
        if not numpy.array_equal(a,b):
            assert len(a.shape) == 3
            for x in range(a.shape[0]):
                for y in range(a.shape[1]):
                    for z in range(a.shape[2]):
                        if a[x,y,z] != b[x,y,z]:
                            print x,y,z, "a=", a[x,y,z], "b=", b[x,y,z]
            return False
        return True
    
    class FakeSegmentor(SegmentorBase):
        segmentation = None
        seeds = None
        
        segmentationGT = []
        seedsGT = []
        
        def __init__(self):
            for ver in range(3):
                seeds = numpy.zeros((120,120,120,1), dtype=numpy.uint8)
                if  ver == 0:
                    seeds[0,0,0,0] = 1
                elif ver == 1:
                    seeds[0,0,0,0] = 2
                elif ver == 2:
                    seeds[0,0,0,0] = 3
                self.seedsGT.append(seeds)
            
            for i in range(3):
                seg = numpy.ones((120,120,120,1), dtype=numpy.uint8)
                if i == 0:
                    seg[5:10,5:10,5:10,0]    = 2
                elif i == 1:
                    seg[5:30,5:20,8:17,0]    = 3
                    seg[50:70,50:70,40:60,0] = 5
                elif i == 2:
                    seg[8:12,10:30,30:40,0]  = 2
                    seg[20:30,10:30,30:40,0] = 4
                    seg[40:50,10:30,30:40,0] = 6
                self.segmentationGT.append(seg)
        
        def segment(self, labelVolume, labelValues, labelIndices):
            print "fake segment"
            assert labelVolume.shape == (120,120,120,1)
            if labelVolume[0,0,0,0] == 1:
                self.segmentation = self.segmentationGT[0]
            elif labelVolume[0,0,0,0] == 2:
                self.segmentation = self.segmentationGT[1]
            elif labelVolume[0,0,0,0] == 3:
                self.segmentation = self.segmentationGT[2]  
    
    
    # create project with some fake data
    project = Project('Project Name', 'Labeler', 'Description')
    filename = str(QDir.tempPath())+'/testdata.h5'
    f = h5py.File(filename, 'w')
    f.create_group('volume')
    f.create_dataset('volume/data', data=numpy.zeros(shape=(1,120,120,120,1), dtype=numpy.uint8))
    f.close(); del f
    project.addFile([filename])
    os.remove(filename)
    
    dataMgr = project.dataMgr
    segmentor = FakeSegmentor()
    dataMgr.Interactive_Segmentation.segmentor = segmentor
    
    #initialize the module to test
    s = dataMgr._activeImage.module["Interactive_Segmentation"] 
    #create outputPath, make sure it is empty
    outputPath = str(QDir.tempPath())+"/tmpseg"
    print outputPath
    if os.path.exists(outputPath):
        shutil.rmtree(outputPath)
    os.makedirs(outputPath)
    s.init()
    s.outputPath = outputPath

    shape3D = (120,120,120)
    shape4D = (120,120,120,1)
    shape5D = (1,120,120,120,1)

    version = 0

    print "*************************************************************************"
    print "* segment for the first time (version 0)                                *"
    print "*************************************************************************"
    
    s.seedLabelsVolume._data[:] = segmentor.seedsGT[version][:] #fake drawing some seeds
    s.segment() #segment
    
    assert arrayEqual(s.segmentation[0,:,:,:,:], segmentor.segmentation)
    assert not os.path.exists(s.outputPath+'/one')
    assert s._mapKeysToLabels == {}
    assert s._mapLabelsToKeys == {}
    
    #save as 'one'
    s.saveCurrentSegmentsAs('one')
    
    #we now have a 'done' overlay
    doneRef = s.done
    
    assert os.path.exists(s.outputPath)
    assert os.path.exists(s.outputPath+'/done.h5')
    assert os.path.exists(s.outputPath+'/mapping.dat')
    assert os.path.exists(s.outputPath+'/one/segmentation.h5')
    assert os.path.exists(s.outputPath+'/one/seeds.h5')
    
    h5equal(s.outputPath+'/one/segmentation.h5', segmentor.segmentation)
    h5equal(s.outputPath+'/one/seeds.h5', segmentor.seedsGT[version])
    
    assert numpy.where(s.seedLabelsVolume._data != 0) == () 
    
    doneGT = numpy.zeros(shape=shape4D, dtype=numpy.uint32)
    doneGT[numpy.where(segmentor.segmentation == 2)] = 1
    h5equal(s.outputPath+'/done.h5', doneGT)
    f = open(s.outputPath+'/mapping.dat')
    assert f.readlines() == ['1|one\r\n']
    f.close()
    
    assert s._mapKeysToLabels == {'one': set([1])}
    assert s._mapLabelsToKeys == {1: 'one'}
    assert s.segmentKeyForLabel(1) == 'one'
    assert s.segmentLabelsForKey('one') == set([1])

    s.discardCurrentSegmentation()
    assert s.segmentation == None
    assert numpy.where(s.seedLabelsVolume._data != 0) == ()

    print "*************************************************************************"
    print "* remove segment 'one'                                                  *"
    print "*************************************************************************"

    #remove segment by key
    s.removeSegmentsByKey('one')
    assert s._mapKeysToLabels == {}
    assert s._mapLabelsToKeys == {}
    assert numpy.array_equal(s.done, numpy.zeros(shape=s.done.shape, dtype=s.done.dtype))
    assert os.path.exists(s.outputPath)
    assert os.path.exists(s.outputPath+'/done.h5')
    assert os.path.exists(s.outputPath+'/mapping.dat')
    assert not os.path.exists(s.outputPath+'/one')
    f = open(s.outputPath+'/mapping.dat')
    assert f.readlines() == []
    f.close()
    
    print "*************************************************************************"
    print "* segment for the second time (version 1)                               *"
    print "*************************************************************************"
    
    version = 1
    s.seedLabelsVolume._data[:] = segmentor.seedsGT[version][:] #fake drawing some seeds
    s.segment()
    assert arrayEqual(s.segmentation[0,:,:,:,:], segmentor.segmentation)
    assert s._mapKeysToLabels == {}
    assert s._mapLabelsToKeys == {}
    
    s.saveCurrentSegmentsAs('two')
    assert os.path.exists(s.outputPath+'/two/segmentation.h5')
    assert os.path.exists(s.outputPath+'/two/seeds.h5')
    relabeledGT = segmentor.segmentation.copy()
    relabeledGT[numpy.where(relabeledGT == 1)] = 0
    relabeledGT[numpy.where(relabeledGT == 3)] = 1
    relabeledGT[numpy.where(relabeledGT == 5)] = 2
    assert arrayEqual(s.done.squeeze(), relabeledGT.squeeze().astype(numpy.uint32))
    
    assert s._mapKeysToLabels == {'two': set([1, 2])}
    assert s._mapLabelsToKeys == {1: 'two', 2: 'two'}
    
    print "*************************************************************************"
    print "* segment again (version 2)                                             *"
    print "*************************************************************************"
 
    version = 2
    s.seedLabelsVolume._data[:] = segmentor.seedsGT[version][:] #fake drawing some seeds
    s.segment()
    assert arrayEqual(s.segmentation[0,:,:,:,:], segmentor.segmentationGT[version])
    
    s.saveCurrentSegmentsAs('three')
    assert os.path.exists(s.outputPath)
    assert os.path.exists(s.outputPath+'/done.h5')
    assert os.path.exists(s.outputPath+'/mapping.dat')
    assert os.path.exists(s.outputPath+'/two/segmentation.h5')
    assert os.path.exists(s.outputPath+'/two/seeds.h5')
    assert os.path.exists(s.outputPath+'/three/segmentation.h5')
    assert os.path.exists(s.outputPath+'/three/seeds.h5')
    
    assert s._mapKeysToLabels == {'two': set([1, 2]), 'three': set([3, 4, 5])}
    assert s._mapLabelsToKeys == {1: 'two', 2: 'two', 3: 'three', 4: 'three', 5: 'three'}
    
    doneGT = numpy.zeros(shape=shape4D, dtype=numpy.uint32)
    doneGT[numpy.where(segmentor.segmentationGT[1] == 3)] = 1
    doneGT[numpy.where(segmentor.segmentationGT[1] == 5)] = 2
    doneGT[numpy.where(segmentor.segmentationGT[2] == 2)] = 3
    doneGT[numpy.where(segmentor.segmentationGT[2] == 4)] = 4
    doneGT[numpy.where(segmentor.segmentationGT[2] == 6)] = 5
    assert arrayEqual(doneGT.squeeze(), s.done.squeeze())
    
    assert h5equal(s.outputPath+'/two/segmentation.h5', segmentor.segmentationGT[1])
    assert h5equal(s.outputPath+'/three/segmentation.h5', segmentor.segmentationGT[2])
    
    print "*************************************************************************"
    print "* remove segments 'three'                                               *"
    print "*************************************************************************"
    
    s.removeSegmentsByKey('three')
    assert s._mapKeysToLabels == {'two': set([1, 2])}
    assert s._mapLabelsToKeys == {1: 'two', 2: 'two'}
    assert os.path.exists(s.outputPath)
    assert os.path.exists(s.outputPath+'/done.h5')
    assert os.path.exists(s.outputPath+'/mapping.dat')
    assert os.path.exists(s.outputPath+'/two/segmentation.h5')
    assert os.path.exists(s.outputPath+'/two/seeds.h5')
    assert not os.path.exists(s.outputPath+'/three')
    f = open(s.outputPath+'/mapping.dat')
    assert f.readlines() == ['1|two\r\n', '2|two\r\n']
    f.close()
    
    doneGT = numpy.zeros(shape=shape4D, dtype=numpy.uint32)
    doneGT[numpy.where(segmentor.segmentationGT[1] == 3)] = 1
    doneGT[numpy.where(segmentor.segmentationGT[1] == 5)] = 2
    assert arrayEqual(doneGT.squeeze(), s.done.squeeze())
    assert h5equal(s.outputPath+'/done.h5', doneGT)
    
    print "*************************************************************************"
    print "* edit segments 'two'                                                   *"
    print "*************************************************************************"
    
    s.editSegmentsByKey('two')

    print "check...."
    assert arrayEqual(s.seedLabelsVolume._data[0,:,:,:,:], segmentor.seedsGT[1])
    #assert arrayEqual(s.segmentation[0,:,:,:,:].squeeze(), segmentor.segmentation.squeeze())
    
    s.saveCurrentSegment()
    
    print "*************************************************************************"
    print "* remove segments 'two'                                                 *"
    print "*************************************************************************"
    
    s.removeSegmentsByKey('two')
    assert s._mapKeysToLabels == {}
    assert s._mapLabelsToKeys == {}
    assert os.path.exists(s.outputPath)
    assert os.path.exists(s.outputPath+'/done.h5')
    assert os.path.exists(s.outputPath+'/mapping.dat')
    assert not os.path.exists(s.outputPath+'/two')
    assert not os.path.exists(s.outputPath+'/three')
    f = open(s.outputPath+'/mapping.dat')
    assert f.readlines() == []
    f.close()
    
    doneGT = numpy.zeros(shape=shape4D, dtype=numpy.uint32)
    assert arrayEqual(doneGT.squeeze(), s.done.squeeze())
    assert h5equal(s.outputPath+'/done.h5', doneGT)
    
    #make sure that we have not overwritten the done overlay, which
    #would cause the connection with the 'Segmentation/Done' overlay
    #to break
    assert doneRef is s.done
    
    jobMachine.GLOBAL_WM.stopWorkers()
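
The setup at the top of this example pairs shutil.rmtree with os.makedirs to guarantee an empty scratch directory before any assertions run. A minimal sketch of that reset idiom (the reset_dir name is ours, not the project's):

import os
import shutil

def reset_dir(path):
    # Wipe the directory tree if it exists, then recreate it empty.
    if os.path.exists(path):
        shutil.rmtree(path)
    os.makedirs(path)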

Example 181

Project: pueue
Source File: daemon.py
View license
    def main(self):
        while self.active:
            # Check if there is a running process
            if self.process is not None:
                # Poll the process to check for termination
                self.process.poll()
                if self.process.returncode is not None:
                    # If a process is terminated by `stop` or `kill`
                    # we want to queue it again instead of closing it as failed.
                    if not self.stopping:
                        # Get std_out and err_out
                        output, error_output = self.process.communicate()
                        self.stdout.seek(0)
                        output = self.stdout.read().replace('\n', '\n    ')

                        self.stderr.seek(0)
                        error_output = self.stderr.read().replace('\n', '\n    ')

                        # Mark queue entry as finished and save returncode
                        self.queue[self.current_key]['returncode'] = self.process.returncode
                        if self.process.returncode != 0:
                            self.queue[self.current_key]['status'] = 'errored'
                        else:
                            self.queue[self.current_key]['status'] = 'done'

                        # Add outputs to log
                        self.queue[self.current_key]['stdout'] = output
                        self.queue[self.current_key]['stderr'] = error_output
                        self.queue[self.current_key]['end'] = str(datetime.now().strftime("%H:%M"))

                        # Pause Daemon, if it is configured to stop
                        if self.config['default']['stopAtError'] is True and not self.reset:
                            if self.process.returncode != 0:
                                self.paused = True

                        self.write_queue()
                        self.log()
                    else:
                        # Process finally finished.
                        # Now we can set the status to paused.
                        self.paused = True
                        self.stopping = False
                        if self.remove_current is True:
                            self.remove_current = False
                            del self.queue[self.current_key]
                        else:
                            self.queue[self.current_key]['status'] = 'queued'

                    self.process = None
                    self.current_key = None
                    self.processStatus = 'No running process'

            if self.reset:
                # Rotate log
                self.log(rotate=True)

                # Reset queue
                self.queue = {}
                self.write_queue()

                # Reset Log
                self.log()
                self.nextKey = 0
                self.reset = False

            # Start next Process
            if not self.paused and len(self.queue) > 0 and self.process is None:
                self.current_key = self.get_next_item()
                if self.current_key is not None:
                    # Get instruction for next process
                    next_item = self.queue[self.current_key]
                    #
                    self.stdout.seek(0)
                    self.stdout.truncate()
                    self.stderr.seek(0)
                    self.stderr.truncate()
                    # Spawn subprocess
                    self.process = subprocess.Popen(
                        next_item['command'],
                        shell=True,
                        stdout=self.stdout,
                        stderr=self.stderr,
                        stdin=subprocess.PIPE,
                        universal_newlines=True,
                        cwd=next_item['path']
                    )
                    self.queue[self.current_key]['status'] = 'running'
                    self.queue[self.current_key]['start'] = str(datetime.now().strftime("%H:%M"))
                    self.processStatus = 'running'

            # Create list for waitable objects
            readable, writable, errored = select.select(self.read_list, [], [], 1)
            for socket in readable:
                if socket is self.socket:
                    # Listening for clients to connect.
                    # Client sockets are added to readlist to be processed.
                    try:
                        self.clientSocket, self.clientAddress = self.socket.accept()
                        self.read_list.append(self.clientSocket)
                    except:
                        print('Daemon rejected client')
                else:
                    # Trying to receive instruction from client socket
                    try:
                        instruction = self.clientSocket.recv(1048576)
                    except EOFError:
                        print('Client died while sending message, dropping received data.')
                        instruction = -1

                    # Check for valid instruction
                    if instruction != -1:
                        # Check if received data can be unpickled.
                        # Instruction will be ignored if it can't be unpickled
                        try:
                            command = pickle.loads(instruction)
                        except EOFError:
                            print('Received message is incomplete, dropping received data.')
                            self.read_list.remove(self.clientSocket)
                            self.clientSocket.close()

                            command = {}
                            command['mode'] = ''

                        # Executing respective function depending on command mode
                        if command['mode'] == 'add':
                            self.respond_client(self.execute_add(command))

                        elif command['mode'] == 'remove':
                            self.respond_client(self.execute_remove(command))

                        elif command['mode'] == 'switch':
                            self.respond_client(self.execute_switch(command))

                        elif command['mode'] == 'send':
                            self.respond_client(self.execute_send(command))

                        elif command['mode'] == 'status':
                            self.respond_client(self.execute_status(command))

                        elif command['mode'] == 'reset':
                            self.respond_client(self.execute_reset())

                        elif command['mode'] == 'start':
                            self.respond_client(self.execute_start())

                        elif command['mode'] == 'pause':
                            self.respond_client(self.execute_pause(command))

                        elif command['mode'] == 'restart':
                            self.respond_client(self.execute_restart(command))

                        elif command['mode'] == 'stop':
                            self.respond_client(self.execute_stop(command))

                        elif command['mode'] == 'kill':
                            self.respond_client(self.execute_kill(command))

                        elif command['mode'] == 'STOPDAEMON':
                            self.respond_client({'message': 'Pueue daemon shutting down',
                                                'status': 'success'})
                            # Kill current process and set active
                            # to False to stop while loop
                            self.active = False
                            self.execute_kill({'remove': False})
                            break

        self.socket.close()
        os.remove(get_socket_path())
        sys.exit(0)
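
Note that the shutdown path calls os.remove(get_socket_path()) unconditionally, so a socket file that was already cleaned up would raise OSError. A tolerant variant of that last step (a sketch, not pueue's actual code; socket_path is a stand-in):

import errno
import os

def remove_socket_file(socket_path):
    # Remove the daemon's unix socket file; a missing file is fine,
    # any other OS error is still raised.
    try:
        os.remove(socket_path)
    except OSError as error:
        if error.errno != errno.ENOENT:
            raise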

Example 182

Project: kamaelia_
Source File: CheckpointSequencer.py
View license
    def main(self):
        current = self.initial
        highest = self.highest
        self.send( self.loadMessage(current), "outbox")
        dirty = False
        while 1:
            while self.dataReady("inbox"):
                command = self.recv("inbox")
                if command == "delete":
                    try:
                        os.remove(self.notepad + "/slide." + str(current) + ".png")
                    except Exception, e:
                        pass
                    
                    if (current == highest) & (highest > 1):
                        # go to previous slide
                        dirty = False
                        command = "prev"
                        highest -= 1
                        self.fixNumbering()
                    elif (current < highest) & (current != 1):
                        # go to previous slide and fix numbering
                        dirty = False
                        command = "prev"
                        highest -= 1
                        self.fixNumbering()
                    elif (current == 1) & (current < highest):
                        # fix numbering then reload current slide
                        highest -= 1
                        self.fixNumbering()
                        self.send( self.loadMessage(current), "outbox")
                    else:
                        # Do nothing
                        pass
                if command == "save":
                    self.send( self.saveMessage(current), "outbox")
                if command == "prev":
                    if current >1:
                        if dirty:
                            self.send( self.saveMessage(current), "outbox")
                            dirty = False
                        current -= 1
                        self.send( self.loadMessage(current), "outbox")
                if command == "next":
                    if current <highest:
                        if dirty:
                            self.send( self.saveMessage(current), "outbox")
                            dirty = False
                        current += 1
                        self.send( self.loadMessage(current), "outbox")
                if command == "first":
                    if dirty:
                        dirty = False
                    current = 1
                    self.send( self.loadMessage(current), "outbox")        
                    highest = 0
                    for x in os.listdir(self.notepad):
                        if (os.path.splitext(x)[1] == ".png"):
                            highest += 1
                    if (highest < 1):
                        highest = 1
                if command == "checkpoint":
                    if (current == highest):
                        self.send( self.saveMessage(current), "outbox")
                        highest += 1
                        current = highest
                    else:
                        highest += 1
                        current = highest
                        self.send( self.saveMessage(current), "outbox")
                        highest += 1
                        current = highest
                if command == "new":
                    self.send( self.saveMessage(current), "outbox")
                    highest += 1
                    current = highest
                    self.send( self.newMessage(current), "outbox")
                    self.send( self.saveMessage(current), "outbox")
                if command == "undo":
                    self.send( self.loadMessage(current), "outbox")
                if command == "dirty":
#                    print "OK, got dirty message"
                    dirty = True
#                    self.send( self.loadMessage(current), "outbox")

                if command == ("prev", "local"):
                    if current >1:
                        if dirty:
                            self.send( self.saveMessage(current), "outbox")
                            dirty = False
                        current -= 1
                        mess = self.loadMessage(current)
                        mess[0].append("nopropogate")
                        self.send( mess, "outbox")

                if command == ("next", "local"):
                    if current <highest:
                        if dirty:
                            self.send( self.saveMessage(current), "outbox")
                            dirty = False
                        current += 1
                        mess = self.loadMessage(current)
                        mess[0].append("nopropogate")
                        self.send( mess, "outbox")
#                        self.send( self.loadMessage(current), "outbox")


            if not self.anyReady():
                self.pause()
                yield 1
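
The "delete" branch above builds the slide path by string concatenation and swallows any Exception raised by os.remove. A narrower sketch of the same step (remove_slide and its arguments are our names):

import os

def remove_slide(notepad, number):
    # Delete one slide image; only ignore OS-level failures such as
    # the file already being gone.
    path = os.path.join(notepad, "slide.%d.png" % number)
    try:
        os.remove(path)
    except OSError:
        pass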

Example 183

Project: nupic
Source File: aggregation_test.py
View license
  def test_GapsInIrregularData(self):
    # Clean up files left over from a previous run, if any exist
    import glob
    for f in glob.glob('gap.*'):
      print 'Removing', f
      os.remove(f)

    #class TestParser(BaseParser):
    #  def __init__(self):
    #    def parseTimestamp(s):
    #      d,t = s.split()
    #      year, month, day = [int(x) for x in d.split('-')]
    #      hour, minute, second = [int(x) for x in t.split(':')]
    #      return datetime.datetime(year, month, day, hour, minute, second)
    #
    #    BaseParser.__init__(self,
    #                        [('dateTime', parseTimestamp),
    #                         ('sequenceId', int),
    #                         ('cardtype', int),
    #                         ('fraud', bool),
    #                         ('amount', float)],
    #                        delimiter=',')
    #  def parse(self, line):
    #    values = BaseParser.parse(self, line)
    #    return values

  #dateTime,cardnum,cardtype,fraud,amount
    data = """\
2009-04-03 19:05:06,129.3
2009-04-04 15:19:12,46.6
2009-04-07 02:54:04,30.32
2009-04-07 06:27:12,84.52
2009-04-07 06:42:21,21.1
2009-04-09 01:01:14,29.24
2009-04-09 06:47:42,99.76
2009-04-11 18:06:11,29.66
2009-04-11 18:12:53,148.32
2009-04-11 19:15:08,61.03
2009-04-15 19:25:40,53.14
2009-05-04 21:07:02,816.75
2009-05-04 21:08:27,686.07
2009-05-06 20:40:04,489.08
2009-05-06 20:40:42,586.9
2009-05-06 20:41:15,554.3
2009-05-06 20:41:51,652.11"""
    fields = [('timestamp', 'datetime', 'T'), ('amount', 'float', '')]
    with FileRecordStream(resource_filename('nupic.datafiles', 'gap.csv'), write=True, fields=fields) as f:
      lines = data.split('\n')
      for line in lines:
        t, a = line.split(',')

        components = t.split()

        yyyy, mm, dd = [int(x) for x in components[0].split('-')]
        h, m, s = [int(x) for x in components[1].split(':')]

        t = datetime.datetime(yyyy, mm, dd, h, m, s)
        a = float(a)
        f.appendRecord([t, a])

    aggregationOptions = dict(
      timeField='timestamp',
      fields=[('timestamp', lambda x: x[0]),
              ('amount', sum)],
      hours=24
      )


    handle = \
      tempfile.NamedTemporaryFile(prefix='agg_gap_hours_24', 
        suffix='.csv', 
        dir='.')
    outputFile = handle.name
    handle.close()
    
    if os.path.isfile(outputFile):
      os.remove(outputFile)
    self.assertFalse(os.path.exists(outputFile),
                     msg="shouldn't exist, but does: " + str(outputFile))

    result = generateDataset(aggregationOptions, 'gap.csv', outputFile)
    self.assertEqual(
      os.path.normpath(os.path.abspath(outputFile)), os.path.normpath(result),
      msg="result = '%s'; outputFile = '%s'" % (result, outputFile))
    self.assertTrue(os.path.isfile(outputFile),
                    msg="outputFile missing or is not file: %r" % (outputFile))
    print outputFile
    print '-' * 30

    s = ''
    for r in FileRecordStream(outputFile):
      s += ', '.join([str(x) for x in r]) + '\n'

    expected = """\
2009-04-03 19:05:06, 175.9
2009-04-06 19:05:06, 135.94
2009-04-08 19:05:06, 129.0
2009-04-10 19:05:06, 177.98
2009-04-11 19:05:06, 61.03
2009-04-15 19:05:06, 53.14
2009-05-04 19:05:06, 1502.82
2009-05-06 19:05:06, 2282.39
"""

    self.assertEqual(s, expected)

    return
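
The test reserves a unique output name with NamedTemporaryFile, closes the handle, and then makes sure no file is left behind before generateDataset creates it. With the default delete=True the close() already unlinks the file, so the explicit os.remove above is only a safeguard. A sketch that makes the cleanup explicit by using delete=False (fresh_output_name is our name):

import os
import tempfile

def fresh_output_name(prefix, suffix='.csv'):
    # Reserve a unique filename, then delete the placeholder so the
    # code under test can create the file itself.
    handle = tempfile.NamedTemporaryFile(prefix=prefix, suffix=suffix,
                                         dir='.', delete=False)
    name = handle.name
    handle.close()
    os.remove(name)
    return name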

Example 184

Project: picrust
Source File: run_genome_evaluations.py
View license
def main():
    option_parser, opts, args =\
                   parse_command_line_parameters(**script_info)

    #set some defaults for the options
    input_dir=opts.input_dir
    output_dir=opts.output_dir or input_dir
    tmp_dir=opts.tmp_dir or output_dir
    parallel_method=opts.parallel_method
    asr_method = opts.asr_method
    predict_traits_method = opts.prediction_method
    
    if opts.num_jobs > 20 and parallel_method == 'multithreaded':
        raise ValueError("You probably don't want to run multithreaded evaluations with a large num_jobs. Please adjust the options num_jobs and/or parallel_method")
        
    if opts.with_confidence and asr_method not in ['ace_ml','ace_reml']:
        raise ValueError("PICRUST currently only supports confidence intervals with the ace_ml and ace_reml ASR methods")

    if opts.verbose:
        print "Reconstruction method:",asr_method
        print "Prediction method:",predict_traits_method
        print "Parallel method:",parallel_method
        print "num_jobs:",opts.num_jobs
        print "\nOutput will be saved here:'%s'" %output_dir 
    
    #create the output directory unless it already exists
    make_output_dir(output_dir)

    if(parallel_method=='sge'):
        cluster_jobs_fp=join(get_picrust_project_dir(),'scripts','start_parallel_picrust_jobs_sge.py')
    elif(parallel_method=='multithreaded'):
        cluster_jobs_fp=join(get_picrust_project_dir(),'scripts','start_parallel_picrust_jobs.py')
    elif(parallel_method=='torque'):
        cluster_jobs_fp=join(get_picrust_project_dir(),'scripts','start_parallel_picrust_jobs_torque.py')
    else:
        raise RuntimeError("unknown parallel_method: %s" % parallel_method)


    #get the test datasets to run in the input directory (based on exp_traits files)
    expect_test_files=glob(join(input_dir,'exp_traits--*')) 

    test_datasets={}
    for file_name in expect_test_files:
        test_id=file_name.replace(join(input_dir,'exp_traits--'),'',1)
        #create a dict with the test files as values in the ref list
        test_datasets[test_id]=[ join(input_dir,'test_trait_table--'+test_id),join(input_dir,'test_tree--'+test_id),join(input_dir,'exp_traits--'+test_id)]
    
    created_tmp_files=[]    
    output_files=[]

    #create a tmp file to store the job commands (which we will pass to our parallel script to run)
    jobs_fp=get_tmp_filename(tmp_dir=tmp_dir,prefix='jobs_')
    jobs=open(jobs_fp,'w')
    created_tmp_files.append(jobs_fp)

    #get location of scripts we need to run
    asr_script_fp = join(get_picrust_project_dir(),'scripts','ancestral_state_reconstruction.py')
    predict_traits_script_fp = join(get_picrust_project_dir(),'scripts','predict_traits.py')

    #run each test dataset through the pipeline
    for test_id in test_datasets:

        asr_out_fp=join(output_dir,'asr--'+asr_method+'--'+test_id)
        asr_params_out_fp=join(output_dir,'--'.join(['asr',asr_method,'asr_params',test_id]))
        created_tmp_files.append(asr_out_fp)

        if opts.check_for_null_files and exists(asr_out_fp) and file_contains_nulls(asr_out_fp):
            #remove file
            if opts.verbose:
                print "Existing ASR file contains null characters. Will run ASR again after removing: "+asr_out_fp
            remove(asr_out_fp)
        

        if exists(asr_out_fp) and not opts.force:
            if opts.verbose:
                print "Output file: {0} already exists, so we will skip it.".format(asr_out_fp)
            asr_cmd = "echo 'Skipping ASR for %s, file %s exists already'" %(test_id,asr_out_fp)
        else:
            #create the asr command
            asr_cmd= """python {0} -i "{1}" -t "{2}" -m {3} -o "{4}" -c "{5}" """.format(asr_script_fp, test_datasets[test_id][0], test_datasets[test_id][1], asr_method, asr_out_fp, asr_params_out_fp)

        predict_traits_out_fp=join(output_dir,'--'.join(['predict_traits',predict_traits_method,\
          opts.weighting_method,test_id]))
        
        if opts.with_accuracy:
            predict_traits_accuracy_out_fp=join(output_dir,'--'.join(['predict_traits',predict_traits_method,\
              opts.weighting_method,'accuracy_metrics',test_id]))

        if opts.check_for_null_files and exists(predict_traits_out_fp) and file_contains_nulls(predict_traits_out_fp):
            if opts.verbose:
                print "Existing trait predictions file contains null characters. Will run it again after removing: "+predict_traits_out_fp
            remove(predict_traits_out_fp)

        if exists(predict_traits_out_fp) and not opts.force:
            if opts.verbose:
                print "Prediction file: {0} already exists. Skipping ASR and prediction for this organism".format(predict_traits_out_fp)
            continue
        
        output_files.append(predict_traits_out_fp)

        genome_id=split('--',test_id)[2]
        
        if predict_traits_method == 'nearest_neighbor':
            #don't do asr step
            predict_traits_cmd= """python {0} -i "{1}" -t "{2}" -g "{3}" -o "{4}" -m "{5}" """.format(predict_traits_script_fp, test_datasets[test_id][0], opts.ref_tree, genome_id, predict_traits_out_fp,predict_traits_method)
            jobs.write(predict_traits_cmd+"\n")
        else:

            #create the predict traits command
            predict_traits_cmd= """python {0} -i "{1}" -t "{2}" -r "{3}" -g "{4}" -o "{5}" -m "{6}" -w {7} """.format(predict_traits_script_fp,\
            test_datasets[test_id][0], opts.ref_tree, asr_out_fp,genome_id, predict_traits_out_fp,predict_traits_method,opts.weighting_method)

            #Instruct predict_traits to use confidence intervals output by ASR
            if opts.with_confidence:
                confidence_param = ' -c "%s"' %(asr_params_out_fp)
                predict_traits_cmd = predict_traits_cmd + confidence_param
        
            #Instruct predict traits to output the NTSI measure of distance to
            #nearby sequences.

            if opts.with_accuracy:
                accuracy_param = ' -a "%s"' %(predict_traits_accuracy_out_fp)
                predict_traits_cmd = predict_traits_cmd + accuracy_param

            #add the job command to the jobs file
            jobs.write(asr_cmd+';'+predict_traits_cmd+"\n")

    jobs.close()

    #created_tmp_files.extend(output_files)

    #submit the jobs
    job_prefix='eval_'
    
    if opts.verbose:
        print "Submitting jobs:",cluster_jobs_fp,jobs_fp,job_prefix,opts.num_jobs
    submit_jobs(cluster_jobs_fp ,jobs_fp,job_prefix,num_jobs=opts.num_jobs)
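
A pattern worth noting here: before reusing cached outputs, the script deletes any file that contains null characters, treating it as the truncated artifact of an earlier crashed run. A sketch of that guard (file_contains_nulls is the script's helper name, but this body is our assumption about it):

import os

def file_contains_nulls(path):
    # Assumed heuristic: any NUL byte means the file is corrupt.
    with open(path, 'rb') as handle:
        return b'\x00' in handle.read()

def remove_if_corrupt(path):
    # Delete a cached output so the pipeline regenerates it.
    if os.path.exists(path) and file_contains_nulls(path):
        os.remove(path)
        return True
    return False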

Example 185

Project: enigma2
Source File: MountManager.py
View license
	def buildMy_rec(self, device):
		device2 = ''
		# Strip a partition suffix ('1'..'8' or 'p1'..'p8') from the device
		# name to get the parent block device; later matches override earlier
		# ones, so 'p1' wins over '1' for e.g. mmcblk0p1.
		for token in ('1', '2', '3', '4', '5', '6', '7', '8',
				'p1', 'p2', 'p3', 'p4', 'p5', 'p6', 'p7', 'p8'):
			try:
				if device.find(token) > 1:
					device2 = device.replace(token, '')
			except:
				device2 = ''
		devicetype = path.realpath('/sys/block/' + device2 + '/device')
		d2 = device
		name = 'USB: '
		mypixmap = '/usr/lib/enigma2/python/Plugins/Extensions/Infopanel/icons/dev_usbstick.png'
		if device2.startswith('mmcblk'):
			model = file('/sys/block/' + device2 + '/device/name').read()
			mypixmap = '/usr/lib/enigma2/python/Plugins/Extensions/Infopanel/icons/dev_mmc.png'
			name = 'MMC: '
		else:
			model = file('/sys/block/' + device2 + '/device/model').read()
		model = str(model).replace('\n', '')
		des = ''
		if devicetype.find('/devices/pci') != -1 or devicetype.find('ahci') != -1:
			name = _("HARD DISK: ")
			mypixmap = '/usr/lib/enigma2/python/Plugins/Extensions/Infopanel/icons/dev_hdd.png'
		name = name + model
		self.Console = Console()
		self.Console.ePopen("sfdisk -l | grep swap | awk '{print $(NF-9)}' >/tmp/devices.tmp")
		sleep(0.5)
		try:
			f = open('/tmp/devices.tmp', 'r')
			swapdevices = f.read()
			f.close()
		except:
			swapdevices = ' '
		if path.exists('/tmp/devices.tmp'):
			remove('/tmp/devices.tmp')
		swapdevices = swapdevices.replace('\n','')
		swapdevices = swapdevices.split('/')
		f = open('/proc/mounts', 'r')
		for line in f.readlines():
			if line.find(device) != -1:
				parts = line.strip().split()
				d1 = parts[1]
				dtype = parts[2]
				rw = parts[3]
				break
			else:
				if device in swapdevices:
					parts = line.strip().split()
					d1 = _("None")
					dtype = 'swap'
					rw = _("None")
					break
				else:
					d1 = _("None")
					dtype = _("unavailable")
					rw = _("None")
		f.close()
		f = open('/proc/partitions', 'r')
		for line in f.readlines():
			if line.find(device) != -1:
				parts = line.strip().split()
				size = int(parts[2])
				if (((float(size) / 1024) / 1024) / 1024) > 1:
					des = _("Size: ") + str(round((((float(size) / 1024) / 1024) / 1024),2)) + _("TB")
				elif ((size / 1024) / 1024) > 1:
					des = _("Size: ") + str((size / 1024) / 1024) + _("GB")
				else:
					des = _("Size: ") + str(size / 1024) + _("MB")
			else:
				try:
					size = file('/sys/block/' + device2 + '/' + device + '/size').read()
					size = str(size).replace('\n', '')
					size = int(size)
				except:
					size = 0
				if ((((float(size) / 2) / 1024) / 1024) / 1024) > 1:
					des = _("Size: ") + str(round(((((float(size) / 2) / 1024) / 1024) / 1024),2)) + _("TB")
				elif (((size / 2) / 1024) / 1024) > 1:
					des = _("Size: ") + str(((size / 2) / 1024) / 1024) + _("GB")
				else:
					des = _("Size: ") + str((size / 2) / 1024) + _("MB")
		f.close()
		if des != '':
			if rw.startswith('rw'):
				rw = ' R/W'
			elif rw.startswith('ro'):
				rw = ' R/O'
			else:
				rw = ""
			des += '\t' + _("Mount: ") + d1 + '\n' + _("Device: ") + '/dev/' + device + '\t' + _("Type: ") + dtype + rw
			png = LoadPixmap(mypixmap)
			mountP = d1
			deviceP = '/dev/' + device
			res = (name, des, png, mountP, deviceP)
			self.list.append(res)
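
The swap-device detection writes a shell pipeline's output to /tmp/devices.tmp, reads it back, and removes the file afterwards. A sketch that folds the read and the cleanup into one helper (read_and_discard is our name; the original also sleeps briefly to wait for the Console pipeline):

import os

def read_and_discard(path, default=' '):
    # Read a scratch file produced by a shell pipeline, then delete it.
    try:
        f = open(path, 'r')
        data = f.read()
        f.close()
    except IOError:
        return default
    if os.path.exists(path):
        os.remove(path)
    return data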

Example 186

Project: fMBT
Source File: teststeps.py
View license
def iExecute():
    for f in glob.glob("stats-output-*"):
        try: os.remove(f)
        except: pass

    # Check if this combination hits a seeded error
    if ((seeded_logfile or seeded_format or seeded_output or seeded_plot) and
        (not seeded_logfile or fmbt_stats_logfile in seeded_logfile) and
        (not seeded_format or fmbt_stats_format in seeded_format) and
        (not seeded_output or fmbt_stats_output in seeded_output) and
        (not seeded_plot or fmbt_stats_plot in seeded_plot)):
        raise Exception("Seeded error")

    stepslogfile = file("teststeps.log","w")
    cmd = "fmbt-stats %s %s %s %s %s" % (
        fmbt_stats_format,
        fmbt_stats_output,
        fmbt_stats_plot,
        fmbt_stats_logfile,
        fmbt_stats_redirect)
    fmbtlog("Running '%s'" % (cmd,))
    p = subprocess.Popen(cmd, shell=True,
                         stdin  = subprocess.PIPE,
                         stdout = stepslogfile.fileno(),
                         stderr = stepslogfile.fileno())
    p.stdin.close()
    exit_status = p.wait()
    adapterlog("'%s' exit status: %s" % (cmd, exit_status))

    # Check exit status
    if fmbt_stats_logfile.endswith("-0.log"):
        if exit_status != 1:
            raise Exception("exit status != 1 with empty log. Try: " + cmd)
        return None # no further checks for an empty log
    elif exit_status != 0:
        raise Exception("exit status != 0 with non-empty log. Try: " + cmd)

    # Read produced statistics text file
    if fmbt_stats_output.startswith("-o"):
        stats_text = file(fmbt_stats_output[3:]).read()
        stats_text_format = fmbt_stats_output.split('.')[-1]
    elif fmbt_stats_redirect != "":
        stats_text = file("stats-output-text.txt").read()
        stats_text_format = "txt"
    if stats_text.strip() == "":
        raise Exception("empty output file. Try: " + cmd)

    # Check that every step seems to be reported
    expected_step_count = int(fmbt_stats_logfile[len("stats-input-"):-len(".log")])
    if fmbt_stats_format.startswith("-f times") or fmbt_stats_format == "":
        # Times stats: sum up numbers in the total column
        if stats_text_format == "txt":
            step_count = sum([int(row[39:49])
                              for row in stats_text.split('\n')[2:]
                              if row])
        elif stats_text_format == "csv":
            step_count = sum([int(row.split(';')[4])
                              for row in stats_text.split('\n')[2:]
                              if row])
        elif stats_text_format == "html":
            step_count = sum([int(row.split('</td><td>')[4])
                             for row in stats_text.split('\n')[3:-2]
                             if row])
        else:
            raise Exception("unknown times output format: %s" % (stats_text_format,))
    elif fmbt_stats_format.startswith("-f speed"):
        # Speed stats: count rows
        if stats_text_format == "txt":
            step_count = stats_text.count('\n')-2
        elif stats_text_format == "csv":
            step_count = stats_text.count('\n')-2
        elif stats_text_format == "html":
            step_count = stats_text.count('\n')-5
        else:
            raise Exception("unknown speed output format: %s" % (stats_text_format,))        
    elif fmbt_stats_format.startswith("-f dist"):
        # Distribution stats: sum up numbers in the matrix. Needs
        # adding one because there's no previous action for the first
        # action, and there's no next action for the last one. This
        # validation is skipped if only unique actions are shown.
        if "uniq" in fmbt_stats_format:
            step_count = expected_step_count # skip the test
        elif stats_text_format == "txt":
            step_count = sum([sum([int(c) for c in row.split('"')[0].split()])
                              for row in stats_text.split('\n')[2:]
                              if row]) + 1
        elif stats_text_format == "csv":
            step_count = sum([sum([int(c) for c in row.split(';')[:-1]])
                              for row in stats_text.split('\n')[2:]
                              if row]) + 1
        elif stats_text_format == "html":
            step_count = sum([sum([int(c) for c in row[8:].split('</td><td>')[:-1]])
                              for row in stats_text.split('\n')[4:]
                              if row]) + 1
        else:
            raise Exception("unknown dist output format: %s" % (stats_text_format,))
    if step_count != expected_step_count:
        raise Exception('text output reports %s steps (expected: %s). Try: %s'
                            % (step_count, expected_step_count, cmd))

    # Check that a non-empty plot file has been created if requested.
    if fmbt_stats_plot.startswith("-p"):
        if "," in fmbt_stats_plot:
            plot_filename = fmbt_stats_plot.split(",")[0][3:]
        else:
            plot_filename = fmbt_stats_plot[3:]
        if not os.stat(plot_filename).st_size > 0:
            raise Exception("zero-length plot file. Try: %s" % (cmd,))

Example 187

Project: openstates
Source File: votes.py
View license
    def scrape(self, chamber, session):
        # Unfortunately, you now have to request access to FTP.
        # This method of retrieving votes needs to be changed or
        # fall back to traditional web scraping.
        if session == '2009':
            # 2009 files have a different delimiter and naming scheme.
            vote_data_url = 'ftp://www.ncleg.net/Bill_Status/Vote Data 2009.zip'
            naming_scheme = '{session}{file_label}.txt'
            delimiter = ";"
        else:
            vote_data_url = 'ftp://www.ncleg.net/Bill_Status/Votes%s.zip' % session
            naming_scheme = '{file_label}_{session}.txt'
            delimiter = "\t"
        fname, resp = self.urlretrieve(vote_data_url)
        # fname = "/Users/brian/Downloads/Vote Data 2009.zip"
        zf = ZipFile(fname)

        chamber_code = 'H' if chamber == 'lower' else 'S'

        # Members_YYYY.txt: tab separated
        # 0: id (unique only in chamber)
        # 1: H or S
        # 2: member name
        # 3-5: county, district, party
        # 6: mmUserId
        member_file = zf.open(naming_scheme.format(file_label='Members', session=session))
        members = {}
        for line in member_file.readlines():
            data = line.split(delimiter)
            if data[1] == chamber_code:
                members[data[0]] = data[2]

        # Votes_YYYY.txt
        # 0: sequence number
        # 1: chamber (S/H)
        # 2: date
        # 3: prefix
        # 4: bill_id
        # 5: yes votes
        # 6: no votes
        # 7: excused absences
        # 8: excused votes
        # 9: didn't vote (count)
        # 10: total yes+no
        # 11: sponsor
        # 12: reading info
        # 13: info
        # 20: PASSED/FAILED
        # 21: legislative day
        vote_file = zf.open(naming_scheme.format(file_label='Votes', session=session))
        bill_chambers = {'H':'lower', 'S':'upper'}
        votes = {}
        for line in vote_file.readlines():
            data = line.split(delimiter)
            if len(data) < 24:
                self.warning('line too short %s', data)
                continue
            if data[1] == chamber_code:
                date = datetime.datetime.strptime(data[2][:16],
                                                  '%Y-%m-%d %H:%M')
                if data[3][0] not in bill_chambers:
                    # skip votes that aren't on bills
                    self.log('skipping vote %s' % data[0])
                    continue

                votes[data[0]] = Vote(chamber, date, data[13],
                                      'PASS' in data[20],
                                      int(data[5]),
                                      int(data[6]),
                                      int(data[7])+int(data[8])+int(data[9]),
                                      bill_chamber=bill_chambers[data[3][0]],
                                      bill_id=data[3]+data[4], session=session)

        member_vote_file = zf.open(naming_scheme.format(file_label='MemberVotes', session=session))
        # 0: member id
        # 1: chamber (S/H)
        # 2: vote id
        # 3: vote chamber (always same as 1)
        # 4: vote (Y,N,E,X)
        # 5: pair ID (member)
        # 6: pair order
        # If a vote is paired then it should be counted as an 'other'
        for line in member_vote_file.readlines():
            data = line.split(delimiter)
            if data[1] == chamber_code:
                try:
                    member_voting = members[data[0]]
                except KeyError:
                    self.debug('Member %s not found.' % data[0])
                    continue
                try:
                    vote = votes[data[2]]
                except KeyError:
                    self.debug('Vote %s not found.' % data[2])
                    continue

                # -1 votes are Lt. Gov, not included in count, so we add them
                if data[4] == 'Y' and not data[5]:
                    if data[0] == '-1':
                        vote['yes_count'] += 1
                    vote.yes(member_voting)
                elif data[4] == 'N' and not data[5]:
                    if data[0] == '-1':
                        vote['no_count'] += 1
                    vote.no(member_voting)
                else:
                    # for some reason other_count is high for paired votes
                    if data[5]:
                        vote['other_count'] -= 1
                    # is either E: excused, X: no vote, or paired (doesn't count)
                    vote.other(member_voting)

        for vote in votes.itervalues():
            #vote.validate()
            vote.add_source(vote_data_url)
            self.save_vote(vote)

        # remove file
        zf.close()
        os.remove(fname)
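
One caveat: os.remove(fname) runs only on the success path here, so an exception while parsing the zip leaks the downloaded file. A sketch that guarantees the cleanup (fetch and handle_zip stand in for self.urlretrieve and the parsing above):

import os
from zipfile import ZipFile

def with_downloaded_zip(fetch, url, handle_zip):
    # fetch(url) must return (filename, response); the downloaded file
    # is removed even if parsing raises.
    fname, _resp = fetch(url)
    try:
        zf = ZipFile(fname)
        try:
            return handle_zip(zf)
        finally:
            zf.close()
    finally:
        os.remove(fname)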

Example 188

Project: pyNastran
Source File: test_dmig.py
View license
    def test_dmi_01(self):
        data = """
DMI         W2GJ       0       2       1       0            1200       1
DMI         W2GJ       1       1 1.54685.1353939.1312423.0986108.0621382
        .0369645.0257369.0234453.0255875.05652071.561626.1205361.1125278
        .0846353.0557613.0355958.0250237.0222578.0246823.05448051.532335
        .1076103.0988449.0754307.0525759.0348965.0245642.0227323.0263744
        .05396551.515989 .095201.0867341.0672852.0484031  .03261 .022431
        .0212566.0260411.05388151.487622.0829165.0753736.0586076.0422512
        .0278508.0174376.0167388 .023505.05292421.448751 .070779.0644553
        .0497634.0353554.0217712.0110981 .010631.0189924.0513639 1.25276
        .0588486.0540163.0412702.0286076.0158065.0047222.0041635.0132007
         .050093.6120345.0477668.0442399.0331648 .022064 .009834-1.828-3
        -2.503-3.0071487.0496528-.041134.0378177.0352355.0256046.0157419
        .0039734-8.065-3-8.842-3.0020994.05098221.446484.0286787.0268212
        .0183474.0095515-1.486-3 -.01338-.013646-6.974-4 .0543671.548042
        .0209779.0194154.0117029.0038052-6.315-3-.017763 -.01672-1.465-3
        .0581247-.993292.0142634 .012689.0053179-2.231-3-.012116-.022461
        -.020295-3.444-3.0594645-1.02737.0076634.0058206-1.478-3-9.128-3
         -.01927 -.02811-.025353-7.069-3.0581478-1.06258.0013462-7.586-4
        -7.972-3-.015515-.025103-.033856-.030101-.010728  .05707-1.08126
        -4.495-3-6.589-3-.013483-.020721-.029771-.037558-.032107-.012514
        .0580607-1.09659-9.834-3-.011544 -.01795-.024825-.033374 -.04009
        -.033981-.013454.05842491.327594-.014595-.015787 -.02156-.027993
        -.036203 -.04211-.035644-.014236.0587609-1.12656-.018867  -.0194
        -.024477-.030468-.038234-.043564-.036789-.014931.05923291.406868
        -.022712-.022633-.026994-.032539-.039873-.044645-.037676-.015477
        .0600661 -1.1403-.026208-.025684-.029346-.034437-.041619-.046053
        -.038965-.016419.0602822-1.15545-.029382-.028471 -.03156 -.03624
         -.04336-.048018-.040796-.018028.0592796-1.16853-.032213-.030919
        -.033614-.038048-.045083-.049868-.042604-.019698.0580287-1.17282
        -.034734-.032994-.035557-.039919-.046795-.051645 -.04442-.021465
        .0565324-1.18209-.036935-.034901-.037274-.041597 -.04831-.053297
        -.046237-.023328.05519341.553182-.038891-.036845-.038764-.042911
        -.049524-.054687-.048015-.025159.0543401-1.17264-.040588  -.0387
        -.040155 -.04408-.050704 -.05603-.049735-.027008.0538784 1.55343
        -.041944-.040277-.041628-.045311-.052116-.057595-.051508-.028825
        .0533671-1.17067-.043074-.041669-.043193 -.04673-.053791-.059407
        -.053284-.030358.0528413 -1.1734-.044205-.043085-.044977 -.04865
        -.055744 -.06141 -.05495-.031233.0523855-1.16752-.045483-.044569
        -.047071-.051162 -.05808-.063396-.056434-.031236.0518605-1.16843
         -.04698-.046239-.049258-.053844-.060565-.065508-.057742-.031123
         .051474-1.15854-.048786-.048225-.051408-.056377-.063191-.067552
        -.059057-.031243.0501356-1.12756-.050892-.050449-.053679-.058932
        -.065748-.069427-.060094 -.03156.0482373-1.11765-.053268-.052849
        -.056289-.061745-.068092-.070729-.060566-.031679.0464637-1.08369
        -.055984-.055508-.059297-.064792-.070378-.071549-.060739-.031831
        .0438902-1.03624 -.05879-.058529-.062614-.067938 -.07253-.072523
        -.061477-.033638.03692041.523722-.062845-.062397-.066421-.071335
        -.074786-.073668-.062967-.036846.0276397 -.11955-.066722-.066324
        -.070425-.074913-.077237-.075171 -.06493-.041043.0165293.9973973
        -.062269-.064098-.070488  -.0769-.080234-.079264-.071769-.051909
         -.00519.5332272-.043435-.050199 -.06278-.075336-.083821-.088398
        -.088075-.075685-.044054     601 1.54685.1353939.1312423.0986108
        .0621382.0369645.0257369.0234453.0255875.05652071.561626.1205361
        .1125278.0846353.0557613.0355958.0250237.0222578.0246823.0544805
        1.532335.1076103.0988449.0754307.0525759.0348965.0245642.0227323
        .0263744.05396551.515989 .095201.0867341.0672852.0484031  .03261
         .022431.0212566.0260411.05388151.487622.0829165.0753736.0586076
        .0422512.0278508.0174376.0167388 .023505.05292421.448751 .070779
        .0644553.0497634.0353554.0217712.0110981 .010631.0189924.0513639
         1.25276.0588486.0540163.0412702.0286076.0158065.0047222.0041635
        .0132007 .050093.6120345.0477668.0442399.0331648 .022064 .009834
        -1.828-3-2.503-3.0071487.0496528-.041134.0378177.0352355.0256046
        .0157419.0039734-8.065-3-8.842-3.0020994.05098221.446484.0286787
        .0268212.0183474.0095515-1.486-3 -.01338-.013646-6.974-4 .054367
        1.548042.0209779.0194154.0117029.0038052-6.315-3-.017763 -.01672
        -1.465-3.0581247-.993292.0142634 .012689.0053179-2.231-3-.012116
        -.022461-.020295-3.444-3.0594645-1.02737.0076634.0058206-1.478-3
        -9.128-3 -.01927 -.02811-.025353-7.069-3.0581478-1.06258.0013462
        -7.586-4-7.972-3-.015515-.025103-.033856-.030101-.010728  .05707
        -1.08126-4.495-3-6.589-3-.013483-.020721-.029771-.037558-.032107
        -.012514.0580607-1.09659-9.834-3-.011544 -.01795-.024825-.033374
         -.04009-.033981-.013454.05842491.327594-.014595-.015787 -.02156
        -.027993-.036203 -.04211-.035644-.014236.0587609-1.12656-.018867
          -.0194-.024477-.030468-.038234-.043564-.036789-.014931.0592329
        1.406868-.022712-.022633-.026994-.032539-.039873-.044645-.037676
        -.015477.0600661 -1.1403-.026208-.025684-.029346-.034437-.041619
        -.046053-.038965-.016419.0602822-1.15545-.029382-.028471 -.03156
         -.03624 -.04336-.048018-.040796-.018028.0592796-1.16853-.032213
        -.030919-.033614-.038048-.045083-.049868-.042604-.019698.0580287
        -1.17282-.034734-.032994-.035557-.039919-.046795-.051645 -.04442
        -.021465.0565324-1.18209-.036935-.034901-.037274-.041597 -.04831
        -.053297-.046237-.023328.05519341.553182-.038891-.036845-.038764
        -.042911-.049524-.054687-.048015-.025159.0543401-1.17264-.040588
          -.0387-.040155 -.04408-.050704 -.05603-.049735-.027008.0538784
         1.55343-.041944-.040277-.041628-.045311-.052116-.057595-.051508
        -.028825.0533671-1.17067-.043074-.041669-.043193 -.04673-.053791
        -.059407-.053284-.030358.0528413 -1.1734-.044205-.043085-.044977
         -.04865-.055744 -.06141 -.05495-.031233.0523855-1.16752-.045483
        -.044569-.047071-.051162 -.05808-.063396-.056434-.031236.0518605
        -1.16843 -.04698-.046239-.049258-.053844-.060565-.065508-.057742
        -.031123 .051474-1.15854-.048786-.048225-.051408-.056377-.063191
        -.067552-.059057-.031243.0501356-1.12756-.050892-.050449-.053679
        -.058932-.065748-.069427-.060094 -.03156.0482373-1.11765-.053268
        -.052849-.056289-.061745-.068092-.070729-.060566-.031679.0464637
        -1.08369-.055984-.055508-.059297-.064792-.070378-.071549-.060739
        -.031831.0438902-1.03624 -.05879-.058529-.062614-.067938 -.07253
        -.072523-.061477-.033638.03692041.523722-.062845-.062397-.066421
        -.071335-.074786-.073668-.062967-.036846.0276397 -.11955-.066722
        -.066324-.070425-.074913-.077237-.075171 -.06493-.041043.0165293
        .9973973-.062269-.064098-.070488  -.0769-.080234-.079264-.071769
        -.051909 -.00519.5332272-.043435-.050199 -.06278-.075336-.083821
        -.088398-.088075-.075685-.044054
        """
        with open('dmi.bdf', 'w') as bdf_file:
            bdf_file.write(data)
        model = BDF(debug=False)
        model.read_bdf('dmi.bdf', punch=True)
        w2gj = model.dmis['W2GJ']
        assert w2gj.shape == (1200, 1), w2gj.shape
        w2gj.get_matrix()

        real2 = []
        for i, real in enumerate(w2gj.Real):
            real2.append(0.1 * i)
        #w2gj.Real = real2
        #print(w2gj.GCi)  # varying (rows)
        #print(w2gj.GCj)  # constant (cols)

        model.write_bdf('dmi_out.bdf')

        model2 = BDF(debug=False)
        model2.read_bdf('dmi_out.bdf')
        w2gj_new = model2.dmis['W2GJ']
        assert w2gj_new.shape == (1200, 1), w2gj_new.shape

        assert array_equal(w2gj.GCi, w2gj_new.GCi)
        assert array_equal(w2gj.GCj, w2gj_new.GCj)
        assert array_equal(w2gj.Real, w2gj_new.Real)
        os.remove('dmi.bdf')
        os.remove('dmi_out.bdf')
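
The test writes a scratch deck, round-trips it, and removes both files at the end; an assertion failure in between would leave dmi.bdf and dmi_out.bdf behind. A sketch with guaranteed cleanup (round_trip and its arguments are our names, not pyNastran's API):

import os

def round_trip(model_cls, deck_text, infile='dmi.bdf', outfile='dmi_out.bdf'):
    # Write a scratch input deck, read/write/re-read it, and remove the
    # scratch files even if an assertion or parse error is raised.
    with open(infile, 'w') as handle:
        handle.write(deck_text)
    try:
        model = model_cls(debug=False)
        model.read_bdf(infile, punch=True)
        model.write_bdf(outfile)
        model2 = model_cls(debug=False)
        model2.read_bdf(outfile)
        return model, model2
    finally:
        for path in (infile, outfile):
            if os.path.exists(path):
                os.remove(path)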

Example 189

Project: openstates
Source File: bills.py
View license
    def parse_senate_vote(self, url):
        """ senate PDFs -> garbled text -> good text -> Vote """
        vote = Vote('upper', '?', 'senate passage', False, 0, 0, 0)
        vote.add_source(url)

        fname, resp = self.urlretrieve(url)
        # this gives us the cleaned up text
        sv_text = convert_sv_text(convert_pdf(fname, 'text'))
        os.remove(fname)
        in_votes = False
        flag = None
        overrides = {"ONEILL": "O'NEILL"}

        """ #this was 2014's vote_override, adding a new one so it breaks
        #when this comes up in the future
        vote_override = {("SB0112SVOTE.PDF", "RYAN"): vote.other,    # Recused
                         ("HB0144SVOTE.PDF", "SOULES"): vote.other,  # Recused
                         ("HJR15SVOTE.PDF", "KELLER"): vote.other,   # Recused
                        }
        """

        vote_override_2015 = {}

        # use in_votes as a sort of state machine
        for line in sv_text:
            # not 'in_votes', get date or passage

            if "bT" in line:  # Whatever generates this text renders the cross
                # in the table as a bT
                continue

            # GARBAGE_SPECIAL = ["'", "%", "$", "&"]
            # for x in GARBAGE_SPECIAL:
            #     for y in [" {} ", "{} ", " {}"]:
            #         line = line.replace(y.format(x), " ")

            if not in_votes:
                dmatch = re.search('DATE:\s+(\d{2}/\d{2}/\d{2})', line)
                if dmatch:
                    date = dmatch.groups()[0]
                    vote['date'] = datetime.strptime(date, '%m/%d/%y')

                els = re.findall("YES.*NO.*ABS.*EXC", line)
                if els != []:
                    flag = line[0]
                    in_votes = True

                if 'PASSED' in line:
                    vote['passed'] = True

            # in_votes: totals & votes
            else:
                if "|" not in line:
                    self.warning("NO DELIM!!! %s", line)
                    continue

                # totals
                if 'TOTALS' in line:
                    # Lt. Governor voted
                    if 'GOVERNOR' in line:
                        _, name, y, n, a, e = [
                            x.strip() for x in line.split("|")
                        ][:6]
                        assert name == "LT. GOVERNOR"
                        if y == "X":
                            vote.yes(name)
                        elif n == "X":
                            vote.no(name)
                        elif a == "X" or e == "X":
                            vote.other(name)
                        else:
                            raise ValueError("Bad parse")

                    name, yes, no, abs, exc = [
                        x.strip() for x in line.split("|")
                    ][6:-1]

                    vote['yes_count'] = int(yes)
                    vote['no_count'] = int(no)
                    vote['other_count'] = int(abs) + int(exc)
                    # no longer in votes
                    in_votes = False
                    continue

                # pull votes out
                matches = re.match(
                    ' ([A-Z,\'\-.]+)(\s+)X\s+([A-Z,\'\-.]+)(\s+)X', line)

                votes = [x.strip() for x in line.split("|")]
                vote1 = votes[:5]
                vote2 = votes[5:]

                for voted in [vote1, vote2]:
                    name = "".join(voted[:2])
                    if name in overrides:
                        name = overrides[name]
                        voted.pop(0)
                        voted[0] = name

                    name, yes, no, abs, exc = voted

                    if "District" in name:
                        continue

                    if yes == "X":
                        vote.yes(name)
                    elif no == "X":
                        vote.no(name)
                    elif abs == "X" or exc == "X":
                        vote.other(name)
                    else:
                        key = (os.path.basename(url), name)
                        if key in vote_override_2015:
                            vote_override_2015[key](name)
                        else:
                            raise ValueError("Bad parse")

        if not isinstance(vote['date'], datetime):
            return None

        return vote

Example 190

Project: mu-repo
Source File: action_add_commit_push.py
View license
def Run(params, add, commit, push):
    from .print_ import Print, CreateJoinedReposMsg

    args = params.args[1:]
    if commit and not args:
        git = params.config.git
        from mu_repo.execute_command import ExecuteCommand
        output = ExecuteCommand(
            [git] + 'config --get-regexp editor'.split(), '.', return_stdout=True)

        editors = []
        for line in output.splitlines():
            if line.startswith(b'core.editor '):
                line = line[len(b'core.editor '):]
                editors.append(line)

        if not editors:
            Print('Message for commit is required for git add -A & git commit -m command (or git core.editor must be configured).')
            return
        else:
            import tempfile
            import subprocess
            with tempfile.NamedTemporaryFile(delete=False, suffix='.txt') as f:
                f.write(b'\n\n')
                f.write(b'# Please enter the commit message for your changes. Lines starting\n')
                f.write(b'# with "#" will be ignored, and an empty message aborts the commit.\n')
            import sys
            args = editors[0].decode(sys.getfilesystemencoding()) + ' ' + f.name
            if hasattr(subprocess, 'run'):
                subprocess.run(args)
            else:
                subprocess.call(args)
            with open(f.name, 'r') as stream:
                lines = [x for x in stream.read().strip().splitlines() if not x.startswith('#')]

            contents = '\n'.join(lines)
            import os
            os.remove(f.name)
            if not contents:
                Print('Commit message not provided. Commit aborted.')
                return
            else:
                args = [contents]

    from .execute_parallel_command import ParallelCmd, ExecuteInParallelStackingMessages

    serial = params.config.serial
    if add:
        commands = [ParallelCmd(repo, [params.config.git, 'add', '-A']) for repo in params.config.repos]
        ExecuteInParallelStackingMessages(
            commands,
            lambda output: not output.stdout.strip(),
            lambda repos: Print(CreateJoinedReposMsg('Executed "git add -A" in:', repos)),
            serial=serial,
        )


    if commit:
        commit_msg = ' '.join(args)
        commands = [ParallelCmd(repo, [params.config.git, 'commit', '-m', commit_msg])
            for repo in params.config.repos]

        ExecuteInParallelStackingMessages(
            commands,
            lambda output: 'nothing to commit (working directory clean)' in output.stdout,
            lambda repos: Print(CreateJoinedReposMsg('Nothing to commit at:', repos)),
            serial=serial,
        )


    if push:
        from .get_repos_and_curr_branch import GetReposAndCurrBranch
        repos_and_curr_branch = GetReposAndCurrBranch(params)

        commands = [ParallelCmd(repo, [params.config.git, 'push', 'origin', branch])
            for (repo, branch) in repos_and_curr_branch]

        ExecuteInParallelStackingMessages(
            commands,
            lambda output: not output.stdout.strip() and output.stderr.strip() == 'Everything up-to-date',
            lambda repos: Print(CreateJoinedReposMsg('Up-to-date:', repos)),
            serial=serial,
        )
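
The editor round-trip above hinges on delete=False: the temporary file must outlive the with block so the external editor can open it by name, and os.remove() then handles cleanup manually. A standalone sketch of that pattern ('vi' is just a stand-in for the configured editor):

import os
import subprocess
import tempfile

# delete=False keeps the file on disk after the context manager closes it,
# so another process can open it by name.
with tempfile.NamedTemporaryFile(mode='w', suffix='.txt', delete=False) as f:
    f.write('\n\n# Lines starting with "#" will be ignored.\n')
    path = f.name

subprocess.call(['vi', path])  # blocks until the editor exits

with open(path) as stream:
    message = '\n'.join(
        x for x in stream.read().strip().splitlines() if not x.startswith('#'))
os.remove(path)  # manual cleanup is required with delete=False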

Example 191

Project: youtube-dl
Source File: f4m.py
View license
    def real_download(self, filename, info_dict):
        man_url = info_dict['url']
        requested_bitrate = info_dict.get('tbr')
        self.to_screen('[%s] Downloading f4m manifest' % self.FD_NAME)
        urlh = self.ydl.urlopen(man_url)
        man_url = urlh.geturl()
        # Some manifests may be malformed, e.g. prosiebensat1 generated manifests
        # (see https://github.com/rg3/youtube-dl/issues/6215#issuecomment-121704244
        # and https://github.com/rg3/youtube-dl/issues/7823)
        manifest = fix_xml_ampersands(urlh.read().decode('utf-8', 'ignore')).strip()

        doc = compat_etree_fromstring(manifest)
        formats = [(int(f.attrib.get('bitrate', -1)), f)
                   for f in self._get_unencrypted_media(doc)]
        if requested_bitrate is None or len(formats) == 1:
            # get the best format
            formats = sorted(formats, key=lambda f: f[0])
            rate, media = formats[-1]
        else:
            rate, media = list(filter(
                lambda f: int(f[0]) == requested_bitrate, formats))[0]

        base_url = compat_urlparse.urljoin(man_url, media.attrib['url'])
        bootstrap_node = doc.find(_add_ns('bootstrapInfo'))
        # From Adobe F4M 3.0 spec:
        # The <baseURL> element SHALL be the base URL for all relative
        # (HTTP-based) URLs in the manifest. If <baseURL> is not present, said
        # URLs should be relative to the location of the containing document.
        boot_info, bootstrap_url = self._parse_bootstrap_node(bootstrap_node, man_url)
        live = boot_info['live']
        metadata_node = media.find(_add_ns('metadata'))
        if metadata_node is not None:
            metadata = base64.b64decode(metadata_node.text.encode('ascii'))
        else:
            metadata = None

        fragments_list = build_fragments_list(boot_info)
        test = self.params.get('test', False)
        if test:
            # We only download the first fragment
            fragments_list = fragments_list[:1]
        total_frags = len(fragments_list)
        # For some akamai manifests we'll need to add a query to the fragment url
        akamai_pv = xpath_text(doc, _add_ns('pv-2.0'))

        ctx = {
            'filename': filename,
            'total_frags': total_frags,
            'live': live,
        }

        self._prepare_frag_download(ctx)

        dest_stream = ctx['dest_stream']

        write_flv_header(dest_stream)
        if not live:
            write_metadata_tag(dest_stream, metadata)

        base_url_parsed = compat_urllib_parse_urlparse(base_url)

        self._start_frag_download(ctx)

        frags_filenames = []
        while fragments_list:
            seg_i, frag_i = fragments_list.pop(0)
            name = 'Seg%d-Frag%d' % (seg_i, frag_i)
            query = []
            if base_url_parsed.query:
                query.append(base_url_parsed.query)
            if akamai_pv:
                query.append(akamai_pv.strip(';'))
            if info_dict.get('extra_param_to_segment_url'):
                query.append(info_dict['extra_param_to_segment_url'])
            url_parsed = base_url_parsed._replace(path=base_url_parsed.path + name, query='&'.join(query))
            frag_filename = '%s-%s' % (ctx['tmpfilename'], name)
            try:
                success = ctx['dl'].download(frag_filename, {'url': url_parsed.geturl()})
                if not success:
                    return False
                (down, frag_sanitized) = sanitize_open(frag_filename, 'rb')
                down_data = down.read()
                down.close()
                reader = FlvReader(down_data)
                while True:
                    try:
                        _, box_type, box_data = reader.read_box_info()
                    except DataTruncatedError:
                        if test:
                            # In tests, segments may be truncated, and thus
                            # FlvReader may not be able to parse the whole
                            # chunk. If so, write the segment as is
                            # See https://github.com/rg3/youtube-dl/issues/9214
                            dest_stream.write(down_data)
                            break
                        raise
                    if box_type == b'mdat':
                        dest_stream.write(box_data)
                        break
                if live:
                    os.remove(encodeFilename(frag_sanitized))
                else:
                    frags_filenames.append(frag_sanitized)
            except (compat_urllib_error.HTTPError, ) as err:
                if live and (err.code == 404 or err.code == 410):
                    # We didn't keep up with the live window. Continue
                    # with the next available fragment.
                    msg = 'Fragment %d unavailable' % frag_i
                    self.report_warning(msg)
                    fragments_list = []
                else:
                    raise

            if not fragments_list and not test and live and bootstrap_url:
                fragments_list = self._update_live_fragments(bootstrap_url, frag_i)
                total_frags += len(fragments_list)
                if fragments_list and (fragments_list[0][1] > frag_i + 1):
                    msg = 'Missed %d fragments' % (fragments_list[0][1] - (frag_i + 1))
                    self.report_warning(msg)

        self._finish_frag_download(ctx)

        for frag_file in frags_filenames:
            os.remove(encodeFilename(frag_file))

        return True
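
Note the two cleanup schedules above: live streams remove each fragment file as soon as its mdat box is written, while normal downloads collect the names and remove them all only after the download finishes. A minimal sketch of that strategy, independent of youtube-dl's downloader internals:

import os

def merge_fragments(frag_paths, dest_path, live=False):
    # Append each fragment to the destination, then delete the temp files:
    # immediately in live mode, in one pass at the end otherwise.
    pending = []
    with open(dest_path, 'wb') as dest:
        for path in frag_paths:
            with open(path, 'rb') as frag:
                dest.write(frag.read())
            if live:
                os.remove(path)       # live: drop each fragment right away
            else:
                pending.append(path)  # normal: keep until the merge succeeds
    for path in pending:
        os.remove(path)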

Example 192

Project: pyglossary
Source File: glossary.py
View license
    def read(
        self,
        filename,
        format='',
        direct=False,
        progressbar=True,
        **options
    ):
        """
        filename (str): name/path of input file
        format (str): name of inout format,
                      or '' to detect from file extention
        direct (bool): enable direct mode
        """
        filename = abspath(filename)

        # don't allow direct=False when there are readers
        # (read is called before with direct=True)
        if self._readers and not direct:
            raise ValueError(
                'there are already %s readers' % len(self._readers) +
                ', you can not read with direct=False mode'
            )

        self.updateEntryFilters()
        ###
        delFile = False
        ext = get_ext(filename)
        if ext in ('.gz', '.bz2', '.zip'):
            if ext == '.bz2':
                output, error = subprocess.Popen(
                    ['bzip2', '-dk', filename],
                    stdout=subprocess.PIPE,
                ).communicate()
                # -k ==> keep original bz2 file
                # bunzip2 ~= bzip2 -d
                if error:
                    log.error(
                        error + '\n' +
                        'failed to decompress file "%s"' % filename
                    )
                    return False
                else:
                    filename = filename[:-4]
                    ext = get_ext(filename)
                    delFile = True
            elif ext == '.gz':
                output, error = subprocess.Popen(
                    ['gzip', '-dc', filename],
                    stdout=subprocess.PIPE,
                ).communicate()
                # -c ==> write to stdout (we want to keep original gz file)
                # gunzip ~= gzip -d
                if error:
                    log.error(
                        error + '\n' +
                        'failed to decompress file "%s"' % filename
                    )
                    return False
                else:
                    filename = filename[:-3]
                    open(filename, 'w').write(output)
                    ext = get_ext(filename)
                    delFile = True
            elif ext == '.zip':
                output, error = subprocess.Popen(
                    ['unzip', filename, '-d', dirname(filename)],
                    stdout=subprocess.PIPE,
                ).communicate()
                if error:
                    log.error(
                        error + '\n' +
                        'failed to decompress file "%s"' % filename
                    )
                    return False
                else:
                    filename = filename[:-4]
                    ext = get_ext(filename)
                    delFile = True
        if not format:
            for key in Glossary.formatsExt.keys():
                if ext in Glossary.formatsExt[key]:
                    format = key
            if not format:
                # if delFile:
                #    os.remove(filename)
                log.error('Unknown extension "%s" for read support!' % ext)
                return False
        validOptionKeys = self.formatsReadOptions[format]
        for key in list(options.keys()):
            if key not in validOptionKeys:
                log.error(
                    'Invalid read option "%s" ' % key +
                    'given for %s format' % format
                )
                del options[key]

        filenameNoExt, ext = splitext(filename)
        if not ext.lower() in self.formatsExt[format]:
            filenameNoExt = filename

        self._filename = filenameNoExt
        if not self.getInfo('name'):
            self.setInfo('name', split(filename)[1])
        self._progressbar = progressbar

        try:
            Reader = self.readerClasses[format]
        except KeyError:
            if direct:
                log.warning(
                    'no `Reader` class found in %s plugin' % format +
                    ', falling back to indirect mode'
                )
            result = self.readFunctions[format].__call__(
                self,
                filename,
                **options
            )
            # if not result:## FIXME
            #    return False
            if delFile:
                os.remove(filename)
        else:
            reader = Reader(self)
            reader.open(filename, **options)
            if direct:
                self._readers.append(reader)
                log.info(
                    'using Reader class from %s plugin' % format +
                    ' for direct conversion without loading into memory'
                )
            else:
                self.loadReader(reader)

        self._updateIter()

        return True
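
The delFile flag above is the key detail: the reader may be handed a decompressed copy rather than the file the caller named, and only that copy is removed afterwards. A sketch of the same bookkeeping using the stdlib gzip module instead of shelling out to gzip/bzip2/unzip:

import gzip
import os
import shutil

def read_possibly_gzipped(filename):
    del_file = False
    if filename.endswith('.gz'):
        plain = filename[:-3]
        with gzip.open(filename, 'rb') as src, open(plain, 'wb') as dst:
            shutil.copyfileobj(src, dst)  # keep the original .gz intact
        filename, del_file = plain, True
    try:
        with open(filename, 'rb') as f:
            return f.read()
    finally:
        if del_file:
            os.remove(filename)  # only delete the copy we created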

Example 193

Project: galah
Source File: zip_bulk_submissions.py
View license
def _zip_bulk_submissions(archive_id, requester, assignment, email = ""):
    archive_id = ObjectId(archive_id)

    archive_file = temp_directory = ""

    # Find any expired archives and remove them
    deleted_files = []
    for i in Archive.objects(expires__lt = datetime.datetime.today()):
        deleted_files.append(i.file_location)

        if i.file_location:
            try:
                os.remove(i.file_location)
            except OSError as e:
                logger.warning(
                    "Could not remove expired archive at %s: %s.",
                    i.file_location, str(e)
                )

        i.delete()

    if deleted_files:
        logger.info("Deleted archives %s.", str(deleted_files))

    # This is the archive object we will eventually add to the database
    new_archive = Archive(
        id = archive_id,
        requester = requester,
        archive_type = "assignment_package"
    )

    temp_directory = archive_file = None
    try:
        # Form the query
        query = {"assignment": ObjectId(assignment)}

        # Only mention email in the query if it's not None or the empty
        # string, otherwise mongo will look for submissions that list the
        # user as None or the empty string (which should be exactly none of
        # the submission in the system).
        if email:
            query["user"] = email
        else:
            # Otherwise, we need to be careful not to get teacher/TA submissions.
            assn = Assignment.objects.get(id = ObjectId(assignment))
            students = User.objects(
                account_type="student",
                classes = assn.for_class
            )
            query["user__in"] = [i.id for i in students]

        # Grab all the submissions
        submissions = list(Submission.objects(**query))

        if not submissions:
            logger.info("No submissions found matching query.")
            return

        # Organize all the submissions by user name, as this will closely
        # match the structure of the archive we will build.
        submission_map = {}
        for i in submissions:
            if i.user in submission_map:
                submission_map[i.user].append(i)
            else:
                submission_map[i.user] = [i]

        # Create a temporary directory we will create our archive in.
        temp_directory = tempfile.mkdtemp()

        # Create our directory tree. Instead of making new folders for each
        # submission and copying the user's files over however, we will
        # create symlinks to save space and time.
        for user, user_submissions in submission_map.items():
            # Create a directory for the user
            os.makedirs(os.path.join(temp_directory, user))

            # Create symlinks for all his submissions. Each symlink is
            # named after the submission date.
            for i in user_submissions:
                time_stamp = i.timestamp.strftime("%Y-%m-%d-%H-%M-%S")
                symlink_path = \
                    os.path.join(temp_directory, user, time_stamp)

                # In the highly unlikely event that two of the same user's
                # submissions have the same exact time stamp, we'll need to
                # add a marker to the end of the timestamp.
                marker = 0
                while os.path.exists(symlink_path +
                        ("-%d" % marker if marker > 0 else "")):
                    marker += 1

                if marker > 0:
                    symlink_path += "-%d" % marker

                original_path = i.getFilePath()

                # Detect if the submission's files are still on the filesystem
                if os.path.isdir(original_path):
                    # Create a symlink pointing to the actual submission
                    # directory with the name we generated
                    os.symlink(original_path, symlink_path)
                else:
                    # Create an empty text file marking the fact that a
                    # submissions existed but is no longer available.
                    open(symlink_path, "w").close()

        # Create the actual archive file.
        # TODO: Create it in galah's /var/ directory
        file_descriptor, archive_file = tempfile.mkstemp(suffix = ".zip")
        os.close(file_descriptor)

        # Run zip and do the actual archiving. Will block until it's finished.
        zipdir(temp_directory, archive_file)

        new_archive.file_location = archive_file

        new_archive.expires = \
            datetime.datetime.today() + config["TEACHER_ARCHIVE_LIFETIME"]

        new_archive.save(force_insert = True)
    except Exception as e:
        # If we created a temporary archive file we need to delete it.
        new_archive.file_location = None
        if archive_file:
            os.remove(archive_file)

        new_archive.error_string = str(e)
        new_archive.save(force_insert = True)

        raise
    finally:
        if temp_directory:
            shutil.rmtree(temp_directory)
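
The expiry sweep above deliberately logs and skips OSError instead of letting one undeletable path abort the whole pass. A minimal sketch of that style, using file mtimes in place of the Archive documents:

import logging
import os
import time

logger = logging.getLogger(__name__)

def purge_expired(paths, max_age_seconds):
    now = time.time()
    for path in paths:
        try:
            if now - os.path.getmtime(path) > max_age_seconds:
                os.remove(path)
        except OSError as e:
            # Log and keep going; one bad path should not stop the sweep.
            logger.warning("Could not remove expired file %s: %s", path, e)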

Example 194

Project: RoboGif
Source File: recorder.py
View license
@click.command(options_metavar="[options]")
@click.argument("filename", type=click.Path(exists=False, writable=True, resolve_path=True), metavar="<filename>.<gif|mp4>")
@click.option('-i', '--input-file', type=str, help="Convert input mp4 file to optimized gif")
@click.option('-s', '--size', type=int, default=480, help="Size of the shortest side of the output gif/video. Defaults to 480.")
@click.option('-f', '--fps', type=int, help="Framerate of the output gif/video. Defaults to 15 for GIF and 60 for MP4.")
@click.option('-vq', '--video-quality', type=int, default=24, help="Video quality of the output video - the value is x264 CRF. Default is 24, lower number means better quality.")
@click.help_option()
@click.version_option(version=VERSION, prog_name="RoboGif")
def run(filename=None, input_file=None, size=None, fps=None, video_quality=None):
    """
    Records Android device screen to an optimized GIF or MP4 file. The type of the output is chosen depending on the file extension.
    """

    print("RoboGif Recorder v%s" % (VERSION,))
    check_requirements()
    output_video_mode = False

    if not (filename.lower().endswith(".mp4") or filename.lower().endswith(".gif")):
        print("Usage: %s [output filename].[mp4|gif]" % (sys.argv[0], ))
        print(t.red("Filename must either end with"), t.green("mp4"), t.red("for video or"), t.green("gif"), t.red("for a GIF."))
        print
        sys.exit(-4)

    if filename.lower().endswith(".mp4"):
        output_video_mode = True

    if fps is None:
        if output_video_mode:
            fps = 60
        else:
            fps = 15

    # Convert file if input is passed
    if input_file is not None:
        if output_video_mode:
            print(t.red("There's no point in converting video to video!"))
            sys.exit(-4)

        create_optimized_gif(input_file, filename, size, fps)
        sys.exit(0)

    # Show device chooser if more than one device is selected
    device_id = None
    devices = get_devices()
    if len(devices) == 0:
        print(t.red("No adb devices found, connect one."))
        sys.exit(-3)
    elif len(devices) == 1:
        device_id = list(devices.keys())[0]
    else:
        device_id = get_chosen_device(devices)

    print(t.green("Starting recording on %s..." % (device_id, )))
    print(t.yellow("Press Ctrl+C to stop recording."))

    recorder = subprocess.Popen(["adb", "-s", device_id, "shell", "screenrecord", "--bit-rate", "8000000", "/sdcard/tmp_record.mp4"])
    try:
        while recorder.poll() is None:
            time.sleep(0.2)
    except KeyboardInterrupt:
        pass

    try:
        recorder.send_signal(signal.SIGTERM)
        recorder.wait()
    except OSError:
        print
        print(t.red("Recording has failed, it's possible that your device does not support recording."))
        print(t.normal + "Recording is supported on devices running KitKat (4.4) or newer.")
        print(t.normal + "Genymotion and stock emulator do not support it.")
        print
        sys.exit(-3)
    # We need to wait for MOOV item to be written
    time.sleep(2)

    print(t.green("Recording done, downloading file...."))
    tmp_video_file = get_new_temp_file_path("mp4")

    # Download file and cleanup
    try:
        subprocess.check_call(["adb", "-s", device_id, "pull", "/sdcard/tmp_record.mp4", tmp_video_file])
        subprocess.check_call(["adb", "-s", device_id, "shell", "rm", "/sdcard/tmp_record.mp4"])

        if output_video_mode:
            create_optimized_video(tmp_video_file, filename, size, fps, video_quality)
        else:
            create_optimized_gif(tmp_video_file, filename, size, fps)

    except subprocess.CalledProcessError:
        print(t.red("Could not download recording from the device."))
        sys.exit(-1)
    finally:
        os.remove(tmp_video_file)
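
The finally clause above is what makes the temporary capture safe: the file is removed whether conversion succeeds, the adb pull fails, or sys.exit() fires. A condensed sketch (convert stands in for the gif/video optimizers):

import os
import subprocess
import tempfile

def pull_and_convert(convert):
    fd, tmp_path = tempfile.mkstemp(suffix='.mp4')
    os.close(fd)  # only the path is needed; the tools reopen the file
    try:
        subprocess.check_call(
            ['adb', 'pull', '/sdcard/tmp_record.mp4', tmp_path])
        convert(tmp_path)
    finally:
        os.remove(tmp_path)  # runs on success, failure, and sys.exit()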

Example 195

Project: picrust
Source File: parallel_predict_traits.py
View license
def main():
    option_parser, opts, args =\
                   parse_command_line_parameters(**script_info)

    tmp_dir='jobs/'
    make_output_dir(tmp_dir)

    #Run the jobs
    script_fp = join(get_picrust_project_dir(),'scripts','predict_traits.py')

    if(opts.parallel_method=='sge'):
        cluster_jobs_fp=join(get_picrust_project_dir(),'scripts','start_parallel_picrust_jobs_sge.py')
    elif(opts.parallel_method=='multithreaded'):
        cluster_jobs_fp=join(get_picrust_project_dir(),'scripts','start_parallel_picrust_jobs.py')
    elif(opts.parallel_method=='torque'):
        cluster_jobs_fp=join(get_picrust_project_dir(),'scripts','start_parallel_picrust_jobs_torque.py')
    else:
        raise RuntimeError

    if(opts.verbose):
        print "Loading tree..."
        
    tree = load_picrust_tree(opts.tree, opts.verbose)

    all_tips = [tip.Name for tip in tree.tips()]
    
    if(opts.verbose):
        print "Total number of possible tips to predict: {0}".format(len(all_tips))

    created_tmp_files=[]
    output_files={}
    output_files['counts']=[]
    if opts.reconstruction_confidence:
        output_files['variances']=[]
        output_files['upper_CI']=[]
        output_files['lower_CI']=[]

    if opts.already_calculated:
        all_tips=get_tips_not_in_precalc(all_tips,opts.already_calculated)
        if opts.verbose:
            print "After taking into account tips already predicted, the number of tips left to predict is: {0}".format(len(all_tips))

    #create a tmp file to store the job commands (which we will pass to our parallel script to run)
    jobs_fp=get_tmp_filename(tmp_dir=tmp_dir,prefix='jobs_')
    jobs=open(jobs_fp,'w')
    created_tmp_files.append(jobs_fp)

    if(opts.verbose):
        print "Creating temporary input files in: ",tmp_dir
    
    num_tips_per_job=1000
    for tips_to_predict in [all_tips[i:i+num_tips_per_job] for i in range(0, len(all_tips), num_tips_per_job)]:
        
        #create tmp output files
        tmp_output_fp=get_tmp_filename(tmp_dir=tmp_dir,prefix='out_predict_traits_')
        output_files['counts'].append(tmp_output_fp)

        tip_to_predict_str=','.join(list(tips_to_predict))

        if opts.reconstruction_confidence:
            outfile_base,extension = splitext(tmp_output_fp)
            output_files['variances'].append(outfile_base+"_variances.tab")
            output_files['upper_CI'].append(outfile_base+"_upper_CI.tab")
            output_files['lower_CI'].append(outfile_base+"_lower_CI.tab")
            
            #create the job command
            cmd= "{0} -i {1} -t {2} -r {3} -c {4} -g {5} -o {6}".format(script_fp, opts.observed_trait_table, opts.tree, opts.reconstructed_trait_table, opts.reconstruction_confidence, tip_to_predict_str, tmp_output_fp)

        else:
            cmd= "{0} -i {1} -t {2} -r {3} -g {4} -o {5}".format(script_fp, opts.observed_trait_table, opts.tree, opts.reconstructed_trait_table, tip_to_predict_str, tmp_output_fp)
            

        #NOTE: Calculating NSTI this way is convenient, 
        #but would probably be faster if we ran the NSTI calculation separately (using the --output_accuracy_metrics_only option) and added it to the output file later on.
        if opts.calculate_accuracy_metrics:
            cmd=cmd+" -a"

        #add job command to the the jobs file
        jobs.write(cmd+"\n")

    jobs.close()

    #add all output files to tmp list (used later for deletion)
    for predict_type in output_files:
        created_tmp_files.extend(output_files[predict_type])
    if(opts.verbose):
        print "Launching parallel jobs."
        
    #run the job command
    job_prefix='picrust'
    submit_jobs(cluster_jobs_fp ,jobs_fp,job_prefix,num_jobs=opts.num_jobs,delay=opts.delay)

    if(opts.verbose):
        print "Jobs are now running. Will wait until finished."

    #wait until all jobs finished (e.g. simple poller)
    wait_for_output_files(output_files['counts'])

    if(opts.verbose):
        print "Jobs are done running."

    make_output_dir_for_file(opts.output_trait_table)
    outfile_base,extension = splitext(opts.output_trait_table)
    for predict_type in sorted(output_files):
        # Combine output files
        if opts.verbose:
            print "Combining all output files for "+ predict_type

        combined_predictions=combine_predict_trait_output(output_files[predict_type])
        
        if opts.verbose:
            print "Writing combined file for "+predict_type

        if predict_type == 'counts':
            # Output in whatever format the user wants
            if opts.output_precalc_file_in_biom:
                open(opts.output_trait_table,'w').write(format_biom_table(convert_precalc_to_biom(combined_predictions)))
            else:
                open(opts.output_trait_table,'w').write(combined_predictions)
        else:
            if opts.output_precalc_file_in_biom:
                open(outfile_base+"_"+predict_type+".biom",'w').write(format_biom_table(convert_precalc_to_biom(combined_predictions)))
            else:
                open(outfile_base+"_"+predict_type+".tab",'w').write(combined_predictions)    
        
    #clean up all tmp files
    for file in created_tmp_files:
        remove(file)
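
created_tmp_files above is simple but effective bookkeeping: record every temporary path the moment it is created, then clean up with one loop at the end. A sketch of the same idea:

import os
import tempfile

created_tmp_files = []

def new_tmp_file(prefix):
    fd, path = tempfile.mkstemp(prefix=prefix)
    os.close(fd)
    created_tmp_files.append(path)  # registered as soon as it exists
    return path

def cleanup():
    for path in created_tmp_files:
        try:
            os.remove(path)
        except OSError:
            pass  # already gone; nothing to do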

Example 196

Project: SecureCRT
Source File: IntStatsOverTime.py
View license
def Main():

    def GetInterfaceSamples(ParseIntfStats):
        for i in range(stat_count):
            sample_time = datetime.now().strftime("%I:%M:%S")
            timestamps.append(sample_time)

            start = time.clock()
            # Generate filename used for output files.
            fullFileName = GetFilename(session, settings, "int_summary")

            # Save raw output to a file.  Dumping directly to a var has problems with
            # large outputs
            tab.Send('\n')
            WriteOutput(session, SendCmd, fullFileName)

            if stat_count != (i + 1):
                # Print status to the Cisco prompt to keep user aware of progress
                # This must start with ! to be a Cisco comment, to prevent in-terminal errors
                warning_msg = "! {0} samples left. DO NOT TYPE IN WINDOW.".format(stat_count - (i + 1))
                tab.Send(warning_msg + '\n')
                tab.WaitForString(session['prompt'])

            # Read text file into a list of lines (no line endings)
            intf_raw = ReadFileToList(fullFileName)

            # If the settings allow it, delete the temporary file that holds show cmd output
            if settings['delete_temp']:    
                os.remove(fullFileName + ".txt")

            summarytable = ParseIntfStats(intf_raw)

            for stat in measurements:
                for entry in summarytable:
                    if entry['Interface'] in output[stat]:
                        output[stat][entry['Interface']][sample_time] = entry[stat]
                    else:
                        output[stat][entry['Interface']] = {}
                        output[stat][entry['Interface']][sample_time] = entry[stat]

            end = time.clock()
            if interval - (end - start) > 0:
                if stat_count != (i + 1):
                    time.sleep(interval - (end - start))
            else:
                 crt.Dialog.MessageBox("Did not complete within interval time", 
                                    "Took Too Long", ICON_STOP)
                 sys.exit(0)


    SupportedOS = ["IOS", "IOS XE", "NX-OS"]
    
    # Run session start commands and save session information into a dictionary
    session = StartSession(crt)
    SendCmd = "show interface"
    tab = session['tab']

    output = {}
    for name in measurements:
        output[name] = {}

    timestamps = []

    if session['OS'] in SupportedOS:
        if session['OS'] == "NX-OS":
            GetInterfaceSamples(ParseNXOSIntfStats)
        else:
            GetInterfaceSamples(ParseIOSIntfStats)
    else:
        error_str = "This script does not support {}.\n" \
                    "It will currently only run on IOS Devices.".format(session['OS'])
        crt.Dialog.MessageBox(error_str, "Unsupported Network OS", 16)
    
    field_names = [ "Interface" ]
    field_names.extend(timestamps)

    fullFileName = GetFilename(session, settings, "graph")
    
    for stat in measurements:
        temp_csv_list = []
        header = [ [stat] ]
        empty_line = [ [] ] 
        for key in sorted(output[stat].keys(), key=alphanum_key):
            temp_dict = { "Interface" : key }
            temp_dict.update(output[stat][key])
            temp_csv_list.append(temp_dict)
        ListToCSV(header, fullFileName, mode='ab')
        DictListToCSV(field_names, temp_csv_list, fullFileName, mode='ab')
        # Add separator line
        ListToCSV(empty_line, fullFileName, mode='ab')

    EndSession(session)
    crt.Dialog.MessageBox("Interface Statistic Gathering Complete", "Script Complete", 64)

Example 197

Project: tp-libvirt
Source File: macvtap.py
View license
def run(test, params, env):
    """
    This test is for macvtap nic

    1. Check and backup environment
    2. Configure guest, add new nic and set a static ip address
    3. According to nic mode, start test
    4. Recover environment
    """
    vm_names = params.get("vms").split()
    remote_ip = params.get("remote_ip", "ENTER.YOUR.REMOTE.IP")
    iface_mode = params.get("mode", "vepa")
    eth_card_no = params.get("eth_card_no", "ENTER.YOUR.DEV.NAME")
    vm1_ip = params.get("vm1_ip", "ENTER.YOUR.GUEST1.IP")
    vm2_ip = params.get("vm2_ip", "ENTER.YOUR.GUEST2.IP")
    eth_config_file = params.get("eth_config_file",
                                 "ENTER.YOUR.CONFIG.FILE.PATH")
    persistent_net_file = params.get("persistent_net_file",
                                     "ENTER.YOUR.RULE.FILE.PATH")

    param_keys = ["remote_ip", "vm1_ip", "vm2_ip", "eth_card_no",
                  "eth_config_file", "persistent_net_file"]
    param_values = [remote_ip, vm1_ip, vm2_ip, eth_card_no,
                    eth_config_file, persistent_net_file]
    for key, value in zip(param_keys, param_values):
        if value.count("ENTER.YOUR"):
            raise error.TestNAError("Parameter '%s'(%s) is not configured."
                                    % (key, value))

    vm1 = env.get_vm(vm_names[0])
    vm2 = None
    if len(vm_names) > 1:
        vm2 = env.get_vm(vm_names[1])

    if eth_card_no not in utils_net.get_net_if():
        raise error.TestNAError("Device %s do not exists." % eth_card_no)
    try:
        iface_cls = utils_net.Interface(eth_card_no)
        origin_status = iface_cls.is_up()
        if not origin_status:
            iface_cls.up()
    except process.CmdError, detail:
        raise error.TestNAError(str(detail))
    br_cls = utils_net.Bridge()
    if eth_card_no in br_cls.list_iface():
        raise error.TestNAError("%s has been used!" % eth_card_no)
    vmxml1 = vm_xml.VMXML.new_from_inactive_dumpxml(vm_names[0])
    if vm2:
        vmxml2 = vm_xml.VMXML.new_from_inactive_dumpxml(vm_names[1])

    def guest_config(vm, ip_addr):
        """
        Add a new nic to guest and set a static ip address

        :param vm: Configured guest
        :param ip_addr: Set ip address
        """
        # Attach an interface device
        # Use attach-device, not attach-interface, because attach-interface
        # doesn't support 'direct'
        interface_class = vm_xml.VMXML.get_device_class('interface')
        interface = interface_class(type_name="direct")
        interface.source = dict(dev=str(eth_card_no), mode=str(iface_mode))
        interface.model = "virtio"
        interface.xmltreefile.write()
        if vm.is_alive():
            vm.destroy(gracefully=False)
        virsh.attach_device(vm.name, interface.xml, flagstr="--config")
        os.remove(interface.xml)
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
        new_nic = vmxml.get_devices(device_type="interface")[-1]

        # Modify new interface's IP
        vm.start()
        session = vm.wait_for_login()
        eth_name = utils_net.get_linux_ifname(session, new_nic.mac_address)
        eth_config_detail_list = ['DEVICE=%s' % eth_name,
                                  'HWADDR=%s' % new_nic.mac_address,
                                  'ONBOOT=yes',
                                  'BOOTPROTO=static',
                                  'IPADDR=%s' % ip_addr]
        remote_file = remote.RemoteFile(vm.get_address(), 'scp', 'root',
                                        params.get('password'), 22,
                                        eth_config_file)
        remote_file.truncate()
        remote_file.add(eth_config_detail_list, linesep='\n')
        try:
            # Attached interface maybe already active
            session.cmd("ifdown %s" % eth_name)
        except aexpect.ShellCmdError:
            raise error.TestFail("ifdown %s failed." % eth_name)

        try:
            session.cmd("ifup %s" % eth_name)
        except aexpect.ShellCmdError:
            raise error.TestFail("ifup %s failed." % eth_name)
        return session

    def guest_clean(vm, vmxml):
        """
        Recover guest configuration

        :param: Recovered guest
        """
        if vm.is_dead():
            vm.start()
        session = vm.wait_for_login()
        session.cmd("rm -f %s" % eth_config_file)
        session.cmd("sync")
        try:
            # Delete the last 3 lines
            session.cmd('sed -i "$[$(cat %s | wc -l) - 2],$"d %s'
                        % (persistent_net_file, persistent_net_file))
            session.cmd("sync")
        except aexpect.ShellCmdError:
            # This file may not exists
            pass
        vm.destroy()
        vmxml.sync()

    def vepa_test(session):
        """
        vepa mode test.
        Check guest can ping remote host
        """
        ping_s, _ = ping(remote_ip, count=1, timeout=5, session=session)
        if ping_s:
            raise error.TestFail("%s ping %s failed." % (vm1.name, remote_ip))

    def private_test(session):
        """
        private mode test.
        Check guest cannot ping other guest, but can ping remote host
        """
        ping_s, _ = ping(remote_ip, count=1, timeout=5, session=session)
        if ping_s:
            raise error.TestFail("%s ping %s failed." % (vm1.name, remote_ip))
        ping_s, _ = ping(vm2_ip, count=1, timeout=5, session=session)
        if not ping_s:
            raise error.TestFail("%s ping %s succeed, but expect failed."
                                 % (vm1.name, vm2.name))
        try:
            iface_cls.down()
        except process.CmdError, detail:
            raise error.TestNAError(str(detail))
        ping_s, _ = ping(vm2_ip, count=1, timeout=5, session=session)
        if not ping_s:
            raise error.TestFail("%s ping %s succeed, but expect failed."
                                 % (vm1.name, remote_ip))

    def passthrough_test(session):
        """
        passthrough mode test.
        Check guest can ping remote host.
        When guest is running, local host cannot ping remote host;
        when guest is powered off, local host can ping remote host.
        """
        ping_s, _ = ping(remote_ip, count=1, timeout=5, session=session)
        if ping_s:
            raise error.TestFail("%s ping %s failed."
                                 % (vm1.name, remote_ip))
        ping_s, _ = ping(remote_ip, count=1, timeout=5)
        if not ping_s:
            raise error.TestFail("host ping %s succeed, but expect fail."
                                 % remote_ip)
        vm1.destroy(gracefully=False)
        ping_s, _ = ping(remote_ip, count=1, timeout=5)
        if ping_s:
            raise error.TestFail("host ping %s failed."
                                 % remote_ip)

    def bridge_test(session):
        """
        bridge mode test.
        Check guest can ping remote host
        guest can ping other guest when macvtap nic is up
        guest cannot ping remote host when macvtap nic is down
        """
        ping_s, _ = ping(remote_ip, count=1, timeout=5, session=session)
        if ping_s:
            raise error.TestFail("%s ping %s failed."
                                 % (vm1.name, remote_ip))
        ping_s, _ = ping(vm2_ip, count=1, timeout=5, session=session)
        if ping_s:
            raise error.TestFail("%s ping %s failed."
                                 % (vm1.name, vm2.name))
        try:
            iface_cls.down()
        except process.CmdError, detail:
            raise error.TestNAError(str(detail))
        ping_s, _ = ping(remote_ip, count=1, timeout=5, session=session)
        if not ping_s:
            raise error.TestFail("%s ping %s success, but expected fail."
                                 % (vm1.name, remote_ip))
    # Test start
    try:
        try:
            session = guest_config(vm1, vm1_ip)
        except remote.LoginTimeoutError, fail:
            raise error.TestFail(str(fail))
        if vm2:
            try:
                guest_config(vm2, vm2_ip)
            except remote.LoginTimeoutError, fail:
                raise error.TestFail(str(fail))

        # Four mode test
        if iface_mode == "vepa":
            vepa_test(session)
        elif iface_mode == "bridge":
            bridge_test(session)
        elif iface_mode == "private":
            private_test(session)
        elif iface_mode == "passthrough":
            passthrough_test(session)
    finally:
        if iface_cls.is_up():
            if not origin_status:
                iface_cls.down()
        else:
            if origin_status:
                iface_cls.up()
        guest_clean(vm1, vmxml1)
        if vm2:
            guest_clean(vm2, vmxml2)
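
guest_config above writes the interface XML only so virsh can read it, then removes it as soon as attach-device returns. A standalone sketch of that write-use-remove pattern:

import os
import subprocess
import tempfile

def attach_device_xml(vm_name, xml_text):
    # The XML only needs to exist on disk long enough for the external
    # tool to read it; remove it once the call completes.
    fd, xml_path = tempfile.mkstemp(suffix='.xml')
    with os.fdopen(fd, 'w') as f:
        f.write(xml_text)
    try:
        subprocess.check_call(
            ['virsh', 'attach-device', vm_name, xml_path, '--config'])
    finally:
        os.remove(xml_path)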

Example 198

Project: pymo
Source File: config.py
View license
def listen(port=DEFAULT_LOGGING_CONFIG_PORT):
    """
    Start up a socket server on the specified port, and listen for new
    configurations.

    These will be sent as a file suitable for processing by fileConfig().
    Returns a Thread object on which you can call start() to start the server,
    and which you can join() when appropriate. To stop the server, call
    stopListening().
    """
    if not thread:
        raise NotImplementedError, "listen() needs threading to work"

    class ConfigStreamHandler(StreamRequestHandler):
        """
        Handler for a logging configuration request.

        It expects a completely new logging configuration and uses fileConfig
        to install it.
        """
        def handle(self):
            """
            Handle a request.

            Each request is expected to be a 4-byte length, packed using
            struct.pack(">L", n), followed by the config file.
            Uses fileConfig() to do the grunt work.
            """
            import tempfile
            try:
                conn = self.connection
                chunk = conn.recv(4)
                if len(chunk) == 4:
                    slen = struct.unpack(">L", chunk)[0]
                    chunk = self.connection.recv(slen)
                    while len(chunk) < slen:
                        chunk = chunk + conn.recv(slen - len(chunk))
                    #Apply new configuration. We'd like to be able to
                    #create a StringIO and pass that in, but unfortunately
                    #1.5.2 ConfigParser does not support reading file
                    #objects, only actual files. So we create a temporary
                    #file and remove it later.
                    file = tempfile.mktemp(".ini")
                    f = open(file, "w")
                    f.write(chunk)
                    f.close()
                    try:
                        fileConfig(file)
                    except (KeyboardInterrupt, SystemExit):
                        raise
                    except:
                        traceback.print_exc()
                    os.remove(file)
            except socket.error, e:
                if type(e.args) != types.TupleType:
                    raise
                else:
                    errcode = e.args[0]
                    if errcode != RESET_ERROR:
                        raise

    class ConfigSocketReceiver(ThreadingTCPServer):
        """
        A simple TCP socket-based logging config receiver.
        """

        allow_reuse_address = 1

        def __init__(self, host='localhost', port=DEFAULT_LOGGING_CONFIG_PORT,
                     handler=None):
            ThreadingTCPServer.__init__(self, (host, port), handler)
            logging._acquireLock()
            self.abort = 0
            logging._releaseLock()
            self.timeout = 1

        def serve_until_stopped(self):
            import select
            abort = 0
            while not abort:
                rd, wr, ex = select.select([self.socket.fileno()],
                                           [], [],
                                           self.timeout)
                if rd:
                    self.handle_request()
                logging._acquireLock()
                abort = self.abort
                logging._releaseLock()

    def serve(rcvr, hdlr, port):
        server = rcvr(port=port, handler=hdlr)
        global _listener
        logging._acquireLock()
        _listener = server
        logging._releaseLock()
        server.serve_until_stopped()

    return threading.Thread(target=serve,
                            args=(ConfigSocketReceiver,
                                  ConfigStreamHandler, port))
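
As the inline comment above notes, fileConfig() here needs a real file on disk, and the example creates one with the race-prone tempfile.mktemp(). NamedTemporaryFile is the safer way to get the same effect; a sketch, assuming Python 3:

import logging.config
import os
import tempfile

def apply_config_bytes(chunk):
    # delete=False lets fileConfig() reopen the file by name; we remove it
    # ourselves once the configuration has been applied.
    with tempfile.NamedTemporaryFile('w', suffix='.ini', delete=False) as f:
        f.write(chunk.decode('utf-8'))
        path = f.name
    try:
        logging.config.fileConfig(path)
    finally:
        os.remove(path)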

Example 199

Project: packet-manipulator
Source File: update.py
View license
    def __process_plugin(self, file, data, exc, obj):
        """
        Process callback for plugin data
        """

        if isinstance(exc, ErrorNetException):
            obj.lock.acquire()

            try:
                if obj.last_update_idx + 1 < \
                   len(obj.updates[obj.selected_update_idx].url):

                    obj.last_update_idx += 1

                    obj.status = FILE_GETTING
                    obj.label = _('Cycling to next update url. Waiting...')
                else:
                    obj.status = FILE_ERROR
                    obj.label = _('Download failed: %s') % str(exc.reason)
                    obj.fract = 1

                self.__remove_file(obj)

                self.__process_next_download()
                return
            finally:
                obj.lock.release()

        elif isinstance(exc, StopNetException):
            #TODO: CHECK THIS
            if obj.updates[obj.selected_update_idx].integrity:

                data = ""
                obj.lock.acquire()

                try:
                    obj.label = _('Checking validity ...')
                    obj.status = FILE_CHECKING

                    obj.fd.flush()
                    obj.fd.seek(0)

                    data = obj.fd.read()
                finally:
                    obj.lock.release()

                # Not locked it could freeze the ui
                md5_hash = sha_hash = None
                sums = obj.updates[obj.selected_update_idx].integrity

                if 'md5' in sums:
                    md5_hash = md5(data)
                if 'sha1' in sums:
                    sha_hash = sha(data)

                obj.lock.acquire()

                try:
                    if (md5_hash and md5_hash.hexdigest() == sums['md5']) or \
                       (sha_hash and sha_hash.hexdigest() == sums['sha1']):

                        obj.label = _('Updated. Restart to take effect')
                        obj.status = FILE_GETTED
                    else:
                        obj.label = _('Corrupted file.')
                        obj.status = FILE_ERROR
                finally:
                    obj.lock.release()

            else:
                obj.lock.acquire()

                try:
                    obj.label = _('Updated. Restart to take effect')
                    obj.status = FILE_GETTED
                finally:
                    obj.lock.release()

            obj.lock.acquire()

            try:
                obj.fd.close()
                obj.fract = 1
            finally:
                obj.lock.release()

            try:
                if obj.status == FILE_ERROR:
                    os.remove(obj.fd.name)
                else:
                    os.rename(obj.fd.name, \
                              obj.fd.name[:obj.fd.name.index(".ump") + 4])
            except Exception:
                # TODO: add more sensed control?
                pass

            self.__process_next()

        elif isinstance(exc, StartNetException):
            obj.lock.acquire()

            try:
                try:
                    obj.status = FILE_GETTING
                    obj.size = 0
                    obj.total = int(file.info()['Content-Length'])
                except:
                    pass

                obj.label = _('Downloading ...')
            finally:
                obj.lock.release()

        elif not exc:
            if obj.total:
                obj.size += len(data)
                obj.fract = float(obj.size) / float(obj.total)
            obj.fd.write(data)
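
The remove-or-rename step above is a tidy way to finalize a download: delete the partial file on error, otherwise strip the temporary suffix by renaming. A sketch, assuming the temporary name contains ".ump" as in the example:

import os

def finalize_download(tmp_path, ok):
    try:
        if not ok:
            os.remove(tmp_path)  # failed download: drop the partial file
        else:
            # keep everything up to and including ".ump" (drop the temp suffix)
            os.rename(tmp_path, tmp_path[:tmp_path.index(".ump") + 4])
    except (OSError, ValueError):
        pass  # best-effort cleanup, mirroring the broad except above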

Example 200

Project: internetarchive
Source File: item.py
View license
    def upload_file(self, body,
                    key=None,
                    metadata=None,
                    headers=None,
                    access_key=None,
                    secret_key=None,
                    queue_derive=None,
                    verbose=None,
                    verify=None,
                    checksum=None,
                    delete=None,
                    retries=None,
                    retries_sleep=None,
                    debug=None,
                    request_kwargs=None):
        """Upload a single file to an item. The item will be created
        if it does not exist.

        :type body: Filepath or file-like object.
        :param body: File or data to be uploaded.

        :type key: str
        :param key: (optional) Remote filename.

        :type metadata: dict
        :param metadata: (optional) Metadata used to create a new item.

        :type headers: dict
        :param headers: (optional) Add additional IA-S3 headers to request.

        :type queue_derive: bool
        :param queue_derive: (optional) Set to False to prevent an item from
                             being derived after upload.

        :type verify: bool
        :param verify: (optional) Verify local MD5 checksum matches the MD5
                       checksum of the file received by IAS3.

        :type checksum: bool
        :param checksum: (optional) Skip based on checksum.

        :type delete: bool
        :param delete: (optional) Delete local file after the upload has been
                       successfully verified.

        :type retries: int
        :param retries: (optional) Number of times to retry the given request
                        if S3 returns a 503 SlowDown error.

        :type retries_sleep: int
        :param retries_sleep: (optional) Amount of time to sleep between
                              ``retries``.

        :type verbose: bool
        :param verbose: (optional) Print progress to stdout.

        :type debug: bool
        :param debug: (optional) Set to True to print headers to stdout, and
                      exit without sending the upload request.

        Usage::

            >>> import internetarchive
            >>> item = internetarchive.Item('identifier')
            >>> item.upload_file('/path/to/image.jpg',
            ...                  key='photos/image1.jpg')
            True
        """
        # Set defaults.
        headers = {} if headers is None else headers
        metadata = {} if metadata is None else metadata
        access_key = self.session.access_key if access_key is None else access_key
        secret_key = self.session.secret_key if secret_key is None else secret_key
        queue_derive = True if queue_derive is None else queue_derive
        verbose = False if verbose is None else verbose
        verify = True if verify is None else verify
        delete = False if delete is None else delete
        # Set checksum after delete.
        checksum = True if delete or checksum is None else checksum
        retries = 0 if retries is None else retries
        retries_sleep = 30 if retries_sleep is None else retries_sleep
        debug = False if debug is None else debug
        request_kwargs = {} if request_kwargs is None else request_kwargs
        md5_sum = None

        if not hasattr(body, 'read'):
            body = open(body, 'rb')

        size = get_file_size(body)

        if not headers.get('x-archive-size-hint'):
            headers['x-archive-size-hint'] = str(size)

        # Build IA-S3 URL.
        key = body.name.split('/')[-1] if key is None else key
        base_url = '{0.session.protocol}//s3.us.archive.org/{0.identifier}'.format(self)
        url = '{0}/{1}'.format(
            base_url, urllib.parse.quote(key.lstrip('/').encode('utf-8')))

        # Skip based on checksum.
        if checksum:
            md5_sum = get_md5(body)
            ia_file = self.get_file(key)
            if (not self.tasks) and (ia_file) and (ia_file.md5 == md5_sum):
                log.info('{f} already exists: {u}'.format(f=key, u=url))
                if verbose:
                    print(' {f} already exists, skipping.'.format(f=key))
                if delete:
                    log.info(
                        '{f} successfully uploaded to '
                        'https://archive.org/download/{i}/{f} '
                        'and verified, deleting '
                        'local copy'.format(i=self.identifier,
                                            f=key))
                    os.remove(body.name)
                # Return an empty response object if checksums match.
                # TODO: Is there a better way to handle this?
                return Response()

        # require the Content-MD5 header when delete is True.
        if verify or delete:
            if not md5_sum:
                md5_sum = get_md5(body)
            headers['Content-MD5'] = md5_sum

        def _build_request():
            body.seek(0, os.SEEK_SET)
            if verbose:
                try:
                    chunk_size = 1048576
                    expected_size = size / chunk_size + 1
                    chunks = chunk_generator(body, chunk_size)
                    progress_generator = progress.bar(
                        chunks,
                        expected_size=expected_size,
                        label=' uploading {f}: '.format(f=key))
                    data = IterableToFileAdapter(progress_generator, size)
                except:
                    print(' uploading {f}'.format(f=key))
                    data = body
            else:
                data = body

            request = S3Request(method='PUT',
                                url=url,
                                headers=headers,
                                data=data,
                                metadata=metadata,
                                access_key=access_key,
                                secret_key=secret_key,
                                queue_derive=queue_derive)
            return request

        if debug:
            return _build_request()
        else:
            try:
                error_msg = ('s3 is overloaded, sleeping for '
                             '{0} seconds and retrying. '
                             '{1} retries left.'.format(retries_sleep, retries))
                while True:
                    if retries > 0:
                        if self.session.s3_is_overloaded(access_key):
                            sleep(retries_sleep)
                            log.info(error_msg)
                            if verbose:
                                print(' warning: {0}'.format(error_msg), file=sys.stderr)
                            retries -= 1
                            continue
                    request = _build_request()
                    prepared_request = request.prepare()
                    response = self.session.send(prepared_request,
                                                 stream=True,
                                                 **request_kwargs)
                    if (response.status_code == 503) and (retries > 0):
                        log.info(error_msg)
                        if verbose:
                            print(' warning: {0}'.format(error_msg), file=sys.stderr)
                        sleep(retries_sleep)
                        retries -= 1
                        continue
                    else:
                        if response.status_code == 503:
                            log.info('maximum retries exceeded, upload failed.')
                        break
                response.raise_for_status()
                log.info('uploaded {f} to {u}'.format(f=key, u=url))
                if delete and response.status_code == 200:
                    log.info(
                        '{f} successfully uploaded to '
                        'https://archive.org/download/{i}/{f} and verified, deleting '
                        'local copy'.format(i=self.identifier,
                                            f=key))
                    os.remove(body.name)
                return response
            except HTTPError as exc:
                msg = get_s3_xml_text(exc.response.content)
                error_msg = (' error uploading {0} to {1}, '
                             '{2}'.format(key, self.identifier, msg))
                log.error(error_msg)
                if verbose:
                    print(' error uploading {0}: {1}'.format(key, msg), file=sys.stderr)
                # Raise HTTPError with error message.
                raise type(exc)(error_msg, response=exc.response, request=exc.request)
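
upload_file above only reaches os.remove() after the server has verified the Content-MD5 checksum, so a bad transfer can never delete the local copy. A condensed sketch of that delete-after-verify flow (do_upload is a hypothetical stand-in for the S3 request):

import hashlib
import os

def upload_and_delete(path, do_upload):
    with open(path, 'rb') as f:
        md5_sum = hashlib.md5(f.read()).hexdigest()
    # do_upload should send Content-MD5 and raise on failure, so the
    # os.remove() below is only reached after a verified upload.
    do_upload(path, md5_sum)
    os.remove(path)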