uuid.uuid4

Here are examples of the Python API uuid.uuid4, taken from open source projects. By voting up you can indicate which examples are most useful and appropriate.

200 Examples
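Before the project examples, here is a minimal sketch of the call itself (not taken from any of the projects below, just standard-library behavior): uuid.uuid4() returns a random, version-4 uuid.UUID object, and most of the examples wrap it in str() to build unique names.

import uuid

# uuid.uuid4() returns a random (version 4) uuid.UUID object.
unique_id = uuid.uuid4()

# The examples below typically wrap the call in str() to build unique
# directory, file, or entity names.
unique_name = str(uuid.uuid4())

print(unique_id, unique_name)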

Example 1

Project: qutip
Source File: test_control_pulseoptim.py
    def test_2_dumping_and_unitarity(self):
        """
        control: data dumping and unitarity checking
        Dump out processing data and use it to check unitary evolution
        """
        N_EXP_OPTIMDUMP_FILES = 10
        N_EXP_DYNDUMP_FILES = 49
        
        # Hadamard
        H_d = sigmaz()
        H_c = [sigmax()]
        U_0 = identity(2)
        U_targ = hadamard_transform(1)

        n_ts = 1000
        evo_time = 4
        
        dump_folder = str(uuid.uuid4())
        qtrl_dump_dir = os.path.expanduser(os.path.join('~', dump_folder))
        self.tmp_dirs.append(qtrl_dump_dir)
        optim_dump_dir = os.path.join(qtrl_dump_dir, 'optim')
        dyn_dump_dir = os.path.join(qtrl_dump_dir, 'dyn')
        result = cpo.optimize_pulse_unitary(H_d, H_c, U_0, U_targ, 
                        n_ts, evo_time, 
                        fid_err_targ=1e-9, 
                        init_pulse_type='LIN', 
                        optim_params={'dumping':'FULL', 'dump_to_file':True, 
                                    'dump_dir':optim_dump_dir},
                        dyn_params={'dumping':'FULL', 'dump_to_file':True, 
                                    'dump_dir':dyn_dump_dir},
                        gen_stats=True)
        
        # check dumps were generated
        optim = result.optimizer
        dyn = optim.dynamics
        assert_(optim.dump is not None, msg='optimizer dump not created')
        assert_(dyn.dump is not None, msg='dynamics dump not created')
        
        # Count files that were output
        nfiles = len(os.listdir(optim.dump.dump_dir))
        assert_(nfiles == N_EXP_OPTIMDUMP_FILES, 
                msg="{} optimizer dump files generated, {} expected".format(
                    nfiles, N_EXP_OPTIMDUMP_FILES))
                    
        nfiles = len(os.listdir(dyn.dump.dump_dir))
        assert_(nfiles == N_EXP_DYNDUMP_FILES, 
                msg="{} dynamics dump files generated, {} expected".format(
                    nfiles, N_EXP_DYNDUMP_FILES))
                    
        # dump all to specific file stream
        fpath = os.path.expanduser(os.path.join('~', str(uuid.uuid4())))
        self.tmp_files.append(fpath)
        with open(fpath, 'wb') as f:
            optim.dump.writeout(f)
        
        assert_(os.stat(fpath).st_size > 0, msg="Nothing written to optimizer dump file")
        
        fpath = os.path.expanduser(os.path.join('~', str(uuid.uuid4())))
        self.tmp_files.append(fpath)
        with open(fpath, 'wb') as f:
            dyn.dump.writeout(f)
        assert_(os.stat(fpath).st_size > 0, msg="Nothing written to dynamics dump file")
        
        # Use the dump to check unitarity of all propagators and evo_ops
        dyn.unitarity_tol = 1e-14
        nu_prop = 0
        nu_fwd_evo = 0
        nu_onto_evo = 0
        for d in dyn.dump.evo_dumps:
            for k in range(dyn.num_tslots):
                if not dyn._is_unitary(d.prop[k]): nu_prop += 1
                if not dyn._is_unitary(d.fwd_evo[k]): nu_fwd_evo += 1
                if not dyn._is_unitary(d.onto_evo[k]): nu_onto_evo += 1
        assert_(nu_prop==0, 
                msg="{} propagators found to be non-unitary".format(nu_prop))
        assert_(nu_fwd_evo==0, 
                msg="{} fwd evo ops found to be non-unitary".format(
                                                                nu_fwd_evo))
        assert_(nu_onto_evo==0,
                msg="{} onto evo ops found to be non-unitary".format(
                                                                nu_onto_evo))

Example 2

Project: cassandra-dtest
Source File: user_types_test.py
    def test_type_secondary_indexing(self):
        """
        Confirm that user types are secondary-indexable
        Similar procedure to TestSecondaryIndexesOnCollections.test_list_indexes
        """
        cluster = self.cluster
        cluster.populate(3).start()
        node1, node2, node3 = cluster.nodelist()
        session = self.patient_cql_connection(node1)
        create_ks(session, 'user_type_indexing', 2)
        session.default_consistency_level = ConsistencyLevel.LOCAL_QUORUM

        stmt = """
              CREATE TYPE t_person_name (
              first text,
              middle text,
              last text
            )
           """
        session.execute(stmt)

        stmt = """
              CREATE TABLE person_likes (
              id uuid PRIMARY KEY,
              name frozen<t_person_name>,
              like text
              )
           """
        session.execute(stmt)
        # Make sure the schema propagates
        time.sleep(2)

        # add index and query (even though there are no rows in the table yet)
        stmt = """
              CREATE INDEX person_likes_name on person_likes (name);
            """
        session.execute(stmt)

        stmt = """
              SELECT * from person_likes where name = {first:'Nero', middle: 'Claudius Caesar Augustus', last: 'Germanicus'};
            """
        rows = list(session.execute(stmt))
        self.assertEqual(0, len(rows))

        # add a row which doesn't specify data for the indexed column, and query again
        _id = uuid.uuid4()
        stmt = """
              INSERT INTO person_likes (id, like)
              VALUES ({id}, 'long walks on the beach');
           """.format(id=_id)
        session.execute(stmt)

        stmt = """
              SELECT * from person_likes where name = {first:'Bob', middle: 'Testy', last: 'McTesterson'};
            """

        rows = list(session.execute(stmt))
        self.assertEqual(0, len(rows))

        # finally let's add a queryable row, and get it back using the index
        _id = uuid.uuid4()

        stmt = """
              INSERT INTO person_likes (id, name, like)
              VALUES ({id}, {{first:'Nero', middle:'Claudius Caesar Augustus', last:'Germanicus'}}, 'arson');
           """.format(id=_id)
        session.execute(stmt)

        stmt = """
              SELECT id, name.first, like from person_likes where name = {first:'Nero', middle: 'Claudius Caesar Augustus', last: 'Germanicus'};
           """

        rows = list(session.execute(stmt))

        row_uuid, first_name, like = rows[0]

        self.assertEqual(str(row_uuid), str(_id))
        self.assertEqual(first_name, u'Nero')
        self.assertEqual(like, u'arson')

        # rename a field in the type and make sure the index still works
        stmt = """
            ALTER TYPE t_person_name rename first to first_name;
            """
        session.execute(stmt)

        stmt = """
            SELECT id, name.first_name, like from person_likes where name = {first_name:'Nero', middle: 'Claudius Caesar Augustus', last: 'Germanicus'};
            """

        rows = list(session.execute(stmt))

        row_uuid, first_name, like = rows[0]

        self.assertEqual(str(row_uuid), str(_id))
        self.assertEqual(first_name, u'Nero')
        self.assertEqual(like, u'arson')

        # add another row to be sure the index is still adding new data
        _id = uuid.uuid4()

        stmt = """
              INSERT INTO person_likes (id, name, like)
              VALUES ({id}, {{first_name:'Abraham', middle:'', last:'Lincoln'}}, 'preserving unions');
           """.format(id=_id)
        session.execute(stmt)

        stmt = """
            SELECT id, name.first_name, like from person_likes where name = {first_name:'Abraham', middle:'', last:'Lincoln'};
            """

        rows = list(session.execute(stmt))

        row_uuid, first_name, like = rows[0]

        self.assertEqual(str(row_uuid), str(_id))
        self.assertEqual(first_name, u'Abraham')
        self.assertEqual(like, u'preserving unions')

Example 3

def test_copy():
    """Tests the copy function"""

    # Create a Project
    project_entity = syn.store(Project(name=str(uuid.uuid4())))
    schedule_for_cleanup(project_entity.id)
    acl = syn.setPermissions(project_entity, other_user['principalId'], accessType=['READ', 'CREATE', 'UPDATE'])
    # Create two Folders in Project
    folder_entity = syn.store(Folder(name=str(uuid.uuid4()), parent=project_entity))
    second_folder = syn.store(Folder(name=str(uuid.uuid4()), parent=project_entity))
    third_folder = syn.store(Folder(name=str(uuid.uuid4()), parent=project_entity))
    schedule_for_cleanup(folder_entity.id)
    schedule_for_cleanup(second_folder.id)
    schedule_for_cleanup(third_folder.id)

    # Annotations and provenance
    repo_url = 'https://github.com/Sage-Bionetworks/synapsePythonClient'
    annots = {'test':['hello_world']}
    prov = Activity(name = "test",used = repo_url)
    # Create, upload, and set annotations/provenance on a file in Folder
    filename = utils.make_bogus_data_file()
    schedule_for_cleanup(filename)
    file_entity = syn.store(File(filename, parent=folder_entity))
    externalURL_entity = syn.store(File(repo_url,name='rand',parent=folder_entity,synapseStore=False))
    syn.setAnnotations(file_entity,annots)
    syn.setAnnotations(externalURL_entity,annots)
    syn.setProvenance(externalURL_entity.id, prov)
    schedule_for_cleanup(file_entity.id)
    schedule_for_cleanup(externalURL_entity.id)
    # ------------------------------------
    # TEST COPY FILE
    # ------------------------------------
    output = synapseutils.copy(syn,file_entity.id,destinationId=project_entity.id)
    output_URL = synapseutils.copy(syn,externalURL_entity.id,destinationId=project_entity.id)

    #Verify that our copied files are identical
    copied_ent = syn.get(output[file_entity.id])
    copied_URL_ent = syn.get(output_URL[externalURL_entity.id],downloadFile=False)

    copied_ent_annot = syn.getAnnotations(copied_ent)
    copied_url_annot = syn.getAnnotations(copied_URL_ent)
    copied_prov = syn.getProvenance(copied_ent)
    copied_url_prov = syn.getProvenance(copied_URL_ent)
    schedule_for_cleanup(copied_ent.id)
    schedule_for_cleanup(copied_URL_ent.id)

    # TEST: setProvenance = Traceback
    print("Test: setProvenance = Traceback")
    assert copied_prov['used'][0]['reference']['targetId'] == file_entity.id
    assert copied_url_prov['used'][0]['reference']['targetId'] == externalURL_entity.id

    # TEST: Make sure copied files are the same
    assert copied_ent_annot == annots
    assert copied_ent.dataFileHandleId == file_entity.dataFileHandleId

    # TEST: Make sure copied URLs are the same
    assert copied_url_annot == annots
    assert copied_URL_ent.externalURL == repo_url
    assert copied_URL_ent.name == 'rand'
    assert copied_URL_ent.dataFileHandleId == externalURL_entity.dataFileHandleId

    # TEST: Throw error if file is copied to a folder/project that has a file with the same filename
    assert_raises(ValueError,synapseutils.copy,syn,project_entity.id,destinationId = project_entity.id)
    assert_raises(ValueError,synapseutils.copy,syn,file_entity.id,destinationId = project_entity.id)
    assert_raises(ValueError,synapseutils.copy,syn,file_entity.id,destinationId = third_folder.id,setProvenance = "gib")
    assert_raises(ValueError,synapseutils.copy,syn,file_entity.id,destinationId = file_entity.id)

    print("Test: setProvenance = None")
    output = synapseutils.copy(syn,file_entity.id,destinationId=second_folder.id,setProvenance = None)
    assert_raises(SynapseHTTPError,syn.getProvenance,output[file_entity.id])
    schedule_for_cleanup(output[file_entity.id])

    print("Test: setProvenance = Existing")
    output_URL = synapseutils.copy(syn,externalURL_entity.id,destinationId=second_folder.id,setProvenance = "existing")
    output_prov = syn.getProvenance(output_URL[externalURL_entity.id])
    schedule_for_cleanup(output_URL[externalURL_entity.id])
    assert output_prov['name'] == prov['name']
    assert output_prov['used'] == prov['used']

    if 'username' not in other_user or 'password' not in other_user:
        sys.stderr.write('\nWarning: no test-authentication configured. skipping testing copy function when trying to copy file made by another user.\n')
        return

    try:
        print("Test: Other user copy should result in different data file handle")
        syn_other = synapseclient.Synapse(skip_checks=True)
        syn_other.login(other_user['username'], other_user['password'])

        output = synapseutils.copy(syn_other,file_entity.id,destinationId=third_folder.id)
        new_copied_ent = syn.get(output[file_entity.id])
        new_copied_ent_annot = syn.getAnnotations(new_copied_ent)
        schedule_for_cleanup(new_copied_ent.id)
        
        copied_URL_ent.externalURL = "https://www.google.com"
        copied_URL_ent = syn.store(copied_URL_ent)
        output = synapseutils.copy(syn_other,copied_URL_ent.id,destinationId=third_folder.id,version=1)
        new_copied_URL = syn.get(output[copied_URL_ent.id],downloadFile=False)
        schedule_for_cleanup(new_copied_URL.id)

        assert new_copied_ent_annot == annots
        assert new_copied_ent.dataFileHandleId != copied_ent.dataFileHandleId
        #Test if copying different versions gets you the correct file
        assert new_copied_URL.versionNumber == 1
        assert new_copied_URL.externalURL == repo_url
        assert new_copied_URL.dataFileHandleId != copied_URL_ent.dataFileHandleId
    finally:
        syn_other.logout()
    # ------------------------------------
    # TEST COPY LINKS
    # ------------------------------------
    print("Test: Copy Links")
    second_file = utils.make_bogus_data_file()
    #schedule_for_cleanup(filename)
    second_file_entity = syn.store(File(second_file, parent=project_entity))
    link_entity = Link(second_file_entity.id,parent=folder_entity.id)
    link_entity = syn.store(link_entity)

    copied_link = synapseutils.copy(syn,link_entity.id, destinationId=second_folder.id)
    old = syn.get(link_entity.id,followLink=False)
    new = syn.get(copied_link[link_entity.id],followLink=False)
    assert old.linksTo['targetId'] == new.linksTo['targetId']
    assert old.linksTo['targetVersionNumber'] == new.linksTo['targetVersionNumber']

    schedule_for_cleanup(second_file_entity.id)
    schedule_for_cleanup(link_entity.id)
    schedule_for_cleanup(copied_link[link_entity.id])

    assert_raises(ValueError,synapseutils.copy,syn,link_entity.id,destinationId=second_folder.id)


    # ------------------------------------
    # TEST COPY TABLE
    # ------------------------------------
    second_project = syn.store(Project(name=str(uuid.uuid4())))
    schedule_for_cleanup(second_project.id)
    print("Test: Copy Tables")
    cols = [Column(name='n', columnType='DOUBLE', maximumSize=50),
            Column(name='c', columnType='STRING', maximumSize=50),
            Column(name='i', columnType='INTEGER')]
    data = [[2.1,'foo',10],
            [2.2,'bar',20],
            [2.3,'baz',30]]

    schema = syn.store(Schema(name='Testing', columns=cols, parent=project_entity.id))
    row_reference_set = syn.store(RowSet(columns=cols, schema=schema, rows=[Row(r) for r in data]))

    table_map = synapseutils.copy(syn,schema.id, destinationId=second_project.id)
    copied_table = syn.tableQuery('select * from %s' %table_map[schema.id])
    rows = copied_table.asRowSet()['rows']
    # TEST: Check if all values are the same
    for i,row in enumerate(rows):
        assert row['values'] == data[i]

    assert_raises(ValueError,synapseutils.copy,syn,schema.id,destinationId=second_project.id)

    schedule_for_cleanup(schema.id)
    schedule_for_cleanup(table_map[schema.id])

    # ------------------------------------
    # TEST COPY FOLDER
    # ------------------------------------
    print("Test: Copy Folder")
    mapping = synapseutils.copy(syn,folder_entity.id,destinationId=second_project.id)
    for i in mapping:
        old = syn.get(i,downloadFile=False)
        new = syn.get(mapping[i],downloadFile=False)
        assert old.name == new.name
        assert old.annotations == new.annotations
        assert old.concreteType == new.concreteType


    assert_raises(ValueError,synapseutils.copy,syn,folder_entity.id,destinationId=second_project.id)
    # TEST: Throw error if excludeTypes contains anything other than file, link, or table, or isn't a list
    assert_raises(ValueError,synapseutils.copy,syn,second_folder.id,excludeTypes=["foo"])
    assert_raises(ValueError,synapseutils.copy,syn,second_folder.id,excludeTypes="file")
    # TEST: excludeType = ["file"], only the folder is created
    second = synapseutils.copy(syn,second_folder.id,destinationId=second_project.id,excludeTypes=["file","table","link"])

    copied_folder = syn.get(second[second_folder.id])
    assert copied_folder.name == second_folder.name
    assert len(second) == 1
    # TEST: Make sure error is thrown if foldername already exists
    assert_raises(ValueError,synapseutils.copy,syn,second_folder.id, destinationId=second_project.id)

    # ------------------------------------
    # TEST COPY PROJECT
    # ------------------------------------
    print("Test: Copy Project")
    third_project = syn.store(Project(name=str(uuid.uuid4())))
    schedule_for_cleanup(third_project.id)

    mapping = synapseutils.copy(syn,project_entity.id,destinationId=third_project.id)
    for i in mapping:
        old = syn.get(i,downloadFile=False)
        new = syn.get(mapping[i],downloadFile=False)
        if not isinstance(old, Project):
            assert old.name == new.name
        assert old.annotations == new.annotations
        assert old.concreteType == new.concreteType

    # TEST: Can't copy project to a folder
    assert_raises(ValueError,synapseutils.copy,syn,project_entity.id,destinationId=second_folder.id)

Example 4

def test_copyWiki():
    # Create a Project
    project_entity = syn.store(Project(name=str(uuid.uuid4())))

    schedule_for_cleanup(project_entity.id)

    folder_entity = syn.store(Folder(name=str(uuid.uuid4()), parent=project_entity))
    schedule_for_cleanup(folder_entity.id)
    second_folder = syn.store(Folder(name=str(uuid.uuid4()), parent=project_entity))
    schedule_for_cleanup(second_folder.id)
    third_folder = syn.store(Folder(name=str(uuid.uuid4()), parent=project_entity))
    schedule_for_cleanup(third_folder.id)

    filename = utils.make_bogus_data_file()
    attachname = utils.make_bogus_data_file()

    schedule_for_cleanup(filename)
    file_entity = syn.store(File(filename, parent=folder_entity))
    nested_folder = syn.store(Folder(name=str(uuid.uuid4()), parent=folder_entity))
    second_file = syn.store(File(filename, parent=nested_folder))

    schedule_for_cleanup(file_entity.id)
    schedule_for_cleanup(nested_folder.id)
    schedule_for_cleanup(second_file.id)

    fileWiki = Wiki(owner=second_file, title='A Test Wiki', markdown="Test")
    fileWiki = syn.store(fileWiki)

    
    #Create mock wiki
    md = """
    This is a test wiki
    =======================

    Blabber jabber blah blah boo.
    %s
    %s
    """ %(file_entity.id,second_file.id)

    wiki = Wiki(owner=project_entity, title='A Test Wiki', markdown=md, 
                attachments=[attachname])
    wiki = syn.store(wiki)

    # Create a Wiki sub-page
    subwiki = Wiki(owner=project_entity, title='A sub-wiki', 
                   markdown='%s' % file_entity.id, parentWikiId=wiki.id)
    subwiki = syn.store(subwiki)

    second_md = """
    Testing internal links
    ======================

    [test](#!Synapse:%s/wiki/%s)

    %s)
    """ % (project_entity.id,subwiki.id, second_file.id)

    sub_subwiki = Wiki(owner=project_entity, title='A sub-sub-wiki', 
                   markdown=second_md, parentWikiId=subwiki.id,
                   attachments=[attachname])
    sub_subwiki = syn.store(sub_subwiki)

    #Copy wiki to second project
    second_project = syn.store(Project(name=str(uuid.uuid4())))
    schedule_for_cleanup(second_project.id)

    fileMapping = synapseutils.copy(syn, project_entity, second_project.id, copyWikiPage=False)
    
    print("Test: copyWikiPage = False")
    assert_raises(SynapseHTTPError,syn.getWiki,second_project.id)

    first_headers = syn.getWikiHeaders(project_entity)
    second_headers = synapseutils.copyWiki(syn, project_entity.id, second_project.id, entityMap=fileMapping)

    mapping = dict()

    print("Test: Check that all wikis were copied correctly with the correct mapping")
    for index,info in enumerate(second_headers):
        mapping[first_headers[index]['id']] = info['id']
        assert first_headers[index]['title'] == info['title']
        if info.get('parentId',None) is not None:
            #Check if parent Ids are mapping correctly in the copied Wikis
            assert info['parentId'] == mapping[first_headers[index]['parentId']]

    print("Test: Check that all wikis have the correct attachments and have correct internal synapse link/file mapping")
    for index,info in enumerate(second_headers):
        #Check if markdown is correctly mapped
        orig_wikiPage= syn.getWiki(project_entity, first_headers[index]['id'])
        new_wikiPage = syn.getWiki(second_project, info['id'])
        s = orig_wikiPage.markdown
        for oldWikiId in mapping.keys():
            oldProjectAndWikiId = "%s/wiki/%s" % (project_entity.id, oldWikiId)
            newProjectAndWikiId = "%s/wiki/%s" % (second_project.id, mapping[oldWikiId])
            s=re.sub(oldProjectAndWikiId, newProjectAndWikiId, s)
        for oldFileId in fileMapping.keys():
            s = re.sub(oldFileId, fileMapping[oldFileId], s)
        assert s == new_wikiPage.markdown
        orig_attach = syn.getWikiAttachments(orig_wikiPage)
        new_attach = syn.getWikiAttachments(new_wikiPage)
        #check that attachment file names are the same
        assert orig_attach == new_attach

    print("Test: copyWikiPage = True (Default) (Should copy all wikis including wikis on files)")
    third_project = syn.store(Project(name=str(uuid.uuid4())))
    schedule_for_cleanup(third_project.id)

    copiedFile = synapseutils.copy(syn, second_file, third_project.id)
    copiedWiki = syn.getWiki(copiedFile[second_file.id])
    assert copiedWiki.title == fileWiki.title
    assert copiedWiki.markdown == fileWiki.markdown

    print("Test: entitySubPageId")
    third_header = synapseutils.copyWiki(syn, project_entity.id, third_project.id, entitySubPageId=sub_subwiki.id, destinationSubPageId=None, updateLinks=False, updateSynIds=False,entityMap=fileMapping)
    test_ent_subpage = syn.getWiki(third_project.id,third_header[0]['id'])

    print("Test: No internal links updated")
    assert test_ent_subpage.markdown == sub_subwiki.markdown
    assert test_ent_subpage.title == sub_subwiki.title

    print("Test: destinationSubPageId")
    fourth_header = synapseutils.copyWiki(syn, project_entity.id, third_project.id, entitySubPageId=subwiki.id, destinationSubPageId=test_ent_subpage.id, updateLinks=False, updateSynIds=False,entityMap=fileMapping)
    temp = syn.getWiki(third_project.id, fourth_header[0]['id'])
    #There are issues where some title pages are blank.  This is an issue that needs to be addressed
    #assert temp.title == subwiki.title

    assert temp.markdown == subwiki.markdown

    temp = syn.getWiki(third_project.id, fourth_header[1]['id'])
    assert temp.title == sub_subwiki.title
    assert temp.markdown == sub_subwiki.markdown
    assert fourth_header[0] == third_header[0]

Example 5

Project: pth-toolkit
Source File: backend.py
    def provision(self):
        from samba.provision import ProvisioningError, setup_path
        # Wipe the directories so we can start
        shutil.rmtree(os.path.join(self.ldapdir, "db"), True)

        # Allow the test scripts to turn off fsync() for OpenLDAP as for TDB
        # and LDB
        nosync_config = ""
        if self.nosync:
            nosync_config = "dbnosync"

        lnkattr = self.schema.linked_attributes()
        refint_attributes = ""
        memberof_config = "# Generated from Samba4 schema\n"
        for att in lnkattr.keys():
            if lnkattr[att] is not None:
                refint_attributes = refint_attributes + " " + att

                memberof_config += read_and_sub_file(
                    setup_path("memberof.conf"), {
                        "MEMBER_ATTR": att,
                        "MEMBEROF_ATTR" : lnkattr[att] })

        refint_config = read_and_sub_file(setup_path("refint.conf"),
                                      { "LINK_ATTRS" : refint_attributes})

        attrs = ["linkID", "lDAPDisplayName"]
        res = self.schema.ldb.search(expression="(&(objectclass=attributeSchema)(searchFlags:1.2.840.113556.1.4.803:=1))", base=self.names.schemadn, scope=SCOPE_ONELEVEL, attrs=attrs)
        index_config = ""
        for i in range (0, len(res)):
            index_attr = res[i]["lDAPDisplayName"][0]
            if index_attr == "objectGUID":
                index_attr = "entryUUID"

            index_config += "index " + index_attr + " eq\n"

        # generate serverids, ldap-urls and syncrepl-blocks for mmr hosts
        mmr_on_config = ""
        mmr_replicator_acl = ""
        mmr_serverids_config = ""
        mmr_syncrepl_schema_config = ""
        mmr_syncrepl_config_config = ""
        mmr_syncrepl_user_config = ""

        if self.ol_mmr_urls is not None:
            # For now, make these equal
            mmr_pass = self.ldapadminpass

            url_list = filter(None,self.ol_mmr_urls.split(','))
            for url in url_list:
                self.logger.info("Using LDAP-URL: "+url)
            if len(url_list) == 1:
                raise ProvisioningError("At least 2 LDAP-URLs needed for MMR!")

            mmr_on_config = "MirrorMode On"
            mmr_replicator_acl = "  by dn=cn=replicator,cn=samba read"
            serverid = 0
            for url in url_list:
                serverid = serverid + 1
                mmr_serverids_config += read_and_sub_file(
                    setup_path("mmr_serverids.conf"), {
                        "SERVERID": str(serverid),
                        "LDAPSERVER": url })
                rid = serverid * 10
                rid = rid + 1
                mmr_syncrepl_schema_config += read_and_sub_file(
                        setup_path("mmr_syncrepl.conf"), {
                            "RID" : str(rid),
                           "MMRDN": self.names.schemadn,
                           "LDAPSERVER" : url,
                           "MMR_PASSWORD": mmr_pass})

                rid = rid + 1
                mmr_syncrepl_config_config += read_and_sub_file(
                    setup_path("mmr_syncrepl.conf"), {
                        "RID" : str(rid),
                        "MMRDN": self.names.configdn,
                        "LDAPSERVER" : url,
                        "MMR_PASSWORD": mmr_pass})

                rid = rid + 1
                mmr_syncrepl_user_config += read_and_sub_file(
                    setup_path("mmr_syncrepl.conf"), {
                        "RID" : str(rid),
                        "MMRDN": self.names.domaindn,
                        "LDAPSERVER" : url,
                        "MMR_PASSWORD": mmr_pass })
        # OpenLDAP cn=config initialisation
        olc_syncrepl_config = ""
        olc_mmr_config = ""
        # if mmr = yes, generate cn=config-replication directives
        # and olc_seed.lif for the other mmr-servers
        if self.ol_mmr_urls is not None:
            serverid = 0
            olc_serverids_config = ""
            olc_syncrepl_seed_config = ""
            olc_mmr_config += read_and_sub_file(
                setup_path("olc_mmr.conf"), {})
            rid = 500
            for url in url_list:
                serverid = serverid + 1
                olc_serverids_config += read_and_sub_file(
                    setup_path("olc_serverid.conf"), {
                        "SERVERID" : str(serverid), "LDAPSERVER" : url })

                rid = rid + 1
                olc_syncrepl_config += read_and_sub_file(
                    setup_path("olc_syncrepl.conf"), {
                        "RID" : str(rid), "LDAPSERVER" : url,
                        "MMR_PASSWORD": mmr_pass})

                olc_syncrepl_seed_config += read_and_sub_file(
                    setup_path("olc_syncrepl_seed.conf"), {
                        "RID" : str(rid), "LDAPSERVER" : url})

            setup_file(setup_path("olc_seed.ldif"), self.olcseedldif,
                       {"OLC_SERVER_ID_CONF": olc_serverids_config,
                        "OLC_PW": self.ldapadminpass,
                        "OLC_SYNCREPL_CONF": olc_syncrepl_seed_config})
        # end olc

        setup_file(setup_path("slapd.conf"), self.slapdconf,
                   {"DNSDOMAIN": self.names.dnsdomain,
                    "LDAPDIR": self.ldapdir,
                    "DOMAINDN": self.names.domaindn,
                    "CONFIGDN": self.names.configdn,
                    "SCHEMADN": self.names.schemadn,
                    "MEMBEROF_CONFIG": memberof_config,
                    "MIRRORMODE": mmr_on_config,
                    "REPLICATOR_ACL": mmr_replicator_acl,
                    "MMR_SERVERIDS_CONFIG": mmr_serverids_config,
                    "MMR_SYNCREPL_SCHEMA_CONFIG": mmr_syncrepl_schema_config,
                    "MMR_SYNCREPL_CONFIG_CONFIG": mmr_syncrepl_config_config,
                    "MMR_SYNCREPL_USER_CONFIG": mmr_syncrepl_user_config,
                    "OLC_SYNCREPL_CONFIG": olc_syncrepl_config,
                    "OLC_MMR_CONFIG": olc_mmr_config,
                    "REFINT_CONFIG": refint_config,
                    "INDEX_CONFIG": index_config,
                    "NOSYNC": nosync_config})

        self.setup_db_config(os.path.join(self.ldapdir, "db", "user"))
        self.setup_db_config(os.path.join(self.ldapdir, "db", "config"))
        self.setup_db_config(os.path.join(self.ldapdir, "db", "schema"))

        if not os.path.exists(os.path.join(self.ldapdir, "db", "samba", "cn=samba")):
            os.makedirs(os.path.join(self.ldapdir, "db", "samba", "cn=samba"), 0700)

        setup_file(setup_path("cn=samba.ldif"),
                   os.path.join(self.ldapdir, "db", "samba", "cn=samba.ldif"),
                   { "UUID": str(uuid.uuid4()),
                     "LDAPTIME": timestring(int(time.time()))} )
        setup_file(setup_path("cn=samba-admin.ldif"),
                   os.path.join(self.ldapdir, "db", "samba", "cn=samba", "cn=samba-admin.ldif"),
                   {"LDAPADMINPASS_B64": b64encode(self.ldapadminpass),
                    "UUID": str(uuid.uuid4()),
                    "LDAPTIME": timestring(int(time.time()))} )

        if self.ol_mmr_urls is not None:
            setup_file(setup_path("cn=replicator.ldif"),
                       os.path.join(self.ldapdir, "db", "samba", "cn=samba", "cn=replicator.ldif"),
                       {"MMR_PASSWORD_B64": b64encode(mmr_pass),
                        "UUID": str(uuid.uuid4()),
                        "LDAPTIME": timestring(int(time.time()))} )

        mapping = "schema-map-openldap-2.3"
        backend_schema = "backend-schema.schema"

        f = open(setup_path(mapping), 'r')
        try:
            backend_schema_data = self.schema.convert_to_openldap(
                    "openldap", f.read())
        finally:
            f.close()
        assert backend_schema_data is not None
        f = open(os.path.join(self.ldapdir, backend_schema), 'w')
        try:
            f.write(backend_schema_data)
        finally:
            f.close()

        # now we generate the needed strings to start slapd automatically,
        if self.ldap_backend_extra_port is not None:
            # When we use MMR, we can't use 0.0.0.0 as it uses the name
            # specified there as part of its clue as to its own name,
            # and not to replicate to itself
            if self.ol_mmr_urls is None:
                server_port_string = "ldap://0.0.0.0:%d" % self.ldap_backend_extra_port
            else:
                server_port_string = "ldap://%s.%s:%d" (self.names.hostname,
                    self.names.dnsdomain, self.ldap_backend_extra_port)
        else:
            server_port_string = ""

        # Prepare the 'result' information - the commands to return in
        # particular
        self.slapd_provision_command = [self.slapd_path, "-F" + self.olcdir,
            "-h"]

        # copy this command so we have two versions, one with -d0 and only
        # ldapi (or the forced ldap_uri), and one with all the listen commands
        self.slapd_command = list(self.slapd_provision_command)

        self.slapd_provision_command.extend([self.ldap_uri, "-d0"])

        uris = self.ldap_uri
        if server_port_string != "":
            uris = uris + " " + server_port_string

        self.slapd_command.append(uris)

        # Set the username - done here because Fedora DS still uses the admin
        # DN and simple bind
        self.credentials.set_username("samba-admin")

        # Wipe the old sam.ldb databases away
        shutil.rmtree(self.olcdir, True)
        os.makedirs(self.olcdir, 0770)

        # If we were just looking for crashes up to this point, it's a
        # good time to exit before we realise we don't have OpenLDAP on
        # this system
        if self.ldap_dryrun_mode:
            sys.exit(0)

        slapd_cmd = [self.slapd_path, "-Ttest", "-n", "0", "-f",
                         self.slapdconf, "-F", self.olcdir]
        retcode = subprocess.call(slapd_cmd, close_fds=True, shell=False)

        if retcode != 0:
            self.logger.error("conversion from slapd.conf to cn=config failed slapd started with: %s" %  "\'" + "\' \'".join(slapd_cmd) + "\'")
            raise ProvisioningError("conversion from slapd.conf to cn=config failed")

        if not os.path.exists(os.path.join(self.olcdir, "cn=config.ldif")):
            raise ProvisioningError("conversion from slapd.conf to cn=config failed")

        # Don't confuse the admin by leaving the slapd.conf around
        os.remove(self.slapdconf)

Example 6

Project: avos
Source File: neutron_data.py
def data(TEST):
    # Data returned by openstack_dashboard.api.neutron wrapper.
    TEST.agents = utils.TestDataContainer()
    TEST.networks = utils.TestDataContainer()
    TEST.subnets = utils.TestDataContainer()
    TEST.ports = utils.TestDataContainer()
    TEST.routers = utils.TestDataContainer()
    TEST.routers_with_rules = utils.TestDataContainer()
    TEST.q_floating_ips = utils.TestDataContainer()
    TEST.q_secgroups = utils.TestDataContainer()
    TEST.q_secgroup_rules = utils.TestDataContainer()
    TEST.providers = utils.TestDataContainer()
    TEST.pools = utils.TestDataContainer()
    TEST.vips = utils.TestDataContainer()
    TEST.members = utils.TestDataContainer()
    TEST.monitors = utils.TestDataContainer()
    TEST.neutron_quotas = utils.TestDataContainer()
    TEST.net_profiles = utils.TestDataContainer()
    TEST.policy_profiles = utils.TestDataContainer()
    TEST.network_profile_binding = utils.TestDataContainer()
    TEST.policy_profile_binding = utils.TestDataContainer()
    TEST.vpnservices = utils.TestDataContainer()
    TEST.ikepolicies = utils.TestDataContainer()
    TEST.ipsecpolicies = utils.TestDataContainer()
    TEST.ipsecsiteconnections = utils.TestDataContainer()
    TEST.firewalls = utils.TestDataContainer()
    TEST.fw_policies = utils.TestDataContainer()
    TEST.fw_rules = utils.TestDataContainer()

    # Data returned by neutronclient.
    TEST.api_agents = utils.TestDataContainer()
    TEST.api_networks = utils.TestDataContainer()
    TEST.api_subnets = utils.TestDataContainer()
    TEST.api_ports = utils.TestDataContainer()
    TEST.api_routers = utils.TestDataContainer()
    TEST.api_q_floating_ips = utils.TestDataContainer()
    TEST.api_q_secgroups = utils.TestDataContainer()
    TEST.api_q_secgroup_rules = utils.TestDataContainer()
    TEST.api_pools = utils.TestDataContainer()
    TEST.api_vips = utils.TestDataContainer()
    TEST.api_members = utils.TestDataContainer()
    TEST.api_monitors = utils.TestDataContainer()
    TEST.api_extensions = utils.TestDataContainer()
    TEST.api_net_profiles = utils.TestDataContainer()
    TEST.api_policy_profiles = utils.TestDataContainer()
    TEST.api_network_profile_binding = utils.TestDataContainer()
    TEST.api_policy_profile_binding = utils.TestDataContainer()
    TEST.api_vpnservices = utils.TestDataContainer()
    TEST.api_ikepolicies = utils.TestDataContainer()
    TEST.api_ipsecpolicies = utils.TestDataContainer()
    TEST.api_ipsecsiteconnections = utils.TestDataContainer()
    TEST.api_firewalls = utils.TestDataContainer()
    TEST.api_fw_policies = utils.TestDataContainer()
    TEST.api_fw_rules = utils.TestDataContainer()

    # 1st network.
    network_dict = {'admin_state_up': True,
                    'id': '82288d84-e0a5-42ac-95be-e6af08727e42',
                    'name': 'net1',
                    'status': 'ACTIVE',
                    'subnets': ['e8abc972-eb0c-41f1-9edd-4bc6e3bcd8c9'],
                    'tenant_id': '1',
                    'router:external': False,
                    'shared': False}
    subnet_dict = {'allocation_pools': [{'end': '10.0.0.254',
                                         'start': '10.0.0.2'}],
                   'dns_nameservers': [],
                   'host_routes': [],
                   'cidr': '10.0.0.0/24',
                   'enable_dhcp': True,
                   'gateway_ip': '10.0.0.1',
                   'id': network_dict['subnets'][0],
                   'ip_version': 4,
                   'name': 'mysubnet1',
                   'network_id': network_dict['id'],
                   'tenant_id': network_dict['tenant_id']}

    TEST.api_networks.add(network_dict)
    TEST.api_subnets.add(subnet_dict)

    network = copy.deepcopy(network_dict)
    subnet = neutron.Subnet(subnet_dict)
    network['subnets'] = [subnet]
    TEST.networks.add(neutron.Network(network))
    TEST.subnets.add(subnet)

    # Network profile for network when using the cisco n1k plugin.
    net_profile_dict = {'name': 'net_profile_test1',
                        'segment_type': 'vlan',
                        'physical_network': 'phys1',
                        'segment_range': '3000-3100',
                        'id':
                        '00000000-1111-1111-1111-000000000000',
                        'project': TEST.networks.get(name="net1")['tenant_id'],
                        # vlan profiles have no sub_type or multicast_ip_range
                        'multicast_ip_range': None,
                        'sub_type': None}

    TEST.api_net_profiles.add(net_profile_dict)
    TEST.net_profiles.add(neutron.Profile(net_profile_dict))

    # Policy profile for port when using the cisco n1k plugin.
    policy_profile_dict = {'name': 'policy_profile_test1',
                           'id':
                           '00000000-9999-9999-9999-000000000000'}

    TEST.api_policy_profiles.add(policy_profile_dict)
    TEST.policy_profiles.add(neutron.Profile(policy_profile_dict))

    # Network profile binding.
    network_profile_binding_dict = {'profile_id':
                                    '00000000-1111-1111-1111-000000000000',
                                    'tenant_id': network_dict['tenant_id']}

    TEST.api_network_profile_binding.add(network_profile_binding_dict)
    TEST.network_profile_binding.add(neutron.Profile(
        network_profile_binding_dict))

    # Policy profile binding.
    policy_profile_binding_dict = {'profile_id':
                                   '00000000-9999-9999-9999-000000000000',
                                   'tenant_id': network_dict['tenant_id']}

    TEST.api_policy_profile_binding.add(policy_profile_binding_dict)
    TEST.policy_profile_binding.add(neutron.Profile(
        policy_profile_binding_dict))

    # Ports on 1st network.
    port_dict = {'admin_state_up': True,
                 'device_id': 'af75c8e5-a1cc-4567-8d04-44fcd6922890',
                 'device_owner': 'network:dhcp',
                 'fixed_ips': [{'ip_address': '10.0.0.3',
                                'subnet_id': subnet_dict['id']}],
                 'id': '063cf7f3-ded1-4297-bc4c-31eae876cc91',
                 'mac_address': 'fa:16:3e:9c:d5:7e',
                 'name': '',
                 'network_id': network_dict['id'],
                 'status': 'ACTIVE',
                 'tenant_id': network_dict['tenant_id']}
    TEST.api_ports.add(port_dict)
    TEST.ports.add(neutron.Port(port_dict))

    port_dict = {'admin_state_up': True,
                 'device_id': '1',
                 'device_owner': 'compute:nova',
                 'fixed_ips': [{'ip_address': '10.0.0.4',
                                'subnet_id': subnet_dict['id']}],
                 'id': '7e6ce62c-7ea2-44f8-b6b4-769af90a8406',
                 'mac_address': 'fa:16:3e:9d:e6:2f',
                 'name': '',
                 'network_id': network_dict['id'],
                 'status': 'ACTIVE',
                 'tenant_id': network_dict['tenant_id']}
    TEST.api_ports.add(port_dict)
    TEST.ports.add(neutron.Port(port_dict))
    assoc_port = port_dict

    port_dict = {'admin_state_up': True,
                 'device_id': '279989f7-54bb-41d9-ba42-0d61f12fda61',
                 'device_owner': 'network:router_interface',
                 'fixed_ips': [{'ip_address': '10.0.0.1',
                                'subnet_id': subnet_dict['id']}],
                 'id': '9036eedb-e7fa-458e-bc6e-d9d06d9d1bc4',
                 'mac_address': 'fa:16:3e:9c:d5:7f',
                 'name': '',
                 'network_id': network_dict['id'],
                 'status': 'ACTIVE',
                 'tenant_id': network_dict['tenant_id']}
    TEST.api_ports.add(port_dict)
    TEST.ports.add(neutron.Port(port_dict))

    # 2nd network.
    network_dict = {'admin_state_up': True,
                    'id': '72c3ab6c-c80f-4341-9dc5-210fa31ac6c2',
                    'name': 'net2',
                    'status': 'ACTIVE',
                    'subnets': ['3f7c5d79-ee55-47b0-9213-8e669fb03009'],
                    'tenant_id': '2',
                    'router:external': False,
                    'shared': True}
    subnet_dict = {'allocation_pools': [{'end': '172.16.88.254',
                                         'start': '172.16.88.2'}],
                   'dns_nameservers': ['10.56.1.20', '10.56.1.21'],
                   'host_routes': [{'destination': '192.168.20.0/24',
                                    'nexthop': '172.16.88.253'},
                                   {'destination': '192.168.21.0/24',
                                    'nexthop': '172.16.88.252'}],
                   'cidr': '172.16.88.0/24',
                   'enable_dhcp': True,
                   'gateway_ip': '172.16.88.1',
                   'id': '3f7c5d79-ee55-47b0-9213-8e669fb03009',
                   'ip_version': 4,
                   'name': 'aaaa',
                   'network_id': network_dict['id'],
                   'tenant_id': network_dict['tenant_id']}

    TEST.api_networks.add(network_dict)
    TEST.api_subnets.add(subnet_dict)

    network = copy.deepcopy(network_dict)
    subnet = neutron.Subnet(subnet_dict)
    network['subnets'] = [subnet]
    TEST.networks.add(neutron.Network(network))
    TEST.subnets.add(subnet)

    port_dict = {'admin_state_up': True,
                 'device_id': '2',
                 'device_owner': 'compute:nova',
                 'fixed_ips': [{'ip_address': '172.16.88.3',
                                'subnet_id': subnet_dict['id']}],
                 'id': '1db2cc37-3553-43fa-b7e2-3fc4eb4f9905',
                 'mac_address': 'fa:16:3e:56:e6:2f',
                 'name': '',
                 'network_id': network_dict['id'],
                 'status': 'ACTIVE',
                 'tenant_id': network_dict['tenant_id']}

    TEST.api_ports.add(port_dict)
    TEST.ports.add(neutron.Port(port_dict))

    # External network.
    network_dict = {'admin_state_up': True,
                    'id': '9b466b94-213a-4cda-badf-72c102a874da',
                    'name': 'ext_net',
                    'status': 'ACTIVE',
                    'subnets': ['d6bdc71c-7566-4d32-b3ff-36441ce746e8'],
                    'tenant_id': '3',
                    'router:external': True,
                    'shared': False}
    subnet_dict = {'allocation_pools': [{'start': '172.24.4.226',
                                         'end': '172.24.4.238'}],
                   'dns_nameservers': [],
                   'host_routes': [],
                   'cidr': '172.24.4.0/28',
                   'enable_dhcp': False,
                   'gateway_ip': '172.24.4.225',
                   'id': 'd6bdc71c-7566-4d32-b3ff-36441ce746e8',
                   'ip_version': 4,
                   'name': 'ext_subnet',
                   'network_id': network_dict['id'],
                   'tenant_id': network_dict['tenant_id']}
    ext_net = network_dict

    TEST.api_networks.add(network_dict)
    TEST.api_subnets.add(subnet_dict)

    network = copy.deepcopy(network_dict)
    subnet = neutron.Subnet(subnet_dict)
    network['subnets'] = [subnet]
    TEST.networks.add(neutron.Network(network))
    TEST.subnets.add(subnet)

    # 1st v6 network.
    network_dict = {'admin_state_up': True,
                    'id': '96688ea1-ffa5-78ec-22ca-33aaabfaf775',
                    'name': 'v6_net1',
                    'status': 'ACTIVE',
                    'subnets': ['88ddd443-4377-ab1f-87dd-4bc4a662dbb6'],
                    'tenant_id': '1',
                    'router:external': False,
                    'shared': False}
    subnet_dict = {'allocation_pools': [{'end': 'ff09::ff',
                                         'start': 'ff09::02'}],
                   'dns_nameservers': [],
                   'host_routes': [],
                   'cidr': 'ff09::/64',
                   'enable_dhcp': True,
                   'gateway_ip': 'ff09::1',
                   'id': network_dict['subnets'][0],
                   'ip_version': 6,
                   'name': 'v6_subnet1',
                   'network_id': network_dict['id'],
                   'tenant_id': network_dict['tenant_id'],
                   'ipv6_modes': 'none/none'}

    TEST.api_networks.add(network_dict)
    TEST.api_subnets.add(subnet_dict)

    network = copy.deepcopy(network_dict)
    subnet = neutron.Subnet(subnet_dict)
    network['subnets'] = [subnet]
    TEST.networks.add(neutron.Network(network))
    TEST.subnets.add(subnet)

    # 2nd v6 network - slaac.
    network_dict = {'admin_state_up': True,
                    'id': 'c62e4bb3-296a-4cd1-8f6b-aaa7a0092326',
                    'name': 'v6_net2',
                    'status': 'ACTIVE',
                    'subnets': ['5d736a21-0036-4779-8f8b-eed5f98077ec'],
                    'tenant_id': '1',
                    'router:external': False,
                    'shared': False}
    subnet_dict = {'allocation_pools': [{'end': 'ff09::ff',
                                         'start': 'ff09::02'}],
                   'dns_nameservers': [],
                   'host_routes': [],
                   'cidr': 'ff09::/64',
                   'enable_dhcp': True,
                   'gateway_ip': 'ff09::1',
                   'id': network_dict['subnets'][0],
                   'ip_version': 6,
                   'name': 'v6_subnet2',
                   'network_id': network_dict['id'],
                   'tenant_id': network_dict['tenant_id'],
                   'ipv6_modes': 'slaac/slaac'}

    TEST.api_networks.add(network_dict)
    TEST.api_subnets.add(subnet_dict)

    network = copy.deepcopy(network_dict)
    subnet = neutron.Subnet(subnet_dict)
    network['subnets'] = [subnet]
    TEST.networks.add(neutron.Network(network))
    TEST.subnets.add(subnet)

    # Set up router data.
    port_dict = {'admin_state_up': True,
                 'device_id': '7180cede-bcd8-4334-b19f-f7ef2f331f53',
                 'device_owner': 'network:router_gateway',
                 'fixed_ips': [{'ip_address': '10.0.0.3',
                                'subnet_id': subnet_dict['id']}],
                 'id': '44ec6726-4bdc-48c5-94d4-df8d1fbf613b',
                 'mac_address': 'fa:16:3e:9c:d5:7e',
                 'name': '',
                 'network_id': TEST.networks.get(name="ext_net")['id'],
                 'status': 'ACTIVE',
                 'tenant_id': '1'}
    TEST.api_ports.add(port_dict)
    TEST.ports.add(neutron.Port(port_dict))

    router_dict = {'id': '279989f7-54bb-41d9-ba42-0d61f12fda61',
                   'name': 'router1',
                   'status': 'ACTIVE',
                   'admin_state_up': True,
                   'distributed': True,
                   'external_gateway_info':
                       {'network_id': ext_net['id']},
                   'tenant_id': '1'}
    TEST.api_routers.add(router_dict)
    TEST.routers.add(neutron.Router(router_dict))
    router_dict = {'id': '10e3dc42-1ce1-4d48-87cf-7fc333055d6c',
                   'name': 'router2',
                   'status': 'ACTIVE',
                   'admin_state_up': False,
                   'distributed': False,
                   'external_gateway_info': None,
                   'tenant_id': '1'}
    TEST.api_routers.add(router_dict)
    TEST.routers.add(neutron.Router(router_dict))
    router_dict = {'id': '7180cede-bcd8-4334-b19f-f7ef2f331f53',
                   'name': 'rulerouter',
                   'status': 'ACTIVE',
                   'admin_state_up': True,
                   'distributed': False,
                   'external_gateway_info':
                       {'network_id': ext_net['id']},
                   'tenant_id': '1',
                   'router_rules': [{'id': '101',
                                     'action': 'deny',
                                     'source': 'any',
                                     'destination': 'any',
                                     'nexthops': []},
                                    {'id': '102',
                                     'action': 'permit',
                                     'source': 'any',
                                     'destination': '8.8.8.8/32',
                                     'nexthops': ['1.0.0.2', '1.0.0.1']}]}
    TEST.api_routers.add(router_dict)
    TEST.routers_with_rules.add(neutron.Router(router_dict))

    # Floating IP.
    # Unassociated.
    fip_dict = {'tenant_id': '1',
                'floating_ip_address': '172.16.88.227',
                'floating_network_id': ext_net['id'],
                'id': '9012cd70-cfae-4e46-b71e-6a409e9e0063',
                'fixed_ip_address': None,
                'port_id': None,
                'router_id': None}
    TEST.api_q_floating_ips.add(fip_dict)
    TEST.q_floating_ips.add(neutron.FloatingIp(fip_dict))

    # Associated (with compute port on 1st network).
    fip_dict = {'tenant_id': '1',
                'floating_ip_address': '172.16.88.228',
                'floating_network_id': ext_net['id'],
                'id': 'a97af8f2-3149-4b97-abbd-e49ad19510f7',
                'fixed_ip_address': assoc_port['fixed_ips'][0]['ip_address'],
                'port_id': assoc_port['id'],
                'router_id': router_dict['id']}
    TEST.api_q_floating_ips.add(fip_dict)
    TEST.q_floating_ips.add(neutron.FloatingIp(fip_dict))

    # Security group.

    sec_group_1 = {'tenant_id': '1',
                   'description': 'default',
                   'id': 'faad7c80-3b62-4440-967c-13808c37131d',
                   'name': 'default'}
    sec_group_2 = {'tenant_id': '1',
                   'description': 'NotDefault',
                   'id': '27a5c9a1-bdbb-48ac-833a-2e4b5f54b31d',
                   'name': 'other_group'}
    sec_group_3 = {'tenant_id': '1',
                   'description': 'NotDefault',
                   'id': '443a4d7a-4bd2-4474-9a77-02b35c9f8c95',
                   'name': 'another_group'}

    def add_rule_to_group(secgroup, default_only=True):
        rule_egress_ipv4 = {
            'id': str(uuid.uuid4()),
            'direction': u'egress', 'ethertype': u'IPv4',
            'port_range_min': None, 'port_range_max': None,
            'protocol': None, 'remote_group_id': None,
            'remote_ip_prefix': None,
            'security_group_id': secgroup['id'],
            'tenant_id': secgroup['tenant_id']}
        rule_egress_ipv6 = {
            'id': str(uuid.uuid4()),
            'direction': u'egress', 'ethertype': u'IPv6',
            'port_range_min': None, 'port_range_max': None,
            'protocol': None, 'remote_group_id': None,
            'remote_ip_prefix': None,
            'security_group_id': secgroup['id'],
            'tenant_id': secgroup['tenant_id']}

        rule_tcp_80 = {
            'id': str(uuid.uuid4()),
            'direction': u'ingress', 'ethertype': u'IPv4',
            'port_range_min': 80, 'port_range_max': 80,
            'protocol': u'tcp', 'remote_group_id': None,
            'remote_ip_prefix': u'0.0.0.0/0',
            'security_group_id': secgroup['id'],
            'tenant_id': secgroup['tenant_id']}
        rule_icmp = {
            'id': str(uuid.uuid4()),
            'direction': u'ingress', 'ethertype': u'IPv4',
            'port_range_min': 5, 'port_range_max': 8,
            'protocol': u'icmp', 'remote_group_id': None,
            'remote_ip_prefix': u'0.0.0.0/0',
            'security_group_id': secgroup['id'],
            'tenant_id': secgroup['tenant_id']}
        rule_group = {
            'id': str(uuid.uuid4()),
            'direction': u'ingress', 'ethertype': u'IPv4',
            'port_range_min': 80, 'port_range_max': 80,
            'protocol': u'tcp', 'remote_group_id': sec_group_1['id'],
            'remote_ip_prefix': None,
            'security_group_id': secgroup['id'],
            'tenant_id': secgroup['tenant_id']}
        rule_all_tcp = {
            'id': str(uuid.uuid4()),
            'direction': u'egress', 'ethertype': u'IPv4',
            'port_range_min': 1, 'port_range_max': 65535,
            'protocol': u'tcp', 'remote_group_id': None,
            'remote_ip_prefix': u'0.0.0.0/24',
            'security_group_id': secgroup['id'],
            'tenant_id': secgroup['tenant_id']}

        rules = []
        if not default_only:
            rules += [rule_tcp_80, rule_icmp, rule_group, rule_all_tcp]
        rules += [rule_egress_ipv4, rule_egress_ipv6]
        secgroup['security_group_rules'] = rules

    add_rule_to_group(sec_group_1, default_only=False)
    add_rule_to_group(sec_group_2)
    add_rule_to_group(sec_group_3)

    groups = [sec_group_1, sec_group_2, sec_group_3]
    sg_name_dict = dict([(sg['id'], sg['name']) for sg in groups])
    for sg in groups:
        # Neutron API.
        TEST.api_q_secgroups.add(sg)
        for rule in sg['security_group_rules']:
            TEST.api_q_secgroup_rules.add(copy.copy(rule))
        # OpenStack Dashboard internal API.
        TEST.q_secgroups.add(
            neutron.SecurityGroup(copy.deepcopy(sg), sg_name_dict))
        for rule in sg['security_group_rules']:
            TEST.q_secgroup_rules.add(
                neutron.SecurityGroupRule(copy.copy(rule), sg_name_dict))

    # LBaaS.

    # 1st pool.
    pool_dict = {'id': '8913dde8-4915-4b90-8d3e-b95eeedb0d49',
                 'tenant_id': '1',
                 'vip_id': 'abcdef-c3eb-4fee-9763-12de3338041e',
                 'name': 'pool1',
                 'description': 'pool description',
                 'subnet_id': TEST.subnets.first().id,
                 'protocol': 'HTTP',
                 'lb_method': 'ROUND_ROBIN',
                 'health_monitors': TEST.monitors.list(),
                 'members': ['78a46e5e-eb1a-418a-88c7-0e3f5968b08'],
                 'admin_state_up': True,
                 'status': 'ACTIVE',
                 'provider': 'haproxy'}
    TEST.api_pools.add(pool_dict)
    TEST.pools.add(lbaas.Pool(pool_dict))

    # 2nd pool.
    pool_dict = {'id': '8913dde8-4915-4b90-8d3e-b95eeedb0d50',
                 'tenant_id': '1',
                 'vip_id': 'f0881d38-c3eb-4fee-9763-12de3338041d',
                 'name': 'pool2',
                 'description': 'pool description',
                 'subnet_id': TEST.subnets.first().id,
                 'protocol': 'HTTPS',
                 'lb_method': 'ROUND_ROBIN',
                 'health_monitors': TEST.monitors.list()[0:1],
                 'members': [],
                 'status': 'PENDING_CREATE',
                 'admin_state_up': True}
    TEST.api_pools.add(pool_dict)
    TEST.pools.add(lbaas.Pool(pool_dict))

    # 1st vip.
    vip_dict = {'id': 'abcdef-c3eb-4fee-9763-12de3338041e',
                'name': 'vip1',
                'address': '10.0.0.100',
                'description': 'vip description',
                'subnet_id': TEST.subnets.first().id,
                'port_id': TEST.ports.first().id,
                'subnet': TEST.subnets.first().cidr,
                'protocol_port': 80,
                'protocol': pool_dict['protocol'],
                'pool_id': pool_dict['id'],
                'session_persistence': {'type': 'APP_COOKIE',
                                        'cookie_name': 'jssessionid'},
                'connection_limit': 10,
                'admin_state_up': True}
    TEST.api_vips.add(vip_dict)
    TEST.vips.add(lbaas.Vip(vip_dict))

    # 2nd vip.
    vip_dict = {'id': 'f0881d38-c3eb-4fee-9763-12de3338041d',
                'name': 'vip2',
                'address': '10.0.0.110',
                'description': 'vip description',
                'subnet_id': TEST.subnets.first().id,
                'port_id': TEST.ports.list()[0].id,
                'subnet': TEST.subnets.first().cidr,
                'protocol_port': 80,
                'protocol': pool_dict['protocol'],
                'pool_id': pool_dict['id'],
                'session_persistence': {'type': 'APP_COOKIE',
                                        'cookie_name': 'jssessionid'},
                'connection_limit': 10,
                'admin_state_up': True}
    TEST.api_vips.add(vip_dict)
    TEST.vips.add(lbaas.Vip(vip_dict))

    # 1st member.
    member_dict = {'id': '78a46e5e-eb1a-418a-88c7-0e3f5968b08',
                   'tenant_id': '1',
                   'pool_id': pool_dict['id'],
                   'address': '10.0.0.11',
                   'protocol_port': 80,
                   'weight': 10,
                   'status': 'ACTIVE',
                   'admin_state_up': True}
    TEST.api_members.add(member_dict)
    TEST.members.add(lbaas.Member(member_dict))

    # 2nd member.
    member_dict = {'id': '41ac1f8d-6d9c-49a4-a1bf-41955e651f91',
                   'tenant_id': '1',
                   'pool_id': pool_dict['id'],
                   'address': '10.0.0.12',
                   'protocol_port': 80,
                   'weight': 10,
                   'status': 'ACTIVE',
                   'admin_state_up': True}
    TEST.api_members.add(member_dict)
    TEST.members.add(lbaas.Member(member_dict))

    # 1st monitor.
    monitor_dict = {'id': 'd4a0500f-db2b-4cc4-afcf-ec026febff96',
                    'type': 'http',
                    'delay': 10,
                    'timeout': 10,
                    'max_retries': 10,
                    'http_method': 'GET',
                    'url_path': '/',
                    'expected_codes': '200',
                    'admin_state_up': True,
                    "pools": [{"pool_id": TEST.pools.list()[0].id},
                              {"pool_id": TEST.pools.list()[1].id}],
                    }
    TEST.api_monitors.add(monitor_dict)
    TEST.monitors.add(lbaas.PoolMonitor(monitor_dict))

    # 2nd monitor.
    monitor_dict = {'id': 'd4a0500f-db2b-4cc4-afcf-ec026febff97',
                    'type': 'ping',
                    'delay': 10,
                    'timeout': 10,
                    'max_retries': 10,
                    'admin_state_up': True,
                    'pools': [],
                    }
    TEST.api_monitors.add(monitor_dict)
    TEST.monitors.add(lbaas.PoolMonitor(monitor_dict))

    # Quotas.
    quota_data = {'network': '10',
                  'subnet': '10',
                  'port': '50',
                  'router': '10',
                  'floatingip': '50',
                  'security_group': '20',
                  'security_group_rule': '100',
                  }
    TEST.neutron_quotas.add(base.QuotaSet(quota_data))

    # Extensions.
    extension_1 = {"name": "security-group",
                   "alias": "security-group",
                   "description": "The security groups extension."}
    extension_2 = {"name": "Quota management support",
                   "alias": "quotas",
                   "description": "Expose functions for quotas management"}
    extension_3 = {"name": "Provider network",
                   "alias": "provider",
                   "description": "Provider network extension"}
    extension_4 = {"name": "Distributed Virtual Router",
                   "alias": "dvr",
                   "description":
                   "Enables configuration of Distributed Virtual Routers."}
    extension_5 = {"name": "HA Router extension",
                   "alias": "l3-ha",
                   "description": "Add HA capability to routers."}
    extension_6 = {"name": "LoadBalancing service",
                   "alias": "lbaas",
                   "description": "Extension for LoadBalancing service"}
    TEST.api_extensions.add(extension_1)
    TEST.api_extensions.add(extension_2)
    TEST.api_extensions.add(extension_3)
    TEST.api_extensions.add(extension_4)
    TEST.api_extensions.add(extension_5)
    TEST.api_extensions.add(extension_6)

    # 1st agent.
    agent_dict = {"binary": "neutron-openvswitch-agent",
                  "description": None,
                  "admin_state_up": True,
                  "heartbeat_timestamp": "2013-07-26 06:51:47",
                  "alive": True,
                  "id": "c876ff05-f440-443e-808c-1d34cda3e88a",
                  "topic": "N/A",
                  "host": "devstack001",
                  "agent_type": "Open vSwitch agent",
                  "started_at": "2013-07-26 05:23:28",
                  "created_at": "2013-07-26 05:23:28",
                  "configurations": {"devices": 2}}
    TEST.api_agents.add(agent_dict)
    TEST.agents.add(neutron.Agent(agent_dict))

    # 2nd agent.
    agent_dict = {"binary": "neutron-dhcp-agent",
                  "description": None,
                  "admin_state_up": True,
                  "heartbeat_timestamp": "2013-07-26 06:51:48",
                  "alive": True,
                  "id": "f0d12e3d-1973-41a2-b977-b95693f9a8aa",
                  "topic": "dhcp_agent",
                  "host": "devstack001",
                  "agent_type": "DHCP agent",
                  "started_at": "2013-07-26 05:23:30",
                  "created_at": "2013-07-26 05:23:30",
                  "configurations": {
                      "subnets": 1,
                      "use_namespaces": True,
                      "dhcp_lease_duration": 120,
                      "dhcp_driver": "neutron.agent.linux.dhcp.Dnsmasq",
                      "networks": 1,
                      "ports": 1}}
    TEST.api_agents.add(agent_dict)
    TEST.agents.add(neutron.Agent(agent_dict))

    # Service providers.
    provider_1 = {"service_type": "LOADBALANCER",
                  "name": "haproxy",
                  "default": True}
    TEST.providers.add(provider_1)

    # VPNaaS.

    # 1st VPNService.
    vpnservice_dict = {'id': '09a26949-6231-4f72-942a-0c8c0ddd4d61',
                       'tenant_id': '1',
                       'name': 'cloud_vpn1',
                       'description': 'vpn description',
                       'subnet_id': TEST.subnets.first().id,
                       'router_id': TEST.routers.first().id,
                       'vpn_type': 'ipsec',
                       'ipsecsiteconnections': [],
                       'admin_state_up': True,
                       'status': 'Active',
                       'ipsecsiteconns': TEST.ipsecsiteconnections.list()}
    TEST.api_vpnservices.add(vpnservice_dict)
    TEST.vpnservices.add(vpn.VPNService(vpnservice_dict))

    # 2nd VPNService.
    vpnservice_dict = {'id': '09a26949-6231-4f72-942a-0c8c0ddd4d62',
                       'tenant_id': '1',
                       'name': 'cloud_vpn2',
                       'description': 'vpn description',
                       'subnet_id': TEST.subnets.first().id,
                       'router_id': TEST.routers.first().id,
                       'vpn_type': 'ipsec',
                       'ipsecsiteconnections': [],
                       'admin_state_up': True,
                       'status': 'Active',
                       'ipsecsiteconns': []}
    TEST.api_vpnservices.add(vpnservice_dict)
    TEST.vpnservices.add(vpn.VPNService(vpnservice_dict))

    # 1st IKEPolicy
    ikepolicy_dict = {'id': 'a1f009b7-0ffa-43a7-ba19-dcabb0b4c981',
                      'tenant_id': '1',
                      'name': 'ikepolicy_1',
                      'description': 'ikepolicy description',
                      'auth_algorithm': 'sha1',
                      'encryption_algorithm': 'aes-256',
                      'ike_version': 'v1',
                      'lifetime': {'units': 'seconds', 'value': 3600},
                      'phase1_negotiation_mode': 'main',
                      'pfs': 'group5',
                      'ipsecsiteconns': TEST.ipsecsiteconnections.list()}
    TEST.api_ikepolicies.add(ikepolicy_dict)
    TEST.ikepolicies.add(vpn.IKEPolicy(ikepolicy_dict))

    # 2nd IKEPolicy
    ikepolicy_dict = {'id': 'a1f009b7-0ffa-43a7-ba19-dcabb0b4c982',
                      'tenant_id': '1',
                      'name': 'ikepolicy_2',
                      'description': 'ikepolicy description',
                      'auth_algorithm': 'sha1',
                      'encryption_algorithm': 'aes-256',
                      'ike_version': 'v1',
                      'lifetime': {'units': 'seconds', 'value': 3600},
                      'phase1_negotiation_mode': 'main',
                      'pfs': 'group5',
                      'ipsecsiteconns': []}
    TEST.api_ikepolicies.add(ikepolicy_dict)
    TEST.ikepolicies.add(vpn.IKEPolicy(ikepolicy_dict))

    # 1st IPSecPolicy
    ipsecpolicy_dict = {'id': '8376e1dd-2b1c-4346-b23c-6989e75ecdb8',
                        'tenant_id': '1',
                        'name': 'ipsecpolicy_1',
                        'description': 'ipsecpolicy description',
                        'auth_algorithm': 'sha1',
                        'encapsulation_mode': 'tunnel',
                        'encryption_algorithm': '3des',
                        'lifetime': {'units': 'seconds', 'value': 3600},
                        'pfs': 'group5',
                        'transform_protocol': 'esp',
                        'ipsecsiteconns': TEST.ipsecsiteconnections.list()}
    TEST.api_ipsecpolicies.add(ipsecpolicy_dict)
    TEST.ipsecpolicies.add(vpn.IPSecPolicy(ipsecpolicy_dict))

    # 2nd IPSecPolicy
    ipsecpolicy_dict = {'id': '8376e1dd-2b1c-4346-b23c-6989e75ecdb9',
                        'tenant_id': '1',
                        'name': 'ipsecpolicy_2',
                        'description': 'ipsecpolicy description',
                        'auth_algorithm': 'sha1',
                        'encapsulation_mode': 'tunnel',
                        'encryption_algorithm': '3des',
                        'lifetime': {'units': 'seconds', 'value': 3600},
                        'pfs': 'group5',
                        'transform_protocol': 'esp',
                        'ipsecsiteconns': []}
    TEST.api_ipsecpolicies.add(ipsecpolicy_dict)
    TEST.ipsecpolicies.add(vpn.IPSecPolicy(ipsecpolicy_dict))

    # 1st IPSecSiteConnection
    ipsecsiteconnection_dict = {'id': 'dd1dd3a0-f349-49be-b013-245e147763d6',
                                'tenant_id': '1',
                                'name': 'ipsec_connection_1',
                                'description': 'vpn connection description',
                                'dpd': {'action': 'hold',
                                        'interval': 30,
                                        'timeout': 120},
                                'ikepolicy_id': ikepolicy_dict['id'],
                                'initiator': 'bi-directional',
                                'ipsecpolicy_id': ipsecpolicy_dict['id'],
                                'mtu': 1500,
                                'peer_address':
                                '2607:f0d0:4545:3:200:f8ff:fe21:67cf',
                                'peer_cidrs': ['20.1.0.0/24', '21.1.0.0/24'],
                                'peer_id':
                                    '2607:f0d0:4545:3:200:f8ff:fe21:67cf',
                                'psk': 'secret',
                                'vpnservice_id': vpnservice_dict['id'],
                                'admin_state_up': True,
                                'status': 'Active'}
    TEST.api_ipsecsiteconnections.add(ipsecsiteconnection_dict)
    TEST.ipsecsiteconnections.add(
        vpn.IPSecSiteConnection(ipsecsiteconnection_dict))

    # 2nd IPSecSiteConnection
    ipsecsiteconnection_dict = {'id': 'dd1dd3a0-f349-49be-b013-245e147763d7',
                                'tenant_id': '1',
                                'name': 'ipsec_connection_2',
                                'description': 'vpn connection description',
                                'dpd': {'action': 'hold',
                                        'interval': 30,
                                        'timeout': 120},
                                'ikepolicy_id': ikepolicy_dict['id'],
                                'initiator': 'bi-directional',
                                'ipsecpolicy_id': ipsecpolicy_dict['id'],
                                'mtu': 1500,
                                'peer_address': '172.0.0.2',
                                'peer_cidrs': ['20.1.0.0/24'],
                                'peer_id': '172.0.0.2',
                                'psk': 'secret',
                                'vpnservice_id': vpnservice_dict['id'],
                                'admin_state_up': True,
                                'status': 'Active'}
    TEST.api_ipsecsiteconnections.add(ipsecsiteconnection_dict)
    TEST.ipsecsiteconnections.add(
        vpn.IPSecSiteConnection(ipsecsiteconnection_dict))

    # FWaaS

    # 1st rule (used by 1st policy)
    rule1_dict = {'id': 'f0881d38-c3eb-4fee-9763-12de3338041d',
                  'tenant_id': '1',
                  'name': 'rule1',
                  'description': 'rule1 description',
                  'protocol': 'tcp',
                  'action': 'allow',
                  'source_ip_address': '1.2.3.0/24',
                  'source_port': '80',
                  'destination_ip_address': '4.5.6.7/32',
                  'destination_port': '1:65535',
                  'firewall_policy_id': 'abcdef-c3eb-4fee-9763-12de3338041e',
                  'position': 1,
                  'shared': True,
                  'enabled': True}
    TEST.api_fw_rules.add(rule1_dict)

    rule1 = fwaas.Rule(copy.deepcopy(rule1_dict))
    # NOTE: rule1['policy'] is set below
    TEST.fw_rules.add(rule1)

    # 2nd rule (used by 2nd policy; no name)
    rule2_dict = {'id': 'c6298a93-850f-4f64-b78a-959fd4f1e5df',
                  'tenant_id': '1',
                  'name': '',
                  'description': '',
                  'protocol': 'udp',
                  'action': 'deny',
                  'source_ip_address': '1.2.3.0/24',
                  'source_port': '80',
                  'destination_ip_address': '4.5.6.7/32',
                  'destination_port': '1:65535',
                  'firewall_policy_id': 'abcdef-c3eb-4fee-9763-12de3338041e',
                  'position': 2,
                  'shared': True,
                  'enabled': True}
    TEST.api_fw_rules.add(rule2_dict)

    rule2 = fwaas.Rule(copy.deepcopy(rule2_dict))
    # NOTE: rule2['policy'] is set below
    TEST.fw_rules.add(rule2)

    # 3rd rule (not used by any policy)
    rule3_dict = {'id': 'h0881d38-c3eb-4fee-9763-12de3338041d',
                  'tenant_id': '1',
                  'name': 'rule3',
                  'description': 'rule3 description',
                  'protocol': None,
                  'action': 'allow',
                  'source_ip_address': '1.2.3.0/24',
                  'source_port': '80',
                  'destination_ip_address': '4.5.6.7/32',
                  'destination_port': '1:65535',
                  'firewall_policy_id': None,
                  'position': None,
                  'shared': True,
                  'enabled': True}
    TEST.api_fw_rules.add(rule3_dict)

    rule3 = fwaas.Rule(copy.deepcopy(rule3_dict))
    # rule3 is not associated with any policy
    rule3._apidict['policy'] = None
    TEST.fw_rules.add(rule3)

    # 1st policy (associated with 2 rules)
    policy1_dict = {'id': 'abcdef-c3eb-4fee-9763-12de3338041e',
                    'tenant_id': '1',
                    'name': 'policy1',
                    'description': 'policy with two rules',
                    'firewall_rules': [rule1_dict['id'], rule2_dict['id']],
                    'audited': True,
                    'shared': True}
    TEST.api_fw_policies.add(policy1_dict)

    policy1 = fwaas.Policy(copy.deepcopy(policy1_dict))
    policy1._apidict['rules'] = [rule1, rule2]
    TEST.fw_policies.add(policy1)

    # Reverse relations (rule -> policy)
    rule1._apidict['policy'] = policy1
    rule2._apidict['policy'] = policy1

    # 2nd policy (associated with no rules; no name)
    policy2_dict = {'id': 'cf50b331-787a-4623-825e-da794c918d6a',
                    'tenant_id': '1',
                    'name': '',
                    'description': '',
                    'firewall_rules': [],
                    'audited': False,
                    'shared': False}
    TEST.api_fw_policies.add(policy2_dict)

    policy2 = fwaas.Policy(copy.deepcopy(policy2_dict))
    policy2._apidict['rules'] = []
    TEST.fw_policies.add(policy2)

    # 1st firewall
    fw1_dict = {'id': '8913dde8-4915-4b90-8d3e-b95eeedb0d49',
                'tenant_id': '1',
                'firewall_policy_id':
                    'abcdef-c3eb-4fee-9763-12de3338041e',
                'name': 'firewall1',
                'description': 'firewall description',
                'status': 'PENDING_CREATE',
                'shared': True,
                'admin_state_up': True}
    TEST.api_firewalls.add(fw1_dict)

    fw1 = fwaas.Firewall(copy.deepcopy(fw1_dict))
    fw1._apidict['policy'] = policy1
    TEST.firewalls.add(fw1)

    # 2nd firewall (no name)
    fw2_dict = {'id': '1aa75150-415f-458e-bae5-5a362a4fb1f7',
                'tenant_id': '1',
                'firewall_policy_id':
                    'abcdef-c3eb-4fee-9763-12de3338041e',
                'name': '',
                'description': '',
                'status': 'PENDING_CREATE',
                'shared': True,
                'admin_state_up': True}
    TEST.api_firewalls.add(fw2_dict)

    fw2 = fwaas.Firewall(copy.deepcopy(fw2_dict))
    fw2._apidict['policy'] = policy1
    TEST.firewalls.add(fw2)

    # Additional Cisco N1K profiles.

    # 2nd network profile for network when using the cisco n1k plugin.
    # Profile applied on 1st network.
    net_profile_dict = {'name': 'net_profile_test2',
                        'segment_type': 'overlay',
                        'sub_type': 'native_vxlan',
                        'segment_range': '10000-10100',
                        'multicast_ip_range': '144.0.0.0-144.0.0.100',
                        'id':
                        '00000000-2222-2222-2222-000000000000',
                        'project': '1',
                        # overlay profiles have no physical_network
                        'physical_network': None}

    TEST.api_net_profiles.add(net_profile_dict)
    TEST.net_profiles.add(neutron.Profile(net_profile_dict))

    # 2nd network profile binding.
    network_profile_binding_dict = {'profile_id':
                                    '00000000-2222-2222-2222-000000000000',
                                    'tenant_id': '1'}

    TEST.api_network_profile_binding.add(network_profile_binding_dict)
    TEST.network_profile_binding.add(neutron.Profile(
        network_profile_binding_dict))

    # 3rd network profile for network when using the cisco n1k plugin
    # Profile applied on 1st network
    net_profile_dict = {'name': 'net_profile_test3',
                        'segment_type': 'overlay',
                        'sub_type': 'other',
                        'other_subtype': 'GRE',
                        'segment_range': '11000-11100',
                        'id':
                        '00000000-3333-3333-3333-000000000000',
                        'project': '1'}

    TEST.api_net_profiles.add(net_profile_dict)
    TEST.net_profiles.add(neutron.Profile(net_profile_dict))

    # 3rd network profile binding
    network_profile_binding_dict = {'profile_id':
                                    '00000000-3333-3333-3333-000000000000',
                                    'tenant_id': '1'}

    TEST.api_network_profile_binding.add(network_profile_binding_dict)
    TEST.network_profile_binding.add(neutron.Profile(
        network_profile_binding_dict))

    # 4th network profile for network when using the cisco n1k plugin
    # Profile applied on 1st network
    net_profile_dict = {'name': 'net_profile_test4',
                        'segment_type': 'trunk',
                        'sub_type_trunk': 'vlan',
                        'id':
                        '00000000-4444-4444-4444-000000000000',
                        'project': '1'}

    TEST.api_net_profiles.add(net_profile_dict)
    TEST.net_profiles.add(neutron.Profile(net_profile_dict))

    # 4th network profile binding
    network_profile_binding_dict = {'profile_id':
                                    '00000000-4444-4444-4444-000000000000',
                                    'tenant_id': '1'}

    TEST.api_network_profile_binding.add(network_profile_binding_dict)
    TEST.network_profile_binding.add(neutron.Profile(
        network_profile_binding_dict))
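
Each mocked security-group rule above takes its id from str(uuid.uuid4()), so the rule fixtures never share identifiers across groups or test runs. Below is a minimal standalone sketch of that pattern; the make_tcp_rule helper and the ids passed to it are hypothetical and not part of Horizon:

import uuid

def make_tcp_rule(secgroup_id, tenant_id, port=80):
    # Mirrors the rule fixtures above: a fresh uuid4 string as the rule id.
    return {'id': str(uuid.uuid4()),
            'direction': u'ingress', 'ethertype': u'IPv4',
            'port_range_min': port, 'port_range_max': port,
            'protocol': u'tcp', 'remote_group_id': None,
            'remote_ip_prefix': u'0.0.0.0/0',
            'security_group_id': secgroup_id,
            'tenant_id': tenant_id}

rule = make_tcp_rule('11111111-2222-3333-4444-555555555555', '1')
print(rule['id'])  # a different 36-character UUID string on every call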

Example 7

Project: avos
Source File: nova_data.py
View license
def data(TEST):
    TEST.servers = utils.TestDataContainer()
    TEST.flavors = utils.TestDataContainer()
    TEST.flavor_access = utils.TestDataContainer()
    TEST.keypairs = utils.TestDataContainer()
    TEST.security_groups = utils.TestDataContainer()
    TEST.security_groups_uuid = utils.TestDataContainer()
    TEST.security_group_rules = utils.TestDataContainer()
    TEST.security_group_rules_uuid = utils.TestDataContainer()
    TEST.volumes = utils.TestDataContainer()
    TEST.quotas = utils.TestDataContainer()
    TEST.quota_usages = utils.TestDataContainer()
    TEST.disabled_quotas = utils.TestDataContainer()
    TEST.floating_ips = utils.TestDataContainer()
    TEST.floating_ips_uuid = utils.TestDataContainer()
    TEST.usages = utils.TestDataContainer()
    TEST.certs = utils.TestDataContainer()
    TEST.volume_snapshots = utils.TestDataContainer()
    TEST.volume_types = utils.TestDataContainer()
    TEST.availability_zones = utils.TestDataContainer()
    TEST.hypervisors = utils.TestDataContainer()
    TEST.services = utils.TestDataContainer()
    TEST.aggregates = utils.TestDataContainer()
    TEST.hosts = utils.TestDataContainer()

    # Data returned by novaclient.
    # It is used if the API layer does data conversion.
    TEST.api_floating_ips = utils.TestDataContainer()
    TEST.api_floating_ips_uuid = utils.TestDataContainer()

    # Volumes
    volume = volumes.Volume(
        volumes.VolumeManager(None),
        {"id": "41023e92-8008-4c8b-8059-7f2293ff3775",
         "name": 'test_volume',
         "status": 'available',
         "size": 40,
         "display_name": 'Volume name',
         "created_at": '2012-04-01 10:30:00',
         "volume_type": None,
         "attachments": []})
    nameless_volume = volumes.Volume(
        volumes.VolumeManager(None),
        {"id": "3b189ac8-9166-ac7f-90c9-16c8bf9e01ac",
         "name": '',
         "status": 'in-use',
         "size": 10,
         "display_name": '',
         "display_description": '',
         "device": "/dev/hda",
         "created_at": '2010-11-21 18:34:25',
         "volume_type": 'vol_type_1',
         "attachments": [{"id": "1", "server_id": '1',
                          "device": "/dev/hda"}]})
    attached_volume = volumes.Volume(
        volumes.VolumeManager(None),
        {"id": "8cba67c1-2741-6c79-5ab6-9c2bf8c96ab0",
         "name": 'my_volume',
         "status": 'in-use',
         "size": 30,
         "display_name": 'My Volume',
         "display_description": '',
         "device": "/dev/hdk",
         "created_at": '2011-05-01 11:54:33',
         "volume_type": 'vol_type_2',
         "attachments": [{"id": "2", "server_id": '1',
                          "device": "/dev/hdk"}]})
    non_bootable_volume = volumes.Volume(
        volumes.VolumeManager(None),
        {"id": "41023e92-8008-4c8b-8059-7f2293ff3771",
         "name": 'non_bootable_volume',
         "status": 'available',
         "size": 40,
         "display_name": 'Non Bootable Volume',
         "created_at": '2012-04-01 10:30:00',
         "volume_type": None,
         "attachments": []})

    volume.bootable = 'true'
    nameless_volume.bootable = 'true'
    attached_volume.bootable = 'true'
    non_bootable_volume.bootable = 'false'

    TEST.volumes.add(volume)
    TEST.volumes.add(nameless_volume)
    TEST.volumes.add(attached_volume)
    TEST.volumes.add(non_bootable_volume)

    vol_type1 = volume_types.VolumeType(volume_types.VolumeTypeManager(None),
                                        {'id': 1,
                                         'name': 'vol_type_1'})
    vol_type2 = volume_types.VolumeType(volume_types.VolumeTypeManager(None),
                                        {'id': 2,
                                         'name': 'vol_type_2'})
    TEST.volume_types.add(vol_type1, vol_type2)

    # Flavors
    flavor_1 = flavors.Flavor(flavors.FlavorManager(None),
                              {'id': "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa",
                               'name': 'm1.tiny',
                               'vcpus': 1,
                               'disk': 0,
                               'ram': 512,
                               'swap': 0,
                               'extra_specs': {},
                               'os-flavor-access:is_public': True,
                               'OS-FLV-EXT-DATA:ephemeral': 0})
    flavor_2 = flavors.Flavor(flavors.FlavorManager(None),
                              {'id': "bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb",
                               'name': 'm1.massive',
                               'vcpus': 1000,
                               'disk': 1024,
                               'ram': 10000,
                               'swap': 0,
                               'extra_specs': {'Trusted': True, 'foo': 'bar'},
                               'os-flavor-access:is_public': True,
                               'OS-FLV-EXT-DATA:ephemeral': 2048})
    flavor_3 = flavors.Flavor(flavors.FlavorManager(None),
                              {'id': "dddddddd-dddd-dddd-dddd-dddddddddddd",
                               'name': 'm1.secret',
                               'vcpus': 1000,
                               'disk': 1024,
                               'ram': 10000,
                               'swap': 0,
                               'extra_specs': {},
                               'os-flavor-access:is_public': False,
                               'OS-FLV-EXT-DATA:ephemeral': 2048})
    flavor_4 = flavors.Flavor(flavors.FlavorManager(None),
                              {'id': "eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee",
                               'name': 'm1.metadata',
                               'vcpus': 1000,
                               'disk': 1024,
                               'ram': 10000,
                               'swap': 0,
                               'extra_specs': FlavorExtraSpecs(
                                   {'key': 'key_mock',
                                    'value': 'value_mock'}),
                               'os-flavor-access:is_public': False,
                               'OS-FLV-EXT-DATA:ephemeral': 2048})
    TEST.flavors.add(flavor_1, flavor_2, flavor_3, flavor_4)

    flavor_access_manager = flavor_access.FlavorAccessManager(None)
    flavor_access_1 = flavor_access.FlavorAccess(
        flavor_access_manager,
        {"tenant_id": "1",
         "flavor_id": "dddddddd-dddd-dddd-dddd-dddddddddddd"})
    flavor_access_2 = flavor_access.FlavorAccess(
        flavor_access_manager,
        {"tenant_id": "2",
         "flavor_id": "dddddddd-dddd-dddd-dddd-dddddddddddd"})
    TEST.flavor_access.add(flavor_access_1, flavor_access_2)

    # Key pairs
    keypair = keypairs.Keypair(keypairs.KeypairManager(None),
                               dict(name='keyName'))
    TEST.keypairs.add(keypair)

    # Security Groups and Rules
    def generate_security_groups(is_uuid=False):

        def get_id(is_uuid):
            if is_uuid:
                return str(uuid.uuid4())
            else:
                get_id.current_int_id += 1
                return get_id.current_int_id

        get_id.current_int_id = 0

        sg_manager = sec_groups.SecurityGroupManager(None)
        rule_manager = rules.SecurityGroupRuleManager(None)

        sec_group_1 = sec_groups.SecurityGroup(sg_manager,
                                               {"rules": [],
                                                "tenant_id": TEST.tenant.id,
                                                "id": get_id(is_uuid),
                                                "name": u"default",
                                                "description": u"default"})
        sec_group_2 = sec_groups.SecurityGroup(sg_manager,
                                               {"rules": [],
                                                "tenant_id": TEST.tenant.id,
                                                "id": get_id(is_uuid),
                                                "name": u"other_group",
                                                "description": u"NotDefault."})
        sec_group_3 = sec_groups.SecurityGroup(sg_manager,
                                               {"rules": [],
                                                "tenant_id": TEST.tenant.id,
                                                "id": get_id(is_uuid),
                                                "name": u"another_group",
                                                "description": u"NotDefault."})

        rule = {'id': get_id(is_uuid),
                'group': {},
                'ip_protocol': u"tcp",
                'from_port': u"80",
                'to_port': u"80",
                'parent_group_id': sec_group_1.id,
                'ip_range': {'cidr': u"0.0.0.0/32"}}

        icmp_rule = {'id': get_id(is_uuid),
                     'group': {},
                     'ip_protocol': u"icmp",
                     'from_port': u"9",
                     'to_port': u"5",
                     'parent_group_id': sec_group_1.id,
                     'ip_range': {'cidr': u"0.0.0.0/32"}}

        group_rule = {'id': get_id(is_uuid),
                      'group': {},
                      'ip_protocol': u"tcp",
                      'from_port': u"80",
                      'to_port': u"80",
                      'parent_group_id': sec_group_1.id,
                      'source_group_id': sec_group_1.id}

        rule_obj = rules.SecurityGroupRule(rule_manager, rule)
        rule_obj2 = rules.SecurityGroupRule(rule_manager, icmp_rule)
        rule_obj3 = rules.SecurityGroupRule(rule_manager, group_rule)

        sec_group_1.rules = [rule_obj]
        sec_group_2.rules = [rule_obj]

        return {"rules": [rule_obj, rule_obj2, rule_obj3],
                "groups": [sec_group_1, sec_group_2, sec_group_3]}

    sg_data = generate_security_groups()
    TEST.security_group_rules.add(*sg_data["rules"])
    TEST.security_groups.add(*sg_data["groups"])

    sg_uuid_data = generate_security_groups(is_uuid=True)
    TEST.security_group_rules_uuid.add(*sg_uuid_data["rules"])
    TEST.security_groups_uuid.add(*sg_uuid_data["groups"])

    # Quota Sets
    quota_data = dict(metadata_items='1',
                      injected_file_content_bytes='1',
                      volumes='1',
                      gigabytes='1000',
                      ram=10000,
                      floating_ips='1',
                      fixed_ips='10',
                      instances='10',
                      injected_files='1',
                      cores='10',
                      security_groups='10',
                      security_group_rules='20')
    quota = quotas.QuotaSet(quotas.QuotaSetManager(None), quota_data)
    TEST.quotas.nova = base.QuotaSet(quota)
    TEST.quotas.add(base.QuotaSet(quota))

    # nova quotas disabled when neutron is enabled
    disabled_quotas_nova = ['floating_ips', 'fixed_ips',
                            'security_groups', 'security_group_rules']
    TEST.disabled_quotas.add(disabled_quotas_nova)

    # Quota Usages
    quota_usage_data = {'gigabytes': {'used': 0,
                                      'quota': 1000},
                        'instances': {'used': 0,
                                      'quota': 10},
                        'ram': {'used': 0,
                                'quota': 10000},
                        'cores': {'used': 0,
                                  'quota': 20},
                        'floating_ips': {'used': 0,
                                         'quota': 10},
                        'security_groups': {'used': 0,
                                            'quota': 10},
                        'volumes': {'used': 0,
                                    'quota': 10}}
    quota_usage = usage_quotas.QuotaUsage()
    for k, v in quota_usage_data.items():
        quota_usage.add_quota(base.Quota(k, v['quota']))
        quota_usage.tally(k, v['used'])

    TEST.quota_usages.add(quota_usage)

    # Limits
    limits = {"absolute": {"maxImageMeta": 128,
                           "maxPersonality": 5,
                           "maxPersonalitySize": 10240,
                           "maxSecurityGroupRules": 20,
                           "maxSecurityGroups": 10,
                           "maxServerMeta": 128,
                           "maxTotalCores": 20,
                           "maxTotalFloatingIps": 10,
                           "maxTotalInstances": 10,
                           "maxTotalKeypairs": 100,
                           "maxTotalRAMSize": 10000,
                           "totalCoresUsed": 0,
                           "totalInstancesUsed": 0,
                           "totalKeyPairsUsed": 0,
                           "totalRAMUsed": 0,
                           "totalSecurityGroupsUsed": 0}}
    TEST.limits = limits

    # Servers
    tenant3 = TEST.tenants.list()[2]

    vals = {"host": "http://nova.example.com:8774",
            "name": "server_1",
            "status": "ACTIVE",
            "tenant_id": TEST.tenants.first().id,
            "user_id": TEST.user.id,
            "server_id": "1",
            "flavor_id": flavor_1.id,
            "image_id": TEST.images.first().id,
            "key_name": keypair.name}
    server_1 = servers.Server(servers.ServerManager(None),
                              json.loads(SERVER_DATA % vals)['server'])
    vals.update({"name": "server_2",
                 "status": "BUILD",
                 "server_id": "2"})
    server_2 = servers.Server(servers.ServerManager(None),
                              json.loads(SERVER_DATA % vals)['server'])
    vals.update({"name": u'\u4e91\u89c4\u5219',
                 "status": "ACTIVE",
                 "tenant_id": tenant3.id,
                "server_id": "3"})
    server_3 = servers.Server(servers.ServerManager(None),
                              json.loads(SERVER_DATA % vals)['server'])
    TEST.servers.add(server_1, server_2, server_3)

    # VNC Console Data
    console = {u'console': {u'url': u'http://example.com:6080/vnc_auto.html',
                            u'type': u'novnc'}}
    TEST.servers.vnc_console_data = console
    # SPICE Console Data
    console = {u'console': {u'url': u'http://example.com:6080/spice_auto.html',
                            u'type': u'spice'}}
    TEST.servers.spice_console_data = console
    # RDP Console Data
    console = {u'console': {u'url': u'http://example.com:6080/rdp_auto.html',
                            u'type': u'rdp'}}
    TEST.servers.rdp_console_data = console

    # Floating IPs
    def generate_fip(conf):
        return floating_ips.FloatingIP(floating_ips.FloatingIPManager(None),
                                       conf)

    fip_1 = {'id': 1,
             'fixed_ip': '10.0.0.4',
             'instance_id': server_1.id,
             'ip': '58.58.58.58',
             'pool': 'pool1'}
    fip_2 = {'id': 2,
             'fixed_ip': None,
             'instance_id': None,
             'ip': '58.58.58.58',
             'pool': 'pool2'}
    TEST.api_floating_ips.add(generate_fip(fip_1), generate_fip(fip_2))

    TEST.floating_ips.add(nova.FloatingIp(generate_fip(fip_1)),
                          nova.FloatingIp(generate_fip(fip_2)))

    # Floating IP with UUID id (for Floating IP with Neutron Proxy)
    fip_3 = {'id': str(uuid.uuid4()),
             'fixed_ip': '10.0.0.4',
             'instance_id': server_1.id,
             'ip': '58.58.58.58',
             'pool': 'pool1'}
    fip_4 = {'id': str(uuid.uuid4()),
             'fixed_ip': None,
             'instance_id': None,
             'ip': '58.58.58.58',
             'pool': 'pool2'}
    TEST.api_floating_ips_uuid.add(generate_fip(fip_3), generate_fip(fip_4))

    TEST.floating_ips_uuid.add(nova.FloatingIp(generate_fip(fip_3)),
                               nova.FloatingIp(generate_fip(fip_4)))

    # Usage
    usage_vals = {"tenant_id": TEST.tenant.id,
                  "instance_name": server_1.name,
                  "flavor_name": flavor_1.name,
                  "flavor_vcpus": flavor_1.vcpus,
                  "flavor_disk": flavor_1.disk,
                  "flavor_ram": flavor_1.ram}
    usage_obj = usage.Usage(usage.UsageManager(None),
                            json.loads(USAGE_DATA % usage_vals))
    TEST.usages.add(usage_obj)

    usage_2_vals = {"tenant_id": tenant3.id,
                    "instance_name": server_3.name,
                    "flavor_name": flavor_1.name,
                    "flavor_vcpus": flavor_1.vcpus,
                    "flavor_disk": flavor_1.disk,
                    "flavor_ram": flavor_1.ram}
    usage_obj_2 = usage.Usage(usage.UsageManager(None),
                              json.loads(USAGE_DATA % usage_2_vals))
    TEST.usages.add(usage_obj_2)

    volume_snapshot = vol_snaps.Snapshot(
        vol_snaps.SnapshotManager(None),
        {'id': '40f3fabf-3613-4f5e-90e5-6c9a08333fc3',
         'display_name': 'test snapshot',
         'display_description': 'vol snap!',
         'size': 40,
         'status': 'available',
         'volume_id': '41023e92-8008-4c8b-8059-7f2293ff3775'})
    volume_snapshot2 = vol_snaps.Snapshot(
        vol_snaps.SnapshotManager(None),
        {'id': 'a374cbb8-3f99-4c3f-a2ef-3edbec842e31',
         'display_name': '',
         'display_description': 'vol snap 2!',
         'size': 80,
         'status': 'available',
         'volume_id': '3b189ac8-9166-ac7f-90c9-16c8bf9e01ac'})
    TEST.volume_snapshots.add(volume_snapshot)
    TEST.volume_snapshots.add(volume_snapshot2)

    cert_data = {'private_key': 'private',
                 'data': 'certificate_data'}
    certificate = certs.Certificate(certs.CertificateManager(None), cert_data)
    TEST.certs.add(certificate)

    # Availability Zones
    TEST.availability_zones.add(availability_zones.AvailabilityZone(
        availability_zones.AvailabilityZoneManager(None),
        {
            'zoneName': 'nova',
            'zoneState': {'available': True},
            'hosts': {
                "host001": {
                    "nova-network": {
                        "active": True,
                        "available": True,
                    },
                },
            },
        },
    ))

    # hypervisors
    hypervisor_1 = hypervisors.Hypervisor(
        hypervisors.HypervisorManager(None),
        {
            "service": {"host": "devstack001", "id": 3},
            "vcpus_used": 1,
            "hypervisor_type": "QEMU",
            "local_gb_used": 20,
            "hypervisor_hostname": "devstack001",
            "memory_mb_used": 1500,
            "memory_mb": 2000,
            "current_workload": 0,
            "vcpus": 1,
            "cpu_info": '{"vendor": "Intel", "model": "core2duo",'
                        '"arch": "x86_64", "features": ["lahf_lm"'
                        ', "rdtscp"], "topology": {"cores": 1, "t'
                        'hreads": 1, "sockets": 1}}',
            "running_vms": 1,
            "free_disk_gb": 9,
            "hypervisor_version": 1002000,
            "disk_available_least": 6,
            "local_gb": 29,
            "free_ram_mb": 500,
            "id": 1,
        },
    )

    hypervisor_2 = hypervisors.Hypervisor(
        hypervisors.HypervisorManager(None),
        {
            "service": {"host": "devstack002", "id": 4},
            "vcpus_used": 1,
            "hypervisor_type": "QEMU",
            "local_gb_used": 20,
            "hypervisor_hostname": "devstack002",
            "memory_mb_used": 1500,
            "memory_mb": 2000,
            "current_workload": 0,
            "vcpus": 1,
            "cpu_info": '{"vendor": "Intel", "model": "core2duo",'
                        '"arch": "x86_64", "features": ["lahf_lm"'
                        ', "rdtscp"], "topology": {"cores": 1, "t'
                        'hreads": 1, "sockets": 1}}',
            "running_vms": 1,
            "free_disk_gb": 9,
            "hypervisor_version": 1002000,
            "disk_available_least": 6,
            "local_gb": 29,
            "free_ram_mb": 500,
            "id": 2,
        },
    )
    hypervisor_3 = hypervisors.Hypervisor(
        hypervisors.HypervisorManager(None),
        {
            "service": {"host": "instance-host", "id": 5},
            "vcpus_used": 1,
            "hypervisor_type": "QEMU",
            "local_gb_used": 20,
            "hypervisor_hostname": "devstack003",
            "memory_mb_used": 1500,
            "memory_mb": 2000,
            "current_workload": 0,
            "vcpus": 1,
            "cpu_info": '{"vendor": "Intel", "model": "core2duo",'
                        '"arch": "x86_64", "features": ["lahf_lm"'
                        ', "rdtscp"], "topology": {"cores": 1, "t'
                        'hreads": 1, "sockets": 1}}',
            "running_vms": 1,
            "free_disk_gb": 9,
            "hypervisor_version": 1002000,
            "disk_available_least": 6,
            "local_gb": 29,
            "free_ram_mb": 500,
            "id": 3,
        },
    )
    TEST.hypervisors.add(hypervisor_1)
    TEST.hypervisors.add(hypervisor_2)
    TEST.hypervisors.add(hypervisor_3)

    TEST.hypervisors.stats = {
        "hypervisor_statistics": {
            "count": 5,
            "vcpus_used": 3,
            "local_gb_used": 15,
            "memory_mb": 483310,
            "current_workload": 0,
            "vcpus": 160,
            "running_vms": 3,
            "free_disk_gb": 12548,
            "disk_available_least": 12556,
            "local_gb": 12563,
            "free_ram_mb": 428014,
            "memory_mb_used": 55296,
        }
    }

    # Services
    service_1 = services.Service(services.ServiceManager(None), {
        "status": "enabled",
        "binary": "nova-conductor",
        "zone": "internal",
        "state": "up",
        "updated_at": "2013-07-08T05:21:00.000000",
        "host": "devstack001",
        "disabled_reason": None,
    })

    service_2 = services.Service(services.ServiceManager(None), {
        "status": "enabled",
        "binary": "nova-compute",
        "zone": "nova",
        "state": "up",
        "updated_at": "2013-07-08T05:20:51.000000",
        "host": "devstack001",
        "disabled_reason": None,
    })

    service_3 = services.Service(services.ServiceManager(None), {
        "status": "enabled",
        "binary": "nova-compute",
        "zone": "nova",
        "state": "down",
        "updated_at": "2013-07-08T04:20:51.000000",
        "host": "devstack002",
        "disabled_reason": None,
    })

    TEST.services.add(service_1)
    TEST.services.add(service_2)
    TEST.services.add(service_3)

    # Aggregates
    aggregate_1 = aggregates.Aggregate(aggregates.AggregateManager(None), {
        "name": "foo",
        "availability_zone": "testing",
        "deleted": 0,
        "created_at": "2013-07-04T13:34:38.000000",
        "updated_at": None,
        "hosts": ["foo", "bar"],
        "deleted_at": None,
        "id": 1,
        "metadata": {"foo": "testing", "bar": "testing"},
    })

    aggregate_2 = aggregates.Aggregate(aggregates.AggregateManager(None), {
        "name": "bar",
        "availability_zone": "testing",
        "deleted": 0,
        "created_at": "2013-07-04T13:34:38.000000",
        "updated_at": None,
        "hosts": ["foo", "bar"],
        "deleted_at": None,
        "id": 2,
        "metadata": {"foo": "testing", "bar": "testing"},
    })

    TEST.aggregates.add(aggregate_1)
    TEST.aggregates.add(aggregate_2)

    host1 = hosts.Host(hosts.HostManager(None), {
        "host_name": "devstack001",
        "service": "compute",
        "zone": "testing",
    })

    host2 = hosts.Host(hosts.HostManager(None), {
        "host_name": "devstack002",
        "service": "nova-conductor",
        "zone": "testing",
    })

    host3 = hosts.Host(hosts.HostManager(None), {
        "host_name": "devstack003",
        "service": "compute",
        "zone": "testing",
    })

    host4 = hosts.Host(hosts.HostManager(None), {
        "host_name": "devstack004",
        "service": "compute",
        "zone": "testing",
    })

    TEST.hosts.add(host1)
    TEST.hosts.add(host2)
    TEST.hosts.add(host3)
    TEST.hosts.add(host4)
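
In the Nova fixtures above, generate_security_groups (and the floating-IP data) is built twice: once with sequential integer ids and once with uuid4 string ids for the Neutron-proxy case. Here is a minimal sketch of that switchable-id pattern in isolation; the make_id_factory name is illustrative only and not part of the project:

import uuid

def make_id_factory():
    counter = {"value": 0}

    def get_id(is_uuid=False):
        if is_uuid:
            return str(uuid.uuid4())   # random 36-character UUID string
        counter["value"] += 1          # deterministic integer id otherwise
        return counter["value"]

    return get_id

get_id = make_id_factory()
print(get_id())              # 1
print(get_id())              # 2
print(get_id(is_uuid=True))  # unique UUID string on every call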

Example 8

Project: coriolis
Source File: 001_initial.py
View license
def upgrade(migrate_engine):
    meta = sqlalchemy.MetaData()
    meta.bind = migrate_engine

    migration = sqlalchemy.Table(
        'migration', meta,
        sqlalchemy.Column("id", sqlalchemy.String(36), primary_key=True,
                          default=lambda: str(uuid.uuid4())),
        sqlalchemy.Column('created_at', sqlalchemy.DateTime),
        sqlalchemy.Column('updated_at', sqlalchemy.DateTime),
        sqlalchemy.Column('deleted_at', sqlalchemy.DateTime),
        sqlalchemy.Column('deleted', sqlalchemy.String(36)),
        sqlalchemy.Column("user_id", sqlalchemy.String(255), nullable=False),
        sqlalchemy.Column("project_id", sqlalchemy.String(255),
                          nullable=False),
        sqlalchemy.Column("origin", sqlalchemy.Text, nullable=False),
        sqlalchemy.Column("destination", sqlalchemy.Text,
                          nullable=False),
        sqlalchemy.Column("status", sqlalchemy.String(100), nullable=False),
        mysql_engine='InnoDB',
        mysql_charset='utf8'
    )

    task = sqlalchemy.Table(
        'task', meta,
        sqlalchemy.Column('id', sqlalchemy.String(36), primary_key=True,
                          default=lambda: str(uuid.uuid4())),
        sqlalchemy.Column('created_at', sqlalchemy.DateTime),
        sqlalchemy.Column('updated_at', sqlalchemy.DateTime),
        sqlalchemy.Column('deleted_at', sqlalchemy.DateTime),
        sqlalchemy.Column('deleted', sqlalchemy.String(36)),
        sqlalchemy.Column("migration_id", sqlalchemy.String(36),
                          sqlalchemy.ForeignKey('migration.id'),
                          nullable=False),
        sqlalchemy.Column("instance", sqlalchemy.String(1024), nullable=False),
        sqlalchemy.Column("host", sqlalchemy.String(1024), nullable=True),
        sqlalchemy.Column("process_id", sqlalchemy.Integer, nullable=True),
        sqlalchemy.Column("status", sqlalchemy.String(100), nullable=False),
        sqlalchemy.Column("task_type", sqlalchemy.String(100),
                          nullable=False),
        sqlalchemy.Column("exception_details", sqlalchemy.Text, nullable=True),
        sqlalchemy.Column("depends_on", sqlalchemy.Text, nullable=True),
        mysql_engine='InnoDB',
        mysql_charset='utf8'
    )

    task_progress_update = sqlalchemy.Table(
        'task_progress_update', meta,
        sqlalchemy.Column('id', sqlalchemy.String(36), primary_key=True,
                          default=lambda: str(uuid.uuid4())),
        sqlalchemy.Column('created_at', sqlalchemy.DateTime),
        sqlalchemy.Column('updated_at', sqlalchemy.DateTime),
        sqlalchemy.Column('deleted_at', sqlalchemy.DateTime),
        sqlalchemy.Column('deleted', sqlalchemy.String(36)),
        sqlalchemy.Column("task_id", sqlalchemy.String(36),
                          sqlalchemy.ForeignKey('task.id'),
                          nullable=False),
        sqlalchemy.Column("current_step", sqlalchemy.Integer, nullable=False),
        sqlalchemy.Column("total_steps", sqlalchemy.Integer, nullable=True),
        sqlalchemy.Column("message", sqlalchemy.String(1024), nullable=True),
        mysql_engine='InnoDB',
        mysql_charset='utf8'
    )

    task_events = sqlalchemy.Table(
        'task_event', meta,
        sqlalchemy.Column('id', sqlalchemy.String(36), primary_key=True,
                          default=lambda: str(uuid.uuid4())),
        sqlalchemy.Column('created_at', sqlalchemy.DateTime),
        sqlalchemy.Column('updated_at', sqlalchemy.DateTime),
        sqlalchemy.Column('deleted_at', sqlalchemy.DateTime),
        sqlalchemy.Column('deleted', sqlalchemy.String(36)),
        sqlalchemy.Column("task_id", sqlalchemy.String(36),
                          sqlalchemy.ForeignKey('task.id'),
                          nullable=False),
        sqlalchemy.Column("level", sqlalchemy.String(50), nullable=False),
        sqlalchemy.Column("message", sqlalchemy.String(1024), nullable=False),
        mysql_engine='InnoDB',
        mysql_charset='utf8'
    )

    tables = (
        migration,
        task,
        task_progress_update,
        task_events,
    )

    for index, table in enumerate(tables):
        try:
            table.create()
        except Exception:
            # If an error occurs, drop all tables created so far to return
            # to the previously existing state.
            meta.drop_all(tables=tables[:index])
            raise
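
Every table in the migration above declares a 36-character primary key whose default is lambda: str(uuid.uuid4()), so SQLAlchemy fills in a fresh UUID for any inserted row that does not supply an id. The following is a minimal, self-contained sketch of that column default in action, using a throwaway in-memory SQLite table purely for illustration (not the project's schema):

import uuid
import sqlalchemy

engine = sqlalchemy.create_engine("sqlite://")   # in-memory database for the demo
meta = sqlalchemy.MetaData()
demo = sqlalchemy.Table(
    "demo", meta,
    sqlalchemy.Column("id", sqlalchemy.String(36), primary_key=True,
                      default=lambda: str(uuid.uuid4())),
    sqlalchemy.Column("status", sqlalchemy.String(100), nullable=False),
)
meta.create_all(engine)

with engine.begin() as conn:
    conn.execute(demo.insert(), [{"status": "RUNNING"}, {"status": "COMPLETED"}])
    rows = conn.execute(demo.select()).fetchall()

# Each row received its own uuid4-based primary key from the column default.
assert len({row.id for row in rows}) == 2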

Example 9

Project: rom
Source File: index.py
View license
    def _prepare(self, conn, filters):
        temp_id = "%s:%s"%(self.namespace, uuid.uuid4())
        pipe = conn.pipeline(True)
        sfilters = filters
        sizes = [(None, 0)]
        if filters:
            # reorder filters based on the size of the underlying set/zset
            for fltr in filters:
                if isinstance(fltr, six.string_types):
                    estimate_work_lua(pipe, '%s:%s:idx'%(self.namespace, fltr), None)
                elif isinstance(fltr, Prefix):
                    estimate_work_lua(pipe, '%s:%s:pre'%(self.namespace, fltr.attr), fltr.prefix)
                elif isinstance(fltr, Suffix):
                    estimate_work_lua(pipe, '%s:%s:suf'%(self.namespace, fltr.attr), fltr.suffix)
                elif isinstance(fltr, Pattern):
                    estimate_work_lua(pipe, '%s:%s:pre'%(self.namespace, fltr.attr), _find_prefix(fltr.pattern))
                elif isinstance(fltr, list):
                    estimate_work_lua(pipe, '%s:%s:idx'%(self.namespace, fltr[0]), None)
                elif isinstance(fltr, Geofilter):
                    estimate_work_lua(pipe, '%s:%s:geo'%(self.namespace, fltr.name), fltr.count)
                elif isinstance(fltr, tuple):
                    estimate_work_lua(pipe, '%s:%s:idx'%(self.namespace, fltr[0]), fltr[1:3])
                else:
                    raise QueryError("Don't know how to handle a filter of: %r"%(fltr,))
            sizes = list(enumerate(pipe.execute()))
            sizes.sort(key=lambda x:abs(x[1]))
            sfilters = [filters[x[0]] for x in sizes]

        # the first "intersection" is actually a union to get us started, unless
        # we can explicitly create a sub-range in Lua for a fast start to
        # intersection
        intersect = pipe.zunionstore
        first = True
        for ii, fltr in enumerate(sfilters):
            if isinstance(fltr, list):
                # OR'd string/tag search
                if len(fltr) == 1:
                    # only 1? Use the simple version.
                    fltr = fltr[0]
                elif not fltr:
                    continue
                else:
                    temp_id2 = str(uuid.uuid4())
                    pipe.zunionstore(temp_id2, dict(
                        ('%s:%s:idx'%(self.namespace, fi), 0) for fi in fltr))
                    intersect(temp_id, {temp_id: 0, temp_id2: 0})
                    pipe.delete(temp_id2)
            if isinstance(fltr, six.string_types):
                # simple string/tag search
                intersect(temp_id, {temp_id:0, '%s:%s:idx'%(self.namespace, fltr):0})
            elif isinstance(fltr, Prefix):
                redis_prefix_lua(pipe, temp_id, '%s:%s:pre'%(self.namespace, fltr.attr), fltr.prefix, first)
            elif isinstance(fltr, Suffix):
                redis_prefix_lua(pipe, temp_id, '%s:%s:suf'%(self.namespace, fltr.attr), fltr.suffix, first)
            elif isinstance(fltr, Pattern):
                redis_prefix_lua(pipe, temp_id,
                    '%s:%s:pre'%(self.namespace, fltr.attr),
                    _find_prefix(fltr.pattern),
                    first, '^' + _pattern_to_lua_pattern(fltr.pattern),
                )
            elif isinstance(fltr, Geofilter):
                # Prep the georadius command
                args = [
                    'georadius', '%s:%s:geo'%(self.namespace, fltr.name),
                    repr(fltr.lon), repr(fltr.lat), fltr.radius, fltr.measure
                ]
                if fltr.count and fltr.count >= 0:
                    args.append('COUNT')
                    args.append(fltr.count)
                args.append('STOREDIST')
                first = intersect == pipe.zunionstore
                args.append(temp_id if first else str(uuid.uuid4()))

                pipe.pipeline_execute_command(*args)
                if not first:
                    intersect(temp_id, {temp_id: 0, args[-1]: 1})
                    pipe.delete(args[-1])

            elif isinstance(fltr, tuple):
                # zset range search
                if len(fltr) != 3:
                    raise QueryError("Cannot filter range of data without 2 endpoints (%s given)"%(len(fltr)-1,))
                fltr, mi, ma = fltr
                if not ii and sizes[0][1] < 0:
                    # We've got a special case where we want to explicitly extract
                    # a subrange instead of starting from a larger index, because
                    # it turns out that this is going to be faster :P
                    lua_subrange(pipe, [temp_id, '%s:%s:idx'%(self.namespace, fltr)],
                        ['-inf' if mi is None else _to_score(mi), 'inf' if ma is None else _to_score(ma)]
                    )

                else:
                    intersect(temp_id, {temp_id:0, '%s:%s:idx'%(self.namespace, fltr):1})
                    if mi is not None:
                        pipe.zremrangebyscore(temp_id, '-inf', _to_score(mi, True))
                    if ma is not None:
                        pipe.zremrangebyscore(temp_id, _to_score(ma, True), 'inf')
            first = False
            intersect = pipe.zinterstore
        return pipe, intersect, temp_id
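
The _prepare method above uses uuid.uuid4 to name throwaway Redis keys: temp_id holds the running intersection, and extra uuid4-named keys hold intermediate unions that are deleted as soon as they are consumed. Below is a minimal sketch of that scratch-key pattern on its own; it assumes redis-py 3.x and a Redis server on localhost and is not part of rom itself:

import uuid
import redis  # assumed client; any Redis binding with pipelines would do

conn = redis.Redis()
namespace = "demo"
temp_id = "%s:%s" % (namespace, uuid.uuid4())   # collision-free scratch key

pipe = conn.pipeline(True)
pipe.zadd(temp_id, {"a": 1, "b": 2})            # build an intermediate zset
pipe.zrange(temp_id, 0, -1)                     # read the result back
pipe.delete(temp_id)                            # always clean up the scratch key
_, members, _ = pipe.execute()
print(members)                                  # [b'a', b'b']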

Example 10

Project: rom
Source File: index.py
View license
    def _prepare(self, conn, filters):
        temp_id = "%s:%s"%(self.namespace, uuid.uuid4())
        pipe = conn.pipeline(True)
        sfilters = filters
        sizes = [(None, 0)]
        if filters:
            # reorder filters based on the size of the underlying set/zset
            for fltr in filters:
                if isinstance(fltr, six.string_types):
                    estimate_work_lua(pipe, '%s:%s:idx'%(self.namespace, fltr), None)
                elif isinstance(fltr, Prefix):
                    estimate_work_lua(pipe, '%s:%s:pre'%(self.namespace, fltr.attr), fltr.prefix)
                elif isinstance(fltr, Suffix):
                    estimate_work_lua(pipe, '%s:%s:suf'%(self.namespace, fltr.attr), fltr.suffix)
                elif isinstance(fltr, Pattern):
                    estimate_work_lua(pipe, '%s:%s:pre'%(self.namespace, fltr.attr), _find_prefix(fltr.pattern))
                elif isinstance(fltr, list):
                    estimate_work_lua(pipe, '%s:%s:idx'%(self.namespace, fltr[0]), None)
                elif isinstance(fltr, Geofilter):
                    estimate_work_lua(pipe, '%s:%s:geo'%(self.namespace, fltr.name), fltr.count)
                elif isinstance(fltr, tuple):
                    estimate_work_lua(pipe, '%s:%s:idx'%(self.namespace, fltr[0]), fltr[1:3])
                else:
                    raise QueryError("Don't know how to handle a filter of: %r"%(fltr,))
            sizes = list(enumerate(pipe.execute()))
            sizes.sort(key=lambda x:abs(x[1]))
            sfilters = [filters[x[0]] for x in sizes]

        # the first "intersection" is actually a union to get us started, unless
        # we can explicitly create a sub-range in Lua for a fast start to
        # intersection
        intersect = pipe.zunionstore
        first = True
        for ii, fltr in enumerate(sfilters):
            if isinstance(fltr, list):
                # or string string/tag search
                if len(fltr) == 1:
                    # only 1? Use the simple version.
                    fltr = fltr[0]
                elif not fltr:
                    continue
                else:
                    temp_id2 = str(uuid.uuid4())
                    pipe.zunionstore(temp_id2, dict(
                        ('%s:%s:idx'%(self.namespace, fi), 0) for fi in fltr))
                    intersect(temp_id, {temp_id: 0, temp_id2: 0})
                    pipe.delete(temp_id2)
            if isinstance(fltr, six.string_types):
                # simple string/tag search
                intersect(temp_id, {temp_id:0, '%s:%s:idx'%(self.namespace, fltr):0})
            elif isinstance(fltr, Prefix):
                redis_prefix_lua(pipe, temp_id, '%s:%s:pre'%(self.namespace, fltr.attr), fltr.prefix, first)
            elif isinstance(fltr, Suffix):
                redis_prefix_lua(pipe, temp_id, '%s:%s:suf'%(self.namespace, fltr.attr), fltr.suffix, first)
            elif isinstance(fltr, Pattern):
                redis_prefix_lua(pipe, temp_id,
                    '%s:%s:pre'%(self.namespace, fltr.attr),
                    _find_prefix(fltr.pattern),
                    first, '^' + _pattern_to_lua_pattern(fltr.pattern),
                )
            elif isinstance(fltr, Geofilter):
                # Prep the georadius command
                args = [
                    'georadius', '%s:%s:geo'%(self.namespace, fltr.name),
                    repr(fltr.lon), repr(fltr.lat), fltr.radius, fltr.measure
                ]
                if fltr.count and fltr.count >= 0:
                    args.append('COUNT')
                    args.append(fltr.count)
                args.append('STOREDIST')
                first = intersect == pipe.zunionstore
                args.append(temp_id if first else str(uuid.uuid4()))

                pipe.pipeline_execute_command(*args)
                if not first:
                    intersect(temp_id, {temp_id: 0, args[-1]: 1})
                    pipe.delete(args[-1])

            elif isinstance(fltr, tuple):
                # zset range search
                if len(fltr) != 3:
                    raise QueryError("Cannot filter range of data without 2 endpoints (%s given)"%(len(fltr)-1,))
                fltr, mi, ma = fltr
                if not ii and sizes[0][1] < 0:
                    # We've got a special case where we want to explicitly extract
                    # a subrange instead of starting from a larger index, because
                    # it turns out that this is going to be faster :P
                    lua_subrange(pipe, [temp_id, '%s:%s:idx'%(self.namespace, fltr)],
                        ['-inf' if mi is None else _to_score(mi), 'inf' if ma is None else _to_score(ma)]
                    )

                else:
                    intersect(temp_id, {temp_id:0, '%s:%s:idx'%(self.namespace, fltr):1})
                    if mi is not None:
                        pipe.zremrangebyscore(temp_id, '-inf', _to_score(mi, True))
                    if ma is not None:
                        pipe.zremrangebyscore(temp_id, _to_score(ma, True), 'inf')
            first = False
            intersect = pipe.zinterstore
        return pipe, intersect, temp_id
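
In the rom query-preparation code above, uuid.uuid4() mints throwaway Redis key names (temp_id and temp_id2) so that concurrent queries never collide on their intermediate zsets, which are deleted as soon as they have been merged. A minimal, stdlib-only sketch of that naming pattern (the namespace value here is made up for illustration):

import uuid

def scratch_key(namespace):
    # One unique key per call; parallel queries cannot clobber each other's
    # intermediate results, and the key can be deleted once it is consumed.
    return "%s:%s" % (namespace, uuid.uuid4())

temp_id = scratch_key("RomModel")
temp_id2 = scratch_key("RomModel")
assert temp_id != temp_id2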

Example 11

Project: pymxf
Source File: test_file_io.py
View license
    def test_new(self):
        
        DVBased_50_625_50_ClipWrapped =  mxf.util.find_essence_container_label("DVBased_50_625_50_ClipWrapped")

        test_file = os.path.join(sandbox, 'test_new.mxf')
        f = mxf.open(test_file, 'w')
        
        # Create Header Object
        header = mxf.metadata.HeaderMetadata()
        header.create_avid_metadictionary()
        
        bodySID = 1
        indexSID = 2
        
        now = str(datetime.now())
        def set_and_check(item,key, value, check=True):
            data_item = item[key]
            if isinstance(data_item, mxf.metadata.MetaDataArrayItem):
                data_item.append(value)
                print "%s.%s =" % (item.type_name, key),  data_item.value
                if check:
                    assert value in data_item.value
            else:
                data_item.value = value
                print "%s.%s =" % (item.type_name, key),  data_item.value
                if check:
                    assert value == data_item.value

        # Preface
        preface = header.create_set("Preface")
        set_and_check(preface, 'LastModifiedDate', now, False)
        set_and_check(preface, 'Version', 0x0102)
        set_and_check(preface, 'OperationalPattern', mxf.util.find_op_pattern("atom", "NTracks_1SourceClip"))
        set_and_check(preface, 'EssenceContainers', DVBased_50_625_50_ClipWrapped)

        # Preface - Identification
        gen =  uuid.uuid4()
        ProductUID = uuid.uuid4()
        ident = header.create_set("Identification")
        set_and_check(preface, 'Identifications', ident)
        
        set_and_check(ident, 'ThisGenerationUID', gen)
        set_and_check(ident, 'CompanyName',  "This Company")
        set_and_check(ident, 'ProductName', "Some Product")
        set_and_check(ident, 'VersionString', "Alpha version")
        set_and_check(ident, 'ProductUID', ProductUID)
        set_and_check(ident, 'ModificationDate',now, False)
        set_and_check(ident, 'ToolkitVersion', None, False)
        set_and_check(ident, 'Platform', mxf.util.get_platform_string())
        
        # Preface - ContentStorage
        storage = header.create_set("ContentStorage")        
        set_and_check(preface, 'ContentStorage', storage)

        # Preface - ContentStorage - MaterialPackage
        m_package = header.create_set("MaterialPackage")
        set_and_check(storage, 'Packages', m_package)
        
        materialPackageUMID = mxf.util.generate_umid()
        sourcePackageUMID = mxf.util.generate_umid()
        set_and_check(m_package, 'PackageUID', materialPackageUMID)
        set_and_check(m_package, 'PackageCreationDate', now, False)
        set_and_check(m_package, 'PackageModifiedDate', now, False)
        set_and_check(m_package, 'Name', "python writedv50 material")
        
        # Preface - ContentStorage - MaterialPackage - Timeline Track
        sourceTrackID = 1
        sourceTrackNumber = 0x18010201
        m_track = header.create_set("Track")
        set_and_check(m_package, "Tracks", m_track)
        
        set_and_check(m_track, 'TrackID', sourceTrackID)
        set_and_check(m_track, "TrackNumber", sourceTrackNumber)
        set_and_check(m_track, "EditRate" , "25/1")
        set_and_check(m_track, "Origin" , 0)
        
        # Preface - ContentStorage - MaterialPackage - Timeline Track - Sequence
        seq = header.create_set("Sequence")
        set_and_check(m_track,'Sequence', seq)
        
        set_and_check(seq, 'DataDefinition', mxf.util.find_datadef("LegacyPicture"))
        duration = 0
        set_and_check(seq, 'Duration', duration)
        
        durationItem1 = seq['Duration']
        
        # Preface - ContentStorage - MaterialPackage - Timeline Track - Sequence - SourceClip
        source_clip = header.create_set("SourceClip")
        set_and_check(seq, 'StructuralComponents', source_clip)
        
        set_and_check(source_clip, 'DataDefinition', mxf.util.find_datadef("LegacyPicture"))
        set_and_check(source_clip, 'Duration', duration)
        set_and_check(source_clip, 'StartPosition', 0)
        set_and_check(source_clip, 'SourcePackageID', sourcePackageUMID)
        set_and_check(source_clip, 'SourceTrackID', sourceTrackID)
        
        durationItem2 = source_clip['Duration']
        
        # Preface - ContentStorage - SourcePackage 
        source_package = header.create_set("SourcePackage")
        set_and_check(storage, 'Packages', source_package)
        
        set_and_check(preface, 'PrimaryPackage', source_package)
        set_and_check(source_package, 'PackageUID', sourcePackageUMID)
        set_and_check(source_package, 'PackageModifiedDate', now, False)
        set_and_check(source_package, 'PackageModifiedDate', now, False)
        set_and_check(source_package, 'Name', "writedv50 source")
        
        # Preface - ContentStorage - SourcePackage - Timeline Track
        sp_track =  header.create_set("Track")
        set_and_check(source_package, 'Tracks', sp_track)
        
        set_and_check(sp_track, 'TrackID', sourceTrackID) 
        set_and_check(sp_track, 'TrackNumber',sourceTrackID)
        set_and_check(sp_track, 'EditRate', '25/1')
        set_and_check(sp_track, 'Origin', 0)
        
        # Preface - ContentStorage - SourcePackage - Timeline Track - Sequence
        sp_seq = header.create_set("Sequence")
        set_and_check(sp_track, 'Sequence', sp_seq)
        set_and_check(sp_seq, 'DataDefinition', mxf.util.find_datadef("LegacyPicture"))
        set_and_check(sp_seq, 'Duration', 0)
        
        durationItem3 = sp_seq['Duration']
        
        # Preface - ContentStorage - SourcePackage - Timeline Track - Sequence - SourceClip
        sp_sourceclip = header.create_set('SourceClip')
        set_and_check(sp_seq, 'StructuralComponents', sp_sourceclip)

        set_and_check(sp_sourceclip, 'DataDefinition', mxf.util.find_datadef("LegacyPicture"))
        set_and_check(sp_sourceclip, 'Duration', 0)
        set_and_check(sp_sourceclip, 'StartPosition', 0)
        set_and_check(sp_sourceclip, 'SourcePackageID', mxf.util.get_Null_UMID())
        set_and_check(sp_sourceclip, 'SourceTrackID', 0)
        
        durationItem4 = sp_sourceclip['Duration']
        
        # Preface - ContentStorage - SourcePackage - CDCIEssenceDescriptor
        desc = header.create_set("CDCIEssenceDescriptor")
        set_and_check(source_package,'Descriptor', desc)
        set_and_check(desc, 'SampleRate', '25/1')
        set_and_check(desc,'ContainerDuration', 0)
        set_and_check(desc, 'EssenceContainer', mxf.util.find_essence_container_label('DVBased_50_625_50_ClipWrapped'))
        set_and_check(desc, 'PictureEssenceCoding', mxf.util.find_essence_coding_label('DVBased_50_625_50'))
        
        set_and_check(desc,'StoredHeight',288)
        set_and_check(desc,'StoredWidth',720)
        set_and_check(desc,'SampledHeight',288)
        set_and_check(desc,'SampledWidth',288)
        set_and_check(desc,'SampledXOffset',0)
        set_and_check(desc,'SampledYOffset',0)
        set_and_check(desc,'DisplayHeight',288)
        set_and_check(desc,'DisplayWidth',720)
        set_and_check(desc,'DisplayXOffset',0)
        set_and_check(desc,'DisplayYOffset',0)
        set_and_check(desc,'FrameLayout',1)
        
        desc['VideoLineMap'].append(23)
        desc['VideoLineMap'].append(335)
        print 'CDCIEssenceDescriptor.VideoLineMap =', desc['VideoLineMap'].value
        
        aspectRatio = "4/3"
        
        set_and_check(desc, 'AspectRatio',aspectRatio)
        set_and_check(desc, 'ImageAlignmentOffset', 1)
        set_and_check(desc, 'ComponentDepth', 8)
        set_and_check(desc, 'HorizontalSubsampling', 2)
        set_and_check(desc, 'VerticalSubsampling', 1)
        set_and_check(desc, 'ColorSiting', 4)
        set_and_check(desc, 'BlackRefLevel', 16)
        set_and_check(desc, 'WhiteReflevel', 235)
        set_and_check(desc, 'ColorRange', 255)
        
        resolutionID = 0x8e
        frameSize = 288000
        imageSize = 0
        set_and_check(desc,'ResolutionID', resolutionID)
        set_and_check(desc,'FrameSampleSize', frameSize)
        set_and_check(desc,'ImageSize',imageSize)
        
        durationItem5 = desc['ContainerDuration']
        ImageSizeItem = desc['ImageSize']
        
        #Preface - ContentStorage - EssenceContainerData
        ess_container = header.create_set("EssenceContainerData")
        set_and_check(storage, 'EssenceContainerData', ess_container)
        
        set_and_check(ess_container, 'LinkedPackageUID', sourcePackageUMID)
        set_and_check(ess_container, 'IndexSID', indexSID)
        set_and_check(ess_container, 'BodySID', bodySID)
        
        # create header partition
        header_partition = f.create_partition("header")
        header_partition.append_essence_container(DVBased_50_625_50_ClipWrapped)
        
        print header_partition.operational_pattern_name
        header_partition.operational_pattern_name = "NTracks_1SourceClip"
        print header_partition.operational_pattern_name
        
        f.write_partition(header_partition)
        
        # Store current position in file for later
        header_pos = f.tell()
        f.write_header(header, header_partition)
        
        # Create body partition
        body = f.create_partition("body")
        f.write_partition(body)

        DVClipWrapped = mxf.util.find_essence_element_key('DVClipWrapped')
        essence = f.create_essence(DVClipWrapped)
        
        dv_file = os.path.join(files, "input.dv")
        
        input_file = open(dv_file)
        while True:
            data = input_file.read(1024)
            if not data:
                break
            essence.write(data)
        
        #essence.import_from_file(dv_file)
        
        duration = essence.size/ frameSize
        imageSize = essence.size
        
        essence.complete_write()
        essence.close()
        
        # create footer partition
        footer = f.create_partition("footer")
        f.write_partition(footer)
        
        index_seg = mxf.storage.IndexTableSegment()
        indexSegmentUUID = uuid.uuid4()
        index_seg.instanceUID = indexSegmentUUID
        print index_seg.instanceUID
        index_seg.edit_rate = "25/1"
        print index_seg.edit_rate
        index_seg.start_position = 0
        index_seg.duration = duration
        index_seg.edit_unit_byte_count = imageSize
        index_seg.indexSID = indexSID
        index_seg.bodySID = bodySID
        index_seg.slice_count = 0
        index_seg.pos_table_count = 0
        
        f.write_index(index_seg,footer)
        
        for item in (durationItem1 ,durationItem2, durationItem3, durationItem4, durationItem5):
            item.value = duration
        ImageSizeItem.value = imageSize
        
        f.update_header(header, header_partition, header_pos)
        
        f.update_partitions()
        
        f.close()
        
        
        #dump(test_file)
        f= mxf.open(test_file, 'r')
        
        header = f.read_header()
        
        #output = open("new.txt", 'w')
        
        for set_item in header.iter_sets():
            #print set_item.type_name
            
            #output.write("%s\n" % str(set_item.type_name))
            for item in set_item.iter_items():
                pass
                #print "   ", item.name, item.type_name,  item.key, item.length, item.value
                
                #output.write("   %s %s %s %s %s\n" % (str(item.name), str(item.type_name), str(item.key), str(item.length), str(item.value)))
        #output.close()
        f.close()
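
The pymxf test above passes uuid.uuid4() results directly, as UUID objects rather than strings, into the Identification set and the index table segment (ThisGenerationUID, ProductUID, instanceUID). A stdlib-only sketch of the same idea; the dict merely stands in for the mxf metadata set, which is specific to that library:

import uuid

gen_uid = uuid.uuid4()      # fresh per run, like ThisGenerationUID above
product_uid = uuid.uuid4()  # random here; real tools would keep this stable per product

identification = {
    "ThisGenerationUID": gen_uid,
    "ProductUID": product_uid,
}

# UUID objects compare by value, so round-tripping through text is lossless:
assert identification["ThisGenerationUID"] == uuid.UUID(str(gen_uid))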

Example 12

Project: mongo-python-driver
Source File: test_common.py
View license
    def test_uuid_representation(self):
        coll = self.db.uuid
        coll.drop()

        # Test property
        self.assertEqual(PYTHON_LEGACY,
                         coll.codec_options.uuid_representation)

        # Test basic query
        uu = uuid.uuid4()
        # Insert as binary subtype 3
        coll.insert_one({'uu': uu})
        self.assertEqual(uu, coll.find_one({'uu': uu})['uu'])
        coll = self.db.get_collection(
            "uuid", CodecOptions(uuid_representation=STANDARD))
        self.assertEqual(STANDARD, coll.codec_options.uuid_representation)
        self.assertEqual(None, coll.find_one({'uu': uu}))
        self.assertEqual(uu, coll.find_one({'uu': UUIDLegacy(uu)})['uu'])

        # Test Cursor.count
        self.assertEqual(0, coll.find({'uu': uu}).count())
        coll = self.db.get_collection(
            "uuid", CodecOptions(uuid_representation=PYTHON_LEGACY))
        self.assertEqual(1, coll.find({'uu': uu}).count())

        # Test delete
        coll = self.db.get_collection(
            "uuid", CodecOptions(uuid_representation=STANDARD))
        coll.delete_one({'uu': uu})
        self.assertEqual(1, coll.count())
        coll = self.db.get_collection(
            "uuid", CodecOptions(uuid_representation=PYTHON_LEGACY))
        coll.delete_one({'uu': uu})
        self.assertEqual(0, coll.count())

        # Test update_one
        coll.insert_one({'_id': uu, 'i': 1})
        coll = self.db.get_collection(
            "uuid", CodecOptions(uuid_representation=STANDARD))
        coll.update_one({'_id': uu}, {'$set': {'i': 2}})
        coll = self.db.get_collection(
            "uuid", CodecOptions(uuid_representation=PYTHON_LEGACY))
        self.assertEqual(1, coll.find_one({'_id': uu})['i'])
        coll.update_one({'_id': uu}, {'$set': {'i': 2}})
        self.assertEqual(2, coll.find_one({'_id': uu})['i'])

        # Test Cursor.distinct
        self.assertEqual([2], coll.find({'_id': uu}).distinct('i'))
        coll = self.db.get_collection(
            "uuid", CodecOptions(uuid_representation=STANDARD))
        self.assertEqual([], coll.find({'_id': uu}).distinct('i'))

        # Test findAndModify
        self.assertEqual(None, coll.find_one_and_update({'_id': uu},
                                                        {'$set': {'i': 5}}))
        coll = self.db.get_collection(
            "uuid", CodecOptions(uuid_representation=PYTHON_LEGACY))
        self.assertEqual(2, coll.find_one_and_update({'_id': uu},
                                                     {'$set': {'i': 5}})['i'])
        self.assertEqual(5, coll.find_one({'_id': uu})['i'])

        # Test command
        self.assertEqual(5, self.db.command('findAndModify', 'uuid',
                                            update={'$set': {'i': 6}},
                                            query={'_id': uu})['value']['i'])
        self.assertEqual(6, self.db.command(
            'findAndModify', 'uuid',
            update={'$set': {'i': 7}},
            query={'_id': UUIDLegacy(uu)})['value']['i'])

        # Test (inline)_map_reduce
        coll.drop()
        coll.insert_one({"_id": uu, "x": 1, "tags": ["dog", "cat"]})
        coll.insert_one({"_id": uuid.uuid4(), "x": 3,
                         "tags": ["mouse", "cat", "dog"]})

        map = Code("function () {"
                   "  this.tags.forEach(function(z) {"
                   "    emit(z, 1);"
                   "  });"
                   "}")

        reduce = Code("function (key, values) {"
                      "  var total = 0;"
                      "  for (var i = 0; i < values.length; i++) {"
                      "    total += values[i];"
                      "  }"
                      "  return total;"
                      "}")

        coll = self.db.get_collection(
            "uuid", CodecOptions(uuid_representation=STANDARD))
        q = {"_id": uu}
        result = coll.inline_map_reduce(map, reduce, query=q)
        self.assertEqual([], result)

        result = coll.map_reduce(map, reduce, "results", query=q)
        self.assertEqual(0, self.db.results.count())

        coll = self.db.get_collection(
            "uuid", CodecOptions(uuid_representation=PYTHON_LEGACY))
        q = {"_id": uu}
        result = coll.inline_map_reduce(map, reduce, query=q)
        self.assertEqual(2, len(result))

        result = coll.map_reduce(map, reduce, "results", query=q)
        self.assertEqual(2, self.db.results.count())

        self.db.drop_collection("result")
        coll.drop()

        # Test group
        coll.insert_one({"_id": uu, "a": 2})
        coll.insert_one({"_id": uuid.uuid4(), "a": 1})

        reduce = "function (obj, prev) { prev.count++; }"
        coll = self.db.get_collection(
            "uuid", CodecOptions(uuid_representation=STANDARD))
        self.assertEqual([],
                         coll.group([], {"_id": uu},
                                    {"count": 0}, reduce))
        coll = self.db.get_collection(
            "uuid", CodecOptions(uuid_representation=PYTHON_LEGACY))
        self.assertEqual([{"count": 1}],
                         coll.group([], {"_id": uu},
                                    {"count": 0}, reduce))
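
The pymongo test above shows that the same uuid.uuid4() value is only found again when the collection's uuid_representation matches how it was stored (legacy binary subtype 3 versus the standard subtype 4); the Python UUID object itself is unchanged either way. A stdlib-only sketch showing that the object round-trips losslessly through its text and 16-byte forms, which is what lets the test re-query with the same uu value:

import uuid

uu = uuid.uuid4()
assert uu.version == 4                   # uuid4 always reports version 4
assert uuid.UUID(str(uu)) == uu          # text round trip
assert uuid.UUID(bytes=uu.bytes) == uu   # 16-byte binary round trip
# What differs between the BSON representations is how those bytes are tagged
# (and, for some legacy drivers, ordered) on the wire, not the Python-side value.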

Example 14

Project: Extinguish
Source File: extinguish.py
View license
def main():
    """gimme some main"""
    parser = argparse.ArgumentParser(add_help=True, version='0.1',
                                     description='Either drag-drop the path to '
                                     'an app bundle into the terminal window, '
                                     'or use "-a" and the CFBundleIdentifier value from an app.')
    parser.add_argument('app_bundle', type=str, help='Path to app bundle'
                        ' to make a profile for', nargs='?')
    parser.add_argument('-a', '--apps', action='append',
                        help='One or more app bundle ids to create profiles for',
                       )
    parser.add_argument('-g', '--group', type=bool, dest='group',
                        default=False,
                        help='Generates one mobileconfig for all apps specified',
                       )
    parser.add_argument('-o', type=str, dest='org',
                        default="",
                        help='Sets organization in profile, empty by default',
                       )
    parser.add_argument('-p', '--profile_id', type=str, dest='profile_id',
                        default="com.github.arubdesu.extinguish",
                        help='Used as identifier for payload id in reverse-domain format. '
                             'Uses "com.github.arubdesu.extinguish" by default',
                       )
    options = parser.parse_args()
    #build_payload, handling one-off drag-drops first
    out_uuid = str(uuid.uuid4())
    group = False
    if options.app_bundle:
        if options.app_bundle.endswith('.app'):
            try:
                infoplist_path = (options.app_bundle + '/Contents/Info.plist')
                bundle_id = CoreFoundation.CFPreferencesCopyAppValue("CFBundleIdentifier", infoplist_path)
                appname = bundle_id.split('.')[-1]
                in_uuid = str(uuid.uuid4())
                payload_id = "SparkleDisabler." + out_uuid + ".alacarte.customsettings." + in_uuid
                payload = build_payload(bundle_id)
                payload_dict = {"PayloadContent": payload,
                                "PayloadEnabled": True,
                                "PayloadIdentifier": payload_id,
                                "PayloadType": "com.apple.ManagedClient.preferences",
                                "PayloadUUID": in_uuid,
                                "PayloadVersion": 1,
                               }
                inside_dict = [payload_dict]
                whole = integrate_whole(inside_dict, options.org, out_uuid, group)
                extend_dict = {"PayloadDescription": "Custom settings to disable "
                               "sparkle updates for %s.app" % appname,
                               "PayloadDisplayName": "SparkleDisabler: %s" % bundle_id,
                               "PayloadIdentifier": options.profile_id + '.' + appname,
                              }
                whole.update(extend_dict)
                mobilecfg_path = ''.join([os.getcwd(), '/disable_autoupdates_',
                                            bundle_id.split('.')[-1], '.mobileconfig'])
                with open(mobilecfg_path, 'w') as final:
                    plistlib.writePlist(whole, final)
                sys.exit(0)
            except OSError:
                print 'Info.plist not found, exiting'
                sys.exit(1)
        else:
            print 'Not recognized as an app bundle, exiting'
            print parser.print_help()
            sys.exit(1)
    to_process = options.apps
    if not to_process:
        print parser.print_help()
        sys.exit(0)
    payload_list = {}
    for bundle_id in to_process:
        #gen uuid's for containing profile and payload
        appname = bundle_id.split('.')[-1]
        payload = build_payload(bundle_id)
        if not options.group:
            in_uuid, out_uuid = str(uuid.uuid4()), str(uuid.uuid4())
            payload_id = ''.join(["SparkleDisabler.", out_uuid,
                               ".alacarte.customsettings.", in_uuid])
            payload_dict = {"PayloadContent": payload,
                            "PayloadEnabled": True,
                            "PayloadIdentifier": payload_id,
                            "PayloadType": "com.apple.ManagedClient.preferences",
                            "PayloadUUID": in_uuid,
                            "PayloadVersion": 1,
                           }
            inside_dict = [payload_dict]
            whole = integrate_whole(inside_dict, options.org, out_uuid, group)
            extend_dict = {"PayloadDescription": "Custom settings to disable "
                           "sparkle updates for %s.app" % appname,
                           "PayloadDisplayName": "SparkleDisabler: %s" % bundle_id,
                           "PayloadIdentifier": options.profile_id + '.' + appname,
                          }
            whole.update(extend_dict)
            mobilecfg_path = ''.join([os.getcwd(), '/disable_autoupdates_',
                                       bundle_id.split('.')[-1], '.mobileconfig'])
            with open(mobilecfg_path, 'w') as final:
                plistlib.writePlist(whole, final)

        else:
            payload_list[bundle_id] = payload[bundle_id]
    if options.group:
        group = True
        mobilecfg_path = ''.join([os.getcwd(), '/disable_all_sparkle_',
                            'autoupdates.mobileconfig'])
        out_uuid = str(uuid.uuid4())
        payload_id = ''.join(["SparkleDisabler.", out_uuid,
                               ".alacarte.customsettings"])

        whole = integrate_whole(payload_list, options.org, out_uuid, group)
        extend_dict = {"PayloadDescription": "Custom settings to disable "
                       "all sparkle apps from updating over http",
                       "PayloadDisplayName": "ExtinguishGeneratedSparkleDisabler",
                       "PayloadIdentifier": payload_id
                      }
        whole.update(extend_dict)
        with open(mobilecfg_path, 'w') as final:
            plistlib.writePlist(whole, final)
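
extinguish.py above builds its payload identifiers from two freshly generated UUIDs: out_uuid names the enclosing profile and in_uuid names the nested payload, and both are regenerated per app unless --group is used. A short sketch of that naming step, reusing the example's prefix purely for illustration:

import uuid

out_uuid = str(uuid.uuid4())   # identifies the containing profile
in_uuid = str(uuid.uuid4())    # identifies the nested payload dict
payload_id = "SparkleDisabler.%s.alacarte.customsettings.%s" % (out_uuid, in_uuid)

payload_dict = {
    "PayloadIdentifier": payload_id,
    "PayloadUUID": in_uuid,
    "PayloadVersion": 1,
}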

Example 16

Project: aodh
Source File: test_composite.py
View license
    def prepare_alarms(self):
        self.alarms = [
            models.Alarm(name='alarm_threshold_nest',
                         description='alarm with sub rules nested combined',
                         type='composite',
                         enabled=True,
                         user_id='fake_user',
                         project_id='fake_project',
                         alarm_id=str(uuid.uuid4()),
                         state='insufficient data',
                         state_timestamp=constants.MIN_DATETIME,
                         timestamp=constants.MIN_DATETIME,
                         insufficient_data_actions=[],
                         ok_actions=[],
                         alarm_actions=[],
                         repeat_actions=False,
                         time_constraints=[],
                         rule={
                             "or": [self.sub_rule1,
                                    {"and": [self.sub_rule2, self.sub_rule3]
                                     }]
                         },
                         severity='critical'),
            models.Alarm(name='alarm_threshold_or',
                         description='alarm on one of sub rules triggered',
                         type='composite',
                         enabled=True,
                         user_id='fake_user',
                         project_id='fake_project',
                         state='insufficient data',
                         state_timestamp=constants.MIN_DATETIME,
                         timestamp=constants.MIN_DATETIME,
                         insufficient_data_actions=[],
                         ok_actions=[],
                         alarm_actions=[],
                         repeat_actions=False,
                         alarm_id=str(uuid.uuid4()),
                         time_constraints=[],
                         rule={
                             "or": [self.sub_rule1, self.sub_rule2,
                                    self.sub_rule3]
                         },
                         severity='critical'
                         ),
            models.Alarm(name='alarm_threshold_and',
                         description='alarm on all the sub rules triggered',
                         type='composite',
                         enabled=True,
                         user_id='fake_user',
                         project_id='fake_project',
                         state='insufficient data',
                         state_timestamp=constants.MIN_DATETIME,
                         timestamp=constants.MIN_DATETIME,
                         insufficient_data_actions=[],
                         ok_actions=[],
                         alarm_actions=[],
                         repeat_actions=False,
                         alarm_id=str(uuid.uuid4()),
                         time_constraints=[],
                         rule={
                             "and": [self.sub_rule1, self.sub_rule2,
                                     self.sub_rule3]
                         },
                         severity='critical'
                         ),
            models.Alarm(name='alarm_multi_type_rules',
                         description='alarm with threshold and gnocchi rules',
                         type='composite',
                         enabled=True,
                         user_id='fake_user',
                         project_id='fake_project',
                         alarm_id=str(uuid.uuid4()),
                         state='insufficient data',
                         state_timestamp=constants.MIN_DATETIME,
                         timestamp=constants.MIN_DATETIME,
                         insufficient_data_actions=[],
                         ok_actions=[],
                         alarm_actions=[],
                         repeat_actions=False,
                         time_constraints=[],
                         rule={
                             "and": [self.sub_rule2, self.sub_rule3,
                                     {'or': [self.sub_rule1, self.sub_rule4,
                                             self.sub_rule5, self.sub_rule6]}]
                         },
                         severity='critical'
                         ),
        ]
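
Each fixture above gets a random primary key via alarm_id=str(uuid.uuid4()), so repeated test runs never reuse identifiers. The same pattern, sketched with a plain dict standing in for models.Alarm:

import uuid

def make_alarm(name, rule):
    # Every call mints its own alarm_id, mirroring the fixtures above.
    return {"alarm_id": str(uuid.uuid4()), "name": name, "rule": rule}

alarms = [make_alarm("alarm_threshold_or", {"or": []}),
          make_alarm("alarm_threshold_and", {"and": []})]
assert len({a["alarm_id"] for a in alarms}) == len(alarms)  # ids are all distinct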

Example 17

Project: aodh
Source File: test_gnocchi.py
View license
    def setUp(self):
        self.client = self.useFixture(mockpatch.Patch(
            'aodh.evaluator.gnocchi.client'
        )).mock.Client.return_value
        self.prepared_alarms = [
            models.Alarm(name='instance_running_hot',
                         description='instance_running_hot',
                         type='gnocchi_resources_threshold',
                         enabled=True,
                         user_id='foobar',
                         project_id='snafu',
                         alarm_id=str(uuid.uuid4()),
                         state='insufficient data',
                         state_timestamp=constants.MIN_DATETIME,
                         timestamp=constants.MIN_DATETIME,
                         insufficient_data_actions=[],
                         ok_actions=[],
                         alarm_actions=[],
                         repeat_actions=False,
                         time_constraints=[],
                         rule=dict(
                             comparison_operator='gt',
                             threshold=80.0,
                             evaluation_periods=5,
                             aggregation_method='mean',
                             granularity=60,
                             metric='cpu_util',
                             resource_type='instance',
                             resource_id='my_instance')
                         ),
            models.Alarm(name='group_running_idle',
                         description='group_running_idle',
                         type='gnocchi_aggregation_by_metrics_threshold',
                         enabled=True,
                         user_id='foobar',
                         project_id='snafu',
                         state='insufficient data',
                         state_timestamp=constants.MIN_DATETIME,
                         timestamp=constants.MIN_DATETIME,
                         insufficient_data_actions=[],
                         ok_actions=[],
                         alarm_actions=[],
                         repeat_actions=False,
                         alarm_id=str(uuid.uuid4()),
                         time_constraints=[],
                         rule=dict(
                             comparison_operator='le',
                             threshold=10.0,
                             evaluation_periods=4,
                             aggregation_method='max',
                             granularity=300,
                             metrics=['0bb1604d-1193-4c0a-b4b8-74b170e35e83',
                                      '9ddc209f-42f8-41e1-b8f1-8804f59c4053']),
                         ),
            models.Alarm(name='instance_not_running',
                         description='instance_running_hot',
                         type='gnocchi_aggregation_by_resources_threshold',
                         enabled=True,
                         user_id='foobar',
                         project_id='snafu',
                         alarm_id=str(uuid.uuid4()),
                         state='insufficient data',
                         state_timestamp=constants.MIN_DATETIME,
                         timestamp=constants.MIN_DATETIME,
                         insufficient_data_actions=[],
                         ok_actions=[],
                         alarm_actions=[],
                         repeat_actions=False,
                         time_constraints=[],
                         rule=dict(
                             comparison_operator='gt',
                             threshold=80.0,
                             evaluation_periods=6,
                             aggregation_method='mean',
                             granularity=50,
                             metric='cpu_util',
                             resource_type='instance',
                             query='{"=": {"server_group": '
                                   '"my_autoscaling_group"}}')
                         ),

        ]
        super(TestGnocchiEvaluatorBase, self).setUp()
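
Here uuid.uuid4() again supplies the alarm_id, while the metrics list in the second rule carries pre-existing UUID strings. uuid.UUID() is the natural counterpart for parsing and validating such incoming identifiers; a quick sketch:

import uuid

metric_ids = ['0bb1604d-1193-4c0a-b4b8-74b170e35e83',
              '9ddc209f-42f8-41e1-b8f1-8804f59c4053']
parsed = [uuid.UUID(m) for m in metric_ids]   # raises ValueError on malformed ids
fresh_alarm_id = str(uuid.uuid4())            # newly minted, as in the fixtures above
assert fresh_alarm_id not in metric_ids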

Example 18

Project: cue
Source File: test_delete_cluster.py
View license
    def test_delete_cluster(self):
        flow_store_create = {
            "tenant_id": str(self.valid_network['tenant_id']),
            "image": self.valid_image.id,
            "flavor": self.valid_flavor.id,
            "port": self.port,
            "context": self.context.to_dict(),
            "erlang_cookie": str(uuid.uuid4()),
            "default_rabbit_user": 'rabbit',
            "default_rabbit_pass": str(uuid.uuid4()),
        }
        flow_store_delete = {
            "context": self.context.to_dict(),
        }

        cluster_values = {
            "project_id": self.context.tenant_id,
            "name": "RabbitCluster",
            "network_id": str(uuid.uuid4()),
            "flavor": "1",
            "size": 3,
        }

        new_cluster = objects.Cluster(**cluster_values)
        new_cluster.create(self.context)

        nodes = objects.Node.get_nodes_by_cluster_id(self.context,
                                                     new_cluster.id)

        node_ids = []
        for node in nodes:
            node_ids.append(str(node.id))

        flow_create = create_cluster(new_cluster.id,
                                     node_ids,
                                     self.valid_network['id'],
                                     self.management_network['id'])

        result = engines.run(flow_create, store=flow_store_create)

        nodes_after = objects.Node.get_nodes_by_cluster_id(self.context,
                                                           new_cluster.id)

        cluster_after = objects.Cluster.get_cluster_by_id(self.context,
                                                          new_cluster.id)

        self.assertEqual(models.Status.ACTIVE, cluster_after.status,
                         "Invalid status for cluster")

        for i, node in enumerate(nodes_after):
            self.assertEqual(models.Status.ACTIVE, result["vm_status_%d" % i])
            self.new_vm_list.append(result["vm_id_%d" % i])
            self.assertEqual(models.Status.ACTIVE, node.status,
                             "Invalid status for node %d" % i)
            endpoints = objects.Endpoint.get_endpoints_by_node_id(self.context,
                                                                  node.id)
            self.assertEqual(1, len(endpoints), "invalid number of endpoints "
                                                "received")
            endpoint = endpoints.pop()
            self.assertEqual(node.id, endpoint.node_id, "invalid endpoint node"
                                                        " id reference")

            uri = result['vm_user_ip_' + str(i)]
            uri += ':' + self.port
            self.assertEqual(uri, endpoint.uri, "invalid endpoint uri")
            self.assertEqual('AMQP', endpoint.type, "invalid endpoint type")

        flow_delete = delete_cluster(str(new_cluster.id), node_ids,
                                     cluster_after.group_id)
        result = engines.run(flow_delete, store=flow_store_delete)

        nodes_after = objects.Node.get_nodes_by_cluster_id(self.context,
                                                           new_cluster.id)

        self.assertRaises(exception.NotFound,
                          objects.Cluster.get_cluster_by_id,
                          self.context,
                          new_cluster.id)

        for i, node in enumerate(nodes_after):
            self.new_vm_list.remove(result["vm_id_%d" % i])
            self.assertEqual(models.Status.DELETED, node.status,
                             "Invalid status for node %d" % i)
            endpoints = objects.Endpoint.get_endpoints_by_node_id(self.context,
                                                                  node.id)
            self.assertEqual(0, len(endpoints), "endpoints were not deleted")
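
The cue flow above also uses uuid.uuid4() for throwaway credentials (erlang_cookie, default_rabbit_pass) and for a stand-in network_id. For real deployments the secrets module is the usual choice for passwords, but a test fixture only needs a fresh, unique string per run; a sketch of that store-building step:

import uuid

flow_store_create = {
    "erlang_cookie": str(uuid.uuid4()),
    "default_rabbit_user": "rabbit",
    "default_rabbit_pass": str(uuid.uuid4()),
    "network_id": str(uuid.uuid4()),   # placeholder id, unique per test run
}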

Example 19

Project: fuel-ostf
Source File: test_murano_linux.py
View license
    def test_deploy_dummy_app(self):
        """Check that user can deploy application in Murano environment
        Target component: Murano

        Scenario:
            1. Prepare test app.
            2. Upload test app.
            3. Send request to create environment.
            4. Send request to create session for environment.
            5. Send request to create test service.
            6. Send request to deploy session.
            7. Checking environment status.
            8. Checking deployment status.
            9. Send request to delete environment.
            10. Send request to delete package.

        Duration: 1200 s.
        Deployment tags: Murano | murano_plugin, murano_without_glare
        Available since release: 2014.2-6.1
        """

        vms_count = self.get_info_about_available_resources(
            self.min_required_ram_mb, 40, 2)
        if vms_count < 1:
            msg = ('This test requires more hardware resources of your '
                   'OpenStack cluster: your cloud should allow to create '
                   'at least 1 VM with {0} MB of RAM, {1} HDD and {2} vCPUs. '
                   'You need to remove some resources or add compute nodes '
                   'to have an ability to run this OSTF test.'
                   .format(self.min_required_ram_mb, 40, 2))
            LOG.debug(msg)
            self.skipTest(msg)

        if self.package_exists(self.dummy_fqdn):
            package = self.get_package_by_fqdn(self.dummy_fqdn)
            self.delete_package(package["id"])

        fail_msg = ("Package preparation failed. Please refer to "
                    "OSTF logs for more information")
        zip_path = self.verify(10, self.zip_dir, 1, fail_msg,
                               'prepare package',
                               os.path.dirname(__file__), self.dummy_fqdn)

        fail_msg = ("Package uploading failed. "
                    "Please refer to Openstack and OSTF logs")
        self.package = self.verify(10, self.upload_package, 2, fail_msg,
                                   'uploading package', 'SimpleApp',
                                   {"categories": ["Web"], "tags": ["tag"]},
                                   zip_path)

        fail_msg = "Can't create environment. Murano API is not available. "
        self.environment = self.verify(15, self.create_environment,
                                       3, fail_msg, 'creating environment',
                                       self.env_name)

        fail_msg = "User can't create session for environment. "
        session = self.verify(5, self.create_session,
                              4, fail_msg, "session creating",
                              self.environment.id)

        post_body = {
            "instance": {
                "flavor": self.flavor_name,
                "image": "TestVM",
                "assignFloatingIp": True,
                "?": {
                    "type": "io.murano.resources.LinuxMuranoInstance",
                    "id": str(uuid.uuid4())
                },
                "name": rand_name("testMurano")
            },
            "name": rand_name("teMurano"),
            "?": {
                "_{id}".format(id=uuid.uuid4().hex): {
                    "name": "SimpleApp"
                },
                "type": self.dummy_fqdn,
                "id": str(uuid.uuid4())
            }
        }

        fail_msg = "User can't create service. "
        self.verify(5, self.create_service,
                    5, fail_msg, "service creating",
                    self.environment.id, session.id, post_body)

        fail_msg = "User can't deploy session. "
        self.verify(5, self.deploy_session,
                    6, fail_msg,
                    "sending session on deployment",
                    self.environment.id, session.id)

        fail_msg = "Deployment was not completed correctly. "
        self.verify(860, self.deploy_check,
                    7, fail_msg, 'deployment is going',
                    self.environment)

        self.verify(5, self.deployments_status_check, 8, fail_msg,
                    'Check deployments status',
                    self.environment.id)

        fail_msg = "Can't delete environment. "
        self.verify(180, self.environment_delete_check,
                    9, fail_msg, "deleting environment",
                    self.environment.id)

        fail_msg = "Can't delete package"
        self.verify(5, self.delete_package, 10, fail_msg, "deleting_package",
                    self.package.id)
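
The Murano object model above mixes two textual forms of the same call: str(uuid.uuid4()) for the "id" fields and uuid.uuid4().hex inside the "_{id}" key template. The difference is purely formatting, as this sketch shows:

import uuid

u = uuid.uuid4()
print(str(u))    # 36 characters, hyphenated
print(u.hex)     # 32 characters, the same hex digits without hyphens
assert u.hex == str(u).replace('-', '')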

Example 20

Project: fuel-ostf
Source File: test_murano_linux.py
View license
    def test_deploy_dummy_app_with_glare(self):
        """Check application deployment in Murano environment with GLARE
        Target component: Murano

        Scenario:
            1. Prepare test app.
            2. Upload test app.
            3. Send request to create environment.
            4. Send request to create session for environment.
            5. Send request to create test service.
            6. Send request to deploy session.
            7. Checking environment status.
            8. Checking deployment status.
            9. Send request to delete environment.
            10. Send request to delete package.

        Duration: 1200 s.
        Deployment tags: Murano | murano_plugin, murano_use_glare
        Available since release: 2014.2-6.1
        """
        artifacts = True
        vms_count = self.get_info_about_available_resources(
            self.min_required_ram_mb, 40, 2)
        if vms_count < 1:
            msg = ('This test requires more hardware resources of your '
                   'OpenStack cluster: your cloud should allow to create '
                   'at least 1 VM with {0} MB of RAM, {1} HDD and {2} vCPUs. '
                   'You need to remove some resources or add compute nodes '
                   'to have an ability to run this OSTF test.'
                   .format(self.min_required_ram_mb, 40, 2))
            LOG.debug(msg)
            self.skipTest(msg)

        if self.package_exists(artifacts, self.dummy_fqdn):
            package = self.get_package_by_fqdn(self.dummy_fqdn, artifacts)
            self.delete_package(package.to_dict()["id"], artifacts)

        fail_msg = ("Package preparation failed. Please refer to "
                    "OSTF logs for more information")
        zip_path = self.verify(10, self.zip_dir, 1, fail_msg,
                               'prepare package',
                               os.path.dirname(__file__), self.dummy_fqdn)

        fail_msg = ("Package uploading failed. "
                    "Please refer to Openstack and OSTF logs")
        self.package = self.verify(10, self.upload_package, 2, fail_msg,
                                   'uploading package', 'SimpleApp',
                                   {"categories": ["Web"], "tags": ["tag"]},
                                   zip_path, artifacts)

        fail_msg = "Can't create environment. Murano API is not available. "
        self.environment = self.verify(15, self.create_environment,
                                       3, fail_msg, 'creating environment',
                                       self.env_name)

        fail_msg = "User can't create session for environment. "
        session = self.verify(5, self.create_session,
                              4, fail_msg, "session creating",
                              self.environment.id)

        post_body = {
            "instance": {
                "flavor": self.flavor_name,
                "image": "TestVM",
                "assignFloatingIp": True,
                "?": {
                    "type": "io.murano.resources.LinuxMuranoInstance",
                    "id": str(uuid.uuid4())
                },
                "name": rand_name("testMurano")
            },
            "name": rand_name("teMurano"),
            "?": {
                "_{id}".format(id=uuid.uuid4().hex): {
                    "name": "SimpleApp"
                },
                "type": self.dummy_fqdn,
                "id": str(uuid.uuid4())
            }
        }

        fail_msg = "User can't create service. "
        self.verify(5, self.create_service,
                    5, fail_msg, "service creating",
                    self.environment.id, session.id, post_body)

        fail_msg = "User can't deploy session. "
        self.verify(5, self.deploy_session,
                    6, fail_msg,
                    "sending session on deployment",
                    self.environment.id, session.id)

        fail_msg = "Deployment was not completed correctly. "
        self.verify(860, self.deploy_check,
                    7, fail_msg, 'deployment is going',
                    self.environment)

        self.verify(5, self.deployments_status_check, 8, fail_msg,
                    'Check deployments status',
                    self.environment.id)

        fail_msg = "Can't delete environment. "
        self.verify(180, self.environment_delete_check,
                    9, fail_msg, "deleting environment",
                    self.environment.id)

        fail_msg = "Can't delete package"
        self.verify(5, self.delete_package, 10, fail_msg, "deleting_package",
                    self.package.id, artifacts)

Example 21

Project: fuel-ostf
Source File: test_murano_linux.py
View license
    def test_deploy_apache_service(self):
        """Check that user can deploy Apache service in Murano environment
        Target component: Murano

        Scenario:
            1. Send request to create environment.
            2. Send request to create session for environment.
            3. Send request to create Linux-based service Apache.
            4. Request to deploy session.
            5. Checking environment status.
            6. Checking deployments status.
            7. Checking ports.
            8. Send request to delete environment.

        Duration: 2140 s.
        Deployment tags: Murano | murano_plugin, murano_without_artifacts
        Available since release: 2014.2-6.0
        """

        vms_count = self.get_info_about_available_resources(
            self.min_required_ram_mb, 40, 2)
        if vms_count < 1:
            msg = ('This test requires more hardware resources of your '
                   'OpenStack cluster: your cloud should allow to create '
                   'at least 1 VM with {0} MB of RAM, {1} HDD and {2} vCPUs. '
                   'You need to remove some resources or add compute nodes '
                   'to have an ability to run this OSTF test.'
                   .format(self.min_required_ram_mb, 40, 2))
            LOG.debug(msg)
            self.skipTest(msg)

        if not self.image:
            msg = ('Murano image was not properly registered or was not '
                   'uploaded at all. Please refer to the Fuel '
                   'documentation ({0}) to find out how to upload and/or '
                   'register image for Murano.'.format(self.doc_link))
            LOG.debug(msg)
            self.skipTest(msg)

        if not self.package_exists('io.murano.apps.apache.ApacheHttpServer'):
            self.skipTest("This test requires Apache HTTP Server application."
                          "Please add this application to Murano "
                          "and run this test again.")

        fail_msg = "Can't create environment. Murano API is not available. "
        self.environment = self.verify(15, self.create_environment,
                                       1, fail_msg, 'creating environment',
                                       self.env_name)

        fail_msg = "User can't create session for environment. "
        session = self.verify(5, self.create_session,
                              2, fail_msg, "session creating",
                              self.environment.id)

        post_body = {
            "instance": {
                "flavor": self.flavor_name,
                "image": self.image.name,
                "assignFloatingIp": True,
                "?": {
                    "type": "io.murano.resources.LinuxMuranoInstance",
                    "id": str(uuid.uuid4())
                },
                "name": rand_name("testMurano")
            },
            "name": rand_name("teMurano"),
            "?": {
                "_{id}".format(id=uuid.uuid4().hex): {
                    "name": "Apache"
                },
                "type": "io.murano.apps.apache.ApacheHttpServer",
                "id": str(uuid.uuid4())
            }
        }

        fail_msg = "User can't create service. "
        apache = self.verify(5, self.create_service,
                             3, fail_msg, "service creating",
                             self.environment.id, session.id, post_body)

        fail_msg = "User can't deploy session. "
        self.verify(5, self.deploy_session,
                    4, fail_msg,
                    "sending session on deployment",
                    self.environment.id, session.id)

        fail_msg = "Deployment was not completed correctly. "
        self.environment = self.verify(1800, self.deploy_check,
                                       5, fail_msg, 'deployment is going',
                                       self.environment)

        self.verify(5, self.deployments_status_check,
                    6, fail_msg,
                    'Check deployments status',
                    self.environment.id)

        self.verify(300, self.port_status_check,
                    7, fail_msg,
                    'Check that needed ports are opened',
                    self.environment, [[apache['instance']['name'], 22, 80]])

        fail_msg = "Can't delete environment. "
        self.verify(5, self.delete_environment,
                    8, fail_msg, "deleting environment",
                    self.environment.id)

Example 22

Project: fuel-ostf
Source File: test_murano_linux.py
View license
    def test_deploy_wordpress_app(self):
        """Check that user can deploy WordPress app in Murano environment
        Target component: Murano

        Scenario:
            1. Send request to create environment.
            2. Send request to create session for environment.
            3. Send request to create MySQL.
            4. Send request to create Linux-based service Apache.
            5. Send request to create WordPress.
            6. Request to deploy session.
            7. Checking environment status.
            8. Checking deployments status.
            9. Checking ports availability.
            10. Checking WordPress path.
            11. Send request to delete environment.

        Duration: 2140 s.
        Deployment tags: Murano | murano_plugin, murano_without_artifacts
        Available since release: 2014.2-6.1
        """

        vms_count = self.get_info_about_available_resources(
            self.min_required_ram_mb, 40, 2)
        if vms_count < 2:
            msg = ('This test requires more hardware resources of your '
                   'OpenStack cluster: your cloud should allow to create '
                   'at least 2 VMs with {0} MB of RAM, {1} HDD and {2} vCPUs.'
                   ' You need to remove some resources or add compute nodes '
                   'to have an ability to run this OSTF test.'
                   .format(self.min_required_ram_mb, 40, 2))
            LOG.debug(msg)
            self.skipTest(msg)

        if not self.image:
            msg = ('Murano image was not properly registered or was not '
                   'uploaded at all. Please refer to the Fuel '
                   'documentation ({0}) to find out how to upload and/or '
                   'register image for Murano.'.format(self.doc_link))
            LOG.debug(msg)
            self.skipTest(msg)

        if not self.package_exists('io.murano.apps.apache.ApacheHttpServer',
                                   'io.murano.databases.MySql',
                                   'io.murano.apps.WordPress'):
            self.skipTest("This test requires Apache HTTP Server, "
                          "MySQL database and WordPress applications."
                          "Please add this applications to Murano and "
                          "run this test again.")

        fail_msg = "Can't create environment. Murano API is not available. "
        self.environment = self.verify(15, self.create_environment,
                                       1, fail_msg, 'creating environment',
                                       self.env_name)

        fail_msg = "User can't create session for environment. "
        session = self.verify(5, self.create_session,
                              2, fail_msg, "session creating",
                              self.environment.id)

        post_body = {
            "instance": {
                "flavor": self.flavor_name,
                "image": self.image.name,
                "assignFloatingIp": True,
                "?": {
                    "type": "io.murano.resources.LinuxMuranoInstance",
                    "id": str(uuid.uuid4())
                },
                "name": rand_name("testMurano")
            },
            "name": rand_name("teMurano"),
            "database": rand_name("ostf"),
            "username": rand_name("ostf"),
            "password": rand_name("[email protected]"),
            "?": {
                "_{id}".format(id=uuid.uuid4().hex): {
                    "name": "MySQL"
                },
                "type": "io.murano.databases.MySql",
                "id": str(uuid.uuid4())
            }
        }

        fail_msg = "User can't create service MySQL. "
        self.mysql = self.verify(5, self.create_service,
                                 3, fail_msg, "service creating",
                                 self.environment.id, session.id,
                                 post_body)

        post_body = {
            "instance": {
                "flavor": self.flavor_name,
                "image": self.image.name,
                "assignFloatingIp": True,
                "?": {
                    "type": "io.murano.resources.LinuxMuranoInstance",
                    "id": str(uuid.uuid4())
                },
                "name": rand_name("testMurano")
            },
            "name": rand_name("teMurano"),
            "enablePHP": True,
            "?": {
                "_{id}".format(id=uuid.uuid4().hex): {
                    "name": "Apache"
                },
                "type": "io.murano.apps.apache.ApacheHttpServer",
                "id": str(uuid.uuid4())
            }
        }

        fail_msg = "User can't create service Apache. "
        self.apache = self.verify(5, self.create_service,
                                  4, fail_msg, "service creating",
                                  self.environment.id, session.id,
                                  post_body)

        post_body = {
            "name": rand_name("teMurano"),
            "server": self.apache,
            "database": self.mysql,
            "dbName": "wordpress",
            "dbUser": "wp_user",
            "dbPassword": "[email protected]",
            "?": {
                "_{id}".format(id=uuid.uuid4().hex): {
                    "name": "WordPress"
                },
                "type": "io.murano.apps.WordPress",
                "id": str(uuid.uuid4())
            }
        }

        fail_msg = "User can't create service WordPress. "
        self.verify(5, self.create_service,
                    5, fail_msg, "service creating",
                    self.environment.id, session.id, post_body)

        fail_msg = "User can't deploy session. "
        self.verify(5, self.deploy_session,
                    6, fail_msg,
                    "sending session on deployment",
                    self.environment.id, session.id)

        fail_msg = "Deployment was not completed correctly. "
        self.environment = self.verify(2400, self.deploy_check,
                                       7, fail_msg, 'deployment is going',
                                       self.environment)

        self.verify(5, self.deployments_status_check,
                    8, fail_msg,
                    'Check deployments status',
                    self.environment.id)

        self.verify(300, self.port_status_check,
                    9, fail_msg,
                    'Check that needed ports are opened',
                    self.environment,
                    [[self.apache['instance']['name'], 22, 80],
                     [self.mysql['instance']['name'], 22, 3306]])

        fail_msg = "Path to WordPress unavailable"
        self.verify(30, self.check_path, 10, fail_msg,
                    'checking path availability',
                    self.environment, "wordpress",
                    self.apache['instance']['name'])

        fail_msg = "Can't delete environment. "
        self.verify(10, self.delete_environment,
                    11, fail_msg, "deleting environment",
                    self.environment.id)

Example 23

Project: gnocchi
Source File: test_storage.py
View license
    def test_rewrite_measures(self):
        # Create an archive policy that spans on several splits. Each split
        # being 3600 points, let's go for 36k points so we have 10 splits.
        apname = str(uuid.uuid4())
        ap = archive_policy.ArchivePolicy(apname, 0, [(36000, 60)])
        self.index.create_archive_policy(ap)
        self.metric = storage.Metric(uuid.uuid4(), ap)
        self.index.create_metric(self.metric.id, str(uuid.uuid4()),
                                 str(uuid.uuid4()),
                                 apname)

        # First store some points scattered across different splits
        self.storage.add_measures(self.metric, [
            storage.Measure(utils.dt_to_unix_ns(2016, 1, 1, 12, 0, 1), 69),
            storage.Measure(utils.dt_to_unix_ns(2016, 1, 2, 13, 7, 31), 42),
            storage.Measure(utils.dt_to_unix_ns(2016, 1, 4, 14, 9, 31), 4),
            storage.Measure(utils.dt_to_unix_ns(2016, 1, 6, 15, 12, 45), 44),
        ])
        self.trigger_processing()

        splits = {'1451520000.0', '1451736000.0', '1451952000.0'}
        self.assertEqual(splits,
                         self.storage._list_split_keys_for_metric(
                             self.metric, "mean", 60.0))

        if self.storage.WRITE_FULL:
            assertCompressedIfWriteFull = self.assertTrue
        else:
            assertCompressedIfWriteFull = self.assertFalse

        data = self.storage._get_measures(
            self.metric, '1451520000.0', "mean", 60.0)
        self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data))
        data = self.storage._get_measures(
            self.metric, '1451736000.0', "mean", 60.0)
        self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data))
        data = self.storage._get_measures(
            self.metric, '1451952000.0', "mean", 60.0)
        assertCompressedIfWriteFull(
            carbonara.AggregatedTimeSerie.is_compressed(data))

        self.assertEqual([
            (utils.datetime_utc(2016, 1, 1, 12), 60.0, 69),
            (utils.datetime_utc(2016, 1, 2, 13, 7), 60.0, 42),
            (utils.datetime_utc(2016, 1, 4, 14, 9), 60.0, 4),
            (utils.datetime_utc(2016, 1, 6, 15, 12), 60.0, 44),
        ], self.storage.get_measures(self.metric, granularity=60.0))

        # Now store brand new points that should force a rewrite of one of
        # the splits (keep in mind the back window size is one hour here). We
        # move the BoundTimeSerie processing timeserie far away from its
        # current range.
        self.storage.add_measures(self.metric, [
            storage.Measure(utils.dt_to_unix_ns(2016, 1, 10, 16, 18, 45), 45),
            storage.Measure(utils.dt_to_unix_ns(2016, 1, 10, 17, 12, 45), 46),
        ])
        self.trigger_processing()

        self.assertEqual({'1452384000.0', '1451736000.0',
                          '1451520000.0', '1451952000.0'},
                         self.storage._list_split_keys_for_metric(
                             self.metric, "mean", 60.0))
        data = self.storage._get_measures(
            self.metric, '1451520000.0', "mean", 60.0)
        self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data))
        data = self.storage._get_measures(
            self.metric, '1451736000.0', "mean", 60.0)
        self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data))
        data = self.storage._get_measures(
            self.metric, '1451952000.0', "mean", 60.0)
        # Now this one is compressed because it has been rewritten!
        self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data))
        data = self.storage._get_measures(
            self.metric, '1452384000.0', "mean", 60.0)
        assertCompressedIfWriteFull(
            carbonara.AggregatedTimeSerie.is_compressed(data))

        self.assertEqual([
            (utils.datetime_utc(2016, 1, 1, 12), 60.0, 69),
            (utils.datetime_utc(2016, 1, 2, 13, 7), 60.0, 42),
            (utils.datetime_utc(2016, 1, 4, 14, 9), 60.0, 4),
            (utils.datetime_utc(2016, 1, 6, 15, 12), 60.0, 44),
            (utils.datetime_utc(2016, 1, 10, 16, 18), 60.0, 45),
            (utils.datetime_utc(2016, 1, 10, 17, 12), 60.0, 46),
        ], self.storage.get_measures(self.metric, granularity=60.0))

Example 24

View license
    def test_create_network_function_device(self):
        driver = orchestration_driver.OrchestrationDriver(
            cfg.CONF,
            supports_device_sharing=True,
            supports_hotplug=True,
            max_interfaces=8)
        driver.network_handler = driver.network_handlers['gbp']

        # Mock the client methods
        driver.identity_handler.get_admin_token = mock.MagicMock(
            return_value='token')
        driver.identity_handler.get_tenant_id = mock.MagicMock(
            return_value='8')
        driver.identity_handler.get_keystone_creds = mock.MagicMock(
            return_value=(None, None, 'admin', None))
        driver.network_handler.create_port = mock.MagicMock(
            return_value={'id': str(pyuuid.uuid4()),
                          'port_id': str(pyuuid.uuid4())})
        driver.network_handler.set_promiscuos_mode = mock.MagicMock(
            return_value=None)
        driver.network_handler.set_promiscuos_mode_fast = mock.MagicMock(
            return_value=None)
        driver.compute_handler_nova.get_image_id = mock.MagicMock(
            return_value='6')
        driver.compute_handler_nova.get_image_metadata = mock.MagicMock(
            return_value=[])
        driver.compute_handler_nova.create_instance = mock.MagicMock(
            return_value='8')
        driver.network_handler.delete_port = mock.MagicMock(
            return_value=None)
        driver.network_handler.get_port_id = mock.MagicMock(return_value='7')
        driver.network_handler.get_port_details = mock.MagicMock(
            return_value=('a.b.c.d',
                          'aa:bb:cc:dd:ee:ff',
                          'p.q.r.s/t',
                          'w.x.y.z'))
        driver.network_handler.get_neutron_port_details = mock.MagicMock(
            return_value=(1, 2, 3, 4,
                          {'port': {}},
                          {'subnet': {}}))

        # test for create device when interface hotplug is enabled
        device_data = {'service_details': {'device_type': 'xyz',
                                           'service_type': 'firewall',
                                           'service_vendor': 'vyos',
                                           'network_mode': 'gbp'},
                       'name': 'FIREWALL.vyos.1.2',
                       'volume_support': None,
                       'volume_size': None,
                       'management_network_info': {'id': '2'},
                       'ports': [{'id': '3',
                                  'port_model': 'gbp',
                                  'port_classification': 'provider'},
                                 {'id': '4',
                                  'port_model': 'gbp',
                                  'port_classification': 'consumer'}],
                       'token': str(pyuuid.uuid4()),
                       'admin_tenant_id': str(pyuuid.uuid4())}
        self.assertRaises(exceptions.ComputePolicyNotSupported,
                          driver.create_network_function_device,
                          device_data)
        device_data['service_details']['device_type'] = 'nova'
        self.assertIsInstance(driver.create_network_function_device(
            device_data),
            dict,
            msg=('Return value from the'
                 ' create_network_function_device call'
                 ' is not a dictionary'))

        # test for create device along with provider port
        driver.supports_hotplug = False
        self.assertIsInstance(driver.create_network_function_device(
            device_data),
            dict,
            msg=('Return value from the'
                 ' create_network_function_device call'
                 ' is not a dictionary'))

Example 25

Project: heat
Source File: 062_kilo.py
View license
def upgrade(migrate_engine):
    meta = sqlalchemy.MetaData()
    meta.bind = migrate_engine

    raw_template = sqlalchemy.Table(
        'raw_template', meta,
        sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True,
                          nullable=False),
        sqlalchemy.Column('created_at', sqlalchemy.DateTime),
        sqlalchemy.Column('updated_at', sqlalchemy.DateTime),
        sqlalchemy.Column('template', types.LongText),
        sqlalchemy.Column('files', types.Json),
        sqlalchemy.Column('environment', types.Json),
        sqlalchemy.Column('predecessor', sqlalchemy.Integer,
                          sqlalchemy.ForeignKey('raw_template.id',
                                                name='predecessor_fkey_ref')),
        mysql_engine='InnoDB',
        mysql_charset='utf8'
    )

    user_creds = sqlalchemy.Table(
        'user_creds', meta,
        sqlalchemy.Column('id', sqlalchemy.Integer,
                          primary_key=True, nullable=False),
        sqlalchemy.Column('created_at', sqlalchemy.DateTime),
        sqlalchemy.Column('updated_at', sqlalchemy.DateTime),
        sqlalchemy.Column('username', sqlalchemy.String(255)),
        sqlalchemy.Column('password', sqlalchemy.String(255)),
        sqlalchemy.Column('region_name', sqlalchemy.String(length=255)),
        sqlalchemy.Column('decrypt_method', sqlalchemy.String(length=64)),
        sqlalchemy.Column('tenant', sqlalchemy.String(1024)),
        sqlalchemy.Column('auth_url', sqlalchemy.Text),
        sqlalchemy.Column('tenant_id', sqlalchemy.String(256)),
        sqlalchemy.Column('trust_id', sqlalchemy.String(255)),
        sqlalchemy.Column('trustor_user_id', sqlalchemy.String(64)),
        mysql_engine='InnoDB',
        mysql_charset='utf8'
    )

    stack = sqlalchemy.Table(
        'stack', meta,
        sqlalchemy.Column('id', sqlalchemy.String(36),
                          primary_key=True, nullable=False),
        sqlalchemy.Column('created_at', sqlalchemy.DateTime),
        sqlalchemy.Column('updated_at', sqlalchemy.DateTime),
        sqlalchemy.Column('deleted_at', sqlalchemy.DateTime),
        sqlalchemy.Column('name', sqlalchemy.String(255)),
        sqlalchemy.Column('raw_template_id',
                          sqlalchemy.Integer,
                          sqlalchemy.ForeignKey('raw_template.id'),
                          nullable=False),
        sqlalchemy.Column('prev_raw_template_id',
                          sqlalchemy.Integer,
                          sqlalchemy.ForeignKey('raw_template.id')),
        sqlalchemy.Column('user_creds_id', sqlalchemy.Integer,
                          sqlalchemy.ForeignKey('user_creds.id')),
        sqlalchemy.Column('username', sqlalchemy.String(256)),
        sqlalchemy.Column('owner_id', sqlalchemy.String(36)),
        sqlalchemy.Column('action', sqlalchemy.String(255)),
        sqlalchemy.Column('status', sqlalchemy.String(255)),
        sqlalchemy.Column('status_reason', types.LongText),
        sqlalchemy.Column('timeout', sqlalchemy.Integer),
        sqlalchemy.Column('tenant', sqlalchemy.String(256)),
        sqlalchemy.Column('disable_rollback', sqlalchemy.Boolean,
                          nullable=False),
        sqlalchemy.Column('stack_user_project_id',
                          sqlalchemy.String(length=64)),
        sqlalchemy.Column('backup', sqlalchemy.Boolean, default=False),
        sqlalchemy.Column('nested_depth', sqlalchemy.Integer, default=0),
        sqlalchemy.Column('convergence', sqlalchemy.Boolean, default=False),
        sqlalchemy.Column('current_traversal', sqlalchemy.String(36)),
        sqlalchemy.Column('current_deps', types.Json),
        sqlalchemy.Column('parent_resource_name', sqlalchemy.String(255)),
        sqlalchemy.Index('ix_stack_name', 'name', mysql_length=255),
        sqlalchemy.Index('ix_stack_tenant', 'tenant', mysql_length=255),
        mysql_engine='InnoDB',
        mysql_charset='utf8'
    )

    resource = sqlalchemy.Table(
        'resource', meta,
        sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True,
                          nullable=False),
        sqlalchemy.Column('uuid', sqlalchemy.String(36), unique=True,
                          default=lambda: str(uuid.uuid4())),
        sqlalchemy.Column('nova_instance', sqlalchemy.String(255)),
        sqlalchemy.Column('name', sqlalchemy.String(255)),
        sqlalchemy.Column('created_at', sqlalchemy.DateTime),
        sqlalchemy.Column('updated_at', sqlalchemy.DateTime),
        sqlalchemy.Column('action', sqlalchemy.String(255)),
        sqlalchemy.Column('status', sqlalchemy.String(255)),
        sqlalchemy.Column('status_reason', types.LongText),
        sqlalchemy.Column('stack_id', sqlalchemy.String(36),
                          sqlalchemy.ForeignKey('stack.id'), nullable=False),
        sqlalchemy.Column('rsrc_metadata', types.LongText),
        sqlalchemy.Column('properties_data', types.Json),
        sqlalchemy.Column('engine_id', sqlalchemy.String(length=36)),
        sqlalchemy.Column('atomic_key', sqlalchemy.Integer),
        sqlalchemy.Column('needed_by', types.List),
        sqlalchemy.Column('requires', types.List),
        sqlalchemy.Column('replaces', sqlalchemy.Integer),
        sqlalchemy.Column('replaced_by', sqlalchemy.Integer),
        sqlalchemy.Column('current_template_id', sqlalchemy.Integer,
                          sqlalchemy.ForeignKey('raw_template.id')),
        mysql_engine='InnoDB',
        mysql_charset='utf8'
    )

    resource_data = sqlalchemy.Table(
        'resource_data', meta,
        sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True,
                          nullable=False),
        sqlalchemy.Column('created_at', sqlalchemy.DateTime),
        sqlalchemy.Column('updated_at', sqlalchemy.DateTime),
        sqlalchemy.Column('key', sqlalchemy.String(255)),
        sqlalchemy.Column('value', sqlalchemy.Text),
        sqlalchemy.Column('redact', sqlalchemy.Boolean),
        sqlalchemy.Column('decrypt_method', sqlalchemy.String(length=64)),
        sqlalchemy.Column('resource_id',
                          sqlalchemy.Integer,
                          sqlalchemy.ForeignKey('resource.id'),
                          nullable=False),
        mysql_engine='InnoDB',
        mysql_charset='utf8'
    )

    event = sqlalchemy.Table(
        'event', meta,
        sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True,
                          nullable=False),
        sqlalchemy.Column('uuid', sqlalchemy.String(36),
                          default=lambda: str(uuid.uuid4()), unique=True),
        sqlalchemy.Column('stack_id', sqlalchemy.String(36),
                          sqlalchemy.ForeignKey('stack.id'), nullable=False),
        sqlalchemy.Column('created_at', sqlalchemy.DateTime),
        sqlalchemy.Column('updated_at', sqlalchemy.DateTime),
        sqlalchemy.Column('resource_action', sqlalchemy.String(255)),
        sqlalchemy.Column('resource_status', sqlalchemy.String(255)),
        sqlalchemy.Column('resource_name', sqlalchemy.String(255)),
        sqlalchemy.Column('physical_resource_id', sqlalchemy.String(255)),
        sqlalchemy.Column('resource_status_reason', sqlalchemy.String(255)),
        sqlalchemy.Column('resource_type', sqlalchemy.String(255)),
        sqlalchemy.Column('resource_properties', sqlalchemy.PickleType),
        mysql_engine='InnoDB',
        mysql_charset='utf8'
    )

    watch_rule = sqlalchemy.Table(
        'watch_rule', meta,
        sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True,
                          nullable=False),
        sqlalchemy.Column('created_at', sqlalchemy.DateTime),
        sqlalchemy.Column('updated_at', sqlalchemy.DateTime),
        sqlalchemy.Column('name', sqlalchemy.String(255)),
        sqlalchemy.Column('state', sqlalchemy.String(255)),
        sqlalchemy.Column('rule', types.LongText),
        sqlalchemy.Column('last_evaluated', sqlalchemy.DateTime),
        sqlalchemy.Column('stack_id', sqlalchemy.String(36),
                          sqlalchemy.ForeignKey('stack.id'), nullable=False),
        mysql_engine='InnoDB',
        mysql_charset='utf8'
    )

    watch_data = sqlalchemy.Table(
        'watch_data', meta,
        sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True,
                          nullable=False),
        sqlalchemy.Column('created_at', sqlalchemy.DateTime),
        sqlalchemy.Column('updated_at', sqlalchemy.DateTime),
        sqlalchemy.Column('data', types.LongText),
        sqlalchemy.Column('watch_rule_id', sqlalchemy.Integer,
                          sqlalchemy.ForeignKey('watch_rule.id'),
                          nullable=False),
        mysql_engine='InnoDB',
        mysql_charset='utf8'
    )

    stack_lock = sqlalchemy.Table(
        'stack_lock', meta,
        sqlalchemy.Column('stack_id', sqlalchemy.String(length=36),
                          sqlalchemy.ForeignKey('stack.id'),
                          primary_key=True,
                          nullable=False),
        sqlalchemy.Column('created_at', sqlalchemy.DateTime),
        sqlalchemy.Column('updated_at', sqlalchemy.DateTime),
        sqlalchemy.Column('engine_id', sqlalchemy.String(length=36)),
        mysql_engine='InnoDB',
        mysql_charset='utf8'
    )

    software_config = sqlalchemy.Table(
        'software_config', meta,
        sqlalchemy.Column('id', sqlalchemy.String(36),
                          primary_key=True,
                          nullable=False),
        sqlalchemy.Column('created_at', sqlalchemy.DateTime),
        sqlalchemy.Column('updated_at', sqlalchemy.DateTime),
        sqlalchemy.Column('name', sqlalchemy.String(255)),
        sqlalchemy.Column('group', sqlalchemy.String(255)),
        sqlalchemy.Column('config', types.LongText),
        sqlalchemy.Column('tenant', sqlalchemy.String(64),
                          nullable=False,
                          index=True),
        mysql_engine='InnoDB',
        mysql_charset='utf8'
    )

    software_deployment = sqlalchemy.Table(
        'software_deployment', meta,
        sqlalchemy.Column('id', sqlalchemy.String(36),
                          primary_key=True,
                          nullable=False),
        sqlalchemy.Column('created_at', sqlalchemy.DateTime,
                          index=True),
        sqlalchemy.Column('updated_at', sqlalchemy.DateTime),
        sqlalchemy.Column('server_id', sqlalchemy.String(36),
                          nullable=False,
                          index=True),
        sqlalchemy.Column('config_id',
                          sqlalchemy.String(36),
                          sqlalchemy.ForeignKey('software_config.id'),
                          nullable=False),
        sqlalchemy.Column('input_values', types.Json),
        sqlalchemy.Column('output_values', types.Json),
        sqlalchemy.Column('action', sqlalchemy.String(255)),
        sqlalchemy.Column('status', sqlalchemy.String(255)),
        sqlalchemy.Column('status_reason', types.LongText),
        sqlalchemy.Column('tenant', sqlalchemy.String(64),
                          nullable=False,
                          index=True),
        sqlalchemy.Column('stack_user_project_id',
                          sqlalchemy.String(length=64)),
        mysql_engine='InnoDB',
        mysql_charset='utf8'
    )

    snapshot = sqlalchemy.Table(
        'snapshot', meta,
        sqlalchemy.Column('id', sqlalchemy.String(36),
                          primary_key=True,
                          nullable=False),
        sqlalchemy.Column('stack_id',
                          sqlalchemy.String(36),
                          sqlalchemy.ForeignKey('stack.id'),
                          nullable=False),
        sqlalchemy.Column('name', sqlalchemy.String(255)),
        sqlalchemy.Column('created_at', sqlalchemy.DateTime),
        sqlalchemy.Column('updated_at', sqlalchemy.DateTime),
        sqlalchemy.Column('status', sqlalchemy.String(255)),
        sqlalchemy.Column('status_reason', sqlalchemy.String(255)),
        sqlalchemy.Column('data', types.Json),
        sqlalchemy.Column('tenant', sqlalchemy.String(64),
                          nullable=False,
                          index=True),
        mysql_engine='InnoDB',
        mysql_charset='utf8'
    )

    service = sqlalchemy.Table(
        'service', meta,
        sqlalchemy.Column('id', sqlalchemy.String(36), primary_key=True,
                          default=lambda: str(uuid.uuid4())),
        sqlalchemy.Column('engine_id', sqlalchemy.String(36), nullable=False),
        sqlalchemy.Column('host', sqlalchemy.String(255), nullable=False),
        sqlalchemy.Column('hostname', sqlalchemy.String(255), nullable=False),
        sqlalchemy.Column('binary', sqlalchemy.String(255), nullable=False),
        sqlalchemy.Column('topic', sqlalchemy.String(255), nullable=False),
        sqlalchemy.Column('report_interval', sqlalchemy.Integer,
                          nullable=False),
        sqlalchemy.Column('created_at', sqlalchemy.DateTime),
        sqlalchemy.Column('updated_at', sqlalchemy.DateTime),
        sqlalchemy.Column('deleted_at', sqlalchemy.DateTime),
        mysql_engine='InnoDB',
        mysql_charset='utf8'
    )

    stack_tag = sqlalchemy.Table(
        'stack_tag', meta,
        sqlalchemy.Column('id',
                          sqlalchemy.Integer,
                          primary_key=True,
                          nullable=False),
        sqlalchemy.Column('created_at', sqlalchemy.DateTime),
        sqlalchemy.Column('updated_at', sqlalchemy.DateTime),
        sqlalchemy.Column('tag', sqlalchemy.Unicode(80)),
        sqlalchemy.Column('stack_id',
                          sqlalchemy.String(36),
                          sqlalchemy.ForeignKey('stack.id'),
                          nullable=False),
        mysql_engine='InnoDB',
        mysql_charset='utf8'
    )

    sync_point = sqlalchemy.Table(
        'sync_point', meta,
        sqlalchemy.Column('entity_id', sqlalchemy.String(36)),
        sqlalchemy.Column('traversal_id', sqlalchemy.String(36)),
        sqlalchemy.Column('is_update', sqlalchemy.Boolean),
        sqlalchemy.Column('atomic_key', sqlalchemy.Integer,
                          nullable=False),
        sqlalchemy.Column('stack_id', sqlalchemy.String(36),
                          nullable=False),
        sqlalchemy.Column('input_data', types.Json),
        sqlalchemy.Column('created_at', sqlalchemy.DateTime),
        sqlalchemy.Column('updated_at', sqlalchemy.DateTime),

        sqlalchemy.PrimaryKeyConstraint('entity_id',
                                        'traversal_id',
                                        'is_update'),
        sqlalchemy.ForeignKeyConstraint(['stack_id'], ['stack.id'],
                                        name='fk_stack_id'),

        mysql_engine='InnoDB',
        mysql_charset='utf8'
    )

    tables = (
        raw_template,
        user_creds,
        stack,
        resource,
        resource_data,
        event,
        watch_rule,
        watch_data,
        stack_lock,
        software_config,
        software_deployment,
        snapshot,
        service,
        stack_tag,
        sync_point,
    )

    for index, table in enumerate(tables):
        try:
            table.create()
        except Exception:
            # If an error occurs, drop all tables created so far to return
            # to the previously existing state.
            meta.drop_all(tables=tables[:index])
            raise

Example 26

Project: horizon
Source File: nova_data.py
View license
def data(TEST):
    TEST.servers = utils.TestDataContainer()
    TEST.flavors = utils.TestDataContainer()
    TEST.flavor_access = utils.TestDataContainer()
    TEST.keypairs = utils.TestDataContainer()
    TEST.security_groups = utils.TestDataContainer()
    TEST.security_groups_uuid = utils.TestDataContainer()
    TEST.security_group_rules = utils.TestDataContainer()
    TEST.security_group_rules_uuid = utils.TestDataContainer()
    TEST.volumes = utils.TestDataContainer()
    TEST.quotas = utils.TestDataContainer()
    TEST.quota_usages = utils.TestDataContainer()
    TEST.disabled_quotas = utils.TestDataContainer()
    TEST.floating_ips = utils.TestDataContainer()
    TEST.floating_ips_uuid = utils.TestDataContainer()
    TEST.usages = utils.TestDataContainer()
    TEST.certs = utils.TestDataContainer()
    TEST.availability_zones = utils.TestDataContainer()
    TEST.hypervisors = utils.TestDataContainer()
    TEST.services = utils.TestDataContainer()
    TEST.aggregates = utils.TestDataContainer()
    TEST.hosts = utils.TestDataContainer()
    TEST.server_groups = utils.TestDataContainer()

    # Data returned by novaclient.
    # It is used if the API layer does data conversion.
    TEST.api_floating_ips = utils.TestDataContainer()
    TEST.api_floating_ips_uuid = utils.TestDataContainer()

    # Volumes
    volume = volumes.Volume(
        volumes.VolumeManager(None),
        {"id": "41023e92-8008-4c8b-8059-7f2293ff3775",
         "name": 'test_volume',
         "status": 'available',
         "size": 40,
         "display_name": 'Volume name',
         "created_at": '2012-04-01 10:30:00',
         "volume_type": None,
         "attachments": []})
    nameless_volume = volumes.Volume(
        volumes.VolumeManager(None),
        {"id": "3b189ac8-9166-ac7f-90c9-16c8bf9e01ac",
         "name": '',
         "status": 'in-use',
         "size": 10,
         "display_name": '',
         "display_description": '',
         "device": "/dev/hda",
         "created_at": '2010-11-21 18:34:25',
         "volume_type": 'vol_type_1',
         "attachments": [{"id": "1", "server_id": '1',
                          "device": "/dev/hda"}]})
    attached_volume = volumes.Volume(
        volumes.VolumeManager(None),
        {"id": "8cba67c1-2741-6c79-5ab6-9c2bf8c96ab0",
         "name": 'my_volume',
         "status": 'in-use',
         "size": 30,
         "display_name": 'My Volume',
         "display_description": '',
         "device": "/dev/hdk",
         "created_at": '2011-05-01 11:54:33',
         "volume_type": 'vol_type_2',
         "attachments": [{"id": "2", "server_id": '1',
                          "device": "/dev/hdk"}]})
    non_bootable_volume = volumes.Volume(
        volumes.VolumeManager(None),
        {"id": "41023e92-8008-4c8b-8059-7f2293ff3771",
         "name": 'non_bootable_volume',
         "status": 'available',
         "size": 40,
         "display_name": 'Non Bootable Volume',
         "created_at": '2012-04-01 10:30:00',
         "volume_type": None,
         "attachments": []})

    volume.bootable = 'true'
    nameless_volume.bootable = 'true'
    attached_volume.bootable = 'true'
    non_bootable_volume.bootable = 'false'

    TEST.volumes.add(volume)
    TEST.volumes.add(nameless_volume)
    TEST.volumes.add(attached_volume)
    TEST.volumes.add(non_bootable_volume)

    # Flavors
    flavor_1 = flavors.Flavor(flavors.FlavorManager(None),
                              {'id': "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa",
                               'name': 'm1.tiny',
                               'vcpus': 1,
                               'disk': 0,
                               'ram': 512,
                               'swap': 0,
                               'rxtx_factor': 1,
                               'extra_specs': {},
                               'os-flavor-access:is_public': True,
                               'OS-FLV-EXT-DATA:ephemeral': 0})
    flavor_2 = flavors.Flavor(flavors.FlavorManager(None),
                              {'id': "bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb",
                               'name': 'm1.massive',
                               'vcpus': 1000,
                               'disk': 1024,
                               'ram': 10000,
                               'swap': 0,
                               'rxtx_factor': 1,
                               'extra_specs': {'Trusted': True, 'foo': 'bar'},
                               'os-flavor-access:is_public': True,
                               'OS-FLV-EXT-DATA:ephemeral': 2048})
    flavor_3 = flavors.Flavor(flavors.FlavorManager(None),
                              {'id': "dddddddd-dddd-dddd-dddd-dddddddddddd",
                               'name': 'm1.secret',
                               'vcpus': 1000,
                               'disk': 1024,
                               'ram': 10000,
                               'swap': 0,
                               'rxtx_factor': 1,
                               'extra_specs': {},
                               'os-flavor-access:is_public': False,
                               'OS-FLV-EXT-DATA:ephemeral': 2048})
    flavor_4 = flavors.Flavor(flavors.FlavorManager(None),
                              {'id': "eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee",
                               'name': 'm1.metadata',
                               'vcpus': 1000,
                               'disk': 1024,
                               'ram': 10000,
                               'swap': 0,
                               'rxtx_factor': 1,
                               'extra_specs': FlavorExtraSpecs(
                                   {'key': 'key_mock',
                                    'value': 'value_mock'}),
                               'os-flavor-access:is_public': False,
                               'OS-FLV-EXT-DATA:ephemeral': 2048})
    TEST.flavors.add(flavor_1, flavor_2, flavor_3, flavor_4)

    flavor_access_manager = flavor_access.FlavorAccessManager(None)
    flavor_access_1 = flavor_access.FlavorAccess(
        flavor_access_manager,
        {"tenant_id": "1",
         "flavor_id": "dddddddd-dddd-dddd-dddd-dddddddddddd"})
    flavor_access_2 = flavor_access.FlavorAccess(
        flavor_access_manager,
        {"tenant_id": "2",
         "flavor_id": "dddddddd-dddd-dddd-dddd-dddddddddddd"})
    TEST.flavor_access.add(flavor_access_1, flavor_access_2)

    # Key pairs
    keypair = keypairs.Keypair(keypairs.KeypairManager(None),
                               dict(name='keyName'))
    TEST.keypairs.add(keypair)

    # Security Groups and Rules
    def generate_security_groups(is_uuid=False):

        def get_id(is_uuid):
            global current_int_id
            if is_uuid:
                return str(uuid.uuid4())
            else:
                get_id.current_int_id += 1
                return get_id.current_int_id

        get_id.current_int_id = 0

        sg_manager = sec_groups.SecurityGroupManager(None)
        rule_manager = rules.SecurityGroupRuleManager(None)

        sec_group_1 = sec_groups.SecurityGroup(sg_manager,
                                               {"rules": [],
                                                "tenant_id": TEST.tenant.id,
                                                "id": get_id(is_uuid),
                                                "name": u"default",
                                                "description": u"default"})
        sec_group_2 = sec_groups.SecurityGroup(sg_manager,
                                               {"rules": [],
                                                "tenant_id": TEST.tenant.id,
                                                "id": get_id(is_uuid),
                                                "name": u"other_group",
                                                "description": u"NotDefault."})
        sec_group_3 = sec_groups.SecurityGroup(sg_manager,
                                               {"rules": [],
                                                "tenant_id": TEST.tenant.id,
                                                "id": get_id(is_uuid),
                                                "name": u"another_group",
                                                "description": u"NotDefault."})

        rule = {'id': get_id(is_uuid),
                'group': {},
                'ip_protocol': u"tcp",
                'from_port': u"80",
                'to_port': u"80",
                'parent_group_id': sec_group_1.id,
                'ip_range': {'cidr': u"0.0.0.0/32"}}

        icmp_rule = {'id': get_id(is_uuid),
                     'group': {},
                     'ip_protocol': u"icmp",
                     'from_port': u"9",
                     'to_port': u"5",
                     'parent_group_id': sec_group_1.id,
                     'ip_range': {'cidr': u"0.0.0.0/32"}}

        group_rule = {'id': 3,
                      'group': {},
                      'ip_protocol': u"tcp",
                      'from_port': u"80",
                      'to_port': u"80",
                      'parent_group_id': sec_group_1.id,
                      'source_group_id': sec_group_1.id}

        rule_obj = rules.SecurityGroupRule(rule_manager, rule)
        rule_obj2 = rules.SecurityGroupRule(rule_manager, icmp_rule)
        rule_obj3 = rules.SecurityGroupRule(rule_manager, group_rule)

        sec_group_1.rules = [rule_obj]
        sec_group_2.rules = [rule_obj]

        return {"rules": [rule_obj, rule_obj2, rule_obj3],
                "groups": [sec_group_1, sec_group_2, sec_group_3]}

    sg_data = generate_security_groups()
    TEST.security_group_rules.add(*sg_data["rules"])
    TEST.security_groups.add(*sg_data["groups"])

    sg_uuid_data = generate_security_groups(is_uuid=True)
    TEST.security_group_rules_uuid.add(*sg_uuid_data["rules"])
    TEST.security_groups_uuid.add(*sg_uuid_data["groups"])

    # Quota Sets
    quota_data = dict(metadata_items='1',
                      injected_file_content_bytes='1',
                      ram=10000,
                      floating_ips='1',
                      fixed_ips='10',
                      instances='10',
                      injected_files='1',
                      cores='10',
                      security_groups='10',
                      security_group_rules='20')
    quota = quotas.QuotaSet(quotas.QuotaSetManager(None), quota_data)
    TEST.quotas.nova = base.QuotaSet(quota)
    TEST.quotas.add(base.QuotaSet(quota))

    # nova quotas disabled when neutron is enabled
    disabled_quotas_nova = {'floating_ips', 'fixed_ips',
                            'security_groups', 'security_group_rules'}
    TEST.disabled_quotas.add(disabled_quotas_nova)

    # Quota Usages
    quota_usage_data = {'gigabytes': {'used': 0,
                                      'quota': 1000},
                        'instances': {'used': 0,
                                      'quota': 10},
                        'ram': {'used': 0,
                                'quota': 10000},
                        'cores': {'used': 0,
                                  'quota': 20},
                        'floating_ips': {'used': 0,
                                         'quota': 10},
                        'security_groups': {'used': 0,
                                            'quota': 10},
                        'volumes': {'used': 0,
                                    'quota': 10}}
    quota_usage = usage_quotas.QuotaUsage()
    for k, v in quota_usage_data.items():
        quota_usage.add_quota(base.Quota(k, v['quota']))
        quota_usage.tally(k, v['used'])

    TEST.quota_usages.add(quota_usage)

    # Limits
    limits = {"absolute": {"maxImageMeta": 128,
                           "maxPersonality": 5,
                           "maxPersonalitySize": 10240,
                           "maxSecurityGroupRules": 20,
                           "maxSecurityGroups": 10,
                           "maxServerMeta": 128,
                           "maxTotalCores": 20,
                           "maxTotalFloatingIps": 10,
                           "maxTotalInstances": 10,
                           "maxTotalKeypairs": 100,
                           "maxTotalRAMSize": 10000,
                           "totalCoresUsed": 0,
                           "totalInstancesUsed": 0,
                           "totalKeyPairsUsed": 0,
                           "totalRAMUsed": 0,
                           "totalSecurityGroupsUsed": 0}}
    TEST.limits = limits

    # Servers
    tenant3 = TEST.tenants.list()[2]

    vals = {"host": "http://nova.example.com:8774",
            "name": "server_1",
            "status": "ACTIVE",
            "tenant_id": TEST.tenants.first().id,
            "user_id": TEST.user.id,
            "server_id": "1",
            "flavor_id": flavor_1.id,
            "image_id": TEST.images.first().id,
            "key_name": keypair.name}
    server_1 = servers.Server(servers.ServerManager(None),
                              json.loads(SERVER_DATA % vals)['server'])
    vals.update({"name": "server_2",
                 "status": "BUILD",
                 "server_id": "2"})
    server_2 = servers.Server(servers.ServerManager(None),
                              json.loads(SERVER_DATA % vals)['server'])
    vals.update({"name": u'\u4e91\u89c4\u5219',
                 "status": "ACTIVE",
                 "tenant_id": tenant3.id,
                "server_id": "3"})
    server_3 = servers.Server(servers.ServerManager(None),
                              json.loads(SERVER_DATA % vals)['server'])
    vals.update({"name": "server_4",
                 "status": "PAUSED",
                 "server_id": "4"})
    server_4 = servers.Server(servers.ServerManager(None),
                              json.loads(SERVER_DATA % vals)['server'])
    TEST.servers.add(server_1, server_2, server_3, server_4)

    # VNC Console Data
    console = {u'console': {u'url': u'http://example.com:6080/vnc_auto.html',
                            u'type': u'novnc'}}
    TEST.servers.vnc_console_data = console
    # SPICE Console Data
    console = {u'console': {u'url': u'http://example.com:6080/spice_auto.html',
                            u'type': u'spice'}}
    TEST.servers.spice_console_data = console
    # RDP Console Data
    console = {u'console': {u'url': u'http://example.com:6080/rdp_auto.html',
                            u'type': u'rdp'}}
    TEST.servers.rdp_console_data = console

    # Floating IPs
    def generate_fip(conf):
        return floating_ips.FloatingIP(floating_ips.FloatingIPManager(None),
                                       conf)

    fip_1 = {'id': 1,
             'fixed_ip': '10.0.0.4',
             'instance_id': server_1.id,
             'ip': '58.58.58.58',
             'pool': 'pool1'}
    fip_2 = {'id': 2,
             'fixed_ip': None,
             'instance_id': None,
             'ip': '58.58.58.58',
             'pool': 'pool2'}
    # this floating ip is for lbaas tests
    fip_3 = {'id': 3,
             'fixed_ip': '10.0.0.5',
             # the underlying class maps the instance id to port id
             'instance_id': '063cf7f3-ded1-4297-bc4c-31eae876cc91',
             'ip': '58.58.58.58',
             'pool': 'pool2'}
    TEST.api_floating_ips.add(generate_fip(fip_1), generate_fip(fip_2),
                              generate_fip(fip_3))

    TEST.floating_ips.add(nova.FloatingIp(generate_fip(fip_1)),
                          nova.FloatingIp(generate_fip(fip_2)),
                          nova.FloatingIp(generate_fip(fip_3)))

    # Floating IP with UUID id (for Floating IP with Neutron Proxy)
    fip_3 = {'id': str(uuid.uuid4()),
             'fixed_ip': '10.0.0.4',
             'instance_id': server_1.id,
             'ip': '58.58.58.58',
             'pool': 'pool1'}
    fip_4 = {'id': str(uuid.uuid4()),
             'fixed_ip': None,
             'instance_id': None,
             'ip': '58.58.58.58',
             'pool': 'pool2'}
    TEST.api_floating_ips_uuid.add(generate_fip(fip_3), generate_fip(fip_4))

    TEST.floating_ips_uuid.add(nova.FloatingIp(generate_fip(fip_3)),
                               nova.FloatingIp(generate_fip(fip_4)))

    # Usage
    usage_vals = {"tenant_id": TEST.tenant.id,
                  "instance_name": server_1.name,
                  "flavor_name": flavor_1.name,
                  "flavor_vcpus": flavor_1.vcpus,
                  "flavor_disk": flavor_1.disk,
                  "flavor_ram": flavor_1.ram}
    usage_obj = usage.Usage(usage.UsageManager(None),
                            json.loads(USAGE_DATA % usage_vals))
    TEST.usages.add(usage_obj)

    usage_2_vals = {"tenant_id": tenant3.id,
                    "instance_name": server_3.name,
                    "flavor_name": flavor_1.name,
                    "flavor_vcpus": flavor_1.vcpus,
                    "flavor_disk": flavor_1.disk,
                    "flavor_ram": flavor_1.ram}
    usage_obj_2 = usage.Usage(usage.UsageManager(None),
                              json.loads(USAGE_DATA % usage_2_vals))
    TEST.usages.add(usage_obj_2)

    cert_data = {'private_key': 'private',
                 'data': 'certificate_data'}
    certificate = certs.Certificate(certs.CertificateManager(None), cert_data)
    TEST.certs.add(certificate)

    # Availability Zones
    TEST.availability_zones.add(availability_zones.AvailabilityZone(
        availability_zones.AvailabilityZoneManager(None),
        {
            'zoneName': 'nova',
            'zoneState': {'available': True},
            'hosts': {
                "host001": {
                    "nova-network": {
                        "active": True,
                        "available": True,
                    },
                },
            },
        },
    ))

    # hypervisors
    hypervisor_1 = hypervisors.Hypervisor(
        hypervisors.HypervisorManager(None),
        {
            "service": {"host": "devstack001", "id": 3},
            "vcpus_used": 1,
            "hypervisor_type": "QEMU",
            "local_gb_used": 20,
            "hypervisor_hostname": "devstack001",
            "memory_mb_used": 1500,
            "memory_mb": 2000,
            "current_workload": 0,
            "vcpus": 1,
            "cpu_info": '{"vendor": "Intel", "model": "core2duo",'
                        '"arch": "x86_64", "features": ["lahf_lm"'
                        ', "rdtscp"], "topology": {"cores": 1, "t'
                        'hreads": 1, "sockets": 1}}',
            "running_vms": 1,
            "free_disk_gb": 9,
            "hypervisor_version": 1002000,
            "disk_available_least": 6,
            "local_gb": 29,
            "free_ram_mb": 500,
            "id": 1,
            "servers": [{"name": "test_name", "uuid": "test_uuid"}]
        },
    )

    hypervisor_2 = hypervisors.Hypervisor(
        hypervisors.HypervisorManager(None),
        {
            "service": {"host": "devstack002", "id": 4},
            "vcpus_used": 1,
            "hypervisor_type": "QEMU",
            "local_gb_used": 20,
            "hypervisor_hostname": "devstack001",
            "memory_mb_used": 1500,
            "memory_mb": 2000,
            "current_workload": 0,
            "vcpus": 1,
            "cpu_info": '{"vendor": "Intel", "model": "core2duo",'
                        '"arch": "x86_64", "features": ["lahf_lm"'
                        ', "rdtscp"], "topology": {"cores": 1, "t'
                        'hreads": 1, "sockets": 1}}',
            "running_vms": 1,
            "free_disk_gb": 9,
            "hypervisor_version": 1002000,
            "disk_available_least": 6,
            "local_gb": 29,
            "free_ram_mb": 500,
            "id": 2,
            "servers": [{"name": "test_name_2", "uuid": "test_uuid_2"}]
        },
    )
    hypervisor_3 = hypervisors.Hypervisor(
        hypervisors.HypervisorManager(None),
        {
            "service": {"host": "instance-host", "id": 5},
            "vcpus_used": 1,
            "hypervisor_type": "QEMU",
            "local_gb_used": 20,
            "hypervisor_hostname": "devstack003",
            "memory_mb_used": 1500,
            "memory_mb": 2000,
            "current_workload": 0,
            "vcpus": 1,
            "cpu_info": '{"vendor": "Intel", "model": "core2duo",'
                        '"arch": "x86_64", "features": ["lahf_lm"'
                        ', "rdtscp"], "topology": {"cores": 1, "t'
                        'hreads": 1, "sockets": 1}}',
            "running_vms": 1,
            "free_disk_gb": 9,
            "hypervisor_version": 1002000,
            "disk_available_least": 6,
            "local_gb": 29,
            "free_ram_mb": 500,
            "id": 3,
        },
    )
    TEST.hypervisors.add(hypervisor_1)
    TEST.hypervisors.add(hypervisor_2)
    TEST.hypervisors.add(hypervisor_3)

    TEST.hypervisors.stats = {
        "hypervisor_statistics": {
            "count": 5,
            "vcpus_used": 3,
            "local_gb_used": 15,
            "memory_mb": 483310,
            "current_workload": 0,
            "vcpus": 160,
            "running_vms": 3,
            "free_disk_gb": 12548,
            "disk_available_least": 12556,
            "local_gb": 12563,
            "free_ram_mb": 428014,
            "memory_mb_used": 55296,
        }
    }

    # Services
    service_1 = services.Service(services.ServiceManager(None), {
        "status": "enabled",
        "binary": "nova-conductor",
        "zone": "internal",
        "state": "up",
        "updated_at": "2013-07-08T05:21:00.000000",
        "host": "devstack001",
        "disabled_reason": None,
    })

    service_2 = services.Service(services.ServiceManager(None), {
        "status": "enabled",
        "binary": "nova-compute",
        "zone": "nova",
        "state": "up",
        "updated_at": "2013-07-08T05:20:51.000000",
        "host": "devstack001",
        "disabled_reason": None,
    })

    service_3 = services.Service(services.ServiceManager(None), {
        "status": "enabled",
        "binary": "nova-compute",
        "zone": "nova",
        "state": "down",
        "updated_at": "2013-07-08T04:20:51.000000",
        "host": "devstack002",
        "disabled_reason": None,
    })

    service_4 = services.Service(services.ServiceManager(None), {
        "status": "disabled",
        "binary": "nova-compute",
        "zone": "nova",
        "state": "up",
        "updated_at": "2013-07-08T04:20:51.000000",
        "host": "devstack003",
        "disabled_reason": None,
    })

    TEST.services.add(service_1)
    TEST.services.add(service_2)
    TEST.services.add(service_3)
    TEST.services.add(service_4)

    # Aggregates
    aggregate_1 = aggregates.Aggregate(aggregates.AggregateManager(None), {
        "name": "foo",
        "availability_zone": "testing",
        "deleted": 0,
        "created_at": "2013-07-04T13:34:38.000000",
        "updated_at": None,
        "hosts": ["foo", "bar"],
        "deleted_at": None,
        "id": 1,
        "metadata": {"foo": "testing", "bar": "testing"},
    })

    aggregate_2 = aggregates.Aggregate(aggregates.AggregateManager(None), {
        "name": "bar",
        "availability_zone": "testing",
        "deleted": 0,
        "created_at": "2013-07-04T13:34:38.000000",
        "updated_at": None,
        "hosts": ["foo", "bar"],
        "deleted_at": None,
        "id": 2,
        "metadata": {"foo": "testing", "bar": "testing"},
    })

    TEST.aggregates.add(aggregate_1)
    TEST.aggregates.add(aggregate_2)

    host1 = hosts.Host(hosts.HostManager(None), {
        "host_name": "devstack001",
        "service": "compute",
        "zone": "testing",
    })

    host2 = hosts.Host(hosts.HostManager(None), {
        "host_name": "devstack002",
        "service": "nova-conductor",
        "zone": "testing",
    })

    host3 = hosts.Host(hosts.HostManager(None), {
        "host_name": "devstack003",
        "service": "compute",
        "zone": "testing",
    })

    host4 = hosts.Host(hosts.HostManager(None), {
        "host_name": "devstack004",
        "service": "compute",
        "zone": "testing",
    })

    TEST.hosts.add(host1)
    TEST.hosts.add(host2)
    TEST.hosts.add(host3)
    TEST.hosts.add(host4)

    server_group_1 = server_groups.ServerGroup(
        server_groups.ServerGroupsManager(None),
        {
            "id": "1",
            "name": "server_group_1",
            "policies": [],
        },
    )

    server_group_2 = server_groups.ServerGroup(
        server_groups.ServerGroupsManager(None),
        {
            "id": "2",
            "name": "server_group_2",
            "policies": ["affinity", "some_other_policy"],
        },
    )

    server_group_3 = server_groups.ServerGroup(
        server_groups.ServerGroupsManager(None),
        {
            "id": "3",
            "name": "server_group_3",
            "policies": ["anti-affinity", "some_other_policy"],
        },
    )

    TEST.server_groups.add(server_group_1)
    TEST.server_groups.add(server_group_2)
    TEST.server_groups.add(server_group_3)
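
The Neutron-proxy floating IPs above get UUID-style identifiers from str(uuid.uuid4()), while the Nova-style ones keep integer ids. A minimal sketch of that fixture pattern, with a hypothetical helper and illustrative field values (not taken from Horizon):

import uuid

def make_uuid_fip(instance_id=None, pool='pool1'):
    # Each call mints a unique, Neutron-style UUID id for the fixture.
    return {'id': str(uuid.uuid4()),
            'fixed_ip': '10.0.0.4' if instance_id else None,
            'instance_id': instance_id,
            'ip': '58.58.58.58',
            'pool': pool}

fips = [make_uuid_fip('server-1'), make_uuid_fip()]
assert len({fip['id'] for fip in fips}) == len(fips)  # ids do not collide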

Example 27

Project: poppy
Source File: test_services.py
View license
    @ddt.data('requestCount', 'bandwidthOut', 'httpResponseCode_1XX',
              'httpResponseCode_2XX', 'httpResponseCode_3XX',
              'httpResponseCode_4XX', 'httpResponseCode_5XX')
    def test_read(self, metric_name):
        project_id = str(uuid.uuid4())
        auth_token = str(uuid.uuid4())
        domain_name = 'www.' + str(uuid.uuid4()) + '.com'
        to_timestamp = datetime.datetime.utcnow()
        from_timestamp = \
            (datetime.datetime.utcnow() - datetime.timedelta(days=1))
        context_utils.get_current = mock.Mock()
        context_utils.get_current().to_dict = \
            mock.Mock(return_value={'tenant': project_id,
                                    'auth_token': auth_token})
        with mock.patch.object(client.BlueFloodMetricsClient,
                               'async_requests',
                               auto_spec=True) as mock_async:
            timestamp1 = str((int(time.time()) + 0) * 1000)
            timestamp2 = str((int(time.time()) + 100) * 1000)
            timestamp3 = str((int(time.time()) + 200) * 1000)
            json_dict = {
                'values': [
                    {
                        'numPoints': 2,
                        'timestamp': timestamp1,
                        'sum': 45
                    },
                    {
                        'numPoints': 3,
                        'timestamp': timestamp3,
                        'sum': 11
                    },
                    {
                        'numPoints': 1,
                        'timestamp': timestamp2,
                        'sum': 34
                    }
                ]
            }

            metric_names = []
            regions = ['Mock_region{0}'.format(i) for i in range(6)]
            for region in regions:
                metric_names.append('_'.join([metric_name, domain_name,
                                              region]))
            mock_async_responses = []
            for metric_name in metric_names:
                url = 'https://www.metrics.com/{0}/{1}'.format(
                    project_id, metric_name)
                res = Response(ok=True,
                               url=url,
                               text='success',
                               json_dict=json_dict)
                mock_async_responses.append(res)

            # NOTE(TheSriram): shuffle the order of responses
            random.shuffle(mock_async_responses)
            mock_async.return_value = mock_async_responses

            results = self.metrics_driver.services_controller.read(
                metric_names=metric_names,
                from_timestamp=from_timestamp,
                to_timestamp=to_timestamp,
                resolution='86400'
            )

            # confirm the results are in date asc order and all returned.
            expected_order = [
                {"timestamp": time.strftime(
                    '%Y-%m-%dT%H:%M:%S', time.gmtime(int(timestamp1) / 1000)),
                    "count": 45},
                {"timestamp": time.strftime(
                    '%Y-%m-%dT%H:%M:%S', time.gmtime(int(timestamp2) / 1000)),
                    "count": 34},
                {"timestamp": time.strftime(
                    '%Y-%m-%dT%H:%M:%S', time.gmtime(int(timestamp3) / 1000)),
                    "count": 11}
            ]

            for result in results:
                metric_name, response = result
                self.assertListEqual(response, expected_order)
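
The test above isolates each run by minting the project id, auth token and domain name from uuid.uuid4(). A small sketch of just that setup step (the helper name is illustrative, not from poppy):

import uuid

def make_test_identity():
    # Fresh identifiers per test run, so mocked requests never share state.
    project_id = str(uuid.uuid4())
    auth_token = str(uuid.uuid4())
    domain_name = 'www.' + str(uuid.uuid4()) + '.com'
    return project_id, auth_token, domain_name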

Example 28

Project: subunit2sql
Source File: test_migrations.py
View license
    def _pre_upgrade_2822a408bdd0(self, engine):
        data = {}

        # Add run
        runs = get_table(engine, 'runs')
        run = {'id': six.text_type(uuid.uuid4()),
               'skips': 0,
               'fails': 0,
               'passes': 1,
               'run_time': 1.0,
               'artifacts': 'https://am_i_really_a_fake_url',
               'run_at': datetime.datetime.utcnow()}
        runs.insert().values(run).execute()
        data['run'] = run
        # Add run_metadata
        run_metadatas = get_table(engine, 'run_metadata')
        run_metadata = {'id': six.text_type(uuid.uuid4()),
                        'run_id': run['id'],
                        'key': 'attrs',
                        'value': 'an_attr'}
        run_metadatas.insert().values(run_metadata).execute()
        data['run_metadata'] = run_metadata

        # Add test
        tests = get_table(engine, 'tests')
        test = {'id': six.text_type(uuid.uuid4()),
                'test_id': 'I_am_a_real_test!',
                'success': 1,
                'failure': 0}
        tests.insert().values(test).execute()
        data['test'] = test

        # Add test_metadata
        test_metadatas = get_table(engine, 'test_metadata')
        test_metadata = {'id': six.text_type(uuid.uuid4()),
                         'test_id': test['id'],
                         'key': 'a_real_key',
                         'value': 'an_attr'}
        test_metadatas.insert().values(test_metadata).execute()
        data['test_metadata'] = test_metadata

        # Add test run
        now = datetime.datetime.now().replace(microsecond=0)
        future_now = now + datetime.timedelta(0, 4)

        test_runs = get_table(engine, 'test_runs')

        test_run = {'id': six.text_type(uuid.uuid4()),
                    'test_id': test['id'],
                    'run_id': run['id'],
                    'start_time': now,
                    'status': 'success',
                    'stop_time': future_now}
        test_runs.insert().values(test_run).execute()
        data['test_run'] = test_run

        # Add test_run_metadata
        test_run_metadatas = get_table(engine, 'test_run_metadata')
        test_run_metadata = {'id': six.text_type(uuid.uuid4()),
                             'test_run_id': test_run['id'],
                             'key': 'attrs',
                             'value': 'an_attr'}
        test_run_metadatas.insert().values(test_run_metadata).execute()
        data['test_run_metadata'] = test_run_metadata

        attachments = get_table(engine, 'attachments')
        attachment = {'id': six.text_type(uuid.uuid4()),
                      'test_run_id': test_run['id'],
                      'label': 'an_attachment',
                      'attachment': b'something'}
        attachments.insert().values(attachment).execute()
        data['attachment'] = attachment
        return data
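
Every row inserted above takes a text UUID as its primary key, which also lets the fixture rows reference each other (run_id, test_id, test_run_id). A stripped-down sketch of that keying pattern, with illustrative dict fields rather than the full schema:

import uuid

def build_run_fixture():
    run = {'id': str(uuid.uuid4()), 'passes': 1, 'fails': 0, 'skips': 0}
    # The child row reuses the parent's freshly generated key as a foreign key.
    run_metadata = {'id': str(uuid.uuid4()), 'run_id': run['id'],
                    'key': 'attrs', 'value': 'an_attr'}
    return run, run_metadata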

Example 29

Project: framework
Source File: watcher.py
View license
    def services_running(self, target):
        """
        Check all services are running
        :param target: Target to check
        :return: Boolean
        """
        try:
            key = 'ovs-watcher-{0}'.format(str(uuid.uuid4()))
            value = str(time.time())

            if target == 'config':
                self.log_message(target, 'Testing configuration store...', 0)
                from ovs.extensions.generic.configuration import Configuration
                try:
                    Configuration.list('/')
                except Exception as ex:
                    self.log_message(target, '  Error during configuration store test: {0}'.format(ex), 2)
                    return False
                if Configuration.get_store() == 'arakoon':
                    from ovs.extensions.db.arakoon.configuration import ArakoonConfiguration
                    from ovs.extensions.db.arakoon.ArakoonInstaller import ArakoonInstaller, ArakoonClusterConfig
                    from ovs.extensions.db.arakoon.pyrakoon.pyrakoon.compat import NoGuarantee
                    with open(ArakoonConfiguration.CACC_LOCATION) as config_file:
                        contents = config_file.read()
                    config = ArakoonClusterConfig(cluster_id='cacc', filesystem=True)
                    config.read_config(contents)
                    client = ArakoonInstaller.build_client(config)
                    contents = client.get(ArakoonInstaller.INTERNAL_CONFIG_KEY, consistency=NoGuarantee())
                    with open(ArakoonConfiguration.CACC_LOCATION, 'w') as config_file:
                        config_file.write(contents)
                self.log_message(target, '  Configuration store OK', 0)
                return True

            if target == 'framework':
                # Volatile
                self.log_message(target, 'Testing volatile store...', 0)
                max_tries = 5
                tries = 0
                while tries < max_tries:
                    try:
                        try:
                            logging.disable(logging.WARNING)
                            from ovs.extensions.storage.volatilefactory import VolatileFactory
                            VolatileFactory.store = None
                            volatile = VolatileFactory.get_client()
                            volatile.set(key, value)
                            if volatile.get(key) == value:
                                volatile.delete(key)
                                break
                            volatile.delete(key)
                        finally:
                            logging.disable(logging.NOTSET)
                    except Exception as message:
                        self.log_message(target, '  Error during volatile store test: {0}'.format(message), 2)
                    key = 'ovs-watcher-{0}'.format(str(uuid.uuid4()))  # Get another key
                    time.sleep(1)
                    tries += 1
                if tries == max_tries:
                    self.log_message(target, '  Volatile store not working correctly', 2)
                    return False
                self.log_message(target, '  Volatile store OK after {0} tries'.format(tries), 0)

                # Persistent
                self.log_message(target, 'Testing persistent store...', 0)
                max_tries = 5
                tries = 0
                while tries < max_tries:
                    try:
                        try:
                            logging.disable(logging.WARNING)
                            persistent = PersistentFactory.get_client()
                            persistent.set(key, value)
                            if persistent.get(key) == value:
                                persistent.delete(key)
                                break
                            persistent.delete(key)
                        finally:
                            logging.disable(logging.NOTSET)
                    except Exception as message:
                        self.log_message(target, '  Error during persistent store test: {0}'.format(message), 2)
                    key = 'ovs-watcher-{0}'.format(str(uuid.uuid4()))  # Get another key
                    time.sleep(1)
                    tries += 1
                if tries == max_tries:
                    self.log_message(target, '  Persistent store not working correctly', 2)
                    return False
                self.log_message(target, '  Persistent store OK after {0} tries'.format(tries), 0)

            if target == 'volumedriver':
                # Arakoon, voldrv cluster
                self.log_message(target, 'Testing arakoon (voldrv)...', 0)
                max_tries = 5
                tries = 0
                while tries < max_tries:
                    try:
                        from ovs.extensions.generic.configuration import Configuration
                        from ovs.extensions.storage.persistent.pyrakoonstore import PyrakoonStore
                        cluster_name = str(Configuration.get('/ovs/framework/arakoon_clusters|voldrv'))
                        client = PyrakoonStore(cluster=cluster_name)
                        client.set(key, value)
                        if client.get(key) == value:
                            client.delete(key)
                            break
                        client.delete(key)
                    except Exception as message:
                        self.log_message(target, '  Error during arakoon (voldrv) test: {0}'.format(message), 2)
                    key = 'ovs-watcher-{0}'.format(str(uuid.uuid4()))  # Get another key
                    time.sleep(1)
                    tries += 1
                if tries == max_tries:
                    self.log_message(target, '  Arakoon (voldrv) not working correctly', 2)
                    return False
                self.log_message(target, '  Arakoon (voldrv) OK', 0)

            if target in ['framework', 'volumedriver']:
                # RabbitMQ
                self.log_message(target, 'Test rabbitMQ...', 0)
                import pika
                from ovs.extensions.generic.configuration import Configuration
                messagequeue = Configuration.get('/ovs/framework/messagequeue')
                rmq_servers = messagequeue['endpoints']
                good_node = False
                for server in rmq_servers:
                    try:
                        connection_string = '{0}://{1}:{2}@{3}/%2F'.format(messagequeue['protocol'],
                                                                           messagequeue['user'],
                                                                           messagequeue['password'],
                                                                           server)
                        connection = pika.BlockingConnection(pika.URLParameters(connection_string))
                        channel = connection.channel()
                        channel.basic_publish('', 'ovs-watcher', str(time.time()),
                                              pika.BasicProperties(content_type='text/plain', delivery_mode=1))
                        connection.close()
                        good_node = True
                    except Exception as message:
                        self.log_message(target, '  Error during rabbitMQ test on node {0}: {1}'.format(server, message), 2)
                if good_node is False:
                    self.log_message(target, '  No working rabbitMQ node could be found', 2)
                    return False
                self.log_message(target, '  RabbitMQ test OK', 0)
                self.log_message(target, 'All tests OK', 0)
                return True
        except Exception as ex:
            self.log_message(target, 'Unexpected exception: {0}'.format(ex), 2)
            return False
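
Each probe above writes a throwaway 'ovs-watcher-<uuid>' key, and a fresh key is generated on every retry so a stale value can never produce a false positive. A condensed sketch of that loop, assuming a store object exposing set/get/delete:

import time
import uuid

def store_is_healthy(store, max_tries=5):
    for _ in range(max_tries):
        key = 'ovs-watcher-{0}'.format(uuid.uuid4())  # new key per attempt
        value = str(time.time())
        try:
            store.set(key, value)
            matched = store.get(key) == value
            store.delete(key)
            if matched:
                return True
        except Exception:
            pass  # the real watcher logs the error before retrying
        time.sleep(1)
    return False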

Example 30

Project: bsd-cloudinit
Source File: x509.py
View license
    def create_self_signed_cert(self, subject, validity_years=10,
                                machine_keyset=True, store_name=STORE_NAME_MY):
        subject_encoded = None
        cert_context_p = None
        store_handle = None

        container_name = str(uuid.uuid4())
        self._generate_key(container_name, machine_keyset)

        try:
            subject_encoded_len = wintypes.DWORD()

            if not cryptoapi.CertStrToName(cryptoapi.X509_ASN_ENCODING,
                                           subject,
                                           cryptoapi.CERT_X500_NAME_STR, None,
                                           None,
                                           ctypes.byref(subject_encoded_len),
                                           None):
                raise cryptoapi.CryptoAPIException()

            size = ctypes.c_size_t(subject_encoded_len.value)
            subject_encoded = ctypes.cast(malloc(size),
                                          ctypes.POINTER(wintypes.BYTE))

            if not cryptoapi.CertStrToName(cryptoapi.X509_ASN_ENCODING,
                                           subject,
                                           cryptoapi.CERT_X500_NAME_STR, None,
                                           subject_encoded,
                                           ctypes.byref(subject_encoded_len),
                                           None):
                raise cryptoapi.CryptoAPIException()

            subject_blob = cryptoapi.CRYPTOAPI_BLOB()
            subject_blob.cbData = subject_encoded_len
            subject_blob.pbData = subject_encoded

            key_prov_info = cryptoapi.CRYPT_KEY_PROV_INFO()
            key_prov_info.pwszContainerName = container_name
            key_prov_info.pwszProvName = None
            key_prov_info.dwProvType = cryptoapi.PROV_RSA_FULL
            key_prov_info.cProvParam = None
            key_prov_info.rgProvParam = None
            key_prov_info.dwKeySpec = cryptoapi.AT_SIGNATURE

            if machine_keyset:
                key_prov_info.dwFlags = cryptoapi.CRYPT_MACHINE_KEYSET
            else:
                key_prov_info.dwFlags = 0

            sign_alg = cryptoapi.CRYPT_ALGORITHM_IDENTIFIER()
            sign_alg.pszObjId = cryptoapi.szOID_RSA_SHA1RSA

            start_time = cryptoapi.SYSTEMTIME()
            cryptoapi.GetSystemTime(ctypes.byref(start_time))

            end_time = copy.copy(start_time)
            end_time.wYear += validity_years

            cert_context_p = cryptoapi.CertCreateSelfSignCertificate(
                None, ctypes.byref(subject_blob), 0,
                ctypes.byref(key_prov_info),
                ctypes.byref(sign_alg), ctypes.byref(start_time),
                ctypes.byref(end_time), None)
            if not cert_context_p:
                raise cryptoapi.CryptoAPIException()

            if not cryptoapi.CertAddEnhancedKeyUsageIdentifier(
                    cert_context_p, cryptoapi.szOID_PKIX_KP_SERVER_AUTH):
                raise cryptoapi.CryptoAPIException()

            if machine_keyset:
                flags = cryptoapi.CERT_SYSTEM_STORE_LOCAL_MACHINE
            else:
                flags = cryptoapi.CERT_SYSTEM_STORE_CURRENT_USER

            store_handle = cryptoapi.CertOpenStore(
                cryptoapi.CERT_STORE_PROV_SYSTEM, 0, 0, flags,
                six.text_type(store_name))
            if not store_handle:
                raise cryptoapi.CryptoAPIException()

            if not cryptoapi.CertAddCertificateContextToStore(
                    store_handle, cert_context_p,
                    cryptoapi.CERT_STORE_ADD_REPLACE_EXISTING, None):
                raise cryptoapi.CryptoAPIException()

            return self._get_cert_thumprint(cert_context_p)

        finally:
            if store_handle:
                cryptoapi.CertCloseStore(store_handle, 0)
            if cert_context_p:
                cryptoapi.CertFreeCertificateContext(cert_context_p)
            if subject_encoded:
                free(subject_encoded)
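
The Windows CryptoAPI details above aside, the uuid.uuid4() call simply names the new key container, so generating a certificate can never clobber an existing container. The naming step in isolation (the prefix argument is hypothetical):

import uuid

def new_key_container_name(prefix=''):
    # A random UUID keeps container names unique across repeated runs.
    return prefix + str(uuid.uuid4())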

Example 31

Project: glyphIgo
Source File: genEPUB.py
View license
    def createEPUB(self, characters, title, epubFilename):

        # sort characters by codepoint
        characters = sorted(characters)

        # remove existing file
        if (os.path.exists(epubFilename)):
            os.remove(epubFilename)

        # create tmp directory
        tmpDir = "working"
        if (os.path.exists(tmpDir)):
            shutil.rmtree(tmpDir)
        os.makedirs(tmpDir)

        # create META-INF directory
        metaInfDir = os.path.join(tmpDir, "META-INF")
        os.makedirs(metaInfDir)

        # create new mimetype file
        mimetypeFile = os.path.join(tmpDir, "mimetype")
        f = open(mimetypeFile, "w")
        f.write("application/epub+zip")
        f.close()

        # container file
        contentFileRelative = "content.opf"
        contentFile = os.path.join(tmpDir, contentFileRelative)

        # create new container.xml file
        containerFile = os.path.join(metaInfDir, "container.xml")
        f = open(containerFile, "w") 
        f.write("<?xml version=\"1.0\"?>\n")
        f.write("<container version=\"1.0\" xmlns=\"urn:oasis:names:tc:opendocument:xmlns:container\">\n")
        f.write(" <rootfiles>\n")
        f.write("  <rootfile full-path=\"%s\" media-type=\"application/oebps-package+xml\"/>\n" % contentFileRelative)
        f.write(" </rootfiles>\n")
        f.write("</container>")
        f.close()

        # create new style.css file
        styleFile = os.path.join(tmpDir, "style.css")
        f = open(styleFile, "w")
        f.write("@charset \"UTF-8\";\n")
        f.write("body {\n")
        f.write("  margin: 10px 25px 10px 25px;\n")
        f.write("}\n")  
        f.write("h1 {\n")
        f.write("  font-size: 200%;\n")
        f.write("  text-align: left;\n")
        f.write("}\n")
        #f.write("body.index {\n")
        #f.write("  margin: 10px 50px 10px 50px;\n")
        #f.write("}\n")
        f.write("table.character {\n")
        f.write("  width: 96%;\n")
        f.write("}\n")
        f.write("th {\n")
        f.write("  font-weight: bold;\n")
        f.write("  text-align: left;\n")
        f.write("}\n")
        f.write("td {\n")
        f.write("  text-align: left;\n")
        f.write("  font-family: monospace;\n")
        f.write("  font-size: 90%;\n")
        f.write("}\n")
        f.write(".character {\n")
        f.write("  width: 96%;\n")
        f.write("}\n")
        f.write(".sym {\n")
        f.write("  width: 10%;\n")
        f.write("}\n")
        f.write(".dec {\n")
        f.write("  width: 10%;\n")
        f.write("}\n")
        f.write(".hex {\n")
        f.write("  width: 10%;\n")
        f.write("}\n")
        f.write(".nam {\n")
        f.write("  width: 70%;\n")
        f.write("}\n")
        f.close()

        # create index file
        self.outputIndexPage(characters, title, tmpDir)

        # get UUID
        identifier = str(uuid.uuid4()).lower()

        # create toc file
        self.outputToc([["index.xhtml", title]], identifier, title, tmpDir)

        # create opf file
        self.outputOpf(identifier, title, tmpDir)

        # zip epub
        self.zipEPUB(epubFilename, tmpDir)

        # delete tmp directory
        if (os.path.exists(tmpDir)):
            shutil.rmtree(tmpDir)

        return True
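
The EPUB builder above uses a lowercased random UUID as the package identifier that both the OPF and the TOC must share. A minimal sketch of that step (the 'urn:uuid:' prefix shown here is a common convention, not something glyphIgo adds):

import uuid

def make_epub_identifier(with_urn_prefix=False):
    identifier = str(uuid.uuid4()).lower()
    return ('urn:uuid:' + identifier) if with_urn_prefix else identifier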

Example 32

Project: plotly.py
Source File: offline.py
View license
def _plot_html(figure_or_data, config, validate, default_width,
               default_height, global_requirejs):

    figure = tools.return_figure_from_figure_or_data(figure_or_data, validate)

    width = figure.get('layout', {}).get('width', default_width)
    height = figure.get('layout', {}).get('height', default_height)

    try:
        float(width)
    except (ValueError, TypeError):
        pass
    else:
        width = str(width) + 'px'

    try:
        float(height)
    except (ValueError, TypeError):
        pass
    else:
        height = str(height) + 'px'

    plotdivid = uuid.uuid4()
    jdata = json.dumps(figure.get('data', []), cls=utils.PlotlyJSONEncoder)
    jlayout = json.dumps(figure.get('layout', {}), cls=utils.PlotlyJSONEncoder)

    configkeys = (
        'editable',
        'autosizable',
        'fillFrame',
        'frameMargins',
        'scrollZoom',
        'doubleClick',
        'showTips',
        'showLink',
        'sendData',
        'linkText',
        'showSources',
        'displayModeBar',
        'modeBarButtonsToRemove',
        'modeBarButtonsToAdd',
        'modeBarButtons',
        'displaylogo',
        'plotGlPixelRatio',
        'setBackground',
        'topojsonURL')

    config_clean = dict((k, config[k]) for k in configkeys if k in config)

    jconfig = json.dumps(config_clean)

    # TODO: The get_config 'source of truth' should
    # really be somewhere other than plotly.plotly
    plotly_platform_url = plotly.plotly.get_config().get('plotly_domain',
                                                         'https://plot.ly')
    # 'linkText' falls back to the default plot.ly caption when not supplied.
    link_text = config.get('linkText', 'Export to plot.ly')
    if (plotly_platform_url != 'https://plot.ly' and
            link_text == 'Export to plot.ly'):

        link_domain = plotly_platform_url\
            .replace('https://', '')\
            .replace('http://', '')
        link_text = link_text.replace('plot.ly', link_domain)
        config['linkText'] = link_text

    script = 'Plotly.newPlot("{id}", {data}, {layout}, {config})'.format(
        id=plotdivid,
        data=jdata,
        layout=jlayout,
        config=jconfig)

    optional_line1 = ('require(["plotly"], function(Plotly) {{ '
                      if global_requirejs else '')
    optional_line2 = ('}});' if global_requirejs else '')

    plotly_html_div = (
        ''
        '<div id="{id}" style="height: {height}; width: {width};" '
        'class="plotly-graph-div">'
        '</div>'
        '<script type="text/javascript">' +
        optional_line1 +
        'window.PLOTLYENV=window.PLOTLYENV || {{}};'
        'window.PLOTLYENV.BASE_URL="' + plotly_platform_url + '";'
        '{script}' +
        optional_line2 +
        '</script>'
        '').format(
        id=plotdivid, script=script,
        height=height, width=width)

    return plotly_html_div, plotdivid, width, height
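
In the function above, uuid.uuid4() supplies the DOM id for the plot's <div>, so several figures can be embedded in the same page without their ids clashing. A reduced sketch of that wiring (field handling is illustrative, not the full plotly output):

import json
import uuid

def render_plot_div(data, layout):
    plotdivid = uuid.uuid4()  # unique id shared by the div and the newPlot call
    return ('<div id="{id}" class="plotly-graph-div"></div>'
            '<script>Plotly.newPlot("{id}", {data}, {layout});</script>').format(
                id=plotdivid, data=json.dumps(data), layout=json.dumps(layout))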

Example 33

Project: pypowervm
Source File: test_lpar_bldr.py
View license
    def test_builder(self):
        # Build the minimum attributes, Shared Procs
        attr = dict(name='TheName', env=bp.LPARType.AIXLINUX, memory=1024,
                    vcpu=1)
        bldr = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys1)
        self.assertIsNotNone(bldr)

        new_lpar = bldr.build()
        self.assertIsNotNone(new_lpar)
        self.assert_xml(new_lpar, self.sections['shared_lpar'])
        self.assertEqual('TheName', new_lpar.name)

        # Rebuild the same lpar with a different name
        attr['name'] = 'NewName'
        bldr = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys1)
        rbld_lpar = bldr.rebuild(new_lpar)
        self.assertEqual('NewName', rbld_lpar.name)

        # Build the minimum attributes, Dedicated Procs
        attr = dict(name='TheName', env=bp.LPARType.AIXLINUX, memory=1024,
                    vcpu=1, dedicated_proc=True)
        bldr = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys1)
        self.assertIsNotNone(bldr)

        new_lpar = bldr.build()
        self.assertIsNotNone(new_lpar)
        self.assert_xml(new_lpar.entry, self.sections['dedicated_lpar'])

        # Build the minimum attributes, Dedicated Procs = 'true'
        attr = dict(name='TheName', env=bp.LPARType.AIXLINUX, memory=1024,
                    vcpu=1, dedicated_proc='true')
        bldr = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys1)
        new_lpar = bldr.build()
        self.assert_xml(new_lpar.entry, self.sections['dedicated_lpar'])

        # Leave out memory
        attr = dict(name='lpar', env=bp.LPARType.AIXLINUX, vcpu=1)
        self.assertRaises(
            lpar_bldr.LPARBuilderException, lpar_bldr.LPARBuilder, self.adpt,
            attr, self.stdz_sys1)

        # Bad memory lmb multiple
        attr = dict(name='lpar', memory=3333, env=bp.LPARType.AIXLINUX, vcpu=1)
        bldr = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys1)
        self.assertRaises(ValueError, bldr.build)

        # Check the validation of the LPAR type when not specified
        attr = dict(name='TheName', memory=1024, vcpu=1)
        bldr = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys1)
        new_lpar = bldr.build()
        self.assert_xml(new_lpar, self.sections['shared_lpar'])

        # LPAR name too long
        attr = dict(name='lparlparlparlparlparlparlparlparlparlparlparlpar'
                    'lparlparlparlparlparlparlparlparlparlparlparlparlparlpar',
                    memory=1024, env=bp.LPARType.AIXLINUX, vcpu=1)
        bldr = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys1)
        self.assertRaises(lpar_bldr.LPARBuilderException, bldr.build)

        # Test setting uuid
        uuid1 = pvm_uuid.convert_uuid_to_pvm(str(uuid.uuid4()))
        attr = dict(name='lpar', memory=1024, uuid=uuid1, vcpu=1)
        bldr = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys1)
        lpar_w = bldr.build()
        self.assertEqual(uuid1.upper(), lpar_w.uuid)

        # Test setting id
        id1 = 1234
        attr = dict(name='lpar', memory=1024, uuid=uuid1, vcpu=1, id=id1)
        bldr = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys1)
        lpar_w = bldr.build()
        self.assertEqual(id1, lpar_w.id)

        # Bad LPAR type
        attr = dict(name='lpar', memory=1024, env='BADLPARType', vcpu=1)
        bldr = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys1)
        self.assertRaises(ValueError, bldr.build)

        # Bad IO Slots
        attr = dict(name='lpar', memory=1024, max_io_slots=0,
                    env=bp.LPARType.AIXLINUX, vcpu=1)
        bldr = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys1)
        self.assertRaises(ValueError, bldr.build)

        attr = dict(name='lpar', memory=1024, max_io_slots=(65534+1),
                    env=bp.LPARType.AIXLINUX, vcpu=1)
        bldr = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys1)
        self.assertRaises(ValueError, bldr.build)

        # Good non-defaulted IO Slots and SRR
        attr = dict(name='TheName', memory=1024, max_io_slots=64,
                    env=bp.LPARType.AIXLINUX, vcpu=1, srr_capability=False)
        bldr = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys1)
        new_lpar = bldr.build()
        self.assert_xml(new_lpar, self.sections['shared_lpar'])

        # Bad SRR value.
        attr = dict(name='lpar', memory=1024, max_io_slots=64,
                    env=bp.LPARType.AIXLINUX, vcpu=1, srr_capability='Frog')
        bldr = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys1)
        self.assertRaises(ValueError, bldr.build)

        # Uncapped / capped shared procs
        attr = dict(name='TheName', env=bp.LPARType.AIXLINUX, memory=1024,
                    vcpu=1, sharing_mode=bp.SharingMode.CAPPED,
                    srr_capability='true')
        bldr = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys1)
        new_lpar = bldr.build()
        self.assert_xml(new_lpar, self.sections['capped_lpar'])

        # Uncapped and no SRR capability
        attr = dict(name='TheName', env=bp.LPARType.AIXLINUX, memory=1024,
                    vcpu=1, sharing_mode=bp.SharingMode.UNCAPPED,
                    uncapped_weight=100, processor_compatibility='POWER6')
        bldr = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys2)
        new_lpar = bldr.build()
        self.assert_xml(new_lpar, self.sections['uncapped_lpar'])

        # Build dedicated but only via dedicated attributes
        m = bp.DedicatedSharingMode.SHARE_IDLE_PROCS_ALWAYS
        attr = dict(name='TheName', env=bp.LPARType.AIXLINUX, memory=1024,
                    vcpu=1, sharing_mode=m, processor_compatibility='PoWeR7')
        bldr = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys1)
        new_lpar = bldr.build()
        self.assert_xml(new_lpar.entry,
                        self.sections['ded_lpar_sre_idle_procs_always'])

        # Desired mem outside min
        attr = dict(name='lpar', memory=1024, env=bp.LPARType.AIXLINUX, vcpu=1,
                    min_mem=2048)
        bldr = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys1)
        self.assertRaises(ValueError, bldr.build)

        # Desired mem outside max
        attr = dict(name='lpar', memory=5000, env=bp.LPARType.AIXLINUX, vcpu=1,
                    max_mem=2048)
        bldr = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys1)
        self.assertRaises(ValueError, bldr.build)

        # AME not supported on host
        attr = dict(name='lpar', memory=1024, env=bp.LPARType.AIXLINUX, vcpu=1,
                    ame_factor='1.5')
        bldr = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys1)
        self.assertRaises(ValueError, bldr.build)

        # AME outside valid range
        attr = dict(name='lpar', memory=1024, env=bp.LPARType.AIXLINUX, vcpu=1,
                    ame_factor='0.5')
        bldr = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys3)
        self.assertRaises(ValueError, bldr.build)

        # Desired vcpu outside min
        attr = dict(name='lpar', memory=2048, env=bp.LPARType.AIXLINUX, vcpu=1,
                    min_vcpu=2)
        bldr = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys1)
        self.assertRaises(ValueError, bldr.build)

        # Desired vcpu outside max
        attr = dict(name='lpar', memory=2048, env=bp.LPARType.AIXLINUX, vcpu=3,
                    max_vcpu=2)
        bldr = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys1)
        self.assertRaises(ValueError, bldr.build)

        # Ensure the calculated procs are not below the min
        attr = dict(name='lpar', memory=2048, env=bp.LPARType.AIXLINUX, vcpu=3,
                    min_proc_units=3)
        bldr = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys1)
        new_lpar = bldr.build()
        procs = new_lpar.proc_config.shared_proc_cfg
        self.assertEqual(3.0, procs.min_units)

        # Ensure the calculated procs are all 0.5
        attr = dict(name='lpar', memory=2048, env=bp.LPARType.AIXLINUX, vcpu=1,
                    proc_units=0.5)
        bldr = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys1)
        new_lpar = bldr.build()
        procs = new_lpar.proc_config.shared_proc_cfg
        self.assertEqual(0.5, procs.min_units)
        self.assertEqual(0.5, procs.max_units)
        self.assertEqual(0.5, procs.desired_units)

        # Create a temp standardizer with a smaller proc units factor
        stdz = lpar_bldr.DefaultStandardize(self.mngd_sys,
                                            proc_units_factor=0.1)
        # Ensure the min, max, and desired proc units works as VCPU is scaled.
        for x in [1, 5, 10, 17, 20]:
            attr = dict(name='lpar', memory=2048, vcpu=x)
            bldr = lpar_bldr.LPARBuilder(self.adpt, attr, stdz)
            new_lpar = bldr.build()
            procs = new_lpar.proc_config.shared_proc_cfg
            self.assertEqual(round(0.1 * x, 2), procs.min_units)
            self.assertEqual(round(0.1 * x, 2), procs.max_units)
            self.assertEqual(round(0.1 * x, 2), procs.desired_units)

        # Ensure the calculated procs are below the max
        attr = dict(name='lpar', memory=2048, env=bp.LPARType.AIXLINUX, vcpu=3,
                    max_proc_units=2.1)
        stdz = lpar_bldr.DefaultStandardize(
            self.mngd_sys, proc_units_factor=0.9)
        bldr = lpar_bldr.LPARBuilder(self.adpt, attr, stdz)
        new_lpar = bldr.build()
        procs = new_lpar.proc_config.shared_proc_cfg
        self.assertEqual(2.1, procs.max_units)

        # Ensure proc units factor is between 0.1 and 1.0
        self.assertRaises(
            lpar_bldr.LPARBuilderException,
            lpar_bldr.DefaultStandardize,
            self.mngd_sys, proc_units_factor=1.01)
        self.assertRaises(
            lpar_bldr.LPARBuilderException,
            lpar_bldr.DefaultStandardize,
            self.mngd_sys, proc_units_factor=0.01)

        # Avail priority outside max
        attr = dict(name='lpar', memory=2048, env=bp.LPARType.AIXLINUX, vcpu=3,
                    avail_priority=332)
        bldr = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys1)
        self.assertRaises(ValueError, bldr.build)

        # Avail priority bad parm
        attr = dict(name='lpar', memory=2048, env=bp.LPARType.AIXLINUX, vcpu=3,
                    avail_priority='BADVALUE')
        bldr = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys1)
        self.assertRaises(ValueError, bldr.build)

        # Avail priority at min value
        attr = dict(name='lpar', memory=2048, env=bp.LPARType.AIXLINUX, vcpu=3,
                    avail_priority=0)
        bldr = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys1)
        new_lpar = bldr.build()
        self.assertEqual(new_lpar.avail_priority, 0)

        # Avail priority at max value
        attr = dict(name='lpar', memory=2048, env=bp.LPARType.AIXLINUX, vcpu=3,
                    avail_priority=255)
        bldr = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys1)
        new_lpar = bldr.build()
        self.assertEqual(new_lpar.avail_priority, 255)

        # Proc compat
        for pc in bp.LPARCompat.ALL_VALUES:
            attr = dict(name='name', memory=1024, vcpu=1,
                        processor_compatibility=pc)
            bldr = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys1)
            new_lpar = bldr.build()
            self.assertEqual(new_lpar.pending_proc_compat_mode, pc)

        attr = dict(name='name', memory=1024, vcpu=1,
                    processor_compatibility='POWER6')
        bldr = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys2)
        new_lpar = bldr.build()
        self.assertEqual(new_lpar.pending_proc_compat_mode, 'POWER6')

        # Ensure failure occurs on validation after the host supported
        # proc modes are loaded and not on convert_value which converts
        # across all acceptable proc modes.
        # This works because 'POWER8' is in LPARCompat.ALL_VALUES
        attr = dict(name='name', memory=1024, vcpu=1,
                    processor_compatibility='POWER8')
        bldr = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys2)
        exp_msg = ("Value 'POWER8' is not valid for field 'Processor "
                   "Compatability Mode' with acceptable choices: ['POWER6']")
        try:
            bldr.build()
        except Exception as e:
            self.assertEqual(six.text_type(e), exp_msg)

        # Build a VIOS
        attr = dict(name='TheName', env=bp.LPARType.VIOS, memory=1024,
                    vcpu=1, dedicated_proc=True)
        bldr = lpar_bldr.LPARBuilder(self.adpt, attr, self.stdz_sys1)
        self.assertIsNotNone(bldr)

        new_lpar = bldr.build()
        self.assertIsNotNone(new_lpar)
        self.assert_xml(new_lpar.entry, self.sections['vios'])
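
For the UUID check above, the test mints a random UUID, hands it to the builder as the desired LPAR UUID, and expects the built wrapper to report it back upper-cased; pvm_uuid.convert_uuid_to_pvm is pypowervm-specific and not reproduced here. The generation step in isolation:

import uuid

desired_uuid = str(uuid.uuid4())
attr = dict(name='lpar', memory=1024, vcpu=1, uuid=desired_uuid)
# In the real test the value is first run through convert_uuid_to_pvm before comparison.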

Example 34

Project: pulp
Source File: distributor.py
View license
    @staticmethod
    def add_distributor(repo_group_id, distributor_type_id, group_plugin_config,
                        distributor_id=None):
        """
        Adds an association from the given repository group to a distributor.
        The association will be tracked through the distributor_id; each
        distributor on a given group must have a unique ID. If this is not
        specified, one will be generated. If a distributor already exists on the
        group with a given ID, the existing one will be removed and replaced
        with the newly configured one.

        @param repo_group_id: identifies the repo group
        @type  repo_group_id: str

        @param distributor_type_id: type of distributor being added; must reference
               one of the installed group distributors
        @type  distributor_type_id: str

        @param group_plugin_config: config to use for the distributor for this group alone
        @type  group_plugin_config: dict

        @param distributor_id: if specified, the newly added distributor will be
               referenced by this value and the group id; if omitted one will
               be generated
        @type  distributor_id: str

        @return: database representation of the added distributor
        @rtype:  dict

        @raise MissingResource: if the group doesn't exist
        @raise InvalidValue: if a distributor ID is provided and is not valid
        @raise PulpDataException: if the plugin indicates the config is invalid
        @raise PulpExecutionException: if the plugin raises an exception while
               initializing the newly added distributor
        """
        distributor_coll = RepoGroupDistributor.get_collection()

        query_manager = manager_factory.repo_group_query_manager()

        # Validation
        group = query_manager.get_group(repo_group_id)  # will raise MissingResource

        if not plugin_api.is_valid_group_distributor(distributor_type_id):
            raise InvalidValue(['distributor_type_id'])

        # Determine the ID for the distributor on this repo
        if distributor_id is None:
            distributor_id = str(uuid.uuid4())
        else:
            # Validate if one was passed in
            if not is_distributor_id_valid(distributor_id):
                raise InvalidValue(['distributor_id'])

        distributor_instance, plugin_config = plugin_api.get_group_distributor_by_id(
            distributor_type_id)

        # Convention is that a value of None means unset. Remove any keys that
        # are explicitly set to None so the plugin will default them.
        clean_config = None
        if group_plugin_config is not None:
            clean_config = dict([(k, v) for k, v in group_plugin_config.items() if v is not None])

        # Let the plugin validate the configuration
        call_config = PluginCallConfiguration(plugin_config, clean_config)
        transfer_group = common_utils.to_transfer_repo_group(group)

        config_conduit = RepoConfigConduit(distributor_type_id)

        # Request the plugin validate the configuration
        try:
            is_valid, message = distributor_instance.validate_config(transfer_group, call_config,
                                                                     config_conduit)

            if not is_valid:
                raise PulpDataException(message)
        except Exception, e:
            msg = _('Exception received from distributor [%(d)s] while validating config')
            msg = msg % {'d': distributor_type_id}
            _logger.exception(msg)
            raise PulpDataException(e.args), None, sys.exc_info()[2]

        # Remove the old distributor if it exists
        try:
            RepoGroupDistributorManager.remove_distributor(repo_group_id, distributor_id,
                                                           force=False)
        except MissingResource:
            pass  # if it didn't exist, no problem

        # Invoke the appropriate plugin lifecycle method
        try:
            distributor_instance.distributor_added(transfer_group, call_config)
        except Exception, e:
            _logger.exception(
                'Error initializing distributor [%s] for group [%s]' % (
                    distributor_type_id, repo_group_id))
            raise PulpExecutionException(), None, sys.exc_info()[2]

        # Finally, update the database
        distributor = RepoGroupDistributor(distributor_id, distributor_type_id, repo_group_id,
                                           clean_config)
        distributor_coll.save(distributor)

        return distributor
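
The id-defaulting rule above is the core uuid.uuid4() usage: callers may name the distributor themselves, and a random UUID string is only generated when they do not. A sketch of just that branch (the validator argument is illustrative):

import uuid

def resolve_distributor_id(distributor_id=None, is_valid=lambda value: bool(value)):
    if distributor_id is None:
        return str(uuid.uuid4())
    if not is_valid(distributor_id):
        raise ValueError('distributor_id')
    return distributor_id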

Example 35

Project: tp-qemu
Source File: usb_storage.py
View license
@error.context_aware
def run(test, params, env):
    """
    Test usb storage devices in the guest.

    1) Create a image file by qemu-img
    2) Boot up a guest add this image as a usb device
    3) Check usb device information via monitor
    4) Check usb information by executing guest command
    5) Check usb serial option (optional)
    6) Check usb removable option (optional)
    7) Check usb min_io_size/opt_io_size option (optional)

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    @error.context_aware
    def _verify_string(regex_str, string, expect_result, search_opt=0):
        """
        Verify USB storage device in monitor

        :param regex_str: Regex for checking command output
        :param string: The string which will be checked
        :param expect_result: The expected string
        :param search_opt: Search option for re module.
        """
        def _compare_str(act, exp, ignore_case):
            def str_func_1(x):
                return x

            def str_func_2(x):
                return x.lower()

            str_func = str_func_1
            if ignore_case:
                str_func = str_func_2

            if str_func(act) != str_func(exp):
                return ("Expected: '%s', Actual: '%s'" %
                        (str_func(exp), str_func(act)))
            return ""

        ignore_case = False
        if search_opt & re.I == re.I:
            ignore_case = True

        error.context("Finding matched sub-string with regex pattern '%s'" %
                      regex_str, logging.info)
        m = re.findall(regex_str, string, search_opt)
        if not m:
            logging.debug(string)
            raise error.TestError("Could not find matched sub-string")

        error.context("Verify matched string is same as expected")
        actual_result = m[0]
        if "removable" in regex_str:
            if actual_result in ["on", "yes", "true"]:
                actual_result = "on"
            if actual_result in ["off", "no", "false"]:
                actual_result = "off"

        fail_log = []
        if isinstance(actual_result, tuple):
            for i, v in enumerate(expect_result):
                ret = _compare_str(actual_result[i], v, ignore_case)
                if ret:
                    fail_log.append(ret)
        else:
            ret = _compare_str(actual_result, expect_result[0], ignore_case)
            if ret:
                fail_log.append(ret)

        if fail_log:
            logging.debug(string)
            raise error.TestFail("Could not find expected string:\n %s" %
                                 ("\n".join(fail_log)))

    def _do_io_test_guest(session):
        utils_test.run_virt_sub_test(test, params, env, "format_disk")

    @error.context_aware
    def _restart_vm(options):
        if vm.is_alive():
            vm.destroy()

        for option, value in options.iteritems():
            params[option] = value
        error.context("Restarting VM")
        vm.create(params=params)
        vm.verify_alive()

    def _login():
        return vm.wait_for_login(timeout=login_timeout)

    def _get_usb_disk_name_in_guest(session):
        def _get_output():
            cmd = "ls -l /dev/disk/by-path/* | grep usb"
            try:
                return session.cmd(cmd).strip()
            except aexpect.ShellCmdError:
                return ""

        output = utils_misc.wait_for(_get_output, login_timeout, step=5,
                                     text="Wait for getting USB disk name")
        devname = re.findall("sd\w", output)
        if devname:
            return devname[0]
        return "sda"

    @error.context_aware
    def _check_serial_option(serial, regex_str, expect_str):
        error.context("Set serial option to '%s'" % serial, logging.info)
        _restart_vm({"blk_extra_params_stg": "serial=" + serial})

        error.context("Check serial option in monitor", logging.info)
        output = str(vm.monitor.info("qtree"))
        _verify_string(regex_str, output, [expect_str], re.S)

        error.context("Check serial option in guest", logging.info)
        session = _login()
        output = session.cmd("lsusb -v")
        if serial not in ["EMPTY_STRING", "NO_EQUAL_STRING"]:
            # Verify in guest when serial is set to empty/null is meaningless.
            _verify_string(serial, output, [serial])
        _do_io_test_guest(session)

        session.close()

    @error.context_aware
    def _check_removable_option(removable, expect_str):
        error.context("Set removable option to '%s'" % removable, logging.info)
        _restart_vm({"removable_stg": removable})

        error.context("Check removable option in monitor", logging.info)
        output = str(vm.monitor.info("qtree"))
        regex_str = 'usb-storage.*?removable = (.*?)\s'
        _verify_string(regex_str, output, [removable], re.S)

        error.context("Check removable option in guest", logging.info)
        session = _login()
        cmd = "dmesg | grep %s" % _get_usb_disk_name_in_guest(session)
        output = session.cmd(cmd)
        _verify_string(expect_str, output, [expect_str], re.I)
        _do_io_test_guest(session)

        session.close()

    @error.context_aware
    def _check_io_size_option(min_io_size="512", opt_io_size="0"):
        error.context("Set min_io_size to %s, opt_io_size to %s" %
                      (min_io_size, opt_io_size), logging.info)
        opt = {}
        opt["min_io_size_stg"] = min_io_size
        opt["opt_io_size_stg"] = opt_io_size

        _restart_vm(opt)

        error.context("Check min/opt io_size option in monitor", logging.info)
        output = str(vm.monitor.info("qtree"))
        regex_str = "usb-storage.*?min_io_size = (\d+).*?opt_io_size = (\d+)"
        _verify_string(regex_str, output, [min_io_size, opt_io_size], re.S)

        error.context("Check min/opt io_size option in guest", logging.info)
        session = _login()
        d = _get_usb_disk_name_in_guest(session)
        cmd = ("cat /sys/block/%s/queue/{minimum,optimal}_io_size" % d)

        output = session.cmd(cmd)
        # Note: If min_io_size is set to 0, the guest min_io_size would be set
        # to 512 by default.
        if min_io_size != "0":
            expected_min_size = min_io_size
        else:
            expected_min_size = "512"
        _verify_string(
            "(\d+)\n(\d+)", output, [expected_min_size, opt_io_size])
        _do_io_test_guest(session)

        session.close()

    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()

    login_timeout = int(params.get("login_timeout", 360))
    error.context("Check usb device information in monitor", logging.info)
    output = str(vm.monitor.info("usb"))
    if "Product QEMU USB MSD" not in output:
        logging.debug(output)
        raise error.TestFail("Could not find mass storage device")

    error.context("Check usb device information in guest", logging.info)
    session = _login()
    output = session.cmd(params["chk_usb_info_cmd"])
    # No bus specified; "usb-storage" defaults to using bus "usb.0"
    for i in params["chk_usb_info_keyword"].split(","):
        _verify_string(i, output, [i])
    _do_io_test_guest(session)
    session.close()

    if params.get("check_serial_option") == "yes":
        error.context("Check usb serial option", logging.info)
        serial = str(uuid.uuid4())
        regex_str = 'usb-storage.*?serial = "(.*?)"\s'
        _check_serial_option(serial, regex_str, serial)

        logging.info("Check this option with some illegal string")
        logging.info("Set usb serial to a empty string")
        # An empty string, ""
        serial = "EMPTY_STRING"
        regex_str = 'usb-storage.*?serial = (.*?)\s'
        _check_serial_option(serial, regex_str, '""')

        logging.info("Leave usb serial option blank")
        serial = "NO_EQUAL_STRING"
        regex_str = 'usb-storage.*?serial = (.*?)\s'
        _check_serial_option(serial, regex_str, '"on"')

    if params.get("check_removable_option") == "yes":
        error.context("Check usb removable option", logging.info)
        removable = "on"
        expect_str = "Attached SCSI removable disk"
        _check_removable_option(removable, expect_str)

        removable = "off"
        expect_str = "Attached SCSI disk"
        _check_removable_option(removable, expect_str)

    if params.get("check_io_size_option") == "yes":
        error.context("Check usb min/opt io_size option", logging.info)
        _check_io_size_option("0", "0")

Example 36

Project: sqlalchemy-media
Source File: attachments.py
View license
    def attach(self, attachable: Attachable, content_type: str = None, original_filename: str = None,
               extension: str = None, store_id: str = None, overwrite: bool=False, suppress_pre_process: bool=False,
               suppress_validation: bool=False, **kwargs) -> 'Attachment':
        """
        Attach a file. If the session is rolled back, all operations will be rolled back.
        The old file, if any, will be deleted after commit.

        Workflow::


                             +--------+
                             | Start  |
                             +---+----+
                                 |
                      +----------v-----------+
                      | Wrap with Descriptor <----+
                      +----------+-----------+    |
                                 |                |
                      +----------v-----------+    |
                      | Nothing or Analyze   |    |
                      +----------+-----------+    |
                                 |                |
                      +----------v-----------+    |
                      | Nothing or Validate  |    |
                      +----------+-----------+    |
                                 |                |
                      +----------v-----------+    |
                      |Nothing or Pre Process+----+
                      +------+---------------+
                             |
                  +----------+-----------+
                  |                      |
           +------v---------+  +---------v------+
           |  Store in DB   |  |Store In Storage|
           +------+---------+  +---------+------+
                  |                      |
                  +----------+-----------+
                             |
                             |
                         +---v----+
                         | Finish |
                         +--------+


        :param attachable: file-like object, filename or URL to attach.
        :param content_type: If given, the content-detection is suppressed.
        :param original_filename: Original name of the file, if available, to append to the end of the filename;
                                  useful for SEO and readability.
        :param extension: The file's extension, if available; otherwise it is guessed from the content_type.
        :param store_id: The store id to store this file on. Stores must be registered with appropriate id via
                         :meth:`sqlalchemy_media.stores.StoreManager.register`.
        :param overwrite: Overwrites the file without changing its unique key and name, useful to prevent broken links.
                          Currently, when using this option, the rollback function is not available, because the old file
                          will be overwritten by the given new one.
        :param suppress_pre_process: When :data:`.True`, the pre-processing phase is skipped during attachment.
        :param suppress_validation: When :data:`.True`, the validation phase is skipped during attachment.
        :param kwargs: Additional metadata to be stored in backend.

        .. note:: :exc:`.MaximumLengthIsReachedError` and/or :exc:`.MinimumLengthIsNotReachedError` may be raised.

        .. warning:: This operation cannot be rolled back if ``overwrite=True`` is given.

        .. versionchanged:: 0.1

            - This method returns ``self``, which is useful for chaining method calls on the object within a single line.
            - Additional ``kwargs`` are accepted to be stored in the database alongside the file's metadata.

        .. versionchanged:: 0.5

            - ``suppress_pre_process`` argument.
            - ``suppress_validation`` argument.
            - pre-processing phase.

        """

        # Wrap in AttachableDescriptor
        descriptor = AttachableDescriptor(
            attachable,
            content_type=content_type,
            original_filename=original_filename,
            extension=extension,
            max_length=self.__max_length__,
            min_length=self.__min_length__
        )

        try:

            # Backup the old key and filename if exists
            if overwrite:
                old_attachment = None
            else:
                old_attachment = None if self.empty else self.copy()
                self.key = str(uuid.uuid4())

            # Store information from descriptor
            attachment_info = kwargs.copy()
            attachment_info.update(
                original_filename=descriptor.original_filename,
                extension=descriptor.extension,
                content_type=descriptor.content_type,
                length=descriptor.content_length,
                store_id=store_id
            )

            # Pre-processing
            if self.__pre_processors__:
                processors = self.__pre_processors__ if isinstance(self.__pre_processors__, Iterable) \
                    else [self.__pre_processors__]

                # noinspection PyTypeChecker
                for processor in processors:
                    processor.process(descriptor, attachment_info)

            # Updating the mutable dictionary
            self.update([(k, v) for k, v in attachment_info.items() if v is not None])

            # Putting the file on the store.
            self['length'] = self.get_store().put(self.path, descriptor)

            self.timestamp = time.time()

            store_manager = StoreManager.get_current_store_manager()
            store_manager.register_to_delete_after_rollback(self)

            if old_attachment:
                store_manager.register_to_delete_after_commit(old_attachment)

        except:
            descriptor.close(check_length=False)
            raise

        else:
            descriptor.close()

        return self
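
When overwrite is False, attach() above backs up the old attachment and assigns the new one a fresh storage key from uuid.uuid4(), so the previous stored object stays addressable until it is deleted after commit. A minimal sketch of that key-rotation idea, using a hypothetical dict-based stand-in rather than the real Attachment class:

import uuid

class AttachmentStub(dict):
    """Hypothetical stand-in for the mutable Attachment mapping above."""

    def rotate_key(self):
        # A new, collision-free key per attach keeps the old stored object
        # reachable (for deletion after commit) while the new one is written.
        old_key = self.get('key')
        self['key'] = str(uuid.uuid4())
        return old_key, self['key']

stub = AttachmentStub()
first_old, first_new = stub.rotate_key()
second_old, second_new = stub.rotate_key()
assert first_old is None and second_old == first_new and first_new != second_new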

Example 37

Project: pyconkr-2015
Source File: views.py
View license
@login_required
def registration_payment(request):
    max_ticket_limit = settings.MAX_TICKET_NUM

    if not is_registration_time():
        return redirect('registration_info')

    if request.method == 'GET':
        product = Product()

        registered = Registration.objects.filter(
            user=request.user,
            payment_status__in=['paid', 'ready']
        ).exists()

        if registered:
            return redirect('registration_status')

        uid = str(uuid4()).replace('-', '')
        form = RegistrationForm(initial={'email': request.user.email})

        return render(request, 'pyconkr/registration/payment.html', {
            'title': _('Registration'),
            'IMP_USER_CODE': settings.IMP_USER_CODE,  # TODO : Move to 'settings context processor'
            'form': form,
            'uid': uid,
            'product_name': product.name,
            'amount': product.price,
            'vat': 0,
        })
    elif request.method == 'POST':
        payment_logger.debug(request.POST)
        form = RegistrationForm(request.POST)

        # TODO : more form validation
        # eg) merchant_uid
        if not form.is_valid():
            form_errors_string = "\n".join(('%s:%s' % (k, v[0]) for k, v in form.errors.items()))
            return render_json({
                'success': False,
                'message': form_errors_string,  # TODO : ...
            })

        remain_ticket_count = (settings.MAX_TICKET_NUM - Registration.objects.filter(payment_status__in=['paid', 'ready']).count())

        # sold out
        if remain_ticket_count <= 0:
            return render_json({
                'success': False,
                'message': u'티켓이 매진 되었습니다',  # "Tickets are sold out"
            })

        registration, created = Registration.objects.get_or_create(user=request.user)
        registration.name = form.cleaned_data.get('name')
        registration.email = request.user.email
        registration.company = form.cleaned_data.get('company', '')
        registration.phone_number = form.cleaned_data.get('phone_number', '')
        registration.merchant_uid = request.POST.get('merchant_uid')
        registration.save()  # TODO : use form.save()

        try:
            product = Product()
            access_token = get_access_token(settings.IMP_API_KEY, settings.IMP_API_SECRET)
            imp_client = Iamporter(access_token)

            if request.POST.get('payment_method') == 'card':
                # TODO : use validated and cleaned data
                imp_client.onetime(
                    token=request.POST.get('token'),
                    merchant_uid=request.POST.get('merchant_uid'),
                    amount=request.POST.get('amount'),
                    # vat=request.POST.get('vat'),
                    card_number=request.POST.get('card_number'),
                    expiry=request.POST.get('expiry'),
                    birth=request.POST.get('birth'),
                    pwd_2digit=request.POST.get('pwd_2digit'),
                    customer_uid=form.cleaned_data.get('email'),
                )

            confirm = imp_client.find_by_merchant_uid(request.POST.get('merchant_uid'))

            if confirm['amount'] != product.price:
                # TODO : cancel
                return render_io_error("amount is not same as product.price. it will be canceled")

            registration.payment_method = confirm.get('pay_method')
            registration.payment_status = confirm.get('status')
            registration.payment_message = confirm.get('fail_reason')
            registration.vbank_name = confirm.get('vbank_name', None)
            registration.vbank_num = confirm.get('vbank_num', None)
            registration.vbank_date = confirm.get('vbank_date', None)
            registration.vbank_holder = confirm.get('vbank_holder', None)
            registration.save()

            send_email_ticket_confirm(request, registration)
        except IamporterError as e:
            # TODO : other status code
            return render_json({
                'success': False,
                'code': e.code,
                'message': e.message,
            })
        else:
            return render_json({
                'success': True,
            })
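
The registration view above builds the uid handed to the payment page as str(uuid4()) with the dashes stripped, i.e. a 32-character hex token. A small sketch of the same idea; uuid4().hex is an equivalent, slightly more direct spelling, and the variable names here are only illustrative:

from uuid import uuid4

uid = str(uuid4()).replace('-', '')
assert len(uid) == 32

# uuid4().hex yields the same dash-free form without the replace() step.
alt_uid = uuid4().hex
assert len(alt_uid) == 32 and '-' not in alt_uid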

Example 38

Project: rapidpro
Source File: flow_migrations.py
View license
def migrate_export_to_version_9(exported_json, org, same_site=True):
    """
    Migrates remaining ids to uuids. Changes to uuids for Flows, Groups,
    Contacts and Channels inside of Actions, Triggers, Campaigns, Events
    """

    def replace(str, match, replace):
        rexp = regex.compile(match, flags=regex.MULTILINE | regex.UNICODE | regex.V0)

        # replace until no matches found
        matches = 1
        while matches:
            (str, matches) = rexp.subn(replace, str)

        return str

    exported_string = json.dumps(exported_json)

    # any references to @extra.flow are now just @parent
    exported_string = replace(exported_string, '@(extra\.flow)', '@parent')
    exported_string = replace(exported_string, '(@\(.*?)extra\.flow(.*?\))', r'\1parent\2')

    # any references to @extra.contact are now @parent.contact
    exported_string = replace(exported_string, '@(extra\.contact)', '@parent.contact')
    exported_string = replace(exported_string, '(@\(.*?)extra\.contact(.*?\))', r'\1parent.contact\2')

    exported_json = json.loads(exported_string)

    flow_id_map = {}
    group_id_map = {}
    contact_id_map = {}
    campaign_id_map = {}
    campaign_event_id_map = {}
    label_id_map = {}

    def get_uuid(id_map, obj_id):
        uuid = id_map.get(obj_id, None)
        if not uuid:
            uuid = unicode(uuid4())
            id_map[obj_id] = uuid
        return uuid

    def replace_with_uuid(ele, manager, id_map, nested_name=None, obj=None, create_dict=False):
        # deal with case of having only a string and no name
        if isinstance(ele, basestring) and create_dict:
            # variable references should just stay put
            if len(ele) > 0 and ele[0] == '@':
                return ele
            else:
                ele = dict(name=ele)

        obj_id = ele.pop('id', None)
        obj_name = ele.pop('name', None)

        if same_site and not obj and obj_id:
            try:
                obj = manager.filter(pk=obj_id, org=org).first()
            except:
                pass

        # nest it if we were given a nested name
        if nested_name:
            ele[nested_name] = dict()
            ele = ele[nested_name]

        if obj:
            ele['uuid'] = obj.uuid

            if obj.name:
                ele['name'] = obj.name
        else:
            if obj_id:
                ele['uuid'] = get_uuid(id_map, obj_id)

            if obj_name:
                ele['name'] = obj_name

        return ele

    def remap_flow(ele, nested_name=None):
        from temba.flows.models import Flow
        replace_with_uuid(ele, Flow.objects, flow_id_map, nested_name)

    def remap_group(ele):
        from temba.contacts.models import ContactGroup
        return replace_with_uuid(ele, ContactGroup.user_groups, group_id_map, create_dict=True)

    def remap_campaign(ele):
        from temba.campaigns.models import Campaign
        replace_with_uuid(ele, Campaign.objects, campaign_id_map)

    def remap_campaign_event(ele):
        from temba.campaigns.models import CampaignEvent
        event = None
        if same_site:
            event = CampaignEvent.objects.filter(pk=ele['id'], campaign__org=org).first()
        replace_with_uuid(ele, CampaignEvent.objects, campaign_event_id_map, obj=event)

    def remap_contact(ele):
        from temba.contacts.models import Contact
        replace_with_uuid(ele, Contact.objects, contact_id_map)

    def remap_channel(ele):
        from temba.channels.models import Channel
        channel_id = ele.get('channel')
        if channel_id:
            channel = Channel.objects.filter(pk=channel_id).first()
            if channel:
                ele['channel'] = channel.uuid

    def remap_label(ele):
        from temba.msgs.models import Label
        replace_with_uuid(ele, Label.label_objects, label_id_map)

    for flow in exported_json.get('flows', []):
        for action_set in flow['action_sets']:
            for action in action_set['actions']:
                if action['type'] in ('add_group', 'del_group', 'send', 'trigger-flow'):
                    groups = []
                    for group_json in action.get('groups', []):
                        groups.append(remap_group(group_json))
                    for contact_json in action.get('contacts', []):
                        remap_contact(contact_json)
                    if groups:
                        action['groups'] = groups
                if action['type'] in ('trigger-flow', 'flow'):
                    remap_flow(action, 'flow')
                if action['type'] == 'add_label':
                    for label in action.get('labels', []):
                        remap_label(label)

        metadata = flow['metadata']
        if 'id' in metadata:
            if metadata.get('id', None):
                remap_flow(metadata)
            else:
                del metadata['id']

    for trigger in exported_json.get('triggers', []):
        if 'flow' in trigger:
            remap_flow(trigger['flow'])
        for group in trigger['groups']:
            remap_group(group)
        remap_channel(trigger)

    for campaign in exported_json.get('campaigns', []):
        remap_campaign(campaign)
        remap_group(campaign['group'])
        for event in campaign.get('events', []):
            remap_campaign_event(event)
            if 'id' in event['relative_to']:
                del event['relative_to']['id']
            if 'flow' in event:
                remap_flow(event['flow'])

    return exported_json
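
The migration above relies on a small memoisation helper: each legacy integer id is mapped to one freshly generated uuid4, so repeated references to the same object always resolve to the same uuid. A standalone sketch of that helper (the map and ids are illustrative, not the temba models):

import uuid

def get_uuid(id_map, obj_id):
    # One stable uuid per legacy id for the lifetime of the export migration.
    mapped = id_map.get(obj_id)
    if mapped is None:
        mapped = str(uuid.uuid4())
        id_map[obj_id] = mapped
    return mapped

flow_id_map = {}
assert get_uuid(flow_id_map, 42) == get_uuid(flow_id_map, 42)
assert get_uuid(flow_id_map, 42) != get_uuid(flow_id_map, 43)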

Example 39

Project: cassandra-dtest
Source File: user_types_test.py
View license
    def test_nested_user_types(self):
        """Tests user types within user types"""
        cluster = self.cluster
        cluster.populate(3).start()
        node1, node2, node3 = cluster.nodelist()
        session = self.patient_cql_connection(node1)
        create_ks(session, 'user_types', 2)
        session.default_consistency_level = ConsistencyLevel.LOCAL_QUORUM

        stmt = """
              USE user_types
           """
        session.execute(stmt)

        # Create a user type to go inside another one:
        stmt = """
              CREATE TYPE item (
              sub_one text,
              sub_two text,
              )
           """
        session.execute(stmt)

        # Create a user type to contain the item:
        stmt = """
              CREATE TYPE container (
              stuff text,
              more_stuff frozen<item>
              )
           """
        session.execute(stmt)

        # Create a table that holds and item, a container, and a
        # list of containers:
        stmt = """
              CREATE TABLE bucket (
               id uuid PRIMARY KEY,
               primary_item frozen<item>,
               other_items frozen<container>,
               other_containers list<frozen<container>>
              )
           """
        session.execute(stmt)
        # Make sure the schema propagates
        time.sleep(2)

        # Insert some data:
        _id = uuid.uuid4()
        stmt = """
              INSERT INTO bucket (id, primary_item)
              VALUES ({id}, {{sub_one: 'test', sub_two: 'test2'}});
           """.format(id=_id)
        session.execute(stmt)

        stmt = """
              UPDATE bucket
              SET other_items = {{stuff: 'stuff', more_stuff: {{sub_one: 'one', sub_two: 'two'}}}}
              WHERE id={id};
           """.format(id=_id)
        session.execute(stmt)

        stmt = """
              UPDATE bucket
              SET other_containers = other_containers + [
                   {{
                       stuff: 'stuff2',
                       more_stuff: {{sub_one: 'one_other', sub_two: 'two_other'}}
                   }}
              ]
              WHERE id={id};
           """.format(id=_id)
        session.execute(stmt)

        stmt = """
              UPDATE bucket
              SET other_containers = other_containers + [
                  {{
                      stuff: 'stuff3',
                      more_stuff: {{sub_one: 'one_2_other', sub_two: 'two_2_other'}}
                  }},
                  {{stuff: 'stuff4',
                    more_stuff: {{sub_one: 'one_3_other', sub_two: 'two_3_other'}}
                  }}
              ]
              WHERE id={id};
           """.format(id=_id)
        session.execute(stmt)

        stmt = """
              SELECT primary_item, other_items, other_containers from bucket where id={id};
           """.format(id=_id)
        rows = list(session.execute(stmt))

        primary_item, other_items, other_containers = rows[0]
        self.assertEqual(listify(primary_item), [u'test', u'test2'])
        self.assertEqual(listify(other_items), [u'stuff', [u'one', u'two']])
        self.assertEqual(listify(other_containers), [[u'stuff2', [u'one_other', u'two_other']], [u'stuff3', [u'one_2_other', u'two_2_other']], [u'stuff4', [u'one_3_other', u'two_3_other']]])

        # Generate some repetitive data and check it for its contents:
        for x in xrange(50):

            # Create row:
            _id = uuid.uuid4()
            stmt = """
              UPDATE bucket
              SET other_containers = other_containers + [
                  {{
                      stuff: 'stuff3',
                      more_stuff: {{
                          sub_one: 'one_2_other', sub_two: 'two_2_other'
                      }}
                  }},
                  {{
                      stuff: 'stuff4',
                      more_stuff: {{
                          sub_one: 'one_3_other', sub_two: 'two_3_other'
                      }}
                  }}
              ]
              WHERE id={id};
           """.format(id=_id)
            session.execute(stmt)

            time.sleep(0.1)

            # Check it:
            stmt = """
              SELECT other_containers from bucket WHERE id={id}
            """.format(id=_id)
            rows = list(session.execute(stmt))

            items = rows[0][0]
            self.assertEqual(listify(items), [[u'stuff3', [u'one_2_other', u'two_2_other']], [u'stuff4', [u'one_3_other', u'two_3_other']]])
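
In the test above uuid.uuid4() supplies the primary key for every row; because str() of a UUID is exactly the bare CQL uuid literal (unquoted hex and dashes), it can be interpolated straight into the statements. A tiny sketch of just that formatting step, with no Cassandra cluster involved:

import uuid

_id = uuid.uuid4()
stmt = "SELECT other_containers FROM bucket WHERE id={id};".format(id=_id)
# The statement now carries the 36-character uuid literal with no quoting.
assert str(_id) in stmt and len(str(_id)) == 36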

Example 40

Project: rockstor-core
Source File: disk.py
View license
    @staticmethod
    @transaction.atomic
    def _update_disk_state():
        """
        A db atomic method to update the database of attached disks / drives.
        Works only on device serial numbers for drive identification.
        Calls scan_disks to establish the current connected drives info.
        Initially removes db entries with duplicate serial numbers to deal
        with legacy db states, and obfuscates all previous device names as they
        are transient. The drive database is then updated with the attached
        disks info and previously known drives no longer found attached are
        marked as offline. All offline drives have their SMART availability and
        activation status removed and all attached drives have their SMART
        availability assessed and activated if available.
        :return: serialized models of attached and missing disks via serial num
        """
        # Acquire a list (namedtuple collection) of attached drives > min size
        disks = scan_disks(settings.MIN_DISK_SIZE)
        serial_numbers_seen = []
        # Make sane our db entries in view of what we know we have attached.
        # Device serial number is only known external unique entry, scan_disks
        # make this so in the case of empty or repeat entries by providing
        # fake serial numbers which are in turn flagged via WebUI as unreliable.
        # 1) scrub all device names with unique but nonsense uuid4
        # 2) mark all offline disks as such via db flag
        # 3) mark all offline disks smart available and enabled flags as False
        # logger.info('update_disk_state() Called')
        for do in Disk.objects.all():
            # Replace all device names with a unique placeholder on each scan
            # N.B. do not optimize by re-using uuid index as this could lead
            # to a non refreshed webui acting upon an entry that is different
            # from that shown to the user.
            do.name = 'detached-' + str(uuid.uuid4()).replace('-', '')
            # Delete duplicate or fake by serial number db disk entries.
            # It makes no sense to save fake serial number drives between scans
            # as on each scan the serial number is re-generated (fake) anyway.
            # Serial numbers beginning with 'fake-serial-' are from scan_disks.
            if (do.serial in serial_numbers_seen) or (
                    re.match('fake-serial-', do.serial) is not None):
                logger.info('Deleting duplicate or fake (by serial) Disk db '
                            'entry. Serial = %s' % do.serial)
                do.delete()  # django >=1.9 returns a dict of deleted items.
                # Continue onto next db disk object as nothing more to process.
                continue
            # first encounter of this serial in the db so stash it for reference
            serial_numbers_seen.append(deepcopy(do.serial))
            # Look for devices (by serial number) that are in the db but not in
            # our disk scan, ie offline / missing.
            if (do.serial not in [d.serial for d in disks]):
                # update the db entry as offline
                do.offline = True
                # disable S.M.A.R.T available and enabled flags.
                do.smart_available = do.smart_enabled = False
            do.save()  # make sure all updates are flushed to db
        # Our db now has no device name info as all dev names are placeholders.
        # Iterate over attached drives to update the db's knowledge of them.
        # Kernel dev names are unique so safe to overwrite our db unique name.
        for d in disks:
            # start with an empty disk object
            dob = None
            # Convert our transient (but just scanned, so current) sda type name
            # to a more useful by-id type name as found in /dev/disk/by-id.
            byid_disk_name, is_byid = get_dev_byid_name(d.name, True)
            # If the db has an entry with this disk's serial number then
            # use this db entry and update the device name from our recent scan.
            if (Disk.objects.filter(serial=d.serial).exists()):
                dob = Disk.objects.get(serial=d.serial)
                #dob.name = d.name
                dob.name = byid_disk_name
            else:
                # We have an assumed new disk entry as no serial match in db.
                # Build a new entry for this disk.
                #dob = Disk(name=d.name, serial=d.serial)
                # N.B. we may want to force a fake-serial here if is_byid False,
                # that way we flag as unusable disk as no by-id type name found.
                # It may already have been set though as the only by-id
                # failures so far are virtio disks with no serial so scan_disks
                # will have already given it a fake serial in d.serial.
                dob = Disk(name=byid_disk_name, serial=d.serial)
            # Update the db disk object (existing or new) with our scanned info
            dob.size = d.size
            dob.parted = d.parted
            dob.offline = False  # as we are iterating over attached devices
            dob.model = d.model
            dob.transport = d.transport
            dob.vendor = d.vendor
            dob.btrfs_uuid = d.btrfs_uuid
            # If attached disk has an fs and it isn't btrfs
            if (d.fstype is not None and d.fstype != 'btrfs'):
                dob.btrfs_uuid = None
                dob.parted = True  # overload use of parted as non btrfs flag.
                # N.B. this overload use may become redundant with the addition
                # of the Disk.role field.
            # Update the role field with scan_disks findings, currently only
            # mdraid membership type based on fstype info. In the case of
            # these raid member indicators from scan_disks() we have the
            # current truth provided so update the db role status accordingly.
            # N.B. this if else could be expanded to accommodate other
            # roles based on the fs found
            if d.fstype == 'isw_raid_member' or d.fstype == 'linux_raid_member':
                # We have an indicator of mdraid membership so update existing
                # role info if any.
                # N.B. We have a minor legacy issue in that prior to using json
                # format for the db role field we stored one of 2 strings.
                # if these 2 strings are found then ignore them as we then
                # overwrite with our current finding and in the new json format.
                # I.e. non None could also be a legacy entry so follow overwrite
                # path when legacy entry found by treating as a None entry.
                # TODO: When we reset migrations the following need only check
                # TODO: "dob.role is not None"
                if dob.role is not None and dob.role != 'isw_raid_member' \
                        and dob.role != 'linux_raid_member':
                    # get our known roles into a dictionary
                    known_roles = json.loads(dob.role)
                    # create or update an mdraid dictionary entry
                    known_roles['mdraid'] = str(d.fstype)
                    # return updated dict to json format and store in db object
                    dob.role = json.dumps(known_roles)
                else:  # We have a dob.role = None so just insert our new role.
                    # Also applies to legacy pre json role entries.
                    dob.role = '{"mdraid": "' + d.fstype + '"}'  # json string
            else:  # We know this disk is not an mdraid raid member.
                # No identified role from scan_disks() fstype value (mdraid
                # only for now), so we preserve any prior known roles not
                # exposed by scan_disks but remove the mdraid role if found.
                # TODO: When we reset migrations the following need only check
                # TODO: "dob.role is not None"
                if dob.role is not None and dob.role != 'isw_raid_member' \
                        and dob.role != 'linux_raid_member':
                    # remove mdraid role if found but preserve prior roles
                    # which should now only be in json format
                    known_roles = json.loads(dob.role)
                    if 'mdraid' in known_roles:
                        if len(known_roles) > 1:
                            # mdraid is not the only entry so we have to pull
                            # out only mdraid from dict and convert back to json
                            del known_roles['mdraid']
                            dob.role = json.dumps(known_roles)
                        else:
                            # mdraid was the only entry so we need not bother
                            # with dict edit and json conversion only to end up
                            # with an empty json {} so revert to default 'None'.
                            dob.role = None
                else:  # Empty or legacy role entry.
                    # We have either None or a legacy mdraid role when this disk
                    # is no longer an mdraid member. We can now assert None.
                    dob.role = None
            # If our existing Pool db knows of this disk's pool via its label:
            if (Pool.objects.filter(name=d.label).exists()):
                # update the disk db object's pool field accordingly.
                dob.pool = Pool.objects.get(name=d.label)

                # This is for backwards compatibility. Root pools created
                # before the pool.role migration need this. It can safely be
                # removed a few versions after 3.8-11 or when we reset migrations.
                if (d.root is True):
                    dob.pool.role = 'root'
                    dob.pool.save()
            else:  # this disk is not known to exist in any pool via its label
                dob.pool = None
            # If no pool has yet been found with this disk's label in and
            # the attached disk is our root disk (flagged by scan_disks)
            if (dob.pool is None and d.root is True):
                # setup our special root disk db entry in Pool
                # TODO: dynamically retrieve raid level.
                p = Pool(name=d.label, raid='single', role='root')
                p.disk_set.add(dob)
                p.save()
                # update disk db object to reflect special root pool status
                dob.pool = p
                dob.save()
                p.size = pool_usage(mount_root(p))[0]
                enable_quota(p)
                p.uuid = btrfs_uuid(dob.name)
                p.save()
            # save our updated db disk object
            dob.save()
        # Update online db entries with S.M.A.R.T availability and status.
        for do in Disk.objects.all():
            # find all the not offline db entries
            if (not do.offline):
                # We have an attached disk db entry.
                # Since our Disk.name model now uses by-id type names we can
                # do cheap matches to the beginnings of these names to find
                # virtio, md, or sdcard devices which are assumed to have no
                # SMART capability.
                # We also disable devices smart support when they have a
                # fake serial number as ascribed by scan_disks as any SMART
                # data collected is then less likely to be wrongly associated
                # with the next device that takes this temporary drive's name.
                # Also note that with no serial number some device types will
                # not have a by-id type name expected by the smart subsystem.
                # This has only been observed in no serial virtio devices.
                if (re.match('fake-serial-', do.serial) is not None) or \
                        (re.match('virtio-|md-|mmc-|nvme-', do.name) is not None):
                    # Virtio disks (named virtio-*), md devices (named md-*),
                    # and an sdcard reader that provides devs named mmc-* have
                    # no smart capability so avoid cluttering logs with
                    # exceptions on probing these with smart.available.
                    # nvme not yet supported by CentOS 7 smartmontools:
                    # https://www.smartmontools.org/ticket/657
                    # Thanks to @snafu in rockstor forum post 1567 for this.
                    do.smart_available = do.smart_enabled = False
                    continue
                # try to establish smart availability and status and update db
                try:
                    # for non ata/sata drives
                    do.smart_available, do.smart_enabled = smart.available(
                        do.name, do.smart_options)
                except Exception, e:
                    logger.exception(e)
                    do.smart_available = do.smart_enabled = False
            do.save()
        ds = DiskInfoSerializer(Disk.objects.all().order_by('name'), many=True)
        return Response(ds.data)
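
Before each rescan the method above blanks every stored device name with a 'detached-' prefix plus a dash-free uuid4, so stale names are never re-used and cannot be acted on by a non-refreshed UI. A minimal sketch of that naming scheme (the function name is illustrative only):

import uuid

def placeholder_device_name():
    # A fresh placeholder per scan; never re-used between scans.
    return 'detached-' + str(uuid.uuid4()).replace('-', '')

first, second = placeholder_device_name(), placeholder_device_name()
assert first != second
assert first.startswith('detached-') and len(first) == len('detached-') + 32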

Example 41

Project: roundware-server
Source File: commands.py
View license
def get_config(request):
    form = request.GET

    if 'project_id' not in form:
        raise RoundException("a project_id is required for this operation")
    project = models.Project.objects.get(id=form.get('project_id'))
    speakers = project.speaker_set.all()
    audiotracks = project.audiotrack_set.values()

    if 'device_id' not in form or ('device_id' in form and form['device_id'] == ""):
        device_id = str(uuid.uuid4())
    else:
        device_id = form.get('device_id')

    l = models.Language.objects.filter(language_code='en')[0]
    if 'language' in form and form['language'] != "":
        try:
            l = models.Language.objects.filter(
                language_code=form.get('language'))[0]
        except:
            pass

    # Get current available CPU as percentage.
    cpu_idle = psutil.cpu_times_percent().idle
    # Demo stream is enabled if enabled project wide or CPU idle is less than
    # CPU limit (default 50%.)
    demo_stream_enabled = project.demo_stream_enabled or cpu_idle < float(
        settings.DEMO_STREAM_CPU_LIMIT)

    # Create a new session if new_session is not equal to 'false'
    create_new_session = form.get('new_session') != 'false'

    session_id = 0
    if create_new_session:
        s = models.Session(
            device_id=device_id, starttime=datetime.datetime.now(), project=project, language=l)
        if 'client_type' in form:
            s.client_type = form.get('client_type')
        if 'client_system' in form:
            s.client_system = form.get('client_system')
        s.demo_stream_enabled = demo_stream_enabled

        s.save()
        session_id = s.id
        log_event('start_session', s.id, None)

    sharing_message = t("none set", project.sharing_message_loc, l)
    out_of_range_message = t("none set", project.out_of_range_message_loc, l)
    legal_agreement = t("none set", project.legal_agreement_loc, l)
    demo_stream_message = t("none set", project.demo_stream_message_loc, l)

    response = [
        {"device": {"device_id": device_id}},
        # TODO: This should be changed with a schema change to either add it to
        # the project table or create a new news/notification table...or something
        {"notifications": {"startup_message": settings.STARTUP_NOTIFICATION_MESSAGE}},
        {"session": {"session_id": session_id}},
        {"project": {
            "project_id": project.id,
            "project_name": project.name,
            "audio_format": project.audio_format,
            "max_recording_length": project.max_recording_length,
            "recording_radius": project.recording_radius,
            "sharing_message": sharing_message,
            "out_of_range_message": out_of_range_message,
            "sharing_url": project.sharing_url,
            "listen_questions_dynamic": project.listen_questions_dynamic,
            "speak_questions_dynamic": project.speak_questions_dynamic,
            "listen_enabled": project.listen_enabled,
            "geo_listen_enabled": project.geo_listen_enabled,
            "speak_enabled": project.speak_enabled,
            "geo_speak_enabled": project.geo_speak_enabled,
            "reset_tag_defaults_on_startup": project.reset_tag_defaults_on_startup,
            "legal_agreement": legal_agreement,
            "files_url": project.files_url,
            "files_version": project.files_version,
            "audio_stream_bitrate": project.audio_stream_bitrate,
            "demo_stream_enabled": demo_stream_enabled,
            "demo_stream_url": project.demo_stream_url,
            "demo_stream_message": demo_stream_message,
            "latitude": project.latitude,
            "longitude": project.longitude
        }
        },
        {"server": {"version": "2.0"}},
        {"speakers": speakers},
        {"audiotracks": [dict(d) for d in audiotracks]}
    ]

    return response
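
get_config() above falls back to a freshly generated uuid4 whenever the client did not supply a usable device_id, so every session still gets a unique device identity. A small sketch of that fallback, with form standing in for the request's GET parameters:

import uuid

def resolve_device_id(form):
    # Use the client-provided id when present and non-empty; otherwise mint one.
    device_id = form.get('device_id')
    if not device_id:
        device_id = str(uuid.uuid4())
    return device_id

assert resolve_device_id({'device_id': 'abc-123'}) == 'abc-123'
assert resolve_device_id({}) != resolve_device_id({'device_id': ''})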

Example 42

View license
def test_command_line_client():
    # Create a Project
    output = run('synapse', 
                 '--skip-checks',
                 'create',
                 '-name',
                 str(uuid.uuid4()), 
                 '-description', 
                 'test of command line client', 
                 'Project')
    project_id = parse(r'Created entity:\s+(syn\d+)\s+', output)
    schedule_for_cleanup(project_id)

    # Create a File
    filename = utils.make_bogus_data_file()
    schedule_for_cleanup(filename)
    output = run('synapse', 
                 '--skip-checks', 
                 'add', 
                 '-name', 
                 'BogusFileEntity', 
                 '-description', 
                 'Bogus data to test file upload', 
                 '-parentid', 
                 project_id, 
                 filename)
    file_entity_id = parse(r'Created/Updated entity:\s+(syn\d+)\s+', output)

    # Verify that we stored the file in Synapse
    f1 = syn.get(file_entity_id)
    fh = syn._getFileHandle(f1.dataFileHandleId)
    assert fh['concreteType'] == 'org.sagebionetworks.repo.model.file.S3FileHandle'

    # Get File from the command line
    output = run('synapse', 
                 '--skip-checks', 
                 'get',
                 file_entity_id)
    downloaded_filename = parse(r'Downloaded file:\s+(.*)', output)
    schedule_for_cleanup(downloaded_filename)
    assert os.path.exists(downloaded_filename)
    assert filecmp.cmp(filename, downloaded_filename)


    # Update the File
    filename = utils.make_bogus_data_file()
    schedule_for_cleanup(filename)
    output = run('synapse', 
                 '--skip-checks', 
                 'store', 
                 '--id', 
                 file_entity_id, 
                 filename)
    updated_entity_id = parse(r'Created/Updated entity:\s+(syn\d+)', output)

    # Get the File again
    output = run('synapse', 
                 '--skip-checks',
                 'get', 
                 file_entity_id)
    downloaded_filename = parse(r'Downloaded file:\s+(.*)', output)
    schedule_for_cleanup(downloaded_filename)
    assert os.path.exists(downloaded_filename)
    assert filecmp.cmp(filename, downloaded_filename)

    # Test query
    output = run('synapse', 
                 '--skip-checks', 
                 'query', 
                 'select id, name from entity where parentId=="%s"' % project_id)
    assert 'BogusFileEntity' in output
    assert file_entity_id in output


    # Move the file to new folder
    folder = syn.store(synapseclient.Folder(parentId=project_id))
    output = run('synapse', 
                 'mv',
                 '--id',
                 file_entity_id,
                 '--parentid',
                 folder.id)
    downloaded_filename = parse(r'Moved\s+(.*)', output)
    movedFile = syn.get(file_entity_id, downloadFile=False)
    assert movedFile.parentId == folder.id


    # Test Provenance
    repo_url = 'https://github.com/Sage-Bionetworks/synapsePythonClient'
    output = run('synapse', 
                 '--skip-checks',
                 'set-provenance', 
                 '-id', 
                 file_entity_id, 
                 '-name', 
                 'TestActivity', 
                 '-description', 
                 'A very excellent provenance', 
                 '-used', 
                 file_entity_id, 
                 '-executed', 
                 repo_url)
    activity_id = parse(r'Set provenance record (\d+) on entity syn\d+', output)

    output = run('synapse', 
                 '--skip-checks', 
                 'get-provenance', 
                 '--id', 
                 file_entity_id)

    activity = json.loads(output)
    assert activity['name'] == 'TestActivity'
    assert activity['description'] == 'A very excellent provenance'
    
    used = utils._find_used(activity, lambda used: 'reference' in used)
    assert used['reference']['targetId'] == file_entity_id
    
    used = utils._find_used(activity, lambda used: 'url' in used)
    assert used['url'] == repo_url
    assert used['wasExecuted'] == True

    # Note: Tests shouldn't have external dependencies
    #       but this is a pretty picture of Singapore
    singapore_url = 'http://upload.wikimedia.org/wikipedia/commons/' \
                    'thumb/3/3e/1_singapore_city_skyline_dusk_panorama_2011.jpg' \
                    '/1280px-1_singapore_city_skyline_dusk_panorama_2011.jpg'

    # Test external file handle
    output = run('synapse', 
                 '--skip-checks', 
                 'add', 
                 '-name', 
                 'Singapore', 
                 '-description', 
                 'A nice picture of Singapore', 
                 '-parentid', 
                 project_id, 
                 singapore_url)
    exteral_entity_id = parse(r'Created/Updated entity:\s+(syn\d+)\s+', output)

    # Verify that we created an external file handle
    f2 = syn.get(exteral_entity_id)
    fh = syn._getFileHandle(f2.dataFileHandleId)
    assert fh['concreteType'] == 'org.sagebionetworks.repo.model.file.ExternalFileHandle'

    output = run('synapse', 
                 '--skip-checks', 
                 'get', 
                 exteral_entity_id)
    downloaded_filename = parse(r'Downloaded file:\s+(.*)', output)
    schedule_for_cleanup(downloaded_filename)
    assert os.path.exists(downloaded_filename)

    # Delete the Project
    output = run('synapse', 
                 '--skip-checks', 
                 'delete', 
                 project_id)
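
This test, and the Synapse command-line tests that follow, name every throwaway Project (and Evaluation) with str(uuid.uuid4()) so repeated or concurrent runs never collide on an entity name. The core of that pattern, shown standalone (the prefix is purely illustrative):

import uuid

project_name = 'cli-test-' + str(uuid.uuid4())
another_run = 'cli-test-' + str(uuid.uuid4())
# Each run gets its own unique, easily greppable name.
assert project_name != another_run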

Example 43

View license
def test_command_line_client_annotations():
    # Create a Project
    output = run('synapse', 
                 '--skip-checks',
                 'create',
                 '-name',
                 str(uuid.uuid4()), 
                 '-description', 
                 'test of command line client', 
                 'Project')
    project_id = parse(r'Created entity:\s+(syn\d+)\s+', output)
    schedule_for_cleanup(project_id)

    # Create a File
    filename = utils.make_bogus_data_file()
    schedule_for_cleanup(filename)
    output = run('synapse', 
                 '--skip-checks', 
                 'add', 
                 '-name', 
                 'BogusFileEntity', 
                 '-description', 
                 'Bogus data to test file upload', 
                 '-parentid', 
                 project_id, 
                 filename)
    file_entity_id = parse(r'Created/Updated entity:\s+(syn\d+)\s+', output)

    # Test setting annotations
    output = run('synapse', 
                 '--skip-checks',
                 'set-annotations', 
                 '--id', 
                 file_entity_id, 
                 '--annotations',
                 '{"foo": 1, "bar": "1", "baz": [1, 2, 3]}',
    )

    # Test getting annotations
    # check that the three things set are correct
    # This test should be adjusted to check for equality of the
    # whole annotation dictionary once the issue of other
    # attributes (creationDate, eTag, id, uri) being returned is resolved
    # See: https://sagebionetworks.jira.com/browse/SYNPY-175
    
    output = run('synapse', 
                 '--skip-checks',
                 'get-annotations', 
                 '--id', 
                 file_entity_id
             )

    annotations = json.loads(output)
    assert annotations['foo'] == [1]
    assert annotations['bar'] == [u"1"]
    assert annotations['baz'] == [1, 2, 3]
    
    # Test setting annotations by replacing existing ones.
    output = run('synapse', 
                 '--skip-checks',
                 'set-annotations', 
                 '--id', 
                 file_entity_id, 
                 '--annotations',
                 '{"foo": 2}',
                 '--replace'
    )
    
    # Test that the annotation was updated
    output = run('synapse', 
                 '--skip-checks',
                 'get-annotations', 
                 '--id', 
                 file_entity_id
             )

    annotations = json.loads(output)

    assert annotations['foo'] == [2]

    # Since this replaces the existing annotations, previous values
    # should not be available.
    assert_raises(KeyError, lambda key: annotations[key], 'bar')
    assert_raises(KeyError, lambda key: annotations[key], 'baz')
    
    # Test running add command to set annotations on a new object
    filename2 = utils.make_bogus_data_file()
    schedule_for_cleanup(filename2)
    output = run('synapse', 
                 '--skip-checks', 
                 'add', 
                 '-name', 
                 'BogusData2', 
                 '-description', 
                 'Bogus data to test file upload with add and add annotations',
                 '-parentid', 
                 project_id, 
                 '--annotations',
                 '{"foo": 123}',
                 filename2)

    file_entity_id = parse(r'Created/Updated entity:\s+(syn\d+)\s+', output)

    # Test that the annotation was updated
    output = run('synapse', 
                 '--skip-checks',
                 'get-annotations', 
                 '--id', 
                 file_entity_id
             )

    annotations = json.loads(output)
    assert annotations['foo'] == [123]

    # Test running store command to set annotations on a new object
    filename3 = utils.make_bogus_data_file()
    schedule_for_cleanup(filename3)
    output = run('synapse', 
                 '--skip-checks', 
                 'store', 
                 '--name', 
                 'BogusData3', 
                 '--description', 
                 '\"Bogus data to test file upload with store and add annotations\"',
                 '--parentid', 
                 project_id, 
                 '--annotations',
                 '{"foo": 456}',
                 filename3)

    file_entity_id = parse(r'Created/Updated entity:\s+(syn\d+)\s+', output)

    # Test that the annotation was updated
    output = run('synapse', 
                 '--skip-checks',
                 'get-annotations', 
                 '--id', 
                 file_entity_id
             )

    annotations = json.loads(output)
    assert annotations['foo'] == [456]

Example 44

View license
def test_command_line_store_and_submit():
    # Create a Project
    output = run('synapse', 
                 '--skip-checks',
                 'store',
                 '--name',
                 str(uuid.uuid4()),
                 '--description',
                 'test of store command',
                 '--type',
                 'Project')
    project_id = parse(r'Created/Updated entity:\s+(syn\d+)\s+', output)
    schedule_for_cleanup(project_id)

    # Create and upload a file
    filename = utils.make_bogus_data_file()
    schedule_for_cleanup(filename)
    output = run('synapse',
                 '--skip-checks',
                 'store',
                 '--description',
                 'Bogus data to test file upload', 
                 '--parentid',
                 project_id,
                 '--file',
                 filename)
    file_entity_id = parse(r'Created/Updated entity:\s+(syn\d+)\s+', output)
    
    # Verify that we stored the file in Synapse
    f1 = syn.get(file_entity_id)
    fh = syn._getFileHandle(f1.dataFileHandleId)
    assert fh['concreteType'] == 'org.sagebionetworks.repo.model.file.S3FileHandle'

    # Test that entity is named after the file it contains
    assert f1.name == os.path.basename(filename)
    
    # Create an Evaluation to submit to
    eval = Evaluation(name=str(uuid.uuid4()), contentSource=project_id)
    eval = syn.store(eval)
    schedule_for_cleanup(eval)
    
    # Submit a bogus file
    output = run('synapse', 
                 '--skip-checks',
                 'submit',
                 '--evaluation',
                 eval.id, 
                 '--name',
                 'Some random name',
                 '--entity',
                 file_entity_id)
    submission_id = parse(r'Submitted \(id: (\d+)\) entity:\s+', output)
    
    # Testing different command line options for submitting to an evaluation
    # - submitting to an evaluation by evaluationID
    output = run('synapse', 
                 '--skip-checks',
                 'submit',
                 '--evalID',
                 eval.id, 
                 '--name',
                 'Some random name',
                 '--alias',
                 'My Team',
                 '--entity',
                 file_entity_id)
    submission_id = parse(r'Submitted \(id: (\d+)\) entity:\s+', output)
    

    # Update the file
    filename = utils.make_bogus_data_file()
    schedule_for_cleanup(filename)
    output = run('synapse', 
                 '--skip-checks',
                 'store',
                 '--id',
                 file_entity_id, 
                 '--file',
                 filename)
    updated_entity_id = parse(r'Updated entity:\s+(syn\d+)', output)
    schedule_for_cleanup(updated_entity_id)
    
    # Submit an updated bogus file and this time by evaluation name
    output = run('synapse', 
                 '--skip-checks', 
                 'submit', 
                 '--evaluationName', 
                 eval.name,
                 '--entity',
                 file_entity_id)
    submission_id = parse(r'Submitted \(id: (\d+)\) entity:\s+', output)

    # Tests shouldn't have external dependencies, but here it's required
    ducky_url = 'https://www.synapse.org/Portal/clear.cache.gif'

    # Test external file handle
    output = run('synapse', 
                 '--skip-checks', 
                 'store',
                 '--name',
                 'Rubber Ducky',
                 '--description',
                 'I like rubber duckies',
                 '--parentid',
                 project_id, 
                 '--file', 
                 ducky_url)
    exteral_entity_id = parse(r'Created/Updated entity:\s+(syn\d+)\s+', output)
    schedule_for_cleanup(exteral_entity_id)

    # Verify that we created an external file handle
    f2 = syn.get(exteral_entity_id)
    fh = syn._getFileHandle(f2.dataFileHandleId)
    assert fh['concreteType'] == 'org.sagebionetworks.repo.model.file.ExternalFileHandle'

    # Submit an external file to an evaluation and use provenance
    filename = utils.make_bogus_data_file()
    schedule_for_cleanup(filename)
    repo_url = 'https://github.com/Sage-Bionetworks/synapsePythonClient'
    output = run('synapse', 
                 '--skip-checks', 
                 'submit', 
                 '--evalID', 
                 eval.id, 
                 '--file',
                 filename,
                 '--parent',
                 project_id,
                 '--used',
                 exteral_entity_id,
                 '--executed',
                 repo_url
                 )
    submission_id = parse(r'Submitted \(id: (\d+)\) entity:\s+', output)

    # Delete project
    output = run('synapse', 
                 '--skip-checks',
                 'delete',
                 project_id)

Example 45

View license
def test_evaluations():
    # Create an Evaluation
    name = 'Test Evaluation %s' % str(uuid.uuid4())
    ev = Evaluation(name=name, description='Evaluation for testing', 
                    contentSource=project['id'], status='CLOSED')
    ev = syn.store(ev)

    try:
        
        # -- Get the Evaluation by name
        evalNamed = syn.getEvaluationByName(name)
        assert ev['contentSource'] == evalNamed['contentSource']
        assert ev['createdOn'] == evalNamed['createdOn']
        assert ev['description'] == evalNamed['description']
        assert ev['etag'] == evalNamed['etag']
        assert ev['id'] == evalNamed['id']
        assert ev['name'] == evalNamed['name']
        assert ev['ownerId'] == evalNamed['ownerId']
        assert ev['status'] == evalNamed['status']
        
        # -- Get the Evaluation by project
        evalProj = syn.getEvaluationByContentSource(project)
        evalProj = next(evalProj)
        assert ev['contentSource'] == evalProj['contentSource']
        assert ev['createdOn'] == evalProj['createdOn']
        assert ev['description'] == evalProj['description']
        assert ev['etag'] == evalProj['etag']
        assert ev['id'] == evalProj['id']
        assert ev['name'] == evalProj['name']
        assert ev['ownerId'] == evalProj['ownerId']
        assert ev['status'] == evalProj['status']
        
        # Update the Evaluation
        ev['status'] = 'OPEN'
        ev = syn.store(ev, createOrUpdate=True)
        assert ev.status == 'OPEN'

        # Add the current user as a participant
        myOwnerId = int(syn.getUserProfile()['ownerId'])
        syn._allowParticipation(ev, myOwnerId)

        # AUTHENTICATED_USERS = 273948
        # PUBLIC = 273949
        syn.setPermissions(ev, 273948, accessType=['READ'])
        syn.setPermissions(ev, 273949, accessType=['READ'])

        # test getPermissions
        permissions = syn.getPermissions(ev, 273949)
        assert ['READ'] == permissions

        permissions = syn.getPermissions(ev, syn.getUserProfile()['ownerId'])
        assert all(p in permissions for p in ['READ', 'CREATE', 'DELETE', 'UPDATE', 'CHANGE_PERMISSIONS', 'READ_PRIVATE_SUBMISSION'])

        # Test getSubmissions with no Submissions (SYNR-453)
        submissions = syn.getSubmissions(ev)
        assert len(list(submissions)) == 0

        # -- Get a Submission attachment belonging to another user (SYNR-541) --
        # See if the configuration contains test authentication
        try:
            config = configparser.ConfigParser()
            config.read(client.CONFIG_FILE)
            other_user = {}
            other_user['username'] = config.get('test-authentication', 'username')
            other_user['password'] = config.get('test-authentication', 'password')
            print("Testing SYNR-541")

            # Login as the test user
            testSyn = client.Synapse(skip_checks=True)
            testSyn.login(email=other_user['username'], password=other_user['password'])
            testOwnerId = int(testSyn.getUserProfile()['ownerId'])

            # Make a project
            other_project = Project(name=str(uuid.uuid4()))
            other_project = testSyn.createEntity(other_project)

            # Give the test user permission to read and join the evaluation
            syn._allowParticipation(ev, testOwnerId)

            # Make a file to submit
            with tempfile.NamedTemporaryFile(mode="w", delete=False) as f:
                filename = f.name
                f.write(str(random.gauss(0,1)) + '\n')

            f = File(filename, parentId=other_project.id,
                     name='Submission 999',
                     description ="Haha!  I'm inaccessible...")
            entity = testSyn.store(f)

            ## test submission by evaluation ID
            submission = testSyn.submit(ev.id, entity, submitterAlias="My Nickname")

            # Mess up the cached file so that syn._getWithEntityBundle must download again
            os.utime(filename, (0, 0))

            # Grab the Submission as the original user
            fetched = syn.getSubmission(submission['id'])
            assert os.path.exists(fetched['filePath'])

            # make sure the fetched file is the same as the original (PLFM-2666)
            assert filecmp.cmp(filename, fetched['filePath'])


        except configparser.Error:
            print('Skipping test for SYNR-541: No [test-authentication] in %s' % client.CONFIG_FILE)

        # Increase this to fully test paging by getEvaluationSubmissions
        # not to be less than 2
        num_of_submissions = 2

        # Create a bunch of Entities and submit them for scoring
        print("Creating Submissions")
        for i in range(num_of_submissions):
            with tempfile.NamedTemporaryFile(mode="w", delete=False) as f:
                filename = f.name
                f.write(str(random.gauss(0,1)) + '\n')

            f = File(filename, parentId=project.id, name='entry-%02d' % i,
                     description='An entry for testing evaluation')
            entity=syn.store(f)
            syn.submit(ev, entity, name='Submission %02d' % i, submitterAlias='My Team')

        # Score the submissions
        submissions = syn.getSubmissions(ev, limit=num_of_submissions-1)
        print("Scoring Submissions")
        for submission in submissions:
            assert re.match('Submission \d+', submission['name'])
            status = syn.getSubmissionStatus(submission)
            status.score = random.random()
            if submission['name'] == 'Submission 01':
                status.status = 'INVALID'
                status.report = 'Uh-oh, something went wrong!'
            else:
                status.status = 'SCORED'
                status.report = 'a fabulous effort!'
            syn.store(status)

        # Annotate the submissions
        print("Annotating Submissions")
        bogosity = {}
        submissions = syn.getSubmissions(ev)
        b = 123
        for submission, status in syn.getSubmissionBundles(ev):
            bogosity[submission.id] = b
            a = dict(foo='bar', bogosity=b)
            b += 123
            status['annotations'] = to_submission_status_annotations(a)
            set_privacy(status['annotations'], key='bogosity', is_private=False)
            syn.store(status)

        # Test that the annotations stuck
        for submission, status in syn.getSubmissionBundles(ev):
            a = from_submission_status_annotations(status.annotations)
            assert a['foo'] == 'bar'
            assert a['bogosity'] == bogosity[submission.id]
            for kvp in status.annotations['longAnnos']:
                if kvp['key'] == 'bogosity':
                    assert kvp['isPrivate'] == False

        # test query by submission annotations
        # These queries run against an eventually consistent index table which is
        # populated by an asynchronous worker. Thus, the queries may remain out
        # of sync for some unbounded, but assumed to be short time.
        attempts = 2
        while attempts > 0:
            try:
                print("Querying for submissions")
                results = syn.restGET("/evaluation/submission/query?query=SELECT+*+FROM+evaluation_%s" % ev.id)
                print(results)
                assert results[u'totalNumberOfResults'] == num_of_submissions+1

                results = syn.restGET("/evaluation/submission/query?query=SELECT+*+FROM+evaluation_%s where bogosity > 200" % ev.id)
                print(results)
                assert results[u'totalNumberOfResults'] == num_of_submissions
            except AssertionError as ex1:
                print("failed query: ", ex1)
                attempts -= 1
                if attempts > 0: print("retrying...")
                time.sleep(2)
            else:
                attempts = 0

        ## Test that we can retrieve submissions with a specific status
        invalid_submissions = list(syn.getSubmissions(ev, status='INVALID'))
        assert len(invalid_submissions) == 1, len(invalid_submissions)
        assert invalid_submissions[0]['name'] == 'Submission 01'

    finally:
        # Clean up
        syn.delete(ev)
        if 'testSyn' in locals():
            if 'other_project' in locals():
                # Clean up, since the current user can't access this project
                # This also removes references to the submitted object :)
                testSyn.delete(other_project)
            if 'team' in locals():
                ## remove team
                testSyn.delete(team)

    ## Just deleted it. Shouldn't be able to get it.
    assert_raises(SynapseHTTPError, syn.getEvaluation, ev)
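
Example 45 names its Evaluation (and the second user's scratch Project) with str(uuid.uuid4()) so repeated runs against the same Synapse server never collide on names. A minimal, stand-alone illustration of that naming pattern — plain functions, not the synapseclient API:

import uuid

def unique_name(prefix):
    # Append a random UUID so repeated or concurrent test runs
    # never reuse an existing server-side name
    return "%s %s" % (prefix, uuid.uuid4())

eval_name = unique_name("Test Evaluation")
project_name = str(uuid.uuid4())
print(eval_name)
print(project_name)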

Example 46

Project: eden
Source File: s3notify.py
View license
    @classmethod
    def notify(cls, resource_id):
        """
            Asynchronous task to notify a subscriber about updates,
            runs a POST?format=msg request against the subscribed
            controller which extracts the data and renders and sends
            the notification message (see send()).

            @param resource_id: the pr_subscription_resource record ID
        """

        _debug("S3Notifications.notify(resource_id=%s)", resource_id)

        db = current.db
        s3db = current.s3db

        stable = s3db.pr_subscription
        rtable = db.pr_subscription_resource
        ftable = s3db.pr_filter

        # Extract the subscription data
        join = stable.on(rtable.subscription_id == stable.id)
        left = ftable.on(ftable.id == stable.filter_id)

        # @todo: should not need rtable.resource here
        row = db(rtable.id == resource_id).select(stable.id,
                                                  stable.pe_id,
                                                  stable.frequency,
                                                  stable.notify_on,
                                                  stable.method,
                                                  stable.email_format,
                                                  rtable.id,
                                                  rtable.resource,
                                                  rtable.url,
                                                  rtable.last_check_time,
                                                  ftable.query,
                                                  join=join,
                                                  left=left).first()
        if not row:
            return True

        s = getattr(row, "pr_subscription")
        r = getattr(row, "pr_subscription_resource")
        f = getattr(row, "pr_filter")

        # Create a temporary token to authorize the lookup request
        auth_token = str(uuid4())

        # Store the auth_token in the subscription record
        r.update_record(auth_token=auth_token)
        db.commit()

        # Construct the send-URL
        settings = current.deployment_settings
        public_url = settings.get_base_public_url()
        lookup_url = "%s/%s/%s" % (public_url,
                                   current.request.application,
                                   r.url.lstrip("/"))

        # Break up the URL into its components
        purl = list(urlparse.urlparse(lookup_url))

        # Subscription parameters
        last_check_time = s3_encode_iso_datetime(r.last_check_time)
        query = {"subscription": auth_token, "format": "msg"}
        if "upd" in s.notify_on:
            query["~.modified_on__ge"] = last_check_time
        else:
            query["~.created_on__ge"] = last_check_time

        # Filters
        if f.query:
            from s3filter import S3FilterString
            resource = s3db.resource(r.resource)
            fstring = S3FilterString(resource, f.query)
            for k, v in fstring.get_vars.iteritems():
                if v is not None:
                    if k in query:
                        value = query[k]
                        if type(value) is list:
                            value.append(v)
                        else:
                            query[k] = [value, v]
                    else:
                        query[k] = v
            query_nice = s3_unicode(fstring.represent())
        else:
            query_nice = None

        # Add subscription parameters and filters to the URL query, and
        # put the URL back together
        query = urlencode(query)
        if purl[4]:
            query = "&".join((purl[4], query))
        page_url = urlparse.urlunparse([purl[0], # scheme
                                        purl[1], # netloc
                                        purl[2], # path
                                        purl[3], # params
                                        query,   # query
                                        purl[5], # fragment
                                        ])

        # Serialize data for send (avoid second lookup in send)
        data = json.dumps({"pe_id": s.pe_id,
                           "notify_on": s.notify_on,
                           "method": s.method,
                           "email_format": s.email_format,
                           "resource": r.resource,
                           "last_check_time": last_check_time,
                           "filter_query": query_nice,
                           "page_url": lookup_url,
                           "item_url": None,
                           })

        # Send the request
        _debug("Requesting %s", page_url)
        req = urllib2.Request(page_url, data=data)
        req.add_header("Content-Type", "application/json")
        success = False
        try:
            response = json.loads(urllib2.urlopen(req).read())
            message = response["message"]
            if response["status"] == "success":
                success = True
        except urllib2.HTTPError, e:
            message = ("HTTP %s: %s" % (e.code, e.read()))
        except:
            exc_info = sys.exc_info()[:2]
            message = ("%s: %s" % (exc_info[0].__name__, exc_info[1]))
        _debug(message)

        # Update time stamps and unlock, invalidate auth token
        intervals = s3db.pr_subscription_check_intervals
        interval = datetime.timedelta(minutes=intervals.get(s.frequency, 0))
        if success:
            last_check_time = datetime.datetime.utcnow()
            next_check_time = last_check_time + interval
            r.update_record(auth_token=None,
                            locked=False,
                            last_check_time=last_check_time,
                            next_check_time=next_check_time)
        else:
            r.update_record(auth_token=None,
                            locked=False)
        db.commit()

        # Done
        return message
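
The notify() task above mints a one-time authorization token with str(uuid4()), stores it on the subscription record, sends it as the "subscription" query parameter, and clears it once the request has been answered. A compressed sketch of that token lifecycle, with a plain dict standing in for the web2py record:

from uuid import uuid4

subscription = {"auth_token": None}   # stands in for the subscription record

def issue_token(record):
    # One-time token that authorizes a single lookup request
    token = str(uuid4())
    record["auth_token"] = token
    return token

def invalidate_token(record):
    # Invalidate the token once the request has been handled
    record["auth_token"] = None

token = issue_token(subscription)
query = {"subscription": token, "format": "msg"}
# ... perform the lookup request with `query` ...
invalidate_token(subscription)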

Example 47

Project: eden
Source File: s3task.py
View license
    def configure_tasktable_crud(self,
                                 task=None,
                                 function=None,
                                 args=None,
                                 vars=None,
                                 period = 3600, # seconds, so 1 hour
                                 ):
        """
            Configure the task table for interactive CRUD,
            setting defaults, widgets and hiding unnecessary fields

            @param task: the task name (will use a UUID if omitted)
            @param function: the function name (won't hide if omitted)
            @param args: the function position arguments
            @param vars: the function named arguments
        """

        if args is None:
            args = []
        if vars is None:
            vars = {}

        T = current.T
        NONE = current.messages["NONE"]
        UNLIMITED = T("unlimited")

        tablename = self.TASK_TABLENAME
        table = current.db[tablename]

        table.uuid.readable = table.uuid.writable = False

        table.prevent_drift.readable = table.prevent_drift.writable = False

        table.sync_output.readable = table.sync_output.writable = False

        table.times_failed.readable = False

        # Configure start/stop time fields
        for fn in ("start_time", "stop_time"):
            field = table[fn]
            field.represent = lambda dt: \
                            S3DateTime.datetime_represent(dt, utc=True)
            field.requires = IS_UTC_DATETIME()
            set_min = set_max = None
            if fn == "start_time":
                set_min = "#scheduler_task_stop_time"
            elif fn == "stop_time":
                set_max = "#scheduler_task_start_time"
            field.widget = S3CalendarWidget(past = 0,
                                            set_min = set_min,
                                            set_max = set_max,
                                            timepicker = True,
                                            )

        if not task:
            import uuid
            task = str(uuid.uuid4())
        field = table.task_name
        field.default = task
        field.readable = False
        field.writable = False

        if function:
            field = table.function_name
            field.default = function
            field.readable = False
            field.writable = False

        field = table.args
        field.default = json.dumps(args)
        field.readable = False
        field.writable = False

        field = table.repeats
        field.label = T("Repeat")
        field.comment = T("times (0 = unlimited)")
        field.default = 0
        field.represent = lambda opt: \
            opt and "%s %s" % (opt, T("times")) or \
            opt == 0 and UNLIMITED or \
            NONE

        field = table.period
        field.label = T("Run every")
        field.default = period
        field.widget = S3TimeIntervalWidget.widget
        field.requires = IS_TIME_INTERVAL_WIDGET(table.period)
        field.represent = S3TimeIntervalWidget.represent
        field.comment = T("seconds")

        table.timeout.default = 600
        table.timeout.represent = lambda opt: \
            opt and "%s %s" % (opt, T("seconds")) or \
            opt == 0 and UNLIMITED or \
            NONE

        field = table.vars
        field.default = json.dumps(vars)
        field.readable = field.writable = False

        # Always use "default" controller (web2py uses current controller),
        # otherwise the anonymous worker does not pass the controller
        # permission check and gets redirected to login before it reaches
        # the task function which does the s3_impersonate
        field = table.application_name
        field.default = "%s/default" % current.request.application
        field.readable = field.writable = False
        table.group_name.readable = table.group_name.writable = False
        table.status.readable = table.status.writable = False
        table.next_run_time.readable = table.next_run_time.writable = False
        table.times_run.readable = table.times_run.writable = False
        table.assigned_worker_name.readable = \
            table.assigned_worker_name.writable = False

        current.s3db.configure(tablename,
                               list_fields = ["id",
                                              "enabled",
                                              "start_time",
                                              "repeats",
                                              "period",
                                              (T("Last run"), "last_run_time"),
                                              (T("Last status"), "status"),
                                              (T("Next run"), "next_run_time"),
                                              "stop_time"
                                              ],
                               )

        response = current.response
        if response:
            response.s3.crud_strings[tablename] = Storage(
                label_create = T("Create Job"),
                title_display = T("Scheduled Jobs"),
                title_list = T("Job Schedule"),
                title_update = T("Edit Job"),
                label_list_button = T("List Jobs"),
                msg_record_created = T("Job added"),
                msg_record_modified = T("Job updated"),
                msg_record_deleted = T("Job deleted"),
                msg_list_empty = T("No jobs configured yet"),
                msg_no_match = T("No jobs configured"))

        return
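
When no task name is passed, configure_tasktable_crud() above falls back to str(uuid.uuid4()) so every ad-hoc job gets a distinct scheduler entry. The fallback on its own is simply:

import uuid

def default_task_name(task=None):
    # Use the caller's name when given, otherwise a random UUID
    return task or str(uuid.uuid4())

print(default_task_name())        # e.g. 'd6f0c1e2-...'
print(default_task_name("sync"))  # explicit names are kept as-is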

Example 48

Project: eden
Source File: s3_update_check.py
View license
def update_check(settings):
    """
        Check whether the dependencies are sufficient to run Eden

        @ToDo: Load deployment_settings so that we can configure the update_check
               - need to rework so that 000_config.py is parsed 1st

        @param settings: the deployment_settings
    """

    # Get Web2py environment into our globals.
    #globals().update(**environment)
    request = current.request

    # Fatal errors
    errors = []
    # Non-fatal warnings
    warnings = []

    # -------------------------------------------------------------------------
    # Check Python libraries

    # Get mandatory global dependencies
    app_path = request.folder

    gr_path = os.path.join(app_path, "requirements.txt")
    or_path = os.path.join(app_path, "optional_requirements.txt")

    global_dep = parse_requirements({}, gr_path)
    optional_dep = parse_requirements({}, or_path)

    templates = settings.get_template()
    location = settings.get_template_location()
    if not isinstance(templates, (tuple, list)):
        templates = (templates,)
    template_dep = {}
    template_optional_dep = {}
    for template in templates:
        tr_path = os.path.join(app_path, location, "templates", template, "requirements.txt")
        tor_path = os.path.join(app_path, location, "templates", template, "optional_requirements.txt")
        parse_requirements(template_dep, tr_path)
        parse_requirements(template_optional_dep, tor_path)

    # Remove optional dependencies which are already accounted for in template dependencies
    unique = set(optional_dep.keys()).difference(set(template_dep.keys()))
    for dependency in optional_dep.keys():
        if dependency not in unique:
            del optional_dep[dependency]

    # Override optional dependency messages from template
    unique = set(optional_dep.keys()).difference(set(template_optional_dep.keys()))
    for dependency in optional_dep.keys():
        if dependency not in unique:
            del optional_dep[dependency]

    errors, warnings = s3_check_python_lib(global_dep, template_dep, template_optional_dep, optional_dep)
    # @ToDo: Move these to Template
    # for now this is done in s3db.climate_first_run()
    if settings.has_module("climate"):
        if settings.get_database_type() != "postgres":
            errors.append("Climate unresolved dependency: PostgreSQL required")
        try:
           import rpy2
        except ImportError:
           errors.append("Climate unresolved dependency: RPy2 required")
        try:
           from Scientific.IO import NetCDF
        except ImportError:
           warnings.append("Climate unresolved dependency: NetCDF required if you want to import readings")
        try:
           from scipy import stats
        except ImportError:
           warnings.append("Climate unresolved dependency: SciPy required if you want to generate graphs on the map")

    # -------------------------------------------------------------------------
    # Check Web2Py version
    #
    # Currently, the minimum usable Web2py is determined by whether the
    # Scheduler is available
    web2py_minimum_version = "Version 2.4.7-stable+timestamp.2013.05.27.11.49.44"
    # Offset of datetime in return value of parse_version.
    datetime_index = 4
    web2py_version_ok = True
    try:
        from gluon.fileutils import parse_version
    except ImportError:
        web2py_version_ok = False
    if web2py_version_ok:
        try:
            web2py_minimum_parsed = parse_version(web2py_minimum_version)
            web2py_minimum_datetime = web2py_minimum_parsed[datetime_index]
            version_info = open("VERSION", "r")
            web2py_installed_version = version_info.read().split()[-1].strip()
            version_info.close()
            if isinstance(web2py_installed_version, str):
                # Post 2.4.2, global_settings.web2py_version is unparsed
                web2py_installed_parsed = parse_version(web2py_installed_version)
                web2py_installed_datetime = web2py_installed_parsed[datetime_index]
            else:
                # 2.4.2 & earlier style
                web2py_installed_datetime = web2py_installed_version[datetime_index]
            web2py_version_ok = web2py_installed_datetime >= web2py_minimum_datetime
        except:
            # Will get AttributeError if Web2py's parse_version is too old for
            # its current version format, which changed in 2.3.2.
            web2py_version_ok = False
    if not web2py_version_ok:
        warnings.append(
            "The installed version of Web2py is too old to support the current version of Sahana Eden."
            "\nPlease upgrade Web2py to at least version: %s" % \
            web2py_minimum_version)

    # -------------------------------------------------------------------------
    # Create required directories if needed
    databases_dir = os.path.join(app_path, "databases")
    try:
        os.stat(databases_dir)
    except OSError:
        # not found, create it
        os.mkdir(databases_dir)

    # -------------------------------------------------------------------------
    # Copy in Templates
    # - 000_config.py (machine-specific settings)
    # - rest are run in-place
    #
    template_folder = os.path.join(app_path, "modules", "templates")

    template_files = {
        # source : destination
        "000_config.py" : os.path.join("models", "000_config.py"),
    }

    copied_from_template = []

    for t in template_files:
        src_path = os.path.join(template_folder, t)
        dst_path = os.path.join(app_path, template_files[t])
        try:
            os.stat(dst_path)
        except OSError:
            # Not found, copy from template
            if t == "000_config.py":
                input = open(src_path)
                output = open(dst_path, "w")
                for line in input:
                    if "akeytochange" in line:
                        # Generate a random hmac_key to secure the passwords in case
                        # the database is compromised
                        import uuid
                        hmac_key = uuid.uuid4()
                        line = 'settings.auth.hmac_key = "%s"' % hmac_key
                    output.write(line)
                output.close()
                input.close()
            else:
                import shutil
                shutil.copy(src_path, dst_path)
            copied_from_template.append(template_files[t])

            # @ToDo: WebSetup
            #  http://eden.sahanafoundation.org/wiki/DeveloperGuidelines/WebSetup
            #if not os.path.exists("%s/applications/websetup" % os.getcwd()):
            #    # @ToDo: Check Permissions
            #    # Copy files into this folder (@ToDo: Pythonise)
            #    cp -r private/websetup "%s/applications" % os.getcwd()
            # Launch WebSetup
            #redirect(URL(a="websetup", c="default", f="index",
            #             vars=dict(appname=request.application,
            #                       firstTime="True")))
        else:
            # Found the file in the destination
            # Check if it has been edited
            import re
            edited_pattern = r"FINISHED_EDITING_\w*\s*=\s*(True|False)"
            edited_matcher = re.compile(edited_pattern).match
            has_edited = False
            with open(dst_path) as f:
                for line in f:
                    edited_result = edited_matcher(line)
                    if edited_result:
                        has_edited = True
                        edited = edited_result.group(1)
                        break
            if has_edited and (edited != "True"):
                errors.append("Please edit %s before starting the system." % t)
            # Check if it's up to date (i.e. a critical update requirement)
            version_pattern = r"VERSION =\s*([0-9]+)"
            version_matcher = re.compile(version_pattern).match
            has_version = False
            with open(dst_path) as f:
                for line in f:
                    version_result = version_matcher(line)
                    if version_result:
                        has_version = True
                        version = version_result.group(1)
                        break
            if not has_version:
                error = "Your %s is using settings from the old templates system. Please switch to the new templates system: http://eden.sahanafoundation.org/wiki/DeveloperGuidelines/Templates" % t
                errors.append(error)
            elif int(version) != VERSION:
                error = "Your %s is using settings from template version %s. Please update with new settings from template version %s before starting the system." % \
                                (t, version, VERSION)
                errors.append(error)

    if copied_from_template:
        errors.append(
            "The following files were copied from templates and should be edited: %s" %
            ", ".join(copied_from_template))

    return {"error_messages": errors, "warning_messages": warnings}

Example 49

Project: seosuite
Source File: __init__.py
View license
def crawl(urls, db, internal=False, delay=0, user_agent=None,
    url_associations={}, run_id=None, processed_urls={}, limit=0):

    run_id = run_id or uuid.uuid4()
    print "Starting crawl with run_id: %s" % run_id

    def _save_state(run_id, u, ua):

        if not os.path.exists(JOBS_DIR):
            os.makedirs(JOBS_DIR)

        print len(u), len(ua)
        if len(u) == 0 and len(ua) == 0:
            return

        # open the job file
        with gzip.open("%s/%s.gz" % (JOBS_DIR, run_id), 'w+') as f:
            data = {
                'urls': u,
                'associations': ua,
            }

            f.write(json.dumps(data))


    atexit.register(_save_state, run_id, urls, url_associations)

    run_count = 0
    limit_reached = False
    while len(urls) > 0:
        run_count += 1
        url = urls[0]

        print "\nProcessing (%d / %d): %s" % (run_count, len(urls), url)
        if not is_full_url(url):
            processed_urls[url] = urls.pop(0)
            continue
            # raise ValueError('A relative url as provided: %s. Please ensure that all urls are absolute.' % url)

        processed_urls[url] = None

        results = retrieve_url(url, user_agent)

        for res in results:

            lint_errors = {}
            page_details = {}

            if res['code'] == 200 and res['content_type'] == 'text/html':

                lint_errors, page_details, links, sources = process_html(res['content'], res['url'])

                record = store_results(db, run_id, res, lint_errors, page_details)
                processed_urls[url] = record
                url_associations[url] = {}

                # Process links from the page
                if links and len(links) > 0:
                    for link in links:
                        link_url = link['url']

                        if not link['valid']:
                            # Process any malformed links
                            bad_link = store_results(db, run_id, {
                                'url': link_url,
                                'code': 0,
                                }, {}, {}, None)
                            processed_urls[link_url] = bad_link
                            associate_link(db, record, bad_link, run_id, 'anchor', link.get('text'), link.get('alt'), link.get('rel'))
                        elif not is_internal_url(link_url, url):
                            # Process all external links and create the
                            if link_url not in processed_urls:
                                link_results = retrieve_url(link_url, user_agent, False)

                                for link_result in link_results:
                                    link_store = store_results(db, run_id, link_result, {}, {}, True)
                                    processed_urls[link_result['url']] = link_store

                                    # Associate links
                                    associate_link(db, record, link_store, run_id, 'anchor', link.get('text'), link.get('alt'), link.get('rel'))
                            else:
                                associate_link(db, record, processed_urls[link_url], run_id, 'anchor', link.get('text'), link.get('alt'), link.get('rel'))

                        elif internal and is_internal_url(link_url, url) and link_url not in processed_urls and link_url not in urls:
                            if not limit_reached:
                                urls.append(link_url)
                                if limit and len(urls) >= limit:
                                    limit_reached = True
                            url_associations[url][link_url] = link

                # Process sources from the page
                if sources and len(sources) > 0:
                    for source in sources:
                        source_url = source['url']

                        if source_url not in processed_urls:
                            source_results = retrieve_url(source_url, user_agent, False)

                            for source_result in source_results:
                                source_internal = is_internal_url(source_result['url'], url)
                                source_store = store_results(db, run_id, source_result, {}, {}, not source_internal)
                                processed_urls[source_url] = source_store
                                associate_link(db, record, source_store, run_id, 'asset', None, source.get('alt'), None)

                        else:
                            associate_link(db, record, processed_urls[source_url], run_id, 'asset', None, source.get('alt'), None)

            else:
                record = store_results(db, run_id, res, lint_errors, page_details, False)
                processed_urls[url] = record

        time.sleep( delay / 1000.0 )
        urls.pop(0)

    # Process associations
    for url, associations in url_associations.iteritems():
        for association, link in associations.iteritems():
            to_id = processed_urls.get(url)
            from_id = processed_urls.get(association)
            if to_id and from_id and from_id != to_id:
                associate_link(db, to_id, from_id, run_id, 'anchor', link.get('text'), link.get('alt'), link.get('rel'))

    # Clean up any save files that might exist
    if os.path.exists('%s/%s.gz' % (JOBS_DIR, run_id)):
        print "Deleting job file (%s/%s.gz)" % (JOBS_DIR, run_id)
        os.remove('%s/%s.gz' % (JOBS_DIR, run_id))

    return run_id
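
crawl() above labels an entire run with uuid.uuid4() (unless a run_id is passed in to resume a saved job) and uses that id to name the gzip job file written on exit. A small sketch of that id-plus-save-file pattern, written for Python 3 rather than the Python 2 of the original:

import gzip
import json
import os
import uuid

JOBS_DIR = "jobs"   # illustrative location for saved job state

def start_run(run_id=None):
    # Keep a caller-supplied id (resuming a job), otherwise mint one
    return run_id or uuid.uuid4()

def save_state(run_id, urls, associations):
    # Persist the pending work under the run id so the crawl can resume
    os.makedirs(JOBS_DIR, exist_ok=True)
    with gzip.open(os.path.join(JOBS_DIR, "%s.gz" % run_id), "wt") as f:
        json.dump({"urls": urls, "associations": associations}, f)

run_id = start_run()
save_state(run_id, ["https://example.com/"], {})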

Example 50

Project: scalarizr
Source File: app.py
View license
    def start(self):
        self._logger.debug("Initialize scalarizr...")
        _init()
        _init_environ()

        # Starting scalarizr daemon initialization
        globals()['_pid'] = pid = os.getpid()
        self._logger.info('[pid: %d] Starting scalarizr %s', pid, __version__)
        __node__['start_time'] = time.time()

        if not 'Windows' == linux.os['family']:
            # Check for another running scalarzir
            if os.path.exists(PID_FILE):
                try:
                    another_pid = None
                    with open(PID_FILE, 'r') as fp:
                        another_pid = int(fp.read().strip())
                except ValueError:
                    pass
                else:
                    if pid != another_pid and os.path.exists('/proc/%s/status' % (another_pid,)):
                        self._logger.error('Cannot start scalarizr: Another process (pid: %s) already running', another_pid)
                        sys.exit(1)

            # Write PID
            with open(PID_FILE, 'w') as fp:
                fp.write(str(pid))

        cnf = bus.cnf

        optparser = bus.optparser
        if optparser and optparser.values.configure:
            do_configure()
            sys.exit()

        elif optparser:
            if optparser.values.import_server:
                print "Starting import process..."
                print "Don't terminate Scalarizr until Scalr will create the new role"
                __node__['state'] = 'importing'
                # Load Command-line configuration options and auto-configure Scalarizr

                values = CmdLineIni.to_kvals(optparser.values.cnf)
                if not values.get('server_id'):
                    values['server_id'] = str(uuid.uuid4())
                self._logger.info('Configuring Scalarizr. This can take a few minutes...')
                cnf.reconfigure(values=values, silent=True, yesall=True)
            elif __node__['state'] == 'importing':
                # Reset state
                __node__['state'] = 'unknown'

        if __node__['state'] != 'importing':
            self._talk_to_updclient()
            # UpdateClient should fetch meta-data for us.
            metadata.wait(timeout=60)

        if linux.os.windows:
            try:
                wintool.wait_boot()
            except wintool.RebootExpected:
                self._logger.info('Waiting for interruption...')
                time.sleep(600)

        try:
            server_id = __node__['server_id']
        except KeyError:
            server_id = None
        if optparser and not optparser.values.import_server \
                and server_id != metadata.user_data()['serverid']:
            # This role was bundled with Cloud API call (i.e. CreateImage)
            # Now we're starting with a new server and should reset it's state
            self._logger.info(('This image was bundled with cloud API call. '
                    'Cleaning up ancestor server data'))
            _cleanup_after_rebundle()
            __node__['state'] = 'bootstrapping'

        if __node__['state'] == 'unknown':
            __node__['state'] = 'bootstrapping'
        if __node__['state'] == 'rebundling':
            __node__['state'] = 'running'

        if __node__['state'] == 'bootstrapping':
            _apply_user_data(from_scalr=False)

        # Load INI files configuration
        cnf.bootstrap(force_reload=True)
        ini = cnf.rawini

        # Initialize platform module
        _init_platform()
        pl = bus.platform

        # Initialize local database
        _init_db()

        STATE['global.start_after_update'] = int(bool(STATE['global.version'] and STATE['global.version'] != __version__))
        STATE['global.version'] = __version__

        if __node__['state'] == 'importing' and not linux.os.windows:
            try:
                pkgmgr.updatedb()
            except:
                self._logger.warn('Failed to update package manager database: %s',
                    sys.exc_info()[1], exc_info=sys.exc_info())

        # Check Scalr version
        if not bus.scalr_version:
            version_file = cnf.private_path('.scalr-version')
            if os.path.exists(version_file):
                bus.scalr_version = None
                with open(version_file, 'r') as fp:
                    bus.scalr_version = tuple(fp.read().strip().split('.'))
            else:
                bus.scalr_version = _detect_scalr_version()
                with open(version_file, 'w') as fp:
                    fp.write('.'.join(map(str, bus.scalr_version)))

        # Apply Command-line passed configuration options
        if optparser:
            cnf.update(CmdLineIni.to_ini_sections(optparser.values.cnf))

        # Validate configuration
        num_errors = do_validate_cnf()
        if num_errors or (optparser and optparser.values.validate_cnf):
            sys.exit(int(not num_errors or 1))

        # Initialize scalarizr services
        self._init_services()

        if STATE['global.start_after_update'] and __node__['state'] == 'running':
            self._logger.info('Scalarizr was updated to %s', __version__)
            __node__['messaging'].send(
                'HostUpdate',
                body={'scalarizr': {'version': __version__}}
            )

        if __node__['state'] == 'running':
            # ReSync user-data
            _apply_user_data(from_scalr=True)
        try:
            bus.fire('init')
        except:
            self._logger.warn('Caught exception in "init": %s', sys.exc_info()[1],
                        exc_info=sys.exc_info())

        # Install signal handlers
        if not linux.os.windows:
            signal.signal(signal.SIGTERM, self.onSIGTERM)

        self._start_services()

        # Fire start
        self.running = True
        try:
            bus.fire("start")
        except (BaseException, Exception) as e:
            if isinstance(e, SystemExit):
                raise
            self._logger.warn('Caught exception in "start": %s', e, exc_info=sys.exc_info())

        try:
            while self.running:
                if linux.os.windows_family:
                    rc = win32event.WaitForSingleObject(self.hWaitStop, 30000)
                    if rc == win32event.WAIT_OBJECT_0:
                        # Service stopped, stop main loop
                        break
                else:
                    try:
                        select.select([], [], [], 30)
                    except select.error, e:
                        if e.args[0] == 4:
                            # Interrupted syscall
                            continue
                        raise

        except KeyboardInterrupt:
            self._logger.debug('Mainloop: KeyboardInterrupt')
        finally:
            self._logger.debug('Mainloop: finally')
            if self.running and os.getpid() == _pid:
                self._shutdown()
        self._logger.debug('Mainloop: leave')
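
When importing a server (optparser.values.import_server), start() above generates a server_id with str(uuid.uuid4()) only if the command-line values did not already supply one; an existing id is left untouched. That guard in isolation:

import uuid

def ensure_server_id(values):
    # Generate an id only for servers that do not have one yet
    if not values.get("server_id"):
        values["server_id"] = str(uuid.uuid4())
    return values

print(ensure_server_id({}))                       # fresh random id
print(ensure_server_id({"server_id": "abc-123"})) # kept unchanged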