sys.exc_info

Here are examples of the Python API sys.exc_info taken from open source projects. They show how real codebases capture the current exception's (type, value, traceback) tuple inside an except block, store it, re-raise it later, or format it for logging.
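
Before looking at the project code, here is a minimal, self-contained sketch (not taken from any of the projects below) of what sys.exc_info() returns: inside an except block it yields a (type, value, traceback) tuple for the exception currently being handled, and (None, None, None) when no exception is active.

import sys
import traceback

try:
    1 / 0
except ZeroDivisionError:
    exc_type, exc_value, exc_tb = sys.exc_info()
    # exc_type is the exception class, exc_value the exception instance,
    # exc_tb the traceback object, usable with the traceback module
    print("%s: %s" % (exc_type.__name__, exc_value))
    traceback.print_tb(exc_tb)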

Example 1

Project: scons
Source File: Taskmaster.py
    def _find_next_ready_node(self):
        """
        Finds the next node that is ready to be built.

        This is *the* main guts of the DAG walk.  We loop through the
        list of candidates, looking for something that has no un-built
        children (i.e., that is a leaf Node or has dependencies that are
        all leaf Nodes or up-to-date).  Candidate Nodes are re-scanned
        (both the target Node itself and its sources, which are always
        scanned in the context of a given target) to discover implicit
        dependencies.  A Node that must wait for some children to be
        built will be put back on the candidates list after the children
        have finished building.  A Node that has been put back on the
        candidates list in this way may have itself (or its sources)
        re-scanned, in order to handle generated header files (e.g.) and
        the implicit dependencies therein.

        Note that this method does not do any signature calculation or
        up-to-date check itself.  All of that is handled by the Task
        class.  This is purely concerned with the dependency graph walk.
        """

        self.ready_exc = None

        T = self.trace
        if T: T.write(u'\n' + self.trace_message('Looking for a node to evaluate'))

        while True:
            node = self.next_candidate()
            if node is None:
                if T: T.write(self.trace_message('No candidate anymore.') + u'\n')
                return None

            node = node.disambiguate()
            state = node.get_state()

            # For debugging only:
            #
            # try:
            #     self._validate_pending_children()
            # except:
            #     self.ready_exc = sys.exc_info()
            #     return node

            if CollectStats:
                if not hasattr(node, 'stats'):
                    node.stats = Stats()
                    StatsNodes.append(node)
                S = node.stats
                S.considered = S.considered + 1
            else:
                S = None

            if T: T.write(self.trace_message(u'    Considering node %s and its children:' % self.trace_node(node)))

            if state == NODE_NO_STATE:
                # Mark this node as being on the execution stack:
                node.set_state(NODE_PENDING)
            elif state > NODE_PENDING:
                # Skip this node if it has already been evaluated:
                if S: S.already_handled = S.already_handled + 1
                if T: T.write(self.trace_message(u'       already handled (executed)'))
                continue

            executor = node.get_executor()

            try:
                children = executor.get_all_children()
            except SystemExit:
                exc_value = sys.exc_info()[1]
                e = SCons.Errors.ExplicitExit(node, exc_value.code)
                self.ready_exc = (SCons.Errors.ExplicitExit, e)
                if T: T.write(self.trace_message('       SystemExit'))
                return node
            except Exception, e:
                # We had a problem just trying to figure out the
                # children (like a child couldn't be linked in to a
                # VariantDir, or a Scanner threw something).  Arrange to
                # raise the exception when the Task is "executed."
                self.ready_exc = sys.exc_info()
                if S: S.problem = S.problem + 1
                if T: T.write(self.trace_message('       exception %s while scanning children.\n' % e))
                return node

            children_not_visited = []
            children_pending = set()
            children_not_ready = []
            children_failed = False

            for child in chain(executor.get_all_prerequisites(), children):
                childstate = child.get_state()

                if T: T.write(self.trace_message(u'       ' + self.trace_node(child)))

                if childstate == NODE_NO_STATE:
                    children_not_visited.append(child)
                elif childstate == NODE_PENDING:
                    children_pending.add(child)
                elif childstate == NODE_FAILED:
                    children_failed = True

                if childstate <= NODE_EXECUTING:
                    children_not_ready.append(child)


            # These nodes have not even been visited yet.  Add
            # them to the list so that on some next pass we can
            # take a stab at evaluating them (or their children).
            children_not_visited.reverse()
            self.candidates.extend(self.order(children_not_visited))
            #if T and children_not_visited:
            #    T.write(self.trace_message('     adding to candidates: %s' % map(str, children_not_visited)))
            #    T.write(self.trace_message('     candidates now: %s\n' % map(str, self.candidates)))

            # Skip this node if any of its children have failed.
            #
            # This catches the case where we're descending a top-level
            # target and one of our children failed while trying to be
            # built by a *previous* descent of an earlier top-level
            # target.
            #
            # It can also occur if a node is reused in multiple
            # targets: one descent goes through one of the targets,
            # and the next descent occurs through the other target.
            #
            # Note that we can only have failed_children if the
            # --keep-going flag was used, because without it the build
            # will stop before diving into the other branch.
            #
            # Note that even if one of the children fails, we still
            # added the other children to the list of candidate nodes
            # to keep on building (--keep-going).
            if children_failed:
                for n in executor.get_action_targets():
                    n.set_state(NODE_FAILED)

                if S: S.child_failed = S.child_failed + 1
                if T: T.write(self.trace_message('****** %s\n' % self.trace_node(node)))
                continue

            if children_not_ready:
                for child in children_not_ready:
                    # We're waiting on one or more derived targets
                    # that have not yet finished building.
                    if S: S.not_built = S.not_built + 1

                    # Add this node to the waiting parents lists of
                    # anything we're waiting on, with a reference
                    # count so we can be put back on the list for
                    # re-evaluation when they've all finished.
                    node.ref_count =  node.ref_count + child.add_to_waiting_parents(node)
                    if T: T.write(self.trace_message(u'     adjusted ref count: %s, child %s' %
                                  (self.trace_node(node), repr(str(child)))))

                if T:
                    for pc in children_pending:
                        T.write(self.trace_message('       adding %s to the pending children set\n' %
                                self.trace_node(pc)))
                self.pending_children = self.pending_children | children_pending

                continue

            # Skip this node if it has side-effects that are
            # currently being built:
            wait_side_effects = False
            for se in executor.get_action_side_effects():
                if se.get_state() == NODE_EXECUTING:
                    se.add_to_waiting_s_e(node)
                    wait_side_effects = True

            if wait_side_effects:
                if S: S.side_effects = S.side_effects + 1
                continue

            # The default when we've gotten through all of the checks above:
            # this node is ready to be built.
            if S: S.build = S.build + 1
            if T: T.write(self.trace_message(u'Evaluating %s\n' %
                                             self.trace_node(node)))

            # For debugging only:
            #
            # try:
            #     self._validate_pending_children()
            # except:
            #     self.ready_exc = sys.exc_info()
            #     return node

            return node

        return None
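
The excerpt above stores the result of sys.exc_info() in self.ready_exc so that an exception raised while scanning children can be re-raised later, when the Task is actually "executed". Below is a minimal sketch of that deferred re-raise pattern; the class and method names are hypothetical, and the three-argument raise is Python 2 syntax to match the excerpt (Python 3 would use raise exc_value.with_traceback(exc_tb)).

import sys

class Task(object):
    def __init__(self):
        self.ready_exc = None           # deferred (type, value, traceback)

    def scan_children(self):
        try:
            self.find_children()        # may blow up while walking the graph
        except Exception:
            # remember the full exception state instead of raising now
            self.ready_exc = sys.exc_info()

    def execute(self):
        if self.ready_exc is not None:
            exc_type, exc_value, exc_tb = self.ready_exc
            raise exc_type, exc_value, exc_tb

    def find_children(self):
        raise ValueError("scanner failed")

task = Task()
task.scan_children()                    # ValueError is captured, not raised
try:
    task.execute()                      # original ValueError re-raised here, with its traceback
except ValueError, e:
    print("deferred: %s" % e)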

Example 2

Project: AZOrange
Source File: getUnbiasedAccuracy.py
    def getAcc(self, callBack = None, callBackWithFoldModel = None):
        """ For regression problems, it returns the RMSE and the Q2 
            For Classification problems, it returns CA and the ConfMat
            The return is made in a Dict: {"RMSE":0.2,"Q2":0.1,"CA":0.98,"CM":[[TP, FP],[FN,TN]]}
            For the EvalResults not supported for a specific learner/datase, the respective result will be None

            if the learner is a dict {"LearnerName":learner, ...} the results will be a dict with results for all Learners and for a consensus
                made out of those that were stable

            It some error occurred, the respective values in the Dict will be None
        """
        self.__log("Starting Calculating MLStatistics")
        statistics = {}
        if not self.__areInputsOK():
            return None
        # Set the response type
        self.responseType =  self.data.domain.classVar.varType == orange.VarTypes.Discrete and "Classification"  or "Regression"
        self.__log("  "+str(self.responseType))

        #Create the Train and test sets
        if self.usePreDefFolds:
            DataIdxs = self.preDefIndices 
        else:
            DataIdxs = self.sampler(self.data, self.nExtFolds) 
        foldsN = [f for f in dict.fromkeys(DataIdxs) if f != 0] #Folds used only from 1 on ... 0 are for fixed train Bias
        nFolds = len(foldsN)
        #Fix the Indexes based on DataIdxs
        # (0s) represents the train set  ( >= 1s) represents the test set folds
        if self.useVarCtrlCV:
            nShifted = [0] * nFolds
            for idx,isTest in enumerate(self.preDefIndices):  # self.preDefIndices == 0 are to be used in TrainBias
                if not isTest:
                    if DataIdxs[idx]:
                        nShifted[DataIdxs[idx]] += 1
                        DataIdxs[idx] = 0
            for idx,shift in enumerate(nShifted):
                self.__log("In fold "+str(idx)+", "+str(shift)+" examples were shifted to the train set.")

        #Var for saving each fold's result
        optAcc = {}
        results = {}
        exp_pred = {}
        nTrainEx = {}
        nTestEx = {}
        
        #Set a dict of learners
        MLmethods = {}
        if type(self.learner) == dict:
            for ml in self.learner:
                MLmethods[ml] = self.learner[ml]
        else:
            MLmethods[self.learner.name] = self.learner

        models={}
        self.__log("Calculating Statistics for MLmethods:")
        self.__log("  "+str([x for x in MLmethods]))

        #Check data in advance so that it will not, by chance, fail at the last fold!
        for foldN in foldsN:
            trainData = self.data.select(DataIdxs,foldN,negate=1)
            self.__checkTrainData(trainData)

        #Optional!!
        # Order Learners so that PLS is the first
        sortedML = [ml for ml in MLmethods]
        if "PLS" in sortedML:
            sortedML.remove("PLS")
            sortedML.insert(0,"PLS")

        stepsDone = 0
        nTotalSteps = len(sortedML) * self.nExtFolds  
        for ml in sortedML:
          startTime = time.time()
          self.__log("    > "+str(ml)+"...")
          try:
            #Var for saving each fold's result
            results[ml] = []
            exp_pred[ml] = []
            models[ml] = []
            nTrainEx[ml] = []
            nTestEx[ml] = []
            optAcc[ml] = []
            logTxt = ""
            for foldN in foldsN:
                if type(self.learner) == dict:
                    self.paramList = None

                trainData = self.data.select(DataIdxs,foldN,negate=1)
                testData = self.data.select(DataIdxs,foldN)
                smilesAttr = dataUtilities.getSMILESAttr(trainData)
                if smilesAttr:
                    self.__log("Found SMILES attribute:"+smilesAttr)
                    if MLmethods[ml].specialType == 1:
                       trainData = dataUtilities.attributeSelectionData(trainData, [smilesAttr, trainData.domain.classVar.name]) 
                       testData = dataUtilities.attributeSelectionData(testData, [smilesAttr, testData.domain.classVar.name]) 
                       self.__log("Selected attrs: "+str([attr.name for attr in trainData.domain]))
                    else:
                       trainData = dataUtilities.attributeDeselectionData(trainData, [smilesAttr]) 
                       testData = dataUtilities.attributeDeselectionData(testData, [smilesAttr]) 
                       self.__log("Selected attrs: "+str([attr.name for attr in trainData.domain[0:3]] + ["..."] + [attr.name for attr in trainData.domain[len(trainData.domain)-3:]]))

                nTrainEx[ml].append(len(trainData))
                nTestEx[ml].append(len(testData))
                #Test if trainsets inside optimizer will respect dataSize criterias.
                #  if not, don't optimize, but still train the model
                dontOptimize = False
                if self.responseType != "Classification" and (len(trainData)*(1-1.0/self.nInnerFolds) < 20):
                    dontOptimize = True
                else:                      
                    tmpDataIdxs = self.sampler(trainData, self.nInnerFolds)
                    tmpTrainData = trainData.select(tmpDataIdxs,1,negate=1)
                    if not self.__checkTrainData(tmpTrainData, False):
                        dontOptimize = True

                SpecialModel = None
                if dontOptimize:
                    logTxt += "       Fold "+str(foldN)+": Too few compounds to optimize model hyper-parameters\n"
                    self.__log(logTxt)
                    if trainData.domain.classVar.varType == orange.VarTypes.Discrete:
                        res = evalUtilities.crossValidation([MLmethods[ml]], trainData, folds=5, stratified=orange.MakeRandomIndices.StratifiedIfPossible, random_generator = random.randint(0, 100))
                        CA = evalUtilities.CA(res)[0]
                        optAcc[ml].append(CA)
                    else:
                        res = evalUtilities.crossValidation([MLmethods[ml]], trainData, folds=5, stratified=orange.MakeRandomIndices.StratifiedIfPossible, random_generator = random.randint(0, 100))
                        R2 = evalUtilities.R2(res)[0]
                        optAcc[ml].append(R2)
                else:
                    if MLmethods[ml].specialType == 1: 
                            if trainData.domain.classVar.varType == orange.VarTypes.Discrete:
                                    optInfo, SpecialModel = MLmethods[ml].optimizePars(trainData, folds = 5)
                                    optAcc[ml].append(optInfo["Acc"])
                            else:
                                    res = evalUtilities.crossValidation([MLmethods[ml]], trainData, folds=5, stratified=orange.MakeRandomIndices.StratifiedIfPossible, random_generator = random.randint(0, 100))
                                    R2 = evalUtilities.R2(res)[0]
                                    optAcc[ml].append(R2)
                    else:
                            runPath = miscUtilities.createScratchDir(baseDir = AZOC.NFS_SCRATCHDIR, desc = "AccWOptParam", seed = id(trainData))
                            trainData.save(os.path.join(runPath,"trainData.tab"))
                            tunedPars = paramOptUtilities.getOptParam(
                                learner = MLmethods[ml], 
                                trainDataFile = os.path.join(runPath,"trainData.tab"), 
                                paramList = self.paramList, 
                                useGrid = False, 
                                verbose = self.verbose, 
                                queueType = self.queueType, 
                                runPath = runPath, 
                                nExtFolds = None, 
                                nFolds = self.nInnerFolds,
                                logFile = self.logFile,
                                getTunedPars = True,
                                fixedParams = self.fixedParams)
                            if not MLmethods[ml] or not MLmethods[ml].optimized:
                                self.__log("       WARNING: GETACCWOPTPARAM: The learner "+str(ml)+" was not optimized.")
                                self.__log("                It will be ignored")
                                #self.__log("                It will be set to default parameters")
                                self.__log("                    DEBUG can be done in: "+runPath)
                                #Set learner back to default 
                                #MLmethods[ml] = MLmethods[ml].__class__()
                                raise Exception("The learner "+str(ml)+" was not optimized.")
                            else:
                                if trainData.domain.classVar.varType == orange.VarTypes.Discrete:
                                    optAcc[ml].append(tunedPars[0])
                                else:
                                    res = evalUtilities.crossValidation([MLmethods[ml]], trainData, folds=5, stratified=orange.MakeRandomIndices.StratifiedIfPossible, random_generator = random.randint(0, 100))
                                    R2 = evalUtilities.R2(res)[0]
                                    optAcc[ml].append(R2)

                                miscUtilities.removeDir(runPath) 
                #Train the model
                if SpecialModel is not None:
                    model = SpecialModel 
                else:
                    model = MLmethods[ml](trainData)
                models[ml].append(model)
                #Test the model
                if self.responseType == "Classification":
                    results[ml].append((evalUtilities.getClassificationAccuracy(testData, model), evalUtilities.getConfMat(testData, model) ) )
                else:
                    local_exp_pred = []
                    # Predict using bulk-predict
                    predictions = model(testData)
                    # Gather predictions
                    for n,ex in enumerate(testData):
                        local_exp_pred.append((ex.getclass().value, predictions[n].value))
                    results[ml].append((evalUtilities.calcRMSE(local_exp_pred), evalUtilities.calcRsqrt(local_exp_pred) ) )
                    #Save the experimental value and correspondent predicted value
                    exp_pred[ml] += local_exp_pred
                if callBack:
                     stepsDone += 1
                     if not callBack((100*stepsDone)/nTotalSteps): return None
                if callBackWithFoldModel:
                    callBackWithFoldModel(model) 

            res = self.createStatObj(results[ml], exp_pred[ml], nTrainEx[ml], nTestEx[ml],self.responseType, self.nExtFolds, logTxt, labels = hasattr(self.data.domain.classVar,"values") and list(self.data.domain.classVar.values) or None )
            if self.verbose > 0: 
                print "UnbiasedAccuracyGetter!Results  "+ml+":\n"
                pprint(res)
            if not res:
                raise Exception("No results available!")
            res["runningTime"] = time.time() - startTime
            statistics[ml] = copy.deepcopy(res)
            self.__writeResults(statistics)
            self.__log("       OK")
          except:
            self.__log("       Learner "+str(ml)+" failed to create/optimize the model!")
            error = str(sys.exc_info()[0]) +" "+\
                        str(sys.exc_info()[1]) +" "+\
                        str(traceback.extract_tb(sys.exc_info()[2]))
            self.__log(error)
 
            res = self.createStatObj()
            statistics[ml] = copy.deepcopy(res)
            self.__writeResults(statistics)

        if not statistics or len(statistics) < 1:
            self.__log("ERROR: No statistics to return!")
            return None
        elif len(statistics) > 1:
            #We still need to build a consensus model out of the stable models 
            #   ONLY if there is more than one stable model!
            #   When only one or no stable models, build a consensus based on all models
            # ALWAYS exclude specialType models (MLmethods[ml].specialType > 0)
            consensusMLs={}
            for modelName in statistics:
                StabilityValue = statistics[modelName]["StabilityValue"]
                if StabilityValue is not None and statistics[modelName]["stable"]:
                    consensusMLs[modelName] = copy.deepcopy(statistics[modelName])

            self.__log("Found "+str(len(consensusMLs))+" stable MLmethods out of "+str(len(statistics))+" MLmethods.")

            if len(consensusMLs) <= 1:   # we need more models to build a consensus!
                consensusMLs={}
                for modelName in statistics:
                    consensusMLs[modelName] = copy.deepcopy(statistics[modelName])

            # Exclude specialType models 
            excludeThis = []
            for learnerName in consensusMLs:
                if models[learnerName][0].specialType > 0:
                    excludeThis.append(learnerName)
            for learnerName in excludeThis:
                consensusMLs.pop(learnerName)
                self.__log("    > Excluded special model " + learnerName)
            self.__log("    > Stable modules: " + str(consensusMLs.keys()))

            if len(consensusMLs) >= 2:
                #Var for saving each fold's result
                startTime = time.time()
                Cresults = []
                Cexp_pred = []
                CnTrainEx = []
                CnTestEx = []
                self.__log("Calculating the statistics for a Consensus model based on "+str([ml for ml in consensusMLs]))
                for foldN in range(self.nExtFolds):
                    if self.responseType == "Classification":
                        CLASS0 = str(self.data.domain.classVar.values[0])
                        CLASS1 = str(self.data.domain.classVar.values[1])
                        # exprTest0
                        exprTest0 = "(0"
                        for ml in consensusMLs:
                            exprTest0 += "+( "+ml+" == "+CLASS0+" )*"+str(optAcc[ml][foldN])+" "
                        exprTest0 += ")/IF0(sum([False"
                        for ml in consensusMLs:
                            exprTest0 += ", "+ml+" == "+CLASS0+" "
                        exprTest0 += "]),1)"
                        # exprTest1
                        exprTest1 = "(0"
                        for ml in consensusMLs:
                            exprTest1 += "+( "+ml+" == "+CLASS1+" )*"+str(optAcc[ml][foldN])+" "
                        exprTest1 += ")/IF0(sum([False"
                        for ml in consensusMLs:
                            exprTest1 += ", "+ml+" == "+CLASS1+" "
                        exprTest1 += "]),1)"
                        # Expression
                        expression = [exprTest0+" >= "+exprTest1+" -> "+CLASS0," -> "+CLASS1]
                    else:
                        Q2sum = sum([optAcc[ml][foldN] for ml in consensusMLs])
                        expression = "(1 / "+str(Q2sum)+") * (0"
                        for ml in consensusMLs:
                            expression += " + "+str(optAcc[ml][foldN])+" * "+ml+" "
                        expression += ")"

                    testData = self.data.select(DataIdxs,foldN+1)  # fold 0 is for the train Bias!!
                    smilesAttr = dataUtilities.getSMILESAttr(testData)
                    if smilesAttr:
                        self.__log("Found SMILES attribute:"+smilesAttr)
                        testData = dataUtilities.attributeDeselectionData(testData, [smilesAttr])
                        self.__log("Selected attrs: "+str([attr.name for attr in trainData.domain[0:3]] + ["..."] + [attr.name for attr in trainData.domain[len(trainData.domain)-3:]]))

                    CnTestEx.append(len(testData))
                    consensusClassifiers = {}
                    for learnerName in consensusMLs:
                        consensusClassifiers[learnerName] = models[learnerName][foldN]

                    model = AZorngConsensus.ConsensusClassifier(classifiers = consensusClassifiers, expression = expression)     
                    CnTrainEx.append(model.NTrainEx)
                    #Test the model
                    if self.responseType == "Classification":
                        Cresults.append((evalUtilities.getClassificationAccuracy(testData, model), evalUtilities.getConfMat(testData, model) ) )
                    else:
                        local_exp_pred = []
                        # Predict using bulk-predict
                        predictions = model(testData)
                        # Gather predictions
                        for n,ex in enumerate(testData):
                            local_exp_pred.append((ex.getclass().value, predictions[n].value))
                        Cresults.append((evalUtilities.calcRMSE(local_exp_pred), evalUtilities.calcRsqrt(local_exp_pred) ) )
                        #Save the experimental value and correspondent predicted value
                        Cexp_pred += local_exp_pred

                res = self.createStatObj(Cresults, Cexp_pred, CnTrainEx, CnTestEx, self.responseType, self.nExtFolds, labels = hasattr(self.data.domain.classVar,"values") and list(self.data.domain.classVar.values) or None )
                res["runningTime"] = time.time() - startTime
                statistics["Consensus"] = copy.deepcopy(res)
                statistics["Consensus"]["IndividualStatistics"] = copy.deepcopy(consensusMLs)
                self.__writeResults(statistics)
            self.__log("Returned multiple ML methods statistics.")
            return statistics
                 
        #By default return the only existing statistics!
        self.__writeResults(statistics)
        self.__log("Returned only one ML method statistics.")
        return statistics[statistics.keys()[0]]
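
In the except block near the end of this excerpt, the three elements of sys.exc_info() are stringified together with traceback.extract_tb to build a single error message for the log. A minimal, self-contained sketch of that formatting follows; the helper name is made up, and traceback.format_exc() is the usual shorthand for the same information.

import sys
import traceback

def format_current_exception():
    # mirrors the "type value traceback-entries" message built in the excerpt
    exc_type, exc_value, exc_tb = sys.exc_info()
    return str(exc_type) + " " + str(exc_value) + " " + str(traceback.extract_tb(exc_tb))

try:
    {}["missing"]
except:
    print(format_current_exception())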

Example 3

Project: scalarizr
Source File: gce_persistent.py
    def _ensure(self):

        garbage_can = []
        zone = os.path.basename(__node__['gce']['zone'])
        project_id = __node__['gce']['project_id']
        server_name = __node__['server_id']

        try:
            connection = __node__['gce'].connect_compute()
        except:
            e = sys.exc_info()[1]
            LOG.debug('Can not get GCE connection: %s' % e)
            """ No connection, implicit check """
            try:
                self._check_attr('name')
            except:
                raise storage2.StorageError('Disk is not created yet, and GCE connection is unavailable')
            device = gce_util.devicename_to_device(self.name)
            if not device:
                raise storage2.StorageError("Disk is not attached and GCE connection is unavailable")

            self.device = device
        else:
            LOG.debug('Successfully created connection to cloud engine')
            try:
                create = False
                if not self.link:
                    # Disk does not exist, create it first
                    create_request_body = dict(name=self.name)
                    if self.snap:
                        snap_dict = dict(self.snap)
                        snap_dict['type'] = STORAGE_TYPE
                        self.snap = storage2.snapshot(snap_dict)
                        LOG.debug('Ensuring that snapshot is ready, before creating disk from it')
                        gce_util.wait_snapshot_ready(self.snap)
                        create_request_body['sourceSnapshot'] = to_current_api_version(self.snap.link)
                    else:
                        create_request_body['sizeGb'] = self.size

                    create = True
                else:
                    self._check_attr('zone')
                    LOG.debug('Checking that disk already exists')
                    try:
                        disk_dict = connection.disks().get(disk=self.name, project=project_id,
                                                                            zone=zone).execute()
                        self.link = disk_dict['selfLink']
                    except HttpError, e:
                        code = int(e.resp['status'])
                        if code == 404:
                            raise storage2.VolumeNotExistsError(self.name)
                        else:
                            raise

                    if self.zone != zone:
                        # Volume is in different zone, snapshot it,
                        # create new volume from this snapshot, then attach
                        temp_snap = self.snapshot('volume')
                        garbage_can.append(temp_snap)
                        new_name = self.name + zone
                        create_request_body = dict(
                            name=new_name, sourceSnapshot=to_current_api_version(temp_snap.link))
                        create = True

                attach = False
                if create:
                    disk_name = create_request_body['name']
                    if "pd-standard" != self.disk_type:
                        disk_type = gce_util.get_disktype(conn=connection,
                            project_id=project_id, zone=zone, disktype=self.disk_type)
                        create_request_body.update({'type': disk_type['selfLink']})

                    LOG.debug('Creating new GCE disk %s' % disk_name)
                    op = connection.disks().insert(project=project_id,
                                                   zone=zone,
                                                   body=create_request_body).execute()
                    gce_util.wait_for_operation(connection, project_id, op['name'], zone)
                    disk_dict = connection.disks().get(disk=disk_name,
                                                       project=project_id,
                                                       zone=zone).execute()
                    self.id = disk_dict['id']
                    self.link = disk_dict['selfLink']
                    self.zone = zone
                    self.name = disk_name
                    attach = True

                else:
                    if self.last_attached_to and self.last_attached_to != server_name:
                        LOG.debug("Making sure that disk %s detached from previous attachment place." % self.name)
                        try:
                            gce_util.ensure_disk_detached(connection,
                                                          project_id,
                                                          zone,
                                                          self.last_attached_to,
                                                          self.link)
                        except:
                            e = sys.exc_info()[1]
                            if 'resource was not found' in str(e):
                                raise storage2.VolumeNotExistsError(self.link)
                            raise
                        
                    attachment_inf = self._attachment_info(connection)
                    if attachment_inf:
                        disk_devicename = attachment_inf['deviceName']
                    else:
                        attach = True

                if attach:
                    for _ in range(10):
                        try:
                            LOG.debug('Attaching disk %s to current instance' % self.name)
                            try:
                                op = connection.instances().attachDisk(
                                    instance=server_name, 
                                    project=project_id,
                                    zone=zone, 
                                    body=dict(
                                        deviceName=self.name,
                                        source=self.link,
                                        mode="READ_WRITE",
                                        type="PERSISTENT")).execute()
                            except:
                                e = sys.exc_info()[1]
                                if 'resource was not found' in str(e):
                                    raise storage2.VolumeNotExistsError(self.link)
                                raise

                            gce_util.wait_for_operation(connection, project_id, op['name'], zone=zone)
                            disk_devicename = self.name
                            break
                        except:
                            e = sys.exc_info()[1]
                            #  sometimes occurs when volume was just detached from another instance
                            if 'is already being used by' in str(e):
                                LOG.debug('%s. Retrying in 1s', str(e))
                                time.sleep(1)
                            else:
                                raise

                if not linux.os.windows:
                    for i in range(10):
                        device = gce_util.devicename_to_device(disk_devicename)
                        if device:
                            break
                        LOG.debug('Device not found in system. Retrying in 1s.')
                        time.sleep(1)
                    else:
                        msg = "Disk should be attached, but corresponding device not found in system"
                        raise storage2.StorageError(msg)
                else:
                    device = disk_devicename

                self.device = device
                self.last_attached_to = server_name
                self.snap = None

            finally:
                # Perform cleanup
                for garbage in garbage_can:
                    try:
                        garbage.destroy(force=True)
                    except:
                        e = sys.exc_info()[1]
                        LOG.debug('Failed to destroy temporary storage object %s: %s', garbage, e)
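
Throughout this excerpt, a bare except: followed by e = sys.exc_info()[1] is used to get hold of the active exception object without binding it in the except clause, so its message can be inspected before deciding whether to translate it or re-raise. A minimal sketch of that idiom, with a made-up function standing in for the GCE call:

import sys

def detach_disk():
    raise RuntimeError("resource was not found")

try:
    detach_disk()
except:
    # same idiom as the excerpt: fetch the current exception without "as e"
    e = sys.exc_info()[1]
    if 'resource was not found' in str(e):
        print("volume does not exist: %s" % e)
    else:
        raise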

Example 4

Project: scalyr-agent-2
Source File: compiler.py
    def compile(self, *mibnames, **options):
        """Transform requested and possibly referred MIBs.

        The *compile* method should be invoked when the *MibCompiler* object
        is operational, meaning at least *sources* are specified.
        
        Once called with a MIB module name, *compile* will:

        * fetch ASN.1 MIB module with given name by calling *sources*
        * make sure no such transformed MIB already exists (with *searchers*)
        * parse ASN.1 MIB text with *parser*
        * perform actual MIB transformation into target format with *code generator*
        * may attempt to borrow pre-transformed MIB through *borrowers*
        * write transformed MIB through *writer*

        The above sequence will be performed for each MIB name given in
        *mibnames* and may be performed for all MIBs referred to from
        MIBs being processed.

        Args:
            mibnames: list of ASN.1 MIBs names
            options: options that affect the way PySMI components work

        Returns:
            A dictionary of MIB module names processed (keys) and *MibStatus*
            class instances (values)

        """
        processed = {}
        parsedMibs = {}; failedMibs = {}; borrowedMibs = {}; builtMibs = {}
        symbolTableMap = {}
        mibsToParse = [x for x in mibnames]
        while mibsToParse:
            mibname = mibsToParse.pop(0)
            if mibname in parsedMibs:
                debug.logger & debug.flagCompiler and debug.logger('MIB %s already parsed' % mibname)
                continue
            if mibname in failedMibs:
                debug.logger & debug.flagCompiler and debug.logger('MIB %s already failed' % mibname)
                continue

            for source in self._sources:
                debug.logger & debug.flagCompiler and debug.logger('trying source %s' % source)
                try:
                    fileInfo, fileData = source.getData(mibname)
                    for mibTree in self._parser.parse(fileData):
                        mibInfo, symbolTable = self._symbolgen.genCode(
                            mibTree, symbolTableMap
                        )

                        symbolTableMap[mibInfo.name] = symbolTable

                        parsedMibs[mibInfo.name] = fileInfo, mibInfo, mibTree
                        if mibname in failedMibs:
                            del failedMibs[mibname]

                        mibsToParse.extend(mibInfo.imported)

                        debug.logger & debug.flagCompiler and debug.logger('%s (%s) read from %s, immediate dependencies: %s' % (mibInfo.name, mibname, fileInfo.path, ', '.join(mibInfo.imported) or '<none>'))

                    break

                except error.PySmiReaderFileNotFoundError:
                    debug.logger & debug.flagCompiler and debug.logger('no %s found at %s' % (mibname, source))
                    continue
                except error.PySmiError:
                    exc_class, exc, tb = sys.exc_info()
                    exc.source = source
                    exc.mibname = mibname
                    exc.msg += ' at MIB %s' % mibname
                    debug.logger & debug.flagCompiler and debug.logger('%serror %s from %s' % (options.get('ignoreErrors') and 'ignoring ' or 'failing on ', exc, source))
                    failedMibs[mibname] = exc
                    processed[mibname] = statusFailed.setOptions(error=exc)
            else:
                exc = error.PySmiError('MIB source %s not found' % mibname)
                exc.mibname = mibname
                debug.logger & debug.flagCompiler and debug.logger('no %s found anywhere' % mibname)
                if mibname not in failedMibs:
                    failedMibs[mibname] = exc
                if mibname not in processed:
                    processed[mibname] = statusMissing

        debug.logger & debug.flagCompiler and debug.logger('MIBs analyzed %s, MIBs failed %s' % (len(parsedMibs), len(failedMibs)))

        #
        # See what MIBs need generating
        #

        for mibname in parsedMibs.copy():
            fileInfo, mibInfo, mibTree = parsedMibs[mibname]
            debug.logger & debug.flagCompiler and debug.logger('checking if %s requires updating' % mibname)
            for searcher in self._searchers:
                try:
                    searcher.fileExists(mibname, fileInfo.mtime, rebuild=options.get('rebuild'))
                except error.PySmiFileNotFoundError:
                    debug.logger & debug.flagCompiler and debug.logger('no compiled MIB %s available through %s' % (mibname, searcher))
                    continue

                except error.PySmiFileNotModifiedError:
                    debug.logger & debug.flagCompiler and debug.logger('will be using existing compiled MIB %s found by %s' % (mibname, searcher))
                    del parsedMibs[mibname]
                    processed[mibname] = statusUntouched
                    break

                except error.PySmiError:
                    exc_class, exc, tb = sys.exc_info()
                    exc.searcher = searcher
                    exc.mibname = mibname
                    exc.msg += ' at MIB %s' % mibname
                    debug.logger & debug.flagCompiler and debug.logger('error from %s: %s' % (searcher, exc))
                    continue
            else:
                debug.logger & debug.flagCompiler and debug.logger('no suitable compiled MIB %s found anywhere' % mibname)

                if options.get('noDeps') and mibname not in mibnames:
                    debug.logger & debug.flagCompiler and debug.logger('excluding imported MIB %s from code generation' % mibname)
                    del parsedMibs[mibname]
                    processed[mibname] = statusUntouched
                    continue

        debug.logger & debug.flagCompiler and debug.logger('MIBs parsed %s, MIBs failed %s' % (len(parsedMibs), len(failedMibs)))

        #
        # Generate code for parsed MIBs
        #

        for mibname in parsedMibs.copy():
            fileInfo, mibInfo, mibTree = parsedMibs[mibname]

            comments = [
                'ASN.1 source %s' % fileInfo.path,
                'Produced by %s-%s at %s' % (packageName, packageVersion, time.asctime()),
                'On host %s platform %s version %s by user %s' % (hasattr(os, 'uname') and os.uname()[1] or '?', hasattr(os, 'uname') and os.uname()[0] or '?', hasattr(os, 'uname') and os.uname()[2] or '?', hasattr(os, 'getuid') and getpwuid(os.getuid())[0] or '?'),
                'Using Python version %s' % sys.version.split('\n')[0]
            ]

            try:
                mibInfo, mibData = self._codegen.genCode(
                        mibTree,
                        symbolTableMap,
                        comments=comments,
                        genTexts=options.get('genTexts')
                    )

                builtMibs[mibname] = fileInfo, mibInfo, mibData
                del parsedMibs[mibname]

                debug.logger & debug.flagCompiler and debug.logger('%s read from %s and compiled by %s' % (mibname, fileInfo.path, self._writer))

            except error.PySmiError:
                exc_class, exc, tb = sys.exc_info()
                exc.handler = self._codegen
                exc.mibname = mibname
                exc.msg += ' at MIB %s' % mibname
                debug.logger & debug.flagCompiler and debug.logger('error from %s: %s' % (self._codegen, exc))
                processed[mibname] = statusFailed.setOptions(error=exc)
                failedMibs[mibname] = exc
                del parsedMibs[mibname]

        debug.logger & debug.flagCompiler and debug.logger('MIBs built %s, MIBs failed %s' % (len(parsedMibs), len(failedMibs)))

        #
        # Try to borrow pre-compiled MIBs for failed ones
        #

        for mibname in failedMibs.copy():
            if options.get('noDeps') and mibname not in mibnames:
                debug.logger & debug.flagCompiler and debug.logger('excluding imported MIB %s from borrowing' % mibname)
                continue

            for borrower in self._borrowers:
                debug.logger & debug.flagCompiler and debug.logger('trying to borrow %s from %s' % (mibname, borrower))
                try:
                    fileInfo, fileData = borrower.getData(
                        mibname,
                        genTexts=options.get('genTexts')
                    )

                    borrowedMibs[mibname] = fileInfo, MibInfo(name=mibname, imported=[]), fileData

                    del failedMibs[mibname]

                    debug.logger & debug.flagCompiler and debug.logger('%s borrowed with %s' % (mibname, borrower))
                    break

                except error.PySmiError:
                    exc_class, exc, tb = sys.exc_info()
                    debug.logger & debug.flagCompiler and debug.logger('error from %s: %s' % (borrower, exc))

        debug.logger & debug.flagCompiler and debug.logger('MIBs available for borrowing %s, MIBs failed %s' % (len(borrowedMibs), len(failedMibs)))

        #
        # See what MIBs need borrowing
        #

        for mibname in borrowedMibs.copy():
            debug.logger & debug.flagCompiler and debug.logger('checking if failed MIB %s requires borrowing' % mibname)
            fileInfo, mibInfo, mibData = borrowedMibs[mibname]
            for searcher in self._searchers:
                try:
                    searcher.fileExists(mibname, fileInfo.mtime, rebuild=options.get('rebuild'))
                except error.PySmiFileNotFoundError:
                    debug.logger & debug.flagCompiler and debug.logger('no compiled MIB %s available through %s' % (mibname, searcher))
                    continue

                except error.PySmiFileNotModifiedError:
                    debug.logger & debug.flagCompiler and debug.logger('will be using existing compiled MIB %s found by %s' % (mibname, searcher))
                    del borrowedMibs[mibname]
                    processed[mibname] = statusUntouched
                    break

                except error.PySmiError:
                    exc_class, exc, tb = sys.exc_info()
                    exc.searcher = searcher
                    exc.mibname = mibname
                    exc.msg += ' at MIB %s' % mibname
                    debug.logger & debug.flagCompiler and debug.logger('error from %s: %s' % (searcher, exc))
                    continue
            else:
                debug.logger & debug.flagCompiler and debug.logger('no suitable compiled MIB %s found anywhere' % mibname)

                if options.get('noDeps') and mibname not in mibnames:
                    debug.logger & debug.flagCompiler and debug.logger('excluding imported MIB %s from borrowing' % mibname)
                    processed[mibname] = statusUntouched
                else:
                    debug.logger & debug.flagCompiler and debug.logger('will borrow MIB %s' % mibname)
                    builtMibs[mibname] = borrowedMibs[mibname]

                    processed[mibname] = statusBorrowed.setOptions(
                        path=fileInfo.path, file=fileInfo.file,
                        alias=fileInfo.name
                    )

                del borrowedMibs[mibname]

        debug.logger & debug.flagCompiler and debug.logger('MIBs built %s, MIBs failed %s' % (len(builtMibs), len(failedMibs)))

        #
        # We could attempt to ignore missing/failed MIBs
        #

        if failedMibs and not options.get('ignoreErrors'):
            debug.logger & debug.flagCompiler and debug.logger('failing with problem MIBs %s' % ', '.join(failedMibs))
            for mibname in builtMibs:
                processed[mibname] = statusUnprocessed
            return processed

        debug.logger & debug.flagCompiler and debug.logger('proceeding with built MIBs %s, failed MIBs %s' % (', '.join(builtMibs), ', '.join(failedMibs)))

        #
        # Store compiled MIBs
        #

        for mibname in builtMibs.copy():
            fileInfo, mibInfo, mibData = builtMibs[mibname]
            try:
                self._writer.putData(
                    mibname, mibData, dryRun=options.get('dryRun')
                )

                debug.logger & debug.flagCompiler and debug.logger('%s stored by %s' % (mibname, self._writer))

                del builtMibs[mibname]

                if mibname not in processed:
                    processed[mibname] = statusCompiled.setOptions(
                        path=fileInfo.path, file=fileInfo.file,
                        alias=fileInfo.name
                    )

            except error.PySmiError:
                exc_class, exc, tb = sys.exc_info()
                exc.handler = self._codegen
                exc.mibname = mibname
                exc.msg += ' at MIB %s' % mibname
                debug.logger & debug.flagCompiler and debug.logger('error %s from %s' % (exc, self._writer))
                processed[mibname] = statusFailed.setOptions(error=exc)
                failedMibs[mibname] = exc
                del builtMibs[mibname]

        debug.logger & debug.flagCompiler and debug.logger('MIBs modified: %s' % ', '.join([x for x in processed if processed[x] in ('compiled', 'borrowed')]))

        return processed
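
Each except error.PySmiError: handler above unpacks exc_class, exc, tb = sys.exc_info() and then attaches context (source, searcher, mibname, an extended message) to the live exception object before recording it, rather than re-raising. A minimal sketch of that annotate-and-record pattern, with hypothetical names:

import sys

class CompileError(Exception):
    def __init__(self, msg):
        Exception.__init__(self, msg)
        self.msg = msg
        self.mibname = None

def parse(mibname):
    raise CompileError("syntax error")

failed = {}
mibname = "IF-MIB"
try:
    parse(mibname)
except CompileError:
    exc_class, exc, tb = sys.exc_info()
    exc.mibname = mibname
    exc.msg += ' at MIB %s' % mibname   # annotate the live exception object
    failed[mibname] = exc               # record it; do not re-raise

print(failed[mibname].msg)              # "syntax error at MIB IF-MIB"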

Example 5

Project: scalyr-agent-2
Source File: service.py
    def __generateRequestOrResponseMsg(
        self,
        snmpEngine,
        messageProcessingModel,
        globalData,
        maxMessageSize,
        securityModel,
        securityEngineID,
        securityName,
        securityLevel,
        scopedPDU,
        securityStateReference
        ):
        snmpEngineID = snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder.importSymbols('__SNMP-FRAMEWORK-MIB', 'snmpEngineID')[0].syntax
        # 3.1.1
        if securityStateReference is not None:
            # 3.1.1a
            cachedSecurityData = self._cache.pop(securityStateReference)
            usmUserName = cachedSecurityData['msgUserName']
            if 'usmUserSecurityName' in cachedSecurityData:
                usmUserSecurityName = cachedSecurityData['usmUserSecurityName']
            else:
                usmUserSecurityName = usmUserName
            if 'usmUserAuthProtocol' in cachedSecurityData:
                usmUserAuthProtocol = cachedSecurityData['usmUserAuthProtocol']
            else:
                usmUserAuthProtocol = noauth.NoAuth.serviceID
            if 'usmUserAuthKeyLocalized' in cachedSecurityData:
                usmUserAuthKeyLocalized = cachedSecurityData['usmUserAuthKeyLocalized']
            else:
                usmUserAuthKeyLocalized = None
            if 'usmUserPrivProtocol' in cachedSecurityData:
                usmUserPrivProtocol = cachedSecurityData['usmUserPrivProtocol']
            else:
                usmUserPrivProtocol = nopriv.NoPriv.serviceID
            if 'usmUserPrivKeyLocalized' in cachedSecurityData:
                usmUserPrivKeyLocalized = cachedSecurityData['usmUserPrivKeyLocalized']
            else:
                usmUserPrivKeyLocalized = None
            securityEngineID = snmpEngineID
            debug.logger & debug.flagSM and debug.logger('__generateRequestOrResponseMsg: user info read from cache')
        elif securityName:
            # 3.1.1b
            try:
                ( usmUserName,
                  usmUserSecurityName,
                  usmUserAuthProtocol,
                  usmUserAuthKeyLocalized,
                  usmUserPrivProtocol,
                  usmUserPrivKeyLocalized ) = self.__getUserInfo(
                    snmpEngine.msgAndPduDsp.mibInstrumController,
                    securityEngineID,
                    self.__sec2usr(snmpEngine, securityName, securityEngineID)
                )
                debug.logger & debug.flagSM and debug.logger('__generateRequestOrResponseMsg: read user info')
            except NoSuchInstanceError:
                pysnmpUsmDiscovery, = snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder.importSymbols('__PYSNMP-USM-MIB', 'pysnmpUsmDiscovery')
                __reportUnknownName = not pysnmpUsmDiscovery.syntax
                if not __reportUnknownName:
                    try:
                        ( usmUserName,
                          usmUserSecurityName,
                          usmUserAuthProtocol,
                          usmUserAuthKeyLocalized,
                          usmUserPrivProtocol,
                          usmUserPrivKeyLocalized ) = self.__cloneUserInfo(
                            snmpEngine.msgAndPduDsp.mibInstrumController,
                            securityEngineID,
                            self.__sec2usr(snmpEngine, securityName)
                        )
                    except NoSuchInstanceError:
                        __reportUnknownName = 1

                if __reportUnknownName:
                    raise error.StatusInformation(
                        errorIndication = errind.unknownSecurityName
                        )
                debug.logger & debug.flagSM and debug.logger('__generateRequestOrResponseMsg: clone user info')
            except PyAsn1Error:
                debug.logger & debug.flagSM and debug.logger('processIncomingMsg: %s' % (sys.exc_info()[1],))
                snmpInGenErrs, = snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder.importSymbols('__SNMPv2-MIB', 'snmpInGenErrs')
                snmpInGenErrs.syntax = snmpInGenErrs.syntax + 1
                raise error.StatusInformation(
                    errorIndication=errind.invalidMsg
                )
        else:
            # empty username used for engineID discovery
            usmUserName = usmUserSecurityName = null
            usmUserAuthProtocol = noauth.NoAuth.serviceID
            usmUserPrivProtocol = nopriv.NoPriv.serviceID
            usmUserAuthKeyLocalized = usmUserPrivKeyLocalized = None
            debug.logger & debug.flagSM and debug.logger('__generateRequestOrResponseMsg: use empty USM data')
            
        debug.logger & debug.flagSM and debug.logger('__generateRequestOrResponseMsg: local usmUserName %r usmUserSecurityName %r usmUserAuthProtocol %s usmUserPrivProtocol %s securityEngineID %r securityName %r' % (usmUserName, usmUserSecurityName, usmUserAuthProtocol, usmUserPrivProtocol, securityEngineID, securityName))

        msg = globalData
        
        # 3.1.2
        if securityLevel == 3:
            if usmUserAuthProtocol == noauth.NoAuth.serviceID or \
               usmUserPrivProtocol == nopriv.NoPriv.serviceID:
                raise error.StatusInformation(
                    errorIndication = errind.unsupportedSecurityLevel
                    )

        # 3.1.3
        if securityLevel == 3 or securityLevel == 2:
            if usmUserAuthProtocol == noauth.NoAuth.serviceID:
                raise error.StatusInformation(
                    errorIndication = errind.unsupportedSecurityLevel
                    )

        securityParameters = self.__securityParametersSpec

        scopedPDUData = msg.setComponentByPosition(3).getComponentByPosition(3)
        scopedPDUData.setComponentByPosition(
            0, scopedPDU, verifyConstraints=False
            )
        
        # 3.1.6a
        if securityStateReference is None and (  # request type check added
            securityLevel == 3 or securityLevel == 2
            ):
            if securityEngineID in self.__timeline:
                ( snmpEngineBoots,
                  snmpEngineTime,
                  latestReceivedEngineTime,
                  latestUpdateTimestamp) = self.__timeline[
                    securityEngineID
                    ]
                debug.logger & debug.flagSM and debug.logger('__generateRequestOrResponseMsg: read snmpEngineBoots, snmpEngineTime from timeline')
            else:
                # 2.3 XXX is this correct?
                snmpEngineBoots = snmpEngineTime = 0
                debug.logger & debug.flagSM and debug.logger('__generateRequestOrResponseMsg: no timeline for securityEngineID %r' % (securityEngineID,))
        # 3.1.6.b
        elif securityStateReference is not None:  # XXX Report?
            ( snmpEngineBoots,
              snmpEngineTime ) = snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder.importSymbols('__SNMP-FRAMEWORK-MIB', 'snmpEngineBoots', 'snmpEngineTime')
            snmpEngineBoots = snmpEngineBoots.syntax
            snmpEngineTime = snmpEngineTime.syntax.clone()
            debug.logger & debug.flagSM and debug.logger('__generateRequestOrResponseMsg: read snmpEngineBoots, snmpEngineTime from LCD')
        # 3.1.6.c
        else:
            snmpEngineBoots = snmpEngineTime = 0
            debug.logger & debug.flagSM and debug.logger('__generateRequestOrResponseMsg: assuming zero snmpEngineBoots, snmpEngineTime')

        debug.logger & debug.flagSM and debug.logger('__generateRequestOrResponseMsg: use snmpEngineBoots %s snmpEngineTime %s for securityEngineID %r' % (snmpEngineBoots, snmpEngineTime, securityEngineID))

        # 3.1.4a
        if securityLevel == 3:
            if usmUserPrivProtocol in self.privServices:
                privHandler = self.privServices[usmUserPrivProtocol]
            else:
                raise error.StatusInformation(
                    errorIndication = errind.encryptionError
                )

            debug.logger & debug.flagSM and debug.logger('__generateRequestOrResponseMsg: scopedPDU %s' % scopedPDU.prettyPrint())

            try:
                dataToEncrypt = encoder.encode(scopedPDU)
            except PyAsn1Error:
                debug.logger & debug.flagSM and debug.logger('__generateRequestOrResponseMsg: scopedPDU serialization error: %s' % sys.exc_info()[1])
                raise error.StatusInformation(
                    errorIndication = errind.serializationError
                )
            
            debug.logger & debug.flagSM and debug.logger('__generateRequestOrResponseMsg: scopedPDU encoded into %s' % debug.hexdump(dataToEncrypt))

            ( encryptedData,
              privParameters ) = privHandler.encryptData(
                usmUserPrivKeyLocalized,
                ( snmpEngineBoots, snmpEngineTime, None ),
                dataToEncrypt
            )

            securityParameters.setComponentByPosition(
                5, privParameters, verifyConstraints=False
            )
            scopedPDUData.setComponentByPosition(
                1, encryptedData, verifyConstraints=False
            )

            debug.logger & debug.flagSM and debug.logger('__generateRequestOrResponseMsg: scopedPDU ciphered into %s' % debug.hexdump(encryptedData))

        # 3.1.4b
        elif securityLevel == 1 or securityLevel == 2:
            securityParameters.setComponentByPosition(5, '')

        debug.logger & debug.flagSM and debug.logger('__generateRequestOrResponseMsg: %s' % scopedPDUData.prettyPrint())
        
        # 3.1.5
        securityParameters.setComponentByPosition(
            0, securityEngineID, verifyConstraints=False
        )
        securityParameters.setComponentByPosition(
            1, snmpEngineBoots, verifyConstraints=False
        )
        securityParameters.setComponentByPosition(
            2, snmpEngineTime, verifyConstraints=False
        )
    
        # 3.1.7
        securityParameters.setComponentByPosition(
            3, usmUserName, verifyConstraints=False
        )

        # 3.1.8a
        if securityLevel == 3 or securityLevel == 2:
            if usmUserAuthProtocol in self.authServices:
                authHandler = self.authServices[usmUserAuthProtocol]
            else:
                raise error.StatusInformation(
                    errorIndication = errind.authenticationFailure
                    )

            # extra-wild hack to facilitate BER substrate in-place re-write
            securityParameters.setComponentByPosition(
                4, '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
                )

            debug.logger & debug.flagSM and debug.logger('__generateRequestOrResponseMsg: %s' % (securityParameters.prettyPrint(),))
            
            try:
                msg.setComponentByPosition(2, encoder.encode(securityParameters), verifyConstraints=False)
            except PyAsn1Error:
                debug.logger & debug.flagSM and debug.logger('__generateRequestOrResponseMsg: securityParameters serialization error: %s' % sys.exc_info()[1])
                raise error.StatusInformation(
                    errorIndication = errind.serializationError
                )

            debug.logger & debug.flagSM and debug.logger('__generateRequestOrResponseMsg: auth outgoing msg: %s' % msg.prettyPrint())

            try:
                wholeMsg = encoder.encode(msg)
            except PyAsn1Error:
                debug.logger & debug.flagSM and debug.logger('__generateRequestOrResponseMsg: msg serialization error: %s' % sys.exc_info()[1])
                raise error.StatusInformation(
                    errorIndication = errind.serializationError
                )

            authenticatedWholeMsg = authHandler.authenticateOutgoingMsg(
                usmUserAuthKeyLocalized, wholeMsg
                )

        # 3.1.8b
        else:
            securityParameters.setComponentByPosition(
                4, '', verifyConstraints=False
                )

            debug.logger & debug.flagSM and debug.logger('__generateRequestOrResponseMsg: %s' % (securityParameters.prettyPrint(),))

            try:
                msg.setComponentByPosition(2, encoder.encode(securityParameters), verifyConstraints=False)
            except PyAsn1Error:
                debug.logger & debug.flagSM and debug.logger('__generateRequestOrResponseMsg: securityParameters serialization error: %s' % sys.exc_info()[1])
                raise error.StatusInformation(
                    errorIndication = errind.serializationError
                )

            try:
                debug.logger & debug.flagSM and debug.logger('__generateRequestOrResponseMsg: plain outgoing msg: %s' % msg.prettyPrint())
                authenticatedWholeMsg = encoder.encode(msg)
            except PyAsn1Error:
                debug.logger & debug.flagSM and debug.logger('__generateRequestOrResponseMsg: msg serialization error: %s' % sys.exc_info()[1])
                raise error.StatusInformation(
                    errorIndication = errind.serializationError
                )

        debug.logger & debug.flagSM and debug.logger('__generateRequestOrResponseMsg: %s outgoing msg: %s' % (securityLevel > 1 and "authenticated" or "plain", debug.hexdump(authenticatedWholeMsg)))

        # 3.1.9
        return (
            msg.getComponentByPosition(2),
            authenticatedWholeMsg
            )
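
A recurring pattern in the example above is wrapping pyasn1 failures: each except PyAsn1Error handler logs sys.exc_info()[1], the exception instance currently being handled, and then raises a protocol-level StatusInformation instead. A minimal sketch of that shape, using hypothetical stand-ins (SerializationError, encode_or_report) rather than the pysnmp classes:

import sys

class SerializationError(Exception):
    """Hypothetical stand-in for error.StatusInformation(errind.serializationError)."""

def encode(obj):
    # stand-in for encoder.encode(); bytes() fails for objects it cannot convert
    return bytes(obj)

def encode_or_report(obj):
    try:
        return encode(obj)
    except Exception:
        # sys.exc_info()[1] is the exception instance currently being handled,
        # so the handler can log it without binding a name in the except clause
        print('serialization error: %s' % (sys.exc_info()[1],))
        raise SerializationError()

encode_or_report(b'ok')                  # returns b'ok'
try:
    encode_or_report(object())           # logs the underlying TypeError first
except SerializationError:
    pass

Reading the instance out of sys.exc_info() keeps the except clause free of a bound name, which is one plausible reason the original code prefers it over the Python 2-only "except PyAsn1Error, e" form.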

Example 6

Project: scalyr-agent-2
Source File: service.py
View license
    def processIncomingMsg(
        self,
        snmpEngine,
        messageProcessingModel,
        maxMessageSize,
        securityParameters,
        securityModel,
        securityLevel,
        wholeMsg,
        msg  # XXX 
        ):
        # 3.2.9 -- moved up here to be able to report
        # maxSizeResponseScopedPDU on error
        # (48 - maximum SNMPv3 header length)
        maxSizeResponseScopedPDU = int(maxMessageSize) - len(securityParameters) - 48

        debug.logger & debug.flagSM and debug.logger('processIncomingMsg: securityParameters %s' % debug.hexdump(securityParameters))

        # 3.2.1 
        try:
            securityParameters, rest = decoder.decode(
                securityParameters,
                asn1Spec=self.__securityParametersSpec
                )
        except PyAsn1Error:
            debug.logger & debug.flagSM and debug.logger('processIncomingMsg: %s' % (sys.exc_info()[1],))
            snmpInASNParseErrs, = snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder.importSymbols('__SNMPv2-MIB', 'snmpInASNParseErrs')
            snmpInASNParseErrs.syntax = snmpInASNParseErrs.syntax + 1
            raise error.StatusInformation(
                errorIndication=errind.parseError
            )

        debug.logger & debug.flagSM and debug.logger('processIncomingMsg: %s' % (securityParameters.prettyPrint(),))

        if eoo.endOfOctets.isSameTypeWith(securityParameters):
            raise error.StatusInformation(
                errorIndication=errind.parseError
            )

        # 3.2.2
        msgAuthoritativeEngineID = securityParameters.getComponentByPosition(0)
        securityStateReference = self._cache.push(
            msgUserName=securityParameters.getComponentByPosition(3)
            )

        debug.logger & debug.flagSM and debug.logger('processIncomingMsg: cache write securityStateReference %s by msgUserName %s' % (securityStateReference, securityParameters.getComponentByPosition(3)))
        
        scopedPduData = msg.getComponentByPosition(3)

        # Used for error reporting
        contextEngineId = snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder.importSymbols('__SNMP-FRAMEWORK-MIB', 'snmpEngineID')[0].syntax
        contextName = null

        # 3.2.3
        if msgAuthoritativeEngineID not in self.__timeline:
            debug.logger & debug.flagSM and debug.logger('processIncomingMsg: unknown securityEngineID %r' % (msgAuthoritativeEngineID,))
            if not msgAuthoritativeEngineID or  \
                    not 4 < len(msgAuthoritativeEngineID) < 33:
                # 3.2.3b
                usmStatsUnknownEngineIDs, = snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder.importSymbols('__SNMP-USER-BASED-SM-MIB', 'usmStatsUnknownEngineIDs')
                usmStatsUnknownEngineIDs.syntax = usmStatsUnknownEngineIDs.syntax+1
                debug.logger & debug.flagSM and debug.logger('processIncomingMsg: null or malformed msgAuthoritativeEngineID')
                pysnmpUsmDiscoverable, = snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder.importSymbols('__PYSNMP-USM-MIB', 'pysnmpUsmDiscoverable')
                if pysnmpUsmDiscoverable.syntax:
                    debug.logger & debug.flagSM and debug.logger('processIncomingMsg: starting snmpEngineID discovery procedure')

                    # Report original contextName
                    if scopedPduData.getName() != 'plaintext':
                        debug.logger & debug.flagSM and debug.logger('processIncomingMsg: scopedPduData not plaintext %s' % scopedPduData.prettyPrint())
                        raise error.StatusInformation(
                            errorIndication = errind.unknownEngineID
                            )

                    # 7.2.6.a.1 
                    scopedPdu = scopedPduData.getComponent()
                    contextEngineId = scopedPdu.getComponentByPosition(0)
                    contextName = scopedPdu.getComponentByPosition(1)

                    raise error.StatusInformation(
                        errorIndication = errind.unknownEngineID,
                        oid=usmStatsUnknownEngineIDs.name,
                        val=usmStatsUnknownEngineIDs.syntax,
                        securityStateReference=securityStateReference,
                        securityLevel=securityLevel,
                        contextEngineId=contextEngineId,
                        contextName=contextName,
                        scopedPDU=scopedPdu,
                        maxSizeResponseScopedPDU=maxSizeResponseScopedPDU
                        )
                else:
                    debug.logger & debug.flagSM and debug.logger('processIncomingMsg: will not discover EngineID')                    
                    # free securityStateReference XXX
                    raise error.StatusInformation(
                        errorIndication = errind.unknownEngineID
                        )

        snmpEngineID = snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder.importSymbols('__SNMP-FRAMEWORK-MIB', 'snmpEngineID')[0].syntax
 
        msgUserName = securityParameters.getComponentByPosition(3)

        debug.logger & debug.flagSM and debug.logger('processIncomingMsg: read from securityParams msgAuthoritativeEngineID %r msgUserName %r' % (msgAuthoritativeEngineID, msgUserName))
        
        if msgUserName:
            # 3.2.4
            try:
                ( usmUserName,
                  usmUserSecurityName,
                  usmUserAuthProtocol,
                  usmUserAuthKeyLocalized,
                  usmUserPrivProtocol,
                  usmUserPrivKeyLocalized ) = self.__getUserInfo(
                    snmpEngine.msgAndPduDsp.mibInstrumController,
                    msgAuthoritativeEngineID,
                    msgUserName
                )
                debug.logger & debug.flagSM and debug.logger('processIncomingMsg: read user info from LCD')
            except NoSuchInstanceError:
                pysnmpUsmDiscoverable, = snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder.importSymbols('__PYSNMP-USM-MIB', 'pysnmpUsmDiscoverable')
                __reportUnknownName = not pysnmpUsmDiscoverable.syntax
                if not __reportUnknownName:
                    try:
                        ( usmUserName,
                          usmUserSecurityName,
                          usmUserAuthProtocol,
                          usmUserAuthKeyLocalized,
                          usmUserPrivProtocol,
                          usmUserPrivKeyLocalized ) = self.__cloneUserInfo(
                            snmpEngine.msgAndPduDsp.mibInstrumController,
                            msgAuthoritativeEngineID,
                            msgUserName
                        )
                        debug.logger & debug.flagSM and debug.logger('processIncomingMsg: cloned user info')
                    except NoSuchInstanceError:
                        __reportUnknownName = 1
                debug.logger & debug.flagSM and debug.logger('processIncomingMsg: unknown securityEngineID %r msgUserName %r' % (msgAuthoritativeEngineID, msgUserName))
                if __reportUnknownName:
                    usmStatsUnknownUserNames, = snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder.importSymbols('__SNMP-USER-BASED-SM-MIB', 'usmStatsUnknownUserNames')
                    usmStatsUnknownUserNames.syntax = usmStatsUnknownUserNames.syntax+1
                    raise error.StatusInformation(
                        errorIndication = errind.unknownSecurityName,
                        oid = usmStatsUnknownUserNames.name,
                        val = usmStatsUnknownUserNames.syntax,
                        securityStateReference=securityStateReference,
                        securityLevel=securityLevel,
                        contextEngineId=contextEngineId,
                        contextName=contextName,
                        maxSizeResponseScopedPDU=maxSizeResponseScopedPDU
                        )
            except PyAsn1Error:
                debug.logger & debug.flagSM and debug.logger('processIncomingMsg: %s' % (sys.exc_info()[1],))
                snmpInGenErrs, = snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder.importSymbols('__SNMPv2-MIB', 'snmpInGenErrs')
                snmpInGenErrs.syntax = snmpInGenErrs.syntax + 1
                raise error.StatusInformation(
                    errorIndication=errind.invalidMsg
                )
        else:
            # empty username used for engineID discovery
            usmUserName = usmUserSecurityName = null
            usmUserAuthProtocol = noauth.NoAuth.serviceID
            usmUserPrivProtocol = nopriv.NoPriv.serviceID
            usmUserAuthKeyLocalized = usmUserPrivKeyLocalized = None

        debug.logger & debug.flagSM and debug.logger('processIncomingMsg: now have usmUserName %r usmUserSecurityName %r usmUserAuthProtocol %r usmUserPrivProtocol %r for msgUserName %r' % (usmUserName, usmUserSecurityName, usmUserAuthProtocol, usmUserPrivProtocol, msgUserName))

        # 3.2.11 (moved up here to let Reports be authenticated & encrypted)
        self._cache.pop(securityStateReference)
        securityStateReference = self._cache.push(
            msgUserName=securityParameters.getComponentByPosition(3),
            usmUserSecurityName=usmUserSecurityName,
            usmUserAuthProtocol=usmUserAuthProtocol,
            usmUserAuthKeyLocalized=usmUserAuthKeyLocalized,
            usmUserPrivProtocol=usmUserPrivProtocol,
            usmUserPrivKeyLocalized=usmUserPrivKeyLocalized
        )

        msgAuthoritativeEngineBoots = securityParameters.getComponentByPosition(1)
        msgAuthoritativeEngineTime = securityParameters.getComponentByPosition(2)

        # 3.2.5
        if msgAuthoritativeEngineID == snmpEngineID:
            # Authoritative SNMP engine: make sure securityLevel is sufficient
            __badSecIndication = None
            if securityLevel == 3:
                if usmUserAuthProtocol == noauth.NoAuth.serviceID:
                    __badSecIndication = 'authPriv wanted while auth not expected'
                if usmUserPrivProtocol == nopriv.NoPriv.serviceID:
                    __badSecIndication = 'authPriv wanted while priv not expected'
            elif securityLevel == 2:
                if usmUserAuthProtocol == noauth.NoAuth.serviceID:
                    __badSecIndication = 'authNoPriv wanted while auth not expected'
                if usmUserPrivProtocol != nopriv.NoPriv.serviceID:
                    # 4 (discovery phase always uses authenticated messages)
                    if msgAuthoritativeEngineBoots or msgAuthoritativeEngineTime:
                        __badSecIndication = 'authNoPriv wanted while priv expected'

            elif securityLevel == 1:
                if usmUserAuthProtocol != noauth.NoAuth.serviceID:
                    __badSecIndication = 'noAuthNoPriv wanted while auth expected'
                if usmUserPrivProtocol != nopriv.NoPriv.serviceID:
                    __badSecIndication = 'noAuthNoPriv wanted while priv expected'
            if __badSecIndication:
                usmStatsUnsupportedSecLevels, = snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder.importSymbols('__SNMP-USER-BASED-SM-MIB', 'usmStatsUnsupportedSecLevels')
                usmStatsUnsupportedSecLevels.syntax = usmStatsUnsupportedSecLevels.syntax + 1
                debug.logger & debug.flagSM and debug.logger('processIncomingMsg: reporting inappropriate security level for user %s: %s' % (msgUserName, __badSecIndication))
                raise error.StatusInformation(
                    errorIndication=errind.unsupportedSecurityLevel,
                    oid=usmStatsUnsupportedSecLevels.name,
                    val=usmStatsUnsupportedSecLevels.syntax,
                    securityStateReference=securityStateReference,
                    securityLevel=securityLevel,
                    contextEngineId=contextEngineId,
                    contextName=contextName,
                    maxSizeResponseScopedPDU=maxSizeResponseScopedPDU
                    )

        # 3.2.6
        if securityLevel == 3 or securityLevel == 2:
            if usmUserAuthProtocol in self.authServices:
                authHandler = self.authServices[usmUserAuthProtocol]
            else:
                raise error.StatusInformation(
                    errorIndication = errind.authenticationFailure
                    )
            try:
                authenticatedWholeMsg = authHandler.authenticateIncomingMsg(
                    usmUserAuthKeyLocalized,
                    securityParameters.getComponentByPosition(4),
                    wholeMsg
                    )
            except error.StatusInformation:
                usmStatsWrongDigests, = snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder.importSymbols('__SNMP-USER-BASED-SM-MIB', 'usmStatsWrongDigests')
                usmStatsWrongDigests.syntax = usmStatsWrongDigests.syntax+1
                raise error.StatusInformation(
                    errorIndication = errind.authenticationFailure,
                    oid=usmStatsWrongDigests.name,
                    val=usmStatsWrongDigests.syntax,
                    securityStateReference=securityStateReference,
                    securityLevel=securityLevel,
                    contextEngineId=contextEngineId,
                    contextName=contextName,
                    maxSizeResponseScopedPDU=maxSizeResponseScopedPDU
                    )
            
            debug.logger & debug.flagSM and debug.logger('processIncomingMsg: incoming msg authenticated')

            if msgAuthoritativeEngineID:
                # 3.2.3a moved down here to execute only for authed msg
                self.__timeline[msgAuthoritativeEngineID] = (
                    securityParameters.getComponentByPosition(1),
                    securityParameters.getComponentByPosition(2),
                    securityParameters.getComponentByPosition(2),
                    int(time.time())
                    )
                
                expireAt = int(self.__expirationTimer + 300 / snmpEngine.transportDispatcher.getTimerResolution())
                if expireAt not in self.__timelineExpQueue:
                    self.__timelineExpQueue[expireAt] = []
                self.__timelineExpQueue[expireAt].append(
                    msgAuthoritativeEngineID
                    )
                    
                debug.logger & debug.flagSM and debug.logger('processIncomingMsg: store timeline for securityEngineID %r' % (msgAuthoritativeEngineID,))
            
        # 3.2.7
        if securityLevel == 3 or securityLevel == 2:
            if msgAuthoritativeEngineID == snmpEngineID:
                # Authoritative SNMP engine: use local notion (SF bug #1649032)
                ( snmpEngineBoots,
                  snmpEngineTime ) = snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder.importSymbols('__SNMP-FRAMEWORK-MIB', 'snmpEngineBoots', 'snmpEngineTime')
                snmpEngineBoots = snmpEngineBoots.syntax
                snmpEngineTime = snmpEngineTime.syntax.clone()
                idleTime = 0
                debug.logger & debug.flagSM and debug.logger('processIncomingMsg: read snmpEngineBoots (%s), snmpEngineTime (%s) from LCD' % (snmpEngineBoots, snmpEngineTime))
            else:
                # Non-authoritative SNMP engine: use cached estimates
                if msgAuthoritativeEngineID in self.__timeline:
                    ( snmpEngineBoots,
                      snmpEngineTime,
                      latestReceivedEngineTime,
                      latestUpdateTimestamp ) = self.__timeline[
                        msgAuthoritativeEngineID
                        ]
                    # time passed since last talk with this SNMP engine
                    idleTime = int(time.time())-latestUpdateTimestamp
                    debug.logger & debug.flagSM and debug.logger('processIncomingMsg: read timeline snmpEngineBoots %s snmpEngineTime %s for msgAuthoritativeEngineID %r, idle time %s secs' % (snmpEngineBoots, snmpEngineTime, msgAuthoritativeEngineID, idleTime))
                else:
                    raise error.ProtocolError('Peer SNMP engine info missing')

            # 3.2.7a
            if msgAuthoritativeEngineID == snmpEngineID:
                if snmpEngineBoots == 2147483647 or \
                   snmpEngineBoots != msgAuthoritativeEngineBoots or \
                   abs(idleTime + int(snmpEngineTime) - \
                       int(msgAuthoritativeEngineTime)) > 150:
                    usmStatsNotInTimeWindows, = snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder.importSymbols('__SNMP-USER-BASED-SM-MIB', 'usmStatsNotInTimeWindows')
                    usmStatsNotInTimeWindows.syntax = usmStatsNotInTimeWindows.syntax+1
                    raise error.StatusInformation(
                        errorIndication = errind.notInTimeWindow,
                        oid=usmStatsNotInTimeWindows.name,
                        val=usmStatsNotInTimeWindows.syntax,
                        securityStateReference=securityStateReference,
                        securityLevel=2,
                        contextEngineId=contextEngineId,
                        contextName=contextName,
                        maxSizeResponseScopedPDU=maxSizeResponseScopedPDU
                        )
            # 3.2.7b
            else:
                # 3.2.7b.1
                if msgAuthoritativeEngineBoots > snmpEngineBoots or \
                   msgAuthoritativeEngineBoots == snmpEngineBoots and \
                   msgAuthoritativeEngineTime > latestReceivedEngineTime:
                    self.__timeline[msgAuthoritativeEngineID] = (
                        msgAuthoritativeEngineBoots,
                        msgAuthoritativeEngineTime,
                        msgAuthoritativeEngineTime,
                        int(time.time())
                        )
                    expireAt = int(self.__expirationTimer + 300 / snmpEngine.transportDispatcher.getTimerResolution())
                    if expireAt not in self.__timelineExpQueue:
                        self.__timelineExpQueue[expireAt] = []
                    self.__timelineExpQueue[expireAt].append(
                        msgAuthoritativeEngineID
                        )

                    debug.logger & debug.flagSM and debug.logger('processIncomingMsg: stored timeline msgAuthoritativeEngineBoots %s msgAuthoritativeEngineTime %s for msgAuthoritativeEngineID %r' % (msgAuthoritativeEngineBoots, msgAuthoritativeEngineTime, msgAuthoritativeEngineID))
                    
                # 3.2.7b.2
                if snmpEngineBoots == 2147483647 or \
                   msgAuthoritativeEngineBoots < snmpEngineBoots or \
                   msgAuthoritativeEngineBoots == snmpEngineBoots and \
                   abs(idleTime + int(snmpEngineTime) - \
                       int(msgAuthoritativeEngineTime)) > 150:
                    raise error.StatusInformation(
                        errorIndication = errind.notInTimeWindow
                        )

        # 3.2.8a
        if securityLevel == 3:
            if usmUserPrivProtocol in self.privServices:
                privHandler = self.privServices[usmUserPrivProtocol]
            else:
                raise error.StatusInformation(
                    errorIndication = errind.decryptionError
                    )
            encryptedPDU = scopedPduData.getComponentByPosition(1)
            if encryptedPDU is None: # no ciphertext
                raise error.StatusInformation(
                    errorIndication = errind.decryptionError
                    )

            try:
                decryptedData = privHandler.decryptData(
                    usmUserPrivKeyLocalized,
                        ( securityParameters.getComponentByPosition(1),
                          securityParameters.getComponentByPosition(2),
                          securityParameters.getComponentByPosition(5) ),
                        encryptedPDU
                    )
                debug.logger & debug.flagSM and debug.logger('processIncomingMsg: PDU deciphered into %s' % debug.hexdump(decryptedData))
            except error.StatusInformation:
                usmStatsDecryptionErrors, = snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder.importSymbols('__SNMP-USER-BASED-SM-MIB', 'usmStatsDecryptionErrors')
                usmStatsDecryptionErrors.syntax = usmStatsDecryptionErrors.syntax+1
                raise error.StatusInformation(
                    errorIndication = errind.decryptionError,
                    oid=usmStatsDecryptionErrors.name,
                    val=usmStatsDecryptionErrors.syntax,
                    securityStateReference=securityStateReference,
                    securityLevel=securityLevel,
                    contextEngineId=contextEngineId,
                    contextName=contextName,
                    maxSizeResponseScopedPDU=maxSizeResponseScopedPDU
                    )
            scopedPduSpec = scopedPduData.setComponentByPosition(0).getComponentByPosition(0)
            try:
                scopedPDU, rest = decoder.decode(
                    decryptedData, asn1Spec=scopedPduSpec
                    )
            except PyAsn1Error:
                debug.logger & debug.flagSM and debug.logger('processIncomingMsg: scopedPDU decoder failed %s' % sys.exc_info()[0])                
                raise error.StatusInformation(
                    errorIndication = errind.decryptionError
                    )

            if eoo.endOfOctets.isSameTypeWith(scopedPDU):
                raise error.StatusInformation(
                    errorIndication=errind.decryptionError
                )
        else:
            # 3.2.8b
            scopedPDU = scopedPduData.getComponentByPosition(0)
            if scopedPDU is None:  # no plaintext
                raise error.StatusInformation(
                    errorIndication = errind.decryptionError
                    )

        debug.logger & debug.flagSM and debug.logger('processIncomingMsg: scopedPDU decoded %s' % scopedPDU.prettyPrint()) 

        # 3.2.10
        securityName = usmUserSecurityName
        
        debug.logger & debug.flagSM and debug.logger('processIncomingMsg: cached msgUserName %s info by securityStateReference %s' % (msgUserName, securityStateReference))
        
        # Delayed to include details
        if not msgUserName and not msgAuthoritativeEngineID:
            usmStatsUnknownUserNames, = snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder.importSymbols('__SNMP-USER-BASED-SM-MIB', 'usmStatsUnknownUserNames')
            usmStatsUnknownUserNames.syntax = usmStatsUnknownUserNames.syntax+1
            raise error.StatusInformation(
                errorIndication=errind.unknownSecurityName,
                oid=usmStatsUnknownUserNames.name,
                val=usmStatsUnknownUserNames.syntax,
                securityStateReference=securityStateReference,
                securityEngineID=msgAuthoritativeEngineID,
                securityLevel=securityLevel,
                contextEngineId=contextEngineId,
                contextName=contextName,
                maxSizeResponseScopedPDU=maxSizeResponseScopedPDU,
                PDU=scopedPDU
                )

        # 3.2.12
        return ( msgAuthoritativeEngineID,
                 securityName,
                 scopedPDU,
                 maxSizeResponseScopedPDU,
                 securityStateReference )
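
Example 6 touches both halves of the tuple: most debug lines interpolate sys.exc_info()[1] (the exception value), while the scopedPDU decode failure logs sys.exc_info()[0] (only the exception class). A short illustration of the difference, with a hypothetical log() helper standing in for the guarded debug.logger calls:

import sys

def log(msg):
    # hypothetical stand-in for the "debug.logger & debug.flagSM and ..." guard
    print('processIncomingMsg: %s' % msg)

try:
    int('not a number')
except ValueError:
    exc_type, exc_value, _ = sys.exc_info()
    log('decoder failed %s' % exc_type)    # the exception class only
    log('decoder failed %s' % exc_value)   # the exception message/value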

Example 7

Project: jirash
Source File: Server.py
View license
    def do_POST(self):
        global _contexts
        
        status = 500
        try:
            if self.server.config.dumpHeadersIn:
                s = 'Incoming HTTP headers'
                debugHeader(s)
                print self.raw_requestline.strip()
                print "\n".join(map (lambda x: x.strip(),
                    self.headers.headers))
                debugFooter(s)

            data = self.rfile.read(int(self.headers["Content-length"]))

            if self.server.config.dumpSOAPIn:
                s = 'Incoming SOAP'
                debugHeader(s)
                print data,
                if data[-1] != '\n':
                    print
                debugFooter(s)

            (r, header, body, attrs) = \
                parseSOAPRPC(data, header = 1, body = 1, attrs = 1)

            method = r._name
            args   = r._aslist()
            kw     = r._asdict()

            if Config.simplify_objects:
                args = simplify(args)
                kw = simplify(kw)

            # Handle mixed named and unnamed arguments by assuming
            # that all arguments with names of the form "v[0-9]+"
            # are unnamed and should be passed in numeric order, while
            # other arguments are named and should be passed using
            # that name.

            # This is a non-standard extension to the SOAP protocol,
            # but is supported by Apache AXIS.

            # It is enabled by default.  To disable, set
            # Config.specialArgs to False.


            ordered_args = {}
            named_args   = {}

            if Config.specialArgs: 
                
                for (k,v) in  kw.items():

                    if k[0]=="v":
                        try:
                            i = int(k[1:])
                            ordered_args[i] = v
                        except ValueError:
                            named_args[str(k)] = v

                    else:
                        named_args[str(k)] = v

            # We have to decide namespace precedence
            # I'm happy with the following scenario
            # if r._ns is specified use it, if not check for
            # a path, if it's specified convert it and use it as the
            # namespace. If both are specified, use r._ns.
            
            ns = r._ns

            if len(self.path) > 1 and not ns:
                ns = self.path.replace("/", ":")
                if ns[0] == ":": ns = ns[1:]
            
            # authorization method
            a = None

            keylist = ordered_args.keys()
            keylist.sort()

            # create list in proper order w/o names
            tmp = map( lambda x: ordered_args[x], keylist)
            ordered_args = tmp

            #print '<-> Argument Matching Yielded:'
            #print '<-> Ordered Arguments:' + str(ordered_args)
            #print '<-> Named Arguments  :' + str(named_args)
             
            resp = ""
            
            # For fault messages
            if ns:
                nsmethod = "%s:%s" % (ns, method)
            else:
                nsmethod = method

            try:
                # First look for registered functions
                if self.server.funcmap.has_key(ns) and \
                    self.server.funcmap[ns].has_key(method):
                    f = self.server.funcmap[ns][method]

                    # look for the authorization method
                    if self.server.config.authMethod != None:
                        authmethod = self.server.config.authMethod
                        if self.server.funcmap.has_key(ns) and \
                               self.server.funcmap[ns].has_key(authmethod):
                            a = self.server.funcmap[ns][authmethod]
                else:
                    # Now look at registered objects
                    # Check for nested attributes. This works even if
                    # there are none, because the split will return
                    # [method]
                    f = self.server.objmap[ns]
                    
                    # Look for the authorization method
                    if self.server.config.authMethod != None:
                        authmethod = self.server.config.authMethod
                        if hasattr(f, authmethod):
                            a = getattr(f, authmethod)

                    # then continue looking for the method
                    l = method.split(".")
                    for i in l:
                        f = getattr(f, i)
            except:
                info = sys.exc_info()
                try:
                    resp = buildSOAP(faultType("%s:Client" % NS.ENV_T,
                                               "Method Not Found",
                                               "%s : %s %s %s" % (nsmethod,
                                                                  info[0],
                                                                  info[1],
                                                                  info[2])),
                                     encoding = self.server.encoding,
                                     config = self.server.config)
                finally:
                    del info
                status = 500
            else:
                try:
                    if header:
                        x = HeaderHandler(header, attrs)

                    fr = 1

                    # call context bookkeeping
                    # We're stuffing the method into the soapaction if there
                    # isn't one; someday we'll set that on the client
                    # and it won't be necessary here.
                    # For now we're doing both.

                    if "SOAPAction".lower() not in self.headers.keys() or \
                       self.headers["SOAPAction"] == "\"\"":
                        self.headers["SOAPAction"] = method
                        
                    thread_id = thread.get_ident()
                    _contexts[thread_id] = SOAPContext(header, body,
                                                       attrs, data,
                                                       self.connection,
                                                       self.headers,
                                                       self.headers["SOAPAction"])

                    # Do an authorization check
                    if a != None:
                        if not apply(a, (), {"_SOAPContext" :
                                             _contexts[thread_id] }):
                            raise faultType("%s:Server" % NS.ENV_T,
                                            "Authorization failed.",
                                            "%s" % nsmethod)
                    
                    # If it's wrapped, some special action may be needed
                    if isinstance(f, MethodSig):
                        c = None
                    
                        if f.context:  # retrieve context object
                            c = _contexts[thread_id]

                        if Config.specialArgs:
                            if c:
                                named_args["_SOAPContext"] = c
                            fr = apply(f, ordered_args, named_args)
                        elif f.keywords:
                            # This is lame, but have to de-unicode
                            # keywords
                            
                            strkw = {}
                            
                            for (k, v) in kw.items():
                                strkw[str(k)] = v
                            if c:
                                strkw["_SOAPContext"] = c
                            fr = apply(f, (), strkw)
                        elif c:
                            fr = apply(f, args, {'_SOAPContext':c})
                        else:
                            fr = apply(f, args, {})

                    else:
                        if Config.specialArgs:
                            fr = apply(f, ordered_args, named_args)
                        else:
                            fr = apply(f, args, {})

                    
                    if type(fr) == type(self) and \
                        isinstance(fr, voidType):
                        resp = buildSOAP(kw = {'%sResponse' % method: fr},
                            encoding = self.server.encoding,
                            config = self.server.config)
                    else:
                        resp = buildSOAP(kw =
                            {'%sResponse' % method: {'Result': fr}},
                            encoding = self.server.encoding,
                            config = self.server.config)

                    # Clean up _contexts
                    if _contexts.has_key(thread_id):
                        del _contexts[thread_id]
                        
                except Exception, e:
                    import traceback
                    info = sys.exc_info()

                    try:
                        if self.server.config.dumpFaultInfo:
                            s = 'Method %s exception' % nsmethod
                            debugHeader(s)
                            traceback.print_exception(info[0], info[1],
                                                      info[2])
                            debugFooter(s)

                        if isinstance(e, faultType):
                            f = e
                        else:
                            f = faultType("%s:Server" % NS.ENV_T,
                                          "Method Failed",
                                          "%s" % nsmethod)

                        if self.server.config.returnFaultInfo:
                            f._setDetail("".join(traceback.format_exception(
                                info[0], info[1], info[2])))
                        elif not hasattr(f, 'detail'):
                            f._setDetail("%s %s" % (info[0], info[1]))
                    finally:
                        del info

                    resp = buildSOAP(f, encoding = self.server.encoding,
                       config = self.server.config)
                    status = 500
                else:
                    status = 200
        except faultType, e:
            import traceback
            info = sys.exc_info()
            try:
                if self.server.config.dumpFaultInfo:
                    s = 'Received fault exception'
                    debugHeader(s)
                    traceback.print_exception(info[0], info[1],
                        info[2])
                    debugFooter(s)

                if self.server.config.returnFaultInfo:
                    e._setDetail("".join(traceback.format_exception(
                            info[0], info[1], info[2])))
                elif not hasattr(e, 'detail'):
                    e._setDetail("%s %s" % (info[0], info[1]))
            finally:
                del info

            resp = buildSOAP(e, encoding = self.server.encoding,
                config = self.server.config)
            status = 500
        except Exception, e:
            # internal error, report as HTTP server error

            if self.server.config.dumpFaultInfo:
                s = 'Internal exception %s' % e
                import traceback
                debugHeader(s)
                info = sys.exc_info()
                try:
                    traceback.print_exception(info[0], info[1], info[2])
                finally:
                    del info

                debugFooter(s)

            self.send_response(500)
            self.end_headers()

            if self.server.config.dumpHeadersOut and \
                self.request_version != 'HTTP/0.9':
                s = 'Outgoing HTTP headers'
                debugHeader(s)
                if self.responses.has_key(status):
                    s = ' ' + self.responses[status][0]
                else:
                    s = ''
                print "%s %d%s" % (self.protocol_version, 500, s)
                print "Server:", self.version_string()
                print "Date:", self.__last_date_time_string
                debugFooter(s)
        else:
            # got a valid SOAP response
            self.send_response(status)

            t = 'text/xml'
            if self.server.encoding != None:
                t += '; charset=%s' % self.server.encoding
            self.send_header("Content-type", t)
            self.send_header("Content-length", str(len(resp)))
            self.end_headers()

            if self.server.config.dumpHeadersOut and \
                self.request_version != 'HTTP/0.9':
                s = 'Outgoing HTTP headers'
                debugHeader(s)
                if self.responses.has_key(status):
                    s = ' ' + self.responses[status][0]
                else:
                    s = ''
                print "%s %d%s" % (self.protocol_version, status, s)
                print "Server:", self.version_string()
                print "Date:", self.__last_date_time_string
                print "Content-type:", t
                print "Content-length:", len(resp)
                debugFooter(s)

            if self.server.config.dumpSOAPOut:
                s = 'Outgoing SOAP'
                debugHeader(s)
                print resp,
                if resp[-1] != '\n':
                    print
                debugFooter(s)

            self.wfile.write(resp)
            self.wfile.flush()

            # We should be able to shut down both a regular and an SSL
            # connection, but under Python 2.1, calling shutdown on an
            # SSL connection drops the output, hence this work-around.
            # This should be investigated more someday.

            if self.server.config.SSLserver and \
                isinstance(self.connection, SSL.Connection):
                self.connection.set_shutdown(SSL.SSL_SENT_SHUTDOWN |
                    SSL.SSL_RECEIVED_SHUTDOWN)
            else:
                self.connection.shutdown(1)

        def do_GET(self):
            
            #print 'command        ', self.command
            #print 'path           ', self.path
            #print 'request_version', self.request_version
            #print 'headers'
            #print '   type    ', self.headers.type
            #print '   maintype', self.headers.maintype
            #print '   subtype ', self.headers.subtype
            #print '   params  ', self.headers.plist
            
            path = self.path.lower()
            if path.endswith('wsdl'):
                method = 'wsdl'
                function = namespace = None
                if self.server.funcmap.has_key(namespace) \
                        and self.server.funcmap[namespace].has_key(method):
                    function = self.server.funcmap[namespace][method]
                else: 
                    if namespace in self.server.objmap.keys():
                        function = self.server.objmap[namespace]
                        l = method.split(".")
                        for i in l:
                            function = getattr(function, i)
            
                if function:
                    self.send_response(200)
                    self.send_header("Content-type", 'text/plain')
                    self.end_headers()
                    response = apply(function, ())
                    self.wfile.write(str(response))
                    return
            
            # return error
            self.send_response(200)
            self.send_header("Content-type", 'text/html')
            self.end_headers()
            self.wfile.write('''\
<head>
<title>Error!</title>
</head>

<body>
<h1>Oops!</h1>

<p>
  This server supports HTTP GET requests only for the purpose of
  obtaining Web Services Description Language (WSDL) for a specific
  service.

  Either you requested a URL that does not end in "wsdl" or this
  server does not implement a wsdl method.
</p>


</body>''')
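
Example 7 shows the classic three-element usage: sys.exc_info() is captured into a local, passed to traceback.print_exception()/format_exception() to build the SOAP fault detail, and then explicitly deleted in a finally block. A condensed sketch of that fault path, with a hypothetical handle_request() in place of the SOAP dispatch:

import sys
import traceback

def handle_request(fn):
    # hypothetical dispatcher; fn plays the role of the resolved SOAP method
    try:
        return fn()
    except Exception:
        info = sys.exc_info()
        try:
            # traceback.format_exception() takes the full (type, value, tb) triple
            detail = ''.join(traceback.format_exception(info[0], info[1], info[2]))
            print('Method Failed:\n%s' % detail)
        finally:
            # the traceback holds frame references; dropping the name promptly
            # keeps those frames (and their locals) from being kept alive
            del info
        return None

handle_request(lambda: 1 / 0)   # prints the formatted ZeroDivisionError

The "del info" in a finally block mirrors the long-standing advice in the sys.exc_info() documentation: storing the traceback keeps its frames alive, so the reference is dropped as soon as the fault text has been formatted.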

Example 8

Project: dt
Source File: dt.py
View license
def dt():
    '''darktable sqlite3 database maintenance'''
    
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description=textwrap.dedent(
            '''
            Darktable filmroll/image/metadata maintenance:
            
            mv <src> <dest>                             : rename film roll or image
            query <dir|file> [ <dir|file> ... ]         : dump details of an object
                                                          (best on 132+ column term)
            scan <dir|file> [ <dir|file> ... ]          : just report if the object 
                                                          doesn't exist in the db - 
                                                          .xmp and .meta extensions 
                                                          will be stripped before the 
                                                          check
            set <image> <var> <val> [ <var> <val> ... ] : modify metadata (Note 6)
            
            For the 'set' command, the valid var's are:
            
            image table:
                datetime_taken         "YYYY:MM:DD HH:MM:SS"
                caption                "text"
                description            "text"
                license                "text"
                longitude              float (-180.0 to +180.0, positive E) (Note 3)
                latitude               float (-90.0 to +90.0, positive N) (Note 3)
                                       
            meta_data table:
                creator                "text"
                publisher              "text"
                title                  "text"
                description            "text"
                rights                 "text"
            history table:
                crop                   "angle,cx,cy,cw,ch" (Note 4)
                                       
            tags and tagged_images tables:
                tag                    "text" (Note 5)
                                       
            NOTES:
              1) before modifying the database dt will check to make sure darktable 
                 is not running
              2) if a modification is going to be made to the database a backup will 
                 be made first: <library.db>.<timestamp>, unless you specify --no-backup
              3) to convert degrees minutes seconds to a float: 
                    f = degrees + ( minutes + seconds/60. ) / 60.
                 and change sign if W or S.
              4) angle is the rotation angle before crop, positive clockwise,
                 cx, cy, cw, ch are the normalized crop coordinates (0.0 to 1.0),
                 cx,cy is lower left(?) and cw,ch are new width and height.
              5) you may add tags but not modify or remove existing tags
              6) if any of the variables you want to use in the set command begin with
                 a "-", add a "--" option before the file name so that the python 
                 argparser doesn't try to interpret them 
                 (i.e. set -- file.jpg crop -1,0,0,1,1)
            
            '''))
    
    global dbVer
    
    parser.add_argument('-d', '--db', dest='dtdb', action='store',
        default=os.getenv("HOME")+os.sep+'.config'+os.sep+'darktable'+os.sep+'library.db',
        help='Darktable database path, default= ~/.config/darktable/library.db')
    
    parser.add_argument('--no-backup', dest='doBackup', default=True, action='store_false',
        help='don\'t backup the Darktable database before modifications')
    
    parser.add_argument('cmd', metavar='<command>', type=str,
        help='mv, query, set, etc')

    parser.parse_args(['--', '-f'])
    
    parser.add_argument('files', metavar='<parm>', type=str, nargs='+')
    
    args = parser.parse_args()
    
    # connect to the darktable library.db file
    
    try:
        if os.path.exists(args.dtdb):
            conn = sqlite3.connect(args.dtdb)
        else:
            raise Exception('%s does not exist'%args.dtdb)
        
        
        conn.row_factory = sqlite3.Row
        conn.text_factory = str
        c = conn.cursor()
        '''
            Here is the darktable settings blob:
            
            typedef struct dt_ctl_settings_t
                {
                  // TODO: remove most of these options, maybe the whole struct?
                  // global
                  int32_t version;
                  char dbname[512];
                
                  int32_t lib_image_mouse_over_id;
                
                  // synchronized navigation
                  float dev_zoom_x, dev_zoom_y, dev_zoom_scale;
                  dt_dev_zoom_t dev_zoom;
                  int dev_closeup;
                }
                dt_ctl_settings_t;
        '''
        try:
            vblob = c.execute('select settings from settings').fetchone()[0]
            dbVer = getVersion(vblob)
            if dbVer != 34 and dbVer != 36:
                raise Exception('%s '%args.dtdb + 'version %d is not 34 or 36!'%dbVer)
            
        except:
            #print 'error: ',sys.exc_info()
            raise Exception('%s '%args.dtdb + 'does not appear to be a Darktable database')

    except:
        print 'darktable db error: %s '%sys.exc_info()[1]
        sys.exit()
    
    
    
    if args.cmd == 'query':
        for qFile in args.files:
            try:
                query(conn,os.path.abspath(qFile))
            except:
                print 'error: [%s]'%sys.exc_info()[1]
                
    elif args.cmd == 'scan':
        for qFile in args.files:
            try:
                scan(conn,os.path.abspath(qFile))
            except:
                print sys.exc_info()[1]
                
    elif args.cmd == 'mv':
        try:
            if len(args.files) != 2:
                raise Exception('mv <from> <to>')
            else:
                mvSrc  = os.path.abspath(args.files[0])
                mvDest = os.path.abspath(args.files[1])             
                
            is_running('darktable',os.getenv('USER'))
            do_backup(args.dtdb,args.doBackup)
            dt_mv(conn,mvSrc,mvDest)
        except:
            print 'error: %s'%sys.exc_info()[1]
            sys.exit()
                
    elif args.cmd == 'set':
        try:
            if len(args.files) < 3:
                raise Exception('set <image> <variable> <value> ...')
            else:
                imRoll = os.path.dirname(os.path.abspath(args.files[0]))
                imName = os.path.basename(os.path.abspath(args.files[0]))
                frId = fr_getId(conn,imRoll)
                imId = im_getId(conn,frId,imName)
                # need to make each tag key unique so I can create a dict using it as key
                for i in range(1,len(args.files),2):
                    if args.files[i] == 'tag':
                        args.files[i] = 'tag---%d'%i 
       
                imVars = dict(zip(args.files[1::2], args.files[2::2]))
                # check that # of keys = # of values
                if len(imVars)*2 != len(args.files)-1:
                    raise Exception('Failed to convert %s to dict.  Missing value?'%args.files[1:])
            
            is_running('darktable',os.getenv('USER'))
            do_backup(args.dtdb,args.doBackup)
            
            metaKeys = [ 'creator', 'publisher', 'title', 'description', 'rights' ]
            imageKeys = [ 'datetime_taken', 'caption', 'description', 'license', 'longitude', 'latitude' ]
            historyKeys = [ 'crop' ]
            #print 'imVars: ',imVars
            for k,v in imVars.iteritems():
                if k in metaKeys:
                    im_setMeta(conn,imId,k,v)
                if k in imageKeys:
                    im_setImage(conn,imId,k,v)
                if k in historyKeys: # there is only one valid right now ....
                    im_setHistory(conn,imId,v)
                if k.startswith('tag---'):
                    im_setTag(conn,imId,v)
            
        except:
            print 'error: ',sys.exc_info()
            sys.exit()           
    else:
        print 'command??'          
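
Example 8 relies on bare except: clauses and prints only sys.exc_info()[1], so the command-line tool reports a one-line message instead of a traceback before exiting. The same shape in isolation, with a hypothetical open_db() standing in for the sqlite3 connection step:

import sys

def open_db(path):
    # hypothetical stand-in for locating and opening library.db
    if path != 'library.db':
        raise Exception('%s does not exist' % path)
    return object()

try:
    conn = open_db('missing.db')
except Exception:
    # show only the message of whatever was raised, then stop
    print('darktable db error: %s' % sys.exc_info()[1])
    sys.exit(1)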

Example 9

Project: xraylarch
Source File: builtins.py
View license
def _addplugin(plugin, _larch=None, **kws):
    """add plugin components from plugin directory"""
    if _larch is None:
        raise Warning("cannot add plugins. larch broken?")
    write = _larch.writer.write
    errmsg = 'is not a valid larch plugin\n'
    pjoin = os.path.join
    path = site_config.plugins_path
    _sysconf = _larch.symtable._sys.config
    if not hasattr(_larch.symtable._sys, 'import_ok'):
        _larch.symtable._sys.import_ok  = True
    if not _larch.symtable._sys.import_ok:
        return

    if not hasattr(_sysconf, 'plugins_path'):
        _sysconf.plugins_path = site_config.plugins_path

    def _find_plugin(plugin, p_path):
        """find the plugin from path
        returns True, package name for packages
                False, (fh, modpath, desc) for imported modules
                None, None for Not Found
        """
        if not _larch.symtable._sys.import_ok:
            return

        if plugin == '__init__':
            return None, None
        mod, is_pkg = None, False
        try:
            mod = imp.find_module(plugin, [p_path])
        except ImportError:
            is_pkg = os.path.isdir(pjoin(p_path, plugin))

        if is_pkg or (mod is not None and
                      mod[2][2] == imp.PKG_DIRECTORY):
            return True, pjoin(p_path, plugin)
        elif mod is not None:
            return False, mod
        else:
            return None, None

    def on_error(msg):
        _larch.raise_exception(None, exc=ImportError, msg=msg)
        _larch.symtable._sys.import_ok = False

    def _check_requirements(ppath):
        """check for requirements.txt, return True only if all
        requirements are met
        """
        req_file = os.path.abspath(os.path.join(ppath, PLUGINSREQ))
        if not os.path.exists(req_file):
            return True
        if os.path.exists(req_file):
            with open(req_file, 'r') as fh:
                for line in fh.readlines():
                    line = line[:-1]
                    if line.startswith('#'):  continue
                    match = REQMATCH(line)
                    if match is None:
                        continue
                    ok = False
                    modname, cmp, req_vers = match.groups()
                    try:
                        mod = __import__(modname)
                        vers = getattr(mod, '__version__', None)
                        if   cmp == '>':  ok = vers >  req_vers
                        elif cmp == '<':  ok = vers <  req_vers
                        elif cmp == '>=': ok = vers >= req_vers
                        elif cmp == '<=': ok = vers <= req_vers
                        elif cmp == '==': ok = vers == req_vers
                        elif cmp == '=':  ok = vers == req_vers
                        elif cmp == '!=': ok = vers != req_vers
                    except:
                        ok = False
                    if not ok:
                        return False
        return True

    def _plugin_file(plugin, path=None):
        "defined here to allow recursive imports for packages"
        fh = None
        if plugin == '__init__':
            return
        if not _larch.symtable._sys.import_ok:
            return

        if path is None:
            try:
                path = _larch.symtable._sys.config.plugins_path
            except:
                path = site_config.plugins_path

        for p_path in path:
            # print(" -- find_plugin ", plugin)
            is_pkg, mod = _find_plugin(plugin, p_path)
            if is_pkg is not None:
                break
        if is_pkg is None and mod is None:
            write('Warning: plugin %s not found\n' % plugin)
            return False

        retval = True
        if is_pkg:
            if _check_requirements(plugin):
                filelist = []
                if PLUGINSTXT in sorted(os.listdir(mod)):
                    pfile = os.path.abspath(os.path.join(mod, PLUGINSTXT))
                    try:
                        with open(pfile, 'r') as pluginsfile:
                            for name in pluginsfile:
                                name = name[:-1].strip()
                                if (not name.startswith('#') and
                                    name.endswith('.py') and len(name) > 3):
                                    filelist.append(name)
                    except:
                        write("Warning:: Error reading plugin file:\n %s\n" %
                              pfile)
                if len(filelist) == 0:
                    for fname in sorted(os.listdir(mod)):
                        if fname.endswith('.py') and len(fname) > 3:
                            filelist.append(fname)

                retvals = []
                for fname in filelist:
                    if not _larch.symtable._sys.import_ok:
                        return
                    try:
                        ret =  _plugin_file(fname[:-3], path=[mod])
                    except:
                        err, exc, tback = sys.exc_info()
                        write('Warning: %s is =not= a valid plugin\n' %
                              pjoin(mod, fname))
                        write("   error:  %s\n" % (repr(sys.exc_info()[1])))
                        ret = False
                        _larch.symtable._sys.import_ok = False
                    retvals.append(ret)
                retval = all(retvals)
        else:
            fh, modpath, desc = mod
            try:
                out = imp.load_module(plugin, fh, modpath, desc)
                _larch.symtable.add_plugin(out, on_error, **kws)
            except:
                err, exc, tback = sys.exc_info()
                lineno = getattr(exc, 'lineno', 0)
                offset = getattr(exc, 'offset', 0)
                etext  = getattr(exc, 'text', '')
                emsg   = getattr(exc, 'message', '')
                try:
                    write(traceback.print_tb(tback))
                except:
                    pass
                write("""Python Error at plugin '%s', line %d
  %s %s^
%s: %s\n""" % (modpath, lineno, etext, ' '*offset, err.__name__, emsg))
                retval = False
                _larch.symtable._sys.import_ok = False

        if _larch.error:
            retval = False
            err = _larch.error.pop(0)
            fname, lineno = err.fname, err.lineno
            output = ["Error Adding Plugin %s from file %s" % (plugin, fname),
                      "%s" % (err.get_error()[1])]

            for err in _larch.error:
                if ((err.fname != fname or err.lineno != lineno) and
                    err.lineno > 0 and lineno > 0):
                    output.append("%s" % (err.get_error()[1]))
            write('\n'.join(output))

        if fh is not None:
            fh.close()
        return retval

    return _plugin_file(plugin)
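
The pattern worth extracting from this loader is how it keeps going after a bad plugin: the bare except grabs sys.exc_info(), writes a warning plus the repr of the exception value, prints the traceback, and only then clears the import_ok flag. Below is a minimal sketch of that reporting step on its own, using importlib rather than the deprecated imp module; the load_plugin helper and its messages are illustrative, not part of larch.

import importlib
import sys
import traceback

def load_plugin(name):
    """Import a plugin module by name; report failures instead of raising."""
    try:
        return importlib.import_module(name)
    except Exception:
        etype, evalue, tback = sys.exc_info()
        sys.stderr.write("Warning: %s is not a valid plugin\n" % name)
        sys.stderr.write("   error:  %s\n" % repr(evalue))
        traceback.print_tb(tback, file=sys.stderr)
        return None

plugin = load_plugin("no_such_plugin_module")   # warns on stderr, returns None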

Example 10

Project: django-simple-import
Source File: views.py
View license
@staff_member_required
def do_import(request, import_log_id):
    """ Import the data! """
    import_log = get_object_or_404(ImportLog, id=import_log_id)
    if import_log.import_type == "N" and 'undo' in request.GET and request.GET['undo'] == "True":
        import_log.undo()
        return HttpResponseRedirect(reverse(
                    do_import,
                    kwargs={'import_log_id': import_log.id}) + '?success_undo=True')

    if 'success_undo' in request.GET and request.GET['success_undo'] == "True":
        success_undo = True
    else:
        success_undo = False

    model_class = import_log.import_setting.content_type.model_class()
    import_data = import_log.get_import_file_as_list()
    header_row = import_data.pop(0)
    header_row_field_names = []
    header_row_default = []
    header_row_null_on_empty = []
    error_data = [header_row + ['Error Type', 'Error Details']]
    create_count = 0
    update_count = 0
    fail_count = 0
    if 'commit' in request.GET and request.GET['commit'] == "True":
        commit = True
    else:
        commit = False

    key_column_name = None
    if import_log.update_key and import_log.import_type in ["U", "O"]:
        key_match = import_log.import_setting.columnmatch_set.get(column_name=import_log.update_key)
        key_column_name = key_match.column_name
        key_field_name = key_match.field_name
    for i, cell in enumerate(header_row):
        match = import_log.import_setting.columnmatch_set.get(column_name=cell)
        header_row_field_names += [match.field_name]
        header_row_default += [match.default_value]
        header_row_null_on_empty += [match.null_on_empty]
        if key_column_name != None and key_column_name.lower() == cell.lower():
            key_index = i

    with transaction.atomic():
        sid = transaction.savepoint()
        for row in import_data:
            try:
                with transaction.atomic():
                    is_created = True
                    if import_log.import_type == "N":
                        new_object = model_class()
                    elif import_log.import_type == "O":
                        filters = {key_field_name: row[key_index]}
                        new_object = model_class.objects.get(**filters)
                        is_created = False
                    elif import_log.import_type == "U":
                        filters = {key_field_name: row[key_index]}
                        new_object = model_class.objects.filter(**filters).first()
                        if new_object == None:
                            new_object = model_class()
                            is_created = False

                    new_object.simple_import_m2ms = {} # Need to deal with these after saving
                    for i, cell in enumerate(row):
                        if header_row_field_names[i]: # skip blank
                            if not import_log.is_empty(cell) or header_row_null_on_empty[i]:
                                set_field_from_cell(import_log, new_object, header_row_field_names[i], cell)
                            elif header_row_default[i]:
                                set_field_from_cell(import_log, new_object, header_row_field_names[i], header_row_default[i])
                    new_object.save()

                    for i, cell in enumerate(row):
                        if header_row_field_names[i]: # skip blank
                            if not import_log.is_empty(cell) or header_row_null_on_empty[i]:
                                set_method_from_cell(import_log, new_object, header_row_field_names[i], cell)
                            elif header_row_default[i]:
                                set_method_from_cell(import_log, new_object, header_row_field_names[i], header_row_default[i])
                    new_object.save()

                    for key in new_object.simple_import_m2ms.keys():
                        value = new_object.simple_import_m2ms[key]
                        m2m = getattr(new_object, key)
                        m2m_model = type(m2m.model())
                        related_field_name = RelationalMatch.objects.get(import_log=import_log, field_name=key).related_field_name
                        m2m_object = m2m_model.objects.get(**{related_field_name:value})
                        m2m.add(m2m_object)

                    if is_created:
                        LogEntry.objects.log_action(
                            user_id         = request.user.pk,
                            content_type_id = ContentType.objects.get_for_model(new_object).pk,
                            object_id       = new_object.pk,
                            object_repr     = smart_text(new_object),
                            action_flag     = ADDITION
                        )
                        create_count += 1
                    else:
                        LogEntry.objects.log_action(
                            user_id         = request.user.pk,
                            content_type_id = ContentType.objects.get_for_model(new_object).pk,
                            object_id       = new_object.pk,
                            object_repr     = smart_text(new_object),
                            action_flag     = CHANGE
                        )
                        update_count += 1
                    ImportedObject.objects.create(
                        import_log = import_log,
                        object_id = new_object.pk,
                        content_type = import_log.import_setting.content_type)
            except IntegrityError:
                exc = sys.exc_info()
                error_data += [row + ["Integrity Error", smart_text(exc[1])]]
                fail_count += 1
            except ObjectDoesNotExist:
                exc = sys.exc_info()
                error_data += [row + ["No Record Found to Update", smart_text(exc[1])]]
                fail_count += 1
            except ValueError:
                exc = sys.exc_info()
                if str(exc[1]).startswith('invalid literal for int() with base 10'):
                    error_data += [row + ["Incompatible Data - A number was expected, but a character was used", smart_text(exc[1])]]
                else:
                    error_data += [row + ["Value Error", smart_text(exc[1])]]
                fail_count += 1
            except:
                error_data += [row + ["Unknown Error"]]
                fail_count += 1
        if not commit:
            transaction.savepoint_rollback(sid)


    if fail_count:
        from io import StringIO
        from django.core.files.base import ContentFile
        from openpyxl.workbook import Workbook
        from openpyxl.writer.excel import save_virtual_workbook

        wb = Workbook()
        ws = wb.worksheets[0]
        ws.title = "Errors"
        filename = 'Errors.xlsx'
        for row in error_data:
            ws.append(row)
        buf = StringIO()
        # Not Python 3 compatible
        #buf.write(str(save_virtual_workbook(wb)))
        import_log.error_file.save(filename, ContentFile(save_virtual_workbook(wb)))
        import_log.save()

    return render(
        request,
        'simple_import/do_import.html',
        {
            'error_data': error_data,
            'create_count': create_count,
            'update_count': update_count,
            'fail_count': fail_count,
            'import_log': import_log,
            'commit': commit,
            'success_undo': success_undo,},
    )
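
Stripped of the Django machinery, the error handling in do_import reduces to a per-row try/except where sys.exc_info()[1] supplies the human-readable detail appended to the error row, plus a failure counter. A rough standalone sketch of that shape; the save_row callable and the exception classes caught here are placeholders, not the view's real dependencies.

import sys

def import_rows(rows, save_row):
    """Apply save_row to each row, collecting error details via sys.exc_info()."""
    error_data, fail_count = [], 0
    for row in rows:
        try:
            save_row(row)
        except (ValueError, KeyError):
            exc = sys.exc_info()
            error_data.append(row + [type(exc[1]).__name__, str(exc[1])])
            fail_count += 1
        except Exception:
            error_data.append(row + ["Unknown Error"])
            fail_count += 1
    return error_data, fail_count

rows = [["1", "alice"], ["x", "bob"]]
errors, failures = import_rows(rows, lambda row: int(row[0]))
# failures == 1; errors[0] ends with the ValueError message for int("x")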

Example 11

Project: bigcouch
Source File: Taskmaster.py
View license
    def _find_next_ready_node(self):
        """
        Finds the next node that is ready to be built.

        This is *the* main guts of the DAG walk.  We loop through the
        list of candidates, looking for something that has no un-built
        children (i.e., that is a leaf Node or has dependencies that are
        all leaf Nodes or up-to-date).  Candidate Nodes are re-scanned
        (both the target Node itself and its sources, which are always
        scanned in the context of a given target) to discover implicit
        dependencies.  A Node that must wait for some children to be
        built will be put back on the candidates list after the children
        have finished building.  A Node that has been put back on the
        candidates list in this way may have itself (or its sources)
        re-scanned, in order to handle generated header files (e.g.) and
        the implicit dependencies therein.

        Note that this method does not do any signature calculation or
        up-to-date check itself.  All of that is handled by the Task
        class.  This is purely concerned with the dependency graph walk.
        """

        self.ready_exc = None

        T = self.trace
        if T: T.write(u'\n' + self.trace_message('Looking for a node to evaluate'))

        while True:
            node = self.next_candidate()
            if node is None:
                if T: T.write(self.trace_message('No candidate anymore.') + u'\n')
                return None

            node = node.disambiguate()
            state = node.get_state()

            # For debugging only:
            #
            # try:
            #     self._validate_pending_children()
            # except:
            #     self.ready_exc = sys.exc_info()
            #     return node

            if CollectStats:
                if not hasattr(node, 'stats'):
                    node.stats = Stats()
                    StatsNodes.append(node)
                S = node.stats
                S.considered = S.considered + 1
            else:
                S = None

            if T: T.write(self.trace_message(u'    Considering node %s and its children:' % self.trace_node(node)))

            if state == NODE_NO_STATE:
                # Mark this node as being on the execution stack:
                node.set_state(NODE_PENDING)
            elif state > NODE_PENDING:
                # Skip this node if it has already been evaluated:
                if S: S.already_handled = S.already_handled + 1
                if T: T.write(self.trace_message(u'       already handled (executed)'))
                continue

            executor = node.get_executor()

            try:
                children = executor.get_all_children()
            except SystemExit:
                exc_value = sys.exc_info()[1]
                e = SCons.Errors.ExplicitExit(node, exc_value.code)
                self.ready_exc = (SCons.Errors.ExplicitExit, e)
                if T: T.write(self.trace_message('       SystemExit'))
                return node
            except Exception, e:
                # We had a problem just trying to figure out the
                # children (like a child couldn't be linked in to a
                # VariantDir, or a Scanner threw something).  Arrange to
                # raise the exception when the Task is "executed."
                self.ready_exc = sys.exc_info()
                if S: S.problem = S.problem + 1
                if T: T.write(self.trace_message('       exception %s while scanning children.\n' % e))
                return node

            children_not_visited = []
            children_pending = set()
            children_not_ready = []
            children_failed = False

            for child in chain(executor.get_all_prerequisites(), children):
                childstate = child.get_state()

                if T: T.write(self.trace_message(u'       ' + self.trace_node(child)))

                if childstate == NODE_NO_STATE:
                    children_not_visited.append(child)
                elif childstate == NODE_PENDING:
                    children_pending.add(child)
                elif childstate == NODE_FAILED:
                    children_failed = True

                if childstate <= NODE_EXECUTING:
                    children_not_ready.append(child)


            # These nodes have not even been visited yet.  Add
            # them to the list so that on some next pass we can
            # take a stab at evaluating them (or their children).
            children_not_visited.reverse()
            self.candidates.extend(self.order(children_not_visited))
            #if T and children_not_visited:
            #    T.write(self.trace_message('     adding to candidates: %s' % map(str, children_not_visited)))
            #    T.write(self.trace_message('     candidates now: %s\n' % map(str, self.candidates)))

            # Skip this node if any of its children have failed.
            #
            # This catches the case where we're descending a top-level
            # target and one of our children failed while trying to be
            # built by a *previous* descent of an earlier top-level
            # target.
            #
            # It can also occur if a node is reused in multiple
            # targets. The first descent happens through one of the
            # targets; the next time it occurs through the other target.
            #
            # Note that we can only have failed_children if the
            # --keep-going flag was used, because without it the build
            # will stop before diving in the other branch.
            #
            # Note that even if one of the children fails, we still
            # added the other children to the list of candidate nodes
            # to keep on building (--keep-going).
            if children_failed:
                for n in executor.get_action_targets():
                    n.set_state(NODE_FAILED)

                if S: S.child_failed = S.child_failed + 1
                if T: T.write(self.trace_message('****** %s\n' % self.trace_node(node)))
                continue

            if children_not_ready:
                for child in children_not_ready:
                    # We're waiting on one or more derived targets
                    # that have not yet finished building.
                    if S: S.not_built = S.not_built + 1

                    # Add this node to the waiting parents lists of
                    # anything we're waiting on, with a reference
                    # count so we can be put back on the list for
                    # re-evaluation when they've all finished.
                    node.ref_count =  node.ref_count + child.add_to_waiting_parents(node)
                    if T: T.write(self.trace_message(u'     adjusted ref count: %s, child %s' %
                                  (self.trace_node(node), repr(str(child)))))

                if T:
                    for pc in children_pending:
                        T.write(self.trace_message('       adding %s to the pending children set\n' %
                                self.trace_node(pc)))
                self.pending_children = self.pending_children | children_pending

                continue

            # Skip this node if it has side-effects that are
            # currently being built:
            wait_side_effects = False
            for se in executor.get_action_side_effects():
                if se.get_state() == NODE_EXECUTING:
                    se.add_to_waiting_s_e(node)
                    wait_side_effects = True

            if wait_side_effects:
                if S: S.side_effects = S.side_effects + 1
                continue

            # The default when we've gotten through all of the checks above:
            # this node is ready to be built.
            if S: S.build = S.build + 1
            if T: T.write(self.trace_message(u'Evaluating %s\n' %
                                             self.trace_node(node)))

            # For debugging only:
            #
            # try:
            #     self._validate_pending_children()
            # except:
            #     self.ready_exc = sys.exc_info()
            #     return node

            return node

        return None
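
What makes Taskmaster interesting for sys.exc_info is the deferred re-raise: the exception caught while scanning children is stashed in self.ready_exc and only raised later, when the Task is actually executed. A minimal sketch of that stash-and-re-raise idiom, written with Python 3 re-raise syntax; DeferredError is illustrative, not an SCons class.

import sys

class DeferredError(object):
    """Stash sys.exc_info() now; re-raise the same exception later."""

    def __init__(self):
        self.exc_info = None

    def capture(self):
        self.exc_info = sys.exc_info()

    def reraise(self):
        if self.exc_info is not None:
            etype, value, tback = self.exc_info
            raise value.with_traceback(tback)

deferred = DeferredError()
try:
    {}["missing"]          # stand-in for executor.get_all_children()
except Exception:
    deferred.capture()     # like: self.ready_exc = sys.exc_info()
# ... later, when the "task" is executed:
# deferred.reraise()       # surfaces the original KeyError with its traceback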

Example 12

Project: numscons
Source File: Taskmaster.py
View license
    def _find_next_ready_node(self):
        """
        Finds the next node that is ready to be built.

        This is *the* main guts of the DAG walk.  We loop through the
        list of candidates, looking for something that has no un-built
        children (i.e., that is a leaf Node or has dependencies that are
        all leaf Nodes or up-to-date).  Candidate Nodes are re-scanned
        (both the target Node itself and its sources, which are always
        scanned in the context of a given target) to discover implicit
        dependencies.  A Node that must wait for some children to be
        built will be put back on the candidates list after the children
        have finished building.  A Node that has been put back on the
        candidates list in this way may have itself (or its sources)
        re-scanned, in order to handle generated header files (e.g.) and
        the implicit dependencies therein.

        Note that this method does not do any signature calculation or
        up-to-date check itself.  All of that is handled by the Task
        class.  This is purely concerned with the dependency graph walk.
        """

        self.ready_exc = None

        T = self.trace
        if T: T.write('\n' + self.trace_message('Looking for a node to evaluate'))

        while 1:
            node = self.next_candidate()
            if node is None:
                if T: T.write(self.trace_message('No candidate anymore.') + '\n')
                return None

            node = node.disambiguate()
            state = node.get_state()

            # For debugging only:
            #
            # try:
            #     self._validate_pending_children()
            # except:
            #     self.ready_exc = sys.exc_info()
            #     return node

            if CollectStats:
                if not hasattr(node, 'stats'):
                    node.stats = Stats()
                    StatsNodes.append(node)
                S = node.stats
                S.considered = S.considered + 1
            else:
                S = None

            if T: T.write(self.trace_message('    Considering node %s and its children:' % self.trace_node(node)))

            if state == NODE_NO_STATE:
                # Mark this node as being on the execution stack:
                node.set_state(NODE_PENDING)
            elif state > NODE_PENDING:
                # Skip this node if it has already been evaluated:
                if S: S.already_handled = S.already_handled + 1
                if T: T.write(self.trace_message('       already handled (executed)'))
                continue

            executor = node.get_executor()

            try:
                children = executor.get_all_children()
            except SystemExit:
                exc_value = sys.exc_info()[1]
                e = SCons.Errors.ExplicitExit(node, exc_value.code)
                self.ready_exc = (SCons.Errors.ExplicitExit, e)
                if T: T.write(self.trace_message('       SystemExit'))
                return node
            except Exception, e:
                # We had a problem just trying to figure out the
                # children (like a child couldn't be linked in to a
                # VariantDir, or a Scanner threw something).  Arrange to
                # raise the exception when the Task is "executed."
                self.ready_exc = sys.exc_info()
                if S: S.problem = S.problem + 1
                if T: T.write(self.trace_message('       exception %s while scanning children.\n' % e))
                return node

            children_not_visited = []
            children_pending = set()
            children_not_ready = []
            children_failed = False

            for child in chain(executor.get_all_prerequisites(), children):
                childstate = child.get_state()

                if T: T.write(self.trace_message('       ' + self.trace_node(child)))

                if childstate == NODE_NO_STATE:
                    children_not_visited.append(child)
                elif childstate == NODE_PENDING:
                    children_pending.add(child)
                elif childstate == NODE_FAILED:
                    children_failed = True

                if childstate <= NODE_EXECUTING:
                    children_not_ready.append(child)


            # These nodes have not even been visited yet.  Add
            # them to the list so that on some next pass we can
            # take a stab at evaluating them (or their children).
            children_not_visited.reverse()
            self.candidates.extend(self.order(children_not_visited))
            #if T and children_not_visited:
            #    T.write(self.trace_message('     adding to candidates: %s' % map(str, children_not_visited)))
            #    T.write(self.trace_message('     candidates now: %s\n' % map(str, self.candidates)))

            # Skip this node if any of its children have failed.
            #
            # This catches the case where we're descending a top-level
            # target and one of our children failed while trying to be
            # built by a *previous* descent of an earlier top-level
            # target.
            #
            # It can also occur if a node is reused in multiple
            # targets. The first descent happens through one of the
            # targets; the next time it occurs through the other target.
            #
            # Note that we can only have failed_children if the
            # --keep-going flag was used, because without it the build
            # will stop before diving in the other branch.
            #
            # Note that even if one of the children fails, we still
            # added the other children to the list of candidate nodes
            # to keep on building (--keep-going).
            if children_failed:
                for n in executor.get_action_targets():
                    n.set_state(NODE_FAILED)

                if S: S.child_failed = S.child_failed + 1
                if T: T.write(self.trace_message('****** %s\n' % self.trace_node(node)))
                continue

            if children_not_ready:
                for child in children_not_ready:
                    # We're waiting on one or more derived targets
                    # that have not yet finished building.
                    if S: S.not_built = S.not_built + 1

                    # Add this node to the waiting parents lists of
                    # anything we're waiting on, with a reference
                    # count so we can be put back on the list for
                    # re-evaluation when they've all finished.
                    node.ref_count =  node.ref_count + child.add_to_waiting_parents(node)
                    if T: T.write(self.trace_message('     adjusted ref count: %s, child %s' %
                                  (self.trace_node(node), repr(str(child)))))

                if T:
                    for pc in children_pending:
                        T.write(self.trace_message('       adding %s to the pending children set\n' %
                                self.trace_node(pc)))
                self.pending_children = self.pending_children | children_pending

                continue

            # Skip this node if it has side-effects that are
            # currently being built:
            wait_side_effects = False
            for se in executor.get_action_side_effects():
                if se.get_state() == NODE_EXECUTING:
                    se.add_to_waiting_s_e(node)
                    wait_side_effects = True

            if wait_side_effects:
                if S: S.side_effects = S.side_effects + 1
                continue

            # The default when we've gotten through all of the checks above:
            # this node is ready to be built.
            if S: S.build = S.build + 1
            if T: T.write(self.trace_message('Evaluating %s\n' %
                                             self.trace_node(node)))

            # For debugging only:
            #
            # try:
            #     self._validate_pending_children()
            # except:
            #     self.ready_exc = sys.exc_info()
            #     return node

            return node

        return None
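
The SystemExit branch in both Taskmaster listings shows the other half of the idiom: inside except SystemExit, sys.exc_info()[1] is the SystemExit instance itself, and its .code attribute is what gets repackaged into an ExplicitExit error. A toy version of that translation; the returned tuples are for illustration only, not SCons behaviour.

import sys

def run_node_scan(scan):
    """Run a scan callable, translating SystemExit into an explicit exit record."""
    try:
        scan()
    except SystemExit:
        exc_value = sys.exc_info()[1]        # the SystemExit instance
        return ("explicit-exit", exc_value.code)
    except Exception:
        return ("scan-error", sys.exc_info())
    return ("ok", None)

print(run_node_scan(lambda: sys.exit(3)))    # ('explicit-exit', 3)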

Example 13

Project: p2pool-n
Source File: Server.py
View license
    def do_POST(self):
        global _contexts
        
        status = 500
        try:
            if self.server.config.dumpHeadersIn:
                s = 'Incoming HTTP headers'
                debugHeader(s)
                print self.raw_requestline.strip()
                print "\n".join(map (lambda x: x.strip(),
                    self.headers.headers))
                debugFooter(s)

            data = self.rfile.read(int(self.headers["Content-length"]))

            if self.server.config.dumpSOAPIn:
                s = 'Incoming SOAP'
                debugHeader(s)
                print data,
                if data[-1] != '\n':
                    print
                debugFooter(s)

            (r, header, body, attrs) = \
                parseSOAPRPC(data, header = 1, body = 1, attrs = 1)

            method = r._name
            args   = r._aslist()
            kw     = r._asdict()

            if Config.simplify_objects:
                args = simplify(args)
                kw = simplify(kw)

            # Handle mixed named and unnamed arguments by assuming
            # that all arguments with names of the form "v[0-9]+"
            # are unnamed and should be passed in numeric order,
            # other arguments are named and should be passed using
            # this name.

            # This is a non-standard extension to the SOAP protocol,
            # but is supported by Apache AXIS.

            # It is enabled by default.  To disable, set
            # Config.specialArgs to False.


            ordered_args = {}
            named_args   = {}

            if Config.specialArgs: 
                
                for (k,v) in  kw.items():

                    if k[0]=="v":
                        try:
                            i = int(k[1:])
                            ordered_args[i] = v
                        except ValueError:
                            named_args[str(k)] = v

                    else:
                        named_args[str(k)] = v

            # We have to decide namespace precedence
            # I'm happy with the following scenario
            # if r._ns is specified use it, if not check for
            # a path, if it's specified convert it and use it as the
            # namespace. If both are specified, use r._ns.
            
            ns = r._ns

            if len(self.path) > 1 and not ns:
                ns = self.path.replace("/", ":")
                if ns[0] == ":": ns = ns[1:]
            
            # authorization method
            a = None

            keylist = ordered_args.keys()
            keylist.sort()

            # create list in proper order w/o names
            tmp = map( lambda x: ordered_args[x], keylist)
            ordered_args = tmp

            #print '<-> Argument Matching Yielded:'
            #print '<-> Ordered Arguments:' + str(ordered_args)
            #print '<-> Named Arguments  :' + str(named_args)
             
            resp = ""
            
            # For fault messages
            if ns:
                nsmethod = "%s:%s" % (ns, method)
            else:
                nsmethod = method

            try:
                # First look for registered functions
                if self.server.funcmap.has_key(ns) and \
                    self.server.funcmap[ns].has_key(method):
                    f = self.server.funcmap[ns][method]

                    # look for the authorization method
                    if self.server.config.authMethod != None:
                        authmethod = self.server.config.authMethod
                        if self.server.funcmap.has_key(ns) and \
                               self.server.funcmap[ns].has_key(authmethod):
                            a = self.server.funcmap[ns][authmethod]
                else:
                    # Now look at registered objects
                    # Check for nested attributes. This works even if
                    # there are none, because the split will return
                    # [method]
                    f = self.server.objmap[ns]
                    
                    # Look for the authorization method
                    if self.server.config.authMethod != None:
                        authmethod = self.server.config.authMethod
                        if hasattr(f, authmethod):
                            a = getattr(f, authmethod)

                    # then continue looking for the method
                    l = method.split(".")
                    for i in l:
                        f = getattr(f, i)
            except:
                info = sys.exc_info()
                try:
                    resp = buildSOAP(faultType("%s:Client" % NS.ENV_T,
                                               "Method Not Found",
                                               "%s : %s %s %s" % (nsmethod,
                                                                  info[0],
                                                                  info[1],
                                                                  info[2])),
                                     encoding = self.server.encoding,
                                     config = self.server.config)
                finally:
                    del info
                status = 500
            else:
                try:
                    if header:
                        x = HeaderHandler(header, attrs)

                    fr = 1

                    # call context book keeping
                    # We're stuffing the method into the soapaction if there
                    # isn't one, someday, we'll set that on the client
                    # and it won't be necessary here
                    # for now we're doing both

                    if "SOAPAction".lower() not in self.headers.keys() or \
                       self.headers["SOAPAction"] == "\"\"":
                        self.headers["SOAPAction"] = method
                        
                    thread_id = thread.get_ident()
                    _contexts[thread_id] = SOAPContext(header, body,
                                                       attrs, data,
                                                       self.connection,
                                                       self.headers,
                                                       self.headers["SOAPAction"])

                    # Do an authorization check
                    if a != None:
                        if not apply(a, (), {"_SOAPContext" :
                                             _contexts[thread_id] }):
                            raise faultType("%s:Server" % NS.ENV_T,
                                            "Authorization failed.",
                                            "%s" % nsmethod)
                    
                    # If it's wrapped, some special action may be needed
                    if isinstance(f, MethodSig):
                        c = None
                    
                        if f.context:  # retrieve context object
                            c = _contexts[thread_id]

                        if Config.specialArgs:
                            if c:
                                named_args["_SOAPContext"] = c
                            fr = apply(f, ordered_args, named_args)
                        elif f.keywords:
                            # This is lame, but have to de-unicode
                            # keywords
                            
                            strkw = {}
                            
                            for (k, v) in kw.items():
                                strkw[str(k)] = v
                            if c:
                                strkw["_SOAPContext"] = c
                            fr = apply(f, (), strkw)
                        elif c:
                            fr = apply(f, args, {'_SOAPContext':c})
                        else:
                            fr = apply(f, args, {})

                    else:
                        if Config.specialArgs:
                            fr = apply(f, ordered_args, named_args)
                        else:
                            fr = apply(f, args, {})

                    
                    if type(fr) == type(self) and \
                        isinstance(fr, voidType):
                        resp = buildSOAP(kw = {'%sResponse' % method: fr},
                            encoding = self.server.encoding,
                            config = self.server.config)
                    else:
                        resp = buildSOAP(kw =
                            {'%sResponse' % method: {'Result': fr}},
                            encoding = self.server.encoding,
                            config = self.server.config)

                    # Clean up _contexts
                    if _contexts.has_key(thread_id):
                        del _contexts[thread_id]
                        
                except Exception, e:
                    import traceback
                    info = sys.exc_info()

                    try:
                        if self.server.config.dumpFaultInfo:
                            s = 'Method %s exception' % nsmethod
                            debugHeader(s)
                            traceback.print_exception(info[0], info[1],
                                                      info[2])
                            debugFooter(s)

                        if isinstance(e, faultType):
                            f = e
                        else:
                            f = faultType("%s:Server" % NS.ENV_T,
                                          "Method Failed",
                                          "%s" % nsmethod)

                        if self.server.config.returnFaultInfo:
                            f._setDetail("".join(traceback.format_exception(
                                info[0], info[1], info[2])))
                        elif not hasattr(f, 'detail'):
                            f._setDetail("%s %s" % (info[0], info[1]))
                    finally:
                        del info

                    resp = buildSOAP(f, encoding = self.server.encoding,
                       config = self.server.config)
                    status = 500
                else:
                    status = 200
        except faultType, e:
            import traceback
            info = sys.exc_info()
            try:
                if self.server.config.dumpFaultInfo:
                    s = 'Received fault exception'
                    debugHeader(s)
                    traceback.print_exception(info[0], info[1],
                        info[2])
                    debugFooter(s)

                if self.server.config.returnFaultInfo:
                    e._setDetail("".join(traceback.format_exception(
                            info[0], info[1], info[2])))
                elif not hasattr(e, 'detail'):
                    e._setDetail("%s %s" % (info[0], info[1]))
            finally:
                del info

            resp = buildSOAP(e, encoding = self.server.encoding,
                config = self.server.config)
            status = 500
        except Exception, e:
            # internal error, report as HTTP server error

            if self.server.config.dumpFaultInfo:
                s = 'Internal exception %s' % e
                import traceback
                debugHeader(s)
                info = sys.exc_info()
                try:
                    traceback.print_exception(info[0], info[1], info[2])
                finally:
                    del info

                debugFooter(s)

            self.send_response(500)
            self.end_headers()

            if self.server.config.dumpHeadersOut and \
                self.request_version != 'HTTP/0.9':
                s = 'Outgoing HTTP headers'
                debugHeader(s)
                if self.responses.has_key(status):
                    s = ' ' + self.responses[status][0]
                else:
                    s = ''
                print "%s %d%s" % (self.protocol_version, 500, s)
                print "Server:", self.version_string()
                print "Date:", self.__last_date_time_string
                debugFooter(s)
        else:
            # got a valid SOAP response
            self.send_response(status)

            t = 'text/xml';
            if self.server.encoding != None:
                t += '; charset=%s' % self.server.encoding
            self.send_header("Content-type", t)
            self.send_header("Content-length", str(len(resp)))
            self.end_headers()

            if self.server.config.dumpHeadersOut and \
                self.request_version != 'HTTP/0.9':
                s = 'Outgoing HTTP headers'
                debugHeader(s)
                if self.responses.has_key(status):
                    s = ' ' + self.responses[status][0]
                else:
                    s = ''
                print "%s %d%s" % (self.protocol_version, status, s)
                print "Server:", self.version_string()
                print "Date:", self.__last_date_time_string
                print "Content-type:", t
                print "Content-length:", len(resp)
                debugFooter(s)

            if self.server.config.dumpSOAPOut:
                s = 'Outgoing SOAP'
                debugHeader(s)
                print resp,
                if resp[-1] != '\n':
                    print
                debugFooter(s)

            self.wfile.write(resp)
            self.wfile.flush()

            # We should be able to shut down both a regular and an SSL
            # connection, but under Python 2.1, calling shutdown on an
            # SSL connection drops the output, hence this work-around.
            # This should be investigated more someday.

            if self.server.config.SSLserver and \
                isinstance(self.connection, SSL.Connection):
                self.connection.set_shutdown(SSL.SSL_SENT_SHUTDOWN |
                    SSL.SSL_RECEIVED_SHUTDOWN)
            else:
                self.connection.shutdown(1)

    def do_GET(self):

        #print 'command        ', self.command
        #print 'path           ', self.path
        #print 'request_version', self.request_version
        #print 'headers'
        #print '   type    ', self.headers.type
        #print '   maintype', self.headers.maintype
        #print '   subtype ', self.headers.subtype
        #print '   params  ', self.headers.plist

        path = self.path.lower()
        if path.endswith('wsdl'):
            method = 'wsdl'
            function = namespace = None
            if self.server.funcmap.has_key(namespace) \
                    and self.server.funcmap[namespace].has_key(method):
                function = self.server.funcmap[namespace][method]
            else:
                if namespace in self.server.objmap.keys():
                    function = self.server.objmap[namespace]
                    l = method.split(".")
                    for i in l:
                        function = getattr(function, i)

            if function:
                self.send_response(200)
                self.send_header("Content-type", 'text/plain')
                self.end_headers()
                response = apply(function, ())
                self.wfile.write(str(response))
                return

        # return error
        self.send_response(200)
        self.send_header("Content-type", 'text/html')
        self.end_headers()
        self.wfile.write('''\
<title>
<head>Error!</head>
</title>

<body>
<h1>Oops!</h1>

<p>
  This server supports HTTP GET requests only for the purpose of
  obtaining Web Services Description Language (WSDL) for a specific
  service.

  Either you requested an URL that does not end in "wsdl" or this
  server does not implement a wsdl method.
</p>


</body>''')
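
Every handler in do_POST follows the same recipe: call sys.exc_info() inside the except block, format the triple with traceback.format_exception (or fall back to just the type and value), and delete the local info in a finally block so the frame does not keep the traceback alive. The same shape in isolation; the fault_detail helper and its flag are hypothetical, not SOAPpy API.

import sys
import traceback

def fault_detail(return_fault_info):
    """Build a fault-detail string from the current exception, then drop the tuple."""
    info = sys.exc_info()
    try:
        if return_fault_info:
            return "".join(traceback.format_exception(info[0], info[1], info[2]))
        return "%s %s" % (info[0], info[1])
    finally:
        del info     # mirrors the handlers above: don't keep the traceback alive

try:
    int("not a number")
except ValueError:
    print(fault_detail(return_fault_info=True))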

Example 14

Project: pysnmp
Source File: service.py
View license
    def __generateRequestOrResponseMsg(self, snmpEngine,
                                       messageProcessingModel,
                                       globalData, maxMessageSize,
                                       securityModel, securityEngineID,
                                       securityName, securityLevel,
                                       scopedPDU, securityStateReference):
        mibBuilder = snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder
        snmpEngineID = mibBuilder.importSymbols('__SNMP-FRAMEWORK-MIB', 'snmpEngineID')[0].syntax

        # 3.1.1
        if securityStateReference is not None:
            # 3.1.1a
            cachedSecurityData = self._cache.pop(securityStateReference)
            usmUserName = cachedSecurityData['msgUserName']
            if 'usmUserSecurityName' in cachedSecurityData:
                usmUserSecurityName = cachedSecurityData['usmUserSecurityName']
            else:
                usmUserSecurityName = usmUserName
            if 'usmUserAuthProtocol' in cachedSecurityData:
                usmUserAuthProtocol = cachedSecurityData['usmUserAuthProtocol']
            else:
                usmUserAuthProtocol = noauth.NoAuth.serviceID
            if 'usmUserAuthKeyLocalized' in cachedSecurityData:
                usmUserAuthKeyLocalized = cachedSecurityData['usmUserAuthKeyLocalized']
            else:
                usmUserAuthKeyLocalized = None
            if 'usmUserPrivProtocol' in cachedSecurityData:
                usmUserPrivProtocol = cachedSecurityData['usmUserPrivProtocol']
            else:
                usmUserPrivProtocol = nopriv.NoPriv.serviceID
            if 'usmUserPrivKeyLocalized' in cachedSecurityData:
                usmUserPrivKeyLocalized = cachedSecurityData['usmUserPrivKeyLocalized']
            else:
                usmUserPrivKeyLocalized = None
            securityEngineID = snmpEngineID
            debug.logger & debug.flagSM and debug.logger('__generateRequestOrResponseMsg: user info read from cache')
        elif securityName:
            # 3.1.1b
            try:
                (usmUserName, usmUserSecurityName, usmUserAuthProtocol,
                 usmUserAuthKeyLocalized, usmUserPrivProtocol,
                 usmUserPrivKeyLocalized) = self.__getUserInfo(
                    snmpEngine.msgAndPduDsp.mibInstrumController,
                    securityEngineID,
                    self.__sec2usr(snmpEngine, securityName, securityEngineID)
                )
                debug.logger & debug.flagSM and debug.logger('__generateRequestOrResponseMsg: read user info')

            except NoSuchInstanceError:
                pysnmpUsmDiscovery, = mibBuilder.importSymbols('__PYSNMP-USM-MIB', 'pysnmpUsmDiscovery')
                reportUnknownName = not pysnmpUsmDiscovery.syntax
                if not reportUnknownName:
                    try:
                        (usmUserName, usmUserSecurityName,
                         usmUserAuthProtocol, usmUserAuthKeyLocalized,
                         usmUserPrivProtocol,
                         usmUserPrivKeyLocalized) = self.__cloneUserInfo(
                            snmpEngine.msgAndPduDsp.mibInstrumController,
                            securityEngineID,
                            self.__sec2usr(snmpEngine, securityName)
                        )

                    except NoSuchInstanceError:
                        reportUnknownName = True

                if reportUnknownName:
                    raise error.StatusInformation(
                        errorIndication=errind.unknownSecurityName
                    )

                debug.logger & debug.flagSM and debug.logger('__generateRequestOrResponseMsg: clone user info')

            except PyAsn1Error:
                debug.logger & debug.flagSM and debug.logger(
                    '__generateRequestOrResponseMsg: %s' % (sys.exc_info()[1],))
                snmpInGenErrs, = mibBuilder.importSymbols('__SNMPv2-MIB', 'snmpInGenErrs')
                snmpInGenErrs.syntax += 1
                raise error.StatusInformation(
                    errorIndication=errind.invalidMsg
                )
        else:
            # empty username used for engineID discovery
            usmUserName = usmUserSecurityName = null
            usmUserAuthProtocol = noauth.NoAuth.serviceID
            usmUserPrivProtocol = nopriv.NoPriv.serviceID
            usmUserAuthKeyLocalized = usmUserPrivKeyLocalized = None
            debug.logger & debug.flagSM and debug.logger('__generateRequestOrResponseMsg: use empty USM data')

        # noinspection PyUnboundLocalVariable
        debug.logger & debug.flagSM and debug.logger(
            '__generateRequestOrResponseMsg: local usmUserName %r usmUserSecurityName %r usmUserAuthProtocol %s usmUserPrivProtocol %s securityEngineID %r securityName %r' % (
                usmUserName, usmUserSecurityName, usmUserAuthProtocol, usmUserPrivProtocol, securityEngineID, securityName))

        msg = globalData

        # 3.1.2
        if securityLevel == 3:
            if usmUserAuthProtocol == noauth.NoAuth.serviceID or \
                    usmUserPrivProtocol == nopriv.NoPriv.serviceID:
                raise error.StatusInformation(
                    errorIndication=errind.unsupportedSecurityLevel
                )

        # 3.1.3
        if securityLevel == 3 or securityLevel == 2:
            if usmUserAuthProtocol == noauth.NoAuth.serviceID:
                raise error.StatusInformation(
                    errorIndication=errind.unsupportedSecurityLevel
                )

        securityParameters = self.__securityParametersSpec

        scopedPDUData = msg.setComponentByPosition(3).getComponentByPosition(3)
        scopedPDUData.setComponentByPosition(
            0, scopedPDU, verifyConstraints=False
        )

        # 3.1.6a
        if securityStateReference is None and securityLevel in (2, 3):
            if securityEngineID in self.__timeline:
                (snmpEngineBoots, snmpEngineTime, latestReceivedEngineTime,
                 latestUpdateTimestamp) = self.__timeline[securityEngineID]
                debug.logger & debug.flagSM and debug.logger(
                    '__generateRequestOrResponseMsg: read snmpEngineBoots, snmpEngineTime from timeline')
            else:
                # 2.3 XXX is this correct?
                snmpEngineBoots = snmpEngineTime = 0
                debug.logger & debug.flagSM and debug.logger(
                    '__generateRequestOrResponseMsg: no timeline for securityEngineID %r' % (securityEngineID,))
        # 3.1.6.b
        elif securityStateReference is not None:  # XXX Report?
            (snmpEngineBoots,
             snmpEngineTime) = mibBuilder.importSymbols('__SNMP-FRAMEWORK-MIB', 'snmpEngineBoots', 'snmpEngineTime')
            snmpEngineBoots = snmpEngineBoots.syntax
            snmpEngineTime = snmpEngineTime.syntax.clone()
            debug.logger & debug.flagSM and debug.logger(
                '__generateRequestOrResponseMsg: read snmpEngineBoots, snmpEngineTime from LCD')
        # 3.1.6.c
        else:
            snmpEngineBoots = snmpEngineTime = 0
            debug.logger & debug.flagSM and debug.logger(
                '__generateRequestOrResponseMsg: assuming zero snmpEngineBoots, snmpEngineTime')

        debug.logger & debug.flagSM and debug.logger(
            '__generateRequestOrResponseMsg: use snmpEngineBoots %s snmpEngineTime %s for securityEngineID %r' % (
                snmpEngineBoots, snmpEngineTime, securityEngineID))

        # 3.1.4a
        if securityLevel == 3:
            if usmUserPrivProtocol in self.privServices:
                privHandler = self.privServices[usmUserPrivProtocol]
            else:
                raise error.StatusInformation(
                    errorIndication=errind.encryptionError
                )

            debug.logger & debug.flagSM and debug.logger(
                '__generateRequestOrResponseMsg: scopedPDU %s' % scopedPDU.prettyPrint())

            try:
                dataToEncrypt = encoder.encode(scopedPDU)

            except PyAsn1Error:
                debug.logger & debug.flagSM and debug.logger(
                    '__generateRequestOrResponseMsg: scopedPDU serialization error: %s' % sys.exc_info()[1])
                raise error.StatusInformation(
                    errorIndication=errind.serializationError
                )

            debug.logger & debug.flagSM and debug.logger(
                '__generateRequestOrResponseMsg: scopedPDU encoded into %s' % debug.hexdump(dataToEncrypt))

            # noinspection PyUnboundLocalVariable
            (encryptedData,
             privParameters) = privHandler.encryptData(
                usmUserPrivKeyLocalized,
                (snmpEngineBoots, snmpEngineTime, None), dataToEncrypt
            )

            securityParameters.setComponentByPosition(
                5, privParameters, verifyConstraints=False
            )
            scopedPDUData.setComponentByPosition(
                1, encryptedData, verifyConstraints=False
            )

            debug.logger & debug.flagSM and debug.logger(
                '__generateRequestOrResponseMsg: scopedPDU ciphered into %s' % debug.hexdump(encryptedData))

        # 3.1.4b
        elif securityLevel == 1 or securityLevel == 2:
            securityParameters.setComponentByPosition(5, '')

        debug.logger & debug.flagSM and debug.logger('__generateRequestOrResponseMsg: %s' % scopedPDUData.prettyPrint())

        # 3.1.5
        securityParameters.setComponentByPosition(
            0, securityEngineID, verifyConstraints=False
        )
        securityParameters.setComponentByPosition(
            1, snmpEngineBoots, verifyConstraints=False
        )
        securityParameters.setComponentByPosition(
            2, snmpEngineTime, verifyConstraints=False
        )

        # 3.1.7
        securityParameters.setComponentByPosition(
            3, usmUserName, verifyConstraints=False
        )

        # 3.1.8a
        if securityLevel == 3 or securityLevel == 2:
            if usmUserAuthProtocol in self.authServices:
                authHandler = self.authServices[usmUserAuthProtocol]
            else:
                raise error.StatusInformation(
                    errorIndication=errind.authenticationFailure
                )

            # extra-wild hack to facilitate BER substrate in-place re-write
            securityParameters.setComponentByPosition(
                4, '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
            )

            debug.logger & debug.flagSM and debug.logger(
                '__generateRequestOrResponseMsg: %s' % (securityParameters.prettyPrint(),))

            try:
                msg.setComponentByPosition(2, encoder.encode(securityParameters), verifyConstraints=False)

            except PyAsn1Error:
                debug.logger & debug.flagSM and debug.logger(
                    '__generateRequestOrResponseMsg: securityParameters serialization error: %s' % sys.exc_info()[1])
                raise error.StatusInformation(
                    errorIndication=errind.serializationError
                )

            debug.logger & debug.flagSM and debug.logger(
                '__generateRequestOrResponseMsg: auth outgoing msg: %s' % msg.prettyPrint())

            try:
                wholeMsg = encoder.encode(msg)

            except PyAsn1Error:
                debug.logger & debug.flagSM and debug.logger(
                    '__generateRequestOrResponseMsg: msg serialization error: %s' % sys.exc_info()[1])
                raise error.StatusInformation(
                    errorIndication=errind.serializationError
                )

            # noinspection PyUnboundLocalVariable
            authenticatedWholeMsg = authHandler.authenticateOutgoingMsg(
                usmUserAuthKeyLocalized, wholeMsg
            )

        # 3.1.8b
        else:
            securityParameters.setComponentByPosition(
                4, '', verifyConstraints=False
            )

            debug.logger & debug.flagSM and debug.logger(
                '__generateRequestOrResponseMsg: %s' % (securityParameters.prettyPrint(),))

            try:
                msg.setComponentByPosition(2, encoder.encode(securityParameters), verifyConstraints=False)

            except PyAsn1Error:
                debug.logger & debug.flagSM and debug.logger(
                    '__generateRequestOrResponseMsg: securityParameters serialization error: %s' % sys.exc_info()[1])
                raise error.StatusInformation(
                    errorIndication=errind.serializationError
                )

            try:
                debug.logger & debug.flagSM and debug.logger(
                    '__generateRequestOrResponseMsg: plain outgoing msg: %s' % msg.prettyPrint())
                authenticatedWholeMsg = encoder.encode(msg)

            except PyAsn1Error:
                debug.logger & debug.flagSM and debug.logger(
                    '__generateRequestOrResponseMsg: msg serialization error: %s' % sys.exc_info()[1])
                raise error.StatusInformation(
                    errorIndication=errind.serializationError
                )

        debug.logger & debug.flagSM and debug.logger('__generateRequestOrResponseMsg: %s outgoing msg: %s' % (
            securityLevel > 1 and "authenticated" or "plain", debug.hexdump(authenticatedWholeMsg)))

        # 3.1.9
        return msg.getComponentByPosition(2), authenticatedWholeMsg
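
The pysnmp code above repeatedly wraps pyasn1 encoding in try/except PyAsn1Error, logs sys.exc_info()[1] (the exception instance currently being handled), and raises a protocol-level StatusInformation error instead. A minimal, self-contained sketch of that pattern follows; SerializationError and encode() are illustrative stand-ins, not pysnmp or pyasn1 APIs.

import sys
import logging

logging.basicConfig(level=logging.DEBUG)

class SerializationError(Exception):
    """Hypothetical domain-level error raised in place of the low-level one."""

def encode(obj):
    # Stand-in encoder: only bytes can be "serialized".
    if not isinstance(obj, bytes):
        raise ValueError('cannot encode %r' % (obj,))
    return obj

def serialize(obj):
    try:
        return encode(obj)
    except ValueError:
        # sys.exc_info()[1] is the in-flight exception instance; it reads the
        # same on Python 2 and 3, which is one reason 2/3-spanning code such
        # as pysnmp often prefers it to "except ValueError as e".
        logging.debug('serialization error: %s', sys.exc_info()[1])
        raise SerializationError('could not serialize %r' % (obj,))

serialize(b'ok')      # returns b'ok'
# serialize(42)       # would log the ValueError, then raise SerializationError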

Example 15

Project: pysnmp
Source File: service.py
View license
    def processIncomingMsg(self, snmpEngine, messageProcessingModel,
                           maxMessageSize, securityParameters,
                           securityModel, securityLevel, wholeMsg, msg):
        mibBuilder = snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder

        # 3.2.9 -- moved up here to be able to report
        # maxSizeResponseScopedPDU on error
        # (48 - maximum SNMPv3 header length)
        maxSizeResponseScopedPDU = int(maxMessageSize) - len(securityParameters) - 48

        debug.logger & debug.flagSM and debug.logger(
            'processIncomingMsg: securityParameters %s' % debug.hexdump(securityParameters))

        # 3.2.1
        try:
            securityParameters, rest = decoder.decode(
                securityParameters, asn1Spec=self.__securityParametersSpec
            )

        except PyAsn1Error:
            debug.logger & debug.flagSM and debug.logger('processIncomingMsg: %s' % (sys.exc_info()[1],))
            snmpInASNParseErrs, = mibBuilder.importSymbols('__SNMPv2-MIB', 'snmpInASNParseErrs')
            snmpInASNParseErrs.syntax += 1
            raise error.StatusInformation(errorIndication=errind.parseError)

        debug.logger & debug.flagSM and debug.logger('processIncomingMsg: %s' % (securityParameters.prettyPrint(),))

        if eoo.endOfOctets.isSameTypeWith(securityParameters):
            raise error.StatusInformation(errorIndication=errind.parseError)

        # 3.2.2
        msgAuthoritativeEngineId = securityParameters.getComponentByPosition(0)
        securityStateReference = self._cache.push(
            msgUserName=securityParameters.getComponentByPosition(3)
        )

        debug.logger & debug.flagSM and debug.logger(
            'processIncomingMsg: cache write securityStateReference %s by msgUserName %s' % (
                securityStateReference, securityParameters.getComponentByPosition(3)))

        scopedPduData = msg.getComponentByPosition(3)

        # Used for error reporting
        contextEngineId = mibBuilder.importSymbols('__SNMP-FRAMEWORK-MIB', 'snmpEngineID')[0].syntax
        contextName = null

        snmpEngineID = mibBuilder.importSymbols('__SNMP-FRAMEWORK-MIB', 'snmpEngineID')[0].syntax

        # 3.2.3
        if msgAuthoritativeEngineId != snmpEngineID and \
                msgAuthoritativeEngineId not in self.__timeline:
            if msgAuthoritativeEngineId and \
                    4 < len(msgAuthoritativeEngineId) < 33:
                # 3.2.3a - cloned user when request was sent
                debug.logger & debug.flagSM and debug.logger(
                    'processIncomingMsg: unsynchronized securityEngineID %r' % (msgAuthoritativeEngineId,))
            else:
                # 3.2.3b
                debug.logger & debug.flagSM and debug.logger(
                    'processIncomingMsg: peer requested snmpEngineID discovery')
                usmStatsUnknownEngineIDs, = mibBuilder.importSymbols(
                    '__SNMP-USER-BASED-SM-MIB', 'usmStatsUnknownEngineIDs')
                usmStatsUnknownEngineIDs.syntax += 1
                debug.logger & debug.flagSM and debug.logger(
                    'processIncomingMsg: null or malformed msgAuthoritativeEngineId')
                pysnmpUsmDiscoverable, = mibBuilder.importSymbols(
                    '__PYSNMP-USM-MIB', 'pysnmpUsmDiscoverable')
                if pysnmpUsmDiscoverable.syntax:
                    debug.logger & debug.flagSM and debug.logger(
                        'processIncomingMsg: starting snmpEngineID discovery procedure')

                    # Report original contextName
                    if scopedPduData.getName() != 'plaintext':
                        debug.logger & debug.flagSM and debug.logger(
                            'processIncomingMsg: scopedPduData not plaintext %s' % scopedPduData.prettyPrint())
                        raise error.StatusInformation(
                            errorIndication=errind.unknownEngineID
                        )

                    # 7.2.6.a.1
                    scopedPdu = scopedPduData.getComponent()
                    contextEngineId = scopedPdu.getComponentByPosition(0)
                    contextName = scopedPdu.getComponentByPosition(1)

                    raise error.StatusInformation(
                        errorIndication=errind.unknownEngineID,
                        oid=usmStatsUnknownEngineIDs.name,
                        val=usmStatsUnknownEngineIDs.syntax,
                        securityStateReference=securityStateReference,
                        securityLevel=securityLevel,
                        contextEngineId=contextEngineId,
                        contextName=contextName,
                        scopedPDU=scopedPdu,
                        maxSizeResponseScopedPDU=maxSizeResponseScopedPDU
                    )
                else:
                    debug.logger & debug.flagSM and debug.logger('processIncomingMsg: will not discover EngineID')
                    # free securityStateReference XXX
                    raise error.StatusInformation(
                        errorIndication=errind.unknownEngineID
                    )

        msgUserName = securityParameters.getComponentByPosition(3)

        debug.logger & debug.flagSM and debug.logger(
            'processIncomingMsg: read from securityParams msgAuthoritativeEngineId %r msgUserName %r' % (
                msgAuthoritativeEngineId, msgUserName))

        if msgUserName:
            # 3.2.4
            try:
                (usmUserName, usmUserSecurityName, usmUserAuthProtocol,
                 usmUserAuthKeyLocalized, usmUserPrivProtocol,
                 usmUserPrivKeyLocalized) = self.__getUserInfo(
                    snmpEngine.msgAndPduDsp.mibInstrumController,
                    msgAuthoritativeEngineId, msgUserName
                )
                debug.logger & debug.flagSM and debug.logger('processIncomingMsg: read user info from LCD')

            except NoSuchInstanceError:
                debug.logger & debug.flagSM and debug.logger(
                    'processIncomingMsg: unknown securityEngineID %r msgUserName %r' % (
                        msgAuthoritativeEngineId, msgUserName))
                usmStatsUnknownUserNames, = mibBuilder.importSymbols(
                    '__SNMP-USER-BASED-SM-MIB', 'usmStatsUnknownUserNames')
                usmStatsUnknownUserNames.syntax += 1
                raise error.StatusInformation(
                    errorIndication=errind.unknownSecurityName,
                    oid=usmStatsUnknownUserNames.name,
                    val=usmStatsUnknownUserNames.syntax,
                    securityStateReference=securityStateReference,
                    securityLevel=securityLevel,
                    contextEngineId=contextEngineId,
                    contextName=contextName,
                    maxSizeResponseScopedPDU=maxSizeResponseScopedPDU
                )

            except PyAsn1Error:
                debug.logger & debug.flagSM and debug.logger('processIncomingMsg: %s' % (sys.exc_info()[1],))
                snmpInGenErrs, = mibBuilder.importSymbols('__SNMPv2-MIB', 'snmpInGenErrs')
                snmpInGenErrs.syntax += 1
                raise error.StatusInformation(errorIndication=errind.invalidMsg)
        else:
            # empty username used for engineID discovery
            usmUserName = usmUserSecurityName = null
            usmUserAuthProtocol = noauth.NoAuth.serviceID
            usmUserPrivProtocol = nopriv.NoPriv.serviceID
            usmUserAuthKeyLocalized = usmUserPrivKeyLocalized = None

        debug.logger & debug.flagSM and debug.logger(
            'processIncomingMsg: now have usmUserName %r usmUserSecurityName %r usmUserAuthProtocol %r usmUserPrivProtocol %r for msgUserName %r' % (
                usmUserName, usmUserSecurityName, usmUserAuthProtocol, usmUserPrivProtocol, msgUserName))

        # 3.2.11 (moved up here to let Reports be authenticated & encrypted)
        self._cache.pop(securityStateReference)
        securityStateReference = self._cache.push(
            msgUserName=securityParameters.getComponentByPosition(3),
            usmUserSecurityName=usmUserSecurityName,
            usmUserAuthProtocol=usmUserAuthProtocol,
            usmUserAuthKeyLocalized=usmUserAuthKeyLocalized,
            usmUserPrivProtocol=usmUserPrivProtocol,
            usmUserPrivKeyLocalized=usmUserPrivKeyLocalized
        )

        msgAuthoritativeEngineBoots = securityParameters.getComponentByPosition(1)
        msgAuthoritativeEngineTime = securityParameters.getComponentByPosition(2)

        snmpEngine.observer.storeExecutionContext(
            snmpEngine, 'rfc3414.processIncomingMsg',
            dict(securityEngineId=msgAuthoritativeEngineId,
                 snmpEngineBoots=msgAuthoritativeEngineBoots,
                 snmpEngineTime=msgAuthoritativeEngineTime,
                 userName=usmUserName,
                 securityName=usmUserSecurityName,
                 authProtocol=usmUserAuthProtocol,
                 authKey=usmUserAuthKeyLocalized,
                 privProtocol=usmUserPrivProtocol,
                 privKey=usmUserPrivKeyLocalized)
        )
        snmpEngine.observer.clearExecutionContext(
            snmpEngine, 'rfc3414.processIncomingMsg'
        )

        # 3.2.5
        if msgAuthoritativeEngineId == snmpEngineID:
            # Authoritative SNMP engine: make sure securityLevel is sufficient
            badSecIndication = None
            if securityLevel == 3:
                if usmUserAuthProtocol == noauth.NoAuth.serviceID:
                    badSecIndication = 'authPriv wanted while auth not expected'
                if usmUserPrivProtocol == nopriv.NoPriv.serviceID:
                    badSecIndication = 'authPriv wanted while priv not expected'
            elif securityLevel == 2:
                if usmUserAuthProtocol == noauth.NoAuth.serviceID:
                    badSecIndication = 'authNoPriv wanted while auth not expected'
                if usmUserPrivProtocol != nopriv.NoPriv.serviceID:
                    # 4 (discovery phase always uses authenticated messages)
                    if msgAuthoritativeEngineBoots or msgAuthoritativeEngineTime:
                        badSecIndication = 'authNoPriv wanted while priv expected'

            elif securityLevel == 1:
                if usmUserAuthProtocol != noauth.NoAuth.serviceID:
                    badSecIndication = 'noAuthNoPriv wanted while auth expected'
                if usmUserPrivProtocol != nopriv.NoPriv.serviceID:
                    badSecIndication = 'noAuthNoPriv wanted while priv expected'
            if badSecIndication:
                usmStatsUnsupportedSecLevels, = mibBuilder.importSymbols(
                    '__SNMP-USER-BASED-SM-MIB', 'usmStatsUnsupportedSecLevels')
                usmStatsUnsupportedSecLevels.syntax += 1
                debug.logger & debug.flagSM and debug.logger(
                    'processIncomingMsg: reporting inappropriate security level for user %s: %s' % (
                        msgUserName, badSecIndication))
                raise error.StatusInformation(
                    errorIndication=errind.unsupportedSecurityLevel,
                    oid=usmStatsUnsupportedSecLevels.name,
                    val=usmStatsUnsupportedSecLevels.syntax,
                    securityStateReference=securityStateReference,
                    securityLevel=securityLevel,
                    contextEngineId=contextEngineId,
                    contextName=contextName,
                    maxSizeResponseScopedPDU=maxSizeResponseScopedPDU
                )

        # 3.2.6
        if securityLevel == 3 or securityLevel == 2:
            if usmUserAuthProtocol in self.authServices:
                authHandler = self.authServices[usmUserAuthProtocol]
            else:
                raise error.StatusInformation(
                    errorIndication=errind.authenticationFailure
                )

            try:
                authHandler.authenticateIncomingMsg(
                    usmUserAuthKeyLocalized,
                    securityParameters.getComponentByPosition(4),
                    wholeMsg
                )

            except error.StatusInformation:
                usmStatsWrongDigests, = mibBuilder.importSymbols(
                    '__SNMP-USER-BASED-SM-MIB', 'usmStatsWrongDigests')
                usmStatsWrongDigests.syntax += 1
                raise error.StatusInformation(
                    errorIndication=errind.authenticationFailure,
                    oid=usmStatsWrongDigests.name,
                    val=usmStatsWrongDigests.syntax,
                    securityStateReference=securityStateReference,
                    securityLevel=securityLevel,
                    contextEngineId=contextEngineId,
                    contextName=contextName,
                    maxSizeResponseScopedPDU=maxSizeResponseScopedPDU
                )

            debug.logger & debug.flagSM and debug.logger('processIncomingMsg: incoming msg authenticated')

            # synchronize time with authed peer
            self.__timeline[msgAuthoritativeEngineId] = (
                securityParameters.getComponentByPosition(1),
                securityParameters.getComponentByPosition(2),
                securityParameters.getComponentByPosition(2),
                int(time.time())
            )

            timerResolution = snmpEngine.transportDispatcher is None and 1.0 or snmpEngine.transportDispatcher.getTimerResolution()
            expireAt = int(self.__expirationTimer + 300 / timerResolution)
            if expireAt not in self.__timelineExpQueue:
                self.__timelineExpQueue[expireAt] = []
            self.__timelineExpQueue[expireAt].append(msgAuthoritativeEngineId)

            debug.logger & debug.flagSM and debug.logger(
                'processIncomingMsg: store timeline for securityEngineID %r' % (msgAuthoritativeEngineId,))

        # 3.2.7
        if securityLevel == 3 or securityLevel == 2:
            if msgAuthoritativeEngineId == snmpEngineID:
                # Authoritative SNMP engine: use local notion (SF bug #1649032)
                (snmpEngineBoots,
                 snmpEngineTime) = mibBuilder.importSymbols(
                    '__SNMP-FRAMEWORK-MIB', 'snmpEngineBoots', 'snmpEngineTime')
                snmpEngineBoots = snmpEngineBoots.syntax
                snmpEngineTime = snmpEngineTime.syntax.clone()
                idleTime = 0
                debug.logger & debug.flagSM and debug.logger(
                    'processIncomingMsg: read snmpEngineBoots (%s), snmpEngineTime (%s) from LCD' % (
                        snmpEngineBoots, snmpEngineTime))
            else:
                # Non-authoritative SNMP engine: use cached estimates
                if msgAuthoritativeEngineId in self.__timeline:
                    (snmpEngineBoots, snmpEngineTime,
                     latestReceivedEngineTime,
                     latestUpdateTimestamp) = self.__timeline[
                        msgAuthoritativeEngineId
                    ]
                    # time passed since last talk with this SNMP engine
                    idleTime = int(time.time()) - latestUpdateTimestamp
                    debug.logger & debug.flagSM and debug.logger(
                        'processIncomingMsg: read timeline snmpEngineBoots %s snmpEngineTime %s for msgAuthoritativeEngineId %r, idle time %s secs' % (
                            snmpEngineBoots, snmpEngineTime, msgAuthoritativeEngineId, idleTime))
                else:
                    raise error.ProtocolError('Peer SNMP engine info missing')

            # 3.2.7a
            if msgAuthoritativeEngineId == snmpEngineID:
                if snmpEngineBoots == 2147483647 or \
                        snmpEngineBoots != msgAuthoritativeEngineBoots or \
                        abs(idleTime + int(snmpEngineTime) - int(msgAuthoritativeEngineTime)) > 150:
                    usmStatsNotInTimeWindows, = mibBuilder.importSymbols(
                        '__SNMP-USER-BASED-SM-MIB', 'usmStatsNotInTimeWindows')
                    usmStatsNotInTimeWindows.syntax += 1
                    raise error.StatusInformation(
                        errorIndication=errind.notInTimeWindow,
                        oid=usmStatsNotInTimeWindows.name,
                        val=usmStatsNotInTimeWindows.syntax,
                        securityStateReference=securityStateReference,
                        securityLevel=2,
                        contextEngineId=contextEngineId,
                        contextName=contextName,
                        maxSizeResponseScopedPDU=maxSizeResponseScopedPDU
                    )
            # 3.2.7b
            else:
                # 3.2.7b.1
                # noinspection PyUnboundLocalVariable
                if msgAuthoritativeEngineBoots > snmpEngineBoots or \
                        msgAuthoritativeEngineBoots == snmpEngineBoots and \
                        msgAuthoritativeEngineTime > latestReceivedEngineTime:
                    self.__timeline[msgAuthoritativeEngineId] = (
                        msgAuthoritativeEngineBoots,
                        msgAuthoritativeEngineTime,
                        msgAuthoritativeEngineTime,
                        int(time.time())
                    )

                    timerResolution = snmpEngine.transportDispatcher is None and 1.0 or snmpEngine.transportDispatcher.getTimerResolution()
                    expireAt = int(self.__expirationTimer + 300 / timerResolution)
                    if expireAt not in self.__timelineExpQueue:
                        self.__timelineExpQueue[expireAt] = []
                    self.__timelineExpQueue[expireAt].append(msgAuthoritativeEngineId)

                    debug.logger & debug.flagSM and debug.logger(
                        'processIncomingMsg: stored timeline msgAuthoritativeEngineBoots %s msgAuthoritativeEngineTime %s for msgAuthoritativeEngineId %r' % (
                            msgAuthoritativeEngineBoots, msgAuthoritativeEngineTime, msgAuthoritativeEngineId))

                # 3.2.7b.2
                if snmpEngineBoots == 2147483647 or \
                        msgAuthoritativeEngineBoots < snmpEngineBoots or \
                        msgAuthoritativeEngineBoots == snmpEngineBoots and \
                        abs(idleTime + int(snmpEngineTime) - int(msgAuthoritativeEngineTime)) > 150:
                    raise error.StatusInformation(
                        errorIndication=errind.notInTimeWindow
                    )

        # 3.2.8a
        if securityLevel == 3:
            if usmUserPrivProtocol in self.privServices:
                privHandler = self.privServices[usmUserPrivProtocol]
            else:
                raise error.StatusInformation(
                    errorIndication=errind.decryptionError
                )
            encryptedPDU = scopedPduData.getComponentByPosition(1)
            if encryptedPDU is None:  # no ciphertext
                raise error.StatusInformation(
                    errorIndication=errind.decryptionError
                )

            try:
                decryptedData = privHandler.decryptData(
                    usmUserPrivKeyLocalized,
                    (securityParameters.getComponentByPosition(1),
                     securityParameters.getComponentByPosition(2),
                     securityParameters.getComponentByPosition(5)),
                    encryptedPDU
                )
                debug.logger & debug.flagSM and debug.logger(
                    'processIncomingMsg: PDU deciphered into %s' % debug.hexdump(decryptedData))

            except error.StatusInformation:
                usmStatsDecryptionErrors, = mibBuilder.importSymbols(
                    '__SNMP-USER-BASED-SM-MIB', 'usmStatsDecryptionErrors')
                usmStatsDecryptionErrors.syntax += 1
                raise error.StatusInformation(
                    errorIndication=errind.decryptionError,
                    oid=usmStatsDecryptionErrors.name,
                    val=usmStatsDecryptionErrors.syntax,
                    securityStateReference=securityStateReference,
                    securityLevel=securityLevel,
                    contextEngineId=contextEngineId,
                    contextName=contextName,
                    maxSizeResponseScopedPDU=maxSizeResponseScopedPDU
                )
            scopedPduSpec = scopedPduData.setComponentByPosition(0).getComponentByPosition(0)
            try:
                scopedPDU, rest = decoder.decode(decryptedData,
                                                 asn1Spec=scopedPduSpec)

            except PyAsn1Error:
                debug.logger & debug.flagSM and debug.logger(
                    'processIncomingMsg: scopedPDU decoder failed %s' % sys.exc_info()[0])
                raise error.StatusInformation(
                    errorIndication=errind.decryptionError
                )

            if eoo.endOfOctets.isSameTypeWith(scopedPDU):
                raise error.StatusInformation(
                    errorIndication=errind.decryptionError
                )
        else:
            # 3.2.8b
            scopedPDU = scopedPduData.getComponentByPosition(0)
            if scopedPDU is None:  # no plaintext
                raise error.StatusInformation(
                    errorIndication=errind.decryptionError
                )

        debug.logger & debug.flagSM and debug.logger(
            'processIncomingMsg: scopedPDU decoded %s' % scopedPDU.prettyPrint())

        # 3.2.10
        securityName = usmUserSecurityName

        debug.logger & debug.flagSM and debug.logger(
            'processIncomingMsg: cached msgUserName %s info by securityStateReference %s' % (
                msgUserName, securityStateReference))

        # Delayed to include details
        if not msgUserName and not msgAuthoritativeEngineId:
            usmStatsUnknownUserNames, = mibBuilder.importSymbols(
                '__SNMP-USER-BASED-SM-MIB', 'usmStatsUnknownUserNames')
            usmStatsUnknownUserNames.syntax += 1
            raise error.StatusInformation(
                errorIndication=errind.unknownSecurityName,
                oid=usmStatsUnknownUserNames.name,
                val=usmStatsUnknownUserNames.syntax,
                securityStateReference=securityStateReference,
                securityEngineID=msgAuthoritativeEngineId,
                securityLevel=securityLevel,
                contextEngineId=contextEngineId,
                contextName=contextName,
                maxSizeResponseScopedPDU=maxSizeResponseScopedPDU,
                PDU=scopedPDU
            )

        # 3.2.12
        return (msgAuthoritativeEngineId, securityName, scopedPDU,
                maxSizeResponseScopedPDU, securityStateReference)
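
processIncomingMsg applies the same idiom on the receive path: a failed decode bumps an SNMP error counter, the exception value from sys.exc_info() is logged, and a higher-level parse error is raised. The sketch below is a hedged, simplified illustration; parse_error_count, ParseError and the int() "decoder" are made-up stand-ins for the snmpInASNParseErrs counter, errind.parseError and decoder.decode().

import sys

class ParseError(Exception):
    pass

parse_error_count = 0   # stand-in for the MIB error counter

def parse_message(raw):
    global parse_error_count
    try:
        return int(raw)                      # stand-in for decoder.decode()
    except ValueError:
        exc_class, exc_value, _ = sys.exc_info()
        # exc_info()[0] is the exception class, exc_info()[1] the instance.
        print('parse failed: %s: %s' % (exc_class.__name__, exc_value))
        parse_error_count += 1
        raise ParseError('malformed message')

print(parse_message('42'))                   # 42
try:
    parse_message('not-a-number')
except ParseError:
    print('parse errors so far: %d' % parse_error_count)   # 1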

Example 16

Project: MAClient
Source File: player.py
View license
  def _readerLoop( self ):
    f= None
    try:
      while self.exitFlag== 0:
        if self.playingFile== None:
          time.sleep( 0.01 )
          continue
        
        self.length= self.frameNum= -1
        # Initialize demuxer and read small portion of the file to have more info on the format
        self.clearError()
        if type( self.playingFile ) in ( str, unicode ):
          try:
            f= open( self.playingFile, 'rb' )
            format= self.playingFile.split( '.' )[ -1 ].lower()
          except:
            traceback.print_exc()
            self.err.append( sys.exc_info() )
            self.playingFile= None
            continue
        else:
          format= self.fileFormat
          f= self.playingFile
        
        try:
          dm= muxer.Demuxer( format )
          s= f.read( FILE_CHUNK )
          r= dm.parse( s )
        except:
          traceback.print_exc()
          self.err.append( sys.exc_info() )
          self.playingFile= None
          continue
        
        try: self.metaData= dm.getHeaderInfo()
        except: self.metaData= {}
        
        # This seek sets the seeking position already at the desired offset from the beginning
        if self.startPos:
          self.seekTo( self.startPos )
        
        # Setup video( only first matching stream will be used )
        self.clearError()
        self.vindex= -1
        streams= filter( lambda x: x, dm.streams )
        for st in streams:
          if st and st[ 'type' ]== muxer.CODEC_TYPE_VIDEO:
            self._initVideo( st )
            self.vindex= list( streams ).index( st )
            break
        
        # Setup audio( only first matching stream will be used )
        self.aindex= -1
        self.aPTS= None
        for st in streams:
          if st and st[ 'type' ]== muxer.CODEC_TYPE_AUDIO:
            self._initAudio( st )
            self.aindex= list( streams ).index( st )
            break
        
        # Open current file for playing
        currentFile= self.playingFile
        if self.vindex>= 0:
          self._getVStreamParams( self.vindex, streams[ self.vindex ], r )
        
        self._getStreamLength( format, dm, f, r )
        
        # Play until no exit flag, not eof, no errs and file still the same
        while len(s) and len( self.err )== 0 and \
            self.exitFlag== 0 and self.playingFile and len( streams ) and \
            self.playingFile== currentFile:
          
          if self.isPaused():
            time.sleep( PAUSE_SLEEP )
            continue
        
          for d in r:
            if self.playingFile!= currentFile:
              break
            
            # Seeking stuff
            if self.seek>= 0:
              # Find the file position first
              if self.length> 0 and self.fileSize> 0:
                #print self.seek, self.length, self.fileSize, ( float( self.seek ) / self.length )* self.fileSize
                f.seek( ( float( self.seek ) / self.length )* self.fileSize, 0 )
              else:
                f.seek( self.seek* self.getBitRate()/ 8, 0 )
                #print self.seek, self.getBitRate(), f.tell()
              
              #print 'seek to', self.seek, f.tell()
              seek= self.seek
              self.aDecodedFrames= []
              if self.ac:
                self.ac.reset()
                self.snd.stop()
              self.rawFrames= []
              self.decodedFrames= []
              if self.vc:
                self.vc.reset()
              
              dm.reset()
              if self.vindex== -1:
                # Seek immediately if only audio stream is available
                self.seek= -1
                self.aDelta= seek
              else:
                # Wait for a key video frame to arrive
                self.seek= SEEK_IN_PROGRESS
              break
            
            # See if we reached the end position of the video clip
            if self.endPos and self._getPTS()* 1000> self.endPos:
              # Seek at the end and close the reading loop instantly 
              f.seek( 0, 2 )
              break
            
            try:
              # Update length if not set already
              if self.getLength()== -1 and self.getBitRate()> 0:
                # Check file size against length and bitrates
                self.length= self.fileSize/ ( self.getBitRate()/ 8 )
              
              # Demux file into streams
              if d[ 0 ]== self.vindex:
                # Process video frame
                seek= self.seek
                self._processVideoFrame( d )
                if self.seek!= SEEK_IN_PROGRESS and seek== SEEK_IN_PROGRESS:
                  # If key frame was found, change the time position
                  self.videoPTS= ( float( f.tell() )/ self.fileSize )* self.length
                  self.aDelta= self.videoPTS+ self.initADelta
                  #print '---->position', f.tell(), self.aDelta
              elif d[ 0 ]== self.aindex and self.seek!= SEEK_IN_PROGRESS:
                # Decode and play audio frame
                self._processAudioFrame( d )
            except:
              traceback.print_exc()
              self.err.append( sys.exc_info() )
              self.playingFile= None
              break
          
          # Read next encoded chunk and demux it
          try:
            s= f.read( 512 )
            r= dm.parse( s )
          except:
            traceback.print_exc()
            self.err.append( sys.exc_info() )
            self.playingFile= None
            continue
        
        if f: f.close()

        # Close current file when error detected
        if len( self.err ):
          self.stopPlayback()
        
        # Wait until all frames are played
        while self._hasQueue()> 0 and self.isPlaying():
          self._processVideoFrame( None, True )
          self._processAudio()
        
        while self.aindex!= -1 and self._getSndLeft()> 0 and self.isPlaying():
          time.sleep( 0.01 )
        
        self._resetAudio()
        self._resetVideo()
        
        if self.loops> 0:
          self.loops-= 1
          continue
        
        # Report the file end
        try: f= self.callback.onPlaybackEnd
        except: f= None
        if f: 
          f( self )
        else:
          self.playingFile= None
    except:
      traceback.print_exc()
    
    self.exitFlag= 1
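
Instead of re-raising, the player's reader loop stores the full sys.exc_info() tuple in a list (self.err) so the failure can be inspected later, after printing the traceback for immediate debugging. A minimal sketch of that collect-and-continue pattern (risky_step and errors are illustrative names, not from player.py):

import sys
import traceback

def risky_step(x):
    # Stand-in for the demuxer/decoder calls that may raise anything.
    return 1.0 / x

errors = []   # collected (type, value, traceback) tuples, like self.err above

for value in (2, 0, 4):
    try:
        risky_step(value)
    except Exception:
        traceback.print_exc()          # show the problem right away
        errors.append(sys.exc_info())  # keep it for later inspection

if errors:
    exc_class, exc_value, tb = errors[0]
    print('first recorded error: %s' % (exc_value,))

Note that each stored tuple keeps its traceback frames (and their locals) alive, so a long-lived error list should be emptied regularly, as the player appears to do via clearError().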

Example 17

View license
def build_annotated_tgm(closest_gene_output,distance_to_tss,logistic_score_output,fasta_file,motif_ids,makeWindow=True,tgm_file='',do_pkl=True):
    '''
    Takes existing tgm, and maps to gene names and TF ids within a specific window
    '''
    from chipsequtil import Fasta
    ##get fasta file events, since these are columns in the logistic_score matrix
    seq_ids=Fasta.load(fasta_file,key_func=lambda x: x)

    ##need to get sequence mids in the order they are processed
    ##in the file, this is the index into the score_output file
    ##. ASSUMES GALAXY-formatted FASTA!!!!
    seq_mids=[] ##list of FASTA regions, in their appropriate order in the file
    filtered_events={}##gene name of closest gene to event within window
    for k in seq_ids.keys():
        vals=k.split(';')
        if len(vals)==1:
            vals=k.split()
        if ':' in vals[0]: #bed tools used 
            chr,range=vals[0].split(':')
            low,high=range.split('-')
            mid=str(int(low)+((int(high)-int(low))/2))
            seq_mids.append(chr+':'+mid)
        elif 'random' not in vals[0]: #galaxy tools used
            genome,chr,low,high,strand=vals[0].split('_')
            mid=str(int(low)+((int(high)-int(low))/2))
            seq_mids.append(chr+':'+mid)
        
        if len(vals)==3:            
            filtered_events[chr+':'+mid]=vals[2]
    print 'Found %d events, of which %d have gene names'%(len(seq_mids),len(filtered_events))
    ##this next section relies on xls 
    ##filter events that are within distance from closest_gene_output to get gene mapping
    ##
    filtered_fc={}##FC of events within window, in case we want to use in the future

    event_indexes=[] ##

    
 #    ###open the closest_gene_output and determine
#     try:
#         cgo=open(closest_gene_output,'rU').readlines()
#     except:
#         print "Error opening file:", sys.exc_info()[0]
#         print "Check to make sure file exists at %s"%(closest_gene_output)
#         raise
#     inds=cgo[0].strip().split('\t')
#     for row in cgo[1:]:
#         arr=row.strip().split('\t')
#         if 'geneSymbol' in inds: #this is true if we used an xref file
#             gene=arr[inds.index('geneSymbol')]        
# #            mid=arr[2]+':'+str(int(arr[3])+(int(arr[4])-int(arr[3]))/2)
#         else: #otherwise we just gene id
#             gene=arr[inds.index('knownGeneID')]
#         #position mapping is different
#         if 'Position' in inds: #this is for GPS
#             mid='chr'+arr[inds.index('Position')]
#         elif 'chrom' in inds: #this is for BED
#             mid=arr[inds.index('chrom')]+':'+str(int(arr[inds.index('chromStart')])+(int(arr[inds.index('chromEnd')])-int(arr[inds.index('chromStart')]))/2)
#         else: #this is for MACS
#             mid=arr[inds.index('chr')]+':'+str(int(arr[inds.index('start')])+(int(arr[inds.index('end')])-int(arr[inds.index('start')]))/2)

        
#         #print gene,mid
#         dist=arr[inds.index('dist from feature')]
#         try:
#             sv=arr[inds.index('score')]
#         except:
#             try:
#                 sv=arr[inds.index('IPvsCTR')]
#             except:
#                 fc=0.0
#         if sv!='':
#             fc=float(sv)
#         else:
#             next
                
#         #check absolute distance if we're doing a window, or negative distance if we're looking upstream
#         if distance_to_tss=='' or (makeWindow and np.absolute(int(dist))<int(distance_to_tss)) or int(dist)>(-1*int(distance_to_tss)):
# #            filtered_events[mid]=gene #(this was out of if clause, should it be there?) 1/2
#             if mid in seq_mids:
#                 event_indexes.append(seq_mids.index(mid))##index into fasta file value/maps to array
                
#                 ##UPDATE: moved these to within if clause - so that unrelated scores are not included
#                 filtered_events[mid]=gene ##gene name of event
#                 filtered_fc[mid]=float(fc) ##fc value of event
# #            filtered_fc[mid]=float(fc) #see above, 2/2

                
  #  print 'Got '+str(len(filtered_events))+' per-gene events within '+distance_to_tss+' bp window out of '+str(len(cgo))

 #   print 'These map to '+str(len(event_indexes))+' regions in the FASTA file'

    ##get gene ids, or just use mid of sequence region
    gene_names=[t for t in set(filtered_events.values())]
    print gene_names[0:10]

    #get gene ids for all matrices list loaded in
    mi_files=motif_ids.split(',')
    if len(mi_files)>0:
        #open first motif name file that contains names for each element in TAMO file
        all_tf_names=[a.strip() for a in open(mi_files[0],'rU').readlines()]
    if len(mi_files)>1:
        #if we have additional files, check to see if names already exist
        for i,f in enumerate(mi_files):
            if i==0:
                next
            try:
                #open file and read in extra ids
                newfs=[a.strip() for a in open(f,'rU').readlines()]
            except:
                print "Error opening file:", sys.exc_info()[0]
                print "Check to make sure file exists at %s"%(f)
                raise
               
            if len(newfs)==len(all_tf_names):
                #combine existing tf names with these with . delimiter....
                all_tf_names=['.'.join((a,b)) for a,b in zip(all_tf_names,newfs)]

    ##now go through and clean up TF names
    cleaned_tf_names=[]
    for i,a in enumerate(all_tf_names):
        tfn=set([b for b in a.split('.') if '$' not in b and b!=''])
        if(len(tfn)==0):
            tfn=a.split('.')
#        else:
#            print 'Replacing %s with %s'%(a,'.'.join(tfn))
        cleaned_tf_names.append('.'.join(tfn))

    all_tf_names=cleaned_tf_names
    #print len(cleaned_tf_names)

    
    ##now actually map events to scores
    ##load motif matrix scanning output that maps matrices to regions
    print 'Loading complete motif score file...'
    event_scores=np.loadtxt(logistic_score_output)
    print '\t...Loaded!'
                      
    #create new tgm matrix with appropriate file name
    newmat=np.zeros((len(all_tf_names),len(gene_names)),dtype='float')##fill in with gene length
    if makeWindow:
        distance_to_tss=distance_to_tss+'_bpWindow'
    else:
        distance_to_tss=distance_to_tss+'_bpUpstream'

    if tgm_file=='': 
        tgm_file=re.sub('.txt','_'+distance_to_tss+'.tgm',os.path.basename(logistic_score_output))
    if do_pkl:
        pkl_file=re.sub('.tgm','.pkl',tgm_file)
    else:
        pkl_file=''
        
    ##sort event indexes from seq_mids that are in the filtered_events file
    event_indexes.sort()
    
    #populate matrix with greatest score attributed to that gene/tf combo
    for ind,arr in enumerate(event_scores):
        ##name of matrix/motif
        mat=all_tf_names[ind]

        #tfnames=[mat]
        ##here we enumerate which sequences were mapped to a gene within the window
        for k,val in enumerate(seq_mids):#k in event_indexes:
            
            #here we want the event midpoint for the index
#            val=seq_mids[k]
            
            #get score for that index
            score=arr[k]
            
            #now map it to closest gene for that midpoint
            cg=filtered_events[val]

            fc=1.0 ##update this if we want to normalize score by fold change
            score=float(score)*float(fc) ##this should do nothing since fc generally = 1

            #if len(tfnames)==1:
            curscore=newmat[all_tf_names.index(mat),gene_names.index(cg)]
            ##updated to include maximum score!!

            if np.abs(score)>np.abs(curscore):
                newmat[all_tf_names.index(mat),gene_names.index(cg)]=score
            #else:
            #    for t in tfnames:
            #        curscore=newmat[all_tf_names.index(t),gene_names.index(cg)]
            #    ##updated to include maximum score!!
            #        if np.abs(float(score))>np.abs(curscore):
            #            newmat[all_tf_names.index(t),gene_names.index(cg)]=float(score)

                
    ###save these intermediate files for debugging purposes
    np.savetxt(tgm_file,newmat)
    gin=re.sub('.tgm','_geneids.txt',tgm_file)
    tin=re.sub('.tgm','_tfids.txt',tgm_file)

    try:
        open(gin,'w').writelines([g+'\n' for g in gene_names])
        open(tin,'w').writelines([t+'\n' for t in all_tf_names])
    except:
        print "Error opening file:", sys.exc_info()[0]
        print "Check to make sure file exists at %s"%(closest_gene_output)
        raise
    
    if pkl_file!='':
        zipcmd='python '+os.path.join(progdir,'zipTgms.py')+' '+tgm_file+' '+tin+' '+gin+' --pkl='+pkl_file
        print 'Compressing matrix file into pkl'
        print zipcmd
        os.system(zipcmd)
        return pkl_file
    else:
        return tgm_file
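
Here sys.exc_info()[0] is used inside bare except blocks around file I/O to report only the exception class before re-raising, so the caller still receives the original exception and traceback. A hedged, minimal sketch of that report-then-reraise pattern (read_lines is an illustrative helper, not from the script above):

import sys

def read_lines(path):
    try:
        return open(path, 'r').readlines()
    except:
        # sys.exc_info()[0] is just the exception class, e.g. IOError/OSError;
        # the bare "raise" re-raises the original exception unchanged.
        print('Error opening file: %s' % (sys.exc_info()[0],))
        print('Check to make sure file exists at %s' % (path,))
        raise

# read_lines('motif_ids.txt')   # on a missing file: both messages, then the error propagates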

Example 18

Project: discord_feedbot
Source File: feed2discord.py
View license
@asyncio.coroutine
def background_check_feed(feed,asyncioloop):
    global timezone
    logger.info(feed+': Starting up background_check_feed')

    # Try to wait until Discord client has connected, etc:
    yield from client.wait_until_ready()
    # make sure debug output has this check run in the right order...
    yield from asyncio.sleep(1)

    # just a bit easier to use...
    FEED=config[feed]

    # pull config for this feed out:
    feed_url = FEED.get('feed_url')
    rss_refresh_time = FEED.getint('rss_refresh_time',3600)
    max_age = FEED.getint('max_age',86400)

    # loop through all the channels this feed is configured to send to
    channels = []
    for key in FEED.get('channels').split(','):
        logger.debug(feed+': adding channel '+key)
        # stick a dict in the channels array so we have more to work with
        channels.append(
            {
              'object': discord.Object(id=config['CHANNELS'][key]),
              'name': key,
              'id': config['CHANNELS'][key],
            }
        )

    # Basically run forever
    while not client.is_closed:
        # And tries to catch all the exceptions and just keep going
        # (but see list of except/finally stuff below)
        try:
            logger.info(feed+': processing feed')

            # If send_typing is on for the feed, send a little "typing ..."
            # whenever a feed is being worked on.  configurable per-room
            if FEED.getint(
                           feed+'.send_typing',
                           FEED.getint('send_typing',0)) >= 1:
                for channel in channels:
                    # Since this is first attempt to talk to this channel,
                    # be very verbose about failures to talk to channel
                    try:
                        yield from client.send_typing(channel['object'])
                    except discord.errors.Forbidden:
                        logger.error(feed+':discord.errors.Forbidden')
                        logger.error(sys.exc_info())
                        logger.error(
                            feed+
                            ":Perhaps bot isn't allowed in this channel?")
                        logger.error(channel)

            http_headers = {}
            http_headers['User-Agent'] = MAIN.get('UserAgent',
                                                  'feed2discord/1.0')

            ### Download the actual feed, if changed since last fetch

            # pull data about history of this *feed* from DB:
            cursor = conn.cursor()
            cursor.execute(
                "select lastmodified,etag from feed_info where feed=? OR url=?",
                [feed,feed_url])
            data=cursor.fetchone()

            # If we've handled this feed before,
            # and we have etag from last run, add etag to headers.
            # and if we have a last modified time from last run,
            # add "If-Modified-Since" to headers.
            if data is None: # never handled this feed before...
                logger.info(feed+':looks like updated version. saving info')
                cursor.execute(
                    "REPLACE INTO feed_info (feed,url) VALUES (?,?)",
                    [feed,feed_url])
                conn.commit()
                logger.debug(feed+':feed info saved')
            else:
                logger.debug(feed+
                             ':setting up extra headers for HTTP request.')
                logger.debug(data)
                lastmodified = data[0]
                etag = data[1]
                if lastmodified is not None and len(lastmodified):
                    logger.debug(feed+
                                 ':adding header If-Modified-Since: '+
                                 lastmodified)
                    http_headers['If-Modified-Since'] = lastmodified
                else:
                    logger.debug(feed+':no stored lastmodified')
                if etag is not None and len(etag):
                    logger.debug(feed+':adding header ETag: '+etag)
                    http_headers['ETag'] = etag
                else:
                    logger.debug(feed+':no stored ETag')

            logger.debug(feed+':sending http request for '+feed_url)
            # Send actual request.  yield from can yield control to another
            # instance.
            http_response = yield from httpclient.request('GET',
                                                          feed_url,
                                                          headers=http_headers)
            logger.debug(http_response)

            # Some feeds are smart enough to use that if-modified-since or
            # etag info, which gives us a 304 status.  If that happens,
            # assume no new items, fall through rest of this and try again
            # later.
            if http_response.status == 304:
                logger.debug(feed+':data is old; moving on')
                http_response.close()
                raise HTTPNotModified()
            elif http_response.status is None:
                logger.error(feed+':HTTP response code is NONE')
                raise HTTPError()
            # If we get anything but a 200, that's a problem and we don't
            # have good data, so give up and try later.
            # Mostly handled different than 304/not-modified to make logging
            # clearer.
            elif http_response.status != 200:
                logger.debug(feed+':HTTP error: '+str(http_response.status))
                raise HTTPError()
            else:
                logger.debug(feed+':HTTP success')


            # pull data out of the http response
            logger.debug(feed+':reading http response')
            http_data = yield from http_response.read()

            # parse the data from the http response with feedparser
            logger.debug(feed+':parsing http data')
            feed_data = feedparser.parse(http_data)
            logger.debug(feed+':done fetching')


            # If we got an ETAG back in headers, store that, so we can
            # include on next fetch
            if 'ETAG' in http_response.headers:
                etag = http_response.headers['ETAG']
                logger.debug(feed+':saving etag: '+etag)
                cursor.execute(
                    "UPDATE feed_info SET etag=? where feed=? or url=?",
                    [etag,feed,feed_url])
                conn.commit()
                logger.debug(feed+':etag saved')
            else:
                logger.debug(feed+':no etag')

            # If we got a Last-Modified header back, store that, so we can
            # include on next fetch
            if 'LAST-MODIFIED' in http_response.headers:
                modified = http_response.headers['LAST-MODIFIED']
                logger.debug(feed+':saving lastmodified: '+modified)
                cursor.execute(
                    "UPDATE feed_info SET lastmodified=? where feed=? or url=?",
                    [modified,feed,feed_url])
                conn.commit()
                logger.debug(feed+':saved lastmodified')
            else:
                logger.debug(feed+':no last modified date')

            http_response.close()

            # Process all of the entries in the feed
            # Use reversed to start with end, which is usually oldest
            logger.debug(feed+':processing entries')
            for item in reversed(feed_data.entries):
                logger.debug(feed+':item:processing this entry')
                if debug > 1:
                    logger.debug(item) # can be very noisy

                # Pull out the unique id, or just give up on this item.
                id = ''
                if 'id' in item:
                    id=item.id
                elif 'guid' in item:
                    id=item.guid
                elif 'link' in item:
                    id=item.link
                else:
                    logger.error(feed+':item:no id, skipping')
                    continue

                # Get our best date out, in both raw and parsed form
                pubDateDict = extract_best_item_date(item)
                pubDate = pubDateDict['date']
                pubDate_parsed = pubDateDict['date_parsed']

                logger.debug(feed+':item:id:'+id)
                logger.debug(feed+
                             ':item:checking database history for this item')
                # Check DB for this item
                cursor.execute(
                    "SELECT published,title,url,reposted FROM feed_items WHERE id=?",
                    [id])
                data=cursor.fetchone()

                # If we've never seen it before, then actually processing
                # this:
                if data is None:
                    logger.info(feed+':item '+id+' unseen, processing:')

                    # Store info about this item, so next time we skip it:
                    cursor.execute(
                        "INSERT INTO feed_items (id,published) VALUES (?,?)",
                        [id,pubDate])
                    conn.commit()

                    # Doing some crazy date math stuff...
                    # max_age is mostly so that first run doesn't spew too
                    # much stuff into a room, but is also a useful safety
                    # measure in case a feed suddenly reverts to something
                    # ancient or other weird problems...
                    time_since_published = timezone.localize(datetime.now()) - pubDate_parsed.astimezone(timezone)

                    if time_since_published.total_seconds() < max_age:
                        logger.info(feed+':item:fresh and ready for parsing')

                        # Loop over all channels for this particular feed
                        # and process appropriately:
                        for channel in channels:
                            include = True
                            filter_field = FEED.get(
                                                    channel['name']+'.filter_field',
                                                    FEED.get('filter_field',
                                                        'title'))
                            # Regex if channel exists
                            if (channel['name']+'.filter') in FEED or 'filter' in FEED:
                                logger.debug(feed+':item:running filter for'+channel['name'])
                                regexpat = FEED.get(
                                                    channel['name']+'.filter',
                                                    FEED.get('filter','^.*$'))
                                logger.debug(feed+':item:using filter:'+regexpat+' on '+item['title']+' field '+filter_field)
                                regexmatch = re.search(regexpat,item[filter_field])
                                if regexmatch is None:
                                    include = False
                                    logger.info(feed+':item:failed filter for '+channel['name'])
                            elif (channel['name']+'.filter_exclude') in FEED or 'filter_exclude' in FEED:
                                logger.debug(feed+':item:running exclude filter for'+channel['name'])
                                regexpat = FEED.get(
                                                    channel['name']+'.filter_exclude',
                                                    FEED.get('filter_exclude',
                                                    '^.*$'))
                                logger.debug(feed+':item:using filter_exclude:'+regexpat+' on '+item['title']+' field '+filter_field)
                                regexmatch = re.search(regexpat,item[filter_field])
                                if regexmatch is None:
                                    include = True
                                    logger.info(feed+':item:passed exclude filter for '+channel['name'])
                                else:
                                    include = False
                                    logger.info(feed+':item:failed exclude filter for '+channel['name'])
                            else:
                                include = True # redundant safety net
                                logger.debug(feed+':item:no filter configured for'+channel['name'])

                            if include is True:
                                logger.debug(feed+':item:building message for '+channel['name'])
                                message = build_message(FEED,item,channel)
                                logger.debug(feed+':item:sending message (eventually) to '+channel['name'])
                                yield from send_message_wrapper(asyncioloop,
                                                                FEED,
                                                                feed,
                                                                channel,
                                                                client,
                                                                message)
                            else:
                                logger.info(feed+':item:skipping item due to not passing filter for '+channel['name'])

                    else:
                        # Logs of debugging info for date handling stuff...
                        logger.info(feed+':too old; skipping')
                        logger.debug(feed+':now:'+str(time.time()))
                        logger.debug(feed+':now:gmtime:'+str(time.gmtime()))

                        logger.debug(feed+':now:localtime:'+str(time.localtime()))
                        logger.debug(feed+':timezone.localize(datetime.now()):'+str(timezone.localize(datetime.now())))
                        logger.debug(feed+':pubDate:'+str(pubDate))
                        logger.debug(feed+':pubDate_parsed:'+str(pubDate_parsed))
                        logger.debug(feed+':pubDate_parsed.astimezone(timezone):'+str(pubDate_parsed.astimezone(timezone)))
                        if debug >= 4:
                            logger.debug(item)
                # seen before, move on:
                else:
                    logger.debug(feed+':item:'+id+' seen before, skipping')
        # This is completely expected behavior for a well-behaved feed:
        except HTTPNotModified:
            logger.debug(feed+':Headers indicate feed unchanged since last time fetched:')
            logger.debug(sys.exc_info())
        # Many feeds have random periodic problems that shouldn't cause
        # permanent death:
        except HTTPError:
            logger.warn(feed+':Unexpected HTTP error:')
            logger.warn(sys.exc_info())
            logger.warn(feed+':Assuming error is transient and trying again later')
        # sqlite3 errors are probably really bad and we should just totally
        # give up on life
        except sqlite3.Error as sqlerr:
            logger.error(feed+':sqlite3 error: ')
            logger.error(sys.exc_info())
            logger.error(sqlerr)
            raise
        # Ideally we'd remove the specific channel or something...
        # But I guess just throw an error into the log and try again later...
        except discord.errors.Forbidden:
            logger.error(feed+':discord.errors.Forbidden')
            logger.error(sys.exc_info())
            logger.error(feed+":Perhaps bot isn't allowed in one of the channels for this feed?")
            # raise # or not? hmm...
        # unknown error: definitely give up and die and move on
        except:
            logger.error(feed+':Unexpected error:')
            # logger.error(sys.exc_info())
            logger.error(traceback.format_exc())
            logger.error(feed+':giving up')
            raise
        # No matter what goes wrong, wait same time and try again
        finally:
            logger.debug(feed+':sleeping for '+str(rss_refresh_time)+' seconds')
            yield from asyncio.sleep(rss_refresh_time)
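
This feed loop layers its handlers by severity: an expected HTTPNotModified logs sys.exc_info() at DEBUG, transient HTTP problems at WARNING, known-fatal sqlite3 errors at ERROR before re-raising, and a final bare except logs a full traceback.format_exc() and gives up. A condensed, hedged sketch of that layering; NotModified, poll_once and fetch are illustrative names, not feed2discord symbols.

import sys
import time
import traceback
import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger('feed')

class NotModified(Exception):
    """Stand-in for the HTTPNotModified marker exception."""

def poll_once(fetch):
    try:
        fetch()
    except NotModified:
        logger.debug('feed unchanged: %s', sys.exc_info())
    except IOError:
        logger.warning('transient error, will retry: %s', sys.exc_info())
    except Exception:
        logger.error('unexpected error:\n%s', traceback.format_exc())
        raise
    finally:
        time.sleep(0)   # placeholder for the rss_refresh_time back-off

def raise_not_modified():
    raise NotModified()

poll_once(lambda: None)        # nothing logged
poll_once(raise_not_modified)  # logged at DEBUG only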

Example 19

Project: capirca
Source File: aclgen.py
View license
def RenderFile(input_file, output_directory, definitions,
               exp_info, write_files):
  """Render a single file.

  Args:
    input_file: the name of the input policy file.
    output_directory: the directory in which we place the rendered file.
    definitions: the definitions from naming.Naming().
    exp_info: print a info message when a term is set to expire
              in that many weeks.
    write_files: a list of file tuples, (output_file, acl_text), to write
  """
  logging.debug('rendering file: %s into %s', input_file,
                output_directory)
  pol = None
  jcl = False
  acl = False
  asacl = False
  aacl = False
  bacl = False
  eacl = False
  gcefw = False
  ips = False
  ipt = False
  spd = False
  nsx = False
  pcap_accept = False
  pcap_deny = False
  pf = False
  srx = False
  jsl = False
  nft = False
  win_afw = False
  xacl = False

  try:
    conf = open(input_file).read()
    logging.debug('opened and read %s', input_file)
  except IOError as e:
    logging.warn('bad file: \n%s', e)
    raise

  try:
    pol = policy.ParsePolicy(
        conf, definitions, optimize=FLAGS.optimize,
        base_dir=FLAGS.base_directory, shade_check=FLAGS.shade_check)
  except policy.ShadingError as e:
    logging.warn('shading errors for %s:\n%s', input_file, e)
    return
  except (policy.Error, naming.Error):
    raise ACLParserError('Error parsing policy file %s:\n%s%s' % (
        input_file, sys.exc_info()[0], sys.exc_info()[1]))

  platforms = set()
  for header in pol.headers:
    platforms.update(header.platforms)

  if 'juniper' in platforms:
    jcl = copy.deepcopy(pol)
  if 'cisco' in platforms:
    acl = copy.deepcopy(pol)
  if 'ciscoasa' in platforms:
    asacl = copy.deepcopy(pol)
  if 'brocade' in platforms:
    bacl = copy.deepcopy(pol)
  if 'arista' in platforms:
    eacl = copy.deepcopy(pol)
  if 'aruba' in platforms:
    aacl = copy.deepcopy(pol)
  if 'ipset' in platforms:
    ips = copy.deepcopy(pol)
  if 'iptables' in platforms:
    ipt = copy.deepcopy(pol)
  if 'nsxv' in platforms:
    nsx = copy.deepcopy(pol)
  if 'packetfilter' in platforms:
    pf = copy.deepcopy(pol)
  if 'pcap' in platforms:
    pcap_accept = copy.deepcopy(pol)
    pcap_deny = copy.deepcopy(pol)
  if 'speedway' in platforms:
    spd = copy.deepcopy(pol)
  if 'srx' in platforms:
    srx = copy.deepcopy(pol)
  if 'srxlo' in platforms:
    jsl = copy.deepcopy(pol)
  if 'windows_advfirewall' in platforms:
    win_afw = copy.deepcopy(pol)
  if 'ciscoxr' in platforms:
    xacl = copy.deepcopy(pol)
  if 'nftables' in platforms:
    nft = copy.deepcopy(pol)
  if 'gce' in platforms:
    gcefw = copy.deepcopy(pol)

  if not output_directory.endswith('/'):
    output_directory += '/'

  try:
    if jcl:
      acl_obj = juniper.Juniper(jcl, exp_info)
      RenderACL(str(acl_obj), acl_obj.SUFFIX, output_directory,
                input_file, write_files)
    if srx:
      acl_obj = junipersrx.JuniperSRX(srx, exp_info)
      RenderACL(str(acl_obj), acl_obj.SUFFIX, output_directory,
                input_file, write_files)
    if acl:
      acl_obj = cisco.Cisco(acl, exp_info)
      RenderACL(str(acl_obj), acl_obj.SUFFIX, output_directory,
                input_file, write_files)
    if asacl:
      acl_obj = ciscoasa.CiscoASA(asacl, exp_info)
      RenderACL(str(acl_obj), acl_obj.SUFFIX, output_directory,
                input_file, write_files)
    if aacl:
      acl_obj = aruba.Aruba(aacl, exp_info)
      RenderACL(str(acl_obj), acl_obj.SUFFIX, output_directory,
                input_file, write_files)
    if bacl:
      acl_obj = brocade.Brocade(bacl, exp_info)
      RenderACL(str(acl_obj), acl_obj.SUFFIX, output_directory,
                input_file, write_files)
    if eacl:
      acl_obj = arista.Arista(eacl, exp_info)
      RenderACL(str(acl_obj), acl_obj.SUFFIX, output_directory,
                input_file, write_files)
    if ips:
      acl_obj = ipset.Ipset(ips, exp_info)
      RenderACL(str(acl_obj), acl_obj.SUFFIX, output_directory,
                input_file, write_files)
    if ipt:
      acl_obj = iptables.Iptables(ipt, exp_info)
      RenderACL(str(acl_obj), acl_obj.SUFFIX, output_directory,
                input_file, write_files)
    if nsx:
      acl_obj = nsxv.Nsxv(nsx, exp_info)
      RenderACL(str(acl_obj), acl_obj.SUFFIX, output_directory,
                input_file, write_files)
    if spd:
      acl_obj = speedway.Speedway(spd, exp_info)
      RenderACL(str(acl_obj), acl_obj.SUFFIX, output_directory,
                input_file, write_files)
    if pcap_accept:
      acl_obj = pcap.PcapFilter(pcap_accept, exp_info)
      RenderACL(str(acl_obj), '-accept' + acl_obj.SUFFIX, output_directory,
                input_file, write_files)
    if pcap_deny:
      acl_obj = pcap.PcapFilter(pcap_deny, exp_info, invert=True)
      RenderACL(str(acl_obj), '-deny' + acl_obj.SUFFIX, output_directory,
                input_file, write_files)
    if pf:
      acl_obj = packetfilter.PacketFilter(pf, exp_info)
      RenderACL(str(acl_obj), acl_obj.SUFFIX, output_directory,
                input_file, write_files)
    if win_afw:
      acl_obj = windows_advfirewall.WindowsAdvFirewall(win_afw, exp_info)
      RenderACL(str(acl_obj), acl_obj.SUFFIX, output_directory,
                input_file, write_files)
    if jsl:
      acl_obj = srxlo.SRXlo(jsl, exp_info)
      RenderACL(str(acl_obj), acl_obj.SUFFIX, output_directory,
                input_file, write_files)
    if xacl:
      acl_obj = ciscoxr.CiscoXR(xacl, exp_info)
      RenderACL(str(acl_obj), acl_obj.SUFFIX, output_directory,
                input_file, write_files)
    if nft:
      acl_obj = nftables.Nftables(nft, exp_info)
      RenderACL(str(acl_obj), acl_obj.SUFFIX, output_directory,
                input_file, write_files)
    if gcefw:
      acl_obj = gce.GCE(gcefw, exp_info)
      RenderACL(str(acl_obj), acl_obj.SUFFIX, output_directory,
                input_file, write_files)
  # TODO(robankeny) add additional errors.
  except (juniper.Error, junipersrx.Error, cisco.Error, ipset.Error,
          iptables.Error, speedway.Error, pcap.Error,
          aclgenerator.Error, aruba.Error, nftables.Error, gce.Error):
    raise ACLGeneratorError('Error generating target ACL for %s:\n%s%s' % (
        input_file, sys.exc_info()[0], sys.exc_info()[1]))
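
RenderFile turns low-level parse failures into its own error type but keeps the original cause visible by indexing into sys.exc_info(). A rough sketch of that idiom, with parse_policy invented for illustration and ACLParserError reduced to a bare placeholder:

import sys

class ACLParserError(Exception):
    # bare placeholder for capirca's ACLParserError, defined only for this sketch
    pass

def parse_policy(text):
    # stand-in parser that always fails, to exercise the handler
    raise SyntaxError('bad term on line 3')

try:
    parse_policy('term bad-term { }')
except SyntaxError:
    # sys.exc_info()[0] is the exception class and [1] the instance;
    # formatting both into the new error keeps the original cause visible
    # in the message even though the traceback itself is not chained
    print(ACLParserError('Error parsing policy file %s:\n%s%s' % (
        'example.pol', sys.exc_info()[0], sys.exc_info()[1])))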

Example 20

Project: gramps
Source File: dateparserdisplaytest.py
View license
    def run_tool(self):
        self.progress = ProgressMeter(_('Running Date Test'), '',
                                      parent=self.parent_window)
        self.progress.set_pass(_('Generating dates'),
                               4)
        dates = []
        # first some valid dates
        calendar = Date.CAL_GREGORIAN
        for quality in (Date.QUAL_NONE, Date.QUAL_ESTIMATED,
                        Date.QUAL_CALCULATED):
            for modifier in (Date.MOD_NONE, Date.MOD_BEFORE,
                             Date.MOD_AFTER, Date.MOD_ABOUT):
                for slash1 in (False,True):
                    for month in range(0,13):
                        for day in (0,5,27):
                            if not month and day:
                                continue
                            d = Date()
                            d.set(quality,modifier,calendar,(day,month,1789,slash1),"Text comment")
                            dates.append( d)
            for modifier in (Date.MOD_RANGE, Date.MOD_SPAN):
                for slash1 in (False,True):
                    for slash2 in (False,True):
                        for month in range(0,13):
                            for day in (0,5,27):
                                if not month and day:
                                    continue

                                d = Date()
                                d.set(quality,modifier,calendar,(day,month,1789,slash1,day,month,1876,slash2),"Text comment")
                                dates.append( d)

                                if not month:
                                    continue

                                d = Date()
                                d.set(quality,modifier,calendar,(day,month,1789,slash1,day,13-month,1876,slash2),"Text comment")
                                dates.append( d)

                                if not day:
                                    continue

                                d = Date()
                                d.set(quality,modifier,calendar,(day,month,1789,slash1,32-day,month,1876,slash2),"Text comment")
                                dates.append( d)
                                d = Date()
                                d.set(quality,modifier,calendar,(day,month,1789,slash1,32-day,13-month,1876,slash2),"Text comment")
                                dates.append( d)
            modifier = Date.MOD_TEXTONLY
            d = Date()
            d.set(quality,modifier,calendar,Date.EMPTY,
                  "This is a textual date")
            dates.append( d)
            self.progress.step()

        # test invalid dates
        #dateval = (4,7,1789,False,5,8,1876,False)
        #for l in range(1,len(dateval)):
        #    d = Date()
        #    try:
        #        d.set(Date.QUAL_NONE,Date.MOD_NONE,
        #              Date.CAL_GREGORIAN,dateval[:l],"Text comment")
        #        dates.append( d)
        #    except DateError, e:
        #        d.set_as_text("Date identified value correctly as invalid.\n%s" % e)
        #        dates.append( d)
        #    except:
        #        d = Date()
        #        d.set_as_text("Date.set Exception %s" % ("".join(traceback.format_exception(*sys.exc_info())),))
        #        dates.append( d)
        #for l in range(1,len(dateval)):
        #    d = Date()
        #    try:
        #        d.set(Date.QUAL_NONE,Date.MOD_SPAN,Date.CAL_GREGORIAN,dateval[:l],"Text comment")
        #        dates.append( d)
        #    except DateError, e:
        #        d.set_as_text("Date identified value correctly as invalid.\n%s" % e)
        #        dates.append( d)
        #    except:
        #        d = Date()
        #        d.set_as_text("Date.set Exception %s" % ("".join(traceback.format_exception(*sys.exc_info())),))
        #        dates.append( d)
        #self.progress.step()
        #d = Date()
        #d.set(Date.QUAL_NONE,Date.MOD_NONE,
        #      Date.CAL_GREGORIAN,(44,7,1789,False),"Text comment")
        #dates.append( d)
        #d = Date()
        #d.set(Date.QUAL_NONE,Date.MOD_NONE,
        #      Date.CAL_GREGORIAN,(4,77,1789,False),"Text comment")
        #dates.append( d)
        #d = Date()
        #d.set(Date.QUAL_NONE,Date.MOD_SPAN,
        #      Date.CAL_GREGORIAN,
        #      (4,7,1789,False,55,8,1876,False),"Text comment")
        #dates.append( d)
        #d = Date()
        #d.set(Date.QUAL_NONE,Date.MOD_SPAN,
        #      Date.CAL_GREGORIAN,
        #      (4,7,1789,False,5,88,1876,False),"Text comment")
        #dates.append( d)

        with DbTxn(_("Date Test Plugin"), self.db, batch=True) as self.trans:
            self.db.disable_signals()
            self.progress.set_pass(_('Generating dates'),
                                   len(dates))

            # create pass and fail tags
            pass_handle = self.create_tag(_('Pass'), '#0000FFFF0000')
            fail_handle = self.create_tag(_('Fail'), '#FFFF00000000')

            # now add them as birth to new persons
            i = 1
            for dateval in dates:
                person = Person()
                surname = Surname()
                surname.set_surname("DateTest")
                name = Name()
                name.add_surname(surname)
                name.set_first_name("Test %d" % i)
                person.set_primary_name(name)
                self.db.add_person(person, self.trans)
                bevent = Event()
                bevent.set_type(EventType.BIRTH)
                bevent.set_date_object(dateval)
                bevent.set_description("Date Test %d (source)" % i)
                bevent_h = self.db.add_event(bevent, self.trans)
                bevent_ref = EventRef()
                bevent_ref.set_reference_handle(bevent_h)
                # for the death event display the date as text and parse it back to a new date
                ndate = None
                try:
                    datestr = _dd.display( dateval)
                    try:
                        ndate = _dp.parse( datestr)
                        if not ndate:
                            ndate = Date()
                            ndate.set_as_text("DateParser None")
                            person.add_tag(fail_handle)
                        else:
                            person.add_tag(pass_handle)
                    except:
                        ndate = Date()
                        ndate.set_as_text("DateParser Exception %s" % ("".join(traceback.format_exception(*sys.exc_info())),))
                        person.add_tag(fail_handle)
                    else:
                        person.add_tag(pass_handle)
                except:
                    ndate = Date()
                    ndate.set_as_text("DateDisplay Exception: %s" % ("".join(traceback.format_exception(*sys.exc_info())),))
                    person.add_tag(fail_handle)

                if dateval.get_modifier() != Date.MOD_TEXTONLY \
                       and ndate.get_modifier() == Date.MOD_TEXTONLY:
                    # parser was unable to correctly parse the string
                    ndate.set_as_text( "TEXTONLY: "+ndate.get_text())
                    person.add_tag(fail_handle)
                if dateval.get_modifier() == Date.MOD_TEXTONLY \
                        and dateval.get_text().count("Traceback") \
                        and pass_handle in person.get_tag_list():
                    person.add_tag(fail_handle)

                devent = Event()
                devent.set_type(EventType.DEATH)
                devent.set_date_object(ndate)
                devent.set_description("Date Test %d (result)" % i)
                devent_h = self.db.add_event(devent, self.trans)
                devent_ref = EventRef()
                devent_ref.set_reference_handle(devent_h)
                person.set_birth_ref(bevent_ref)
                person.set_death_ref(devent_ref)
                self.db.commit_person(person, self.trans)
                i = i + 1
                self.progress.step()
        self.db.enable_signals()
        self.db.request_rebuild()
        self.progress.close()
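
The tool above deliberately converts any parser or display crash into text stored on the record, using traceback.format_exception(*sys.exc_info()) to capture the full traceback. A self-contained sketch of that idiom, with a made-up parse_date standing in for _dp.parse:

import sys
import traceback

def parse_date(text):
    # hypothetical parser used only for this sketch
    raise ValueError('unparseable date: %r' % text)

try:
    parse_date('Smarch 32, 1789')
except Exception:
    # traceback.format_exception() renders the (type, value, traceback)
    # triple into the same lines the interpreter would print, so the
    # whole failure can be stored as plain text on the test record
    report = ''.join(traceback.format_exception(*sys.exc_info()))
    print('DateParser Exception %s' % report)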

Example 21

Project: Nagstamon
Source File: Icinga.py
View license
    def _get_status_HTML(self):
        """
        Get status from Nagios Server - the oldschool CGI HTML way
        """
        # create Nagios items dictionary with two lists for services and hosts
        # every list will contain a dictionary for every failed service/host
        # this dictionary is only temporary
        # ##global icons
        nagitems = {'services':[], 'hosts':[]}

        # new_hosts dictionary
        self.new_hosts = dict()

        # hosts - mostly the down ones
        # unfortunately the hosts status page has a different structure so
        # hosts must be analyzed separately
        try:
            for status_type in 'hard', 'soft':
                result = self.FetchURL(self.cgiurl_hosts[status_type])
                htobj, error, status_code = result.result,\
                                            result.error,\
                                            result.status_code

                # check if any error occurred
                errors_occured = self.check_for_error(htobj, error, status_code)
                # if there are errors return them
                if errors_occured != False:
                    return(errors_occured)    

                # put a copy of a part of htobj into table to be able to delete htobj
                table = htobj('table', {'class': 'status'})[0]

                # do some cleanup
                del result, error

                # access table rows
                # some Icinga versions have a <tbody> tag in cgi output HTML which
                # omits the <tr> tags being found
                if len(table('tbody')) == 0:
                    trs = table('tr', recursive=False)
                else:
                    tbody = table('tbody')[0]
                    trs = tbody('tr', recursive=False)

                # kick out table heads
                trs.pop(0)

                for tr in trs:
                    try:
                        # ignore empty <tr> rows
                        if len(tr('td', recursive=False)) > 1:
                            n = {}
                            # get tds in one tr
                            tds = tr('td', recursive=False)
                            # host
                            try:
                                n['host'] = str(tds[0].table.tr.td.table.tr.td.a.string)
                            except:
                                n['host'] = str(nagitems['hosts'][len(nagitems['hosts']) - 1]['host'])
                                # status
                            n['status'] = str(tds[1].string)
                            # last_check
                            n['last_check'] = str(tds[2].string)
                            # duration
                            n['duration'] = str(tds[3].string)
                            # division between Nagios and Icinga in real life... where
                            # Nagios has only 5 columns there are 7 in Icinga 1.3...
                            # ... and 6 in Icinga 1.2 :-)
                            if len(tds) < 7:
                                # the old Nagios table
                                # status_information
                                if len(tds[4](text=not_empty)) == 0:
                                    n['status_information'] = ''
                                else:
                                    n['status_information'] = str(tds[4].string)
                                    # attempts are not shown in case of hosts so it defaults to 'N/A'
                                n['attempt'] = 'N/A'
                            else:
                                # attempts are shown for hosts
                                # to fix http://sourceforge.net/tracker/?func=detail&atid=1101370&aid=3280961&group_id=236865 .attempt needs
                                # to be stripped
                                n['attempt'] = str(tds[4].string).strip()
                                # status_information
                                if len(tds[5](text=not_empty)) == 0:
                                    n['status_information'] = ''
                                else:
                                    n['status_information'] = str(tds[5].string)

                            # status flags
                            n['passiveonly'] = False
                            n['notifications_disabled'] = False
                            n['flapping'] = False
                            n['acknowledged'] = False
                            n['scheduled_downtime'] = False

                            # map status icons to status flags
                            icons = tds[0].findAll('img')
                            for i in icons:
                                icon = i['src'].split('/')[-1]
                                if icon in self.STATUS_MAPPING:
                                    n[self.STATUS_MAPPING[icon]] = True
                            # cleaning
                            del icons

                            # add dictionary full of information about this host item to nagitems
                            nagitems['hosts'].append(n)
                            # after collecting data in nagitems, create objects from its information
                            # host objects contain service objects
                            if not n['host'] in self.new_hosts:
                                new_host = n['host']
                                self.new_hosts[new_host] = GenericHost()
                                self.new_hosts[new_host].name = n['host']
                                self.new_hosts[new_host].server = self.name
                                self.new_hosts[new_host].status = n['status']
                                self.new_hosts[new_host].last_check = n['last_check']
                                self.new_hosts[new_host].duration = n['duration']
                                self.new_hosts[new_host].attempt = n['attempt']
                                self.new_hosts[new_host].status_information = n['status_information'].replace('\n', ' ').strip()
                                self.new_hosts[new_host].passiveonly = n['passiveonly']
                                self.new_hosts[new_host].notifications_disabled = n['notifications_disabled']
                                self.new_hosts[new_host].flapping = n['flapping']
                                self.new_hosts[new_host].acknowledged = n['acknowledged']
                                self.new_hosts[new_host].scheduled_downtime = n['scheduled_downtime']
                                self.new_hosts[new_host].status_type = status_type

                                # extra Icinga properties to solve https://github.com/HenriWahl/Nagstamon/issues/192
                                # acknowledge needs host_name and no display name
                                self.new_hosts[new_host].real_name = n['host']
                            
                            # some cleanup
                            del tds, n
                    except:
                        self.Error(sys.exc_info())

                # do some cleanup
                htobj.decompose()
                del htobj, trs, table

        except:
                # set checking flag back to False
                self.isChecking = False
                result, error = self.Error(sys.exc_info())
                return Result(result=result, error=error)

        # services
        try:
            for status_type in 'hard', 'soft':
                result = self.FetchURL(self.cgiurl_services[status_type])
                htobj, error, status_code = result.result,\
                                            result.error,\
                                            result.status_code
                                            
                # check if any error occurred
                errors_occured = self.check_for_error(htobj, error, status_code)
                # if there are errors return them
                if errors_occured != False:
                    return(errors_occured)    
                
                table = htobj('table', {'class': 'status'})[0]

                # some Icinga versions have a <tbody> tag in cgi output HTML which
                # omits the <tr> tags being found
                if len(table('tbody')) == 0:
                    trs = table('tr', recursive=False)
                else:
                    tbody = table('tbody')[0]
                    trs = tbody('tr', recursive=False)

                # do some cleanup
                del result, error

                # kick out table heads
                trs.pop(0)

                for tr in trs:
                    try:
                        # ignore empty <tr> rows - there are a lot of them - a Nagios bug?
                        tds = tr('td', recursive=False)
                        if len(tds) > 1:
                            n = {}
                            # host
                            # the resulting table from Nagios status.cgi omits the
                            # hostname of a failing service if there is more than one,
                            # so if the hostname is empty the nagios status item should get
                            # its hostname from the previous item - one reason to keep 'nagitems'
                            try:
                                n['host'] = str(tds[0](text=not_empty)[0])
                            except:
                                n['host'] = str(nagitems['services'][len(nagitems['services']) - 1]['host'])
                                # service
                            n['service'] = str(tds[1](text=not_empty)[0])
                            # status
                            n['status'] = str(tds[2](text=not_empty)[0])
                            # last_check
                            n['last_check'] = str(tds[3](text=not_empty)[0])
                            # duration
                            n['duration'] = str(tds[4](text=not_empty)[0])
                            # attempt
                            # to fix http://sourceforge.net/tracker/?func=detail&atid=1101370&aid=3280961&group_id=236865 .attempt needs
                            # to be stripped
                            n['attempt'] = str(tds[5](text=not_empty)[0]).strip()
                            # status_information
                            if len(tds[6](text=not_empty)) == 0:
                                n['status_information'] = ''
                            else:
                                n['status_information'] = str(tds[6](text=not_empty)[0])
                                # status flags
                            n['passiveonly'] = False
                            n['notifications_disabled'] = False
                            n['flapping'] = False
                            n['acknowledged'] = False
                            n['scheduled_downtime'] = False

                            # map status icons to status flags
                            icons = tds[1].findAll('img')
                            for i in icons:
                                icon = i['src'].split('/')[-1]
                                if icon in self.STATUS_MAPPING:
                                    n[self.STATUS_MAPPING[icon]] = True
                            # cleaning
                            del icons

                            # add dictionary full of information about this service item to nagitems - only if service
                            nagitems['services'].append(n)
                            # after collecting data in nagitems, create objects from its information
                            # host objects contain service objects
                            if not n['host'] in self.new_hosts:
                                self.new_hosts[n['host']] = GenericHost()
                                self.new_hosts[n['host']].name = n['host']
                                self.new_hosts[n['host']].status = 'UP'
                                # extra Icinga properties to solve https://github.com/HenriWahl/Nagstamon/issues/192
                                # acknowledge needs host_description and no display name
                                self.new_hosts[n['host']].real_name = n['host']
                                
                                # trying to fix https://sourceforge.net/tracker/index.php?func=detail&aid=3299790&group_id=236865&atid=1101370
                                # if host is not down but in downtime or any other flag this should be evaluated too
                                # map status icons to status flags
                                icons = tds[0].findAll('img')
                                for i in icons:
                                    icon = i['src'].split('/')[-1]
                                    if icon in self.STATUS_MAPPING:
                                        self.new_hosts[n['host']].__dict__[self.STATUS_MAPPING[icon]] = True
                                # cleaning
                                del icons
                                # if a service does not exist create its object
                            if not n['service'] in self.new_hosts[n['host']].services:
                                new_service = n['service']
                                self.new_hosts[n['host']].services[new_service] = GenericService()
                                self.new_hosts[n['host']].services[new_service].host = n['host']
                                self.new_hosts[n['host']].services[new_service].server = self.name
                                self.new_hosts[n['host']].services[new_service].name = n['service']
                                self.new_hosts[n['host']].services[new_service].status = n['status']
                                self.new_hosts[n['host']].services[new_service].last_check = n['last_check']
                                self.new_hosts[n['host']].services[new_service].duration = n['duration']
                                self.new_hosts[n['host']].services[new_service].attempt = n['attempt']
                                self.new_hosts[n['host']].services[new_service].status_information = n['status_information'].replace('\n', ' ').strip()
                                self.new_hosts[n['host']].services[new_service].passiveonly = n['passiveonly']
                                self.new_hosts[n['host']].services[new_service].notifications_disabled = n['notifications_disabled']
                                self.new_hosts[n['host']].services[new_service].flapping = n['flapping']
                                self.new_hosts[n['host']].services[new_service].acknowledged = n['acknowledged']
                                self.new_hosts[n['host']].services[new_service].scheduled_downtime = n['scheduled_downtime']

                                # extra Icinga properties to solve https://github.com/HenriWahl/Nagstamon/issues/192
                                # acknowledge needs service_description and no display name
                                self.new_hosts[n['host']].services[new_service].real_name = n['service']
                                
                            # some cleanup
                            del tds, n
                    except:
                        self.Error(sys.exc_info())

                # do some cleanup
                htobj.decompose()
                del htobj, trs, table

        except:
            # set checking flag back to False
            self.isChecking = False
            result, error = self.Error(sys.exc_info())
            return Result(result=result, error=error)

            # some cleanup
        del nagitems

        # dummy return in case all is OK
        return Result()
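
Every unexpected failure in both loops is routed through self.Error(sys.exc_info()) and handed back as a Result rather than raised. A stripped-down sketch of that flow; Result and Error here are simplified placeholders, not the real Nagstamon classes:

import sys

class Result(object):
    # minimal stand-in for Nagstamon's Result container
    def __init__(self, result=None, error=''):
        self.result = result
        self.error = error

def Error(exc_info):
    # rough sketch of what the self.Error(...) calls above rely on:
    # turn the exc_info triple into a (result, error) pair for the GUI
    exc_type, exc_value, _ = exc_info
    return 'ERROR', '%s: %s' % (exc_type.__name__, exc_value)

def get_status():
    try:
        raise IOError('status CGI not reachable')
    except Exception:
        result, error = Error(sys.exc_info())
        return Result(result=result, error=error)

print(get_status().error)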

Example 22

Project: Nagstamon
Source File: Zabbix.py
View license
    def _get_status(self):
        """
            Get status from Zabbix Server
        """
        ret = Result()
        # create Nagios items dictionary with two lists for services and hosts
        # every list will contain a dictionary for every failed service/host
        # this dictionary is only temporary
        nagitems = {"services": [], "hosts": []}

        # Create URLs for the configured filters
        if self.zapi is None:
            self._login()

        try:
            hosts = []
            try:
                hosts = self.zapi.host.get(
                    {"output": ["host", "ip", "status", "available", "error", "errors_from"], "filter": {}})
            except (ZabbixError, ZabbixAPIException):
                # set checking flag back to False
                self.isChecking = False
                result, error = self.Error(sys.exc_info())
                return Result(result=result, error=error)

            for host in hosts:
                # if host is disabled on server safely ignore it
                if host['available'] != '0':
                    n = {
                        'host': host['host'],
                        'status': self.statemap.get(host['status'], host['status']),
                        'last_check': 'n/a',
                        'duration': HumanReadableDurationFromTimestamp(host['errors_from']),
                        'status_information': host['error'],
                        'attempt': '1/1',
                        'site': '',
                        'address': host['host'],
                    }

                    # Zabbix shows OK hosts too - kick 'em!
                    if not n['status'] == 'OK':

                        # add dictionary full of information about this host item to nagitems
                        nagitems["hosts"].append(n)
                        # after collecting data in nagitems, create objects from its information
                        # host objects contain service objects
                        if n["host"] not in self.new_hosts:
                            new_host = n["host"]
                            self.new_hosts[new_host] = GenericHost()
                            self.new_hosts[new_host].name = n["host"]
                            self.new_hosts[new_host].status = n["status"]
                            self.new_hosts[new_host].last_check = n["last_check"]
                            self.new_hosts[new_host].duration = n["duration"]
                            self.new_hosts[new_host].attempt = n["attempt"]
                            self.new_hosts[new_host].status_information = n["status_information"]
                            self.new_hosts[new_host].site = n["site"]
                            self.new_hosts[new_host].address = n["address"]
        except ZabbixError:
            self.isChecking = False
            result, error = self.Error(sys.exc_info())
            return Result(result=result, error=error)

        # services
        services = []
        # groupids = [] # never used - probably old code
        zabbix_triggers = []
        try:
            api_version = self.zapi.api_version()
        except ZabbixAPIException:
            # FIXME Is there a cleaner way to handle this? I just borrowed
            # this code from 80 lines ahead. -- AGV
            # set checking flag back to False

            self.isChecking = False
            result, error = self.Error(sys.exc_info())
            print(sys.exc_info())
            return Result(result=result, error=error)

        try:
            # response = [] # never used - probably old code
            try:
                triggers_list = []

                hostgroup_ids = [x['groupid'] for x in self.zapi.hostgroup.get(
                    {'output': 'extend', 'with_monitored_items': True})
                    if int(x['internal']) == 0]

                zabbix_triggers = self.zapi.trigger.get(
                    {'sortfield': 'lastchange', 'withLastEventUnacknowledged': True, 'groupids': hostgroup_ids,
                     "monitored": True, "filter": {'value': 1}})

                triggers_list = []

                for trigger in zabbix_triggers:
                    triggers_list.append(trigger.get('triggerid'))
                this_trigger = self.zapi.trigger.get(
                    {'triggerids': triggers_list,
                     'expandDescription': True,
                     'output': 'extend',
                     'select_items': 'extend',  # thats for zabbix api 1.8
                     'selectItems': 'extend',  # thats for zabbix api 2.0+
                     'expandData': True}
                )
                if type(this_trigger) is dict:
                    for triggerid in list(this_trigger.keys()):
                        services.append(this_trigger[triggerid])
                        this_item = self.zapi.item.get(
                            {'itemids': [this_trigger[triggerid]['items'][0]['itemid']],
                             'selectApplications': 'extend'}
                        )
                        last_app = len(this_item[0]['applications']) - 1
                        this_trigger[triggerid]['application'] = this_item[0]['applications'][last_app]['name']
                elif type(this_trigger) is list:
                    for trigger in this_trigger:
                        services.append(trigger)
                        this_item = self.zapi.item.get(
                            {'itemids': trigger['items'][0]['itemid'],
                             'selectApplications': 'extend'}
                        )
                        # last_app = 0  # use it to get the first application name
                        last_app = len(this_item[0]['applications']) - 1  # use it to get the last application name
                        trigger['application'] = this_item[0]['applications'][last_app]['name']

            except ZabbixAPIException:
                # FIXME Is there a cleaner way to handle this? I just borrowed
                # this code from 80 lines ahead. -- AGV
                # set checking flag back to False
                self.isChecking = False
                result, error = self.Error(sys.exc_info())
                print(sys.exc_info())
                return Result(result=result, error=error)

            except ZabbixError as e:
                if e.terminate:
                    return e.result
                else:
                    service = e.result.content
                    ret = e.result

            for service in services:
                # Zabbix probably shows OK services too - kick 'em!
                # UPDATE Zabbix api 3.0 doesn't, but I didn't try with older
                #        versions, so I left it
                status = self.statemap.get(service['priority'], service['priority'])
                if not status == 'OK':
                    if not service['description'].endswith('...'):
                        state = service['description']
                    else:
                        state = service['items'][0]['lastvalue']
                    lastcheck = 0
                    for item in service['items']:
                        if int(item['lastclock']) > lastcheck:
                            lastcheck = int(item['lastclock'])
                    if len(service['comments']) == 0:
                        srvc = service['application']
                    else:
                        srvc = self.nagiosify_service(service['comments'])
                    n = {
                        'service': srvc,
                        'status': status,
                        # 1/1 attempt looks at least like there has been any attempt
                        'attempt': '1/1',
                        'duration': HumanReadableDurationFromTimestamp(service['lastchange']),
                        'status_information': state,
                        'passiveonly': 'no',
                        'last_check': time.strftime("%d/%m/%Y %H:%M:%S", time.localtime(lastcheck)),
                        'notifications': 'yes',
                        'flapping': 'no',
                        'site': '',
                        'command': 'zabbix',
                        'triggerid': service['triggerid'],
                    }

                    if api_version >= '3.0':
                        n['host'] = self.zapi.host.get({"output": ["host"], "filter": {}, "triggerids": service['triggerid']})[0]['host']
                    else:
                        n['host'] = service['host']

                    nagitems["services"].append(n)
                    # after collecting data in nagitems, create objects from its information
                    # host objects contain service objects
                    if n["host"] not in self.new_hosts:
                        self.new_hosts[n["host"]] = GenericHost()
                        self.new_hosts[n["host"]].name = n["host"]
                        self.new_hosts[n["host"]].status = "UP"
                        self.new_hosts[n["host"]].site = n["site"]
                        self.new_hosts[n["host"]].address = n["host"]
                        # if a service does not exist create its object
                    if n["service"] not in self.new_hosts[n["host"]].services:
                        # workaround for non-existing (or not found) host status flag
                        if n["service"] == "Host is down %s" % (n["host"]):
                            self.new_hosts[n["host"]].status = "DOWN"
                            # also take duration from "service" aka trigger
                            self.new_hosts[n["host"]].duration = n["duration"]
                        else:
                            new_service = n["service"]
                            self.new_hosts[n["host"]].services[new_service] = GenericService()
                            self.new_hosts[n["host"]].services[new_service].host = n["host"]
                            self.new_hosts[n["host"]].services[new_service].name = n["service"]
                            self.new_hosts[n["host"]].services[new_service].status = n["status"]
                            self.new_hosts[n["host"]].services[new_service].last_check = n["last_check"]
                            self.new_hosts[n["host"]].services[new_service].duration = n["duration"]
                            self.new_hosts[n["host"]].services[new_service].attempt = n["attempt"]
                            self.new_hosts[n["host"]].services[new_service].status_information = n["status_information"]
                            self.new_hosts[n["host"]].services[new_service].passiveonly = False
                            self.new_hosts[n["host"]].services[new_service].flapping = False
                            self.new_hosts[n["host"]].services[new_service].site = n["site"]
                            self.new_hosts[n["host"]].services[new_service].address = n["host"]
                            self.new_hosts[n["host"]].services[new_service].command = n["command"]
                            self.new_hosts[n["host"]].services[new_service].triggerid = n["triggerid"]

        except (ZabbixError, ZabbixAPIException):
            # set checking flag back to False
            self.isChecking = False
            result, error = self.Error(sys.exc_info())
            print(sys.exc_info())
            return Result(result=result, error=error)

        return ret

Example 23

Project: pyvows
Source File: gevent.py
View license
    def run_context(self, ctx_collection, ctx_name, ctx_obj, execution_plan, index=-1, suite=None, skipReason=None):
        #   FIXME: Add Docstring

        #-----------------------------------------------------------------------
        # Local variables and defs
        #-----------------------------------------------------------------------
        ctx_result = {
            'filename': suite or inspect.getsourcefile(ctx_obj.__class__),
            'name': ctx_name,
            'tests': [],
            'contexts': [],
            'topic_elapsed': 0,
            'error': None,
            'skip': skipReason
        }

        ctx_collection.append(ctx_result)
        ctx_obj.index = index
        ctx_obj.pool = self.pool
        teardown_blockers = []

        def _run_setup_and_topic(ctx_obj, index):
            # If we're already mid-skip, don't run anything
            if skipReason:
                raise skipReason

            # Run setup function
            try:
                ctx_obj.setup()
            except Exception:
                raise VowsTopicError('setup', sys.exc_info())

            # Find & run topic function
            if not hasattr(ctx_obj, 'topic'):  # ctx_obj has no topic
                return ctx_obj._get_first_available_topic(index)

            try:
                topic_func = ctx_obj.topic
                topic_list = get_topics_for(topic_func, ctx_obj)

                start_time = time.time()

                if topic_func is None:
                    return None

                topic = topic_func(*topic_list)
                ctx_result['topic_elapsed'] = elapsed(start_time)
                return topic
            except SkipTest:
                raise
            except Exception:
                raise VowsTopicError('topic', sys.exc_info())

        def _run_tests(topic):
            def _run_with_topic(topic):
                def _run_vows_and_subcontexts(topic, index=-1, enumerated=False):
                    # methods
                    for vow_name, vow in vows:
                        if skipReason:
                            skipped_result = self.get_vow_result(vow, topic, ctx_obj, vow_name, enumerated)
                            skipped_result['skip'] = skipReason
                            ctx_result['tests'].append(skipped_result)
                        else:
                            vow_greenlet = self._run_vow(
                                ctx_result['tests'],
                                topic,
                                ctx_obj,
                                vow,
                                vow_name,
                                enumerated=enumerated)
                            teardown_blockers.append(vow_greenlet)

                    # classes
                    for subctx_name, subctx in subcontexts:
                        # resolve user-defined Context classes
                        if not issubclass(subctx, self.context_class):
                            subctx = type(ctx_name, (subctx, self.context_class), {})

                        subctx_obj = subctx(ctx_obj)
                        subctx_obj.pool = self.pool

                        subctx_greenlet = self.pool.spawn(
                            self.run_context,
                            ctx_result['contexts'],
                            subctx_name,
                            subctx_obj,
                            execution_plan['contexts'][subctx_name],
                            index=index,
                            suite=suite or ctx_result['filename'],
                            skipReason=skipReason
                        )
                        teardown_blockers.append(subctx_greenlet)

                # setup generated topics if needed
                is_generator = inspect.isgenerator(topic)
                if is_generator:
                    try:
                        ctx_obj.generated_topic = True
                        topic = ctx_obj.topic_value = list(topic)
                    except Exception:
                        # Actually getting the values from the generator may raise exception
                        raise VowsTopicError('topic', sys.exc_info())
                else:
                    ctx_obj.topic_value = topic

                if is_generator:
                    for index, topic_value in enumerate(topic):
                        _run_vows_and_subcontexts(topic_value, index=index, enumerated=True)
                else:
                    _run_vows_and_subcontexts(topic)

            vows = set((vow_name, getattr(type(ctx_obj), vow_name)) for vow_name in execution_plan['vows'])
            subcontexts = set((subctx_name, getattr(type(ctx_obj), subctx_name)) for subctx_name in execution_plan['contexts'])

            if not isinstance(topic, VowsAsyncTopic):
                _run_with_topic(topic)
            else:
                def handle_callback(*args, **kw):
                    _run_with_topic(VowsAsyncTopicValue(args, kw))
                topic(handle_callback)

        def _run_teardown():
            try:
                for blocker in teardown_blockers:
                    blocker.join()
                ctx_obj.teardown()
            except Exception:
                raise VowsTopicError('teardown', sys.exc_info())

        def _update_execution_plan():
            '''Since Context.ignore can modify the ignored_members during setup or topic,
                update the execution_plan to reflect the new ignored_members'''

            for name in ctx_obj.ignored_members:
                if name in execution_plan['vows']:
                    execution_plan['vows'].remove(name)
                if name in execution_plan['contexts']:
                    del execution_plan['contexts'][name]

        #-----------------------------------------------------------------------
        # Begin
        #-----------------------------------------------------------------------
        try:
            try:
                topic = _run_setup_and_topic(ctx_obj, index)
                _update_execution_plan()
            except SkipTest, se:
                ctx_result['skip'] = se
                skipReason = se
                topic = None
            except VowsTopicError, e:
                ctx_result['error'] = e
                skipReason = SkipTest('topic dependency failed')
                topic = None
            _run_tests(topic)
            if not ctx_result['error']:
                try:
                    _run_teardown()
                except Exception, e:
                    ctx_result['error'] = e
        finally:
            ctx_result['stdout'] = VowsParallelRunner.output.stdout.getvalue()
            ctx_result['stderr'] = VowsParallelRunner.output.stderr.getvalue()
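
run_context wraps failures from setup, topic and teardown in VowsTopicError objects that carry the sys.exc_info() triple together with the phase name, so the reporter can say where things went wrong. A compact sketch of that wrapping, using a simplified stand-in for the real class:

import sys

class VowsTopicError(Exception):
    # simplified stand-in: remembers which phase failed and carries
    # the captured exc_info triple along with it
    def __init__(self, source, exc_info):
        super(VowsTopicError, self).__init__(source)
        self.source = source
        self.exc_info = exc_info

def run_setup():
    raise RuntimeError('fixture could not be created')

try:
    try:
        run_setup()
    except Exception:
        # capture type/value/traceback now so the runner can format or
        # re-raise it later with the phase name attached
        raise VowsTopicError('setup', sys.exc_info())
except VowsTopicError as e:
    etype, evalue, _ = e.exc_info
    print('%s failed: %s: %s' % (e.source, etype.__name__, evalue))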

Example 24

Project: openwrt-mt7620
Source File: Taskmaster.py
View license
    def _find_next_ready_node(self):
        """
        Finds the next node that is ready to be built.

        This is *the* main guts of the DAG walk.  We loop through the
        list of candidates, looking for something that has no un-built
        children (i.e., that is a leaf Node or has dependencies that are
        all leaf Nodes or up-to-date).  Candidate Nodes are re-scanned
        (both the target Node itself and its sources, which are always
        scanned in the context of a given target) to discover implicit
        dependencies.  A Node that must wait for some children to be
        built will be put back on the candidates list after the children
        have finished building.  A Node that has been put back on the
        candidates list in this way may have itself (or its sources)
        re-scanned, in order to handle generated header files (e.g.) and
        the implicit dependencies therein.

        Note that this method does not do any signature calculation or
        up-to-date check itself.  All of that is handled by the Task
        class.  This is purely concerned with the dependency graph walk.
        """

        self.ready_exc = None

        T = self.trace
        if T: T.write(u'\n' + self.trace_message('Looking for a node to evaluate'))

        while True:
            node = self.next_candidate()
            if node is None:
                if T: T.write(self.trace_message('No candidate anymore.') + u'\n')
                return None

            node = node.disambiguate()
            state = node.get_state()

            # For debugging only:
            #
            # try:
            #     self._validate_pending_children()
            # except:
            #     self.ready_exc = sys.exc_info()
            #     return node

            if CollectStats:
                if not hasattr(node, 'stats'):
                    node.stats = Stats()
                    StatsNodes.append(node)
                S = node.stats
                S.considered = S.considered + 1
            else:
                S = None

            if T: T.write(self.trace_message(u'    Considering node %s and its children:' % self.trace_node(node)))

            if state == NODE_NO_STATE:
                # Mark this node as being on the execution stack:
                node.set_state(NODE_PENDING)
            elif state > NODE_PENDING:
                # Skip this node if it has already been evaluated:
                if S: S.already_handled = S.already_handled + 1
                if T: T.write(self.trace_message(u'       already handled (executed)'))
                continue

            executor = node.get_executor()

            try:
                children = executor.get_all_children()
            except SystemExit:
                exc_value = sys.exc_info()[1]
                e = SCons.Errors.ExplicitExit(node, exc_value.code)
                self.ready_exc = (SCons.Errors.ExplicitExit, e)
                if T: T.write(self.trace_message('       SystemExit'))
                return node
            except Exception, e:
                # We had a problem just trying to figure out the
                # children (like a child couldn't be linked in to a
                # VariantDir, or a Scanner threw something).  Arrange to
                # raise the exception when the Task is "executed."
                self.ready_exc = sys.exc_info()
                if S: S.problem = S.problem + 1
                if T: T.write(self.trace_message('       exception %s while scanning children.\n' % e))
                return node

            children_not_visited = []
            children_pending = set()
            children_not_ready = []
            children_failed = False

            for child in chain(executor.get_all_prerequisites(), children):
                childstate = child.get_state()

                if T: T.write(self.trace_message(u'       ' + self.trace_node(child)))

                if childstate == NODE_NO_STATE:
                    children_not_visited.append(child)
                elif childstate == NODE_PENDING:
                    children_pending.add(child)
                elif childstate == NODE_FAILED:
                    children_failed = True

                if childstate <= NODE_EXECUTING:
                    children_not_ready.append(child)


            # These nodes have not even been visited yet.  Add
            # them to the list so that on some next pass we can
            # take a stab at evaluating them (or their children).
            children_not_visited.reverse()
            self.candidates.extend(self.order(children_not_visited))
            #if T and children_not_visited:
            #    T.write(self.trace_message('     adding to candidates: %s' % map(str, children_not_visited)))
            #    T.write(self.trace_message('     candidates now: %s\n' % map(str, self.candidates)))

            # Skip this node if any of its children have failed.
            #
            # This catches the case where we're descending a top-level
            # target and one of our children failed while trying to be
            # built by a *previous* descent of an earlier top-level
            # target.
            #
            # It can also occur if a node is reused in multiple
            # targets. One first descends though the one of the
            # target, the next time occurs through the other target.
            #
            # Note that we can only have failed_children if the
            # --keep-going flag was used, because without it the build
            # will stop before diving in the other branch.
            #
            # Note that even if one of the children fails, we still
            # added the other children to the list of candidate nodes
            # to keep on building (--keep-going).
            if children_failed:
                for n in executor.get_action_targets():
                    n.set_state(NODE_FAILED)

                if S: S.child_failed = S.child_failed + 1
                if T: T.write(self.trace_message('****** %s\n' % self.trace_node(node)))
                continue

            if children_not_ready:
                for child in children_not_ready:
                    # We're waiting on one or more derived targets
                    # that have not yet finished building.
                    if S: S.not_built = S.not_built + 1

                    # Add this node to the waiting parents lists of
                    # anything we're waiting on, with a reference
                    # count so we can be put back on the list for
                    # re-evaluation when they've all finished.
                    node.ref_count =  node.ref_count + child.add_to_waiting_parents(node)
                    if T: T.write(self.trace_message(u'     adjusted ref count: %s, child %s' %
                                  (self.trace_node(node), repr(str(child)))))

                if T:
                    for pc in children_pending:
                        T.write(self.trace_message('       adding %s to the pending children set\n' %
                                self.trace_node(pc)))
                self.pending_children = self.pending_children | children_pending

                continue

            # Skip this node if it has side-effects that are
            # currently being built:
            wait_side_effects = False
            for se in executor.get_action_side_effects():
                if se.get_state() == NODE_EXECUTING:
                    se.add_to_waiting_s_e(node)
                    wait_side_effects = True

            if wait_side_effects:
                if S: S.side_effects = S.side_effects + 1
                continue

            # The default when we've gotten through all of the checks above:
            # this node is ready to be built.
            if S: S.build = S.build + 1
            if T: T.write(self.trace_message(u'Evaluating %s\n' %
                                             self.trace_node(node)))

            # For debugging only:
            #
            # try:
            #     self._validate_pending_children()
            # except:
            #     self.ready_exc = sys.exc_info()
            #     return node

            return node

        return None
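
The key move in this walker is that an exception raised while scanning children is not propagated immediately: the sys.exc_info() triple is parked in self.ready_exc and re-raised when the Task is executed. A minimal sketch of the same deferral, using an invented Walker class and the Python 3 re-raise spelling rather than the Python 2 form the listing uses:

import sys

class Walker(object):
    # sketch of deferring an exception the way the task scheduler above
    # does: record sys.exc_info() during the scan, re-raise at execute time
    def __init__(self):
        self.ready_exc = None

    def scan(self):
        try:
            raise OSError('child could not be scanned')
        except Exception:
            # keep the whole triple so the original traceback survives
            self.ready_exc = sys.exc_info()

    def execute(self):
        if self.ready_exc is not None:
            exc_type, exc_value, exc_tb = self.ready_exc
            # Python 3 spelling of "raise type, value, traceback"
            raise exc_value.with_traceback(exc_tb)

walker = Walker()
walker.scan()
try:
    walker.execute()
except OSError as e:
    print('re-raised later:', e)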

Example 25

View license
def callback_allhit(pages, **kwargs):

    if type(pages) != type([]):
        raise Exception('::callback_allhit() must be called with one list argument')

    def remove_newline_fields(list):
        while True:
            try:
                list.remove("\n")
            except:
                break
        return list
    
#    def is_soup(object):
#        soup = BeautifulSoup()
#        if type(object) == type(soup) or type(object) == type(ResultSet('')) or type(object) == type(Tag(soup, "div", [])):
#            return True
#        return False

    data = []
    errors = []

    # Processing every page
    for page_number in pages:
        try:
            # Downloading page
            log.info("Downloading page: %s" % page_number)
            page_url = get_allhit_url(page_number)
            log.debug("Downloading %s" % page_url)
            response = urllib2.urlopen(page_url)
            html = response.read()
            soup = BeautifulSoup(html)

            # Parsing HIT groups' list
            table = soup.find('table', cellpadding='0', cellspacing='5', border='0', width='100%')
            if type(table) == type(None):

                i = 0
                while i < 3:
                    log.warn("Soup returned an empty table for page %s. Trying once more" % page_number)
                    response = urllib2.urlopen(page_url)
                    html = response.read()
                    soup = BeautifulSoup(html)
                    table = soup.find('table', cellpadding='0', cellspacing='5', border='0', width='100%')
                    if type(table) != type(None):
                        break
                    else:
                        table = None
                        soup = None
                        html = None
                        i = i + 1

                if type(table) == type(None):
                    log.warn("Soup returned an empty table. This should not happen. Skipping page")
                    continue

            table.contents = remove_newline_fields(table.contents)

            # Parsing and fetching information about each group
            for i_group in range(0, len(table.contents)):
                log.debug("Processing group %s on page %s" % (i_group, page_number))
                try:
                    group_html = table.contents[i_group]

                    # Title
                    title = group_html.find('a', {'class':'capsulelink'})
                    if type(title) != type(None):
                        try:
                            title = str(title.contents[0])
                        except:
                            title = unicode(title.contents[0])
                        try:
                            title = unicode(remove_whitespaces(title))
                        except:
                            title = ''
                    
                    # Remove <span> in title
                    title = remove_whitespaces(strip_html(title))
                    
                    fields = group_html.findAll('td', {'align':'left','valign':'top','class':'capsule_field_text'})

                    if len(fields) == 7:

                        # Requester's name and ID
                        requester_html = remove_newline_fields(fields[0].contents)[0]
                        requester_name = remove_whitespaces(strip_html(unicode(requester_html.contents[0]))) # Remove <span> in requester name
                        requester_id = requester_html['href']
                        start = requester_id.index('requesterId=')+12
                        stop = requester_id.index('&state')
                        requester_id = requester_id[start:stop]

                        # HIT group expiration date
                        hit_expiration_date = remove_newline_fields(fields[1].contents)[0]
                        hit_expiration_date = remove_whitespaces(strip_html(hit_expiration_date))
                        hit_expiration_date = hit_expiration_date[:hit_expiration_date.index('(')-2]
                        hit_expiration_date = datetime.datetime.strptime(hit_expiration_date, '%b %d, %Y')

                        # Time alloted
                        time_alloted = remove_newline_fields(fields[2].contents)[0]
                        time_alloted = remove_whitespaces(strip_html(time_alloted))
                        time_alloted = int(time_alloted[:time_alloted.index(' ')])

                        # Reward
                        reward = float(remove_newline_fields(fields[3].contents)[0][1:])

                        # HITs available
                        hits_available = int(remove_newline_fields(fields[4].contents)[0])

                        # Description
                        description = unicode(remove_newline_fields(fields[5].contents)[0])

                        # Keywords
                        keywords_raw = remove_newline_fields(fields[6].contents)
                        keywords = []
                        for i in range(0, len(keywords_raw)):
                            try:
                                keyword = keywords_raw[i].contents[0]
                                keywords.append(keyword)
                            except:
                                continue
                        keywords = unicode(fuse(keywords, ','))

                        # Qualification
                        qualifications = ''
                        qfields = group_html.findAll('td', {'style':'padding-right: 2em; white-space: nowrap;'})

                        if len(qfields) > 0:
                            qfields = [remove_whitespaces(unicode(remove_newline_fields(qfield.contents)[0])) for qfield in qfields]
                            qualifications = fuse(qfields, ', ')
                        qfields = None

                        # Occurrence date
                        occurrence_date = datetime.datetime.now()

                        # Group ID
                        group_id = group_html.find('span', {'class':'capsulelink'})
                        group_id_hashed = False
                        if type(group_id) != type(None):
                            group_id = remove_newline_fields(group_id.contents)[0]
                            if 'href' in group_id._getAttrMap():
                                start = group_id['href'].index('groupId=')+8
                                stop = group_id['href'].index('&')
                                group_id = group_id['href'][start:stop]
                            else:
                                group_id_hashed = True
                                composition = "%s;%s;%s;%s;%s;%s;%s;" % (title,requester_id,
                                                                         time_alloted,reward,
                                                                         description,keywords,
                                                                         qualifications)
                                composition = smart_str(composition)
                                group_id = hashlib.md5(composition).hexdigest()

                        # Checking whether processed content is already stored in the database
                        hit_group_content = None
                        try:
                            log.debug("group_id=%s; requester=%s; title=%s; desc=%s; ta=%s; reward=%s" % (group_id, requester_id, title, description, time_alloted, reward))
                            hit_group_content = HitGroupContent.objects.get(group_id=group_id,
                                                                            requester_id=requester_id,
                                                                            title=title,
                                                                            description=description,
                                                                            time_alloted=time_alloted,
                                                                            reward=reward,
                                                                            )
                        except HitGroupContent.DoesNotExist:
                            hit_group_content = HitGroupContent(**{
                                    'title': title,
                                    'requester_id': requester_id,
                                    'requester_name': requester_name,
                                    'time_alloted': time_alloted,
                                    'reward': reward,
                                    'html': '',
                                    'description': description,
                                    'keywords': keywords,
                                    'qualifications': qualifications,
                                    'occurrence_date': occurrence_date,
                                    'group_id': group_id,
                                    'group_id_hashed': group_id_hashed
                                })

                        data.append({
                            'HitGroupStatus': {
                                'group_id': group_id,
                                'hits_available': hits_available,
                                'page_number': page_number,
                                'inpage_position': i_group+1,
                                'hit_expiration_date': hit_expiration_date,
                                'hit_group_content': hit_group_content
                            }
                        })

                    fields = None
                    group_html = None

                except:
                    log.error("Failed to process group %s on %s page (%s)" % (i_group, page_number, sys.exc_info()[0].__name__))
                    errors.append(grab_error(sys.exc_info()))
                    print grab_error(sys.exc_info())

            table = None
            soup = None
            html = None

        except:
            log.error("Failed to process page %d (%s)" % (page_number, sys.exc_info()[0].__name__))
            errors.append(grab_error(sys.exc_info()))
            print grab_error(sys.exc_info())

    return {'data':data,'errors':errors}
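
The error paths above pass the raw sys.exc_info() tuple to a project-specific grab_error() helper before logging and collecting it; that helper is defined elsewhere in the project. Purely as an illustration of what such a helper typically does (the field names below are an assumption, not the project's actual format), it can be built on the standard traceback module:

    import sys
    import traceback

    def grab_error(exc_info):
        """Turn a sys.exc_info() tuple into a small, loggable dict."""
        exc_type, exc_value, tb = exc_info
        return {
            'type': exc_type.__name__,
            'value': str(exc_value),
            'traceback': ''.join(traceback.format_tb(tb)),
        }

    # Usage: call it from inside an except block, where exc_info is populated.
    try:
        1 / 0
    except Exception:
        print(grab_error(sys.exc_info()))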

Example 26

Project: Nuitka
Source File: Taskmaster.py
View license
    def _find_next_ready_node(self):
        """
        Finds the next node that is ready to be built.

        This is *the* main guts of the DAG walk.  We loop through the
        list of candidates, looking for something that has no un-built
        children (i.e., that is a leaf Node or has dependencies that are
        all leaf Nodes or up-to-date).  Candidate Nodes are re-scanned
        (both the target Node itself and its sources, which are always
        scanned in the context of a given target) to discover implicit
        dependencies.  A Node that must wait for some children to be
        built will be put back on the candidates list after the children
        have finished building.  A Node that has been put back on the
        candidates list in this way may have itself (or its sources)
        re-scanned, in order to handle generated header files (e.g.) and
        the implicit dependencies therein.

        Note that this method does not do any signature calculation or
        up-to-date check itself.  All of that is handled by the Task
        class.  This is purely concerned with the dependency graph walk.
        """

        self.ready_exc = None

        T = self.trace
        if T: T.write(u'\n' + self.trace_message('Looking for a node to evaluate'))

        while True:
            node = self.next_candidate()
            if node is None:
                if T: T.write(self.trace_message('No candidate anymore.') + u'\n')
                return None

            node = node.disambiguate()
            state = node.get_state()

            # For debugging only:
            #
            # try:
            #     self._validate_pending_children()
            # except:
            #     self.ready_exc = sys.exc_info()
            #     return node

            if CollectStats:
                if not hasattr(node, 'stats'):
                    node.stats = Stats()
                    StatsNodes.append(node)
                S = node.stats
                S.considered = S.considered + 1
            else:
                S = None

            if T: T.write(self.trace_message(u'    Considering node %s and its children:' % self.trace_node(node)))

            if state == NODE_NO_STATE:
                # Mark this node as being on the execution stack:
                node.set_state(NODE_PENDING)
            elif state > NODE_PENDING:
                # Skip this node if it has already been evaluated:
                if S: S.already_handled = S.already_handled + 1
                if T: T.write(self.trace_message(u'       already handled (executed)'))
                continue

            executor = node.get_executor()

            try:
                children = executor.get_all_children()
            except SystemExit:
                exc_value = sys.exc_info()[1]
                e = SCons.Errors.ExplicitExit(node, exc_value.code)
                self.ready_exc = (SCons.Errors.ExplicitExit, e)
                if T: T.write(self.trace_message('       SystemExit'))
                return node
            except Exception, e:
                # We had a problem just trying to figure out the
                # children (like a child couldn't be linked in to a
                # VariantDir, or a Scanner threw something).  Arrange to
                # raise the exception when the Task is "executed."
                self.ready_exc = sys.exc_info()
                if S: S.problem = S.problem + 1
                if T: T.write(self.trace_message('       exception %s while scanning children.\n' % e))
                return node

            children_not_visited = []
            children_pending = set()
            children_not_ready = []
            children_failed = False

            for child in chain(executor.get_all_prerequisites(), children):
                childstate = child.get_state()

                if T: T.write(self.trace_message(u'       ' + self.trace_node(child)))

                if childstate == NODE_NO_STATE:
                    children_not_visited.append(child)
                elif childstate == NODE_PENDING:
                    children_pending.add(child)
                elif childstate == NODE_FAILED:
                    children_failed = True

                if childstate <= NODE_EXECUTING:
                    children_not_ready.append(child)


            # These nodes have not even been visited yet.  Add
            # them to the list so that on some next pass we can
            # take a stab at evaluating them (or their children).
            children_not_visited.reverse()
            self.candidates.extend(self.order(children_not_visited))
            #if T and children_not_visited:
            #    T.write(self.trace_message('     adding to candidates: %s' % map(str, children_not_visited)))
            #    T.write(self.trace_message('     candidates now: %s\n' % map(str, self.candidates)))

            # Skip this node if any of its children have failed.
            #
            # This catches the case where we're descending a top-level
            # target and one of our children failed while trying to be
            # built by a *previous* descent of an earlier top-level
            # target.
            #
            # It can also occur if a node is reused in multiple
            # targets. The first descent is through one of the
            # targets; the next occurs through the other target.
            #
            # Note that we can only have failed_children if the
            # --keep-going flag was used, because without it the build
            # will stop before diving in the other branch.
            #
            # Note that even if one of the children fails, we still
            # added the other children to the list of candidate nodes
            # to keep on building (--keep-going).
            if children_failed:
                for n in executor.get_action_targets():
                    n.set_state(NODE_FAILED)

                if S: S.child_failed = S.child_failed + 1
                if T: T.write(self.trace_message('****** %s\n' % self.trace_node(node)))
                continue

            if children_not_ready:
                for child in children_not_ready:
                    # We're waiting on one or more derived targets
                    # that have not yet finished building.
                    if S: S.not_built = S.not_built + 1

                    # Add this node to the waiting parents lists of
                    # anything we're waiting on, with a reference
                    # count so we can be put back on the list for
                    # re-evaluation when they've all finished.
                    node.ref_count =  node.ref_count + child.add_to_waiting_parents(node)
                    if T: T.write(self.trace_message(u'     adjusted ref count: %s, child %s' %
                                  (self.trace_node(node), repr(str(child)))))

                if T:
                    for pc in children_pending:
                        T.write(self.trace_message('       adding %s to the pending children set\n' %
                                self.trace_node(pc)))
                self.pending_children = self.pending_children | children_pending

                continue

            # Skip this node if it has side-effects that are
            # currently being built:
            wait_side_effects = False
            for se in executor.get_action_side_effects():
                if se.get_state() == NODE_EXECUTING:
                    se.add_to_waiting_s_e(node)
                    wait_side_effects = True

            if wait_side_effects:
                if S: S.side_effects = S.side_effects + 1
                continue

            # The default when we've gotten through all of the checks above:
            # this node is ready to be built.
            if S: S.build = S.build + 1
            if T: T.write(self.trace_message(u'Evaluating %s\n' %
                                             self.trace_node(node)))

            # For debugging only:
            #
            # try:
            #     self._validate_pending_children()
            # except:
            #     self.ready_exc = sys.exc_info()
            #     return node

            return node

        return None
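
One detail worth noting in the SystemExit handler above: the exception is not bound in the except clause; instead, the live exception object is fetched with sys.exc_info()[1] and its .code attribute is read. A minimal sketch of that retrieval follows (the helper function is illustrative only, not part of SCons, and the same result could be had with `except SystemExit as e`):

    import sys

    def run_and_capture_exit(func):
        """Run func(); if it calls sys.exit(), return the exit code instead."""
        try:
            return func()
        except SystemExit:
            # Same object that `except SystemExit as e` would bind to e.
            exit_exc = sys.exc_info()[1]
            return exit_exc.code

    def exits_with_three():
        sys.exit(3)

    print(run_and_capture_exit(exits_with_three))  # prints 3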

Example 28

Project: popcorn_maker
Source File: multiprocess.py
View license
    def run(self, test):
        """
        Execute the test (which may be a test suite). If the test is a suite,
        distribute it out among as many processes as have been configured, at
        as fine a level as is possible given the context fixtures defined in
        the suite or any sub-suites.

        """
        log.debug("%s.run(%s) (%s)", self, test, os.getpid())
        wrapper = self.config.plugins.prepareTest(test)
        if wrapper is not None:
            test = wrapper

        # plugins can decorate or capture the output stream
        wrapped = self.config.plugins.setOutputStream(self.stream)
        if wrapped is not None:
            self.stream = wrapped

        testQueue = Queue()
        resultQueue = Queue()
        tasks = []
        completed = []
        workers = []
        to_teardown = []
        shouldStop = Event()

        result = self._makeResult()
        start = time.time()

        # dispatch and collect results
        # put indexes only on queue because tests aren't picklable
        for case in self.nextBatch(test):
            log.debug("Next batch %s (%s)", case, type(case))
            if (isinstance(case, nose.case.Test) and
                isinstance(case.test, failure.Failure)):
                log.debug("Case is a Failure")
                case(result) # run here to capture the failure
                continue
            # handle shared fixtures
            if isinstance(case, ContextSuite) and case.context is failure.Failure:
                log.debug("Case is a Failure")
                case(result) # run here to capture the failure
                continue
            elif isinstance(case, ContextSuite) and self.sharedFixtures(case):
                log.debug("%s has shared fixtures", case)
                try:
                    case.setUp()
                except (KeyboardInterrupt, SystemExit):
                    raise
                except:
                    log.debug("%s setup failed", sys.exc_info())
                    result.addError(case, sys.exc_info())
                else:
                    to_teardown.append(case)
                    for _t in case:
                        test_addr = self.addtask(testQueue,tasks,_t)
                        log.debug("Queued shared-fixture test %s (%s) to %s",
                                  len(tasks), test_addr, testQueue)

            else:
                test_addr = self.addtask(testQueue,tasks,case)
                log.debug("Queued test %s (%s) to %s",
                          len(tasks), test_addr, testQueue)

        log.debug("Starting %s workers", self.config.multiprocess_workers)
        for i in range(self.config.multiprocess_workers):
            currentaddr = Value('c',bytes_(''))
            currentstart = Value('d',0.0)
            keyboardCaught = Event()
            p = Process(target=runner, args=(i, testQueue, resultQueue,
                                             currentaddr, currentstart,
                                             keyboardCaught, shouldStop,
                                             self.loaderClass,
                                             result.__class__,
                                             pickle.dumps(self.config)))
            p.currentaddr = currentaddr
            p.currentstart = currentstart
            p.keyboardCaught = keyboardCaught
            # p.setDaemon(True)
            p.start()
            workers.append(p)
            log.debug("Started worker process %s", i+1)

        total_tasks = len(tasks)
        # need to keep track of the next time to check for timeouts in case
        # more than one process times out at the same time.
        nexttimeout=self.config.multiprocess_timeout
        while tasks:
            log.debug("Waiting for results (%s/%s tasks), next timeout=%.3fs",
                      len(completed), total_tasks,nexttimeout)
            try:
                iworker, addr, newtask_addrs, batch_result = resultQueue.get(
                                                        timeout=nexttimeout)
                log.debug('Results received for worker %d, %s, new tasks: %d',
                          iworker,addr,len(newtask_addrs))
                try:
                    try:
                        tasks.remove(addr)
                    except ValueError:
                        log.warn('worker %s failed to remove from tasks: %s',
                                 iworker,addr)
                    total_tasks += len(newtask_addrs)
                    for newaddr in newtask_addrs:
                        tasks.append(newaddr)
                except KeyError:
                    log.debug("Got result for unknown task? %s", addr)
                    log.debug("current: %s",str(list(tasks)[0]))
                else:
                    completed.append([addr,batch_result])
                self.consolidate(result, batch_result)
                if (self.config.stopOnError
                    and not result.wasSuccessful()):
                    # set the stop condition
                    shouldStop.set()
                    break
                if self.config.multiprocess_restartworker:
                    log.debug('joining worker %s',iworker)
                    # wait for the worker, but it is not critical if the
                    # worker cannot be joined; workers that add to testQueue
                    # will not terminate until all their items are read
                    workers[iworker].join(timeout=1)
                    if not shouldStop.is_set() and not testQueue.empty():
                        log.debug('starting new process on worker %s',iworker)
                        currentaddr = Value('c',bytes_(''))
                        currentstart = Value('d',time.time())
                        keyboardCaught = Event()
                        workers[iworker] = Process(target=runner,
                                                   args=(iworker, testQueue,
                                                         resultQueue,
                                                         currentaddr,
                                                         currentstart,
                                                         keyboardCaught,
                                                         shouldStop,
                                                         self.loaderClass,
                                                         result.__class__,
                                                         pickle.dumps(self.config)))
                        workers[iworker].currentaddr = currentaddr
                        workers[iworker].currentstart = currentstart
                        workers[iworker].keyboardCaught = keyboardCaught
                        workers[iworker].start()
            except Empty:
                log.debug("Timed out with %s tasks pending "
                          "(empty testQueue=%d): %s",
                          len(tasks),testQueue.empty(),str(tasks))
                any_alive = False
                for iworker, w in enumerate(workers):
                    if w.is_alive():
                        worker_addr = bytes_(w.currentaddr.value,'ascii')
                        timeprocessing = time.time() - w.currentstart.value
                        if ( len(worker_addr) == 0
                                and timeprocessing > self.config.multiprocess_timeout-0.1):
                            log.debug('worker %d has finished its work item, '
                                      'but is not exiting? do we wait for it?',
                                      iworker)
                        else:
                            any_alive = True
                        if (len(worker_addr) > 0
                            and timeprocessing > self.config.multiprocess_timeout-0.1):
                            log.debug('timed out worker %s: %s',
                                      iworker,worker_addr)
                            w.currentaddr.value = bytes_('')
                            # If the process is in C++ code, sending a SIGINT
                            # might not raise a Python KeyboardInterrupt exception;
                            # therefore, send multiple signals until an
                            # exception is caught. If this takes too long, then
                            # terminate the process
                            w.keyboardCaught.clear()
                            startkilltime = time.time()
                            while not w.keyboardCaught.is_set() and w.is_alive():
                                if time.time()-startkilltime > self.waitkilltime:
                                    # have to terminate...
                                    log.error("terminating worker %s",iworker)
                                    w.terminate()
                                    currentaddr = Value('c',bytes_(''))
                                    currentstart = Value('d',time.time())
                                    keyboardCaught = Event()
                                    workers[iworker] = Process(target=runner,
                                        args=(iworker, testQueue, resultQueue,
                                              currentaddr, currentstart,
                                              keyboardCaught, shouldStop,
                                              self.loaderClass,
                                              result.__class__,
                                              pickle.dumps(self.config)))
                                    workers[iworker].currentaddr = currentaddr
                                    workers[iworker].currentstart = currentstart
                                    workers[iworker].keyboardCaught = keyboardCaught
                                    workers[iworker].start()
                                    # there is a small probability that the
                                    # terminated process might send a result,
                                    # which has to be specially handled or
                                    # else processes might get orphaned.
                                    w = workers[iworker]
                                    break
                                os.kill(w.pid, signal.SIGINT)
                                time.sleep(0.1)
                if not any_alive and testQueue.empty():
                    log.debug("All workers dead")
                    break
            nexttimeout=self.config.multiprocess_timeout
            for w in workers:
                if w.is_alive() and len(w.currentaddr.value) > 0:
                    timeprocessing = time.time()-w.currentstart.value
                    if timeprocessing <= self.config.multiprocess_timeout:
                        nexttimeout = min(nexttimeout,
                            self.config.multiprocess_timeout-timeprocessing)

        log.debug("Completed %s tasks (%s remain)", len(completed), len(tasks))

        for case in to_teardown:
            log.debug("Tearing down shared fixtures for %s", case)
            try:
                case.tearDown()
            except (KeyboardInterrupt, SystemExit):
                raise
            except:
                result.addError(case, sys.exc_info())

        stop = time.time()

        # first write since can freeze on shutting down processes
        result.printErrors()
        result.printSummary(start, stop)
        self.config.plugins.finalize(result)

        log.debug("Tell all workers to stop")
        for w in workers:
            if w.is_alive():
                testQueue.put('STOP', block=False)

        # wait for the workers to end
        try:
            for iworker,worker in enumerate(workers):
                if worker.is_alive():
                    log.debug('joining worker %s',iworker)
                    worker.join()#10)
                    if worker.is_alive():
                        log.debug('failed to join worker %s',iworker)
        except KeyboardInterrupt:
            log.info('parent received ctrl-c')
            for worker in workers:
                worker.terminate()
                worker.join()

        return result
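
Both error paths in this runner hand sys.exc_info() directly to result.addError(). That matches the standard unittest result API: addError() and addFailure() accept the (type, value, traceback) tuple as-is and format it into the stored error text. A small self-contained demonstration (the test class and error message are made up for illustration):

    import sys
    import unittest

    class ExampleTest(unittest.TestCase):
        def runTest(self):
            pass

    result = unittest.TestResult()
    case = ExampleTest()

    try:
        raise RuntimeError("setup failed")
    except Exception:
        # The raw exc_info tuple is accepted directly by the result object.
        result.addError(case, sys.exc_info())

    print(len(result.errors))                             # 1
    print(result.errors[0][1].strip().splitlines()[-1])   # RuntimeError: setup failed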

Example 30

Project: masakari
Source File: masakari_controller.py
View license
    @log_process_begin_and_end.output_log
    def _notification_reciever(self, env, start_response):

        try:
            length = int(env['CONTENT_LENGTH'])
            if length > 0:
                body = env['wsgi.input'].read(length)
                json_data = json.loads(body)

                msg = "Received notification : " + body
                LOG.info(msg)

                ret = self._check_json_param(json_data)
                if ret == 1:
                    # Return Response
                    start_response(
                        '400 Bad Request', [('Content-Type', 'text/plain')])

                    msg = "Wsgi response: " \
                          "status=400 Bad Request, " \
                          "body=method _notification_reciever returned."
                    LOG.info(msg)

                    return ['method _notification_reciever returned.\r\n']

                # Insert notification into notification_list_db
                notification_list_dic = {}
                notification_list_dic = self._create_notification_list_db(
                    json_data)

                # Return Response
                start_response('200 OK', [('Content-Type', 'text/plain')])

                msg = "Wsgi response: " \
                    + "status=200 OK, " \
                    + "body=method _notification_reciever returned."
                LOG.info(msg)

                if notification_list_dic != {}:
                    # Start thread
                    if notification_list_dic.get("recover_by") == 0 and \
                       notification_list_dic.get("progress") == 0:

                        msg = "Run thread rc_worker.host_maintenance_mode." \
                            + " notification_id=" \
                            + notification_list_dic.get("notification_id") \
                            + " notification_hostname=" \
                            + notification_list_dic.get(
                                "notification_hostname") \
                            + "update_progress=False"
                        LOG.info(msg)
                        thread_name = self.rc_util.make_thread_name(
                            NOTIFICATION_LIST,
                            notification_list_dic.get("notification_id"))
                        th = threading.Thread(
                            target=self.rc_worker.host_maintenance_mode,
                            name=thread_name,
                            args=(notification_list_dic.get(
                                "notification_id"), notification_list_dic.get(
                                "notification_hostname"),
                                False, ))
                        th.start()

                        # Sleep until nova recognizes the node down.
                        dic = self.rc_config.get_value('recover_starter')
                        node_err_wait = dic.get("node_err_wait")
                        msg = ("Sleeping %s sec before starting recovery"
                               "thread until nova recognizes the node down..."
                               % (node_err_wait)
                               )
                        LOG.info(msg)
                        greenthread.sleep(int(node_err_wait))

                        retry_mode = False
                        msg = "Run thread rc_starter.add_failed_host." \
                            + " notification_id=" \
                            + notification_list_dic.get("notification_id") \
                            + " notification_hostname=" \
                            + notification_list_dic.get(
                                "notification_hostname") \
                            + " notification_cluster_port=" \
                            + notification_list_dic.get(
                                "notification_cluster_port") \
                            + " retry_mode=" + str(retry_mode)
                        LOG.info(msg)
                        thread_name = self.rc_util.make_thread_name(
                            NOTIFICATION_LIST,
                            notification_list_dic.get("notification_id"))
                        th = threading.Thread(
                            target=self.rc_starter.add_failed_host,
                            name=thread_name,
                            args=(notification_list_dic.get(
                                "notification_id"),
                                notification_list_dic.get(
                                "notification_hostname"),
                                notification_list_dic.get(
                                "notification_cluster_port"),
                                retry_mode, ))

                        th.start()
                    elif notification_list_dic.get("recover_by") == 0 and \
                            notification_list_dic.get("progress") == 3:
                        msg = "Run thread rc_worker.host_maintenance_mode." \
                            + " notification_id=" \
                            + notification_list_dic.get("notification_id") \
                            + " notification_hostname=" \
                            + notification_list_dic.get(
                                "notification_hostname") \
                            + "update_progress=False"
                        LOG.info(msg)
                        thread_name = self.rc_util.make_thread_name(
                            NOTIFICATION_LIST,
                            notification_list_dic.get("notification_id"))
                        th = threading.Thread(
                            target=self.rc_worker.host_maintenance_mode,
                            name=thread_name,
                            args=(notification_list_dic.get(
                                "notification_id"),
                                notification_list_dic.get(
                                "notification_hostname"),
                                False, ))
                        th.start()
                    elif notification_list_dic.get("recover_by") == 1:
                        retry_mode = False
                        msg = "Run thread rc_starter.add_failed_instance." \
                            + " notification_id=" \
                            + notification_list_dic.get("notification_id") \
                            + " notification_uuid=" \
                            + notification_list_dic.get("notification_uuid") \
                            + " retry_mode=" + str(retry_mode)
                        LOG.info(msg)
                        thread_name = self.rc_util.make_thread_name(
                            NOTIFICATION_LIST,
                            notification_list_dic.get("notification_id"))
                        th = threading.Thread(
                            target=self.rc_starter.add_failed_instance,
                            name=thread_name,
                            args=(
                                notification_list_dic.get("notification_id"),
                                notification_list_dic.get(
                                    "notification_uuid"), retry_mode, )
                        )
                        th.start()
                    elif notification_list_dic.get("recover_by") == 2:
                        msg = "Run thread rc_worker.host_maintenance_mode." \
                            + " notification_id=" \
                            + notification_list_dic.get("notification_id") \
                            + " notification_hostname=" \
                            + notification_list_dic.get(
                                "notification_hostname") \
                            + "update_progress=False"
                        LOG.info(msg)
                        thread_name = self.rc_util.make_thread_name(
                            NOTIFICATION_LIST,
                            notification_list_dic.get("notification_id"))
                        th = threading.Thread(
                            target=self.rc_worker.host_maintenance_mode,
                            name=thread_name,
                            args=(
                                notification_list_dic.get("notification_id"),
                                notification_list_dic.get(
                                    "notification_hostname"),
                                True, )
                        )
                        th.start()
                    else:
                        LOG.warning(
                            "Column \"recover_by\" on notification_list DB "
                            "has an invalid value.")

        except exc.SQLAlchemyError:
            error_type, error_value, traceback_ = sys.exc_info()
            tb_list = traceback.format_tb(traceback_)
            LOG.error(error_type)
            LOG.error(error_value)
            for tb in tb_list:
                LOG.error(tb)
            start_response(
                '500 Internal Server Error', [('Content-Type', 'text/plain')])

            msg = "Wsgi response: " \
                  "status=500 Internal Server Error, " \
                  "body=method _notification_reciever returned."
            LOG.info(msg)

        except KeyError:
            error_type, error_value, traceback_ = sys.exc_info()
            tb_list = traceback.format_tb(traceback_)
            LOG.error(error_type)
            LOG.error(error_value)
            for tb in tb_list:
                LOG.error(tb)
            start_response(
                '500 Internal Server Error', [('Content-Type', 'text/plain')])

            msg = "Wsgi response: " \
                  "status=500 Internal Server Error, " \
                  "body=method _notification_reciever returned."
            LOG.info(msg)

        except:
            error_type, error_value, traceback_ = sys.exc_info()
            tb_list = traceback.format_tb(traceback_)
            LOG.error(error_type)
            LOG.error(error_value)
            for tb in tb_list:
                LOG.error(tb)
            start_response(
                '500 Internal Server Error', [('Content-Type', 'text/plain')])

            msg = "Wsgi response: " \
                  "status=500 Internal Server Error, " \
                  "body=method _notification_reciever returned."
            LOG.info(msg)

        return ['method _notification_reciever returned.\r\n']
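
Each except block above follows the same idiom: unpack sys.exc_info() into type, value, and traceback, log the first two, and log traceback.format_tb() of the third frame by frame. A minimal, self-contained sketch of that idiom (the logger setup and helper name are illustrative, not taken from the project):

import logging
import sys
import traceback

LOG = logging.getLogger(__name__)


def log_current_exception():
    # sys.exc_info() returns (type, value, traceback) for the exception
    # currently being handled; outside an except block it is (None, None, None).
    error_type, error_value, traceback_ = sys.exc_info()
    LOG.error(error_type)
    LOG.error(error_value)
    # format_tb() yields one formatted string per stack frame.
    for tb in traceback.format_tb(traceback_):
        LOG.error(tb)


logging.basicConfig(level=logging.ERROR)
try:
    raise KeyError('CONTENT_LENGTH')
except KeyError:
    log_current_exception()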

Example 31

Project: masakari
Source File: masakari_controller.py
View license
    @log_process_begin_and_end.output_log
    def _notification_reciever(self, env, start_response):

        try:
            content_length = int(env['CONTENT_LENGTH'])
            if content_length > 0:
                body = env['wsgi.input'].read(content_length)
                json_data = json.loads(body)

                msg = "Recieved notification : " + body
                LOG.info(msg)

                ret = self._check_json_param(json_data)
                if ret == 1:
                    # Return Response
                    start_response(
                        '400 Bad Request', [('Content-Type', 'text/plain')])

                    msg = "Wsgi response: " \
                          "status=400 Bad Request, " \
                          "body=method _notification_reciever returned."
                    LOG.info(msg)

                    return ['method _notification_reciever returned.\r\n']

                # Insert notification into notification_list_db
                notification_list_dic = {}
                notification_list_dic = self._create_notification_list_db(
                    json_data)

                # Return Response
                start_response('200 OK', [('Content-Type', 'text/plain')])

                msg = "Wsgi response: " \
                    + "status=200 OK, " \
                    + "body=method _notification_reciever returned."
                LOG.info(msg)

                if notification_list_dic != {}:
                    # Start thread
                    if notification_list_dic.get("recover_by") == 0 and \
                       notification_list_dic.get("progress") == 0:

                        msg = "Run thread rc_worker.host_maintenance_mode." \
                            + " notification_id=" \
                            + notification_list_dic.get("notification_id") \
                            + " notification_hostname=" \
                            + notification_list_dic.get(
                                "notification_hostname") \
                            + "update_progress=False"
                        LOG.info(msg)
                        thread_name = self.rc_util.make_thread_name(
                            NOTIFICATION_LIST,
                            notification_list_dic.get("notification_id"))
                        th = threading.Thread(
                            target=self.rc_worker.host_maintenance_mode,
                            name=thread_name,
                            args=(notification_list_dic.get(
                                "notification_id"), notification_list_dic.get(
                                "notification_hostname"),
                                False, ))
                        th.start()

                        # Sleep until nova recognizes the node down.
                        dic = self.rc_config.get_value('recover_starter')
                        node_err_wait = dic.get("node_err_wait")
                        msg = ("Sleeping %s sec before starting recovery"
                               "thread until nova recognizes the node down..."
                               % (node_err_wait)
                               )
                        LOG.info(msg)
                        greenthread.sleep(int(node_err_wait))

                        retry_mode = False
                        msg = "Run thread rc_starter.add_failed_host." \
                            + " notification_id=" \
                            + notification_list_dic.get("notification_id") \
                            + " notification_hostname=" \
                            + notification_list_dic.get(
                                "notification_hostname") \
                            + " notification_cluster_port=" \
                            + notification_list_dic.get(
                                "notification_cluster_port") \
                            + " retry_mode=" + str(retry_mode)
                        LOG.info(msg)
                        thread_name = self.rc_util.make_thread_name(
                            NOTIFICATION_LIST,
                            notification_list_dic.get("notification_id"))
                        th = threading.Thread(
                            target=self.rc_starter.add_failed_host,
                            name=thread_name,
                            args=(notification_list_dic.get(
                                "notification_id"),
                                notification_list_dic.get(
                                "notification_hostname"),
                                notification_list_dic.get(
                                "notification_cluster_port"),
                                retry_mode, ))

                        th.start()
                    elif notification_list_dic.get("recover_by") == 0 and \
                            notification_list_dic.get("progress") == 3:
                        msg = "Run thread rc_worker.host_maintenance_mode." \
                            + " notification_id=" \
                            + notification_list_dic.get("notification_id") \
                            + " notification_hostname=" \
                            + notification_list_dic.get(
                                "notification_hostname") \
                            + "update_progress=False"
                        LOG.info(msg)
                        thread_name = self.rc_util.make_thread_name(
                            NOTIFICATION_LIST,
                            notification_list_dic.get("notification_id"))
                        th = threading.Thread(
                            target=self.rc_worker.host_maintenance_mode,
                            name=thread_name,
                            args=(notification_list_dic.get(
                                "notification_id"),
                                notification_list_dic.get(
                                "notification_hostname"),
                                False, ))
                        th.start()
                    elif notification_list_dic.get("recover_by") == 1:
                        retry_mode = False
                        msg = "Run thread rc_starter.add_failed_instance." \
                            + " notification_id=" \
                            + notification_list_dic.get("notification_id") \
                            + " notification_uuid=" \
                            + notification_list_dic.get("notification_uuid") \
                            + " retry_mode=" + str(retry_mode)
                        LOG.info(msg)
                        thread_name = self.rc_util.make_thread_name(
                            NOTIFICATION_LIST,
                            notification_list_dic.get("notification_id"))
                        th = threading.Thread(
                            target=self.rc_starter.add_failed_instance,
                            name=thread_name,
                            args=(
                                notification_list_dic.get("notification_id"),
                                notification_list_dic.get(
                                    "notification_uuid"), retry_mode, )
                        )
                        th.start()
                    elif notification_list_dic.get("recover_by") == 2:
                        msg = "Run thread rc_worker.host_maintenance_mode." \
                            + " notification_id=" \
                            + notification_list_dic.get("notification_id") \
                            + " notification_hostname=" \
                            + notification_list_dic.get(
                                "notification_hostname") \
                            + "update_progress=False"
                        LOG.info(msg)
                        thread_name = self.rc_util.make_thread_name(
                            NOTIFICATION_LIST,
                            notification_list_dic.get("notification_id"))
                        th = threading.Thread(
                            target=self.rc_worker.host_maintenance_mode,
                            name=thread_name,
                            args=(
                                notification_list_dic.get("notification_id"),
                                notification_list_dic.get(
                                    "notification_hostname"),
                                True, )
                        )
                        th.start()
                    else:
                        LOG.warning(
                            "Column \"recover_by\" on notification_list DB "
                            "has an invalid value.")

        except exc.SQLAlchemyError:
            error_type, error_value, traceback_ = sys.exc_info()
            tb_list = traceback.format_tb(traceback_)
            LOG.error(error_type)
            LOG.error(error_value)
            for tb in tb_list:
                LOG.error(tb)
            start_response(
                '500 Internal Server Error', [('Content-Type', 'text/plain')])

            msg = "Wsgi response: " \
                  "status=500 Internal Server Error, " \
                  "body=method _notification_reciever returned."
            LOG.info(msg)

        except KeyError:
            error_type, error_value, traceback_ = sys.exc_info()
            tb_list = traceback.format_tb(traceback_)
            LOG.error(error_type)
            LOG.error(error_value)
            for tb in tb_list:
                LOG.error(tb)
            start_response(
                '500 Internal Server Error', [('Content-Type', 'text/plain')])

            msg = "Wsgi response: " \
                  "status=500 Internal Server Error, " \
                  "body=method _notification_reciever returned."
            LOG.info(msg)

        except:
            error_type, error_value, traceback_ = sys.exc_info()
            tb_list = traceback.format_tb(traceback_)
            LOG.error(error_type)
            LOG.error(error_value)
            for tb in tb_list:
                LOG.error(tb)
            start_response(
                '500 Internal Server Error', [('Content-Type', 'text/plain')])

            msg = "Wsgi response: " \
                  "status=500 Internal Server Error, " \
                  "body=method _notification_reciever returned."
            LOG.info(msg)

        return ['method _notification_reciever returned.\r\n']
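
Every recover_by branch in the handler above does the same three things: build a log line, derive a thread name from the notification id, and start a threading.Thread around one recovery callable. A simplified, hedged sketch of that dispatch shape (it collapses the branches and stubs the workers; only the recover_by semantics are taken from the example):

import threading


def host_maintenance_mode(notification_id, hostname, update_progress):
    # Stand-in for rc_worker.host_maintenance_mode.
    print('maintenance', notification_id, hostname, update_progress)


def add_failed_instance(notification_id, uuid, retry_mode):
    # Stand-in for rc_starter.add_failed_instance.
    print('evacuate', notification_id, uuid, retry_mode)


def dispatch(notification):
    # Map recover_by to one worker callable plus its arguments,
    # mirroring (and simplifying) the if/elif chain above.
    recover_by = notification.get('recover_by')
    if recover_by in (0, 2):
        target = host_maintenance_mode
        args = (notification.get('notification_id'),
                notification.get('notification_hostname'),
                recover_by == 2)
    elif recover_by == 1:
        target = add_failed_instance
        args = (notification.get('notification_id'),
                notification.get('notification_uuid'),
                False)
    else:
        raise ValueError('invalid recover_by value: %r' % recover_by)

    # Thread name format is illustrative; the project builds it via
    # rc_util.make_thread_name().
    name = 'notification_list:%s' % notification.get('notification_id')
    th = threading.Thread(target=target, name=name, args=args)
    th.start()
    return th


dispatch({'recover_by': 1, 'notification_id': '1',
          'notification_uuid': 'abc'}).join()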

Example 32

Project: stock-logistics-barcode
Source File: sentinel.py
View license
    def main_loop(self):
        """
        Loops until the user asks to exit
        """
        code = False
        result = None
        value = None

        while True:
            try:
                try:
                    # No active scenario, select one
                    if not self.scenario_id:
                        (code, result, value) = self._select_scenario()
                    else:
                        # Search for a step title
                        title = None
                        title_key = '|'
                        if isinstance(result, (types.NoneType, bool)):
                            pass
                        elif (isinstance(result, dict) and
                              result.get(title_key, None)):
                            title = result[title_key]
                            del result[title_key]
                        elif (isinstance(result[0], (tuple, list)) and
                              result[0][0] == title_key):
                            title = result.pop(0)[1]
                        elif (isinstance(result[0], basestring) and
                              result[0].startswith(title_key)):
                            title = result.pop(0)[len(title_key):]

                        if title is None and self.scenario_name:
                            # If no title is defined, display the scenario name
                            title = self.scenario_name

                        if code == 'Q' or code == 'N':
                            # Quantity selection
                            quantity = self._select_quantity(
                                '\n'.join(result), '%g' % value,
                                integer=(code == 'N'), title=title)
                            (code, result, value) = self.oerp_call('action',
                                                                   quantity)
                        elif code == 'C':
                            # Confirmation query
                            confirm = self._confirm(
                                '\n'.join(result), title=title)
                            (code, result, value) = self.oerp_call('action',
                                                                   confirm)
                        elif code == 'T':
                            # Select arguments from value
                            default = ''
                            size = None
                            if isinstance(value, dict):
                                default = value.get('default', '')
                                size = value.get('size', None)
                            elif isinstance(value, str):
                                default = value

                            # Text input
                            text = self._input_text(
                                '\n'.join(result), default=default,
                                size=size, title=title)
                            (code, result, value) = self.oerp_call('action',
                                                                   text)
                        elif code == 'R':
                            # Critical error
                            self.scenario_id = False
                            self.scenario_name = False
                            self._display_error('\n'.join(result), title=title)
                        elif code == 'U':
                            # Unknown action : message with return back to the
                            # last state
                            self._display(
                                '\n'.join(result), clear=True, scroll=True,
                                title=title)
                            (code, result, value) = self.oerp_call('back')
                        elif code == 'E':
                            # Error message
                            self._display_error(
                                '\n'.join(result), title=title)
                            # Execute transition
                            if not value:
                                (code, result, value) = self.oerp_call(
                                    'action')
                            else:
                                # Back to the previous step required
                                (code, result, value) = self.oerp_call(
                                    'back')
                        elif code == 'M':
                            # Simple message
                            self._display(
                                '\n'.join(result), clear=True, scroll=True,
                                title=title)
                            # Execute transition
                            (code, result, value) = self.oerp_call('action',
                                                                   value)
                        elif code == 'L':
                            if result:
                                # Select a value in the list
                                choice = self._menu_choice(result, title=title)
                                # Send the result to Odoo
                                (code, result, value) = self.oerp_call(
                                    'action', choice)
                            else:
                                # Empty list supplied, display an error
                                (code, result, value) = (
                                    'E', [_('No value available')], True)

                            # Check if we are in a scenario (to retrieve the
                            # scenario name from a submenu)
                            self.scanner_check()
                            if not self.scenario_id:
                                self.scenario_id = True
                                self.scenario_name = False
                        elif code == 'F':
                            # End of scenario
                            self.scenario_id = False
                            self.scenario_name = False
                            self._display('\n'.join(result), clear=True,
                                          scroll=True, title=title)
                        else:
                            # Default call
                            (code, result, value) = self.oerp_call('restart')
                except SentinelBackException:
                    # Back to the previous step required
                    (code, result, value) = self.oerp_call('back')
                    # Do not display the termination message
                    if code == 'F':
                        self.ungetch(ord('\n'))
                    self.screen.bkgd(0, self._get_color('base'))
                except Exception:
                    # Generates log contents
                    log_contents = """%s
# %s
# Hardware code : %s
# Current scenario : %s (%s)
# Current values :
#\tcode : %s
#\tresult : %s
#\tvalue : %s
%s
%s
"""
                    log_contents = log_contents % (
                        '#' * 79, datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
                        self.hardware_code, str(self.scenario_id),
                        self.scenario_name, code, repr(result), repr(value),
                        '#' * 79, reduce(
                            lambda x, y: x + y, traceback.format_exception(
                                sys.exc_info()[0],
                                sys.exc_info()[1],
                                sys.exc_info()[2])))

                    # Writes traceback in log file
                    logfile = open(self.datadir + 'oerp_sentinel.log', 'a')
                    logfile.write(log_contents)
                    logfile.close()

                    # Display error message
                    (code, result, value) = (
                        'E', [_('An error occurred\n\nPlease contact your '
                                'administrator')], False)
            except KeyboardInterrupt:
                # If Ctrl+C, exit
                (code, result, value) = self.oerp_call('end')
                # Restore normal background colors
                self.screen.bkgd(0, self._get_color('base'))
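
The exception path above assembles one log entry by concatenating traceback.format_exception() over the three elements of sys.exc_info() and appending it to a file. A short sketch of that same logging step (the file name and field layout are illustrative):

import sys
import traceback
from datetime import datetime


def append_crash_log(path, scenario, code, result, value):
    # ''.join over format_exception(*sys.exc_info()) reproduces the usual
    # interpreter traceback as one string.
    trace = ''.join(traceback.format_exception(*sys.exc_info()))
    entry = '%s\n# %s\n# scenario: %s code: %s result: %r value: %r\n%s\n' % (
        '#' * 79, datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
        scenario, code, result, value, trace)
    with open(path, 'a') as logfile:
        logfile.write(entry)


try:
    1 / 0
except Exception:
    append_crash_log('oerp_sentinel.log', 'demo', 'L', None, None)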

Example 33

Project: p2pool
Source File: Server.py
View license
    def do_POST(self):
        global _contexts
        
        status = 500
        try:
            if self.server.config.dumpHeadersIn:
                s = 'Incoming HTTP headers'
                debugHeader(s)
                print self.raw_requestline.strip()
                print "\n".join(map (lambda x: x.strip(),
                    self.headers.headers))
                debugFooter(s)

            data = self.rfile.read(int(self.headers["Content-length"]))

            if self.server.config.dumpSOAPIn:
                s = 'Incoming SOAP'
                debugHeader(s)
                print data,
                if data[-1] != '\n':
                    print
                debugFooter(s)

            (r, header, body, attrs) = \
                parseSOAPRPC(data, header = 1, body = 1, attrs = 1)

            method = r._name
            args   = r._aslist()
            kw     = r._asdict()

            if Config.simplify_objects:
                args = simplify(args)
                kw = simplify(kw)

            # Handle mixed named and unnamed arguments by assuming
            # that all arguments with names of the form "v[0-9]+"
            # are unnamed and should be passed in numeric order,
            # other arguments are named and should be passed using
            # this name.

            # This is a non-standard extension to the SOAP protocol,
            # but is supported by Apache AXIS.

            # It is enabled by default.  To disable, set
            # Config.specialArgs to False.


            ordered_args = {}
            named_args   = {}

            if Config.specialArgs: 
                
                for (k,v) in  kw.items():

                    if k[0]=="v":
                        try:
                            i = int(k[1:])
                            ordered_args[i] = v
                        except ValueError:
                            named_args[str(k)] = v

                    else:
                        named_args[str(k)] = v

            # We have to decide namespace precedence
            # I'm happy with the following scenario
            # if r._ns is specified use it, if not check for
            # a path, if it's specified convert it and use it as the
            # namespace. If both are specified, use r._ns.
            
            ns = r._ns

            if len(self.path) > 1 and not ns:
                ns = self.path.replace("/", ":")
                if ns[0] == ":": ns = ns[1:]
            
            # authorization method
            a = None

            keylist = ordered_args.keys()
            keylist.sort()

            # create list in proper order w/o names
            tmp = map( lambda x: ordered_args[x], keylist)
            ordered_args = tmp

            #print '<-> Argument Matching Yielded:'
            #print '<-> Ordered Arguments:' + str(ordered_args)
            #print '<-> Named Arguments  :' + str(named_args)
             
            resp = ""
            
            # For fault messages
            if ns:
                nsmethod = "%s:%s" % (ns, method)
            else:
                nsmethod = method

            try:
                # First look for registered functions
                if self.server.funcmap.has_key(ns) and \
                    self.server.funcmap[ns].has_key(method):
                    f = self.server.funcmap[ns][method]

                    # look for the authorization method
                    if self.server.config.authMethod != None:
                        authmethod = self.server.config.authMethod
                        if self.server.funcmap.has_key(ns) and \
                               self.server.funcmap[ns].has_key(authmethod):
                            a = self.server.funcmap[ns][authmethod]
                else:
                    # Now look at registered objects
                    # Check for nested attributes. This works even if
                    # there are none, because the split will return
                    # [method]
                    f = self.server.objmap[ns]
                    
                    # Look for the authorization method
                    if self.server.config.authMethod != None:
                        authmethod = self.server.config.authMethod
                        if hasattr(f, authmethod):
                            a = getattr(f, authmethod)

                    # then continue looking for the method
                    l = method.split(".")
                    for i in l:
                        f = getattr(f, i)
            except:
                info = sys.exc_info()
                try:
                    resp = buildSOAP(faultType("%s:Client" % NS.ENV_T,
                                               "Method Not Found",
                                               "%s : %s %s %s" % (nsmethod,
                                                                  info[0],
                                                                  info[1],
                                                                  info[2])),
                                     encoding = self.server.encoding,
                                     config = self.server.config)
                finally:
                    del info
                status = 500
            else:
                try:
                    if header:
                        x = HeaderHandler(header, attrs)

                    fr = 1

                    # call context book keeping
                    # We're stuffing the method into the soapaction if there
                    # isn't one, someday, we'll set that on the client
                    # and it won't be necessary here
                    # for now we're doing both

                    if "SOAPAction".lower() not in self.headers.keys() or \
                       self.headers["SOAPAction"] == "\"\"":
                        self.headers["SOAPAction"] = method
                        
                    thread_id = thread.get_ident()
                    _contexts[thread_id] = SOAPContext(header, body,
                                                       attrs, data,
                                                       self.connection,
                                                       self.headers,
                                                       self.headers["SOAPAction"])

                    # Do an authorization check
                    if a != None:
                        if not apply(a, (), {"_SOAPContext" :
                                             _contexts[thread_id] }):
                            raise faultType("%s:Server" % NS.ENV_T,
                                            "Authorization failed.",
                                            "%s" % nsmethod)
                    
                    # If it's wrapped, some special action may be needed
                    if isinstance(f, MethodSig):
                        c = None
                    
                        if f.context:  # retrieve context object
                            c = _contexts[thread_id]

                        if Config.specialArgs:
                            if c:
                                named_args["_SOAPContext"] = c
                            fr = apply(f, ordered_args, named_args)
                        elif f.keywords:
                            # This is lame, but have to de-unicode
                            # keywords
                            
                            strkw = {}
                            
                            for (k, v) in kw.items():
                                strkw[str(k)] = v
                            if c:
                                strkw["_SOAPContext"] = c
                            fr = apply(f, (), strkw)
                        elif c:
                            fr = apply(f, args, {'_SOAPContext':c})
                        else:
                            fr = apply(f, args, {})

                    else:
                        if Config.specialArgs:
                            fr = apply(f, ordered_args, named_args)
                        else:
                            fr = apply(f, args, {})

                    
                    if type(fr) == type(self) and \
                        isinstance(fr, voidType):
                        resp = buildSOAP(kw = {'%sResponse' % method: fr},
                            encoding = self.server.encoding,
                            config = self.server.config)
                    else:
                        resp = buildSOAP(kw =
                            {'%sResponse' % method: {'Result': fr}},
                            encoding = self.server.encoding,
                            config = self.server.config)

                    # Clean up _contexts
                    if _contexts.has_key(thread_id):
                        del _contexts[thread_id]
                        
                except Exception, e:
                    import traceback
                    info = sys.exc_info()

                    try:
                        if self.server.config.dumpFaultInfo:
                            s = 'Method %s exception' % nsmethod
                            debugHeader(s)
                            traceback.print_exception(info[0], info[1],
                                                      info[2])
                            debugFooter(s)

                        if isinstance(e, faultType):
                            f = e
                        else:
                            f = faultType("%s:Server" % NS.ENV_T,
                                          "Method Failed",
                                          "%s" % nsmethod)

                        if self.server.config.returnFaultInfo:
                            f._setDetail("".join(traceback.format_exception(
                                info[0], info[1], info[2])))
                        elif not hasattr(f, 'detail'):
                            f._setDetail("%s %s" % (info[0], info[1]))
                    finally:
                        del info

                    resp = buildSOAP(f, encoding = self.server.encoding,
                       config = self.server.config)
                    status = 500
                else:
                    status = 200
        except faultType, e:
            import traceback
            info = sys.exc_info()
            try:
                if self.server.config.dumpFaultInfo:
                    s = 'Received fault exception'
                    debugHeader(s)
                    traceback.print_exception(info[0], info[1],
                        info[2])
                    debugFooter(s)

                if self.server.config.returnFaultInfo:
                    e._setDetail("".join(traceback.format_exception(
                            info[0], info[1], info[2])))
                elif not hasattr(e, 'detail'):
                    e._setDetail("%s %s" % (info[0], info[1]))
            finally:
                del info

            resp = buildSOAP(e, encoding = self.server.encoding,
                config = self.server.config)
            status = 500
        except Exception, e:
            # internal error, report as HTTP server error

            if self.server.config.dumpFaultInfo:
                s = 'Internal exception %s' % e
                import traceback
                debugHeader(s)
                info = sys.exc_info()
                try:
                    traceback.print_exception(info[0], info[1], info[2])
                finally:
                    del info

                debugFooter(s)

            self.send_response(500)
            self.end_headers()

            if self.server.config.dumpHeadersOut and \
                self.request_version != 'HTTP/0.9':
                s = 'Outgoing HTTP headers'
                debugHeader(s)
                if self.responses.has_key(status):
                    s = ' ' + self.responses[status][0]
                else:
                    s = ''
                print "%s %d%s" % (self.protocol_version, 500, s)
                print "Server:", self.version_string()
                print "Date:", self.__last_date_time_string
                debugFooter(s)
        else:
            # got a valid SOAP response
            self.send_response(status)

            t = 'text/xml';
            if self.server.encoding != None:
                t += '; charset=%s' % self.server.encoding
            self.send_header("Content-type", t)
            self.send_header("Content-length", str(len(resp)))
            self.end_headers()

            if self.server.config.dumpHeadersOut and \
                self.request_version != 'HTTP/0.9':
                s = 'Outgoing HTTP headers'
                debugHeader(s)
                if self.responses.has_key(status):
                    s = ' ' + self.responses[status][0]
                else:
                    s = ''
                print "%s %d%s" % (self.protocol_version, status, s)
                print "Server:", self.version_string()
                print "Date:", self.__last_date_time_string
                print "Content-type:", t
                print "Content-length:", len(resp)
                debugFooter(s)

            if self.server.config.dumpSOAPOut:
                s = 'Outgoing SOAP'
                debugHeader(s)
                print resp,
                if resp[-1] != '\n':
                    print
                debugFooter(s)

            self.wfile.write(resp)
            self.wfile.flush()

            # We should be able to shut down both a regular and an SSL
            # connection, but under Python 2.1, calling shutdown on an
            # SSL connections drops the output, so this work-around.
            # This should be investigated more someday.

            if self.server.config.SSLserver and \
                isinstance(self.connection, SSL.Connection):
                self.connection.set_shutdown(SSL.SSL_SENT_SHUTDOWN |
                    SSL.SSL_RECEIVED_SHUTDOWN)
            else:
                self.connection.shutdown(1)

    def do_GET(self):

        #print 'command        ', self.command
        #print 'path           ', self.path
        #print 'request_version', self.request_version
        #print 'headers'
        #print '   type    ', self.headers.type
        #print '   maintype', self.headers.maintype
        #print '   subtype ', self.headers.subtype
        #print '   params  ', self.headers.plist

        path = self.path.lower()
        if path.endswith('wsdl'):
            method = 'wsdl'
            function = namespace = None
            if self.server.funcmap.has_key(namespace) \
                    and self.server.funcmap[namespace].has_key(method):
                function = self.server.funcmap[namespace][method]
            else:
                if namespace in self.server.objmap.keys():
                    function = self.server.objmap[namespace]
                    l = method.split(".")
                    for i in l:
                        function = getattr(function, i)

            if function:
                self.send_response(200)
                self.send_header("Content-type", 'text/plain')
                self.end_headers()
                response = apply(function, ())
                self.wfile.write(str(response))
                return

        # return error
        self.send_response(200)
        self.send_header("Content-type", 'text/html')
        self.end_headers()
        self.wfile.write('''\
<title>
<head>Error!</head>
</title>

<body>
<h1>Oops!</h1>

<p>
  This server supports HTTP GET requests only for the purpose of
  obtaining Web Services Description Language (WSDL) for a specific
  service.

  Either you requested an URL that does not end in "wsdl" or this
  server does not implement a wsdl method.
</p>


</body>''')

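Both fault paths in do_POST bind sys.exc_info() to a local name and then delete it in a finally block: the traceback object references the current frame, so holding it in a local creates a reference cycle that is worth breaking promptly in a long-running server (a Python 2-era idiom; Python 3 code usually reads the traceback off the exception object instead). A minimal sketch of that pattern:

import sys
import traceback


def build_fault_detail():
    # Bind the exc_info triple, use it, and release it in finally: the
    # traceback references the calling frame, so dropping it promptly
    # avoids keeping that frame (and its locals) alive.
    info = sys.exc_info()
    try:
        return '%s %s\n%s' % (
            info[0], info[1], ''.join(traceback.format_tb(info[2])))
    finally:
        del info


try:
    {}['missing']
except KeyError:
    print(build_fault_detail())
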
Example 34

Project: vulnsrv
Source File: vulnsrv.py
View license
    def do_GET(self):
        reqp = _urlparse(self.path)
        try:
            getParams = query2dict(reqp.query.encode('ascii'))
        except ValueError:
            _type, e, _traceback = sys.exc_info()
            self.send_error(400, 'Invalid query format: ' + str(e))
            return
        sessionID = self._getSessionID()

        if reqp.path == '/':
            self._writeHtmlDoc(_uc('''
<ol class="mainMenu">
<li><a href="clientauth/">Client-Side Authorization Check</a></li>
<li><a href="mac/">MAC Length Extension</a></li>
<li><a href="csrf/">Cross-Site Request Forgery (CSRF)</a></li>
<li><a href="reflected_xss/?username=Benutzer%21">Reflected Cross-Site Scripting (XSS)</a></li>
<li><a href="stored_xss/?username=Benutzer%21">Stored Cross-Site Scripting (XSS)</a></li>
<li><a href="sqlinjection/">SQL Injection</a></li>
<li><a href="pathtraversal/">Path Traversal</a></li>
</ol>'''), 'vulnsrv', sessionID)
        elif reqp.path == '/clientauth/':
            js_code = html.escape('if (\'you\' != \'admin\') {alert(\'Zugriff verweigert!\'); return false;} else return true;', True)
            self._writeHtmlDoc(
                _uc('''
    <p>Finden Sie das Geheimnis heraus!</p>

    <form action="secret" method="post">
    <input type="submit" value="Geheimnis herausfinden"
    onclick="%s" />
    %s
    </form>
    ''') % (js_code, self._getCsrfTokenField(sessionID)),
                'Client-Side Authorization Check', sessionID)
        elif reqp.path == '/csrf/':
            self._writeHtmlDoc(
                _uc('''
<p>Mit dem untenstehendem Formular k&ouml;nnen Sie Nachrichten schreiben.
Erstellen Sie eine HTML-Datei <code>evil-csrf.html</code>, bei deren Aufruf der arglose Benutzer hier unfreiwillig eine &uuml;belgesinnte Nachricht hinterl&auml;sst.
</p>

<form action="send" enctype="application/x-www-form-urlencoded" method="post">
<input type="text" name="message" autofocus="autofocus" required="required" placeholder="Eine freundliche Nachricht" size="50" />
<input type="submit" value="Senden" />
</form>
''') + msgsToHtml(self.vulnState.csrfMessages), 'CSRF', sessionID)
        elif reqp.path == '/reflected_xss/':
            username = getParams.get('username', 'Unbekannter')
            self._writeHtmlDoc(_uc(
                '''<div>Hallo %s</div>
<p>Das untenstehende Formular ist gegen Cross-Site Request Forgery gesch&uuml;tzt.
Erstellen Sie eine HTML-Datei <code>evil-reflected-xss.html</code>, bei deren Aufruf der arglose Benutzer hier trotzdem unfreiwillig eine &uuml;belgesinnte Nachricht hinterl&auml;sst.
</p>

<form action="send" enctype="application/x-www-form-urlencoded" method="post">
<input type="text" name="message" autofocus="autofocus" required="required" placeholder="Eine freundliche Nachricht" size="50" />
%s
<input type="submit" value="Senden" />
</form>
''') % (_uc(username), self._getCsrfTokenField(sessionID)) + msgsToHtml(self.vulnState.reflected_xss_messages), 'Reflected XSS', sessionID)
        elif reqp.path == '/stored_xss/':
            self._writeHtmlDoc(_uc(
                '''<div>Hallo <span class="userid">%s</span></div>
<p>Das untenstehende Formular ist gegen Cross-Site Request Forgery gesch&uuml;tzt.
Sorgen Sie daf&uuml;r, dass jeder Benutzer der diese Seite aufruft unfreiwillig eine Nachricht hinterl&auml;sst, die IP und Port des Benutzers beinhaltet.
</p>

<form action="send" enctype="application/x-www-form-urlencoded" method="post">
<input type="text" name="message" autocomplete="off" autofocus="autofocus" required="required" placeholder="Eine freundliche Nachricht" size="50" />
%s
<input type="submit" value="Senden" />
</form>
%s

<script>
function show(messages_json) {
    var messages = JSON.parse(messages_json);
    var list = document.querySelector('.messages');
    messages.forEach(function(m) {
        var li = document.createElement('li');
        li.appendChild(document.createTextNode(m));
        list.appendChild(li);
    });
}

function download() {
    var xhr = new XMLHttpRequest();
    xhr.dataType = 'text';
    xhr.onload = function(e) {
        show(xhr.responseText);
    };
    xhr.open('GET', 'json');
    xhr.send();
}

function send(msg) {
    var xhr = new XMLHttpRequest();
    var token = document.querySelector('input[name="csrfToken"]').value;
    var params = 'csrfToken=' + encodeURIComponent(token) + '&message=' +encodeURIComponent(msg);
    xhr.open('POST', 'send');
    xhr.setRequestHeader('Content-type', 'application/x-www-form-urlencoded');
    xhr.send(params);

}

function user() {
    return document.querySelector('.userid').textContent;
}
</script>

<script>
// JSON direkt einbinden
var messages_json = '%s';
show(messages_json);

// Vorheriger Code:
// download();

</script>

<form action="clear" enctype="application/x-www-form-urlencoded" method="post">
%s
<button role="submit">Alle Nachrichten l&ouml;schen</button
</form>

''') % (_uc(':').join(map(_uc, self.client_address)), self._getCsrfTokenField(sessionID), msgsToHtml([]), json.dumps(self.vulnState.stored_xss_messages), self._getCsrfTokenField(sessionID)), 'Stored XSS', sessionID)
        elif reqp.path == '/sqlinjection/':
            webMessages = self.vulnState.sqlQuery("SELECT id,msg FROM messages WHERE user='web'")
            self._writeHtmlDoc(_uc('''
<p>In der untenstehenden Tabelle sehen Sie die Nachrichten an den Benutzer <code>web</code>. Welche Nachrichten hat der Benutzer <code>admin</code> bekommen?</p>

<h2>Nachrichten an <code>web</code></h2>

<ul class="messages">
%s
</ul>''') % '\n'.join('<li><a href="/sqlinjection/msg?id=' + html.escape(str(row[0])) + '">' + html.escape(row[1]) + '</a></li>' for row in webMessages), 'SQL Injection', sessionID)
        elif reqp.path == '/sqlinjection/msg':
            msgNum = getParams.get('id', '')
            sql = "SELECT id,user,msg FROM messages WHERE user='web' AND id='" + msgNum + "'"
            try:
                msgs = self.vulnState.sqlQuery(sql)
                if len(msgs) == 0:
                    msg_html = '<td colspan="3">Keine web-Nachrichten gefunden</td>'
                else:
                    msg_html = '\n'.join('<tr>' + ''.join('<td>' + html.escape(str(cell)) + '</td>' for cell in row) + '</tr>' for row in msgs)
            except:
                _type, e, _traceback = sys.exc_info()
                msg_html = '<td colspan="3" class="error">' + html.escape(str(e)) + '</td>'
            self._writeHtmlDoc(('''
<table class="messages">
<thead><tr><th>ID</th><th>Benutzer</th><th>Nachricht</th></tr></thead>
%s
</table>
<p><a href="/sqlinjection/">Zur&uuml;ck zur &Uuml;bersicht</a></p>
''' % msg_html), 'Detailansicht: Nachricht ' + msgNum, sessionID)
        elif reqp.path == '/pathtraversal/':
            fileHtml = _uc('').join(
                _uc('<li><a href="get?') + html.escape(urlencode([('file', fn)])) + _uc('">') + html.escape(fn) + _uc('</a></li>\n')
                for fn in FILES['/var/www/img']['content'])
            self._writeHtmlDoc(_uc('''
<p>Welchen Unix-Account sollte ein Angreifer n&auml;her untersuchen?</p>

<p><em>Bonus-Aufgabe</em>: Was ist das Passwort des Accounts?</p>

<p>Dateien zum Download:</p>

<ul>
%s
</ul>''' % fileHtml), 'Path Traversal', sessionID)
        elif reqp.path == '/pathtraversal/get':
            fn = '/var/www/img/' + getParams.get('file', '')
            # Resolve the path.
            # If we were using a real filesystem, this would be done automatically by the OS filesystem functions, of course
            curPath = []
            for pel in fn.split('/'):
                if pel == '' or pel == '.':
                    continue
                if pel == '..':
                    if len(curPath) > 0:
                        curPath.pop()
                    # else: We're at the root, and /../ is /
                else:
                    curPath.append(pel)
            finalPath = '/' + '/'.join(curPath)
            if finalPath.endswith('/'):
                finalPath = finalPath[:-1]
            if finalPath in FILES:
                fdata = FILES[finalPath]
                if fdata['type'] == '__directory__':
                    self.send_error(404, 'Is a directory')
                else:
                    fileBlob = base64.b64decode(fdata['blob_b64'].encode('ascii'))
                    self.send_response(200)
                    self.send_header('Content-Type', fdata['type'])
                    self.send_header('Content-Length', str(len(fileBlob)))
                    self.end_headers()
                    self.wfile.write(fileBlob)
            else:
                self.send_error(404)
        elif reqp.path == '/mac/':
            cookies = self._readCookies()
            raw_cookie = cookies.get('mac_session')
            if raw_cookie is not None:
                if isinstance(raw_cookie, compat_bytes):  # Python 2.x
                    raw_cookie = raw_cookie.decode('latin1')
                mac, _, session_data_str = raw_cookie.rpartition(_uc('!'))
                session_data = session_data_str.encode('latin1')
                secret = self.vulnState.macSecret
                if hashlib.sha256(secret + session_data).hexdigest() == mac:
                    session = query2dict(session_data)
                    user = session['user']
                    timestamp = session['time']
                else:
                    user = timestamp = _uc('(Falscher MAC)')
            else:
                raw_cookie = _uc('')
                user = timestamp = _uc('(Nicht gesetzt)')

            assert isinstance(raw_cookie, _uc)
            raw_cookie_hex = binascii.b2a_hex(raw_cookie.encode('utf-8')).decode('ascii')
            assert isinstance(raw_cookie_hex, _uc)
            self._writeHtmlDoc(_uc('''
<p>Loggen Sie sich als Benutzer admin ein (ohne das Geheimnis aus dem Server-Prozess auszulesen).
Schreiben Sie daf&#x00fc;r ein Programm, das den korrekten Cookie-Wert berechnet.</p>

<form method="post" action="login">
%s
<input type="submit" value="Gast-Login" />
</form>

<h3>Aktuelle Session-Daten:</h3>

<p>Cookie (roh): <code>%s</code> (%s Bytes)</p>

<dl>
<dt>Benutzername:</dt><dd>%s</dd>
<dt>Login-Zeit:</dt><dd>%s</dd>
</dl>

<p>F&#x00fc;r den Angriff k&#x00f6;nnen Sie <a href="mac_attack.py">dieses Python-Skript</a> verwenden.
Das Skript erwartet, dass im lokalen Verzeichnis eine ausf&#x00fc;hrbare Datei ./mac_extension liegt, die mit den Argumenten <code>[Bekannter Hash]</code> <code>[Bekannte Eingabe]</code> <code>[Einzuf&#x00fc;gende Daten]</code> <code>[L&#x00e4;nge des secrets in Bytes (32)]</code> aufgerufen werden kann und das exploit zur&#x00fc;ckgibt.
</p>
      ''' % (
                self._getCsrfTokenField(sessionID),
                html.escape(raw_cookie),
                html.escape(_uc(len(raw_cookie))),
                html.escape(user),
                html.escape(timestamp)
            )), 'Length Extension-Angriffe gegen MAC', sessionID)
        elif reqp.path == '/mac/mac_attack.py':
            fdata = FILES['/mac/mac_attack.py']
            fileBlob = base64.b64decode(fdata['blob_b64'].encode('ascii'))
            self.send_response(200)
            self.send_header('Content-Type', fdata['type'])
            self.send_header('Content-Length', str(len(fileBlob)))
            self.end_headers()
            self.wfile.write(fileBlob)
        elif reqp.path == '/favicon.ico':
            self.send_response(200)
            self.send_header('Content-Type', 'image/png')
            self.send_header('Content-Length', str(len(FAVICON)))
            self.end_headers()
            self.wfile.write(FAVICON)
        elif reqp.path == '/stored_xss/json':
            self._write_json(self.vulnState.stored_xss_messages)
        else:
            self.send_error(404)
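
do_GET above never names the exception in its except clauses; it unpacks sys.exc_info() instead, which avoids the "except E, e" (Python 2) versus "except E as e" (Python 3) syntax difference in a file meant to run under both. A small sketch of that idiom (safe_int is illustrative, not part of vulnsrv):

import sys


def safe_int(raw):
    try:
        return True, int(raw)
    except ValueError:
        # Unpacking sys.exc_info() reaches the exception object without the
        # "except E, e" (Py2) vs "except E as e" (Py3) syntax difference.
        _type, e, _traceback = sys.exc_info()
        return False, 'Invalid query format: ' + str(e)


print(safe_int('42'))
print(safe_int('not-a-number'))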

Example 35

View license
    def urlopen(self, method, url, body=None, headers=None, retries=None,
                redirect=True, assert_same_host=True, timeout=_Default,
                pool_timeout=None, release_conn=None, **response_kw):
        """
        Get a connection from the pool and perform an HTTP request. This is the
        lowest level call for making a request, so you'll need to specify all
        the raw details.

        .. note::

           More commonly, it's appropriate to use a convenience method provided
           by :class:`.RequestMethods`, such as :meth:`request`.

        .. note::

           `release_conn` will only behave as expected if
           `preload_content=False` because we want to make
           `preload_content=False` the default behaviour someday soon without
           breaking backwards compatibility.

        :param method:
            HTTP request method (such as GET, POST, PUT, etc.)

        :param body:
            Data to send in the request body (useful for creating
            POST requests, see HTTPConnectionPool.post_url for
            more convenience).

        :param headers:
            Dictionary of custom headers to send, such as User-Agent,
            If-None-Match, etc. If None, pool headers are used. If provided,
            these headers completely replace any pool-specific headers.

        :param retries:
            Configure the number of retries to allow before raising a
            :class:`~urllib3.exceptions.MaxRetryError` exception.

            Pass ``None`` to retry until you receive a response. Pass a
            :class:`~urllib3.util.retry.Retry` object for fine-grained control
            over different types of retries.
            Pass an integer number to retry connection errors that many times,
            but no other types of errors. Pass zero to never retry.

            If ``False``, then retries are disabled and any exception is raised
            immediately. Also, instead of raising a MaxRetryError on redirects,
            the redirect response will be returned.

        :type retries: :class:`~urllib3.util.retry.Retry`, False, or an int.

        :param redirect:
            If True, automatically handle redirects (status codes 301, 302,
            303, 307, 308). Each redirect counts as a retry. Disabling retries
            will disable redirect, too.

        :param assert_same_host:
            If ``True``, will make sure that the host of the pool requests is
            consistent else will raise HostChangedError. When False, you can
            use the pool on an HTTP proxy and request foreign hosts.

        :param timeout:
            If specified, overrides the default timeout for this one
            request. It may be a float (in seconds) or an instance of
            :class:`urllib3.util.Timeout`.

        :param pool_timeout:
            If set and the pool is set to block=True, then this method will
            block for ``pool_timeout`` seconds and raise EmptyPoolError if no
            connection is available within the time period.

        :param release_conn:
            If False, then the urlopen call will not release the connection
            back into the pool once a response is received (but will release if
            you read the entire contents of the response such as when
            `preload_content=True`). This is useful if you're not preloading
            the response's content immediately. You will need to call
            ``r.release_conn()`` on the response ``r`` to return the connection
            back into the pool. If None, it takes the value of
            ``response_kw.get('preload_content', True)``.

        :param \**response_kw:
            Additional parameters are passed to
            :meth:`urllib3.response.HTTPResponse.from_httplib`
        """
        if headers is None:
            headers = self.headers

        if not isinstance(retries, Retry):
            retries = Retry.from_int(retries, redirect=redirect, default=self.retries)

        if release_conn is None:
            release_conn = response_kw.get('preload_content', True)

        # Check host
        if assert_same_host and not self.is_same_host(url):
            raise HostChangedError(self, url, retries)

        conn = None

        # Merge the proxy headers. Only do this in HTTP. We have to copy the
        # headers dict so we can safely change it without those changes being
        # reflected in anyone else's copy.
        if self.scheme == 'http':
            headers = headers.copy()
            headers.update(self.proxy_headers)

        # Must keep the exception bound to a separate variable or else Python 3
        # complains about UnboundLocalError.
        err = None

        try:
            # Request a connection from the queue.
            timeout_obj = self._get_timeout(timeout)
            conn = self._get_conn(timeout=pool_timeout)

            conn.timeout = timeout_obj.connect_timeout

            is_new_proxy_conn = self.proxy is not None and not getattr(conn, 'sock', None)
            if is_new_proxy_conn:
                self._prepare_proxy(conn)

            # Make the request on the httplib connection object.
            httplib_response = self._make_request(conn, method, url,
                                                  timeout=timeout_obj,
                                                  body=body, headers=headers)

            # If we're going to release the connection in ``finally:``, then
            # the request doesn't need to know about the connection. Otherwise
            # it will also try to release it and we'll have a double-release
            # mess.
            response_conn = not release_conn and conn

            # Import httplib's response into our own wrapper object
            response = HTTPResponse.from_httplib(httplib_response,
                                                 pool=self,
                                                 connection=response_conn,
                                                 **response_kw)

            # else:
            #     The connection will be put back into the pool when
            #     ``response.release_conn()`` is called (implicitly by
            #     ``response.read()``)

        except Empty:
            # Timed out by queue.
            raise EmptyPoolError(self, "No pool connections are available.")

        except (BaseSSLError, CertificateError) as e:
            # Close the connection. If a connection is reused on which there
            # was a Certificate error, the next request will certainly raise
            # another Certificate error.
            if conn:
                conn.close()
                conn = None
            raise SSLError(e)

        except SSLError:
            # Treat SSLError separately from BaseSSLError to preserve
            # traceback.
            if conn:
                conn.close()
                conn = None
            raise

        except (TimeoutError, HTTPException, SocketError, ConnectionError) as e:
            if conn:
                # Discard the connection for these exceptions. It will be
                # replaced during the next _get_conn() call.
                conn.close()
                conn = None

            if isinstance(e, SocketError) and self.proxy:
                e = ProxyError('Cannot connect to proxy.', e)
            elif isinstance(e, (SocketError, HTTPException)):
                e = ProtocolError('Connection aborted.', e)

            retries = retries.increment(method, url, error=e, _pool=self,
                                        _stacktrace=sys.exc_info()[2])
            retries.sleep()

            # Keep track of the error for the retry warning.
            err = e

        finally:
            if release_conn:
                # Put the connection back to be reused. If the connection is
                # expired then it will be None, which will get replaced with a
                # fresh connection during _get_conn.
                self._put_conn(conn)

        if not conn:
            # Try again
            log.warning("Retrying (%r) after connection "
                        "broken by '%r': %s" % (retries, err, url))
            return self.urlopen(method, url, body, headers, retries,
                                redirect, assert_same_host,
                                timeout=timeout, pool_timeout=pool_timeout,
                                release_conn=release_conn, **response_kw)

        # Handle redirect?
        redirect_location = redirect and response.get_redirect_location()
        if redirect_location:
            if response.status == 303:
                method = 'GET'

            try:
                retries = retries.increment(method, url, response=response, _pool=self)
            except MaxRetryError:
                if retries.raise_on_redirect:
                    raise
                return response

            log.info("Redirecting %s -> %s" % (url, redirect_location))
            return self.urlopen(method, redirect_location, body, headers,
                    retries=retries, redirect=redirect,
                    assert_same_host=assert_same_host,
                    timeout=timeout, pool_timeout=pool_timeout,
                    release_conn=release_conn, **response_kw)

        # Check if we should retry the HTTP response.
        if retries.is_forced_retry(method, status_code=response.status):
            retries = retries.increment(method, url, response=response, _pool=self)
            retries.sleep()
            log.info("Forced retry: %s" % url)
            return self.urlopen(method, url, body, headers,
                    retries=retries, redirect=redirect,
                    assert_same_host=assert_same_host,
                    timeout=timeout, pool_timeout=pool_timeout,
                    release_conn=release_conn, **response_kw)

        return response

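The sys.exc_info() call above supplies _stacktrace=sys.exc_info()[2] to Retry.increment, handing the traceback of the caught socket/HTTP error to the retry bookkeeping so it can still be reported once the retry budget runs out. Below is a minimal, self-contained sketch of the same pattern, capturing the traceback in the except block and passing it to a separate handler; the fetch and handle_error names are illustrative and not part of urllib3.

import sys
import traceback


def handle_error(error, stacktrace=None):
    """Log the error together with the traceback captured by the caller."""
    if stacktrace is not None:
        print("".join(traceback.format_tb(stacktrace)))
    print("error: %r" % (error,))


def fetch(url):
    try:
        raise IOError("connection aborted while fetching %s" % url)
    except IOError as e:
        # sys.exc_info() returns (type, value, traceback); index [2] is the
        # traceback object for the exception currently being handled.
        handle_error(e, stacktrace=sys.exc_info()[2])


fetch("http://example.com/")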

Example 38

Project: quality-assessment-protocol
Source File: cli.py
View license
def _run_workflow(args):

    # build pipeline for each subject, individually
    # ~ 5 min 20 sec per subject
    # (roughly 320 seconds)

    import os
    import os.path as op
    import sys

    import nipype.interfaces.io as nio
    import nipype.pipeline.engine as pe

    import nipype.interfaces.utility as util
    import nipype.interfaces.fsl.maths as fsl

    import glob

    import time
    from time import strftime
    from nipype import config as nyconfig

    resource_pool, config, subject_info, run_name, site_name = args
    sub_id = str(subject_info[0])

    qap_type = config['qap_type']

    if subject_info[1]:
        session_id = subject_info[1]
    else:
        session_id = "session_0"

    if subject_info[2]:
        scan_id = subject_info[2]
    else:
        scan_id = "scan_0"

    # Read and apply general settings in config
    keep_outputs = config.get('write_all_outputs', False)
    output_dir = op.join(config["output_directory"], run_name,
                         sub_id, session_id, scan_id)

    try:
        os.makedirs(output_dir)
    except:
        if not op.isdir(output_dir):
            err = "[!] Output directory unable to be created.\n" \
                  "Path: %s\n\n" % output_dir
            raise Exception(err)
        else:
            pass

    log_dir = output_dir

    # set up logging
    nyconfig.update_config(
        {'logging': {'log_directory': log_dir, 'log_to_file': True}})
    logging.update_logging(nyconfig)

    # take date+time stamp for run identification purposes
    unique_pipeline_id = strftime("%Y%m%d%H%M%S")
    pipeline_start_stamp = strftime("%Y-%m-%d_%H:%M:%S")

    pipeline_start_time = time.time()

    logger.info("Pipeline start time: %s" % pipeline_start_stamp)
    logger.info("Contents of resource pool:\n" + str(resource_pool))
    logger.info("Configuration settings:\n" + str(config))

    # for QAP spreadsheet generation only
    config.update({"subject_id": sub_id, "session_id": session_id,
                   "scan_id": scan_id, "run_name": run_name})

    if site_name:
        config["site_name"] = site_name

    workflow = pe.Workflow(name=scan_id)
    workflow.base_dir = op.join(config["working_directory"], sub_id,
                                session_id)

    # set up crash directory
    workflow.config['execution'] = \
        {'crashdump_dir': config["output_directory"]}

    # update that resource pool with what's already in the output directory
    for resource in os.listdir(output_dir):
        if (op.isdir(op.join(output_dir, resource)) and
                resource not in resource_pool.keys()):
            resource_pool[resource] = glob.glob(op.join(output_dir,
                                                        resource, "*"))[0]

    # resource pool check
    invalid_paths = []

    for resource in resource_pool.keys():
        if not op.isfile(resource_pool[resource]):
            invalid_paths.append((resource, resource_pool[resource]))

    if len(invalid_paths) > 0:
        err = "\n\n[!] The paths provided in the subject list to the " \
              "following resources are not valid:\n"

        for path_tuple in invalid_paths:
            err = err + path_tuple[0] + ": " + path_tuple[1] + "\n"

        err = err + "\n\n"
        raise Exception(err)

    # start connecting the pipeline
    if 'qap_' + qap_type not in resource_pool.keys():
        from qap import qap_workflows as qw
        wf_builder = getattr(qw, 'qap_' + qap_type + '_workflow')
        workflow, resource_pool = wf_builder(workflow, resource_pool, config)

    # set up the datasinks
    new_outputs = 0

    out_list = set(['qap_' + qap_type])

    # Save reports to out_dir if necessary
    if config.get('write_report', False):
        out_list.add('qap_mosaic')
        # The functional temporal also has an FD plot
        if 'functional_temporal' in qap_type:
            out_list.add('qap_fd')

    if keep_outputs:
        for k in resource_pool.keys():
            out_list.add(k)

    for output in list(out_list):
        # we use a check for len()==2 here to select those items in the
        # resource pool which are tuples of (node, node_output), instead
        # of the items which are straight paths to files

        # resource pool items which are in the tuple format are the
        # outputs that have been created in this workflow because they
        # were not present in the subject list YML (the starting resource
        # pool) and had to be generated
        if len(resource_pool[output]) == 2:
            ds = pe.Node(nio.DataSink(), name='datasink_%s' % output)
            ds.inputs.base_directory = output_dir
            node, out_file = resource_pool[output]
            workflow.connect(node, out_file, ds, output)
            new_outputs += 1

    rt = {'id': sub_id, 'session': session_id, 'scan': scan_id,
          'status': 'started'}
    # run the pipeline (if there is anything to do)
    if new_outputs > 0:
        if config.get('write_graph', False):
            workflow.write_graph(
                dotfilename=op.join(output_dir, run_name + ".dot"),
                simple_form=False)

        nc_per_subject = config.get('num_cores_per_subject', 1)
        runargs = {'plugin': 'Linear', 'plugin_args': {}}
        if nc_per_subject > 1:
            runargs['plugin'] = 'MultiProc'
            runargs['plugin_args'] = {'n_procs': nc_per_subject}

        try:
            workflow.run(**runargs)
            rt['status'] = 'finished'
        except Exception as e:
            # ... however this is run inside a pool.map: do not raise Exception
            etype, evalue, etrace = sys.exc_info()
            tb = format_exception(etype, evalue, etrace)
            rt.update({'status': 'failed', 'msg': '%s' % e, 'traceback': tb})
            logger.error('An error occurred processing subject %s. '
                         'Runtime dict: %s\n%s' %
                         (rt['id'], rt, '\n'.join(rt['traceback'])))
    else:
        rt['status'] = 'cached'
        logger.info("\nEverything is already done for subject %s." % sub_id)

    # Remove working directory when done
    if not keep_outputs:
        try:
            work_dir = op.join(workflow.base_dir, scan_id)

            if op.exists(work_dir):
                import shutil
                shutil.rmtree(work_dir)
        except:
            logger.warn("Couldn\'t remove the working directory!")
            pass

    pipeline_end_stamp = strftime("%Y-%m-%d_%H:%M:%S")
    pipeline_end_time = time.time()
    logger.info("Elapsed time (minutes) since last start: %s"
                % ((pipeline_end_time - pipeline_start_time) / 60))
    logger.info("Pipeline end time: %s" % pipeline_end_stamp)
    return rt

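Because _run_workflow above executes inside a multiprocessing pool, it cannot simply let the exception propagate; instead it unpacks sys.exc_info() and stores the formatted traceback in the status dict it returns. A stripped-down sketch of that pattern follows, with illustrative do_work and run_subject stand-ins for the real pipeline.

import sys
from traceback import format_exception
from multiprocessing import Pool


def do_work(subject_id):
    # Stand-in for the real per-subject workflow.
    if subject_id % 2:
        raise ValueError("processing failed for subject %d" % subject_id)


def run_subject(subject_id):
    rt = {'id': subject_id, 'status': 'started'}
    try:
        do_work(subject_id)
        rt['status'] = 'finished'
    except Exception as e:
        # Running under pool.map: record the failure instead of raising, and
        # keep the full traceback as text so the parent process can log it.
        etype, evalue, etrace = sys.exc_info()
        rt.update({'status': 'failed', 'msg': str(e),
                   'traceback': format_exception(etype, evalue, etrace)})
    return rt


if __name__ == '__main__':
    for result in Pool(2).map(run_subject, range(4)):
        print("%s -> %s" % (result['id'], result['status']))
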
Example 39

Project: tp-qemu
Source File: cdrom.py
View license
@error.context_aware
def run(test, params, env):
    """
    KVM cdrom test:

    1) Boot up a VM, with one iso image (optional).
    2) Check if VM identifies correctly the iso file.
    3) Verifies that device is unlocked <300s after boot (optional, if
       cdrom_test_autounlock is set).
    4) Eject cdrom using monitor.
    5) Change cdrom image with another iso several times.
    6) Test tray reporting function (optional, if cdrom_test_tray_status is set)
    7) Try to format cdrom and check the return string.
    8) Mount cdrom device.
    9) Copy file from cdrom and compare files.
    10) Umount and mount cdrom in guest for several times.
    11) Check if the cdrom lock works well when iso file is not inserted.
    12) Reboot vm after vm resume from s3/s4.
        Note: This case requires a qemu cli without setting file property
        for -drive option, and will be separated to a different cfg item.

    :param test: kvm test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.

    :param cfg: workaround_eject_time - Some versions of qemu are unable to
                                        eject CDROM directly after insert
    :param cfg: cdrom_test_autounlock - Test whether guest OS unlocks cdrom
                                        after boot (<300s after VM is booted)
    :param cfg: cdrom_test_tray_status - Test tray reporting (eject and insert
                                         CD couple of times in guest).
    :param cfg: cdrom_test_locked -     Test whether cdrom tray lock function
                                        work well in guest.
    :param cfg: cdrom_test_eject -      Test whether cdrom works well after
                                        several times of eject action.
    :param cfg: cdrom_test_file_operation - Test file operation for cdrom,
                                            such as mount/umount, reading files
                                            on cdrom.

    @warning: Check dmesg for block device failures
    """
    # Some versions of qemu are unable to eject CDROM directly after insert
    workaround_eject_time = float(params.get('workaround_eject_time', 0))

    login_timeout = int(params.get("login_timeout", 360))
    cdrom_prepare_timeout = int(params.get("cdrom_preapre_timeout", 360))

    def generate_serial_num():
        length = int(params.get("length", "10"))
        id_leng = random.randint(6, length)
        ignore_str = ",!\"#$%&\'()*+./:;<=>?@[\\]^`{|}~"
        return utils_misc.generate_random_string(id_leng, ignore_str)

    def list_guest_cdroms(session):
        """
        Get cdrom lists from guest os;

        :param session: ShellSession object;
        :param params: test params dict;
        :return: list of cdroms;
        :rtype: list
        """
        list_cdrom_cmd = "wmic cdrom get Drive"
        filter_cdrom_re = "\w:"
        if params["os_type"] != "windows":
            list_cdrom_cmd = "ls /dev/cdrom*"
            filter_cdrom_re = r"/dev/cdrom-\w+|/dev/cdrom\d*"
        output = session.cmd_output(list_cdrom_cmd)
        cdroms = re.findall(filter_cdrom_re, output)
        cdroms.sort()
        return cdroms

    def get_cdrom_mount_point(session, drive_letter, params):
        """
        Get default cdrom mount point;
        """
        mount_point = "/mnt"
        if params["os_type"] == "windows":
            cmd = "wmic volume where DriveLetter='%s' " % drive_letter
            cmd += "get DeviceID | more +1"
            mount_point = session.cmd_output(cmd).strip()
        return mount_point

    @error.context_aware
    def create_iso_image(params, name, prepare=True, file_size=None):
        """
        Creates 'new' iso image with one file on it

        :param params: parameters for test
        :param name: name of new iso image file
        :param prepare: if True then prepare the cd images.
        :param file_size: Size of iso image in MB

        :return: path to new iso image file.
        """
        error.context("Creating test iso image '%s'" % name, logging.info)
        cdrom_cd = params["target_cdrom"]
        cdrom_cd = params[cdrom_cd]
        if not os.path.isabs(cdrom_cd):
            cdrom_cd = utils_misc.get_path(data_dir.get_data_dir(), cdrom_cd)
        iso_image_dir = os.path.dirname(cdrom_cd)
        if file_size is None:
            file_size = 10
        g_mount_point = tempfile.mkdtemp("gluster")
        image_params = params.object_params(name)
        if image_params.get("enable_gluster") == "yes":
            if params.get("gluster_server"):
                gluster_server = params.get("gluster_server")
            else:
                gluster_server = "localhost"
            volume_name = params["gluster_volume_name"]
            g_mount_link = "%s:/%s" % (gluster_server, volume_name)
            mount_cmd = "mount -t glusterfs %s %s" % (g_mount_link, g_mount_point)
            utils.system(mount_cmd, timeout=60)
            file_name = os.path.join(g_mount_point, "%s.iso" % name)
        else:
            file_name = utils_misc.get_path(iso_image_dir, "%s.iso" % name)
        if prepare:
            cmd = "dd if=/dev/urandom of=%s bs=1M count=%d"
            utils.run(cmd % (name, file_size))
            utils.run("mkisofs -o %s %s" % (file_name, name))
            utils.run("rm -rf %s" % (name))
        if image_params.get("enable_gluster") == "yes":
            gluster_uri = gluster.create_gluster_uri(image_params)
            file_name = "%s%s.iso" % (gluster_uri, name)
            try:
                umount_cmd = "umount %s" % g_mount_point
                utils.system(umount_cmd, timeout=60)
                os.rmdir(g_mount_point)
            except Exception, err:
                msg = "Fail to clean up %s" % g_mount_point
                msg += ". Error message: %s" % err
                logging.warn(msg)
        return file_name

    def cleanup_cdrom(path):
        """ Removes created iso image """
        if path:
            error.context("Cleaning up temp iso image '%s'" % path,
                          logging.info)
            if "gluster" in path:
                g_mount_point = tempfile.mkdtemp("gluster")
                g_server, v_name, f_name = path.split("/")[-3:]
                if ":" in g_server:
                    g_server = g_server.split(":")[0]
                g_mount_link = "%s:/%s" % (g_server, v_name)
                mount_cmd = "mount -t glusterfs %s %s" % (g_mount_link,
                                                          g_mount_point)
                utils.system(mount_cmd, timeout=60)
                path = os.path.join(g_mount_point, f_name)
            try:
                logging.debug("Remove the file with os.remove().")
                os.remove("%s" % path)
            except OSError, err:
                logging.warn("Fail to delete %s" % path)
            if "gluster" in path:
                try:
                    umount_cmd = "umount %s" % g_mount_point
                    utils.system(umount_cmd, timeout=60)
                    os.rmdir(g_mount_point)
                except Exception, err:
                    msg = "Fail to clean up %s" % g_mount_point
                    msg += ". Error message: %s" % err
                    logging.warn(msg)

    def get_cdrom_file(vm, qemu_cdrom_device):
        """
        :param vm: VM object
        :param qemu_cdrom_device: qemu monitor device
        :return: file associated with $qemu_cdrom_device device
        """
        blocks = vm.monitor.info("block")
        cdfile = None
        if isinstance(blocks, str):
            tmp_re_str = r'%s: .*file=(\S*) ' % qemu_cdrom_device
            file_list = re.findall(tmp_re_str, blocks)
            if file_list:
                cdfile = file_list[0]
            else:
                # try to deal with new qemu
                tmp_re_str = r'%s: (\S*) \(.*\)' % qemu_cdrom_device
                file_list = re.findall(tmp_re_str, blocks)
                if file_list:
                    cdfile = file_list[0]
        else:
            for block in blocks:
                if block['device'] == qemu_cdrom_device:
                    try:
                        cdfile = block['inserted']['file']
                        break
                    except KeyError:
                        continue
        return cdfile

    def _get_tray_stat_via_monitor(vm, qemu_cdrom_device):
        """
        Get the cdrom tray status via qemu monitor
        """
        is_open, checked = (None, False)

        blocks = vm.monitor.info("block")
        if isinstance(blocks, str):
            for block in blocks.splitlines():
                if qemu_cdrom_device in block:
                    if "tray-open=1" in block:
                        is_open, checked = (True, True)
                    elif "tray-open=0" in block:
                        is_open, checked = (False, True)
            # fallback to new qemu
            tmp_block = ""
            for block_new in blocks.splitlines():
                if tmp_block and "Removable device" in block_new:
                    if "tray open" in block_new:
                        is_open, checked = (True, True)
                    elif "tray closed" in block_new:
                        is_open, checked = (False, True)
                if qemu_cdrom_device in block_new:
                    tmp_block = block_new
                else:
                    tmp_block = ""
        else:
            for block in blocks:
                if block['device'] == qemu_cdrom_device:
                    key = filter(lambda x: re.match(r"tray.*open", x),
                                 block.keys())
                    # compatible rhel6 and rhel7 diff qmp output
                    if not key:
                        break
                    is_open, checked = (block[key[0]], True)
        return (is_open, checked)

    def is_tray_opened(vm, qemu_cdrom_device, mode='monitor',
                       dev_name="/dev/sr0"):
        """
        Checks whether the tray is opened

        :param vm: VM object
        :param qemu_cdrom_device: cdrom image file name.
        :param mode: tray status checking mode, now support:
                     "monitor": get tray status from monitor.
                     "session": get tray status from guest os.
                     "mixed": get tray status first, if failed, try to
                              get the status in guest os again.
        :param dev_name: cdrom device name in guest os.

        :return: True if cdrom tray is open, otherwise False.
                 None if failed to get the tray status.
        """
        is_open, checked = (None, False)

        if mode in ['monitor', 'mixed']:
            is_open, checked = _get_tray_stat_via_monitor(
                vm, qemu_cdrom_device)

        if (mode in ['session', 'mixed']) and not checked:
            session = vm.wait_for_login(timeout=login_timeout)
            tray_cmd = params["tray_check_cmd"] % dev_name
            o = session.cmd_output(tray_cmd)
            if "cdrom is open" in o:
                is_open, checked = (True, True)
            else:
                is_open, checked = (False, True)
        if checked:
            return is_open
        return None

    @error.context_aware
    def check_cdrom_lock(vm, cdrom):
        """
        Checks whether the cdrom is locked

        :param vm: VM object
        :param cdrom: cdrom object

        :return: Cdrom state if locked return True
        """
        error.context("Check cdrom state of locing.")
        blocks = vm.monitor.info("block")
        if isinstance(blocks, str):
            for block in blocks.splitlines():
                if cdrom in block:
                    if "locked=1" in block:
                        return True
                    elif "locked=0" in block:
                        return False
            # deal with new qemu
            lock_str_new = "locked"
            no_lock_str = "not locked"
            tmp_block = ""
            for block_new in blocks.splitlines():
                if tmp_block and "Removable device" in block_new:
                    if no_lock_str in block_new:
                        return False
                    elif lock_str_new in block_new:
                        return True
                if cdrom in block_new:
                    tmp_block = block_new
                else:
                    tmp_block = ""
        else:
            for block in blocks:
                if block['device'] == cdrom and 'locked' in block.keys():
                    return block['locked']
        return None

    @error.context_aware
    def get_device(vm, dev_file_path):
        """
        Get vm device class from device path.

        :param vm: VM object.
        :param dev_file_path: Device file path.
        :return: device object
        """
        error.context("Get cdrom device object")
        device = vm.get_block({'file': dev_file_path})
        if not device:
            device = vm.get_block({'backing_file': dev_file_path})
            if not device:
                raise error.TestFail("Could not find a valid cdrom device")
        return device

    def get_match_cdrom(vm, session, serial_num):
        """
        Find the cdrom in the guest that corresponds to the device on the
        qemu command line, matched by serial number.

        :param session: VM session.
        :param serial_num: serial number of the cdrom.
        :return match_cdrom: the cdrom in the guest that corresponds to the
                             qemu command line device with this serial number.
        """
        error.context("Get matching cdrom in guest", logging.info)
        show_serial_num = "ls -l /dev/disk/by-id"
        serial_num_output = session.cmd_output(show_serial_num)
        if serial_num_output:
            serial_cdrom = ""
            for line in serial_num_output.splitlines():
                if utils_misc.find_substring(str(line), str(serial_num)):
                    serial_cdrom = line.split(" ")[-1].split("/")[-1]
                    break
            if not serial_cdrom:
                qtree_info = vm.monitor.info("qtree")
                raise error.TestFail("Could not find the device whose "
                                     "serial number %s is same in Qemu"
                                     " CML.\n Qtree info: %s" %
                                     (serial_num, qtree_info))

        show_cdrom_cmd = "ls -l /dev/cdrom*"
        dev_cdrom_output = session.cmd_output(show_cdrom_cmd)
        if dev_cdrom_output:
            for line in dev_cdrom_output.splitlines():
                if utils_misc.find_substring(str(line), str(serial_cdrom)):
                    match_cdrom = line.split(" ")[-3]
                    return match_cdrom
            raise error.TestFail("Could not find the corresponding cdrom"
                                 "in guest which is same in Qemu CML.")

    def get_testing_cdrom_device(vm, session, cdrom_dev_list, serial_num=None):
        """
        Get the testing cdrom used for eject
        :param session: VM session
        :param cdrom_dev_list: cdrom_dev_list
        """
        try:
            if params["os_type"] == "windows":
                winutil_drive = utils_misc.get_winutils_vol(session)
                winutil_drive = "%s:" % winutil_drive
                cdrom_dev_list.remove(winutil_drive)
                testing_cdrom_device = cdrom_dev_list[-1]
            else:
                testing_cdrom_device = get_match_cdrom(vm, session, serial_num)
        except IndexError:
            raise error.TestFail("Could not find the testing cdrom device")

        return testing_cdrom_device

    def disk_copy(vm, src_path, dst_path, copy_timeout=None, dsize=None):
        """
        Start disk load. Cyclic copy from src_path to dst_path.

        :param vm: VM where to find a disk.
        :param src_path: Source of data
        :param dst_path: Path to destination
        :param copy_timeout: Timeout for copy
        :param dsize: Size of data block which is periodical copied.
        """
        if copy_timeout is None:
            copy_timeout = 120
        session = vm.wait_for_login(timeout=login_timeout)
        copy_file_cmd = (
            "nohup cp %s %s 2> /dev/null &" % (src_path, dst_path))
        get_pid_cmd = "echo $!"
        if params["os_type"] == "windows":
            copy_file_cmd = "start cmd /c copy /y %s %s" % (src_path, dst_path)
            get_pid_cmd = "wmic process where name='cmd.exe' get ProcessID"
        session.cmd(copy_file_cmd, timeout=copy_timeout)
        pid = re.findall(r"\d+", session.cmd_output(get_pid_cmd))[-1]
        return pid

    def get_empty_cdrom_device(vm):
        """
        Get cdrom device when cdrom is not insert.
        """
        device = None
        blocks = vm.monitor.info("block")
        if isinstance(blocks, str):
            for block in blocks.strip().split('\n'):
                if 'not inserted' in block:
                    device = block.split(':')[0]
        else:
            for block in blocks:
                if 'inserted' not in block.keys():
                    device = block['device']
        return device

    def eject_test_via_monitor(vm, qemu_cdrom_device, guest_cdrom_device,
                               iso_image_orig, iso_image_new, max_times):
        """
        Test cdrom eject function via qemu monitor.
        """
        error.context("Eject the iso image in monitor %s times" % max_times,
                      logging.info)
        session = vm.wait_for_login(timeout=login_timeout)
        iso_image = iso_image_orig
        for i in range(1, max_times):
            session.cmd(params["eject_cdrom_cmd"] % guest_cdrom_device)
            vm.eject_cdrom(qemu_cdrom_device)
            time.sleep(2)
            if get_cdrom_file(vm, qemu_cdrom_device) is not None:
                raise error.TestFail("Device %s was not ejected"
                                     " (round %s)" % (iso_image, i))

            iso_image = iso_image_new
            # On even attempts, try to change the iso image
            if i % 2 == 0:
                iso_image = iso_image_orig
            vm.change_media(qemu_cdrom_device, iso_image)
            if get_cdrom_file(vm, qemu_cdrom_device) != iso_image:
                raise error.TestFail("Could not change iso image %s"
                                     " (round %s)" % (iso_image, i))
            time.sleep(workaround_eject_time)

    def check_tray_status_test(vm, qemu_cdrom_device, guest_cdrom_device,
                               max_times, iso_image_new):
        """
        Test cdrom tray status reporting function.
        """
        error.context("Change cdrom media via monitor", logging.info)
        iso_image_orig = get_cdrom_file(vm, qemu_cdrom_device)
        if not iso_image_orig:
            raise error.TestError("no media in cdrom")
        vm.change_media(qemu_cdrom_device, iso_image_new)
        is_opened = is_tray_opened(vm, qemu_cdrom_device)
        if is_opened:
            raise error.TestFail("cdrom tray not opened after change media")
        try:
            error.context("Copy test script to guest")
            tray_check_src = params.get("tray_check_src")
            if tray_check_src:
                tray_check_src = os.path.join(data_dir.get_deps_dir(), "cdrom",
                                              tray_check_src)
                vm.copy_files_to(tray_check_src, params["tmp_dir"])

            if is_tray_opened(vm, qemu_cdrom_device) is None:
                logging.warn("Tray status reporting is not supported by qemu!")
                logging.warn("cdrom_test_tray_status test is skipped...")
                return

            error.context("Eject the cdrom in guest %s times" % max_times,
                          logging.info)
            session = vm.wait_for_login(timeout=login_timeout)
            for i in range(1, max_times):
                session.cmd(params["eject_cdrom_cmd"] % guest_cdrom_device)
                if not is_tray_opened(vm, qemu_cdrom_device):
                    raise error.TestFail("Monitor reports tray closed"
                                         " when ejecting (round %s)" % i)
                if params["os_type"] != "windows":
                    cmd = "dd if=%s of=/dev/null count=1" % guest_cdrom_device
                else:
                    # windows guest does not support auto close door when reading
                    # cdrom, so close it by eject command;
                    cmd = params["close_cdrom_cmd"] % guest_cdrom_device
                session.cmd(cmd)
                if is_tray_opened(vm, qemu_cdrom_device):
                    raise error.TestFail("Monitor reports tray opened when close"
                                         " cdrom in guest (round %s)" % i)
                time.sleep(workaround_eject_time)
        finally:
            vm.change_media(qemu_cdrom_device, iso_image_orig)

    def check_tray_locked_test(vm, qemu_cdrom_device, guest_cdrom_device):
        """
        Test cdrom tray locked function.
        """
        error.context("Check cdrom tray status after cdrom is locked",
                      logging.info)
        session = vm.wait_for_login(timeout=login_timeout)
        tmp_is_trap_open = is_tray_opened(vm, qemu_cdrom_device, mode='mixed',
                                          dev_name=guest_cdrom_device)
        if tmp_is_trap_open is None:
            logging.warn("Tray status reporting is not supported by qemu!")
            logging.warn("cdrom_test_locked test is skipped...")
            return

        eject_failed = False
        eject_failed_msg = "Tray should be closed even in locked status"
        session.cmd(params["eject_cdrom_cmd"] % guest_cdrom_device)
        tmp_is_trap_open = is_tray_opened(vm, qemu_cdrom_device, mode='mixed',
                                          dev_name=guest_cdrom_device)
        if not tmp_is_trap_open:
            raise error.TestFail("Tray should not in closed status")
        session.cmd(params["lock_cdrom_cmd"] % guest_cdrom_device)
        try:
            session.cmd(params["close_cdrom_cmd"] % guest_cdrom_device)
        except aexpect.ShellCmdError, e:
            eject_failed = True
            eject_failed_msg += ", eject command failed: %s" % str(e)

        tmp_is_trap_open = is_tray_opened(vm, qemu_cdrom_device, mode='mixed',
                                          dev_name=guest_cdrom_device)
        if (eject_failed or tmp_is_trap_open):
            raise error.TestFail(eject_failed_msg)
        session.cmd(params["unlock_cdrom_cmd"] % guest_cdrom_device)
        session.cmd(params["close_cdrom_cmd"] % guest_cdrom_device)

    def file_operation_test(session, guest_cdrom_device, max_times):
        """
        Cdrom file operation test.
        """
        filename = "new"
        mount_point = get_cdrom_mount_point(session,
                                            guest_cdrom_device, params)
        mount_cmd = params["mount_cdrom_cmd"] % (guest_cdrom_device,
                                                 mount_point)
        umount_cmd = params["umount_cdrom_cmd"] % guest_cdrom_device
        src_file = params["src_file"] % (mount_point, filename)
        dst_file = params["dst_file"] % filename
        copy_file_cmd = params["copy_file_cmd"] % (mount_point, filename)
        remove_file_cmd = params["remove_file_cmd"] % filename
        show_mount_cmd = params["show_mount_cmd"]
        md5sum_cmd = params["md5sum_cmd"]

        if params["os_type"] != "windows":
            error.context("Mounting the cdrom under %s" % mount_point,
                          logging.info)
            session.cmd(mount_cmd, timeout=30)
        error.context("File copying test", logging.info)
        session.cmd(copy_file_cmd)
        f1_hash = session.cmd(md5sum_cmd % dst_file).split()[0].strip()
        f2_hash = session.cmd(md5sum_cmd % src_file).split()[0].strip()
        if f1_hash != f2_hash:
            raise error.TestFail("On disk and on cdrom files are different, "
                                 "md5 mismatch")
        session.cmd(remove_file_cmd)
        error.context("Mount/Unmount cdrom for %s times" % max_times,
                      logging.info)
        for _ in range(1, max_times):
            try:
                session.cmd(umount_cmd)
                session.cmd(mount_cmd)
            except aexpect.ShellError, detail:
                logging.error("Mount/Unmount fail, detail: '%s'", detail)
                logging.debug(session.cmd(show_mount_cmd))
                raise
        if params["os_type"] != "windows":
            session.cmd("umount %s" % guest_cdrom_device)

    # Test main body start.
    class MiniSubtest(object):

        def __new__(cls, *args, **kargs):
            self = super(MiniSubtest, cls).__new__(cls)
            ret = None
            exc_info = None
            if args is None:
                args = []
            try:
                try:
                    ret = self.test(*args, **kargs)
                except Exception:
                    exc_info = sys.exc_info()
            finally:
                if hasattr(self, "clean"):
                    try:
                        self.clean()
                    except Exception:
                        if exc_info is None:
                            raise
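                    # The test's own exception takes precedence: re-raise it
                    # with its original traceback (Python 2 three-argument
                    # raise) rather than masking it with a clean() failure.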
                    if exc_info:
                        raise exc_info[0], exc_info[1], exc_info[2]
            return ret

    class test_singlehost(MiniSubtest):

        def test(self):
            self.iso_image_orig = create_iso_image(params, "orig")
            self.iso_image_new = create_iso_image(params, "new")
            self.cdrom_dir = os.path.dirname(self.iso_image_new)
            if params.get("not_insert_at_start") == "yes":
                target_cdrom = params["target_cdrom"]
                params[target_cdrom] = ""
            params["start_vm"] = "yes"
            serial_num = generate_serial_num()
            cdrom = params.get("cdroms", "").split()[-1]
            params["drive_serial_%s" % cdrom] = serial_num
            env_process.preprocess_vm(test, params, env, params["main_vm"])
            vm = env.get_vm(params["main_vm"])

            self.session = vm.wait_for_login(timeout=login_timeout)
            pre_cmd = params.get("pre_cmd")
            if pre_cmd:
                self.session.cmd(pre_cmd, timeout=120)
                self.session = vm.reboot()
            iso_image = self.iso_image_orig
            error.context("Query cdrom devices in guest")
            cdrom_dev_list = list_guest_cdroms(self.session)
            logging.debug("cdrom_dev_list: '%s'", cdrom_dev_list)

            if params.get('not_insert_at_start') == "yes":
                error.context("Locked without media present", logging.info)
                # XXX: The device got from the monitor might not match the guest
                # device if there are multiple cdrom devices.
                qemu_cdrom_device = get_empty_cdrom_device(vm)
                guest_cdrom_device = get_testing_cdrom_device(vm,
                                                              self.session,
                                                              cdrom_dev_list,
                                                              serial_num)
                if vm.check_block_locked(qemu_cdrom_device):
                    raise error.TestFail("Device should not be locked just"
                                         " after booting up")
                cmd = params["lock_cdrom_cmd"] % guest_cdrom_device
                self.session.cmd(cmd)
                if not vm.check_block_locked(qemu_cdrom_device):
                    raise error.TestFail("Device is not locked as expect.")
                return

            error.context("Detecting the existence of a cdrom (guest OS side)",
                          logging.info)
            cdrom_dev_list = list_guest_cdroms(self.session)
            guest_cdrom_device = get_testing_cdrom_device(vm,
                                                          self.session,
                                                          cdrom_dev_list,
                                                          serial_num)
            error.context("Detecting the existence of a cdrom (qemu side)",
                          logging.info)
            qemu_cdrom_device = get_device(vm, iso_image)
            if params["os_type"] != "windows":
                self.session.get_command_output("umount %s" % guest_cdrom_device)
            if params.get('cdrom_test_autounlock') == 'yes':
                error.context("Trying to unlock the cdrom", logging.info)
                if not utils_misc.wait_for(lambda: not
                                           vm.check_block_locked(qemu_cdrom_device),
                                           300):
                    raise error.TestFail("Device %s could not be"
                                         " unlocked" % (qemu_cdrom_device))

            max_test_times = int(params.get("cdrom_max_test_times", 100))
            if params.get("cdrom_test_eject") == "yes":
                eject_test_via_monitor(vm, qemu_cdrom_device,
                                       guest_cdrom_device, self.iso_image_orig,
                                       self.iso_image_new, max_test_times)

            if params.get('cdrom_test_tray_status') == 'yes':
                check_tray_status_test(vm, qemu_cdrom_device,
                                       guest_cdrom_device, max_test_times,
                                       self.iso_image_new)

            if params.get('cdrom_test_locked') == 'yes':
                check_tray_locked_test(vm, qemu_cdrom_device,
                                       guest_cdrom_device)

            error.context("Check whether the cdrom is read-only", logging.info)
            cmd = params["readonly_test_cmd"] % guest_cdrom_device
            try:
                self.session.cmd(cmd)
                raise error.TestFail("Attempt to format cdrom %s succeeded" %
                                     (guest_cdrom_device))
            except aexpect.ShellError:
                pass

            sub_test = params.get("sub_test")
            if sub_test:
                error.context("Run sub test '%s' before doing file"
                              " operation" % sub_test, logging.info)
                utils_test.run_virt_sub_test(test, params, env, sub_test)

            if params.get("cdrom_test_file_operation") == "yes":
                file_operation_test(self.session, guest_cdrom_device,
                                    max_test_times)

            error.context("Cleanup")
            # Return the self.iso_image_orig
            cdfile = get_cdrom_file(vm, qemu_cdrom_device)
            if cdfile != self.iso_image_orig:
                time.sleep(workaround_eject_time)
                self.session.cmd(params["eject_cdrom_cmd"] %
                                 guest_cdrom_device)
                vm.eject_cdrom(qemu_cdrom_device)
                if get_cdrom_file(vm, qemu_cdrom_device) is not None:
                    raise error.TestFail("Device %s was not ejected"
                                         " in clearup stage" % qemu_cdrom_device)

                vm.change_media(qemu_cdrom_device, self.iso_image_orig)
                if get_cdrom_file(vm, qemu_cdrom_device) != self.iso_image_orig:
                    raise error.TestFail("It wasn't possible to change"
                                         " cdrom %s" % iso_image)
            post_cmd = params.get("post_cmd")
            if post_cmd:
                self.session.cmd(post_cmd)
            if params.get("guest_suspend_type"):
                self.session = vm.reboot()

        def clean(self):
            self.session.close()
            cleanup_cdrom(self.iso_image_orig)
            cleanup_cdrom(self.iso_image_new)

    class Multihost(MiniSubtest):

        def test(self):
            error.context("Preparing migration env and cdroms.", logging.info)
            mig_protocol = params.get("mig_protocol", "tcp")
            self.mig_type = migration.MultihostMigration
            if mig_protocol == "fd":
                self.mig_type = migration.MultihostMigrationFd
            if mig_protocol == "exec":
                self.mig_type = migration.MultihostMigrationExec
            if "rdma" in mig_protocol:
                self.mig_type = migration.MultihostMigrationRdma

            self.vms = params.get("vms").split(" ")
            self.srchost = params.get("hosts")[0]
            self.dsthost = params.get("hosts")[1]
            self.is_src = params.get("hostid") == self.srchost
            self.mig = self.mig_type(test, params, env, False, )
            self.cdrom_size = int(params.get("cdrom_size", 10))
            cdrom = params.objects("cdroms")[-1]
            self.serial_num = params.get("drive_serial_%s" % cdrom)

            if self.is_src:
                self.cdrom_orig = create_iso_image(params, "orig",
                                                   file_size=self.cdrom_size)
                self.cdrom_dir = os.path.dirname(self.cdrom_orig)
                vm = env.get_vm(self.vms[0])
                vm.destroy()
                params["start_vm"] = "yes"
                env_process.process(test, params, env,
                                    env_process.preprocess_image,
                                    env_process.preprocess_vm)
                vm = env.get_vm(self.vms[0])
                vm.wait_for_login(timeout=login_timeout)
            else:
                self.cdrom_orig = create_iso_image(params, "orig", False)
                self.cdrom_dir = os.path.dirname(self.cdrom_orig)

        def clean(self):
            self.mig.cleanup()
            if self.is_src:
                cleanup_cdrom(self.cdrom_orig)

    class test_multihost_locking(Multihost):

        def test(self):
            super(test_multihost_locking, self).test()

            error.context("Lock cdrom in VM.", logging.info)
            # Starts in source
            if self.is_src:
                vm = env.get_vm(params["main_vm"])
                session = vm.wait_for_login(timeout=login_timeout)
                cdrom_dev_list = list_guest_cdroms(session)
                guest_cdrom_device = get_testing_cdrom_device(vm,
                                                              session,
                                                              cdrom_dev_list,
                                                              self.serial_num)
                logging.debug("cdrom_dev_list: %s", cdrom_dev_list)
                device = get_device(vm, self.cdrom_orig)

                session.cmd(params["lock_cdrom_cmd"] % guest_cdrom_device)
                locked = check_cdrom_lock(vm, device)
                if locked:
                    logging.debug("Cdrom device is successfully locked in VM.")
                else:
                    raise error.TestFail("Cdrom device should be locked"
                                         " in VM.")

            self.mig._hosts_barrier(self.mig.hosts, self.mig.hosts,
                                    'cdrom_dev', cdrom_prepare_timeout)

            self.mig.migrate_wait([self.vms[0]], self.srchost, self.dsthost)

            # Starts in dest
            if not self.is_src:
                vm = env.get_vm(params["main_vm"])
                session = vm.wait_for_login(timeout=login_timeout)
                cdrom_dev_list = list_guest_cdroms(session)
                logging.debug("cdrom_dev_list: %s", cdrom_dev_list)
                device = get_device(vm, self.cdrom_orig)

                locked = check_cdrom_lock(vm, device)
                if locked:
                    logging.debug("Cdrom device stayed locked after "
                                  "migration in VM.")
                else:
                    raise error.TestFail("Cdrom device should stayed locked"
                                         " after migration in VM.")

                error.context("Unlock cdrom from VM.", logging.info)
                cdrom_dev_list = list_guest_cdroms(session)
                guest_cdrom_device = get_testing_cdrom_device(vm,
                                                              session,
                                                              cdrom_dev_list,
                                                              self.serial_num)
                session.cmd(params["unlock_cdrom_cmd"] % guest_cdrom_device)
                locked = check_cdrom_lock(vm, device)
                if not locked:
                    logging.debug("Cdrom device is successfully unlocked"
                                  " from VM.")
                else:
                    raise error.TestFail("Cdrom device should be unlocked"
                                         " in VM.")

            self.mig.migrate_wait([self.vms[0]], self.dsthost, self.srchost)

            if self.is_src:
                vm = env.get_vm(params["main_vm"])
                locked = check_cdrom_lock(vm, device)
                if not locked:
                    logging.debug("Cdrom device stayed unlocked after "
                                  "migration in VM.")
                else:
                    raise error.TestFail("Cdrom device should stayed unlocked"
                                         " after migration in VM.")

            self.mig._hosts_barrier(self.mig.hosts, self.mig.hosts,
                                    'Finish_cdrom_test', login_timeout)

        def clean(self):
            super(test_multihost_locking, self).clean()

    class test_multihost_ejecting(Multihost):

        def test(self):
            super(test_multihost_ejecting, self).test()

            self.cdrom_new = create_iso_image(params, "new")

            if not self.is_src:
                self.cdrom_new = create_iso_image(params, "new", False)
                self.cdrom_dir = os.path.dirname(self.cdrom_new)
                params["cdrom_cd1"] = params.get("cdrom_cd1_host2")

            if self.is_src:
                vm = env.get_vm(self.vms[0])
                session = vm.wait_for_login(timeout=login_timeout)
                cdrom_dev_list = list_guest_cdroms(session)
                logging.debug("cdrom_dev_list: %s", cdrom_dev_list)
                device = get_device(vm, self.cdrom_orig)
                cdrom = get_testing_cdrom_device(vm,
                                                 session,
                                                 cdrom_dev_list,
                                                 self.serial_num)

                error.context("Eject cdrom.", logging.info)
                session.cmd(params["eject_cdrom_cmd"] % cdrom)
                vm.eject_cdrom(device)
                time.sleep(2)
                if get_cdrom_file(vm, device) is not None:
                    raise error.TestFail("Device %s was not ejected" % (cdrom))

                cdrom = self.cdrom_new

                error.context("Change cdrom.", logging.info)
                vm.change_media(device, cdrom)
                if get_cdrom_file(vm, device) != cdrom:
                    raise error.TestFail("It wasn't possible to change "
                                         "cdrom %s" % (cdrom))
                time.sleep(workaround_eject_time)

            self.mig._hosts_barrier(self.mig.hosts, self.mig.hosts,
                                    'cdrom_dev', cdrom_prepare_timeout)

            self.mig.migrate_wait([self.vms[0]], self.srchost, self.dsthost)

            if not self.is_src:
                vm = env.get_vm(self.vms[0])
                vm.reboot()

        def clean(self):
            if self.is_src:
                cleanup_cdrom(self.cdrom_new)
            super(test_multihost_ejecting, self).clean()

    class test_multihost_copy(Multihost):

        def test(self):
            super(test_multihost_copy, self).test()
            copy_timeout = int(params.get("copy_timeout", 480))
            checksum_timeout = int(params.get("checksum_timeout", 180))

            pid = None
            sync_id = {'src': self.srchost,
                       'dst': self.dsthost,
                       "type": "file_trasfer"}
            filename = "orig"
            remove_file_cmd = params["remove_file_cmd"] % filename
            dst_file = params["dst_file"] % filename

            if self.is_src:  # Starts in source
                vm = env.get_vm(self.vms[0])
                vm.monitor.migrate_set_speed("1G")
                session = vm.wait_for_login(timeout=login_timeout)
                cdrom_dev_list = list_guest_cdroms(session)
                logging.debug("cdrom_dev_list: %s", cdrom_dev_list)
                cdrom = get_testing_cdrom_device(vm,
                                                 session,
                                                 cdrom_dev_list,
                                                 self.serial_num)
                mount_point = get_cdrom_mount_point(session, cdrom, params)
                mount_cmd = params["mount_cdrom_cmd"] % (cdrom, mount_point)
                src_file = params["src_file"] % (mount_point, filename)
                copy_file_cmd = params[
                    "copy_file_cmd"] % (mount_point, filename)
                if params["os_type"] != "windows":
                    error.context("Mount and copy data", logging.info)
                    session.cmd(mount_cmd, timeout=30)

                error.context("File copying test", logging.info)
                session.cmd(remove_file_cmd)
                session.cmd(copy_file_cmd)

                pid = disk_copy(vm, src_file, dst_file, copy_timeout)

            sync = SyncData(self.mig.master_id(), self.mig.hostid,
                            self.mig.hosts, sync_id, self.mig.sync_server)

            pid = sync.sync(pid, timeout=cdrom_prepare_timeout)[self.srchost]

            self.mig.migrate_wait([self.vms[0]], self.srchost, self.dsthost)

            if not self.is_src:  # Starts in dest
                vm = env.get_vm(self.vms[0])
                session = vm.wait_for_login(timeout=login_timeout)
                error.context("Wait for copy finishing.", logging.info)
                cdrom_dev_list = list_guest_cdroms(session)
                cdrom = get_testing_cdrom_device(vm,
                                                 session,
                                                 cdrom_dev_list,
                                                 self.serial_num)
                mount_point = get_cdrom_mount_point(session, cdrom, params)
                mount_cmd = params["mount_cdrom_cmd"] % (cdrom, mount_point)
                src_file = params["src_file"] % (mount_point, filename)
                md5sum_cmd = params["md5sum_cmd"]

                def is_copy_done():
                    if params["os_type"] == "windows":
                        cmd = "tasklist /FI \"PID eq %s\"" % pid
                    else:
                        cmd = "ps -p %s" % pid
                    return session.cmd_status(cmd) != 0

                if not utils_misc.wait_for(is_copy_done, timeout=copy_timeout):
                    raise error.TestFail("Wait for file copy finish timeout")

                error.context("Compare file on disk and on cdrom", logging.info)
                f1_hash = session.cmd(md5sum_cmd % dst_file,
                                      timeout=checksum_timeout).split()[0]
                f2_hash = session.cmd(md5sum_cmd % src_file,
                                      timeout=checksum_timeout).split()[0]
                if f1_hash.strip() != f2_hash.strip():
                    raise error.TestFail("On disk and on cdrom files are"
                                         " different, md5 mismatch")
                session.cmd(remove_file_cmd)

            self.mig._hosts_barrier(self.mig.hosts, self.mig.hosts,
                                    'Finish_cdrom_test', login_timeout)

        def clean(self):
            super(test_multihost_copy, self).clean()

    test_type = params.get("test_type", "test_singlehost")
    if (test_type in locals()):
        tests_group = locals()[test_type]
        tests_group()
    else:
        raise error.TestFail("Test group '%s' is not defined in"
                             " migration_with_dst_problem test" % test_type)

Example 40

Project: tp-qemu
Source File: cpuflags.py
View license
def run(test, params, env):
    """
    Boot guest with different cpu flags and check if guest works correctly.

    :param test: kvm test object.
    :param params: Dictionary with the test parameters.
    :param env: Dictionary with test environment.
    """
    utils_misc.Flag.aliases = utils_misc.kvm_map_flags_aliases
    qemu_binary = utils_misc.get_qemu_binary(params)

    cpuflags_src = os.path.join(data_dir.get_deps_dir("cpu_flags"), "src")
    cpuflags_def = os.path.join(data_dir.get_deps_dir("cpu_flags"),
                                "cpu_map.xml")
    smp = int(params.get("smp", 1))

    all_host_supported_flags = params.get("all_host_supported_flags", "no")

    mig_timeout = float(params.get("mig_timeout", "3600"))
    mig_protocol = params.get("migration_protocol", "tcp")
    mig_speed = params.get("mig_speed", "1G")

    cpu_model_black_list = params.get("cpu_model_blacklist", "").split(" ")

    multi_host_migration = params.get("multi_host_migration", "no")

    class HgFlags(object):
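        """
        Helper that derives the cpu-flag sets used by the tests for a given
        cpu model: which flags qemu and the host support, which flags the
        model itself provides or lacks, and which combinations a guest can
        therefore be expected to expose.
        """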

        def __init__(self, cpu_model, extra_flags=set([])):
            virtual_flags = set(map(utils_misc.Flag,
                                    params.get("guest_spec_flags", "").split()))
            self.hw_flags = set(map(utils_misc.Flag,
                                    params.get("host_spec_flags", "").split()))
            self.qemu_support_flags = get_all_qemu_flags()
            self.host_support_flags = set(map(utils_misc.Flag,
                                              utils_misc.get_cpu_flags()))
            self.quest_cpu_model_flags = (get_guest_host_cpuflags(cpu_model) -
                                          virtual_flags)

            self.supported_flags = (self.qemu_support_flags &
                                    self.host_support_flags)
            self.cpumodel_unsupport_flags = (self.supported_flags -
                                             self.quest_cpu_model_flags)

            self.host_unsupported_flags = (self.quest_cpu_model_flags -
                                           self.host_support_flags)

            self.all_possible_guest_flags = (self.quest_cpu_model_flags -
                                             self.host_unsupported_flags)
            self.all_possible_guest_flags |= self.cpumodel_unsupport_flags

            self.guest_flags = (self.quest_cpu_model_flags -
                                self.host_unsupported_flags)
            self.guest_flags |= extra_flags

            self.host_all_unsupported_flags = set([])
            self.host_all_unsupported_flags |= self.qemu_support_flags
            self.host_all_unsupported_flags -= (self.host_support_flags |
                                                virtual_flags)

    def start_guest_with_cpuflags(cpuflags, smp=None, migration=False,
                                  wait=True):
        """
        Try to boot the guest with special cpu flags and try to log in to it.
        """
        params_b = params.copy()
        params_b["cpu_model"] = cpuflags
        if smp is not None:
            params_b["smp"] = smp

        vm_name = "vm1-cpuflags"
        vm = qemu_vm.VM(vm_name, params_b, test.bindir, env['address_cache'])
        env.register_vm(vm_name, vm)
        if (migration is True):
            vm.create(migration_mode=mig_protocol)
        else:
            vm.create()

        session = None
        try:
            vm.verify_alive()

            if wait:
                session = vm.wait_for_login()
        except qemu_vm.ImageUnbootableError:
            vm.destroy(gracefully=False)
            raise

        return (vm, session)

    def get_guest_system_cpuflags(vm_session):
        """
        Get guest system cpuflags.

        :param vm_session: session to the checked vm.
        :return: [corresponding flags]
        """
        flags_re = re.compile(r'^flags\s*:(.*)$', re.MULTILINE)
        out = vm_session.cmd_output("cat /proc/cpuinfo")

        flags = flags_re.search(out).groups()[0].split()
        return set(map(utils_misc.Flag, flags))

    def get_guest_host_cpuflags_legacy(cpumodel):
        """
        Get cpu flags corresponding to the cpumodel parameter.

        :param cpumodel: Cpumodel parameter sent to <qemu-kvm-cmd>.
        :return: [corresponding flags]
        """
        cmd = qemu_binary + " -cpu ?dump"
        output = utils.run(cmd).stdout
        re.escape(cpumodel)
        pattern = (r".+%s.*\n.*\n +feature_edx .+ \((.*)\)\n +feature_"
                   "ecx .+ \((.*)\)\n +extfeature_edx .+ \((.*)\)\n +"
                   "extfeature_ecx .+ \((.*)\)\n" % (cpumodel))
        flags = []
        model = re.search(pattern, output)
        if model is None:
            raise error.TestFail("Cannot find %s cpu model." % (cpumodel))
        for flag_group in model.groups():
            flags += flag_group.split()
        return set(map(utils_misc.Flag, flags))

    class ParseCpuFlags(object):

        def __init__(self, encoding=None):
            self.cpus = {}
            self.parser = expat.ParserCreate(encoding)
            self.parser.StartElementHandler = self.start_element
            self.parser.EndElementHandler = self.end_element
            self.last_arch = None
            self.last_model = None
            self.sub_model = False
            self.all_flags = []

        def start_element(self, name, attrs):
            if name == "cpus":
                self.cpus = {}
            elif name == "arch":
                self.last_arch = self.cpus[attrs['name']] = {}
            elif name == "model":
                if self.last_model is None:
                    self.last_model = self.last_arch[attrs['name']] = []
                else:
                    self.last_model += self.last_arch[attrs['name']]
                    self.sub_model = True
            elif name == "feature":
                if self.last_model is not None:
                    self.last_model.append(attrs['name'])
                else:
                    self.all_flags.append(attrs['name'])

        def end_element(self, name):
            if name == "arch":
                self.last_arch = None
            elif name == "model":
                if self.sub_model is False:
                    self.last_model = None
                else:
                    self.sub_model = False

        def parse_file(self, file_path):
            self.parser.ParseFile(open(file_path, 'r'))
            return self.cpus

    def get_guest_host_cpuflags_1350(cpumodel):
        """
        Get cpu flags corresponding to the cpumodel parameter.

        :param cpumodel: Cpumodel parameter sent to <qemu-kvm-cmd>.
        :return: [corresponding flags]
        """
        p = ParseCpuFlags()
        cpus = p.parse_file(cpuflags_def)
        for arch in cpus.values():
            if cpumodel in arch.keys():
                flags = arch[cpumodel]
        return set(map(utils_misc.Flag, flags))

    get_guest_host_cpuflags_BAD = get_guest_host_cpuflags_1350

    def get_all_qemu_flags_legacy():
        cmd = qemu_binary + " -cpu ?cpuid"
        output = utils.run(cmd).stdout

        flags_re = re.compile(r".*\n.*f_edx:(.*)\n.*f_ecx:(.*)\n"
                              ".*extf_edx:(.*)\n.*extf_ecx:(.*)")
        m = flags_re.search(output)
        flags = []
        for a in m.groups():
            flags += a.split()

        return set(map(utils_misc.Flag, flags))

    def get_all_qemu_flags_1350():
        cmd = qemu_binary + " -cpu ?"
        output = utils.run(cmd).stdout

        flags_re = re.compile(r".*Recognized CPUID flags:\n(.*)", re.DOTALL)
        m = flags_re.search(output)
        flags = []
        for a in m.groups():
            flags += a.split()

        return set(map(utils_misc.Flag, flags))

    def get_all_qemu_flags_BAD():
        """
        Get all cpu flags defined in the cpu_map.xml definition file.

        :return: set of all flags
        """
        p = ParseCpuFlags()
        p.parse_file(cpuflags_def)
        return set(map(utils_misc.Flag, p.all_flags))

    def get_cpu_models_legacy():
        """
        Get all cpu models from qemu.

        :return: cpu models.
        """
        cmd = qemu_binary + " -cpu ?"
        output = utils.run(cmd).stdout

        cpu_re = re.compile(r"\w+\s+\[?(\w+)\]?")
        return cpu_re.findall(output)

    def get_cpu_models_1350():
        """
        Get all cpu models from qemu.

        :return: cpu models.
        """
        cmd = qemu_binary + " -cpu ?"
        output = utils.run(cmd).stdout

        cpu_re = re.compile(r"x86\s+\[?(\w+)\]?")
        return cpu_re.findall(output)

    get_cpu_models_BAD = get_cpu_models_1350

    def get_qemu_cpu_cmd_version():
        cmd = qemu_binary + " -cpu ?cpuid"
        try:
            utils.run(cmd).stdout
            return "legacy"
        except:
            cmd = qemu_binary + " -cpu ?"
            output = utils.run(cmd).stdout
            if "CPUID" in output:
                return "1350"
            else:
                return "BAD"

    qcver = get_qemu_cpu_cmd_version()

    get_guest_host_cpuflags = locals()["get_guest_host_cpuflags_%s" % qcver]
    get_all_qemu_flags = locals()["get_all_qemu_flags_%s" % qcver]
    get_cpu_models = locals()["get_cpu_models_%s" % qcver]

    def get_flags_full_name(cpu_flag):
        """
        Get the full name of a flag.

        :param cpu_flag: Flag
        :return: the matching flag from qemu, or [] if not found.
        """
        cpu_flag = utils_misc.Flag(cpu_flag)
        for f in get_all_qemu_flags():
            if f == cpu_flag:
                return utils_misc.Flag(f)
        return []

    def parse_qemu_cpucommand(cpumodel):
        """
        Parse qemu cpu params.

        :param cpumodel: Cpu model command.
        :return: All flags which guest must have.
        """
        flags = cpumodel.split(",")
        cpumodel = flags[0]

        qemu_model_flag = get_guest_host_cpuflags(cpumodel)
        host_support_flag = set(map(utils_misc.Flag,
                                    utils_misc.get_cpu_flags()))
        real_flags = qemu_model_flag & host_support_flag

        for f in flags[1:]:
            if f[0].startswith("+"):
                real_flags |= set([get_flags_full_name(f[1:])])
            if f[0].startswith("-"):
                real_flags -= set([get_flags_full_name(f[1:])])

        return real_flags

    def check_cpuflags(cpumodel, vm_session):
        """
        Check if the vm flags are the same as the flags selected by cpumodel.

        :param cpumodel: params for -cpu param in qemu-kvm
        :param vm_session: session to vm to check flags.

        :return: ([excess], [missing]) flags
        """
        gf = get_guest_system_cpuflags(vm_session)
        rf = parse_qemu_cpucommand(cpumodel)

        logging.debug("Guest flags: %s", gf)
        logging.debug("Host flags: %s", rf)
        logging.debug("Flags on guest not defined by host: %s", (gf - rf))
        return rf - gf

    def get_cpu_models_supported_by_host():
        """
        Get all cpu models whose set of flags is a subset of the host's flags.

        :return: [cpumodels]
        """
        cpumodels = []
        for cpumodel in get_cpu_models():
            flags = HgFlags(cpumodel)
            if flags.host_unsupported_flags == set([]):
                cpumodels.append(cpumodel)
        return cpumodels

    def disable_cpu(vm_session, cpu, disable=True):
        """
        Disable cpu in guest system.

        :param cpu: CPU id to disable.
        :param disable: if True disable cpu else enable cpu.
        """
        system_cpu_dir = "/sys/devices/system/cpu/"
        cpu_online = system_cpu_dir + "cpu%d/online" % (cpu)
        cpu_state = vm_session.cmd_output("cat %s" % cpu_online).strip()
        if disable and cpu_state == "1":
            vm_session.cmd("echo 0 > %s" % cpu_online)
            logging.debug("Guest cpu %d is disabled.", cpu)
        elif cpu_state == "0":
            vm_session.cmd("echo 1 > %s" % cpu_online)
            logging.debug("Guest cpu %d is enabled.", cpu)

    def check_online_cpus(vm_session, smp, disabled_cpu):
        """
        Check which guest CPUs are online.

        :param smp: Count of cpu cores in the system.
        :param disabled_cpu: List of disabled cpus.

        :return: List of CPUs that are still enabled after disable procedure.
        """
        online = [0]
        for cpu in range(1, smp):
            system_cpu_dir = "/sys/devices/system/cpu/"
            cpu_online = system_cpu_dir + "cpu%d/online" % (cpu)
            cpu_state = vm_session.cmd_output("cat %s" % cpu_online).strip()
            if cpu_state == "1":
                online.append(cpu)
        cpu_proc = vm_session.cmd_output("cat /proc/cpuinfo")
        cpu_state_proc = map(lambda x: int(x),
                             re.findall(r"processor\s+:\s*(\d+)\n", cpu_proc))
        if set(online) != set(cpu_state_proc):
            raise error.TestError("Some cpus are disabled but %s are still "
                                  "visible like online in /proc/cpuinfo." %
                                  (set(cpu_state_proc) - set(online)))

        return set(online) - set(disabled_cpu)

    def install_cpuflags_test_on_vm(vm, dst_dir):
        """
        Install the cpuflags-test tool on the vm.

        :param vm: virtual machine.
        :param dst_dir: Installation path.
        """
        session = vm.wait_for_login()
        vm.copy_files_to(cpuflags_src, dst_dir)
        session.cmd("sync")
        session.cmd("cd %s; make EXTRA_FLAGS='';" %
                    os.path.join(dst_dir, "cpu_flags"))
        session.cmd("sync")
        session.close()

    def check_cpuflags_work(vm, path, flags):
        """
        Check which flags work.

        :param vm: Virtual machine.
        :param path: Path of cpuflags_test
        :param flags: Flags to test.
        :return: Tuple (Working, not working, not tested) flags.
        """
        pass_Flags = []
        not_tested = []
        not_working = []
        session = vm.wait_for_login()
        for f in flags:
            try:
                for tc in utils_misc.kvm_map_flags_to_test[f]:
                    session.cmd("%s/cpuflags-test --%s" %
                                (os.path.join(path, "cpu_flags"), tc))
                pass_Flags.append(f)
            except aexpect.ShellCmdError:
                not_working.append(f)
            except KeyError:
                not_tested.append(f)
        return (set(map(utils_misc.Flag, pass_Flags)),
                set(map(utils_misc.Flag, not_working)),
                set(map(utils_misc.Flag, not_tested)))

    def run_stress(vm, timeout, guest_flags):
        """
        Run stress on vm for timeout time.
        """
        ret = False
        install_path = "/tmp"
        install_cpuflags_test_on_vm(vm, install_path)
        flags = check_cpuflags_work(vm, install_path, guest_flags)
        dd_session = vm.wait_for_login()
        stress_session = vm.wait_for_login()
        dd_session.sendline("dd if=/dev/[svh]da of=/tmp/stressblock"
                            " bs=10MB count=100 &")
        try:
            stress_session.cmd("%s/cpuflags-test --stress %s%s" %
                               (os.path.join(install_path, "cpu_flags"), smp,
                                utils_misc.kvm_flags_to_stresstests(flags[0])),
                               timeout=timeout)
        except aexpect.ShellTimeoutError:
            ret = True
        stress_session.close()
        dd_session.close()
        return ret

    def separe_cpu_model(cpu_model):
        try:
            (cpu_model, _) = cpu_model.split(":")
        except ValueError:
            cpu_model = cpu_model
        return cpu_model

    def parse_cpu_model():
        """
        Parse cpu_models from config file.

        :return: [(cpumodel, extra_flags)]
        """
        cpu_model = params.get("cpu_model", "")
        logging.debug("CPU model found: %s", str(cpu_model))

        try:
            (cpu_model, extra_flags) = cpu_model.split(":")
            extra_flags = set(map(utils_misc.Flag, extra_flags.split(",")))
        except ValueError:
            cpu_model = cpu_model
            extra_flags = set([])
        return (cpu_model, extra_flags)

    class MiniSubtest(object):

        def __new__(cls, *args, **kargs):
            self = super(MiniSubtest, cls).__new__(cls)
            ret = None
            if args is None:
                args = []
            try:
                ret = self.test(*args, **kargs)
            finally:
                if hasattr(self, "clean"):
                    self.clean()
            return ret

    def print_exception(called_object):
        exc_type, exc_value, exc_traceback = sys.exc_info()
        logging.error("In function (" + called_object.__name__ + "):")
        logging.error("Call from:\n" +
                      traceback.format_stack()[-2][:-1])
        logging.error("Exception from:\n" +
                      "".join(traceback.format_exception(
                              exc_type, exc_value,
                              exc_traceback.tb_next)))
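
    # A minimal usage sketch for print_exception above (assumption: it is called
    # from inside an except block, so that sys.exc_info() still refers to the
    # active exception; some_step is a placeholder callable), e.g.:
    #
    #     try:
    #         some_step()
    #     except Exception:
    #         print_exception(some_step)
    #         raise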

    class Test_temp(MiniSubtest):

        def clean(self):
            logging.info("cleanup")
            if (hasattr(self, "vm")):
                vm = getattr(self, "vm")
                vm.destroy(gracefully=False)

    # 1) <qemu-kvm-cmd> -cpu ?model
    class test_qemu_cpu_model(MiniSubtest):

        def test(self):
            if qcver == "legacy":
                cpu_models = params.get("cpu_models", "core2duo").split()
                cmd = qemu_binary + " -cpu ?model"
                result = utils.run(cmd)
                missing = []
                cpu_models = map(separe_cpu_model, cpu_models)
                for cpu_model in cpu_models:
                    if cpu_model not in result.stdout:
                        missing.append(cpu_model)
                if missing:
                    raise error.TestFail("CPU models %s are not in output "
                                         "'%s' of command \n%s" %
                                         (missing, cmd, result.stdout))
            elif qcver == "1350":
                raise error.TestNAError("New qemu use new -cpu ? cmd.")

    # 2) <qemu-kvm-cmd> -cpu ?dump
    class test_qemu_dump(MiniSubtest):

        def test(self):
            if qcver == "legacy":
                cpu_models = params.get("cpu_models", "core2duo").split()
                cmd = qemu_binary + " -cpu ?dump"
                result = utils.run(cmd)
                cpu_models = map(separe_cpu_model, cpu_models)
                missing = []
                for cpu_model in cpu_models:
                    if cpu_model not in result.stdout:
                        missing.append(cpu_model)
                if missing:
                    raise error.TestFail("CPU models %s are not in output "
                                         "'%s' of command \n%s" %
                                         (missing, cmd, result.stdout))
            elif qcver == "1350":
                raise error.TestNAError(
                    "New qemu does not support -cpu ?dump.")

    # 3) <qemu-kvm-cmd> -cpu ?cpuid
    class test_qemu_cpuid(MiniSubtest):

        def test(self):
            if qcver == "legacy":
                cmd = qemu_binary + " -cpu ?cpuid"
                result = utils.run(cmd)
                if result.stdout is "":
                    raise error.TestFail("There aren't any cpu Flag in output"
                                         " '%s' of command \n%s" %
                                         (cmd, result.stdout))
            elif qcver == "1350":
                raise error.TestNAError("New qemu use new -cpu ? cmd.")

    # 1) boot with cpu_model
    class test_boot_cpu_model(Test_temp):

        def test(self):
            cpu_model, _ = parse_cpu_model()
            logging.debug("Run tests with cpu model %s", cpu_model)
            flags = HgFlags(cpu_model)
            (self.vm, session) = start_guest_with_cpuflags(cpu_model)
            not_enable_flags = (check_cpuflags(cpu_model, session) -
                                flags.hw_flags)
            if not_enable_flags != set([]):
                raise error.TestFail("Flags defined on host but not found "
                                     "on guest: %s" % (not_enable_flags))

    # 2) success boot with supported flags
    class test_boot_cpu_model_and_additional_flags(Test_temp):

        def test(self):
            cpu_model, extra_flags = parse_cpu_model()

            flags = HgFlags(cpu_model, extra_flags)

            logging.debug("Cpu mode flags %s.",
                          str(flags.quest_cpu_model_flags))
            cpuf_model = cpu_model

            if all_host_supported_flags == "yes":
                for fadd in flags.cpumodel_unsupport_flags:
                    cpuf_model += ",+" + str(fadd)
            else:
                for fadd in extra_flags:
                    cpuf_model += ",+" + str(fadd)

            for fdel in flags.host_unsupported_flags:
                cpuf_model += ",-" + str(fdel)

            if all_host_supported_flags == "yes":
                guest_flags = flags.all_possible_guest_flags
            else:
                guest_flags = flags.guest_flags

            (self.vm, session) = start_guest_with_cpuflags(cpuf_model)

            not_enable_flags = (check_cpuflags(cpuf_model, session) -
                                flags.hw_flags)
            if not_enable_flags != set([]):
                logging.info("Model unsupported flags: %s",
                             str(flags.cpumodel_unsupport_flags))
                logging.error("Flags defined on host but not on found "
                              "on guest: %s", str(not_enable_flags))
            logging.info("Check main instruction sets.")

            install_path = "/tmp"
            install_cpuflags_test_on_vm(self.vm, install_path)

            Flags = check_cpuflags_work(self.vm, install_path,
                                        flags.all_possible_guest_flags)
            logging.info("Woking CPU flags: %s", str(Flags[0]))
            logging.info("Not working CPU flags: %s", str(Flags[1]))
            logging.warning("Flags works even if not defined on guest cpu "
                            "flags: %s", str(Flags[0] - guest_flags))
            logging.warning("Not tested CPU flags: %s", str(Flags[2]))

            if Flags[1] & guest_flags:
                raise error.TestFail("Some flags do not work: %s" %
                                     (str(Flags[1])))

    # 3) fail boot unsupported flags
    class test_boot_warn_with_host_unsupported_flags(MiniSubtest):

        def test(self):
            # These are virtual cpu flags which are supported by
            # qemu but not by the host cpu.
            cpu_model, extra_flags = parse_cpu_model()

            flags = HgFlags(cpu_model, extra_flags)

            logging.debug("Unsupported flags %s.",
                          str(flags.host_all_unsupported_flags))
            cpuf_model = cpu_model + ",check"

            # Add unsupported flags.
            for fadd in flags.host_all_unsupported_flags:
                cpuf_model += ",+" + str(fadd)

            vnc_port = utils_misc.find_free_port(5900, 6100) - 5900
            cmd = "%s -cpu %s -vnc :%d -enable-kvm" % (qemu_binary,
                                                       cpuf_model,
                                                       vnc_port)
            out = None

            try:
                try:
                    out = utils.run(cmd, timeout=5, ignore_status=True).stderr
                    raise error.TestFail("Guest not boot with unsupported "
                                         "flags.")
                except error.CmdError, e:
                    out = e.result_obj.stderr
            finally:
                uns_re = re.compile(r"^warning:.*flag '(.+)'", re.MULTILINE)
                nf_re = re.compile(
                    r"^CPU feature (.+) not found", re.MULTILINE)
                warn_flags = set([utils_misc.Flag(x)
                                  for x in uns_re.findall(out)])
                not_found = set([utils_misc.Flag(x)
                                 for x in nf_re.findall(out)])
                fwarn_flags = flags.host_all_unsupported_flags - warn_flags
                fwarn_flags -= not_found
                if fwarn_flags:
                    raise error.TestFail("Qemu did not warn the use of "
                                         "flags %s" % str(fwarn_flags))

    # 3) fail boot unsupported flags
    class test_fail_boot_with_host_unsupported_flags(MiniSubtest):

        def test(self):
            # These are virtual cpu flags which are supported by
            # qemu but not by the host cpu.
            cpu_model, extra_flags = parse_cpu_model()

            flags = HgFlags(cpu_model, extra_flags)
            cpuf_model = cpu_model + ",enforce"

            logging.debug("Unsupported flags %s.",
                          str(flags.host_all_unsupported_flags))

            # Add unsupported flags.
            for fadd in flags.host_all_unsupported_flags:
                cpuf_model += ",+" + str(fadd)

            vnc_port = utils_misc.find_free_port(5900, 6100) - 5900
            cmd = "%s -cpu %s -vnc :%d -enable-kvm" % (qemu_binary,
                                                       cpuf_model,
                                                       vnc_port)
            out = None
            try:
                try:
                    out = utils.run(cmd, timeout=5, ignore_status=True).stderr
                except error.CmdError:
                    logging.error("Host boot with unsupported flag")
            finally:
                uns_re = re.compile(r"^warning:.*flag '(.+)'", re.MULTILINE)
                nf_re = re.compile(
                    r"^CPU feature (.+) not found", re.MULTILINE)
                warn_flags = set([utils_misc.Flag(x)
                                  for x in uns_re.findall(out)])
                not_found = set([utils_misc.Flag(x)
                                 for x in nf_re.findall(out)])
                fwarn_flags = flags.host_all_unsupported_flags - warn_flags
                fwarn_flags -= not_found
                if fwarn_flags:
                    raise error.TestFail("Qemu did not warn the use of "
                                         "flags %s" % str(fwarn_flags))

    # 4) check guest flags under load cpu, stress and system (dd)
    class test_boot_guest_and_try_flags_under_load(Test_temp):

        def test(self):
            logging.info("Check guest working cpuflags under load "
                         "cpu and stress and system (dd)")
            cpu_model, extra_flags = parse_cpu_model()

            flags = HgFlags(cpu_model, extra_flags)

            cpuf_model = cpu_model

            logging.debug("Cpu mode flags %s.",
                          str(flags.quest_cpu_model_flags))

            if all_host_supported_flags == "yes":
                logging.debug("Added flags %s.",
                              str(flags.cpumodel_unsupport_flags))

                # Add unsupported flags.
                for fadd in flags.cpumodel_unsupport_flags:
                    cpuf_model += ",+" + str(fadd)

                for fdel in flags.host_unsupported_flags:
                    cpuf_model += ",-" + str(fdel)

            (self.vm, _) = start_guest_with_cpuflags(cpuf_model, smp)

            if (not run_stress(self.vm, 60, flags.guest_flags)):
                raise error.TestFail("Stress test ended before"
                                     " end of test.")

        def clean(self):
            logging.info("cleanup")
            self.vm.destroy(gracefully=False)

    # 5) Online/offline CPU
    class test_online_offline_guest_CPUs(Test_temp):

        def test(self):
            cpu_model, extra_flags = parse_cpu_model()

            logging.debug("Run tests with cpu model %s.", (cpu_model))
            flags = HgFlags(cpu_model, extra_flags)

            (self.vm, session) = start_guest_with_cpuflags(cpu_model, smp)

            def encap(timeout):
                random.seed()
                begin = time.time()
                end = begin
                if smp > 1:
                    while end - begin < 60:
                        cpu = random.randint(1, smp - 1)
                        if random.randint(0, 1):
                            disable_cpu(session, cpu, True)
                        else:
                            disable_cpu(session, cpu, False)
                        end = time.time()
                    return True
                else:
                    logging.warning("For this test is necessary smp > 1.")
                    return False
            timeout = 60

            test_flags = flags.guest_flags
            if all_host_supported_flags == "yes":
                test_flags = flags.all_possible_guest_flags

            result = utils_misc.parallel([(encap, [timeout]),
                                          (run_stress, [self.vm, timeout,
                                                        test_flags])])
            if not (result[0] and result[1]):
                raise error.TestFail("Stress tests failed before"
                                     " end of testing.")

    # 6) migration test
    class test_migration_with_additional_flags(Test_temp):

        def test(self):
            cpu_model, extra_flags = parse_cpu_model()

            flags = HgFlags(cpu_model, extra_flags)

            logging.debug("Cpu mode flags %s.",
                          str(flags.quest_cpu_model_flags))
            logging.debug("Added flags %s.",
                          str(flags.cpumodel_unsupport_flags))
            cpuf_model = cpu_model

            # Add unsupported flags.
            for fadd in flags.cpumodel_unsupport_flags:
                cpuf_model += ",+" + str(fadd)

            for fdel in flags.host_unsupported_flags:
                cpuf_model += ",-" + str(fdel)

            (self.vm, _) = start_guest_with_cpuflags(cpuf_model, smp)

            install_path = "/tmp"
            install_cpuflags_test_on_vm(self.vm, install_path)
            flags = check_cpuflags_work(self.vm, install_path,
                                        flags.guest_flags)
            dd_session = self.vm.wait_for_login()
            stress_session = self.vm.wait_for_login()

            dd_session.sendline("nohup dd if=/dev/[svh]da of=/tmp/"
                                "stressblock bs=10MB count=100 &")
            cmd = ("nohup %s/cpuflags-test --stress  %s%s &" %
                   (os.path.join(install_path, "cpu_flags"), smp,
                    utils_misc.kvm_flags_to_stresstests(flags[0])))
            stress_session.sendline(cmd)

            time.sleep(5)

            self.vm.monitor.migrate_set_speed(mig_speed)
            self.clone = self.vm.migrate(
                mig_timeout, mig_protocol, offline=False,
                not_wait_for_migration=True)

            time.sleep(5)

            try:
                self.vm.wait_for_migration(10)
            except virt_vm.VMMigrateTimeoutError:
                self.vm.monitor.migrate_set_downtime(1)
                self.vm.wait_for_migration(mig_timeout)

            # Swap the VM objects so that cleanup destroys the right one.
            temp = self.vm.clone(copy_state=True)
            self.vm.__dict__ = self.clone.__dict__
            self.clone = temp

            self.vm.resume()
            self.clone.destroy(gracefully=False)

            stress_session = self.vm.wait_for_login()

            # If cpuflags-test stopped running during the migration test, raise an exception.
            try:
                stress_session.cmd('killall cpuflags-test')
            except aexpect.ShellCmdError:
                raise error.TestFail("Cpuflags-test should work after"
                                     " migration.")

    def net_send_object(socket, obj):
        """
        Send python object over network.

        :param socket: connected socket used to send the data.
        :param obj: object to send
        """
        data = pickle.dumps(obj, pickle.HIGHEST_PROTOCOL)
        socket.sendall("%6d" % len(data))
        socket.sendall(data)

    def net_recv_object(socket, timeout=60):
        """
        Receive python object over network.

        :param socket: connected socket to receive the data from.
        :param timeout: time limit for receiving the data.
        :return: object from network
        """
        try:
            time_start = time.time()
            data = ""
            d_len = int(socket.recv(6))

            while (len(data) < d_len and (time.time() - time_start) < timeout):
                data += socket.recv(d_len - len(data))

            data = pickle.loads(data)
            return data
        except:
            error.TestFail("Failed to receive python object over the network")
            raise
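
    # Sketch of how net_send_object/net_recv_object above are meant to be used
    # (assumption: `sock` is an already-connected TCP socket; net_send_object
    # writes a 6-byte ASCII length prefix followed by the pickled payload,
    # which is exactly what net_recv_object reads back):
    #
    #     net_send_object(sock, {"status": "ok"})      # sender side
    #     obj = net_recv_object(sock, timeout=60)      # receiver side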

    class test_multi_host_migration(Test_temp):

        def test(self):
            """
            Test migration between multiple hosts.
            """
            cpu_model, extra_flags = parse_cpu_model()

            flags = HgFlags(cpu_model, extra_flags)

            logging.debug("Cpu mode flags %s.",
                          str(flags.quest_cpu_model_flags))
            logging.debug("Added flags %s.",
                          str(flags.cpumodel_unsupport_flags))
            cpuf_model = cpu_model

            for fadd in extra_flags:
                cpuf_model += ",+" + str(fadd)

            for fdel in flags.host_unsupported_flags:
                cpuf_model += ",-" + str(fdel)

            install_path = "/tmp"

            class testMultihostMigration(migration.MultihostMigration):

                def __init__(self, test, params, env):
                    migration.MultihostMigration.__init__(self, test, params,
                                                          env)

                def migration_scenario(self):
                    srchost = self.params.get("hosts")[0]
                    dsthost = self.params.get("hosts")[1]

                    def worker(mig_data):
                        vm = env.get_vm("vm1")
                        session = vm.wait_for_login(timeout=self.login_timeout)

                        install_cpuflags_test_on_vm(vm, install_path)

                        Flags = check_cpuflags_work(vm, install_path,
                                                    flags.all_possible_guest_flags)
                        logging.info("Woking CPU flags: %s", str(Flags[0]))
                        logging.info("Not working CPU flags: %s",
                                     str(Flags[1]))
                        logging.warning("Flags works even if not defined on"
                                        " guest cpu flags: %s",
                                        str(Flags[0] - flags.guest_flags))
                        logging.warning("Not tested CPU flags: %s",
                                        str(Flags[2]))
                        session.sendline("nohup dd if=/dev/[svh]da of=/tmp/"
                                         "stressblock bs=10MB count=100 &")

                        cmd = ("nohup %s/cpuflags-test --stress  %s%s &" %
                               (os.path.join(install_path, "cpu_flags"),
                                smp,
                                utils_misc.kvm_flags_to_stresstests(Flags[0] &
                                                                    flags.guest_flags)))
                        logging.debug("Guest_flags: %s",
                                      str(flags.guest_flags))
                        logging.debug("Working_flags: %s", str(Flags[0]))
                        logging.debug("Start stress on guest: %s", cmd)
                        session.sendline(cmd)

                    def check_worker(mig_data):
                        vm = env.get_vm("vm1")

                        vm.verify_illegal_instruction()

                        session = vm.wait_for_login(timeout=self.login_timeout)

                        try:
                            session.cmd('killall cpuflags-test')
                        except aexpect.ShellCmdError:
                            raise error.TestFail("The cpuflags-test program"
                                                 " should be active after"
                                                 " migration and it's not.")

                        Flags = check_cpuflags_work(vm, install_path,
                                                    flags.all_possible_guest_flags)
                        logging.info("Woking CPU flags: %s",
                                     str(Flags[0]))
                        logging.info("Not working CPU flags: %s",
                                     str(Flags[1]))
                        logging.warning("Flags works even if not defined on"
                                        " guest cpu flags: %s",
                                        str(Flags[0] - flags.guest_flags))
                        logging.warning("Not tested CPU flags: %s",
                                        str(Flags[2]))

                    self.migrate_wait(["vm1"], srchost, dsthost,
                                      worker, check_worker)

            params_b = params.copy()
            params_b["cpu_model"] = cpu_model
            mig = testMultihostMigration(test, params_b, env)
            mig.run()

    class test_multi_host_migration_onoff_cpu(Test_temp):

        def test(self):
            """
            Test migration between multiple hosts.
            """
            cpu_model, extra_flags = parse_cpu_model()

            flags = HgFlags(cpu_model, extra_flags)

            logging.debug("Cpu mode flags %s.",
                          str(flags.quest_cpu_model_flags))
            logging.debug("Added flags %s.",
                          str(flags.cpumodel_unsupport_flags))
            cpuf_model = cpu_model

            for fadd in extra_flags:
                cpuf_model += ",+" + str(fadd)

            for fdel in flags.host_unsupported_flags:
                cpuf_model += ",-" + str(fdel)

            smp = int(params["smp"])
            disable_cpus = map(lambda cpu: int(cpu),
                               params.get("disable_cpus", "").split())

            install_path = "/tmp"

            class testMultihostMigration(migration.MultihostMigration):

                def __init__(self, test, params, env):
                    migration.MultihostMigration.__init__(self, test, params,
                                                          env)
                    self.srchost = self.params.get("hosts")[0]
                    self.dsthost = self.params.get("hosts")[1]
                    self.id = {'src': self.srchost,
                               'dst': self.dsthost,
                               "type": "disable_cpu"}
                    self.migrate_count = int(self.params.get('migrate_count',
                                                             '2'))

                def ping_pong_migrate(self, sync, worker, check_worker):
                    for _ in range(self.migrate_count):
                        logging.info("File transfer not ended, starting"
                                     " a round of migration...")
                        sync.sync(True, timeout=mig_timeout)
                        if self.hostid == self.srchost:
                            self.migrate_wait(["vm1"],
                                              self.srchost,
                                              self.dsthost,
                                              start_work=worker)
                        elif self.hostid == self.dsthost:
                            self.migrate_wait(["vm1"],
                                              self.srchost,
                                              self.dsthost,
                                              check_work=check_worker)
                        tmp = self.dsthost
                        self.dsthost = self.srchost
                        self.srchost = tmp

                def migration_scenario(self):

                    sync = SyncData(self.master_id(), self.hostid, self.hosts,
                                    self.id, self.sync_server)

                    def worker(mig_data):
                        vm = env.get_vm("vm1")
                        session = vm.wait_for_login(timeout=self.login_timeout)

                        install_cpuflags_test_on_vm(vm, install_path)

                        Flags = check_cpuflags_work(vm, install_path,
                                                    flags.all_possible_guest_flags)
                        logging.info("Woking CPU flags: %s", str(Flags[0]))
                        logging.info("Not working CPU flags: %s",
                                     str(Flags[1]))
                        logging.warning("Flags works even if not defined on"
                                        " guest cpu flags: %s",
                                        str(Flags[0] - flags.guest_flags))
                        logging.warning("Not tested CPU flags: %s",
                                        str(Flags[2]))
                        for cpu in disable_cpus:
                            if cpu < smp:
                                disable_cpu(session, cpu, True)
                            else:
                                logging.warning("There is no enouth cpu"
                                                " in Guest. It is trying to"
                                                "remove cpu:%s from guest with"
                                                " smp:%s." % (cpu, smp))
                        logging.debug("Guest_flags: %s",
                                      str(flags.guest_flags))
                        logging.debug("Working_flags: %s", str(Flags[0]))

                    def check_worker(mig_data):
                        vm = env.get_vm("vm1")

                        vm.verify_illegal_instruction()

                        session = vm.wait_for_login(timeout=self.login_timeout)

                        really_disabled = check_online_cpus(session, smp,
                                                            disable_cpus)

                        not_disabled = set(really_disabled) & set(disable_cpus)
                        if not_disabled:
                            raise error.TestFail("Some of disabled cpus are "
                                                 "online. This shouldn't "
                                                 "happen. Cpus disabled on "
                                                 "srchost:%s, Cpus not "
                                                 "disabled on dsthost:%s" %
                                                 (disable_cpus, not_disabled))

                        Flags = check_cpuflags_work(vm, install_path,
                                                    flags.all_possible_guest_flags)
                        logging.info("Woking CPU flags: %s",
                                     str(Flags[0]))
                        logging.info("Not working CPU flags: %s",
                                     str(Flags[1]))
                        logging.warning("Flags works even if not defined on"
                                        " guest cpu flags: %s",
                                        str(Flags[0] - flags.guest_flags))
                        logging.warning("Not tested CPU flags: %s",
                                        str(Flags[2]))

                    self.ping_pong_migrate(sync, worker, check_worker)

            params_b = params.copy()
            params_b["cpu_model"] = cpu_model
            mig = testMultihostMigration(test, params_b, env)
            mig.run()

    test_type = params.get("test_type")
    if (test_type in locals()):
        tests_group = locals()[test_type]
        if params.get("cpu_model"):
            tests_group()
        else:
            cpu_models = (set(get_cpu_models_supported_by_host()) -
                          set(cpu_model_black_list))
            logging.info("Start test with cpu models %s" % (str(cpu_models)))
            failed = []
            for cpumodel in cpu_models:
                params["cpu_model"] = cpumodel
                try:
                    tests_group()
                except:
                    print_exception(tests_group)
                    failed.append(cpumodel)
            if failed != []:
                raise error.TestFail("Test of cpu models %s failed." %
                                     (str(failed)))
    else:
        raise error.TestFail("Test group '%s' is not defined in"
                             " cpuflags test" % test_type)

Example 41

Project: tp-qemu
Source File: floppy.py
View license
@error.context_aware
def run(test, params, env):
    """
    Test virtual floppy of guest:

    1) Create a floppy disk image on host
    2) Start the guest with this floppy image.
    3) Make a file system on guest virtual floppy.
    4) Calculate md5sum value of a file and copy it into floppy.
    5) Verify whether the md5sum does match.

    :param test: QEMU test object.
    :param params: Dictionary with the test parameters.
    :param env: Dictionary with test environment.
    """
    source_file = params["source_file"]
    dest_file = params["dest_file"]
    login_timeout = int(params.get("login_timeout", 360))
    floppy_prepare_timeout = int(params.get("floppy_prepare_timeout", 360))
    guest_floppy_path = params["guest_floppy_path"]

    def create_floppy(params, prepare=True):
        """
        Creates 'new' floppy with one file on it

        :param params: parameters for test
        :param prepare: if True then prepare the floppy image.

        :return: path to new floppy file.
        """
        error.context("creating test floppy", logging.info)
        floppy = params["floppy_name"]
        if not os.path.isabs(floppy):
            floppy = os.path.join(data_dir.get_data_dir(), floppy)
        if prepare:
            utils.run("dd if=/dev/zero of=%s bs=512 count=2880" % floppy)
        return floppy

    def cleanup_floppy(path):
        """ Removes created floppy """
        error.context("cleaning up temp floppy images", logging.info)
        os.remove("%s" % path)

    def lazy_copy(vm, dst_path, check_path, copy_timeout=None, dsize=None):
        """
        Start disk load. Periodically append data to check_path and dst_path.

        :param vm: VM where to find a disk.
        :param dst_path: Path to the destination file.
        :param check_path: Path to the check file written in parallel.
        :param copy_timeout: Timeout for the copy command.
        :param dsize: Size of data block which is periodically copied.
        """
        if copy_timeout is None:
            copy_timeout = 120
        session = vm.wait_for_login(timeout=login_timeout)
        cmd = ('nohup bash -c "while [ true ]; do echo \"1\" | '
               'tee -a %s >> %s; sleep 0.1; done" 2> /dev/null &' %
               (check_path, dst_path))
        pid = re.search(r"\[.+\] (.+)",
                        session.cmd_output(cmd, timeout=copy_timeout))
        return pid.group(1)

    class MiniSubtest(object):

        def __new__(cls, *args, **kargs):
            self = super(MiniSubtest, cls).__new__(cls)
            ret = None
            exc_info = None
            if args is None:
                args = []
            try:
                try:
                    ret = self.test(*args, **kargs)
                except Exception:
                    exc_info = sys.exc_info()
            finally:
                if hasattr(self, "clean"):
                    try:
                        self.clean()
                    except Exception:
                        if exc_info is None:
                            raise
                    if exc_info:
                        raise exc_info[0], exc_info[1], exc_info[2]
            return ret

    class test_singlehost(MiniSubtest):

        def test(self):
            create_floppy(params)
            params["start_vm"] = "yes"
            vm_name = params.get("main_vm", "vm1")
            env_process.preprocess_vm(test, params, env, vm_name)
            vm = env.get_vm(vm_name)
            vm.verify_alive()
            self.session = vm.wait_for_login(timeout=login_timeout)

            self.dest_dir = params.get("mount_dir")
            # If mount_dir specified, treat guest as a Linux OS
            # Some Linux distributions do not load the floppy module at boot and
            # Windows needs time to load and init the floppy driver
            if self.dest_dir:
                lsmod = self.session.cmd("lsmod")
                if 'floppy' not in lsmod:
                    self.session.cmd("modprobe floppy")
            else:
                time.sleep(20)

            error.context("Formating floppy disk before using it")
            format_cmd = params["format_floppy_cmd"]
            self.session.cmd(format_cmd, timeout=120)
            logging.info("Floppy disk formatted successfully")

            if self.dest_dir:
                error.context("Mounting floppy")
                self.session.cmd("mount %s %s" % (guest_floppy_path,
                                                  self.dest_dir))
            error.context("Testing floppy")
            self.session.cmd(params["test_floppy_cmd"])

            error.context("Copying file to the floppy")
            md5_cmd = params.get("md5_cmd")
            if md5_cmd:
                md5_source = self.session.cmd("%s %s" % (md5_cmd, source_file))
                try:
                    md5_source = md5_source.split(" ")[0]
                except IndexError:
                    error.TestError("Failed to get md5 from source file,"
                                    " output: '%s'" % md5_source)
            else:
                md5_source = None

            self.session.cmd("%s %s %s" % (params["copy_cmd"], source_file,
                                           dest_file))
            logging.info("Succeed to copy file '%s' into floppy disk" %
                         source_file)

            error.context("Checking if the file is unchanged after copy")
            if md5_cmd:
                md5_dest = self.session.cmd("%s %s" % (md5_cmd, dest_file))
                try:
                    md5_dest = md5_dest.split(" ")[0]
                except IndexError:
                    error.TestError("Failed to get md5 from dest file,"
                                    " output: '%s'" % md5_dest)
                if md5_source != md5_dest:
                    raise error.TestFail("File changed after copy to floppy")
            else:
                md5_dest = None
                self.session.cmd("%s %s %s" % (params["diff_file_cmd"],
                                               source_file, dest_file))

        def clean(self):
            clean_cmd = "%s %s" % (params["clean_cmd"], dest_file)
            self.session.cmd(clean_cmd)
            if self.dest_dir:
                self.session.cmd("umount %s" % self.dest_dir)
            self.session.close()

    class Multihost(MiniSubtest):

        def test(self):
            error.context("Preparing migration env and floppies.", logging.info)
            mig_protocol = params.get("mig_protocol", "tcp")
            self.mig_type = migration.MultihostMigration
            if mig_protocol == "fd":
                self.mig_type = migration.MultihostMigrationFd
            if mig_protocol == "exec":
                self.mig_type = migration.MultihostMigrationExec
            if "rdma" in mig_protocol:
                self.mig_type = migration.MultihostMigrationRdma

            self.vms = params.get("vms").split(" ")
            self.srchost = params["hosts"][0]
            self.dsthost = params["hosts"][1]
            self.is_src = params["hostid"] == self.srchost
            self.mig = self.mig_type(test, params, env, False, )

            if self.is_src:
                vm = env.get_vm(self.vms[0])
                vm.destroy()
                self.floppy = create_floppy(params)
                self.floppy_dir = os.path.dirname(self.floppy)
                params["start_vm"] = "yes"
                env_process.process(test, params, env,
                                    env_process.preprocess_image,
                                    env_process.preprocess_vm)
                vm = env.get_vm(self.vms[0])
                vm.wait_for_login(timeout=login_timeout)
            else:
                self.floppy = create_floppy(params, False)
                self.floppy_dir = os.path.dirname(self.floppy)

        def clean(self):
            self.mig.cleanup()
            if self.is_src:
                cleanup_floppy(self.floppy)

    class test_multihost_write(Multihost):

        def test(self):
            super(test_multihost_write, self).test()

            copy_timeout = int(params.get("copy_timeout", 480))
            self.mount_dir = params["mount_dir"]
            format_floppy_cmd = params["format_floppy_cmd"]
            check_copy_path = params["check_copy_path"]

            pid = None
            sync_id = {'src': self.srchost,
                       'dst': self.dsthost,
                       "type": "file_trasfer"}
            filename = "orig"
            src_file = os.path.join(self.mount_dir, filename)

            if self.is_src:  # Starts in source
                vm = env.get_vm(self.vms[0])
                session = vm.wait_for_login(timeout=login_timeout)

                if self.mount_dir:
                    session.cmd("rm -f %s" % (src_file))
                    session.cmd("rm -f %s" % (check_copy_path))
                # If mount_dir specified, treat guest as a Linux OS
                # Some Linux distributions do not load the floppy module at boot
                # and Windows needs time to load and init the floppy driver
                error.context("Prepare floppy for writing.", logging.info)
                if self.mount_dir:
                    lsmod = session.cmd("lsmod")
                    if 'floppy' not in lsmod:
                        session.cmd("modprobe floppy")
                else:
                    time.sleep(20)

                session.cmd(format_floppy_cmd)

                error.context("Mount and copy data.", logging.info)
                if self.mount_dir:
                    session.cmd("mount %s %s" % (guest_floppy_path,
                                                 self.mount_dir),
                                timeout=30)

                error.context("File copying test.", logging.info)

                pid = lazy_copy(vm, src_file, check_copy_path, copy_timeout)

            sync = SyncData(self.mig.master_id(), self.mig.hostid,
                            self.mig.hosts, sync_id, self.mig.sync_server)

            pid = sync.sync(pid, timeout=floppy_prepare_timeout)[self.srchost]

            self.mig.migrate_wait([self.vms[0]], self.srchost, self.dsthost)

            if not self.is_src:  # Starts in destination
                vm = env.get_vm(self.vms[0])
                session = vm.wait_for_login(timeout=login_timeout)
                error.context("Wait for copy finishing.", logging.info)
                status = session.cmd_status("kill %s" % pid,
                                            timeout=copy_timeout)
                if status != 0:
                    raise error.TestFail("Copy process was terminatted with"
                                         " error code %s" % (status))

                session.cmd_status("kill -s SIGINT %s" % (pid),
                                   timeout=copy_timeout)

                error.context("Check floppy file checksum.", logging.info)
                md5_cmd = params.get("md5_cmd", "md5sum")
                if md5_cmd:
                    md5_floppy = session.cmd("%s %s" % (md5_cmd, src_file))
                    try:
                        md5_floppy = md5_floppy.split(" ")[0]
                    except IndexError:
                        error.TestError("Failed to get md5 from source file,"
                                        " output: '%s'" % md5_floppy)
                    md5_check = session.cmd("%s %s" % (md5_cmd, check_copy_path))
                    try:
                        md5_check = md5_check.split(" ")[0]
                    except IndexError:
                        error.TestError("Failed to get md5 from dst file,"
                                        " output: '%s'" % md5_floppy)
                    if md5_check != md5_floppy:
                        raise error.TestFail("There is mistake in copying, "
                                             "it is possible to check file on vm.")

                session.cmd("rm -f %s" % (src_file))
                session.cmd("rm -f %s" % (check_copy_path))

            self.mig._hosts_barrier(self.mig.hosts, self.mig.hosts,
                                    'finish_floppy_test', login_timeout)

        def clean(self):
            super(test_multihost_write, self).clean()

    class test_multihost_eject(Multihost):

        def test(self):
            super(test_multihost_eject, self).test()

            self.mount_dir = params.get("mount_dir", None)
            format_floppy_cmd = params["format_floppy_cmd"]
            floppy = params["floppy_name"]
            second_floppy = params["second_floppy_name"]
            if not os.path.isabs(floppy):
                floppy = os.path.join(data_dir.get_data_dir(), floppy)
            if not os.path.isabs(second_floppy):
                second_floppy = os.path.join(data_dir.get_data_dir(),
                                             second_floppy)
            if not self.is_src:
                self.floppy = create_floppy(params)

            pid = None
            sync_id = {'src': self.srchost,
                       'dst': self.dsthost,
                       "type": "file_trasfer"}
            filename = "orig"
            src_file = os.path.join(self.mount_dir, filename)

            if self.is_src:  # Starts in source
                vm = env.get_vm(self.vms[0])
                session = vm.wait_for_login(timeout=login_timeout)

                if self.mount_dir:   # If linux
                    session.cmd("rm -f %s" % (src_file))
                # If mount_dir specified, treat guest as a Linux OS
                # Some Linux distributions do not load the floppy module at boot
                # and Windows needs time to load and init the floppy driver
                error.context("Prepare floppy for writing.", logging.info)
                if self.mount_dir:   # If linux
                    lsmod = session.cmd("lsmod")
                    if 'floppy' not in lsmod:
                        session.cmd("modprobe floppy")
                else:
                    time.sleep(20)

                if floppy not in vm.monitor.info("block"):
                    raise error.TestFail("Wrong floppy image is placed in vm.")

                try:
                    session.cmd(format_floppy_cmd)
                except aexpect.ShellCmdError, e:
                    if e.status == 1:
                        logging.error("First access to floppy failed, "
                                      " Trying a second time as a workaround")
                        session.cmd(format_floppy_cmd)

                error.context("Check floppy")
                if self.mount_dir:   # If linux
                    session.cmd("mount %s %s" % (guest_floppy_path,
                                                 self.mount_dir), timeout=30)
                    session.cmd("umount %s" % (self.mount_dir), timeout=30)

                written = None
                if self.mount_dir:
                    filepath = os.path.join(self.mount_dir, "test.txt")
                    session.cmd("echo 'test' > %s" % (filepath))
                    output = session.cmd("cat %s" % (filepath))
                    written = "test\n"
                else:   # Windows version.
                    filepath = "A:\\test.txt"
                    session.cmd("echo test > %s" % (filepath))
                    output = session.cmd("type %s" % (filepath))
                    written = "test \n\n"
                if output != written:
                    raise error.TestFail("Data read from the floppy differs"
                                         "from the data written to it."
                                         " EXPECTED: %s GOT: %s" %
                                         (repr(written), repr(output)))

                error.context("Change floppy.")
                vm.monitor.cmd("eject floppy0")
                vm.monitor.cmd("change floppy %s" % (second_floppy))
                session.cmd(format_floppy_cmd)

                error.context("Mount and copy data")
                if self.mount_dir:   # If linux
                    session.cmd("mount %s %s" % (guest_floppy_path,
                                                 self.mount_dir), timeout=30)

                if second_floppy not in vm.monitor.info("block"):
                    raise error.TestFail("Wrong floppy image is placed in vm.")

            sync = SyncData(self.mig.master_id(), self.mig.hostid,
                            self.mig.hosts, sync_id, self.mig.sync_server)

            pid = sync.sync(pid, timeout=floppy_prepare_timeout)[self.srchost]

            self.mig.migrate_wait([self.vms[0]], self.srchost, self.dsthost)

            if not self.is_src:  # Starts in destination
                vm = env.get_vm(self.vms[0])
                session = vm.wait_for_login(timeout=login_timeout)
                written = None
                if self.mount_dir:
                    filepath = os.path.join(self.mount_dir, "test.txt")
                    session.cmd("echo 'test' > %s" % (filepath))
                    output = session.cmd("cat %s" % (filepath))
                    written = "test\n"
                else:   # Windows version.
                    filepath = "A:\\test.txt"
                    session.cmd("echo test > %s" % (filepath))
                    output = session.cmd("type %s" % (filepath))
                    written = "test \n\n"
                if output != written:
                    raise error.TestFail("Data read from the floppy differs"
                                         "from the data written to it."
                                         " EXPECTED: %s GOT: %s" %
                                         (repr(written), repr(output)))

            self.mig._hosts_barrier(self.mig.hosts, self.mig.hosts,
                                    'finish_floppy_test', login_timeout)

        def clean(self):
            super(test_multihost_eject, self).clean()

    test_type = params.get("test_type", "test_singlehost")
    if (test_type in locals()):
        tests_group = locals()[test_type]
        tests_group()
    else:
        raise error.TestFail("Test group '%s' is not defined in"
                             " migration_with_dst_problem test" % test_type)

Example 42

View license
@error.context_aware
def run(test, params, env):
    """
    KVM migration with destination problems.
    Contains a group of tests for testing qemu behavior when some
    problems happen on the destination side.

    Tests are described in the test class docstrings below.

    Test needs params: nettype = bridge.

    :param test: kvm test object.
    :param params: Dictionary with test parameters.
    :param env: Dictionary with the test environment.
    """
    login_timeout = int(params.get("login_timeout", 360))
    mig_timeout = float(params.get("mig_timeout", "3600"))
    mig_protocol = params.get("migration_protocol", "tcp")

    test_rand = None
    mount_path = None
    while mount_path is None or os.path.exists(mount_path):
        test_rand = utils.generate_random_string(3)
        mount_path = ("%s/ni_mount_%s" %
                      (data_dir.get_data_dir(), test_rand))

    mig_dst = os.path.join(mount_path, "mig_dst")

    migration_exec_cmd_src = params.get("migration_exec_cmd_src",
                                        "gzip -c > %s")
    migration_exec_cmd_src = (migration_exec_cmd_src % (mig_dst))

    class MiniSubtest(object):

        def __new__(cls, *args, **kargs):
            self = super(MiniSubtest, cls).__new__(cls)
            ret = None
            exc_info = None
            if args is None:
                args = []
            try:
                try:
                    ret = self.test(*args, **kargs)
                except Exception:
                    exc_info = sys.exc_info()
            finally:
                if hasattr(self, "clean"):
                    try:
                        self.clean()
                    except Exception:
                        if exc_info is None:
                            raise
                    if exc_info:
                        raise exc_info[0], exc_info[1], exc_info[2]
            return ret

    def control_service(session, service, init_service, action, timeout=60):
        """
        Control a service on the guest.

        :param session: Guest session.
        :param service: Service to control.
        :param action: Action to perform on the service (start|stop|restart).
        :param init_service: Name of the service for the old-style service control.
        """
        status = utils_misc.get_guest_service_status(session, service,
                                                     service_former=init_service)
        if action == "start" and status == "active":
            logging.debug("%s already started, no need start it again.",
                          service)
            return
        if action == "stop" and status == "inactive":
            logging.debug("%s already stopped, no need stop it again.",
                          service)
            return
        try:
            session.cmd("systemctl --version", timeout=timeout)
            session.cmd("systemctl %s %s.service" % (action, service),
                        timeout=timeout)
        except:
            session.cmd("service %s %s" % (init_service, action),
                        timeout=timeout)

    def set_nfs_server(vm, share_cfg):
        """
        Start nfs server on guest.

        :param vm: Virtual machine on which to start the NFS server.
        :param share_cfg: Export line written to /etc/exports.
        """
        session = vm.wait_for_login(timeout=login_timeout)
        cmd = "echo '%s' > /etc/exports" % (share_cfg)
        control_service(session, "nfs-server", "nfs", "stop")
        session.cmd(cmd)
        control_service(session, "nfs-server", "nfs", "start")
        session.cmd("iptables -F")
        session.close()

    def umount(mount_path):
        """
        Unmount the NFS share mounted at mount_path.

        :param mount_path: path where nfs dir will be placed.
        """
        utils.run("umount -f %s" % (mount_path))

    def create_file_disk(dst_path, size):
        """
        Create file with size and create there ext3 filesystem.

        :param dst_path: Path to file.
        :param size: Size of file in MB
        """
        utils.run("dd if=/dev/zero of=%s bs=1M count=%s" % (dst_path, size))
        utils.run("mkfs.ext3 -F %s" % (dst_path))

    def mount(disk_path, mount_path, options=None):
        """
        Mount Disk to path

        :param disk_path: Path to disk
        :param mount_path: Path where disk will be mounted.
        :param options: String with options for mount
        """
        if options is None:
            options = ""
        else:
            options = "%s" % options

        utils.run("mount %s %s %s" % (options, disk_path, mount_path))

    def find_disk_vm(vm, disk_serial):
        """
        Find disk on vm which ends with disk_serial

        :param vm: VM where to find a disk.
        :param disk_serial: Suffix of the disk id.

        :return: string Disk path
        """
        session = vm.wait_for_login(timeout=login_timeout)

        disk_path = os.path.join("/", "dev", "disk", "by-id")
        disks = session.cmd("ls %s" % disk_path).split("\n")
        session.close()
        disk = filter(lambda x: x.endswith(disk_serial), disks)
        if not disk:
            return None
        return os.path.join(disk_path, disk[0])

    def prepare_disk(vm, disk_path, mount_path):
        """
        Create an ext3 filesystem on the disk and mount it to mount_path.

        :param vm: VM where to find a disk.
        :param disk_path: Path to disk in guest system.
        :param mount_path: Path where the disk will be mounted.
        """
        session = vm.wait_for_login(timeout=login_timeout)
        session.cmd("mkfs.ext3 -F %s" % (disk_path))
        session.cmd("mount %s %s" % (disk_path, mount_path))
        session.close()

    def disk_load(vm, src_path, dst_path, copy_timeout=None, dsize=None):
        """
        Start disk load. Cyclic copy from src_path to dst_path.

        :param vm: VM where to find a disk.
        :param src_path: Source of data
        :param dst_path: Path to destination
        :param copy_timeout: Timeout for copy
        :param dsize: Size of data block which is periodically copied.
        """
        if dsize is None:
            dsize = 100
        session = vm.wait_for_login(timeout=login_timeout)
        cmd = ("nohup /bin/bash -c 'while true; do dd if=%s of=%s bs=1M "
               "count=%s; done;' 2> /dev/null &" % (src_path, dst_path, dsize))
        pid = re.search(r"\[.+\] (.+)",
                        session.cmd_output(cmd, timeout=copy_timeout))
        return pid.group(1)

    class IscsiServer_tgt(object):

        """
        Class for set and start Iscsi server.
        """

        def __init__(self):
            self.server_name = "autotest_guest_" + test_rand
            self.user = "user1"
            self.passwd = "pass"
            self.config = """
<target %s:dev01>
    backing-store %s
    incominguser %s %s
</target>
"""

        def set_iscsi_server(self, vm_ds, disk_path, disk_size):
            """
            Set iscsi server with some variant.

            :param vm_ds: VM where the iscsi server should be started.
            :param disk_path: Path where the disk should be placed.
            :param disk_size: Size of the new disk.
            """
            session = vm_ds.wait_for_login(timeout=login_timeout)

            session.cmd("dd if=/dev/zero of=%s bs=1M count=%s" % (disk_path,
                                                                  disk_size))
            status, output = session.cmd_status_output("setenforce 0")
            if status not in [0, 127]:
                logging.warn("Function setenforce fails.\n %s" % (output))

            config = self.config % (self.server_name, disk_path,
                                    self.user, self.passwd)
            cmd = "cat > /etc/tgt/conf.d/virt.conf << EOF" + config + "EOF"
            control_service(session, "tgtd", "tgtd", "stop")
            session.sendline(cmd)
            control_service(session, "tgtd", "tgtd", "start")
            session.cmd("iptables -F")
            session.close()

        def find_disk(self):
            disk_path = os.path.join("/", "dev", "disk", "by-path")
            disks = utils.run("ls %s" % disk_path).stdout.split("\n")
            disk = filter(lambda x: self.server_name in x, disks)
            if not disk:
                return None
            return os.path.join(disk_path, disk[0].strip())

        def connect(self, vm_ds):
            """
            Connect to iscsi server on guest.

            :param vm_ds: Guest where the iscsi server is running.

            :return: path where disk is connected.
            """
            ip_dst = vm_ds.get_address()
            utils.run("iscsiadm -m discovery -t st -p %s" % (ip_dst))

            server_ident = ('iscsiadm -m node --targetname "%s:dev01"'
                            ' --portal %s' % (self.server_name, ip_dst))
            utils.run("%s --op update --name node.session.auth.authmethod"
                      " --value CHAP" % (server_ident))
            utils.run("%s --op update --name node.session.auth.username"
                      " --value %s" % (server_ident, self.user))
            utils.run("%s --op update --name node.session.auth.password"
                      " --value %s" % (server_ident, self.passwd))
            utils.run("%s --login" % (server_ident))
            time.sleep(1.0)
            return self.find_disk()

        def disconnect(self):
            server_ident = ('iscsiadm -m node --targetname "%s:dev01"' %
                            (self.server_name))
            utils.run("%s --logout" % (server_ident))

    class IscsiServer(object):

        """
        Iscsi server implementation interface.
        """

        def __init__(self, iscsi_type, *args, **kargs):
            if iscsi_type == "tgt":
                self.ic = IscsiServer_tgt(*args, **kargs)
            else:
                raise NotImplementedError()

        def __getattr__(self, name):
            if self.ic:
                return self.ic.__getattribute__(name)
            raise AttributeError("Cannot find attribute %s in class" % name)

    class test_read_only_dest(MiniSubtest):

        """
        Migration to read-only destination by using a migration to file.

        1) Start guest with NFS server.
        2) Config NFS server share for read-only.
        3) Mount the read-only share to host.
        4) Start second guest and try to migrate to read-only dest.

        result) Migration should fail with error message about read-only dst.
        """

        def test(self):
            if params.get("nettype") != "bridge":
                raise error.TestNAError("Unable start test without params"
                                        " nettype=bridge.")

            vm_ds = env.get_vm("virt_test_vm2_data_server")
            vm_guest = env.get_vm("virt_test_vm1_guest")
            ro_timeout = int(params.get("read_only_timeout", "480"))
            exp_str = r".*Read-only file system.*"
            utils.run("mkdir -p %s" % (mount_path))

            vm_ds.verify_alive()
            vm_guest.create()
            vm_guest.verify_alive()

            set_nfs_server(vm_ds, "/mnt *(ro,async,no_root_squash)")

            mount_src = "%s:/mnt" % (vm_ds.get_address())
            mount(mount_src, mount_path,
                  "-o hard,timeo=14,rsize=8192,wsize=8192")
            vm_guest.migrate(mig_timeout, mig_protocol,
                             not_wait_for_migration=True,
                             migration_exec_cmd_src=migration_exec_cmd_src,
                             env=env)

            if not utils_misc.wait_for(lambda: process_output_check(
                                       vm_guest.process, exp_str),
                                       timeout=ro_timeout, first=2):
                raise error.TestFail("The Read-only file system warning not"
                                     " come in time limit.")

        def clean(self):
            if os.path.exists(mig_dst):
                os.remove(mig_dst)
            if os.path.exists(mount_path):
                umount(mount_path)
                os.rmdir(mount_path)

    class test_low_space_dest(MiniSubtest):

        """
        Migrate to destination with low space.

        1) Start guest.
        2) Create disk with low space.
        3) Try to migrate to the disk.

        result) Migration should fail with a "No space left on device" warning.
        """

        def test(self):
            self.disk_path = None
            while self.disk_path is None or os.path.exists(self.disk_path):
                self.disk_path = ("%s/disk_%s" %
                                  (test.tmpdir, utils.generate_random_string(3)))

            disk_size = utils.convert_data_size(params.get("disk_size", "10M"),
                                                default_sufix='M')
            disk_size /= 1024 * 1024    # To MB.

            exp_str = r".*gzip: stdout: No space left on device.*"
            vm_guest = env.get_vm("virt_test_vm1_guest")
            utils.run("mkdir -p %s" % (mount_path))

            vm_guest.verify_alive()
            vm_guest.wait_for_login(timeout=login_timeout)

            create_file_disk(self.disk_path, disk_size)
            mount(self.disk_path, mount_path, "-o loop")

            vm_guest.migrate(mig_timeout, mig_protocol,
                             not_wait_for_migration=True,
                             migration_exec_cmd_src=migration_exec_cmd_src,
                             env=env)

            if not utils_misc.wait_for(lambda: process_output_check(
                                       vm_guest.process, exp_str),
                                       timeout=60, first=1):
                raise error.TestFail("The migration to destination with low "
                                     "storage space didn't fail as it should.")

        def clean(self):
            if os.path.exists(mount_path):
                umount(mount_path)
                os.rmdir(mount_path)
            if os.path.exists(self.disk_path):
                os.remove(self.disk_path)

    class test_extensive_io(MiniSubtest):

        """
        Abstract class for migration after extensive I/O. It only defines
        the basic functionality and the interface for the other tests.

        1) Start ds_guest which starts data server.
        2) Create disk for data stress in ds_guest.
        3) Share and prepare disk from ds_guest.
        4) Mount the disk to mount_path.
        5) Create disk for second guest in the mounted path.
        6) Start second guest with prepared disk.
        7) Start stress on the prepared disk on second guest.
        8) Wait a few seconds.
        9) Restart the data server.
        10) Migrate second guest.

        result) Migration should be successful.
        """

        def test(self):
            self.copier_pid = None
            if params.get("nettype") != "bridge":
                raise error.TestNAError("Unable start test without params"
                                        " nettype=bridge.")

            self.disk_serial = params.get("drive_serial_image2_vm1",
                                          "nfs-disk-image2-vm1")
            self.disk_serial_src = params.get("drive_serial_image1_vm1",
                                              "root-image1-vm1")
            self.guest_mount_path = params.get("guest_disk_mount_path", "/mnt")
            self.copy_timeout = int(params.get("copy_timeout", "1024"))

            self.copy_block_size = params.get("copy_block_size", "100M")
            self.copy_block_size = utils.convert_data_size(
                self.copy_block_size,
                "M")
            self.disk_size = "%s" % (self.copy_block_size * 1.4)
            self.copy_block_size /= 1024 * 1024

            self.server_recover_timeout = (
                int(params.get("server_recover_timeout", "240")))

            utils.run("mkdir -p %s" % (mount_path))

            self.test_params()
            self.config()

            self.vm_guest_params = params.copy()
            self.vm_guest_params["images_base_dir_image2_vm1"] = mount_path
            self.vm_guest_params["image_name_image2_vm1"] = "ni_mount_%s/test" % (test_rand)
            self.vm_guest_params["image_size_image2_vm1"] = self.disk_size
            self.vm_guest_params = self.vm_guest_params.object_params("vm1")
            self.image2_vm_guest_params = (self.vm_guest_params.
                                           object_params("image2"))

            env_process.preprocess_image(test,
                                         self.image2_vm_guest_params,
                                         env)
            self.vm_guest.create(params=self.vm_guest_params)

            self.vm_guest.verify_alive()
            self.vm_guest.wait_for_login(timeout=login_timeout)
            self.workload()

            self.restart_server()

            self.vm_guest.migrate(mig_timeout, mig_protocol, env=env)

            try:
                self.vm_guest.verify_alive()
                self.vm_guest.wait_for_login(timeout=login_timeout)
            except aexpect.ExpectTimeoutError:
                raise error.TestFail("Migration should be successful.")

        def test_params(self):
            """
            Test specific params. Could be implemented in inherited class.
            """
            pass

        def config(self):
            """
            Test specific config.
            """
            raise NotImplementedError()

        def workload(self):
            disk_path = find_disk_vm(self.vm_guest, self.disk_serial)
            if disk_path is None:
                raise error.TestFail("It was impossible to find disk on VM")

            prepare_disk(self.vm_guest, disk_path, self.guest_mount_path)

            disk_path_src = find_disk_vm(self.vm_guest, self.disk_serial_src)
            dst_path = os.path.join(self.guest_mount_path, "test.data")
            self.copier_pid = disk_load(self.vm_guest, disk_path_src, dst_path,
                                        self.copy_timeout, self.copy_block_size)

        def restart_server(self):
            raise NotImplementedError()

        def clean_test(self):
            """
            Test specific cleanup.
            """
            pass

        def clean(self):
            if self.copier_pid:
                try:
                    if self.vm_guest.is_alive():
                        session = self.vm_guest.wait_for_login(timeout=login_timeout)
                        session.cmd("kill -9 %s" % (self.copier_pid))
                except:
                    logging.warn("It was impossible to stop copier. Something "
                                 "probably happened with GUEST or NFS server.")

            if params.get("kill_vm") == "yes":
                if self.vm_guest.is_alive():
                    self.vm_guest.destroy()
                    utils_misc.wait_for(lambda: self.vm_guest.is_dead(), 30,
                                        2, 2, "Waiting for dying of guest.")
                qemu_img = qemu_storage.QemuImg(self.image2_vm_guest_params,
                                                mount_path,
                                                None)
                qemu_img.check_image(self.image2_vm_guest_params,
                                     mount_path)

            self.clean_test()

    class test_extensive_io_nfs(test_extensive_io):

        """
        Migrate after extensive io.

        1) Start ds_guest which starts NFS server.
        2) Create disk for data stress in ds_guest.
        3) Share disk over NFS.
        4) Mount the disk to mount_path
        5) Create disk for second guest in the mounted path.
        6) Start second guest with prepared disk.
        7) Start stress on the prepared disk on second guest.
        8) Wait a few seconds.
        9) Restart the NFS server.
        10) Migrate second guest.

        result) Migration should be successful.
        """

        def config(self):
            vm_ds = env.get_vm("virt_test_vm2_data_server")
            self.vm_guest = env.get_vm("vm1")
            self.image2_vm_guest_params = None
            self.copier_pid = None
            self.qemu_img = None

            vm_ds.verify_alive()
            self.control_session_ds = vm_ds.wait_for_login(timeout=login_timeout)

            set_nfs_server(vm_ds, "/mnt *(rw,async,no_root_squash)")

            mount_src = "%s:/mnt" % (vm_ds.get_address())
            mount(mount_src, mount_path,
                  "-o hard,timeo=14,rsize=8192,wsize=8192")

        def restart_server(self):
            time.sleep(10)  # Wait a while until the copy starts working.
            control_service(self.control_session_ds, "nfs-server",
                            "nfs", "stop")  # Stop NFS server
            time.sleep(5)
            control_service(self.control_session_ds, "nfs-server",
                            "nfs", "start")  # Start NFS server

            """
            Touch waits until all previous requests are invalidated
            (NFS grace period). Without the grace period the qemu start takes
            too long and the timers for machine creation die.
            """
            qemu_img = qemu_storage.QemuImg(self.image2_vm_guest_params,
                                            mount_path,
                                            None)
            utils.run("touch %s" % (qemu_img.image_filename),
                      self.server_recover_timeout)

        def clean_test(self):
            if os.path.exists(mount_path):
                umount(mount_path)
                os.rmdir(mount_path)

    class test_extensive_io_iscsi(test_extensive_io):

        """
        Migrate after extensive io.

        1) Start ds_guest which starts iscsi server.
        2) Create disk for data stress in ds_guest.
        3) Share disk over iscsi.
        4) Join to disk on host.
        5) Prepare partition on the disk.
        6) Mount the disk to mount_path
        7) Create disk for second guest in the mounted path.
        8) Start second guest with prepared disk.
        9) Start stress on the prepared disk on second guest.
        10) Wait a few seconds.
        11) Restart iscsi server.
        12) Migrate second guest.

        result) Migration should be successful.
        """

        def test_params(self):
            self.iscsi_variant = params.get("iscsi_variant", "tgt")
            self.ds_disk_path = os.path.join(self.guest_mount_path, "test.img")

        def config(self):
            vm_ds = env.get_vm("virt_test_vm2_data_server")
            self.vm_guest = env.get_vm("vm1")
            self.image2_vm_guest_params = None
            self.copier_pid = None
            self.qemu_img = None

            vm_ds.verify_alive()
            self.control_session_ds = vm_ds.wait_for_login(timeout=login_timeout)

            self.isci_server = IscsiServer("tgt")
            disk_path = os.path.join(self.guest_mount_path, "disk1")
            self.isci_server.set_iscsi_server(vm_ds, disk_path,
                                              (int(float(self.disk_size) * 1.1) / (1024 * 1024)))
            self.host_disk_path = self.isci_server.connect(vm_ds)

            utils.run("mkfs.ext3 -F %s" % (self.host_disk_path))
            mount(self.host_disk_path, mount_path)

        def restart_server(self):
            time.sleep(10)  # Wait a while until the copy starts working.
            control_service(self.control_session_ds, "tgtd",
                            "tgtd", "stop", 240)  # Stop Iscsi server
            time.sleep(5)
            control_service(self.control_session_ds, "tgtd",
                            "tgtd", "start", 240)  # Start Iscsi server

            """
            Wait until the iscsi server is accessible again after the
            restart.
            """
            qemu_img = qemu_storage.QemuImg(self.image2_vm_guest_params,
                                            mount_path,
                                            None)
            utils.run("touch %s" % (qemu_img.image_filename),
                      self.server_recover_timeout)

        def clean_test(self):
            if os.path.exists(mount_path):
                umount(mount_path)
                os.rmdir(mount_path)
            if os.path.exists(self.host_disk_path):
                self.isci_server.disconnect()

    test_type = params.get("test_type")
    if (test_type in locals()):
        tests_group = locals()[test_type]
        tests_group()
    else:
        raise error.TestFail("Test group '%s' is not defined in"
                             " migration_with_dst_problem test" % test_type)

Example 43

Project: pyomo
Source File: GLPK_old.py
View license
    def process_soln_file(self, results):
        soln  = None
        pdata = self._glpfile
        psoln = self._rawfile

        prob = results.problem
        solv = results.solver

        prob.name = 'unknown'   # will ostensibly get updated

        # Step 1: Make use of the GLPK's machine parseable format (--wglp) to
        #    collect variable and constraint names.
        glp_line_count = ' -- File not yet opened'

        # The trick for getting the variable names correctly matched to their
        # values is the note that the --wglp option outputs them in the same
        # order as the --write output.
        # Note that documentation for these formats is available from the GLPK
        # documentation of 'glp_read_prob' and 'glp_write_sol'
        variable_names = dict()    # cols
        constraint_names = dict()  # rows
        obj_name = 'objective'

        try:
            f = open(pdata, 'r')

            glp_line_count = 1
            pprob, ptype, psense, prows, pcols, pnonz = f.readline().split()
            prows = int(prows)  # fails if not a number; intentional
            pcols = int(pcols)  # fails if not a number; intentional
            pnonz = int(pnonz)  # fails if not a number; intentional

            if pprob != 'p' or \
               ptype not in ('lp', 'mip') or \
               psense not in ('max', 'min') or \
               prows < 0 or pcols < 0 or pnonz < 0:
                raise ValueError

            self.is_integer = ('mip' == ptype and True or False)
            prob.sense = 'min' == psense and ProblemSense.minimize or ProblemSense.maximize
            prob.number_of_constraints = prows
            prob.number_of_nonzeros    = pnonz
            prob.number_of_variables   = pcols

            extract_duals = False
            extract_reduced_costs = False
            for suffix in self._suffixes:
                flag = False
                if re.match(suffix, "dual"):
                    if not self.is_integer:
                        flag = True
                        extract_duals = True
                if re.match(suffix, "rc"):
                    if not self.is_integer:
                        flag = True
                        extract_reduced_costs = True
                if not flag:
                    # TODO: log a warning
                    pass

            for line in f:
                glp_line_count += 1
                tokens = line.split()
                switch = tokens.pop(0)

                if switch in ('a', 'e', 'i', 'j'):
                    pass
                elif 'n' == switch:  # naming some attribute
                    ntype = tokens.pop(0)
                    name  = tokens.pop()
                    if 'i' == ntype:      # row
                        row = tokens.pop()
                        constraint_names[int(row)] = name
                        # --write order == --wglp order; store name w/ row no
                    elif 'j' == ntype:    # var
                        col = tokens.pop()
                        variable_names[int(col)] = name
                        # --write order == --wglp order; store name w/ col no
                    elif 'z' == ntype:    # objective
                        obj_name = name
                    elif 'p' == ntype:    # problem name
                        prob.name = name
                    else:                 # anything else is incorrect.
                        raise ValueError

                else:
                    raise ValueError
        except Exception:
            e = sys.exc_info()[1]
            msg = "Error parsing solution description file, line %s: %s"
            raise ValueError(msg % (glp_line_count, str(e)))
        finally:
            f.close()

        range_duals = {}
        # Step 2: Make use of the GLPK's machine parseable format (--write) to
        #    collect solution variable and constraint values.
        raw_line_count = ' -- File not yet opened'
        try:
            f = open(psoln, 'r')

            raw_line_count = 1
            prows, pcols = f.readline().split()
            prows = int(prows)  # fails if not a number; intentional
            pcols = int(pcols)  # fails if not a number; intentional

            raw_line_count = 2
            if self.is_integer:
                pstat, obj_val = f.readline().split()
            else:
                pstat, dstat, obj_val = f.readline().split()
                dstat = float(dstat) # dual status of basic solution.  Ignored.

            pstat = float(pstat)       # fails if not a number; intentional
            obj_val = float(obj_val)   # fails if not a number; intentional
            soln_status = self._glpk_get_solution_status(pstat)

            if soln_status is SolutionStatus.infeasible:
                solv.termination_condition = TerminationCondition.infeasible

            elif soln_status is SolutionStatus.unbounded:
                solv.termination_condition = TerminationCondition.unbounded

            elif soln_status is SolutionStatus.other:
                if solv.termination_condition == TerminationCondition.unknown:
                    solv.termination_condition = TerminationCondition.other

            elif soln_status in (SolutionStatus.optimal, SolutionStatus.feasible):
                soln   = results.solution.add()
                soln.status = soln_status

                prob.lower_bound = obj_val
                prob.upper_bound = obj_val

                # TODO: Does a 'feasible' status mean that we're optimal?
                soln.gap=0.0
                solv.termination_condition = TerminationCondition.optimal

                # I'd like to choose the correct answer rather than just doing
                # something like commenting the obj_name line.  The point is that
                # we ostensibly could or should make use of the user's choice in
                # objective name.  In that vein I'd like to set the objective value
                # to the objective name.  This would make parsing on the user end
                # less 'arbitrary', as in the yaml key 'f'.  Weird
                soln.objective[obj_name] = {'Value': obj_val}

                if (self.is_integer is True) or (extract_duals is False):
                    # we use nothing from this section so just read in the
                    # lines and throw them away
                    for mm in range(1, prows +1):
                        raw_line_count += 1
                        f.readline()
                else:
                    for mm in range(1, prows +1):
                        raw_line_count += 1

                        rstat, rprim, rdual = f.readline().split()
                        rstat = float(rstat)

                        cname = constraint_names[mm]
                        if 'ONE_VAR_CONSTANT' == cname[-16:]: continue

                        if cname.startswith('c_'):
                            soln.constraint[cname] = {"Dual":float(rdual)}
                        elif cname.startswith('r_l_'):
                            range_duals.setdefault(cname[4:],[0,0])[0] = float(rdual)
                        elif cname.startswith('r_u_'):
                            range_duals.setdefault(cname[4:],[0,0])[1] = float(rdual)

                for nn in range(1, pcols +1):
                    raw_line_count += 1
                    if self.is_integer:
                        cprim = f.readline()      # should be a single number
                    else:
                        cstat, cprim, cdual = f.readline().split()
                        cstat = float(cstat)  # fails if not a number; intentional

                    vname = variable_names[nn]
                    if 'ONE_VAR_CONSTANT' == vname: continue
                    cprim = float(cprim)
                    if extract_reduced_costs is False:
                        soln.variable[vname] = {"Value" : cprim}
                    else:
                        soln.variable[vname] = {"Value" : cprim,
                                                "Rc" : float(cdual)}

        except Exception:
            print(sys.exc_info()[1])
            msg = "Error parsing solution data file, line %d" % raw_line_count
            raise ValueError(msg)
        finally:
            f.close()

        if soln is not None:
            # For the range constraints, supply only the dual with the largest
            # magnitude (at least one should always be numerically zero)
            scon = soln.Constraint
            for key,(ld,ud) in iteritems(range_duals):
                if abs(ld) > abs(ud):
                    scon['r_l_'+key] = {"Dual":ld}
                else:
                    scon['r_l_'+key] = {"Dual":ud}      # Use the same key

Example 44

Project: pyomo
Source File: GLPK_old.py
View license
    def process_soln_file(self, results):

        # the only suffixes that we extract from GLPK are
        # constraint duals. scan through the solver suffix
        # list and throw an exception if the user has
        # specified any others.
        extract_duals = False
        for suffix in self._suffixes:
            flag = False
            if re.match(suffix, "dual"):
                extract_duals = True
                flag = True
            if not flag:
                raise RuntimeError(\
                      "***GLPK solver plugin cannot extract solution " + \
                       "suffix='%s'" % (suffix))

        lp_solution = True  # if false, we're dealing with a MIP!
        if not os.path.exists(self._soln_file):
            return
        soln = results.solution(0)
        INPUT = open(self._soln_file, "r")

        range_duals = {}
        try:

            state = 0  # 0=initial header, 1=constraints, 2=variables, -1=done

            results.problem.number_of_objectives = 1

            # for validation of the total count read and the order
            number_of_constraints_read = 0
            number_of_variables_read = 0

            # constraint names and their value/bounds can be split
            # across multiple lines
            active_constraint_name = ""

            # variable names and their value/bounds can be split across
            # multiple lines
            active_variable_name = ""

            for line in INPUT:
                tokens = re.split('[ \t]+', line.strip())

                if (len(tokens) == 1) and (len(tokens[0]) == 0):
                    pass
                elif state == 0:
                    #
                    # Processing initial header
                    #
                    if len(tokens) == 2 and tokens[0] == "Problem:":
                        # the problem name may be absent, in which case
                        # the "Problem:" line will be skipped.
                        results.problem.name = tokens[1]
                    elif len(tokens) == 2 and tokens[0] == "Rows:":
                        results.problem.number_of_constraints = eval(tokens[1])
                    elif len(tokens) == 2 and tokens[0] == "Columns:":
                        lp_solution = True
                        results.problem.number_of_variables = eval(tokens[1])
                    elif len(tokens) > 2 and tokens[0] == "Columns:":
                        lp_solution = False
                        results.problem.number_of_variables = eval(tokens[1])
                    elif len(tokens) == 2 and tokens[0] == "Non-zeros:":
                        results.problem.number_of_nonzeros = eval(tokens[1])
                    elif len(tokens) >= 2 and tokens[0] == "Status:":
                        if tokens[1] == "OPTIMAL":
                            soln.status = SolutionStatus.optimal
                        elif len(tokens) == 3 and tokens[1] == "INTEGER" and \
                                 tokens[2] == "NON-OPTIMAL":
                            soln.status = SolutionStatus.bestSoFar
                        elif len(tokens) == 3 and tokens[1] == "INTEGER" and \
                                 tokens[2] == "OPTIMAL":
                            soln.status = SolutionStatus.optimal
                        elif len(tokens) == 3 and tokens[1] == "INTEGER" and \
                                 tokens[2] == "UNDEFINED":
                            soln.status = SolutionStatus.stoppedByLimit
                        elif len(tokens) == 3 and tokens[1] == "INTEGER" and \
                                tokens[2] == "EMPTY":
                            soln.status = SolutionStatus.infeasible
                        elif (len(tokens) == 2) and (tokens[1] == "UNDEFINED"):
                            soln.status = SolutionStatus.infeasible
                        else:
                            print("WARNING: Read unknown status while " + \
                                   "parsing GLPK solution file - " + \
                                   "status='%s'") % (" ".join(tokens[1:]))
                    elif len(tokens) >= 2 and tokens[0] == "Objective:":
                        if tokens[4] == "(MINimum)":
                            results.problem.sense = ProblemSense.minimize
                        else:
                            results.problem.sense = ProblemSense.maximize
                        soln.objective[tokens[1]] = {'Value': float(tokens[3])}
                        if soln.status is SolutionStatus.optimal:
                            results.problem.lower_bound = soln.objective[tokens[1]]['Value']
                            results.problem.upper_bound = soln.objective[tokens[1]]['Value']
                        # the objective is the last entry in the problem section - move on to constraints.
                        state = 1

                elif state == 1:
                    #
                    # Process Constraint Info
                    #

                    if (len(tokens) == 2) and (len(active_constraint_name) == 0):
                        number_of_constraints_read = number_of_constraints_read + 1
                        active_constraint_name = tokens[1].strip()
                        index = eval(tokens[0].strip())

                        # sanity check - the indices should be in sequence.
                        if index != number_of_constraints_read:
                            raise ValueError(\
                                  ("***ERROR: Unexpected constraint index " + \
                                   "encountered on line=%s; expected " + \
                                   "value=%s; actual value=%s") % \
                                   (line, str(number_of_constraints_read),
                                    str(index)))
                    else:
                        index = None
                        activity = None
                        lower_bound = None
                        upper_bound = None
                        marginal = None

                        # extract the field names and process accordingly. there
                        # is some wasted processing w.r.t. single versus double-line
                        # entries, but it's not significant enough to worry about.

                        index_string = line[0:6].strip()
                        name_string = line[7:19].strip()
                        activity_string = line[23:36].strip()
                        lower_bound_string = line[37:50].strip()
                        upper_bound_string = line[51:64].strip()

                        state_string = None
                        marginal_string = None

                        # skip any headers
                        if (index_string == "------") or (index_string == "No."):
                            continue

                        if len(index_string) > 0:
                            index = eval(index_string)

                        if lp_solution is True:
                            state_string = line[20:22].strip()
                            marginal_string = line[65:78].strip()
                            if (activity_string != "< eps") and (len(activity_string) > 0):
                                activity = eval(activity_string)
                            else:
                                activity = 0.0
                            if (lower_bound_string != "< eps") and (len(lower_bound_string) > 0):
                                lower_bound = eval(lower_bound_string)
                            else:
                                lower_bound = 0.0
                            if state_string != "NS" and upper_bound_string != '=':
                                if (upper_bound_string != "< eps") and (len(upper_bound_string) > 0):
                                    upper_bound = eval(upper_bound_string)
                                else:
                                    upper_bound = 0.0
                            if (marginal_string != "< eps") and (len(marginal_string) > 0):
                                marginal = eval(marginal_string)
                            else:
                                marginal = 0.0

                        else:
                            # no constraint-related attributes/values are extracted currently for MIPs.
                            pass

                        constraint_name = None
                        if len(active_constraint_name) > 0:
                            # if there is an active constraint name, the identifier was
                            # too long for everything to be on a single line; the second
                            # line contains all of the value information.
                            constraint_name = active_constraint_name
                            active_constraint_name = ""
                        else:
                            # everything is on a single line.
                            constraint_name = name_string
                            number_of_constraints_read = number_of_constraints_read + 1
                            # sanity check - the indices should be in sequence.
                            if index != number_of_constraints_read:
                                raise ValueError("***ERROR: Unexpected constraint index encountered on line="+line+"; expected value="+str(number_of_constraints_read)+"; actual value="+str(index))

                        if (lp_solution is True) and (extract_duals is True):
                            # GLPK doesn't report slacks directly.
                            constraint_dual = activity
                            if state_string == "B":
                                constraint_dual = 0.0
                            elif (state_string == "NS") or (state_string == "NL") or (state_string == "NU"):
                                constraint_dual = marginal
                            else:
                                raise ValueError("Unknown status="+tokens[0]+" encountered "
                                                 "for constraint="+active_constraint_name+" "
                                                 "in line="+line+" of solution file="+self._soln_file)

                            if constraint_name.startswith('c_'):
                                soln.constraint[constraint_name] = {"Dual" : float(constraint_dual)}
                            elif constraint_name.startswith('r_l_'):
                                range_duals.setdefault(constraint_name[4:],[0,0])[0] = float(constraint_dual)
                            elif constraint_name.startswith('r_u_'):
                                range_duals.setdefault(constraint_name[4:],[0,0])[1] = float(constraint_dual)

                        else:
                            # there isn't anything interesting to do with constraints in the MIP case.
                            pass

                        # if all of the constraints have been read, exit.
                        if number_of_constraints_read == results.problem.number_of_constraints:
                            state = 2

                elif state == 2:
                    #
                    # Process Variable Info
                    #

                    if (len(tokens) == 2) and (len(active_variable_name) == 0):

                        # in the case of name over-flow, there are only two tokens
                        # on the first of two lines for the variable entry.
                        number_of_variables_read = number_of_variables_read + 1
                        active_variable_name = tokens[1].strip()
                        index = eval(tokens[0].strip())

                        # sanity check - the indices should be in sequence.
                        if index != number_of_variables_read:
                            raise ValueError("***ERROR: Unexpected variable index encountered on line="+line+"; expected value="+str(number_of_variables_read)+"; actual value="+str(index))

                    else:

                        index = None
                        activity = None
                        lower_bound = None
                        upper_bound = None
                        marginal = None

                        # extract the field names and process accordingly. there
                        # is some wasted processing w.r.t. single versus double-line
                        # entries, but it's not significant enough to worry about.

                        index_string = line[0:6].strip()
                        name_string = line[7:19].strip()
                        activity_string = line[23:36].strip()
                        lower_bound_string = line[37:50].strip()
                        upper_bound_string = line[51:64].strip()

                        state_string = None
                        marginal_string = None

                        # skip any headers
                        if (index_string == "------") or (index_string == "No."):
                            continue

                        if len(index_string) > 0:
                            index = eval(index_string)

                        if lp_solution is True:

                            state_string = line[20:22].strip()
                            marginal_string = line[65:78].strip()

                            if (activity_string != "< eps") and (len(activity_string) > 0):
                                activity = eval(activity_string)
                            else:
                                activity = 0.0
                                if (lower_bound_string != "< eps") and (len(lower_bound_string) > 0):
                                    lower_bound = eval(lower_bound_string)
                                else:
                                    lower_bound = 0.0
                            if state_string != "NS":
                                if (upper_bound_string != "< eps") and (len(upper_bound_string) > 0):
                                    upper_bound = eval(upper_bound_string)
                                else:
                                    upper_bound = 0.0
                            if (marginal_string != "< eps") and (len(marginal_string) > 0):
                                marginal = eval(marginal_string)
                            else:
                                marginal = 0.0
                        else:
                            if (activity_string != "< eps") and (len(activity_string) > 0):
                                activity = eval(activity_string)
                            else:
                                activity = 0.0

                        variable_name = None
                        if len(active_variable_name) > 0:
                            # if there is an active variable name, the identifier was
                            # too long for everything to be on a single line; the second
                            # line contains all of the value information.
                            variable_name = active_variable_name
                            active_variable_name = ""
                        else:
                            # everything is on a single line.
                            variable_name = name_string
                            number_of_variables_read = number_of_variables_read + 1
                            # sanity check - the indices should be in sequence.
                            if index != number_of_variables_read:
                                raise ValueError("***ERROR: Unexpected variable index encountered on line="+line+"; expected value="+str(number_of_variables_read)+"; actual value="+str(index))

                        if lp_solution is True:
                            # the "activity" column always specifies the variable value.
                            # embedding the if-then-else to validate the basis status.
                            # we are currently ignoring all bound-related information.
                            variable_value = None
                            if state_string in ('B', 'NL', 'NS', 'NU', 'NF'):
                                # NF = non-basic free (unbounded) variable
                                # NL = non-basic variable at its lower bound
                                # NU = non-basic variable at its upper bound
                                # NS = non-basic fixed variable
                                variable_value = activity
                            else:
                                raise ValueError("Unknown status="+state_string+" encountered "
                                                 "for variable="+variable_name+" in the "
                                                 "following line of the GLPK solution file="
                                                 +self._soln_file+":\n"+line)

                            variable = soln.variable[variable_name] = {"Value" : variable_value}
                        else:
                            variable = soln.variable[variable_name] = {"Value" : activity}

                    # if all of the variables have been read, exit.
                    if number_of_variables_read == results.problem.number_of_variables:
                        state = -1

                if state==-1:
                    break

            INPUT.close()

        except ValueError:
            msg = sys.exc_info()[1]
            INPUT.close()
            raise RuntimeError(msg)
        except Exception:
            msg = sys.exc_info()[1]
            INPUT.close()
            raise

        # For the range constraints, supply only the dual with the largest
        # magnitude (at least one should always be numerically zero)
        scon = soln.Constraint
        for key,(ld,ud) in range_duals.items():
            if abs(ld) > abs(ud):
                scon['r_l_'+key] = {"Dual" : ld}
            else:
                scon['r_l_'+key] = {"Dual" : ud}        # Use the same key

        #
        if soln.status is SolutionStatus.optimal:
            soln.gap = 0.0
        elif soln.status is SolutionStatus.stoppedByLimit:
            soln.gap = "Infinity"  # until proven otherwise
            if "lower_bound" in dir(results.problem):
                if results.problem.lower_bound is "-Infinity":
                    soln.gap = "Infinity"
                elif not results.problem.lower_bound is None:
                    if "upper_bound" not in dir(results.problem):
                        gap = "Infinity"
                    elif results.problem.upper_bound is None:
                        gap = "Infinity"
                    else:
                        soln.gap = eval(soln.objective(0)) - \
                                   eval(results.problem.lower_bound)
            elif "upper_bound" in dir(results.problem):
                if results.problem.upper_bound is "Infinity":
                    soln.gap = "Infinity"
                elif not results.problem.upper_bound is None:
                    soln.gap = eval(results.problem.upper_bound) - \
                               eval(soln.objective(0))
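
In the example above, both handlers retrieve the in-flight exception with sys.exc_info()[1] so the solution file can be closed before the error is re-raised. A shorter way to get the same cleanup-then-rewrap behaviour is a try/except/finally block; the sketch below (not from the project, with an invented file format) shows the same ValueError-to-RuntimeError promotion:

import sys

def read_report(path):
    # Sketch: re-raise parse errors as RuntimeError, let anything else
    # propagate unchanged, and close the file on every exit path.
    INPUT = open(path, "r")
    try:
        for line in INPUT:
            if line.startswith("ERROR"):
                raise ValueError("bad record: %r" % line)
    except ValueError:
        # sys.exc_info()[1] is the ValueError instance being handled
        raise RuntimeError(sys.exc_info()[1])
    finally:
        INPUT.close()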

Example 45

Project: pyomo
Source File: gurobi_direct.py
View license
    def _populate_gurobi_instance (self, pyomo_instance):

        from pyomo.core.base import Var, Objective, Constraint, SOSConstraint
        from pyomo.repn import LinearCanonicalRepn, canonical_degree

        try:
            grbmodel = Model(name=pyomo_instance.name)
        except Exception:
            e = sys.exc_info()[1]
            msg = 'Unable to create Gurobi model.  Have you installed the Python'\
            '\n       bindings for Gurobi?\n\n\tError message: %s'
            raise Exception(msg % e)

        if self._symbolic_solver_labels:
            labeler = TextLabeler()
        else:
            labeler = NumericLabeler('x')
        # cache to avoid dictionary getitem calls in the loops below.
        self_symbol_map = self._symbol_map = SymbolMap()
        pyomo_instance.solutions.add_symbol_map(self_symbol_map)
        self._smap_id = id(self_symbol_map)

        # we use this when iterating over the constraints because it
        # will have a much smaller hash table; we also use this for
        # the warm start code after it has been cleaned to contain only
        # variables referenced in the constraints
        self_variable_symbol_map = self._variable_symbol_map = SymbolMap()
        var_symbol_pairs = []

        # maps _VarData labels to the corresponding Gurobi variable object
        pyomo_gurobi_variable_map = {}

        self._referenced_variable_ids.clear()

        # cache to avoid dictionary getitem calls in the loop below.
        grb_infinity = GRB.INFINITY

        for var_value in pyomo_instance.component_data_objects(Var, active=True):

            lb = -grb_infinity
            ub = grb_infinity

            if (var_value.lb is not None) and (var_value.lb != -infinity):
                lb = value(var_value.lb)
            if (var_value.ub is not None) and (var_value.ub != infinity):
                ub = value(var_value.ub)

            # _VarValue objects will not be in the symbol map yet, so
            # avoid some checks.
            var_value_label = self_symbol_map.createSymbol(var_value, labeler)
            var_symbol_pairs.append((var_value, var_value_label))

            # be sure to impart the integer and binary nature of any variables
            if var_value.is_integer():
                var_type = GRB.INTEGER
            elif var_value.is_binary():
                var_type = GRB.BINARY
            elif var_value.is_continuous():
                var_type = GRB.CONTINUOUS
            else:
                raise TypeError("Invalid domain type for variable with name '%s'. "
                                "Variable is not continuous, integer, or binary.")

            pyomo_gurobi_variable_map[var_value_label] = \
                grbmodel.addVar(lb=lb, \
                                ub=ub, \
                                vtype=var_type, \
                                name=var_value_label)

        self_variable_symbol_map.addSymbols(var_symbol_pairs)

        grbmodel.update()

        # The next loop collects the following component types from the model:
        #  - SOSConstraint
        #  - Objective
        #  - Constraint
        sos1 = self._capabilities.sos1
        sos2 = self._capabilities.sos2
        modelSOS = ModelSOS()
        objective_cntr = 0
        # Track the range constraints and their associated variables added by gurobi
        self._last_native_var_idx = grbmodel.NumVars-1
        range_var_idx = grbmodel.NumVars
        _self_range_con_var_pairs = self._range_con_var_pairs = []
        for block in pyomo_instance.block_data_objects(active=True):

            gen_obj_canonical_repn = \
                getattr(block, "_gen_obj_canonical_repn", True)
            gen_con_canonical_repn = \
                getattr(block, "_gen_con_canonical_repn", True)
            # Get/Create the ComponentMap for the repn
            if not hasattr(block,'_canonical_repn'):
                block._canonical_repn = ComponentMap()
            block_canonical_repn = block._canonical_repn

            # SOSConstraints
            for soscondata in block.component_data_objects(SOSConstraint,
                                                           active=True,
                                                           descend_into=False):
                level = soscondata.level
                if (level == 1 and not sos1) or \
                   (level == 2 and not sos2) or \
                   (level > 2):
                    raise RuntimeError(
                        "Solver does not support SOS level %s constraints" % (level,))
                modelSOS.count_constraint(self_symbol_map,
                                          labeler,
                                          self_variable_symbol_map,
                                          pyomo_gurobi_variable_map,
                                          soscondata)

            # Objective
            for obj_data in block.component_data_objects(Objective,
                                                         active=True,
                                                         descend_into=False):

                if objective_cntr > 1:
                    raise ValueError(
                        "Multiple active objectives found on Pyomo instance '%s'. "
                        "Solver '%s' will only handle a single active objective" \
                        % (pyomo_instance.name, self.type))

                sense = GRB_MIN if (obj_data.is_minimizing()) else GRB_MAX
                grbmodel.ModelSense = sense
                obj_expr = LinExpr()

                if gen_obj_canonical_repn:
                    obj_repn = generate_canonical_repn(obj_data.expr)
                    block_canonical_repn[obj_data] = obj_repn
                else:
                    obj_repn = block_canonical_repn[obj_data]

                if isinstance(obj_repn, LinearCanonicalRepn):

                    if obj_repn.constant != None:
                        obj_expr.addConstant(obj_repn.constant)

                    if obj_repn.linear != None:

                        for i in xrange(len(obj_repn.linear)):
                            var_coefficient = obj_repn.linear[i]
                            var_value = obj_repn.variables[i]
                            self._referenced_variable_ids.add(id(var_value))
                            label = self_variable_symbol_map.getSymbol(var_value)
                            obj_expr.addTerms(var_coefficient,
                                              pyomo_gurobi_variable_map[label])
                else:

                    if 0 in obj_repn: # constant term
                        obj_expr.addConstant(obj_repn[0][None])

                    if 1 in obj_repn: # first-order terms
                        hash_to_variable_map = obj_repn[-1]
                        for var_hash, var_coefficient in iteritems(obj_repn[1]):
                            vardata = hash_to_variable_map[var_hash]
                            self._referenced_variable_ids.add(id(vardata))
                            label = self_variable_symbol_map.getSymbol(vardata)
                            obj_expr.addTerms(var_coefficient,
                                              pyomo_gurobi_variable_map[label])

                    if 2 in obj_repn:
                        obj_expr = QuadExpr(obj_expr)
                        hash_to_variable_map = obj_repn[-1]
                        for quad_repn, coef in iteritems(obj_repn[2]):
                            gurobi_expr = QuadExpr(coef)
                            for var_hash, exponent in iteritems(quad_repn):
                                vardata = hash_to_variable_map[var_hash]
                                self._referenced_variable_ids.add(id(vardata))
                                gurobi_var = pyomo_gurobi_variable_map\
                                             [self_variable_symbol_map.\
                                              getSymbol(vardata)]
                                gurobi_expr *= gurobi_var
                                if exponent == 2:
                                    gurobi_expr *= gurobi_var
                            obj_expr += gurobi_expr

                    degree = canonical_degree(obj_repn)
                    if (degree is None) or (degree > 2):
                        raise ValueError(
                            "gurobi_direct plugin does not support general nonlinear "
                            "objective expressions (only linear or quadratic).\n"
                            "Objective: %s" % (obj_data.name))

                # need to cache the objective label, because the
                # GUROBI python interface doesn't track this.
                # _ObjectiveData objects will not be in the symbol map
                # yet, so avoid some checks.
                self._objective_label = \
                    self_symbol_map.createSymbol(obj_data, labeler)

                grbmodel.setObjective(obj_expr, sense=sense)

            # Constraint
            for constraint_data in block.component_data_objects(Constraint,
                                                                active=True,
                                                                descend_into=False):

                if (constraint_data.lower is None) and \
                   (constraint_data.upper is None):
                    continue  # not binding at all, don't bother

                con_repn = None
                if isinstance(constraint_data, LinearCanonicalRepn):
                    con_repn = constraint_data
                else:
                    if gen_con_canonical_repn:
                        con_repn = generate_canonical_repn(constraint_data.body)
                        block_canonical_repn[constraint_data] = con_repn
                    else:
                        con_repn = block_canonical_repn[constraint_data]

                offset = 0.0
                # _ConstraintData objects will not be in the symbol
                # map yet, so avoid some checks.
                constraint_label = \
                    self_symbol_map.createSymbol(constraint_data, labeler)

                trivial = False
                if isinstance(con_repn, LinearCanonicalRepn):

                    #
                    # optimization (these might be generated on the fly)
                    #
                    constant = con_repn.constant
                    coefficients = con_repn.linear
                    variables = con_repn.variables

                    if constant is not None:
                        offset = constant
                    expr = LinExpr() + offset

                    if coefficients is not None:

                        linear_coefs = list()
                        linear_vars = list()

                        for i in xrange(len(coefficients)):

                            var_coefficient = coefficients[i]
                            var_value = variables[i]
                            self._referenced_variable_ids.add(id(var_value))
                            label = self_variable_symbol_map.getSymbol(var_value)
                            linear_coefs.append(var_coefficient)
                            linear_vars.append(pyomo_gurobi_variable_map[label])

                        expr += LinExpr(linear_coefs, linear_vars)

                    else:

                        trivial = True

                else:

                    if 0 in con_repn:
                        offset = con_repn[0][None]
                    expr = LinExpr() + offset

                    if 1 in con_repn: # first-order terms

                        linear_coefs = list()
                        linear_vars = list()

                        hash_to_variable_map = con_repn[-1]
                        for var_hash, var_coefficient in iteritems(con_repn[1]):
                            var = hash_to_variable_map[var_hash]
                            self._referenced_variable_ids.add(id(var))
                            label = self_variable_symbol_map.getSymbol(var)
                            linear_coefs.append( var_coefficient )
                            linear_vars.append( pyomo_gurobi_variable_map[label] )

                        expr += LinExpr(linear_coefs, linear_vars)

                    if 2 in con_repn: # quadratic constraint
                        if _GUROBI_VERSION_MAJOR < 5:
                            raise ValueError(
                                "The gurobi_direct plugin does not handle quadratic "
                                "constraint expressions for Gurobi major versions "
                                "< 5. Current version: Gurobi %s.%s%s"
                                % (gurobi.version()))

                        expr = QuadExpr(expr)
                        hash_to_variable_map = con_repn[-1]
                        for quad_repn, coef in iteritems(con_repn[2]):
                            gurobi_expr = QuadExpr(coef)
                            for var_hash, exponent in iteritems(quad_repn):
                                vardata = hash_to_variable_map[var_hash]
                                self._referenced_variable_ids.add(id(vardata))
                                gurobi_var = pyomo_gurobi_variable_map\
                                             [self_variable_symbol_map.\
                                              getSymbol(vardata)]
                                gurobi_expr *= gurobi_var
                                if exponent == 2:
                                    gurobi_expr *= gurobi_var
                            expr += gurobi_expr

                    degree = canonical_degree(con_repn)
                    if (degree is None) or (degree > 2):
                        raise ValueError(
                            "gurobi_direct plugin does not support general nonlinear "
                            "constraint expressions (only linear or quadratic).\n"
                            "Constraint: %s" % (constraint_data.name))

                if (not trivial) or (not self._skip_trivial_constraints):

                    if constraint_data.equality:
                        sense = GRB.EQUAL
                        bound = self._get_bound(constraint_data.lower)
                        grbmodel.addConstr(lhs=expr,
                                           sense=sense,
                                           rhs=bound,
                                           name=constraint_label)
                    else:
                        # L <= body <= U
                        if (constraint_data.upper is not None) and \
                           (constraint_data.lower is not None):
                            grb_con = grbmodel.addRange(
                                expr,
                                self._get_bound(constraint_data.lower),
                                self._get_bound(constraint_data.upper),
                                constraint_label)
                            _self_range_con_var_pairs.append((grb_con,range_var_idx))
                            range_var_idx += 1
                        # body <= U
                        elif constraint_data.upper is not None:
                            bound = self._get_bound(constraint_data.upper)
                            if bound < float('inf'):
                                grbmodel.addConstr(
                                    lhs=expr,
                                    sense=GRB.LESS_EQUAL,
                                    rhs=bound,
                                    name=constraint_label
                                    )
                        # L <= body
                        else:
                            bound = self._get_bound(constraint_data.lower)
                            if bound > -float('inf'):
                                grbmodel.addConstr(
                                    lhs=expr,
                                    sense=GRB.GREATER_EQUAL,
                                    rhs=bound,
                                    name=constraint_label
                                    )

        if modelSOS.sosType:
            for key in modelSOS.sosType:
                grbmodel.addSOS(modelSOS.sosType[key], \
                                modelSOS.varnames[key], \
                                modelSOS.weights[key] )
                self._referenced_variable_ids.update(modelSOS.varids[key])

        for var_id in self._referenced_variable_ids:
            varname = self._variable_symbol_map.byObject[var_id]
            vardata = self._variable_symbol_map.bySymbol[varname]()
            if vardata.fixed:
                if not self._output_fixed_variable_bounds:
                    raise ValueError("Encountered a fixed variable (%s) inside an active objective "
                                     "or constraint expression on model %s, which is usually indicative of "
                                     "a preprocessing error. Use the IO-option 'output_fixed_variable_bounds=True' "
                                     "to suppress this error and fix the variable by overwriting its bounds in "
                                     "the Gurobi instance."
                                     % (vardata.name,pyomo_instance.name,))

                grbvar = pyomo_gurobi_variable_map[varname]
                grbvar.setAttr(GRB.Attr.UB, vardata.value)
                grbvar.setAttr(GRB.Attr.LB, vardata.value)

        grbmodel.update()

        self._gurobi_instance = grbmodel
        self._pyomo_gurobi_variable_map = pyomo_gurobi_variable_map
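
The only use of sys.exc_info in the example above is the guard around the Gurobi Model constructor: if the Python bindings are missing or broken, the plugin re-raises with a friendlier message that still embeds the original error text. Below is a minimal sketch of that guard, using gurobipy only as an example of an optional dependency:

import sys

def make_model(name):
    # Sketch: wrap construction of an optional third-party object and
    # surface a hint about the likely cause, keeping the original error.
    try:
        from gurobipy import Model   # optional dependency; may be absent
        return Model(name=name)
    except Exception:
        e = sys.exc_info()[1]
        msg = ("Unable to create Gurobi model. Have you installed the "
               "Python bindings for Gurobi?\n\tError message: %s")
        raise Exception(msg % e)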

Example 46

Project: pip
Source File: connectionpool.py
View license
    def urlopen(self, method, url, body=None, headers=None, retries=None,
                redirect=True, assert_same_host=True, timeout=_Default,
                pool_timeout=None, release_conn=None, chunked=False,
                **response_kw):
        """
        Get a connection from the pool and perform an HTTP request. This is the
        lowest level call for making a request, so you'll need to specify all
        the raw details.

        .. note::

           More commonly, it's appropriate to use a convenience method provided
           by :class:`.RequestMethods`, such as :meth:`request`.

        .. note::

           `release_conn` will only behave as expected if
           `preload_content=False` because we want to make
           `preload_content=False` the default behaviour someday soon without
           breaking backwards compatibility.

        :param method:
            HTTP request method (such as GET, POST, PUT, etc.)

        :param body:
            Data to send in the request body (useful for creating
            POST requests, see HTTPConnectionPool.post_url for
            more convenience).

        :param headers:
            Dictionary of custom headers to send, such as User-Agent,
            If-None-Match, etc. If None, pool headers are used. If provided,
            these headers completely replace any pool-specific headers.

        :param retries:
            Configure the number of retries to allow before raising a
            :class:`~urllib3.exceptions.MaxRetryError` exception.

            Pass ``None`` to retry until you receive a response. Pass a
            :class:`~urllib3.util.retry.Retry` object for fine-grained control
            over different types of retries.
            Pass an integer number to retry connection errors that many times,
            but no other types of errors. Pass zero to never retry.

            If ``False``, then retries are disabled and any exception is raised
            immediately. Also, instead of raising a MaxRetryError on redirects,
            the redirect response will be returned.

        :type retries: :class:`~urllib3.util.retry.Retry`, False, or an int.

        :param redirect:
            If True, automatically handle redirects (status codes 301, 302,
            303, 307, 308). Each redirect counts as a retry. Disabling retries
            will disable redirect, too.

        :param assert_same_host:
            If ``True``, will make sure that the host of the pool requests is
            consistent, and will raise HostChangedError otherwise. When False,
            you can use the pool on an HTTP proxy and request foreign hosts.

        :param timeout:
            If specified, overrides the default timeout for this one
            request. It may be a float (in seconds) or an instance of
            :class:`urllib3.util.Timeout`.

        :param pool_timeout:
            If set and the pool is set to block=True, then this method will
            block for ``pool_timeout`` seconds and raise EmptyPoolError if no
            connection is available within the time period.

        :param release_conn:
            If False, then the urlopen call will not release the connection
            back into the pool once a response is received (but will release if
            you read the entire contents of the response such as when
            `preload_content=True`). This is useful if you're not preloading
            the response's content immediately. You will need to call
            ``r.release_conn()`` on the response ``r`` to return the connection
            back into the pool. If None, it takes the value of
            ``response_kw.get('preload_content', True)``.

        :param chunked:
            If True, urllib3 will send the body using chunked transfer
            encoding. Otherwise, urllib3 will send the body using the standard
            content-length form. Defaults to False.

        :param \**response_kw:
            Additional parameters are passed to
            :meth:`urllib3.response.HTTPResponse.from_httplib`
        """
        if headers is None:
            headers = self.headers

        if not isinstance(retries, Retry):
            retries = Retry.from_int(retries, redirect=redirect, default=self.retries)

        if release_conn is None:
            release_conn = response_kw.get('preload_content', True)

        # Check host
        if assert_same_host and not self.is_same_host(url):
            raise HostChangedError(self, url, retries)

        conn = None

        # Track whether `conn` needs to be released before
        # returning/raising/recursing. Update this variable if necessary, and
        # leave `release_conn` constant throughout the function. That way, if
        # the function recurses, the original value of `release_conn` will be
        # passed down into the recursive call, and its value will be respected.
        #
        # See issue #651 [1] for details.
        #
        # [1] <https://github.com/shazow/urllib3/issues/651>
        release_this_conn = release_conn

        # Merge the proxy headers. Only do this in HTTP. We have to copy the
        # headers dict so we can safely change it without those changes being
        # reflected in anyone else's copy.
        if self.scheme == 'http':
            headers = headers.copy()
            headers.update(self.proxy_headers)

        # Must keep the exception bound to a separate variable or else Python 3
        # complains about UnboundLocalError.
        err = None

        # Keep track of whether we cleanly exited the except block. This
        # ensures we do proper cleanup in finally.
        clean_exit = False

        try:
            # Request a connection from the queue.
            timeout_obj = self._get_timeout(timeout)
            conn = self._get_conn(timeout=pool_timeout)

            conn.timeout = timeout_obj.connect_timeout

            is_new_proxy_conn = self.proxy is not None and not getattr(conn, 'sock', None)
            if is_new_proxy_conn:
                self._prepare_proxy(conn)

            # Make the request on the httplib connection object.
            httplib_response = self._make_request(conn, method, url,
                                                  timeout=timeout_obj,
                                                  body=body, headers=headers,
                                                  chunked=chunked)

            # If we're going to release the connection in ``finally:``, then
            # the response doesn't need to know about the connection. Otherwise
            # it will also try to release it and we'll have a double-release
            # mess.
            response_conn = conn if not release_conn else None

            # Import httplib's response into our own wrapper object
            response = self.ResponseCls.from_httplib(httplib_response,
                                                     pool=self,
                                                     connection=response_conn,
                                                     **response_kw)

            # Everything went great!
            clean_exit = True

        except Empty:
            # Timed out by queue.
            raise EmptyPoolError(self, "No pool connections are available.")

        except (BaseSSLError, CertificateError) as e:
            # Close the connection. If a connection is reused on which there
            # was a Certificate error, the next request will certainly raise
            # another Certificate error.
            clean_exit = False
            raise SSLError(e)

        except SSLError:
            # Treat SSLError separately from BaseSSLError to preserve
            # traceback.
            clean_exit = False
            raise

        except (TimeoutError, HTTPException, SocketError, ProtocolError) as e:
            # Discard the connection for these exceptions. It will
            # be replaced during the next _get_conn() call.
            clean_exit = False

            if isinstance(e, (SocketError, NewConnectionError)) and self.proxy:
                e = ProxyError('Cannot connect to proxy.', e)
            elif isinstance(e, (SocketError, HTTPException)):
                e = ProtocolError('Connection aborted.', e)

            retries = retries.increment(method, url, error=e, _pool=self,
                                        _stacktrace=sys.exc_info()[2])
            retries.sleep()

            # Keep track of the error for the retry warning.
            err = e

        finally:
            if not clean_exit:
                # We hit some kind of exception, handled or otherwise. We need
                # to throw the connection away unless explicitly told not to.
                # Close the connection, set the variable to None, and make sure
                # we put the None back in the pool to avoid leaking it.
                conn = conn and conn.close()
                release_this_conn = True

            if release_this_conn:
                # Put the connection back to be reused. If the connection is
                # expired then it will be None, which will get replaced with a
                # fresh connection during _get_conn.
                self._put_conn(conn)

        if not conn:
            # Try again
            log.warning("Retrying (%r) after connection "
                        "broken by '%r': %s", retries, err, url)
            return self.urlopen(method, url, body, headers, retries,
                                redirect, assert_same_host,
                                timeout=timeout, pool_timeout=pool_timeout,
                                release_conn=release_conn, **response_kw)

        # Handle redirect?
        redirect_location = redirect and response.get_redirect_location()
        if redirect_location:
            if response.status == 303:
                method = 'GET'

            try:
                retries = retries.increment(method, url, response=response, _pool=self)
            except MaxRetryError:
                if retries.raise_on_redirect:
                    # Release the connection for this response, since we're not
                    # returning it to be released manually.
                    response.release_conn()
                    raise
                return response

            log.info("Redirecting %s -> %s", url, redirect_location)
            return self.urlopen(
                method, redirect_location, body, headers,
                retries=retries, redirect=redirect,
                assert_same_host=assert_same_host,
                timeout=timeout, pool_timeout=pool_timeout,
                release_conn=release_conn, **response_kw)

        # Check if we should retry the HTTP response.
        if retries.is_forced_retry(method, status_code=response.status):
            try:
                retries = retries.increment(method, url, response=response, _pool=self)
            except MaxRetryError:
                if retries.raise_on_status:
                    # Release the connection for this response, since we're not
                    # returning it to be released manually.
                    response.release_conn()
                    raise
                return response
            retries.sleep()
            log.info("Forced retry: %s", url)
            return self.urlopen(
                method, url, body, headers,
                retries=retries, redirect=redirect,
                assert_same_host=assert_same_host,
                timeout=timeout, pool_timeout=pool_timeout,
                release_conn=release_conn, **response_kw)

        return response
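
Here sys.exc_info()[2] hands the traceback of the connection error to Retry.increment(), so the original failure can be re-raised with its traceback intact when retries are disabled or exhausted. The sketch below (Python 3 only, not urllib3 code) shows the underlying idea of carrying the original traceback forward when wrapping an exception:

import sys

def reraise_with_context(message):
    # Sketch: capture the (type, value, traceback) triple of the exception
    # currently being handled and attach the original traceback to the
    # wrapping error, so the report still points at the first failure.
    exc_type, exc_value, tb = sys.exc_info()
    new_exc = RuntimeError("%s (caused by %r)" % (message, exc_value))
    raise new_exc.with_traceback(tb)

# Typical use:
#   try:
#       make_request()            # hypothetical operation that may fail
#   except Exception:
#       reraise_with_context("request failed")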

Example 47

View license
def lambda_handler(event, context):
    try:
        if debug:
            print('Connect to Redshift: %s' % host)
        conn = pg8000.connect(database=database, user=user, password=password, host=host, port=port, ssl=ssl)
    except Exception:
        print('Redshift Connection Failed: exception %s' % sys.exc_info()[1])
        return 'Failed'

    if debug:
        print('Successfully connected to Redshift cluster')
    cursor = conn.cursor()

    run_command(cursor,"select /* Lambda CloudWatch Exporter */ \"schema\" || '.' || \"table\" as table, encoded, max_varchar, unsorted, stats_off, tbl_rows, skew_sortkey1, skew_rows from svv_table_info")
    tables_not_compressed = 0
    max_skew_ratio = 0
    total_skew_ratio = 0
    number_tables_skew = 0

    number_tables = 0
    
    max_skew_sort_ratio = 0
    total_skew_sort_ratio = 0
    number_tables_skew_sort = 0
    number_tables_statsoff = 0
    max_varchar_size = 0
    max_unsorted_pct = 0
    total_rows = 0
    result = cursor.fetchall()
    for table in result:
        table_name, encoded, max_varchar, unsorted, stats_off, tbl_rows, skew_sortkey1, skew_rows = table
        number_tables += 1
        if encoded == 'N':
            tables_not_compressed += 1
        if skew_rows != None:
            if skew_rows > max_skew_ratio:
                max_skew_ratio = skew_rows
            total_skew_ratio += skew_rows
            number_tables_skew += 1
        if skew_sortkey1 != None:
            if skew_sortkey1 > max_skew_sort_ratio:
                max_skew_sort_ratio = skew_sortkey1
            total_skew_sort_ratio += skew_sortkey1
            number_tables_skew_sort += 1
        if stats_off != None and stats_off > 5:
            number_tables_statsoff += 1
        if max_varchar != None and max_varchar > max_varchar_size:
            max_varchar_size = max_varchar
        if unsorted != None and unsorted > max_unsorted_pct:
            max_unsorted_pct = unsorted
        if tbl_rows != None:
            total_rows += tbl_rows

    if number_tables_skew > 0:
        avg_skew_ratio = total_skew_ratio / number_tables_skew
    else:
        avg_skew_ratio = 0

    if number_tables_skew_sort > 0:
        avg_skew_sort_ratio = total_skew_sort_ratio / number_tables_skew_sort
    else:
        avg_skew_sort_ratio = 0

    run_command(cursor,"SELECT /* Lambda CloudWatch Exporter */ count(a.attname) FROM pg_namespace n, pg_class c, pg_attribute a  WHERE n.oid = c.relnamespace AND c.oid = a.attrelid AND a.attnum > 0 AND NOT a.attisdropped and n.nspname NOT IN ('information_schema','pg_catalog','pg_toast') AND format_encoding(a.attencodingtype::integer) = 'none' AND c.relkind='r' AND a.attsortkeyord != 1")
    columns_not_compressed = cursor.fetchone()[0]
    if columns_not_compressed == None:
        columns_not_compressed = 0

    run_command(cursor,"SELECT /* Lambda CloudWatch Exporter */ sum(nvl(s.num_qs,0)) FROM svv_table_info t LEFT JOIN (SELECT tbl, COUNT(distinct query) num_qs FROM stl_scan s WHERE s.userid > 1 AND starttime >= GETDATE() - INTERVAL '%s' GROUP BY tbl) s ON s.tbl = t.table_id WHERE t.sortkey1 IS NULL" % interval)
    queries_scan_no_sort = cursor.fetchone()[0]
    if queries_scan_no_sort == None:
        queries_scan_no_sort = 0

    run_command(cursor,"SELECT /* Lambda CloudWatch Exporter */ SUM(w.total_queue_time) / 1000000.0 FROM stl_wlm_query w WHERE w.queue_start_time >= GETDATE() - INTERVAL '%s' AND w.total_queue_time > 0" % interval)
    total_wlm_queue_time = cursor.fetchone()[0]
    if total_wlm_queue_time == None:
        total_wlm_queue_time = 0

    run_command(cursor,"SELECT /* Lambda CloudWatch Exporter */ count(distinct query) FROM svl_query_report WHERE is_diskbased='t' AND (LABEL LIKE 'hash%%' OR LABEL LIKE 'sort%%' OR LABEL LIKE 'aggr%%') AND userid > 1 AND start_time >= GETDATE() - INTERVAL '{0}'".format(interval))
    total_disk_based_queries = cursor.fetchone()[0]
    if total_disk_based_queries == None:
        total_disk_based_queries = 0

    run_command(cursor,"select /* Lambda CloudWatch Exporter */ avg(datediff(ms,startqueue,startwork)) from stl_commit_stats  where startqueue >= GETDATE() - INTERVAL '%s'" % interval)
    avg_commit_queue = cursor.fetchone()[0]
    if avg_commit_queue == None:
        avg_commit_queue = 0

    run_command(cursor,"select /* Lambda CloudWatch Exporter */ count(distinct l.query) from stl_alert_event_log as l where l.userid >1 and l.event_time >= GETDATE() - INTERVAL '%s'" % interval)
    total_alerts = cursor.fetchone()[0]
    if total_alerts == None:
        total_alerts = 0

    run_command(cursor,"select /* Lambda CloudWatch Exporter */ avg(datediff(ms, starttime, endtime)) from stl_query where starttime >= GETDATE() - INTERVAL '%s'" % interval)
    avg_query_time = cursor.fetchone()[0]
    if avg_query_time == None:
        avg_query_time = 0

    run_command(cursor,"select /* Lambda CloudWatch Exporter */ sum(packets) from stl_dist where starttime >= GETDATE() - INTERVAL '%s'" % interval)
    total_packets = cursor.fetchone()[0]
    if total_packets == None:
        total_packets = 0

    run_command(cursor,"select /* Lambda CloudWatch Exporter */ sum(total) from (select count(query) total from stl_dist where starttime >= GETDATE() - INTERVAL '%s' group by query having sum(packets) > 1000000)" % interval)
    queries_traffic = cursor.fetchone()[0]
    if queries_traffic == None:
        queries_traffic = 0

    run_command(cursor,"select /* Lambda CloudWatch Exporter */ count(event) from stl_connection_log where event = 'initiating session' and username != 'rdsdb' and pid not in (select pid from stl_connection_log where event = 'disconnecting session')")
    db_connections = cursor.fetchone()[0]
    if db_connections == None:
        db_connections = 0

    if debug:
        print("Publishing CloudWatch Metrics")
    
    try:  
        cw.put_metric_data(
            Namespace='Redshift',
            MetricData=[
                {
                    'MetricName': 'TablesNotCompressed',
                    'Dimensions': [
                        { 'Name': 'ClusterIdentifier', 'Value': cluster}
                    ],
                    'Timestamp': datetime.datetime.utcnow(),
                    'Value': tables_not_compressed,
                    'Unit': 'Count'
                },
                {
                    'MetricName': 'ColumnsNotCompressed',
                    'Dimensions': [
                        { 'Name': 'ClusterIdentifier', 'Value': cluster}
                    ],
                    'Timestamp': datetime.datetime.utcnow(),
                    'Value': columns_not_compressed,
                    'Unit': 'Count'
                },
                {
                    'MetricName': 'MaxSkewRatio',
                    'Dimensions': [
                        { 'Name': 'ClusterIdentifier', 'Value': cluster}
                    ],
                    'Timestamp': datetime.datetime.utcnow(),
                    'Value': max_skew_ratio,
                    'Unit': 'None'
                },
                {
                    'MetricName': 'AvgSkewRatio',
                    'Dimensions': [
                        { 'Name': 'ClusterIdentifier', 'Value': cluster}
                    ],
                    'Timestamp': datetime.datetime.utcnow(),
                    'Value': avg_skew_ratio,
                    'Unit': 'None'
                },
                {
                    'MetricName': 'Tables',
                    'Dimensions': [
                        { 'Name': 'ClusterIdentifier', 'Value': cluster}
                    ],
                    'Timestamp': datetime.datetime.utcnow(),
                    'Value': number_tables,
                    'Unit': 'Count'
                },
                {
                    'MetricName': 'QueriesScanNoSort',
                    'Dimensions': [
                        { 'Name': 'ClusterIdentifier', 'Value': cluster}
                    ],
                    'Timestamp': datetime.datetime.utcnow(),
                    'Value': queries_scan_no_sort,
                    'Unit': 'Count'
                },
                {
                    'MetricName': 'MaxSkewSortRatio',
                    'Dimensions': [
                        { 'Name': 'ClusterIdentifier', 'Value': cluster}
                    ],
                    'Timestamp': datetime.datetime.utcnow(),
                    'Value': max_skew_sort_ratio,
                    'Unit': 'None'
                },
                {
                    'MetricName': 'AvgSkewSortRatio',
                    'Dimensions': [
                        { 'Name': 'ClusterIdentifier', 'Value': cluster}
                    ],
                    'Timestamp': datetime.datetime.utcnow(),
                    'Value': avg_skew_sort_ratio,
                    'Unit': 'None'
                },
                {
                    'MetricName': 'TablesStatsOff',
                    'Dimensions': [
                        { 'Name': 'ClusterIdentifier', 'Value': cluster}
                    ],
                    'Timestamp': datetime.datetime.utcnow(),
                    'Value': number_tables_statsoff,
                    'Unit': 'Count'
                },
                {
                    'MetricName': 'MaxVarcharSize',
                    'Dimensions': [
                        { 'Name': 'ClusterIdentifier', 'Value': cluster}
                    ],
                    'Timestamp': datetime.datetime.utcnow(),
                    'Value': max_varchar_size,
                    'Unit': 'None'
                },
                {
                    'MetricName': 'TotalWLMQueueTime',
                    'Dimensions': [
                        { 'Name': 'ClusterIdentifier', 'Value': cluster}
                    ],
                    'Timestamp': datetime.datetime.utcnow(),
                    'Value': total_wlm_queue_time,
                    'Unit': 'Seconds'
                },
                {
                    'MetricName': 'DiskBasedQueries',
                    'Dimensions': [
                        { 'Name': 'ClusterIdentifier', 'Value': cluster}
                    ],
                    'Timestamp': datetime.datetime.utcnow(),
                    'Value': total_disk_based_queries,
                    'Unit': 'Count'
                },
                {
                    'MetricName': 'AvgCommitQueueTime',
                    'Dimensions': [
                        { 'Name': 'ClusterIdentifier', 'Value': cluster}
                    ],
                    'Timestamp': datetime.datetime.utcnow(),
                    'Value': avg_commit_queue,
                    'Unit': 'Milliseconds'
                },
                {
                    'MetricName': 'TotalAlerts',
                    'Dimensions': [
                        { 'Name': 'ClusterIdentifier', 'Value': cluster}
                    ],
                    'Timestamp': datetime.datetime.utcnow(),
                    'Value': total_alerts,
                    'Unit': 'Count'
                },
                {
                    'MetricName': 'MaxUnsorted',
                    'Dimensions': [
                        { 'Name': 'ClusterIdentifier', 'Value': cluster}
                    ],
                    'Timestamp': datetime.datetime.utcnow(),
                    'Value': max_unsorted_pct,
                    'Unit': 'Percent'
                },
                {
                    'MetricName': 'Rows',
                    'Dimensions': [
                        { 'Name': 'ClusterIdentifier', 'Value': cluster}
                    ],
                    'Timestamp': datetime.datetime.utcnow(),
                    'Value': total_rows,
                    'Unit': 'Count'
                },
                {
                    'MetricName': 'AverageQueryTime',
                    'Dimensions': [
                        { 'Name': 'ClusterIdentifier', 'Value': cluster}
                    ],
                    'Timestamp': datetime.datetime.utcnow(),
                    'Value': avg_query_time,
                    'Unit': 'Milliseconds'
                },
                {
                    'MetricName': 'Packets',
                    'Dimensions': [
                        { 'Name': 'ClusterIdentifier', 'Value': cluster}
                    ],
                    'Timestamp': datetime.datetime.utcnow(),
                    'Value': total_packets,
                    'Unit': 'Count'
                },
                {
                    'MetricName': 'QueriesWithHighTraffic',
                    'Dimensions': [
                        { 'Name': 'ClusterIdentifier', 'Value': cluster}
                    ],
                    'Timestamp': datetime.datetime.utcnow(),
                    'Value': queries_traffic,
                    'Unit': 'Count'
                },
                {
                    'MetricName': 'DbConnections',
                    'Dimensions': [
                        { 'Name': 'ClusterIdentifier', 'Value': cluster}
                    ],
                    'Timestamp': datetime.datetime.utcnow(),
                    'Value': db_connections,
                    'Unit': 'Count'
                }
            ]
        )
    except:
        print('Pushing metrics to CloudWatch failed: exception %s' % sys.exc_info()[1])

    cursor.close()
    conn.close()
    return 'Finished'
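
The metric-publishing step above wraps the whole put_metric_data call in a bare except and reports the active exception through sys.exc_info()[1] instead of binding it with "except ... as e". A minimal, self-contained sketch of that logging pattern (publish_safely and flaky_publish are illustrative names, not part of the source):

import sys

def publish_safely(publish):
    """Call publish() and log any failure via sys.exc_info() instead of raising."""
    try:
        publish()
    except:  # intentionally broad, mirroring the example above
        print('Pushing metrics to CloudWatch failed: exception %s' % sys.exc_info()[1])

def flaky_publish():
    raise RuntimeError('Throttling: rate exceeded')

publish_safely(flaky_publish)  # prints the error instead of propagating it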

Example 48

View license
    def urlopen(self, method, url, body=None, headers=None, retries=None,
                redirect=True, assert_same_host=True, timeout=_Default,
                pool_timeout=None, release_conn=None, chunked=False,
                **response_kw):
        """
        Get a connection from the pool and perform an HTTP request. This is the
        lowest level call for making a request, so you'll need to specify all
        the raw details.

        .. note::

           More commonly, it's appropriate to use a convenience method provided
           by :class:`.RequestMethods`, such as :meth:`request`.

        .. note::

           `release_conn` will only behave as expected if
           `preload_content=False` because we want to make
           `preload_content=False` the default behaviour someday soon without
           breaking backwards compatibility.

        :param method:
            HTTP request method (such as GET, POST, PUT, etc.)

        :param body:
            Data to send in the request body (useful for creating
            POST requests, see HTTPConnectionPool.post_url for
            more convenience).

        :param headers:
            Dictionary of custom headers to send, such as User-Agent,
            If-None-Match, etc. If None, pool headers are used. If provided,
            these headers completely replace any pool-specific headers.

        :param retries:
            Configure the number of retries to allow before raising a
            :class:`~urllib3.exceptions.MaxRetryError` exception.

            Pass ``None`` to retry until you receive a response. Pass a
            :class:`~urllib3.util.retry.Retry` object for fine-grained control
            over different types of retries.
            Pass an integer number to retry connection errors that many times,
            but no other types of errors. Pass zero to never retry.

            If ``False``, then retries are disabled and any exception is raised
            immediately. Also, instead of raising a MaxRetryError on redirects,
            the redirect response will be returned.

        :type retries: :class:`~urllib3.util.retry.Retry`, False, or an int.

        :param redirect:
            If True, automatically handle redirects (status codes 301, 302,
            303, 307, 308). Each redirect counts as a retry. Disabling retries
            will disable redirect, too.

        :param assert_same_host:
            If ``True``, will make sure that the host of the pool requests is
            consistent else will raise HostChangedError. When False, you can
            use the pool on an HTTP proxy and request foreign hosts.

        :param timeout:
            If specified, overrides the default timeout for this one
            request. It may be a float (in seconds) or an instance of
            :class:`urllib3.util.Timeout`.

        :param pool_timeout:
            If set and the pool is set to block=True, then this method will
            block for ``pool_timeout`` seconds and raise EmptyPoolError if no
            connection is available within the time period.

        :param release_conn:
            If False, then the urlopen call will not release the connection
            back into the pool once a response is received (but will release if
            you read the entire contents of the response such as when
            `preload_content=True`). This is useful if you're not preloading
            the response's content immediately. You will need to call
            ``r.release_conn()`` on the response ``r`` to return the connection
            back into the pool. If None, it takes the value of
            ``response_kw.get('preload_content', True)``.

        :param chunked:
            If True, urllib3 will send the body using chunked transfer
            encoding. Otherwise, urllib3 will send the body using the standard
            content-length form. Defaults to False.

        :param \**response_kw:
            Additional parameters are passed to
            :meth:`urllib3.response.HTTPResponse.from_httplib`
        """
        if headers is None:
            headers = self.headers

        if not isinstance(retries, Retry):
            retries = Retry.from_int(retries, redirect=redirect, default=self.retries)

        if release_conn is None:
            release_conn = response_kw.get('preload_content', True)

        # Check host
        if assert_same_host and not self.is_same_host(url):
            raise HostChangedError(self, url, retries)

        conn = None

        # Merge the proxy headers. Only do this in HTTP. We have to copy the
        # headers dict so we can safely change it without those changes being
        # reflected in anyone else's copy.
        if self.scheme == 'http':
            headers = headers.copy()
            headers.update(self.proxy_headers)

        # Must keep the exception bound to a separate variable or else Python 3
        # complains about UnboundLocalError.
        err = None

        # Keep track of whether we cleanly exited the except block. This
        # ensures we do proper cleanup in finally.
        clean_exit = False

        try:
            # Request a connection from the queue.
            timeout_obj = self._get_timeout(timeout)
            conn = self._get_conn(timeout=pool_timeout)

            conn.timeout = timeout_obj.connect_timeout

            is_new_proxy_conn = self.proxy is not None and not getattr(conn, 'sock', None)
            if is_new_proxy_conn:
                self._prepare_proxy(conn)

            # Make the request on the httplib connection object.
            httplib_response = self._make_request(conn, method, url,
                                                  timeout=timeout_obj,
                                                  body=body, headers=headers,
                                                  chunked=chunked)

            # If we're going to release the connection in ``finally:``, then
            # the response doesn't need to know about the connection. Otherwise
            # it will also try to release it and we'll have a double-release
            # mess.
            response_conn = conn if not release_conn else None

            # Import httplib's response into our own wrapper object
            response = HTTPResponse.from_httplib(httplib_response,
                                                 pool=self,
                                                 connection=response_conn,
                                                 **response_kw)

            # Everything went great!
            clean_exit = True

        except Empty:
            # Timed out by queue.
            raise EmptyPoolError(self, "No pool connections are available.")

        except (BaseSSLError, CertificateError) as e:
            # Close the connection. If a connection is reused on which there
            # was a Certificate error, the next request will certainly raise
            # another Certificate error.
            clean_exit = False
            raise SSLError(e)

        except SSLError:
            # Treat SSLError separately from BaseSSLError to preserve
            # traceback.
            clean_exit = False
            raise

        except (TimeoutError, HTTPException, SocketError, ProtocolError) as e:
            # Discard the connection for these exceptions. It will be
            # replaced during the next _get_conn() call.
            clean_exit = False

            if isinstance(e, (SocketError, NewConnectionError)) and self.proxy:
                e = ProxyError('Cannot connect to proxy.', e)
            elif isinstance(e, (SocketError, HTTPException)):
                e = ProtocolError('Connection aborted.', e)

            retries = retries.increment(method, url, error=e, _pool=self,
                                        _stacktrace=sys.exc_info()[2])
            retries.sleep()

            # Keep track of the error for the retry warning.
            err = e

        finally:
            if not clean_exit:
                # We hit some kind of exception, handled or otherwise. We need
                # to throw the connection away unless explicitly told not to.
                # Close the connection, set the variable to None, and make sure
                # we put the None back in the pool to avoid leaking it.
                conn = conn and conn.close()
                release_conn = True

            if release_conn:
                # Put the connection back to be reused. If the connection is
                # expired then it will be None, which will get replaced with a
                # fresh connection during _get_conn.
                self._put_conn(conn)

        if not conn:
            # Try again
            log.warning("Retrying (%r) after connection "
                        "broken by '%r': %s", retries, err, url)
            return self.urlopen(method, url, body, headers, retries,
                                redirect, assert_same_host,
                                timeout=timeout, pool_timeout=pool_timeout,
                                release_conn=release_conn, **response_kw)

        # Handle redirect?
        redirect_location = redirect and response.get_redirect_location()
        if redirect_location:
            if response.status == 303:
                method = 'GET'

            try:
                retries = retries.increment(method, url, response=response, _pool=self)
            except MaxRetryError:
                if retries.raise_on_redirect:
                    # Release the connection for this response, since we're not
                    # returning it to be released manually.
                    response.release_conn()
                    raise
                return response

            log.info("Redirecting %s -> %s", url, redirect_location)
            return self.urlopen(
                method, redirect_location, body, headers,
                retries=retries, redirect=redirect,
                assert_same_host=assert_same_host,
                timeout=timeout, pool_timeout=pool_timeout,
                release_conn=release_conn, **response_kw)

        # Check if we should retry the HTTP response.
        if retries.is_forced_retry(method, status_code=response.status):
            try:
                retries = retries.increment(method, url, response=response, _pool=self)
            except MaxRetryError:
                if retries.raise_on_status:
                    # Release the connection for this response, since we're not
                    # returning it to be released manually.
                    response.release_conn()
                    raise
                return response
            retries.sleep()
            log.info("Forced retry: %s", url)
            return self.urlopen(
                method, url, body, headers,
                retries=retries, redirect=redirect,
                assert_same_host=assert_same_host,
                timeout=timeout, pool_timeout=pool_timeout,
                release_conn=release_conn, **response_kw)

        return response
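
In the urlopen example above, connection-level failures are fed into retries.increment together with _stacktrace=sys.exc_info()[2], so the traceback of the failed attempt can be attached to a later MaxRetryError instead of being lost. A small, self-contained sketch of the same idea under assumed names (attempt_with_history is illustrative, not urllib3 API):

import sys
import traceback

def attempt_with_history(func, attempts=3):
    """Retry func(), recording each failure's traceback via sys.exc_info()."""
    failures = []
    for _ in range(attempts):
        try:
            return func()
        except Exception:
            # sys.exc_info() describes the exception currently being handled;
            # format it now so the final error can report every attempt.
            failures.append(''.join(traceback.format_exception(*sys.exc_info())))
    raise RuntimeError('all %d attempts failed:\n%s' % (attempts, '\n'.join(failures)))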

Example 49

Project: rdflib
Source File: __init__.py
View license
def processURI(uri, outputFormat, form={}) :
	"""The standard processing of an RDFa uri options in a form; used as an entry point from a CGI call.

	The call accepts extra form options (i.e., HTTP GET options) as follows:

	 - C{graph=[output|processor|output,processor|processor,output]} specifying which graphs are returned. Default: C{output}
	 - C{space_preserve=[true|false]} means that plain literals are normalized in terms of white spaces. Default: C{false}
	 - C{rdfa_version} provides the RDFa version that should be used for distilling. The string should be of the form "1.0" or "1.1". Default is the highest version the current package implements, currently "1.1"
	 - C{host_language=[xhtml,html,xml]} : the host language. Used when files are uploaded or text is added verbatim, otherwise the HTTP return header should be used. Default C{xml}
	 - C{embedded_rdf=[true|false]} : whether embedded turtle or RDF/XML content should be added to the output graph. Default: C{false}
	 - C{vocab_expansion=[true|false]} : whether the vocabularies should be expanded through the restricted RDFS entailment. Default: C{false}
	 - C{vocab_cache=[true|false]} : whether vocab caching should be performed or whether it should be ignored and vocabulary files should be picked up every time. Default: C{false}
	 - C{vocab_cache_report=[true|false]} : whether vocab caching details should be reported. Default: C{false}
	 - C{vocab_cache_bypass=[true|false]} : whether vocab caches have to be regenerated every time. Default: C{false}
	 - C{rdfa_lite=[true|false]} : whether warnings should be generated for non RDFa Lite attribute usage. Default: C{false}

	@param uri: URI to access. Note that the C{text:} and C{uploaded:} fake URI values are treated separately; the former is for textual input (in which case a StringIO is used to get the data) and the latter is for an uploaded file, where the form gives access to the file directly.
	@param outputFormat: serialization format, as defined by the package. Currently "xml", "turtle", "nt", or "json". Default is "turtle", also used if any other string is given.
	@param form: extra call options (from the CGI call) to set up the local options
	@type form: cgi FieldStorage instance
	@return: serialized graph
	@rtype: string
	"""
	def _get_option(param, compare_value, default) :
		param_old = param.replace('_','-')
		if param in list(form.keys()) :
			val = form.getfirst(param).lower()
			return val == compare_value
		elif param_old in list(form.keys()) :
			# this is to ensure the old style parameters are still valid...
			# in the old days I used '-' in the parameters, the standard favours '_'
			val = form.getfirst(param_old).lower()
			return val == compare_value
		else :
			return default

	if uri == "uploaded:" :
		input	= form["uploaded"].file
		base	= ""
	elif uri == "text:" :
		input	= StringIO(form.getfirst("text"))
		base	= ""
	else :
		input	= uri
		base	= uri

	if "rdfa_version" in list(form.keys()) :
		rdfa_version = form.getfirst("rdfa_version")
	else :
		rdfa_version = None

	# working through the possible options
	# Host language: HTML, XHTML, or XML
	# Note that these options should be used for the upload and inline version only in case of a form
	# for real uris the returned content type should be used
	if "host_language" in list(form.keys()) :
		if form.getfirst("host_language").lower() == "xhtml" :
			media_type = MediaTypes.xhtml
		elif form.getfirst("host_language").lower() == "html" :
			media_type = MediaTypes.html
		elif form.getfirst("host_language").lower() == "svg" :
			media_type = MediaTypes.svg
		elif form.getfirst("host_language").lower() == "atom" :
			media_type = MediaTypes.atom
		else :
			media_type = MediaTypes.xml
	else :
		media_type = ""

	transformers = []

	check_lite = "rdfa_lite" in list(form.keys()) and form.getfirst("rdfa_lite").lower() == "true"

	# The code below is left for backward compatibility only. In fact, these options are not exposed any more,
	# they are not really in use
	if "extras" in list(form.keys()) and form.getfirst("extras").lower() == "true" :
		from .transform.metaname              	import meta_transform
		from .transform.OpenID                	import OpenID_transform
		from .transform.DublinCore            	import DC_transform
		for t in [OpenID_transform, DC_transform, meta_transform] :
			transformers.append(t)
	else :
		if "extra-meta" in list(form.keys()) and form.getfirst("extra-meta").lower() == "true" :
			from .transform.metaname import meta_transform
			transformers.append(meta_transform)
		if "extra-openid" in list(form.keys()) and form.getfirst("extra-openid").lower() == "true" :
			from .transform.OpenID import OpenID_transform
			transformers.append(OpenID_transform)
		if "extra-dc" in list(form.keys()) and form.getfirst("extra-dc").lower() == "true" :
			from .transform.DublinCore import DC_transform
			transformers.append(DC_transform)

	output_default_graph 	= True
	output_processor_graph 	= False
	# Note that I use the 'graph' and the 'rdfagraph' form keys here. Reason is that
	# I used 'graph' in the previous versions, including the RDFa 1.0 processor,
	# so if I removed that altogether that would create backward incompatibilities
	# On the other hand, the RDFa 1.1 doc clearly refers to 'rdfagraph' as the standard
	# key.
	a = None
	if "graph" in list(form.keys()) :
		a = form.getfirst("graph").lower()
	elif "rdfagraph" in list(form.keys()) :
		a = form.getfirst("rdfagraph").lower()
	if a is not None :
		if a == "processor" :
			output_default_graph 	= False
			output_processor_graph 	= True
		elif a == "processor,output" or a == "output,processor" :
			output_processor_graph 	= True

	embedded_rdf        = _get_option( "embedded_rdf", "true", False)
	space_preserve      = _get_option( "space_preserve", "true", True)
	vocab_cache         = _get_option( "vocab_cache", "true", True)
	vocab_cache_report  = _get_option( "vocab_cache_report", "true", False)
	refresh_vocab_cache = _get_option( "vocab_cache_refresh", "true", False)
	vocab_expansion     = _get_option( "vocab_expansion", "true", False)
	if vocab_cache_report : output_processor_graph = True

	options = Options(output_default_graph   = output_default_graph,
					  output_processor_graph = output_processor_graph,
					  space_preserve         = space_preserve,
					  transformers           = transformers,
					  vocab_cache            = vocab_cache,
					  vocab_cache_report     = vocab_cache_report,
					  refresh_vocab_cache    = refresh_vocab_cache,
					  vocab_expansion        = vocab_expansion,
					  embedded_rdf           = embedded_rdf,
					  check_lite             = check_lite
					  )
	processor = pyRdfa(options = options, base = base, media_type = media_type, rdfa_version = rdfa_version)

	# Decide the output format; the issue is what should happen in case of a top level error like an inaccessibility of
	# the html source: should a graph be returned or an HTML page with an error message?

	# decide whether HTML or RDF should be sent.
	htmlOutput = False
	#if 'HTTP_ACCEPT' in os.environ :
	#	acc = os.environ['HTTP_ACCEPT']
	#	possibilities = ['text/html',
	#					 'application/rdf+xml',
	#					 'text/turtle; charset=utf-8',
	#					 'application/json',
	#					 'application/ld+json',
	#					 'text/rdf+n3']
	#
	#	# this nice module does content negotiation and returns the preferred format
	#	sg = acceptable_content_type(acc, possibilities)
	#	htmlOutput = (sg != None and sg[0] == content_type('text/html'))
	#	os.environ['rdfaerror'] = 'true'

	# This is really for testing purposes only, it is an unpublished flag to force RDF output no
	# matter what
	try :
		graph = processor.rdf_from_source(input, outputFormat, rdfOutput = ("forceRDFOutput" in list(form.keys())) or not htmlOutput)
		if outputFormat == "n3" :
			retval = 'Content-Type: text/rdf+n3; charset=utf-8\n'
		elif outputFormat == "nt" or outputFormat == "turtle" :
			retval = 'Content-Type: text/turtle; charset=utf-8\n'
		elif outputFormat == "json-ld" or outputFormat == "json" :
			retval = 'Content-Type: application/ld+json; charset=utf-8\n'
		else :
			retval = 'Content-Type: application/rdf+xml; charset=utf-8\n'
		retval += '\n'
		retval += graph
		return retval
	except HTTPError :
		(type,h,traceback) = sys.exc_info()
		import cgi

		retval = 'Content-type: text/html; charset=utf-8\nStatus: %s \n\n' % h.http_code
		retval += "<html>\n"
		retval += "<head>\n"
		retval += "<title>HTTP Error in distilling RDFa content</title>\n"
		retval += "</head><body>\n"
		retval += "<h1>HTTP Error in distilling RDFa content</h1>\n"
		retval += "<p>HTTP Error: %s (%s)</p>\n" % (h.http_code,h.msg)
		retval += "<p>On URI: <code>'%s'</code></p>\n" % cgi.escape(uri)
		retval +="</body>\n"
		retval +="</html>\n"
		return retval
	except :
		# This branch should occur only if an exception is really raised, i.e., if it is not turned
		# into a graph value.
		(type,value,traceback) = sys.exc_info()

		import traceback, cgi

		retval = 'Content-type: text/html; charset=utf-8\nStatus: %s\n\n' % processor.http_status
		retval += "<html>\n"
		retval += "<head>\n"
		retval += "<title>Exception in RDFa processing</title>\n"
		retval += "</head><body>\n"
		retval += "<h1>Exception in distilling RDFa</h1>\n"
		retval += "<pre>\n"
		strio  = StringIO()
		traceback.print_exc(file=strio)
		retval += strio.getvalue()
		retval +="</pre>\n"
		retval +="<pre>%s</pre>\n" % value
		retval +="<h1>Distiller request details</h1>\n"
		retval +="<dl>\n"
		if uri == "text:" and "text" in form and form["text"].value != None and len(form["text"].value.strip()) != 0 :
			retval +="<dt>Text input:</dt><dd>%s</dd>\n" % cgi.escape(form["text"].value).replace('\n','<br/>')
		elif uri == "uploaded:" :
			retval +="<dt>Uploaded file</dt>\n"
		else :
			retval +="<dt>URI received:</dt><dd><code>'%s'</code></dd>\n" % cgi.escape(uri)
		if "host_language" in list(form.keys()) :
			retval +="<dt>Media Type:</dt><dd>%s</dd>\n" % media_type
		if "graph" in list(form.keys()) :
			retval +="<dt>Requested graphs:</dt><dd>%s</dd>\n" % form.getfirst("graph").lower()
		else :
			retval +="<dt>Requested graphs:</dt><dd>default</dd>\n"
		retval +="<dt>Output serialization format:</dt><dd> %s</dd>\n" % outputFormat
		if "space_preserve" in form : retval +="<dt>Space preserve:</dt><dd> %s</dd>\n" % form["space_preserve"].value
		retval +="</dl>\n"
		retval +="</body>\n"
		retval +="</html>\n"
		return retval
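
Example 49 builds its HTML error page by unpacking sys.exc_info() and piping traceback.print_exc into a StringIO buffer. A condensed Python 3 sketch of that reporting step (exception_report is an illustrative name, and html.escape stands in for the cgi.escape used above, which was removed in Python 3.8):

import sys
import traceback
from io import StringIO
from html import escape

def exception_report():
    """Return an HTML fragment describing the exception currently being handled."""
    exc_type, exc_value, _ = sys.exc_info()
    buf = StringIO()
    traceback.print_exc(file=buf)  # same call the example routes into its error page
    return '<h1>%s</h1>\n<pre>%s</pre>\n<pre>%s</pre>' % (
        escape(exc_type.__name__), escape(buf.getvalue()), escape(str(exc_value)))

try:
    int('not a number')
except ValueError:
    print(exception_report())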

Example 50

Project: headphones
Source File: connectionpool.py
View license
    def urlopen(self, method, url, body=None, headers=None, retries=None,
                redirect=True, assert_same_host=True, timeout=_Default,
                pool_timeout=None, release_conn=None, **response_kw):
        """
        Get a connection from the pool and perform an HTTP request. This is the
        lowest level call for making a request, so you'll need to specify all
        the raw details.

        .. note::

           More commonly, it's appropriate to use a convenience method provided
           by :class:`.RequestMethods`, such as :meth:`request`.

        .. note::

           `release_conn` will only behave as expected if
           `preload_content=False` because we want to make
           `preload_content=False` the default behaviour someday soon without
           breaking backwards compatibility.

        :param method:
            HTTP request method (such as GET, POST, PUT, etc.)

        :param body:
            Data to send in the request body (useful for creating
            POST requests, see HTTPConnectionPool.post_url for
            more convenience).

        :param headers:
            Dictionary of custom headers to send, such as User-Agent,
            If-None-Match, etc. If None, pool headers are used. If provided,
            these headers completely replace any pool-specific headers.

        :param retries:
            Configure the number of retries to allow before raising a
            :class:`~urllib3.exceptions.MaxRetryError` exception.

            Pass ``None`` to retry until you receive a response. Pass a
            :class:`~urllib3.util.retry.Retry` object for fine-grained control
            over different types of retries.
            Pass an integer number to retry connection errors that many times,
            but no other types of errors. Pass zero to never retry.

            If ``False``, then retries are disabled and any exception is raised
            immediately. Also, instead of raising a MaxRetryError on redirects,
            the redirect response will be returned.

        :type retries: :class:`~urllib3.util.retry.Retry`, False, or an int.

        :param redirect:
            If True, automatically handle redirects (status codes 301, 302,
            303, 307, 308). Each redirect counts as a retry. Disabling retries
            will disable redirect, too.

        :param assert_same_host:
            If ``True``, will make sure that the host of the pool requests is
            consistent else will raise HostChangedError. When False, you can
            use the pool on an HTTP proxy and request foreign hosts.

        :param timeout:
            If specified, overrides the default timeout for this one
            request. It may be a float (in seconds) or an instance of
            :class:`urllib3.util.Timeout`.

        :param pool_timeout:
            If set and the pool is set to block=True, then this method will
            block for ``pool_timeout`` seconds and raise EmptyPoolError if no
            connection is available within the time period.

        :param release_conn:
            If False, then the urlopen call will not release the connection
            back into the pool once a response is received (but will release if
            you read the entire contents of the response such as when
            `preload_content=True`). This is useful if you're not preloading
            the response's content immediately. You will need to call
            ``r.release_conn()`` on the response ``r`` to return the connection
            back into the pool. If None, it takes the value of
            ``response_kw.get('preload_content', True)``.

        :param \**response_kw:
            Additional parameters are passed to
            :meth:`urllib3.response.HTTPResponse.from_httplib`
        """
        if headers is None:
            headers = self.headers

        if not isinstance(retries, Retry):
            retries = Retry.from_int(retries, redirect=redirect, default=self.retries)

        if release_conn is None:
            release_conn = response_kw.get('preload_content', True)

        # Check host
        if assert_same_host and not self.is_same_host(url):
            raise HostChangedError(self, url, retries)

        conn = None

        # Merge the proxy headers. Only do this in HTTP. We have to copy the
        # headers dict so we can safely change it without those changes being
        # reflected in anyone else's copy.
        if self.scheme == 'http':
            headers = headers.copy()
            headers.update(self.proxy_headers)

        # Must keep the exception bound to a separate variable or else Python 3
        # complains about UnboundLocalError.
        err = None

        try:
            # Request a connection from the queue.
            timeout_obj = self._get_timeout(timeout)
            conn = self._get_conn(timeout=pool_timeout)

            conn.timeout = timeout_obj.connect_timeout

            is_new_proxy_conn = self.proxy is not None and not getattr(conn, 'sock', None)
            if is_new_proxy_conn:
                self._prepare_proxy(conn)

            # Make the request on the httplib connection object.
            httplib_response = self._make_request(conn, method, url,
                                                  timeout=timeout_obj,
                                                  body=body, headers=headers)

            # If we're going to release the connection in ``finally:``, then
            # the response doesn't need to know about the connection. Otherwise
            # it will also try to release it and we'll have a double-release
            # mess.
            response_conn = not release_conn and conn

            # Import httplib's response into our own wrapper object
            response = HTTPResponse.from_httplib(httplib_response,
                                                 pool=self,
                                                 connection=response_conn,
                                                 **response_kw)

            # else:
            #     The connection will be put back into the pool when
            #     ``response.release_conn()`` is called (implicitly by
            #     ``response.read()``)

        except Empty:
            # Timed out by queue.
            raise EmptyPoolError(self, "No pool connections are available.")

        except (BaseSSLError, CertificateError) as e:
            # Close the connection. If a connection is reused on which there
            # was a Certificate error, the next request will certainly raise
            # another Certificate error.
            conn = conn and conn.close()
            release_conn = True
            raise SSLError(e)

        except SSLError:
            # Treat SSLError separately from BaseSSLError to preserve
            # traceback.
            conn = conn and conn.close()
            release_conn = True
            raise

        except (TimeoutError, HTTPException, SocketError, ProtocolError) as e:
            # Discard the connection for these exceptions. It will be
            # replaced during the next _get_conn() call.
            conn = conn and conn.close()
            release_conn = True

            if isinstance(e, (SocketError, NewConnectionError)) and self.proxy:
                e = ProxyError('Cannot connect to proxy.', e)
            elif isinstance(e, (SocketError, HTTPException)):
                e = ProtocolError('Connection aborted.', e)

            retries = retries.increment(method, url, error=e, _pool=self,
                                        _stacktrace=sys.exc_info()[2])
            retries.sleep()

            # Keep track of the error for the retry warning.
            err = e

        finally:
            if release_conn:
                # Put the connection back to be reused. If the connection is
                # expired then it will be None, which will get replaced with a
                # fresh connection during _get_conn.
                self._put_conn(conn)

        if not conn:
            # Try again
            log.warning("Retrying (%r) after connection "
                        "broken by '%r': %s" % (retries, err, url))
            return self.urlopen(method, url, body, headers, retries,
                                redirect, assert_same_host,
                                timeout=timeout, pool_timeout=pool_timeout,
                                release_conn=release_conn, **response_kw)

        # Handle redirect?
        redirect_location = redirect and response.get_redirect_location()
        if redirect_location:
            if response.status == 303:
                method = 'GET'

            try:
                retries = retries.increment(method, url, response=response, _pool=self)
            except MaxRetryError:
                if retries.raise_on_redirect:
                    # Release the connection for this response, since we're not
                    # returning it to be released manually.
                    response.release_conn()
                    raise
                return response

            log.info("Redirecting %s -> %s" % (url, redirect_location))
            return self.urlopen(method, redirect_location, body, headers,
                    retries=retries, redirect=redirect,
                    assert_same_host=assert_same_host,
                    timeout=timeout, pool_timeout=pool_timeout,
                    release_conn=release_conn, **response_kw)

        # Check if we should retry the HTTP response.
        if retries.is_forced_retry(method, status_code=response.status):
            retries = retries.increment(method, url, response=response, _pool=self)
            retries.sleep()
            log.info("Forced retry: %s" % url)
            return self.urlopen(method, url, body, headers,
                    retries=retries, redirect=redirect,
                    assert_same_host=assert_same_host,
                    timeout=timeout, pool_timeout=pool_timeout,
                    release_conn=release_conn, **response_kw)

        return response
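
Both urlopen examples also carry the comment about keeping the caught exception bound to a separate err variable: in Python 3 the name bound by "except ... as e" is deleted when the except block ends, so reading it afterwards fails. A short sketch of the behaviour that comment guards against (demonstrate is an illustrative name):

def demonstrate():
    err = None
    try:
        raise ConnectionError('broken pipe')
    except ConnectionError as e:
        err = e  # keep a reference that survives the end of the except block
    # In Python 3, `e` is unbound here; `err` still holds the exception.
    print('retrying after %r' % err)

demonstrate()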