sys.exc_info

Here are examples of the Python API sys.exc_info, taken from open source projects.
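
For orientation before the project code: sys.exc_info() returns a (type, value, traceback) tuple describing the exception currently being handled, or (None, None, None) when no exception is active. A minimal sketch, independent of the examples below:

import sys
import traceback

try:
    1 / 0
except ZeroDivisionError:
    exc_type, exc_value, exc_tb = sys.exc_info()
    print(exc_type.__name__)    # -> ZeroDivisionError
    print(exc_value)            # the exception instance itself
    traceback.print_tb(exc_tb)  # the captured stack frames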

200 Examples

Example 1

Project: AZOrange
Source File: getUnbiasedAccuracy.py
    def getAcc(self, callBack = None, callBackWithFoldModel = None):
        """ For regression problems, it returns the RMSE and the Q2 
            For Classification problems, it returns CA and the ConfMat
            The return is made in a Dict: {"RMSE":0.2,"Q2":0.1,"CA":0.98,"CM":[[TP, FP],[FN,TN]]}
            For EvalResults not supported for a specific learner/dataset, the respective result will be None

            if the learner is a dict {"LearnerName":learner, ...} the results will be a dict with results for all Learners and for a consensus
                made out of those that were stable

            If some error occurred, the respective values in the Dict will be None
        """
        self.__log("Starting Calculating MLStatistics")
        statistics = {}
        if not self.__areInputsOK():
            return None
        # Set the response type
        self.responseType =  self.data.domain.classVar.varType == orange.VarTypes.Discrete and "Classification"  or "Regression"
        self.__log("  "+str(self.responseType))

        #Create the Train and test sets
        if self.usePreDefFolds:
            DataIdxs = self.preDefIndices 
        else:
            DataIdxs = self.sampler(self.data, self.nExtFolds) 
        foldsN = [f for f in dict.fromkeys(DataIdxs) if f != 0] #Folds used only from 1 on ... 0 are for fixed train Bias
        nFolds = len(foldsN)
        #Fix the Indexes based on DataIdxs
        # (0s) represent the train set; ( >= 1s) represent the test set folds
        if self.useVarCtrlCV:
            nShifted = [0] * nFolds
            for idx,isTest in enumerate(self.preDefIndices):  # self.preDefIndices == 0 are to be used in TrainBias
                if not isTest:
                    if DataIdxs[idx]:
                        nShifted[DataIdxs[idx]] += 1
                        DataIdxs[idx] = 0
            for idx,shift in enumerate(nShifted):
                self.__log("In fold "+str(idx)+", "+str(shift)+" examples were shifted to the train set.")

        #Var for saving each fold's result
        optAcc = {}
        results = {}
        exp_pred = {}
        nTrainEx = {}
        nTestEx = {}
        
        #Set a dict of learners
        MLmethods = {}
        if type(self.learner) == dict:
            for ml in self.learner:
                MLmethods[ml] = self.learner[ml]
        else:
            MLmethods[self.learner.name] = self.learner

        models={}
        self.__log("Calculating Statistics for MLmethods:")
        self.__log("  "+str([x for x in MLmethods]))

        #Check data in advance so that it does not, by chance, fail at the last fold!
        for foldN in foldsN:
            trainData = self.data.select(DataIdxs,foldN,negate=1)
            self.__checkTrainData(trainData)

        #Optional!!
        # Order the learners so that PLS comes first
        sortedML = [ml for ml in MLmethods]
        if "PLS" in sortedML:
            sortedML.remove("PLS")
            sortedML.insert(0,"PLS")

        stepsDone = 0
        nTotalSteps = len(sortedML) * self.nExtFolds  
        for ml in sortedML:
          startTime = time.time()
          self.__log("    > "+str(ml)+"...")
          try:
            #Var for saving each fold's result
            results[ml] = []
            exp_pred[ml] = []
            models[ml] = []
            nTrainEx[ml] = []
            nTestEx[ml] = []
            optAcc[ml] = []
            logTxt = ""
            for foldN in foldsN:
                if type(self.learner) == dict:
                    self.paramList = None

                trainData = self.data.select(DataIdxs,foldN,negate=1)
                testData = self.data.select(DataIdxs,foldN)
                smilesAttr = dataUtilities.getSMILESAttr(trainData)
                if smilesAttr:
                    self.__log("Found SMILES attribute:"+smilesAttr)
                    if MLmethods[ml].specialType == 1:
                       trainData = dataUtilities.attributeSelectionData(trainData, [smilesAttr, trainData.domain.classVar.name]) 
                       testData = dataUtilities.attributeSelectionData(testData, [smilesAttr, testData.domain.classVar.name]) 
                       self.__log("Selected attrs: "+str([attr.name for attr in trainData.domain]))
                    else:
                       trainData = dataUtilities.attributeDeselectionData(trainData, [smilesAttr]) 
                       testData = dataUtilities.attributeDeselectionData(testData, [smilesAttr]) 
                       self.__log("Selected attrs: "+str([attr.name for attr in trainData.domain[0:3]] + ["..."] + [attr.name for attr in trainData.domain[len(trainData.domain)-3:]]))

                nTrainEx[ml].append(len(trainData))
                nTestEx[ml].append(len(testData))
                #Test if train sets inside the optimizer will respect the dataSize criteria.
                #  if not, don't optimize, but still train the model
                dontOptimize = False
                if self.responseType != "Classification" and (len(trainData)*(1-1.0/self.nInnerFolds) < 20):
                    dontOptimize = True
                else:                      
                    tmpDataIdxs = self.sampler(trainData, self.nInnerFolds)
                    tmpTrainData = trainData.select(tmpDataIdxs,1,negate=1)
                    if not self.__checkTrainData(tmpTrainData, False):
                        dontOptimize = True

                SpecialModel = None
                if dontOptimize:
                    logTxt += "       Fold "+str(foldN)+": Too few compounds to optimize model hyper-parameters\n"
                    self.__log(logTxt)
                    if trainData.domain.classVar.varType == orange.VarTypes.Discrete:
                        res = evalUtilities.crossValidation([MLmethods[ml]], trainData, folds=5, stratified=orange.MakeRandomIndices.StratifiedIfPossible, random_generator = random.randint(0, 100))
                        CA = evalUtilities.CA(res)[0]
                        optAcc[ml].append(CA)
                    else:
                        res = evalUtilities.crossValidation([MLmethods[ml]], trainData, folds=5, stratified=orange.MakeRandomIndices.StratifiedIfPossible, random_generator = random.randint(0, 100))
                        R2 = evalUtilities.R2(res)[0]
                        optAcc[ml].append(R2)
                else:
                    if MLmethods[ml].specialType == 1: 
                            if trainData.domain.classVar.varType == orange.VarTypes.Discrete:
                                    optInfo, SpecialModel = MLmethods[ml].optimizePars(trainData, folds = 5)
                                    optAcc[ml].append(optInfo["Acc"])
                            else:
                                    res = evalUtilities.crossValidation([MLmethods[ml]], trainData, folds=5, stratified=orange.MakeRandomIndices.StratifiedIfPossible, random_generator = random.randint(0, 100))
                                    R2 = evalUtilities.R2(res)[0]
                                    optAcc[ml].append(R2)
                    else:
                            runPath = miscUtilities.createScratchDir(baseDir = AZOC.NFS_SCRATCHDIR, desc = "AccWOptParam", seed = id(trainData))
                            trainData.save(os.path.join(runPath,"trainData.tab"))
                            tunedPars = paramOptUtilities.getOptParam(
                                learner = MLmethods[ml], 
                                trainDataFile = os.path.join(runPath,"trainData.tab"), 
                                paramList = self.paramList, 
                                useGrid = False, 
                                verbose = self.verbose, 
                                queueType = self.queueType, 
                                runPath = runPath, 
                                nExtFolds = None, 
                                nFolds = self.nInnerFolds,
                                logFile = self.logFile,
                                getTunedPars = True,
                                fixedParams = self.fixedParams)
                            if not MLmethods[ml] or not MLmethods[ml].optimized:
                                self.__log("       WARNING: GETACCWOPTPARAM: The learner "+str(ml)+" was not optimized.")
                                self.__log("                It will be ignored")
                                #self.__log("                It will be set to default parameters")
                                self.__log("                    DEBUG can be done in: "+runPath)
                                #Set learner back to default 
                                #MLmethods[ml] = MLmethods[ml].__class__()
                                raise Exception("The learner "+str(ml)+" was not optimized.")
                            else:
                                if trainData.domain.classVar.varType == orange.VarTypes.Discrete:
                                    optAcc[ml].append(tunedPars[0])
                                else:
                                    res = evalUtilities.crossValidation([MLmethods[ml]], trainData, folds=5, stratified=orange.MakeRandomIndices.StratifiedIfPossible, random_generator = random.randint(0, 100))
                                    R2 = evalUtilities.R2(res)[0]
                                    optAcc[ml].append(R2)

                                miscUtilities.removeDir(runPath) 
                #Train the model
                if SpecialModel is not None:
                    model = SpecialModel 
                else:
                    model = MLmethods[ml](trainData)
                models[ml].append(model)
                #Test the model
                if self.responseType == "Classification":
                    results[ml].append((evalUtilities.getClassificationAccuracy(testData, model), evalUtilities.getConfMat(testData, model) ) )
                else:
                    local_exp_pred = []
                    # Predict using bulk-predict
                    predictions = model(testData)
                    # Gather predictions
                    for n,ex in enumerate(testData):
                        local_exp_pred.append((ex.getclass().value, predictions[n].value))
                    results[ml].append((evalUtilities.calcRMSE(local_exp_pred), evalUtilities.calcRsqrt(local_exp_pred) ) )
                    #Save the experimental value and corresponding predicted value
                    exp_pred[ml] += local_exp_pred
                if callBack:
                     stepsDone += 1
                     if not callBack((100*stepsDone)/nTotalSteps): return None
                if callBackWithFoldModel:
                    callBackWithFoldModel(model) 

            res = self.createStatObj(results[ml], exp_pred[ml], nTrainEx[ml], nTestEx[ml],self.responseType, self.nExtFolds, logTxt, labels = hasattr(self.data.domain.classVar,"values") and list(self.data.domain.classVar.values) or None )
            if self.verbose > 0: 
                print "UnbiasedAccuracyGetter!Results  "+ml+":\n"
                pprint(res)
            if not res:
                raise Exception("No results available!")
            res["runningTime"] = time.time() - startTime
            statistics[ml] = copy.deepcopy(res)
            self.__writeResults(statistics)
            self.__log("       OK")
          except:
            self.__log("       Learner "+str(ml)+" failed to create/optimize the model!")
            error = str(sys.exc_info()[0]) +" "+\
                        str(sys.exc_info()[1]) +" "+\
                        str(traceback.extract_tb(sys.exc_info()[2]))
            self.__log(error)
 
            res = self.createStatObj()
            statistics[ml] = copy.deepcopy(res)
            self.__writeResults(statistics)

        if not statistics or len(statistics) < 1:
            self.__log("ERROR: No statistics to return!")
            return None
        elif len(statistics) > 1:
            #We still need to build a consensus model out of the stable models 
            #   ONLY if there is more than one stable model!
            #   When only one or no stable models, build a consensus based on all models
            # ALWAYS exclude specialType models (MLmethods[ml].specialType > 0)
            consensusMLs={}
            for modelName in statistics:
                StabilityValue = statistics[modelName]["StabilityValue"]
                if StabilityValue is not None and statistics[modelName]["stable"]:
                    consensusMLs[modelName] = copy.deepcopy(statistics[modelName])

            self.__log("Found "+str(len(consensusMLs))+" stable MLmethods out of "+str(len(statistics))+" MLmethods.")

            if len(consensusMLs) <= 1:   # we need more models to build a consensus!
                consensusMLs={}
                for modelName in statistics:
                    consensusMLs[modelName] = copy.deepcopy(statistics[modelName])

            # Exclude specialType models 
            excludeThis = []
            for learnerName in consensusMLs:
                if models[learnerName][0].specialType > 0:
                    excludeThis.append(learnerName)
            for learnerName in excludeThis:
                consensusMLs.pop(learnerName)
                self.__log("    > Excluded special model " + learnerName)
            self.__log("    > Stable modules: " + str(consensusMLs.keys()))

            if len(consensusMLs) >= 2:
                #Var for saving each fold's result
                startTime = time.time()
                Cresults = []
                Cexp_pred = []
                CnTrainEx = []
                CnTestEx = []
                self.__log("Calculating the statistics for a Consensus model based on "+str([ml for ml in consensusMLs]))
                for foldN in range(self.nExtFolds):
                    if self.responseType == "Classification":
                        CLASS0 = str(self.data.domain.classVar.values[0])
                        CLASS1 = str(self.data.domain.classVar.values[1])
                        # exprTest0
                        exprTest0 = "(0"
                        for ml in consensusMLs:
                            exprTest0 += "+( "+ml+" == "+CLASS0+" )*"+str(optAcc[ml][foldN])+" "
                        exprTest0 += ")/IF0(sum([False"
                        for ml in consensusMLs:
                            exprTest0 += ", "+ml+" == "+CLASS0+" "
                        exprTest0 += "]),1)"
                        # exprTest1
                        exprTest1 = "(0"
                        for ml in consensusMLs:
                            exprTest1 += "+( "+ml+" == "+CLASS1+" )*"+str(optAcc[ml][foldN])+" "
                        exprTest1 += ")/IF0(sum([False"
                        for ml in consensusMLs:
                            exprTest1 += ", "+ml+" == "+CLASS1+" "
                        exprTest1 += "]),1)"
                        # Expression
                        expression = [exprTest0+" >= "+exprTest1+" -> "+CLASS0," -> "+CLASS1]
                    else:
                        Q2sum = sum([optAcc[ml][foldN] for ml in consensusMLs])
                        expression = "(1 / "+str(Q2sum)+") * (0"
                        for ml in consensusMLs:
                            expression += " + "+str(optAcc[ml][foldN])+" * "+ml+" "
                        expression += ")"

                    testData = self.data.select(DataIdxs,foldN+1)  # fold 0 is for the train Bias!!
                    smilesAttr = dataUtilities.getSMILESAttr(testData)
                    if smilesAttr:
                        self.__log("Found SMILES attribute:"+smilesAttr)
                        testData = dataUtilities.attributeDeselectionData(testData, [smilesAttr])
                        self.__log("Selected attrs: "+str([attr.name for attr in trainData.domain[0:3]] + ["..."] + [attr.name for attr in trainData.domain[len(trainData.domain)-3:]]))

                    CnTestEx.append(len(testData))
                    consensusClassifiers = {}
                    for learnerName in consensusMLs:
                        consensusClassifiers[learnerName] = models[learnerName][foldN]

                    model = AZorngConsensus.ConsensusClassifier(classifiers = consensusClassifiers, expression = expression)     
                    CnTrainEx.append(model.NTrainEx)
                    #Test the model
                    if self.responseType == "Classification":
                        Cresults.append((evalUtilities.getClassificationAccuracy(testData, model), evalUtilities.getConfMat(testData, model) ) )
                    else:
                        local_exp_pred = []
                        # Predict using bulk-predict
                        predictions = model(testData)
                        # Gather predictions
                        for n,ex in enumerate(testData):
                            local_exp_pred.append((ex.getclass().value, predictions[n].value))
                        Cresults.append((evalUtilities.calcRMSE(local_exp_pred), evalUtilities.calcRsqrt(local_exp_pred) ) )
                        #Save the experimental value and corresponding predicted value
                        Cexp_pred += local_exp_pred

                res = self.createStatObj(Cresults, Cexp_pred, CnTrainEx, CnTestEx, self.responseType, self.nExtFolds, labels = hasattr(self.data.domain.classVar,"values") and list(self.data.domain.classVar.values) or None )
                res["runningTime"] = time.time() - startTime
                statistics["Consensus"] = copy.deepcopy(res)
                statistics["Consensus"]["IndividualStatistics"] = copy.deepcopy(consensusMLs)
                self.__writeResults(statistics)
            self.__log("Returned multiple ML methods statistics.")
            return statistics
                 
        #By default return the only existing statistics!
        self.__writeResults(statistics)
        self.__log("Returned only one ML method statistics.")
        return statistics[statistics.keys()[0]]
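
The sys.exc_info idiom in this example is the bare except block near the end: all three elements of the tuple are formatted into a single log line, with traceback.extract_tb applied to the third element to summarize the stack. A condensed sketch of that idiom, where log is a hypothetical stand-in for self.__log:

import sys
import traceback

def log_current_exception(log):
    # Same shape as the handler above: type, value, extracted frames.
    exc_type, exc_value, exc_tb = sys.exc_info()
    log("%s %s %s" % (exc_type, exc_value, traceback.extract_tb(exc_tb)))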

Example 2

Project: Nagstamon
Source File: Icinga.py
    def _get_status_HTML(self):
        """
        Get status from Nagios Server - the oldschool CGI HTML way
        """
        # create Nagios items dictionary with two lists for services and hosts
        # every list will contain a dictionary for every failed service/host
        # this dictionary is only temporary
        # ##global icons
        nagitems = {'services':[], 'hosts':[]}

        # new_hosts dictionary
        self.new_hosts = dict()

        # hosts - mostly the down ones
        # unfortunately the hosts status page has a different structure so
        # hosts must be analyzed separately
        try:
            for status_type in 'hard', 'soft':
                result = self.FetchURL(self.cgiurl_hosts[status_type])
                htobj, error, status_code = result.result,\
                                            result.error,\
                                            result.status_code

                # check if any error occurred
                errors_occured = self.check_for_error(htobj, error, status_code)
                # if there are errors return them
                if errors_occured != False:
                    return(errors_occured)    

                # put a copy of a part of htobj into table to be able to delete htobj
                table = htobj('table', {'class': 'status'})[0]

                # do some cleanup
                del result, error

                # access table rows
                # some Icinga versions have a <tbody> tag in cgi output HTML which
                # prevents the <tr> tags from being found
                if len(table('tbody')) == 0:
                    trs = table('tr', recursive=False)
                else:
                    tbody = table('tbody')[0]
                    trs = tbody('tr', recursive=False)

                # kick out table heads
                trs.pop(0)

                for tr in trs:
                    try:
                        # ignore empty <tr> rows
                        if len(tr('td', recursive=False)) > 1:
                            n = {}
                            # get tds in one tr
                            tds = tr('td', recursive=False)
                            # host
                            try:
                                n['host'] = str(tds[0].table.tr.td.table.tr.td.a.string)
                            except:
                                n['host'] = str(nagitems[len(nagitems) - 1]['host'])
                                # status
                            n['status'] = str(tds[1].string)
                            # last_check
                            n['last_check'] = str(tds[2].string)
                            # duration
                            n['duration'] = str(tds[3].string)
                            # division between Nagios and Icinga in real life... where
                            # Nagios has only 5 columns there are 7 in Icinga 1.3...
                            # ... and 6 in Icinga 1.2 :-)
                            if len(tds) < 7:
                                # the old Nagios table
                                # status_information
                                if len(tds[4](text=not_empty)) == 0:
                                    n['status_information'] = ''
                                else:
                                    n['status_information'] = str(tds[4].string)
                                    # attempts are not shown in case of hosts so it defaults to 'N/A'
                                n['attempt'] = 'N/A'
                            else:
                                # attempts are shown for hosts
                                # to fix http://sourceforge.net/tracker/?func=detail&atid=1101370&aid=3280961&group_id=236865 .attempt needs
                                # to be stripped
                                n['attempt'] = str(tds[4].string).strip()
                                # status_information
                                if len(tds[5](text=not_empty)) == 0:
                                    n['status_information'] = ''
                                else:
                                    n['status_information'] = str(tds[5].string)

                            # status flags
                            n['passiveonly'] = False
                            n['notifications_disabled'] = False
                            n['flapping'] = False
                            n['acknowledged'] = False
                            n['scheduled_downtime'] = False

                            # map status icons to status flags
                            icons = tds[0].findAll('img')
                            for i in icons:
                                icon = i['src'].split('/')[-1]
                                if icon in self.STATUS_MAPPING:
                                    n[self.STATUS_MAPPING[icon]] = True
                            # cleaning
                            del icons

                            # add dictionary full of information about this host item to nagitems
                            nagitems['hosts'].append(n)
                            # after collecting data in nagitems, create objects from its information
                            # host objects contain service objects
                            if not n['host'] in self.new_hosts:
                                new_host = n['host']
                                self.new_hosts[new_host] = GenericHost()
                                self.new_hosts[new_host].name = n['host']
                                self.new_hosts[new_host].server = self.name
                                self.new_hosts[new_host].status = n['status']
                                self.new_hosts[new_host].last_check = n['last_check']
                                self.new_hosts[new_host].duration = n['duration']
                                self.new_hosts[new_host].attempt = n['attempt']
                                self.new_hosts[new_host].status_information = n['status_information'].replace('\n', ' ').strip()
                                self.new_hosts[new_host].passiveonly = n['passiveonly']
                                self.new_hosts[new_host].notifications_disabled = n['notifications_disabled']
                                self.new_hosts[new_host].flapping = n['flapping']
                                self.new_hosts[new_host].acknowledged = n['acknowledged']
                                self.new_hosts[new_host].scheduled_downtime = n['scheduled_downtime']
                                self.new_hosts[new_host].status_type = status_type

                                # extra Icinga properties to solve https://github.com/HenriWahl/Nagstamon/issues/192
                                # acknowledge needs host_name and no display name
                                self.new_hosts[new_host].real_name = n['host']
                            
                            # some cleanup
                            del tds, n
                    except:
                        self.Error(sys.exc_info())

                # do some cleanup
                htobj.decompose()
                del htobj, trs, table

        except:
                # set checking flag back to False
                self.isChecking = False
                result, error = self.Error(sys.exc_info())
                return Result(result=result, error=error)

        # services
        try:
            for status_type in 'hard', 'soft':
                result = self.FetchURL(self.cgiurl_services[status_type])
                htobj, error, status_code = result.result,\
                                            result.error,\
                                            result.status_code
                                            
                # check if any error occurred
                errors_occured = self.check_for_error(htobj, error, status_code)
                # if there are errors return them
                if errors_occured != False:
                    return(errors_occured)    
                
                table = htobj('table', {'class': 'status'})[0]

                # some Icinga versions have a <tbody> tag in cgi output HTML which
                # prevents the <tr> tags from being found
                if len(table('tbody')) == 0:
                    trs = table('tr', recursive=False)
                else:
                    tbody = table('tbody')[0]
                    trs = tbody('tr', recursive=False)

                # do some cleanup
                del result, error

                # kick out table heads
                trs.pop(0)

                for tr in trs:
                    try:
                        # ignore empty <tr> rows - there are a lot of them - a Nagios bug?
                        tds = tr('td', recursive=False)
                        if len(tds) > 1:
                            n = {}
                            # host
                            # the resulting table of Nagios status.cgi omits the
                            # hostname of a failing service if there are more than one
                            # so if the hostname is empty the nagios status item should get
                            # its hostname from the previous item - one reason to keep 'nagitems'
                            try:
                                n['host'] = str(tds[0](text=not_empty)[0])
                            except:
                                n['host'] = str(nagitems['services'][len(nagitems['services']) - 1]['host'])
                                # service
                            n['service'] = str(tds[1](text=not_empty)[0])
                            # status
                            n['status'] = str(tds[2](text=not_empty)[0])
                            # last_check
                            n['last_check'] = str(tds[3](text=not_empty)[0])
                            # duration
                            n['duration'] = str(tds[4](text=not_empty)[0])
                            # attempt
                            # to fix http://sourceforge.net/tracker/?func=detail&atid=1101370&aid=3280961&group_id=236865 .attempt needs
                            # to be stripped
                            n['attempt'] = str(tds[5](text=not_empty)[0]).strip()
                            # status_information
                            if len(tds[6](text=not_empty)) == 0:
                                n['status_information'] = ''
                            else:
                                n['status_information'] = str(tds[6](text=not_empty)[0])
                                # status flags
                            n['passiveonly'] = False
                            n['notifications_disabled'] = False
                            n['flapping'] = False
                            n['acknowledged'] = False
                            n['scheduled_downtime'] = False

                            # map status icons to status flags
                            icons = tds[1].findAll('img')
                            for i in icons:
                                icon = i['src'].split('/')[-1]
                                if icon in self.STATUS_MAPPING:
                                    n[self.STATUS_MAPPING[icon]] = True
                            # cleaning
                            del icons

                            # add dictionary full of information about this service item to nagitems - only if service
                            nagitems['services'].append(n)
                            # after collecting data in nagitems, create objects from its information
                            # host objects contain service objects
                            if not n['host'] in self.new_hosts:
                                self.new_hosts[n['host']] = GenericHost()
                                self.new_hosts[n['host']].name = n['host']
                                self.new_hosts[n['host']].status = 'UP'
                                # extra Icinga properties to solve https://github.com/HenriWahl/Nagstamon/issues/192
                                # acknowledge needs host_description and no display name
                                self.new_hosts[n['host']].real_name = n['host']
                                
                                # trying to fix https://sourceforge.net/tracker/index.php?func=detail&aid=3299790&group_id=236865&atid=1101370
                                # if host is not down but in downtime or any other flag this should be evaluated too
                                # map status icons to status flags
                                icons = tds[0].findAll('img')
                                for i in icons:
                                    icon = i['src'].split('/')[-1]
                                    if icon in self.STATUS_MAPPING:
                                        self.new_hosts[n['host']].__dict__[self.STATUS_MAPPING[icon]] = True
                                # cleaning
                                del icons
                                # if a service does not exist create its object
                            if not n['service'] in self.new_hosts[n['host']].services:
                                new_service = n['service']
                                self.new_hosts[n['host']].services[new_service] = GenericService()
                                self.new_hosts[n['host']].services[new_service].host = n['host']
                                self.new_hosts[n['host']].services[new_service].server = self.name
                                self.new_hosts[n['host']].services[new_service].name = n['service']
                                self.new_hosts[n['host']].services[new_service].status = n['status']
                                self.new_hosts[n['host']].services[new_service].last_check = n['last_check']
                                self.new_hosts[n['host']].services[new_service].duration = n['duration']
                                self.new_hosts[n['host']].services[new_service].attempt = n['attempt']
                                self.new_hosts[n['host']].services[new_service].status_information = n['status_information'].replace('\n', ' ').strip()
                                self.new_hosts[n['host']].services[new_service].passiveonly = n['passiveonly']
                                self.new_hosts[n['host']].services[new_service].notifications_disabled = n['notifications_disabled']
                                self.new_hosts[n['host']].services[new_service].flapping = n['flapping']
                                self.new_hosts[n['host']].services[new_service].acknowledged = n['acknowledged']
                                self.new_hosts[n['host']].services[new_service].scheduled_downtime = n['scheduled_downtime']

                                # extra Icinga properties to solve https://github.com/HenriWahl/Nagstamon/issues/192
                                # acknowledge needs service_description and no display name
                                self.new_hosts[n['host']].services[new_service].real_name = n['service']
                                
                            # some cleanup
                            del tds, n
                    except:
                        self.Error(sys.exc_info())

                # do some cleanup
                htobj.decompose()
                del htobj, trs, table

        except:
            # set checking flag back to False
            self.isChecking = False
            result, error = self.Error(sys.exc_info())
            return Result(result=result, error=error)

            # some cleanup
        del nagitems

        # dummy return in case all is OK
        return Result()
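
Here the entire sys.exc_info() tuple is handed to an error handler (self.Error) instead of being formatted at the call site. A minimal sketch of that division of labor, with Error as a hypothetical stand-in for Nagstamon's method:

import sys

def Error(exc_info_tuple):
    # Hypothetical stand-in: the caller captures the tuple,
    # the handler decides how to present it.
    exc_type, exc_value, exc_tb = exc_info_tuple
    return None, "%s: %s" % (exc_type.__name__, exc_value)

try:
    raise ValueError("malformed status table")
except Exception:
    result, error = Error(sys.exc_info())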

Example 3

Project: pyvows
Source File: gevent.py
    def run_context(self, ctx_collection, ctx_name, ctx_obj, execution_plan, index=-1, suite=None, skipReason=None):
        #   FIXME: Add Docstring

        #-----------------------------------------------------------------------
        # Local variables and defs
        #-----------------------------------------------------------------------
        ctx_result = {
            'filename': suite or inspect.getsourcefile(ctx_obj.__class__),
            'name': ctx_name,
            'tests': [],
            'contexts': [],
            'topic_elapsed': 0,
            'error': None,
            'skip': skipReason
        }

        ctx_collection.append(ctx_result)
        ctx_obj.index = index
        ctx_obj.pool = self.pool
        teardown_blockers = []

        def _run_setup_and_topic(ctx_obj, index):
            # If we're already mid-skip, don't run anything
            if skipReason:
                raise skipReason

            # Run setup function
            try:
                ctx_obj.setup()
            except Exception:
                raise VowsTopicError('setup', sys.exc_info())

            # Find & run topic function
            if not hasattr(ctx_obj, 'topic'):  # ctx_obj has no topic
                return ctx_obj._get_first_available_topic(index)

            try:
                topic_func = ctx_obj.topic
                topic_list = get_topics_for(topic_func, ctx_obj)

                start_time = time.time()

                if topic_func is None:
                    return None

                topic = topic_func(*topic_list)
                ctx_result['topic_elapsed'] = elapsed(start_time)
                return topic
            except SkipTest:
                raise
            except Exception:
                raise VowsTopicError('topic', sys.exc_info())

        def _run_tests(topic):
            def _run_with_topic(topic):
                def _run_vows_and_subcontexts(topic, index=-1, enumerated=False):
                    # methods
                    for vow_name, vow in vows:
                        if skipReason:
                            skipped_result = self.get_vow_result(vow, topic, ctx_obj, vow_name, enumerated)
                            skipped_result['skip'] = skipReason
                            ctx_result['tests'].append(skipped_result)
                        else:
                            vow_greenlet = self._run_vow(
                                ctx_result['tests'],
                                topic,
                                ctx_obj,
                                vow,
                                vow_name,
                                enumerated=enumerated)
                            teardown_blockers.append(vow_greenlet)

                    # classes
                    for subctx_name, subctx in subcontexts:
                        # resolve user-defined Context classes
                        if not issubclass(subctx, self.context_class):
                            subctx = type(ctx_name, (subctx, self.context_class), {})

                        subctx_obj = subctx(ctx_obj)
                        subctx_obj.pool = self.pool

                        subctx_greenlet = self.pool.spawn(
                            self.run_context,
                            ctx_result['contexts'],
                            subctx_name,
                            subctx_obj,
                            execution_plan['contexts'][subctx_name],
                            index=index,
                            suite=suite or ctx_result['filename'],
                            skipReason=skipReason
                        )
                        teardown_blockers.append(subctx_greenlet)

                # setup generated topics if needed
                is_generator = inspect.isgenerator(topic)
                if is_generator:
                    try:
                        ctx_obj.generated_topic = True
                        topic = ctx_obj.topic_value = list(topic)
                    except Exception:
                        # Actually getting the values from the generator may raise an exception
                        raise VowsTopicError('topic', sys.exc_info())
                else:
                    ctx_obj.topic_value = topic

                if is_generator:
                    for index, topic_value in enumerate(topic):
                        _run_vows_and_subcontexts(topic_value, index=index, enumerated=True)
                else:
                    _run_vows_and_subcontexts(topic)

            vows = set((vow_name, getattr(type(ctx_obj), vow_name)) for vow_name in execution_plan['vows'])
            subcontexts = set((subctx_name, getattr(type(ctx_obj), subctx_name)) for subctx_name in execution_plan['contexts'])

            if not isinstance(topic, VowsAsyncTopic):
                _run_with_topic(topic)
            else:
                def handle_callback(*args, **kw):
                    _run_with_topic(VowsAsyncTopicValue(args, kw))
                topic(handle_callback)

        def _run_teardown():
            try:
                for blocker in teardown_blockers:
                    blocker.join()
                ctx_obj.teardown()
            except Exception:
                raise VowsTopicError('teardown', sys.exc_info())

        def _update_execution_plan():
            '''Since Context.ignore can modify the ignored_members during setup or topic,
                update the execution_plan to reflect the new ignored_members'''

            for name in ctx_obj.ignored_members:
                if name in execution_plan['vows']:
                    execution_plan['vows'].remove(name)
                if name in execution_plan['contexts']:
                    del execution_plan['contexts'][name]

        #-----------------------------------------------------------------------
        # Begin
        #-----------------------------------------------------------------------
        try:
            try:
                topic = _run_setup_and_topic(ctx_obj, index)
                _update_execution_plan()
            except SkipTest, se:
                ctx_result['skip'] = se
                skipReason = se
                topic = None
            except VowsTopicError, e:
                ctx_result['error'] = e
                skipReason = SkipTest('topic dependency failed')
                topic = None
            _run_tests(topic)
            if not ctx_result['error']:
                try:
                    _run_teardown()
                except Exception, e:
                    ctx_result['error'] = e
        finally:
            ctx_result['stdout'] = VowsParallelRunner.output.stdout.getvalue()
            ctx_result['stderr'] = VowsParallelRunner.output.stderr.getvalue()
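
This runner captures sys.exc_info() at the point of failure and carries the tuple inside a custom exception (VowsTopicError), together with the name of the phase that failed, so the reporter can later show the original traceback. A minimal sketch of the same idea, with TopicError as a hypothetical analogue:

import sys

class TopicError(Exception):
    # Hypothetical analogue of pyvows' VowsTopicError.
    def __init__(self, phase, exc_info):
        Exception.__init__(self, phase)
        self.phase = phase        # which phase failed: 'setup', 'topic', ...
        self.exc_info = exc_info  # the original (type, value, traceback)

def run_setup(setup):
    try:
        setup()
    except Exception:
        raise TopicError('setup', sys.exc_info())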

Example 4

Project: p2pool
Source File: Server.py
    def do_POST(self):
        global _contexts
        
        status = 500
        try:
            if self.server.config.dumpHeadersIn:
                s = 'Incoming HTTP headers'
                debugHeader(s)
                print self.raw_requestline.strip()
                print "\n".join(map (lambda x: x.strip(),
                    self.headers.headers))
                debugFooter(s)

            data = self.rfile.read(int(self.headers["Content-length"]))

            if self.server.config.dumpSOAPIn:
                s = 'Incoming SOAP'
                debugHeader(s)
                print data,
                if data[-1] != '\n':
                    print
                debugFooter(s)

            (r, header, body, attrs) = \
                parseSOAPRPC(data, header = 1, body = 1, attrs = 1)

            method = r._name
            args   = r._aslist()
            kw     = r._asdict()

            if Config.simplify_objects:
                args = simplify(args)
                kw = simplify(kw)

            # Handle mixed named and unnamed arguments by assuming
            # that all arguments with names of the form "v[0-9]+"
            # are unnamed and should be passed in numeric order,
            # other arguments are named and should be passed using
            # this name.

            # This is a non-standard extension to the SOAP protocol,
            # but is supported by Apache AXIS.

            # It is enabled by default.  To disable, set
            # Config.specialArgs to False.


            ordered_args = {}
            named_args   = {}

            if Config.specialArgs: 
                
                for (k,v) in  kw.items():

                    if k[0]=="v":
                        try:
                            i = int(k[1:])
                            ordered_args[i] = v
                        except ValueError:
                            named_args[str(k)] = v

                    else:
                        named_args[str(k)] = v

            # We have to decide namespace precedence
            # I'm happy with the following scenario
            # if r._ns is specified use it, if not check for
            # a path, if it's specified convert it and use it as the
            # namespace. If both are specified, use r._ns.
            
            ns = r._ns

            if len(self.path) > 1 and not ns:
                ns = self.path.replace("/", ":")
                if ns[0] == ":": ns = ns[1:]
            
            # authorization method
            a = None

            keylist = ordered_args.keys()
            keylist.sort()

            # create list in proper order w/o names
            tmp = map( lambda x: ordered_args[x], keylist)
            ordered_args = tmp

            #print '<-> Argument Matching Yielded:'
            #print '<-> Ordered Arguments:' + str(ordered_args)
            #print '<-> Named Arguments  :' + str(named_args)
             
            resp = ""
            
            # For fault messages
            if ns:
                nsmethod = "%s:%s" % (ns, method)
            else:
                nsmethod = method

            try:
                # First look for registered functions
                if self.server.funcmap.has_key(ns) and \
                    self.server.funcmap[ns].has_key(method):
                    f = self.server.funcmap[ns][method]

                    # look for the authorization method
                    if self.server.config.authMethod != None:
                        authmethod = self.server.config.authMethod
                        if self.server.funcmap.has_key(ns) and \
                               self.server.funcmap[ns].has_key(authmethod):
                            a = self.server.funcmap[ns][authmethod]
                else:
                    # Now look at registered objects
                    # Check for nested attributes. This works even if
                    # there are none, because the split will return
                    # [method]
                    f = self.server.objmap[ns]
                    
                    # Look for the authorization method
                    if self.server.config.authMethod != None:
                        authmethod = self.server.config.authMethod
                        if hasattr(f, authmethod):
                            a = getattr(f, authmethod)

                    # then continue looking for the method
                    l = method.split(".")
                    for i in l:
                        f = getattr(f, i)
            except:
                info = sys.exc_info()
                try:
                    resp = buildSOAP(faultType("%s:Client" % NS.ENV_T,
                                               "Method Not Found",
                                               "%s : %s %s %s" % (nsmethod,
                                                                  info[0],
                                                                  info[1],
                                                                  info[2])),
                                     encoding = self.server.encoding,
                                     config = self.server.config)
                finally:
                    del info
                status = 500
            else:
                try:
                    if header:
                        x = HeaderHandler(header, attrs)

                    fr = 1

                    # call context book keeping
                    # We're stuffing the method into the soapaction if there
                    # isn't one, someday, we'll set that on the client
                    # and it won't be necessary here
                    # for now we're doing both

                    if "SOAPAction".lower() not in self.headers.keys() or \
                       self.headers["SOAPAction"] == "\"\"":
                        self.headers["SOAPAction"] = method
                        
                    thread_id = thread.get_ident()
                    _contexts[thread_id] = SOAPContext(header, body,
                                                       attrs, data,
                                                       self.connection,
                                                       self.headers,
                                                       self.headers["SOAPAction"])

                    # Do an authorization check
                    if a != None:
                        if not apply(a, (), {"_SOAPContext" :
                                             _contexts[thread_id] }):
                            raise faultType("%s:Server" % NS.ENV_T,
                                            "Authorization failed.",
                                            "%s" % nsmethod)
                    
                    # If it's wrapped, some special action may be needed
                    if isinstance(f, MethodSig):
                        c = None
                    
                        if f.context:  # retrieve context object
                            c = _contexts[thread_id]

                        if Config.specialArgs:
                            if c:
                                named_args["_SOAPContext"] = c
                            fr = apply(f, ordered_args, named_args)
                        elif f.keywords:
                            # This is lame, but have to de-unicode
                            # keywords
                            
                            strkw = {}
                            
                            for (k, v) in kw.items():
                                strkw[str(k)] = v
                            if c:
                                strkw["_SOAPContext"] = c
                            fr = apply(f, (), strkw)
                        elif c:
                            fr = apply(f, args, {'_SOAPContext':c})
                        else:
                            fr = apply(f, args, {})

                    else:
                        if Config.specialArgs:
                            fr = apply(f, ordered_args, named_args)
                        else:
                            fr = apply(f, args, {})

                    
                    if type(fr) == type(self) and \
                        isinstance(fr, voidType):
                        resp = buildSOAP(kw = {'%sResponse' % method: fr},
                            encoding = self.server.encoding,
                            config = self.server.config)
                    else:
                        resp = buildSOAP(kw =
                            {'%sResponse' % method: {'Result': fr}},
                            encoding = self.server.encoding,
                            config = self.server.config)

                    # Clean up _contexts
                    if _contexts.has_key(thread_id):
                        del _contexts[thread_id]
                        
                except Exception, e:
                    import traceback
                    info = sys.exc_info()

                    try:
                        if self.server.config.dumpFaultInfo:
                            s = 'Method %s exception' % nsmethod
                            debugHeader(s)
                            traceback.print_exception(info[0], info[1],
                                                      info[2])
                            debugFooter(s)

                        if isinstance(e, faultType):
                            f = e
                        else:
                            f = faultType("%s:Server" % NS.ENV_T,
                                          "Method Failed",
                                          "%s" % nsmethod)

                        if self.server.config.returnFaultInfo:
                            f._setDetail("".join(traceback.format_exception(
                                info[0], info[1], info[2])))
                        elif not hasattr(f, 'detail'):
                            f._setDetail("%s %s" % (info[0], info[1]))
                    finally:
                        del info

                    resp = buildSOAP(f, encoding = self.server.encoding,
                       config = self.server.config)
                    status = 500
                else:
                    status = 200
        except faultType, e:
            import traceback
            info = sys.exc_info()
            try:
                if self.server.config.dumpFaultInfo:
                    s = 'Received fault exception'
                    debugHeader(s)
                    traceback.print_exception(info[0], info[1],
                        info[2])
                    debugFooter(s)

                if self.server.config.returnFaultInfo:
                    e._setDetail("".join(traceback.format_exception(
                            info[0], info[1], info[2])))
                elif not hasattr(e, 'detail'):
                    e._setDetail("%s %s" % (info[0], info[1]))
            finally:
                del info

            resp = buildSOAP(e, encoding = self.server.encoding,
                config = self.server.config)
            status = 500
        except Exception, e:
            # internal error, report as HTTP server error

            if self.server.config.dumpFaultInfo:
                s = 'Internal exception %s' % e
                import traceback
                debugHeader(s)
                info = sys.exc_info()
                try:
                    traceback.print_exception(info[0], info[1], info[2])
                finally:
                    del info

                debugFooter(s)

            self.send_response(500)
            self.end_headers()

            if self.server.config.dumpHeadersOut and \
                self.request_version != 'HTTP/0.9':
                s = 'Outgoing HTTP headers'
                debugHeader(s)
                if self.responses.has_key(status):
                    s = ' ' + self.responses[status][0]
                else:
                    s = ''
                print "%s %d%s" % (self.protocol_version, 500, s)
                print "Server:", self.version_string()
                print "Date:", self.__last_date_time_string
                debugFooter(s)
        else:
            # got a valid SOAP response
            self.send_response(status)

            t = 'text/xml';
            if self.server.encoding != None:
                t += '; charset=%s' % self.server.encoding
            self.send_header("Content-type", t)
            self.send_header("Content-length", str(len(resp)))
            self.end_headers()

            if self.server.config.dumpHeadersOut and \
                self.request_version != 'HTTP/0.9':
                s = 'Outgoing HTTP headers'
                debugHeader(s)
                if self.responses.has_key(status):
                    s = ' ' + self.responses[status][0]
                else:
                    s = ''
                print "%s %d%s" % (self.protocol_version, status, s)
                print "Server:", self.version_string()
                print "Date:", self.__last_date_time_string
                print "Content-type:", t
                print "Content-length:", len(resp)
                debugFooter(s)

            if self.server.config.dumpSOAPOut:
                s = 'Outgoing SOAP'
                debugHeader(s)
                print resp,
                if resp[-1] != '\n':
                    print
                debugFooter(s)

            self.wfile.write(resp)
            self.wfile.flush()

            # We should be able to shut down both a regular and an SSL
            # connection, but under Python 2.1, calling shutdown on an
            # SSL connection drops the output, hence this work-around.
            # This should be investigated more someday.

            if self.server.config.SSLserver and \
                isinstance(self.connection, SSL.Connection):
                self.connection.set_shutdown(SSL.SSL_SENT_SHUTDOWN |
                    SSL.SSL_RECEIVED_SHUTDOWN)
            else:
                self.connection.shutdown(1)

    def do_GET(self):

        #print 'command        ', self.command
        #print 'path           ', self.path
        #print 'request_version', self.request_version
        #print 'headers'
        #print '   type    ', self.headers.type
        #print '   maintype', self.headers.maintype
        #print '   subtype ', self.headers.subtype
        #print '   params  ', self.headers.plist

        path = self.path.lower()
        if path.endswith('wsdl'):
            method = 'wsdl'
            function = namespace = None
            if self.server.funcmap.has_key(namespace) \
                    and self.server.funcmap[namespace].has_key(method):
                function = self.server.funcmap[namespace][method]
            else:
                if namespace in self.server.objmap.keys():
                    function = self.server.objmap[namespace]
                    l = method.split(".")
                    for i in l:
                        function = getattr(function, i)

            if function:
                self.send_response(200)
                self.send_header("Content-type", 'text/plain')
                self.end_headers()
                response = apply(function, ())
                self.wfile.write(str(response))
                return

        # return error
        self.send_response(200)
        self.send_header("Content-type", 'text/html')
        self.end_headers()
        self.wfile.write('''\
<head>
<title>Error!</title>
</head>

<body>
<h1>Oops!</h1>

<p>
  This server supports HTTP GET requests only for the purpose of
  obtaining Web Services Description Language (WSDL) for a specific
  service.

  Either you requested a URL that does not end in "wsdl" or this
  server does not implement a wsdl method.
</p>


</body>''')
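
Aside: the error branch above answers a failed dispatch with a 500 status line and a fault body built from the in-flight exception, which the handler can read via sys.exc_info() without binding a name in the except clause. A minimal sketch of that pattern, in Python 3 (the snippet above is Python 2; the fault formatting here is illustrative, not SOAPpy's actual envelope):

import sys

def build_fault():
    # Inside an except block (or anything it calls), sys.exc_info()
    # returns the (type, value, traceback) triple of the exception
    # currently being handled.
    etype, value, tb = sys.exc_info()
    return '<Fault>%s: %s</Fault>' % (etype.__name__, value)

def dispatch(fn):
    try:
        return 200, fn()
    except Exception:
        # Mirror the error branch above: a 500 status plus a fault
        # body built from the live exception.
        return 500, build_fault()

print(dispatch(lambda: 1 / 0))
# (500, '<Fault>ZeroDivisionError: division by zero</Fault>')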

Example 5

Project: numscons
Source File: Taskmaster.py
View license
    def _find_next_ready_node(self):
        """
        Finds the next node that is ready to be built.

        This is *the* main guts of the DAG walk.  We loop through the
        list of candidates, looking for something that has no un-built
        children (i.e., that is a leaf Node or has dependencies that are
        all leaf Nodes or up-to-date).  Candidate Nodes are re-scanned
        (both the target Node itself and its sources, which are always
        scanned in the context of a given target) to discover implicit
        dependencies.  A Node that must wait for some children to be
        built will be put back on the candidates list after the children
        have finished building.  A Node that has been put back on the
        candidates list in this way may have itself (or its sources)
        re-scanned, in order to handle generated header files (e.g.) and
        the implicit dependencies therein.

        Note that this method does not do any signature calculation or
        up-to-date check itself.  All of that is handled by the Task
        class.  This is purely concerned with the dependency graph walk.
        """

        self.ready_exc = None

        T = self.trace
        if T: T.write('\n' + self.trace_message('Looking for a node to evaluate'))

        while 1:
            node = self.next_candidate()
            if node is None:
                if T: T.write(self.trace_message('No candidate anymore.') + '\n')
                return None

            node = node.disambiguate()
            state = node.get_state()

            # For debugging only:
            #
            # try:
            #     self._validate_pending_children()
            # except:
            #     self.ready_exc = sys.exc_info()
            #     return node

            if CollectStats:
                if not hasattr(node, 'stats'):
                    node.stats = Stats()
                    StatsNodes.append(node)
                S = node.stats
                S.considered = S.considered + 1
            else:
                S = None

            if T: T.write(self.trace_message('    Considering node %s and its children:' % self.trace_node(node)))

            if state == NODE_NO_STATE:
                # Mark this node as being on the execution stack:
                node.set_state(NODE_PENDING)
            elif state > NODE_PENDING:
                # Skip this node if it has already been evaluated:
                if S: S.already_handled = S.already_handled + 1
                if T: T.write(self.trace_message('       already handled (executed)'))
                continue

            executor = node.get_executor()

            try:
                children = executor.get_all_children()
            except SystemExit:
                exc_value = sys.exc_info()[1]
                e = SCons.Errors.ExplicitExit(node, exc_value.code)
                self.ready_exc = (SCons.Errors.ExplicitExit, e)
                if T: T.write(self.trace_message('       SystemExit'))
                return node
            except Exception, e:
                # We had a problem just trying to figure out the
                # children (like a child couldn't be linked in to a
                # VariantDir, or a Scanner threw something).  Arrange to
                # raise the exception when the Task is "executed."
                self.ready_exc = sys.exc_info()
                if S: S.problem = S.problem + 1
                if T: T.write(self.trace_message('       exception %s while scanning children.\n' % e))
                return node

            children_not_visited = []
            children_pending = set()
            children_not_ready = []
            children_failed = False

            for child in chain(executor.get_all_prerequisites(), children):
                childstate = child.get_state()

                if T: T.write(self.trace_message('       ' + self.trace_node(child)))

                if childstate == NODE_NO_STATE:
                    children_not_visited.append(child)
                elif childstate == NODE_PENDING:
                    children_pending.add(child)
                elif childstate == NODE_FAILED:
                    children_failed = True

                if childstate <= NODE_EXECUTING:
                    children_not_ready.append(child)


            # These nodes have not even been visited yet.  Add
            # them to the list so that on some next pass we can
            # take a stab at evaluating them (or their children).
            children_not_visited.reverse()
            self.candidates.extend(self.order(children_not_visited))
            #if T and children_not_visited:
            #    T.write(self.trace_message('     adding to candidates: %s' % map(str, children_not_visited)))
            #    T.write(self.trace_message('     candidates now: %s\n' % map(str, self.candidates)))

            # Skip this node if any of its children have failed.
            #
            # This catches the case where we're descending a top-level
            # target and one of our children failed while trying to be
            # built by a *previous* descent of an earlier top-level
            # target.
            #
            # It can also occur if a node is reused in multiple
            # targets. One first descends through one of the targets;
            # the next descent occurs through the other target.
            #
            # Note that we can only have children_failed if the
            # --keep-going flag was used, because without it the build
            # will stop before diving into the other branch.
            #
            # Note that even if one of the children fails, we still
            # added the other children to the list of candidate nodes
            # to keep on building (--keep-going).
            if children_failed:
                for n in executor.get_action_targets():
                    n.set_state(NODE_FAILED)

                if S: S.child_failed = S.child_failed + 1
                if T: T.write(self.trace_message('****** %s\n' % self.trace_node(node)))
                continue

            if children_not_ready:
                for child in children_not_ready:
                    # We're waiting on one or more derived targets
                    # that have not yet finished building.
                    if S: S.not_built = S.not_built + 1

                    # Add this node to the waiting parents lists of
                    # anything we're waiting on, with a reference
                    # count so we can be put back on the list for
                    # re-evaluation when they've all finished.
                    node.ref_count =  node.ref_count + child.add_to_waiting_parents(node)
                    if T: T.write(self.trace_message('     adjusted ref count: %s, child %s' %
                                  (self.trace_node(node), repr(str(child)))))

                if T:
                    for pc in children_pending:
                        T.write(self.trace_message('       adding %s to the pending children set\n' %
                                self.trace_node(pc)))
                self.pending_children = self.pending_children | children_pending

                continue

            # Skip this node if it has side-effects that are
            # currently being built:
            wait_side_effects = False
            for se in executor.get_action_side_effects():
                if se.get_state() == NODE_EXECUTING:
                    se.add_to_waiting_s_e(node)
                    wait_side_effects = True

            if wait_side_effects:
                if S: S.side_effects = S.side_effects + 1
                continue

            # The default when we've gotten through all of the checks above:
            # this node is ready to be built.
            if S: S.build = S.build + 1
            if T: T.write(self.trace_message('Evaluating %s\n' %
                                             self.trace_node(node)))

            # For debugging only:
            #
            # try:
            #     self._validate_pending_children()
            # except:
            #     self.ready_exc = sys.exc_info()
            #     return node

            return node

        return None
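
The sys.exc_info() usage to note here is deferred delivery: while scanning children, the walker stores the whole (type, value, traceback) triple in self.ready_exc, and the Task re-raises it later when the node is "executed", so the original traceback survives the round trip. A minimal sketch of the same store-now, raise-later pattern (Python 3 syntax; the class and method names are illustrative, not the SCons API):

import sys

class Walker:
    """Minimal sketch of the store-now, raise-later pattern above."""

    def __init__(self):
        self.ready_exc = None  # holds a sys.exc_info() triple, or None

    def scan(self, node):
        try:
            node.children()
        except Exception:
            # Capture the full (type, value, traceback) triple now;
            # the actual raise is deferred until execution time.
            self.ready_exc = sys.exc_info()
        return node

    def execute(self, node):
        if self.ready_exc is not None:
            etype, value, tb = self.ready_exc
            self.ready_exc = None
            # Re-raise with the original scan-time traceback attached.
            raise value.with_traceback(tb)

class BadNode:
    def children(self):
        raise RuntimeError('scanner failed')

w = Walker()
n = w.scan(BadNode())
try:
    w.execute(n)           # re-raises the RuntimeError captured in scan()
except RuntimeError as e:
    print('deferred:', e)  # deferred: scanner failed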

Example 6

View license
def build_annotated_tgm(closest_gene_output,distance_to_tss,logistic_score_output,fasta_file,motif_ids,makeWindow=True,tgm_file='',do_pkl=True):
    '''
    Takes existing tgm, and maps to gene names and TF ids within a specific window
    '''
    from chipsequtil import Fasta
    ##get fasta file events, since these are columns in the logistic_score matrix
    seq_ids=Fasta.load(fasta_file,key_func=lambda x: x)

    ##need to get sequence mids in the order they are processed
    ##in the file; this is the index into the score_output file.
    ##ASSUMES GALAXY-formatted FASTA!
    seq_mids=[] ##list of FASTA regions, in their appropriate order in the file
    filtered_events={}##gene name of closest gene to event within window
    for k in seq_ids.keys():
        vals=k.split(';')
        if len(vals)==1:
    	    vals=k.split()
        if ':' in vals[0]: #bed tools used 
            chr,range=vals[0].split(':')
            low,high=range.split('-')
            mid=str(int(low)+((int(high)-int(low))/2))
            seq_mids.append(chr+':'+mid)
        elif 'random' not in vals[0]: #galaxy tools used
            genome,chr,low,high,strand=vals[0].split('_')
            mid=str(int(low)+((int(high)-int(low))/2))
            seq_mids.append(chr+':'+mid)
        
        if len(vals)==3:            
            filtered_events[chr+':'+mid]=vals[2]
    print 'Found %d events, of which %d have gene names'%(len(seq_mids),len(filtered_events))
    ##this next section relies on xls 
    ##filter events that are within distance from closest_gene_output to get gene mapping
    ##
    filtered_fc={}##FC of events within window, in case we want to use in the future

    event_indexes=[] ##

    
 #    ###open the closest_gene_output and determine
#     try:
#         cgo=open(closest_gene_output,'rU').readlines()
#     except:
#         print "Error opening file:", sys.exc_info()[0]
#         print "Check to make sure file exists at %s"%(closest_gene_output)
#         raise
#     inds=cgo[0].strip().split('\t')
#     for row in cgo[1:]:
#         arr=row.strip().split('\t')
#         if 'geneSymbol' in inds: #this is true if we used an xref file
#             gene=arr[inds.index('geneSymbol')]        
# #            mid=arr[2]+':'+str(int(arr[3])+(int(arr[4])-int(arr[3]))/2)
#         else: #otherwise we just gene id
#             gene=arr[inds.index('knownGeneID')]
#         #position mapping is different
#         if 'Position' in inds: #this is for GPS
#             mid='chr'+arr[inds.index('Position')]
#         elif 'chrom' in inds: #this is for BED
#             mid=arr[inds.index('chrom')]+':'+str(int(arr[inds.index('chromStart')])+(int(arr[inds.index('chromEnd')])-int(arr[inds.index('chromStart')]))/2)
#         else: #this is for MACS
#             mid=arr[inds.index('chr')]+':'+str(int(arr[inds.index('start')])+(int(arr[inds.index('end')])-int(arr[inds.index('start')]))/2)

        
#         #print gene,mid
#         dist=arr[inds.index('dist from feature')]
#         try:
#             sv=arr[inds.index('score')]
#         except:
#             try:
#                 sv=arr[inds.index('IPvsCTR')]
#             except:
#                 fc=0.0
#         if sv!='':
#             fc=float(sv)
#         else:
#             next
                
#         #check absolute distance if we're doing a window, or negative distance if we're looking upstream
#         if distance_to_tss=='' or (makeWindow and np.absolute(int(dist))<int(distance_to_tss)) or int(dist)>(-1*int(distance_to_tss)):
# #            filtered_events[mid]=gene #(this was out of if clause, should it be there?) 1/2
#             if mid in seq_mids:
#                 event_indexes.append(seq_mids.index(mid))##index into fasta file value/maps to array
                
#                 ##UPDATE: moved these to within if clause - so that unrelated scores are not included
#                 filtered_events[mid]=gene ##gene name of event
#                 filtered_fc[mid]=float(fc) ##fc value of event
# #            filtered_fc[mid]=float(fc) #see above, 2/2

                
  #  print 'Got '+str(len(filtered_events))+' per-gene events within '+distance_to_tss+' bp window out of '+str(len(cgo))

 #   print 'These map to '+str(len(event_indexes))+' regions in the FASTA file'

    ##get gene ids, or just use mid of sequence region
    gene_names=[t for t in set(filtered_events.values())]
    print gene_names[0:10]

    #get gene ids for all matrices list loaded in
    mi_files=motif_ids.split(',')
    if len(mi_files)>0:
        #open first motif name file that contains names for each element in TAMO file
        all_tf_names=[a.strip() for a in open(mi_files[0],'rU').readlines()]
    if len(mi_files)>1:
        #if we have additional files, check to see if names already exist
        for i,f in enumerate(mi_files):
            if i==0:
                continue  # first file was already read above
            try:
                #open file and read in extra ids
                newfs=[a.strip() for a in open(f,'rU').readlines()]
            except:
                print "Error opening file:", sys.exc_info()[0]
                print "Check to make sure file exists at %s"%(f)
                raise
               
            if len(newfs)==len(all_tf_names):
                #combine existing tf names with these with . delimiter....
                all_tf_names=['.'.join((a,b)) for a,b in zip(all_tf_names,newfs)]

    ##now go through and clean up TF names
    cleaned_tf_names=[]
    for i,a in enumerate(all_tf_names):
        tfn=set([b for b in a.split('.') if '$' not in b and b!=''])
        if(len(tfn)==0):
            tfn=a.split('.')
#        else:
#            print 'Replacing %s with %s'%(a,'.'.join(tfn))
        cleaned_tf_names.append('.'.join(tfn))

    all_tf_names=cleaned_tf_names
    #print len(cleaned_tf_names)

    
    ##now actually map events to scores
    ##load motif matrix scanning output that maps matrices to regions
    print 'Loading complete motif score file...'
    event_scores=np.loadtxt(logistic_score_output)
    print '\t...Loaded!'
                      
    #create new tgm matrix with appropriate file name
    newmat=np.zeros((len(all_tf_names),len(gene_names)),dtype='float')
    if makeWindow:
        distance_to_tss=distance_to_tss+'_bpWindow'
    else:
        distance_to_tss=distance_to_tss+'_bpUpstream'

    if tgm_file=='': 
        tgm_file=re.sub('.txt','_'+distance_to_tss+'.tgm',os.path.basename(logistic_score_output))
    if do_pkl:
        pkl_file=re.sub('.tgm','.pkl',tgm_file)
    else:
        pkl_file=''
        
    ##sort event indexes from seq_mids that are in the filtered_events file
    event_indexes.sort()
    
    #populate matrix with greatest score attributed to that gene/tf combo
    for ind,arr in enumerate(event_scores):
        ##name of matrix/motif
        mat=all_tf_names[ind]

        #tfnames=[mat]
        ##here we enumerate which sequences were mapped to a gene within the window
        for k,val in enumerate(seq_mids):#k in event_indexes:
            
            #here we want the event midpoint for the index
#            val=seq_mids[k]
            
            #get score for that index
            score=arr[k]
            
            #now map it to closest gene for that midpoint
            cg=filtered_events[val]

            fc=1.0 ##update this if we want to normalize score by fold change
            score=float(score)*float(fc) ##this should do nothing since fc generally = 1

            #if len(tfnames)==1:
            curscore=newmat[all_tf_names.index(mat),gene_names.index(cg)]
            ##updated to include maximum score!!

            if np.abs(score)>np.abs(curscore):
                newmat[all_tf_names.index(mat),gene_names.index(cg)]=score
            #else:
            #    for t in tfnames:
            #        curscore=newmat[all_tf_names.index(t),gene_names.index(cg)]
            #    ##updated to include maximum score!!
            #        if np.abs(float(score))>np.abs(curscore):
            #            newmat[all_tf_names.index(t),gene_names.index(cg)]=float(score)

                
    ###save these intermediate files for debugging purposes
    np.savetxt(tgm_file,newmat)
    gin=re.sub('.tgm','_geneids.txt',tgm_file)
    tin=re.sub('.tgm','_tfids.txt',tgm_file)

    try:
        open(gin,'w').writelines([g+'\n' for g in gene_names])
        open(tin,'w').writelines([t+'\n' for t in all_tf_names])
    except:
        print "Error opening file:", sys.exc_info()[0]
        print "Check to make sure file exists at %s"%(closest_gene_output)
        raise
    
    if pkl_file!='':
        zipcmd='python '+os.path.join(progdir,'zipTgms.py')+' '+tgm_file+' '+tin+' '+gin+' --pkl='+pkl_file
        print 'Compressing matrix file into pkl'
        print zipcmd
        os.system(zipcmd)
        return pkl_file
    else:
        return tgm_file
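
Both except blocks above use the same report-then-reraise idiom: print sys.exc_info()[0], the class of the exception being handled, for context, then issue a bare raise so the original exception propagates with its traceback intact. A minimal sketch (Python 3; the helper name and path are placeholders):

import sys

def read_ids(path):
    try:
        with open(path) as fh:
            return [line.strip() for line in fh]
    except Exception:
        # sys.exc_info()[0] is the class of the in-flight exception;
        # printing it gives context without swallowing the error.
        print('Error opening file:', sys.exc_info()[0])
        print('Check to make sure file exists at %s' % path)
        raise  # bare raise re-raises with the original traceback

try:
    read_ids('no-such-file.txt')
except OSError:
    pass  # the two diagnostic lines were printed before the re-raise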

Example 7

Project: Nuitka
Source File: Taskmaster.py
View license
    def _find_next_ready_node(self):
        """
        Finds the next node that is ready to be built.

        This is *the* main guts of the DAG walk.  We loop through the
        list of candidates, looking for something that has no un-built
        children (i.e., that is a leaf Node or has dependencies that are
        all leaf Nodes or up-to-date).  Candidate Nodes are re-scanned
        (both the target Node itself and its sources, which are always
        scanned in the context of a given target) to discover implicit
        dependencies.  A Node that must wait for some children to be
        built will be put back on the candidates list after the children
        have finished building.  A Node that has been put back on the
        candidates list in this way may have itself (or its sources)
        re-scanned, in order to handle generated header files (e.g.) and
        the implicit dependencies therein.

        Note that this method does not do any signature calculation or
        up-to-date check itself.  All of that is handled by the Task
        class.  This is purely concerned with the dependency graph walk.
        """

        self.ready_exc = None

        T = self.trace
        if T: T.write(u'\n' + self.trace_message('Looking for a node to evaluate'))

        while True:
            node = self.next_candidate()
            if node is None:
                if T: T.write(self.trace_message('No candidate anymore.') + u'\n')
                return None

            node = node.disambiguate()
            state = node.get_state()

            # For debugging only:
            #
            # try:
            #     self._validate_pending_children()
            # except:
            #     self.ready_exc = sys.exc_info()
            #     return node

            if CollectStats:
                if not hasattr(node, 'stats'):
                    node.stats = Stats()
                    StatsNodes.append(node)
                S = node.stats
                S.considered = S.considered + 1
            else:
                S = None

            if T: T.write(self.trace_message(u'    Considering node %s and its children:' % self.trace_node(node)))

            if state == NODE_NO_STATE:
                # Mark this node as being on the execution stack:
                node.set_state(NODE_PENDING)
            elif state > NODE_PENDING:
                # Skip this node if it has already been evaluated:
                if S: S.already_handled = S.already_handled + 1
                if T: T.write(self.trace_message(u'       already handled (executed)'))
                continue

            executor = node.get_executor()

            try:
                children = executor.get_all_children()
            except SystemExit:
                exc_value = sys.exc_info()[1]
                e = SCons.Errors.ExplicitExit(node, exc_value.code)
                self.ready_exc = (SCons.Errors.ExplicitExit, e)
                if T: T.write(self.trace_message('       SystemExit'))
                return node
            except Exception, e:
                # We had a problem just trying to figure out the
                # children (like a child couldn't be linked in to a
                # VariantDir, or a Scanner threw something).  Arrange to
                # raise the exception when the Task is "executed."
                self.ready_exc = sys.exc_info()
                if S: S.problem = S.problem + 1
                if T: T.write(self.trace_message('       exception %s while scanning children.\n' % e))
                return node

            children_not_visited = []
            children_pending = set()
            children_not_ready = []
            children_failed = False

            for child in chain(executor.get_all_prerequisites(), children):
                childstate = child.get_state()

                if T: T.write(self.trace_message(u'       ' + self.trace_node(child)))

                if childstate == NODE_NO_STATE:
                    children_not_visited.append(child)
                elif childstate == NODE_PENDING:
                    children_pending.add(child)
                elif childstate == NODE_FAILED:
                    children_failed = True

                if childstate <= NODE_EXECUTING:
                    children_not_ready.append(child)


            # These nodes have not even been visited yet.  Add
            # them to the list so that on some next pass we can
            # take a stab at evaluating them (or their children).
            children_not_visited.reverse()
            self.candidates.extend(self.order(children_not_visited))
            #if T and children_not_visited:
            #    T.write(self.trace_message('     adding to candidates: %s' % map(str, children_not_visited)))
            #    T.write(self.trace_message('     candidates now: %s\n' % map(str, self.candidates)))

            # Skip this node if any of its children have failed.
            #
            # This catches the case where we're descending a top-level
            # target and one of our children failed while trying to be
            # built by a *previous* descent of an earlier top-level
            # target.
            #
            # It can also occur if a node is reused in multiple
            # targets. One first descends through one of the targets;
            # the next descent occurs through the other target.
            #
            # Note that we can only have children_failed if the
            # --keep-going flag was used, because without it the build
            # will stop before diving into the other branch.
            #
            # Note that even if one of the children fails, we still
            # added the other children to the list of candidate nodes
            # to keep on building (--keep-going).
            if children_failed:
                for n in executor.get_action_targets():
                    n.set_state(NODE_FAILED)

                if S: S.child_failed = S.child_failed + 1
                if T: T.write(self.trace_message('****** %s\n' % self.trace_node(node)))
                continue

            if children_not_ready:
                for child in children_not_ready:
                    # We're waiting on one or more derived targets
                    # that have not yet finished building.
                    if S: S.not_built = S.not_built + 1

                    # Add this node to the waiting parents lists of
                    # anything we're waiting on, with a reference
                    # count so we can be put back on the list for
                    # re-evaluation when they've all finished.
                    node.ref_count =  node.ref_count + child.add_to_waiting_parents(node)
                    if T: T.write(self.trace_message(u'     adjusted ref count: %s, child %s' %
                                  (self.trace_node(node), repr(str(child)))))

                if T:
                    for pc in children_pending:
                        T.write(self.trace_message('       adding %s to the pending children set\n' %
                                self.trace_node(pc)))
                self.pending_children = self.pending_children | children_pending

                continue

            # Skip this node if it has side-effects that are
            # currently being built:
            wait_side_effects = False
            for se in executor.get_action_side_effects():
                if se.get_state() == NODE_EXECUTING:
                    se.add_to_waiting_s_e(node)
                    wait_side_effects = True

            if wait_side_effects:
                if S: S.side_effects = S.side_effects + 1
                continue

            # The default when we've gotten through all of the checks above:
            # this node is ready to be built.
            if S: S.build = S.build + 1
            if T: T.write(self.trace_message(u'Evaluating %s\n' %
                                             self.trace_node(node)))

            # For debugging only:
            #
            # try:
            #     self._validate_pending_children()
            # except:
            #     self.ready_exc = sys.exc_info()
            #     return node

            return node

        return None
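
One detail worth calling out: the except SystemExit: clause carries no "as" binding, so the handler recovers the SystemExit instance through sys.exc_info()[1] and reads the .code attribute it carries. A minimal sketch of that retrieval (Python 3; the tuple return stands in for SCons.Errors.ExplicitExit):

import sys

def run_and_capture(fn):
    try:
        fn()
    except SystemExit:
        # No "as" binding in the clause: recover the SystemExit
        # instance through sys.exc_info()[1] and read the exit code.
        exc_value = sys.exc_info()[1]
        return ('explicit-exit', exc_value.code)
    return ('ok', None)

print(run_and_capture(lambda: sys.exit(3)))  # ('explicit-exit', 3)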

Example 9

Project: pysnmp
Source File: service.py
View license
    def processIncomingMsg(self, snmpEngine, messageProcessingModel,
                           maxMessageSize, securityParameters,
                           securityModel, securityLevel, wholeMsg, msg):
        mibBuilder = snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder

        # 3.2.9 -- moved up here to be able to report
        # maxSizeResponseScopedPDU on error
        # (48 - maximum SNMPv3 header length)
        maxSizeResponseScopedPDU = int(maxMessageSize) - len(securityParameters) - 48

        debug.logger & debug.flagSM and debug.logger(
            'processIncomingMsg: securityParameters %s' % debug.hexdump(securityParameters))

        # 3.2.1
        try:
            securityParameters, rest = decoder.decode(
                securityParameters, asn1Spec=self.__securityParametersSpec
            )

        except PyAsn1Error:
            debug.logger & debug.flagSM and debug.logger('processIncomingMsg: %s' % (sys.exc_info()[1],))
            snmpInASNParseErrs, = mibBuilder.importSymbols('__SNMPv2-MIB', 'snmpInASNParseErrs')
            snmpInASNParseErrs.syntax += 1
            raise error.StatusInformation(errorIndication=errind.parseError)

        debug.logger & debug.flagSM and debug.logger('processIncomingMsg: %s' % (securityParameters.prettyPrint(),))

        if eoo.endOfOctets.isSameTypeWith(securityParameters):
            raise error.StatusInformation(errorIndication=errind.parseError)

        # 3.2.2
        msgAuthoritativeEngineId = securityParameters.getComponentByPosition(0)
        securityStateReference = self._cache.push(
            msgUserName=securityParameters.getComponentByPosition(3)
        )

        debug.logger & debug.flagSM and debug.logger(
            'processIncomingMsg: cache write securityStateReference %s by msgUserName %s' % (
                securityStateReference, securityParameters.getComponentByPosition(3)))

        scopedPduData = msg.getComponentByPosition(3)

        # Used for error reporting
        contextEngineId = mibBuilder.importSymbols('__SNMP-FRAMEWORK-MIB', 'snmpEngineID')[0].syntax
        contextName = null

        snmpEngineID = mibBuilder.importSymbols('__SNMP-FRAMEWORK-MIB', 'snmpEngineID')[0].syntax

        # 3.2.3
        if msgAuthoritativeEngineId != snmpEngineID and \
                msgAuthoritativeEngineId not in self.__timeline:
            if msgAuthoritativeEngineId and \
                    4 < len(msgAuthoritativeEngineId) < 33:
                # 3.2.3a - cloned user when request was sent
                debug.logger & debug.flagSM and debug.logger(
                    'processIncomingMsg: unsynchronized securityEngineID %r' % (msgAuthoritativeEngineId,))
            else:
                # 3.2.3b
                debug.logger & debug.flagSM and debug.logger(
                    'processIncomingMsg: peer requested snmpEngineID discovery')
                usmStatsUnknownEngineIDs, = mibBuilder.importSymbols(
                    '__SNMP-USER-BASED-SM-MIB', 'usmStatsUnknownEngineIDs')
                usmStatsUnknownEngineIDs.syntax += 1
                debug.logger & debug.flagSM and debug.logger(
                    'processIncomingMsg: null or malformed msgAuthoritativeEngineId')
                pysnmpUsmDiscoverable, = mibBuilder.importSymbols(
                    '__PYSNMP-USM-MIB', 'pysnmpUsmDiscoverable')
                if pysnmpUsmDiscoverable.syntax:
                    debug.logger & debug.flagSM and debug.logger(
                        'processIncomingMsg: starting snmpEngineID discovery procedure')

                    # Report original contextName
                    if scopedPduData.getName() != 'plaintext':
                        debug.logger & debug.flagSM and debug.logger(
                            'processIncomingMsg: scopedPduData not plaintext %s' % scopedPduData.prettyPrint())
                        raise error.StatusInformation(
                            errorIndication=errind.unknownEngineID
                        )

                    # 7.2.6.a.1
                    scopedPdu = scopedPduData.getComponent()
                    contextEngineId = scopedPdu.getComponentByPosition(0)
                    contextName = scopedPdu.getComponentByPosition(1)

                    raise error.StatusInformation(
                        errorIndication=errind.unknownEngineID,
                        oid=usmStatsUnknownEngineIDs.name,
                        val=usmStatsUnknownEngineIDs.syntax,
                        securityStateReference=securityStateReference,
                        securityLevel=securityLevel,
                        contextEngineId=contextEngineId,
                        contextName=contextName,
                        scopedPDU=scopedPdu,
                        maxSizeResponseScopedPDU=maxSizeResponseScopedPDU
                    )
                else:
                    debug.logger & debug.flagSM and debug.logger('processIncomingMsg: will not discover EngineID')
                    # free securityStateReference XXX
                    raise error.StatusInformation(
                        errorIndication=errind.unknownEngineID
                    )

        msgUserName = securityParameters.getComponentByPosition(3)

        debug.logger & debug.flagSM and debug.logger(
            'processIncomingMsg: read from securityParams msgAuthoritativeEngineId %r msgUserName %r' % (
                msgAuthoritativeEngineId, msgUserName))

        if msgUserName:
            # 3.2.4
            try:
                (usmUserName, usmUserSecurityName, usmUserAuthProtocol,
                 usmUserAuthKeyLocalized, usmUserPrivProtocol,
                 usmUserPrivKeyLocalized) = self.__getUserInfo(
                    snmpEngine.msgAndPduDsp.mibInstrumController,
                    msgAuthoritativeEngineId, msgUserName
                )
                debug.logger & debug.flagSM and debug.logger('processIncomingMsg: read user info from LCD')

            except NoSuchInstanceError:
                debug.logger & debug.flagSM and debug.logger(
                    'processIncomingMsg: unknown securityEngineID %r msgUserName %r' % (
                        msgAuthoritativeEngineId, msgUserName))
                usmStatsUnknownUserNames, = mibBuilder.importSymbols(
                    '__SNMP-USER-BASED-SM-MIB', 'usmStatsUnknownUserNames')
                usmStatsUnknownUserNames.syntax += 1
                raise error.StatusInformation(
                    errorIndication=errind.unknownSecurityName,
                    oid=usmStatsUnknownUserNames.name,
                    val=usmStatsUnknownUserNames.syntax,
                    securityStateReference=securityStateReference,
                    securityLevel=securityLevel,
                    contextEngineId=contextEngineId,
                    contextName=contextName,
                    maxSizeResponseScopedPDU=maxSizeResponseScopedPDU
                )

            except PyAsn1Error:
                debug.logger & debug.flagSM and debug.logger('processIncomingMsg: %s' % (sys.exc_info()[1],))
                snmpInGenErrs, = mibBuilder.importSymbols('__SNMPv2-MIB', 'snmpInGenErrs')
                snmpInGenErrs.syntax += 1
                raise error.StatusInformation(errorIndication=errind.invalidMsg)
        else:
            # empty username used for engineID discovery
            usmUserName = usmUserSecurityName = null
            usmUserAuthProtocol = noauth.NoAuth.serviceID
            usmUserPrivProtocol = nopriv.NoPriv.serviceID
            usmUserAuthKeyLocalized = usmUserPrivKeyLocalized = None

        debug.logger & debug.flagSM and debug.logger(
            'processIncomingMsg: now have usmUserName %r usmUserSecurityName %r usmUserAuthProtocol %r usmUserPrivProtocol %r for msgUserName %r' % (
                usmUserName, usmUserSecurityName, usmUserAuthProtocol, usmUserPrivProtocol, msgUserName))

        # 3.2.11 (moved up here to let Reports be authenticated & encrypted)
        self._cache.pop(securityStateReference)
        securityStateReference = self._cache.push(
            msgUserName=securityParameters.getComponentByPosition(3),
            usmUserSecurityName=usmUserSecurityName,
            usmUserAuthProtocol=usmUserAuthProtocol,
            usmUserAuthKeyLocalized=usmUserAuthKeyLocalized,
            usmUserPrivProtocol=usmUserPrivProtocol,
            usmUserPrivKeyLocalized=usmUserPrivKeyLocalized
        )

        msgAuthoritativeEngineBoots = securityParameters.getComponentByPosition(1)
        msgAuthoritativeEngineTime = securityParameters.getComponentByPosition(2)

        snmpEngine.observer.storeExecutionContext(
            snmpEngine, 'rfc3414.processIncomingMsg',
            dict(securityEngineId=msgAuthoritativeEngineId,
                 snmpEngineBoots=msgAuthoritativeEngineBoots,
                 snmpEngineTime=msgAuthoritativeEngineTime,
                 userName=usmUserName,
                 securityName=usmUserSecurityName,
                 authProtocol=usmUserAuthProtocol,
                 authKey=usmUserAuthKeyLocalized,
                 privProtocol=usmUserPrivProtocol,
                 privKey=usmUserPrivKeyLocalized)
        )
        snmpEngine.observer.clearExecutionContext(
            snmpEngine, 'rfc3414.processIncomingMsg'
        )

        # 3.2.5
        if msgAuthoritativeEngineId == snmpEngineID:
            # Authoritative SNMP engine: make sure securityLevel is sufficient
            badSecIndication = None
            if securityLevel == 3:
                if usmUserAuthProtocol == noauth.NoAuth.serviceID:
                    badSecIndication = 'authPriv wanted while auth not expected'
                if usmUserPrivProtocol == nopriv.NoPriv.serviceID:
                    badSecIndication = 'authPriv wanted while priv not expected'
            elif securityLevel == 2:
                if usmUserAuthProtocol == noauth.NoAuth.serviceID:
                    badSecIndication = 'authNoPriv wanted while auth not expected'
                if usmUserPrivProtocol != nopriv.NoPriv.serviceID:
                    # 4 (discovery phase always uses authenticated messages)
                    if msgAuthoritativeEngineBoots or msgAuthoritativeEngineTime:
                        badSecIndication = 'authNoPriv wanted while priv expected'

            elif securityLevel == 1:
                if usmUserAuthProtocol != noauth.NoAuth.serviceID:
                    badSecIndication = 'noAuthNoPriv wanted while auth expected'
                if usmUserPrivProtocol != nopriv.NoPriv.serviceID:
                    badSecIndication = 'noAuthNoPriv wanted while priv expected'
            if badSecIndication:
                usmStatsUnsupportedSecLevels, = mibBuilder.importSymbols(
                    '__SNMP-USER-BASED-SM-MIB', 'usmStatsUnsupportedSecLevels')
                usmStatsUnsupportedSecLevels.syntax += 1
                debug.logger & debug.flagSM and debug.logger(
                    'processIncomingMsg: reporting inappropriate security level for user %s: %s' % (
                        msgUserName, badSecIndication))
                raise error.StatusInformation(
                    errorIndication=errind.unsupportedSecurityLevel,
                    oid=usmStatsUnsupportedSecLevels.name,
                    val=usmStatsUnsupportedSecLevels.syntax,
                    securityStateReference=securityStateReference,
                    securityLevel=securityLevel,
                    contextEngineId=contextEngineId,
                    contextName=contextName,
                    maxSizeResponseScopedPDU=maxSizeResponseScopedPDU
                )

        # 3.2.6
        if securityLevel == 3 or securityLevel == 2:
            if usmUserAuthProtocol in self.authServices:
                authHandler = self.authServices[usmUserAuthProtocol]
            else:
                raise error.StatusInformation(
                    errorIndication=errind.authenticationFailure
                )

            try:
                authHandler.authenticateIncomingMsg(
                    usmUserAuthKeyLocalized,
                    securityParameters.getComponentByPosition(4),
                    wholeMsg
                )

            except error.StatusInformation:
                usmStatsWrongDigests, = mibBuilder.importSymbols(
                    '__SNMP-USER-BASED-SM-MIB', 'usmStatsWrongDigests')
                usmStatsWrongDigests.syntax += 1
                raise error.StatusInformation(
                    errorIndication=errind.authenticationFailure,
                    oid=usmStatsWrongDigests.name,
                    val=usmStatsWrongDigests.syntax,
                    securityStateReference=securityStateReference,
                    securityLevel=securityLevel,
                    contextEngineId=contextEngineId,
                    contextName=contextName,
                    maxSizeResponseScopedPDU=maxSizeResponseScopedPDU
                )

            debug.logger & debug.flagSM and debug.logger('processIncomingMsg: incoming msg authenticated')

            # synchronize time with authed peer
            self.__timeline[msgAuthoritativeEngineId] = (
                securityParameters.getComponentByPosition(1),
                securityParameters.getComponentByPosition(2),
                securityParameters.getComponentByPosition(2),
                int(time.time())
            )

            timerResolution = snmpEngine.transportDispatcher is None and 1.0 or snmpEngine.transportDispatcher.getTimerResolution()
            expireAt = int(self.__expirationTimer + 300 / timerResolution)
            if expireAt not in self.__timelineExpQueue:
                self.__timelineExpQueue[expireAt] = []
            self.__timelineExpQueue[expireAt].append(msgAuthoritativeEngineId)

            debug.logger & debug.flagSM and debug.logger(
                'processIncomingMsg: store timeline for securityEngineID %r' % (msgAuthoritativeEngineId,))

        # 3.2.7
        if securityLevel == 3 or securityLevel == 2:
            if msgAuthoritativeEngineId == snmpEngineID:
                # Authoritative SNMP engine: use local notion (SF bug #1649032)
                (snmpEngineBoots,
                 snmpEngineTime) = mibBuilder.importSymbols(
                    '__SNMP-FRAMEWORK-MIB', 'snmpEngineBoots', 'snmpEngineTime')
                snmpEngineBoots = snmpEngineBoots.syntax
                snmpEngineTime = snmpEngineTime.syntax.clone()
                idleTime = 0
                debug.logger & debug.flagSM and debug.logger(
                    'processIncomingMsg: read snmpEngineBoots (%s), snmpEngineTime (%s) from LCD' % (
                        snmpEngineBoots, snmpEngineTime))
            else:
                # Non-authoritative SNMP engine: use cached estimates
                if msgAuthoritativeEngineId in self.__timeline:
                    (snmpEngineBoots, snmpEngineTime,
                     latestReceivedEngineTime,
                     latestUpdateTimestamp) = self.__timeline[
                        msgAuthoritativeEngineId
                    ]
                    # time passed since last talk with this SNMP engine
                    idleTime = int(time.time()) - latestUpdateTimestamp
                    debug.logger & debug.flagSM and debug.logger(
                        'processIncomingMsg: read timeline snmpEngineBoots %s snmpEngineTime %s for msgAuthoritativeEngineId %r, idle time %s secs' % (
                            snmpEngineBoots, snmpEngineTime, msgAuthoritativeEngineId, idleTime))
                else:
                    raise error.ProtocolError('Peer SNMP engine info missing')

            # 3.2.7a
            if msgAuthoritativeEngineId == snmpEngineID:
                if snmpEngineBoots == 2147483647 or \
                        snmpEngineBoots != msgAuthoritativeEngineBoots or \
                        abs(idleTime + int(snmpEngineTime) - int(msgAuthoritativeEngineTime)) > 150:
                    usmStatsNotInTimeWindows, = mibBuilder.importSymbols(
                        '__SNMP-USER-BASED-SM-MIB', 'usmStatsNotInTimeWindows')
                    usmStatsNotInTimeWindows.syntax += 1
                    raise error.StatusInformation(
                        errorIndication=errind.notInTimeWindow,
                        oid=usmStatsNotInTimeWindows.name,
                        val=usmStatsNotInTimeWindows.syntax,
                        securityStateReference=securityStateReference,
                        securityLevel=2,
                        contextEngineId=contextEngineId,
                        contextName=contextName,
                        maxSizeResponseScopedPDU=maxSizeResponseScopedPDU
                    )
            # 3.2.7b
            else:
                # 3.2.7b.1
                # noinspection PyUnboundLocalVariable
                if msgAuthoritativeEngineBoots > snmpEngineBoots or \
                        msgAuthoritativeEngineBoots == snmpEngineBoots and \
                        msgAuthoritativeEngineTime > latestReceivedEngineTime:
                    self.__timeline[msgAuthoritativeEngineId] = (
                        msgAuthoritativeEngineBoots,
                        msgAuthoritativeEngineTime,
                        msgAuthoritativeEngineTime,
                        int(time.time())
                    )

                    timerResolution = snmpEngine.transportDispatcher is None and 1.0 or snmpEngine.transportDispatcher.getTimerResolution()
                    expireAt = int(self.__expirationTimer + 300 / timerResolution)
                    if expireAt not in self.__timelineExpQueue:
                        self.__timelineExpQueue[expireAt] = []
                    self.__timelineExpQueue[expireAt].append(msgAuthoritativeEngineId)

                    debug.logger & debug.flagSM and debug.logger(
                        'processIncomingMsg: stored timeline msgAuthoritativeEngineBoots %s msgAuthoritativeEngineTime %s for msgAuthoritativeEngineId %r' % (
                            msgAuthoritativeEngineBoots, msgAuthoritativeEngineTime, msgAuthoritativeEngineId))

                # 3.2.7b.2
                if snmpEngineBoots == 2147483647 or \
                        msgAuthoritativeEngineBoots < snmpEngineBoots or \
                        msgAuthoritativeEngineBoots == snmpEngineBoots and \
                        abs(idleTime + int(snmpEngineTime) - int(msgAuthoritativeEngineTime)) > 150:
                    raise error.StatusInformation(
                        errorIndication=errind.notInTimeWindow
                    )

        # 3.2.8a
        if securityLevel == 3:
            if usmUserPrivProtocol in self.privServices:
                privHandler = self.privServices[usmUserPrivProtocol]
            else:
                raise error.StatusInformation(
                    errorIndication=errind.decryptionError
                )
            encryptedPDU = scopedPduData.getComponentByPosition(1)
            if encryptedPDU is None:  # no ciphertext
                raise error.StatusInformation(
                    errorIndication=errind.decryptionError
                )

            try:
                decryptedData = privHandler.decryptData(
                    usmUserPrivKeyLocalized,
                    (securityParameters.getComponentByPosition(1),
                     securityParameters.getComponentByPosition(2),
                     securityParameters.getComponentByPosition(5)),
                    encryptedPDU
                )
                debug.logger & debug.flagSM and debug.logger(
                    'processIncomingMsg: PDU deciphered into %s' % debug.hexdump(decryptedData))

            except error.StatusInformation:
                usmStatsDecryptionErrors, = mibBuilder.importSymbols(
                    '__SNMP-USER-BASED-SM-MIB', 'usmStatsDecryptionErrors')
                usmStatsDecryptionErrors.syntax += 1
                raise error.StatusInformation(
                    errorIndication=errind.decryptionError,
                    oid=usmStatsDecryptionErrors.name,
                    val=usmStatsDecryptionErrors.syntax,
                    securityStateReference=securityStateReference,
                    securityLevel=securityLevel,
                    contextEngineId=contextEngineId,
                    contextName=contextName,
                    maxSizeResponseScopedPDU=maxSizeResponseScopedPDU
                )
            scopedPduSpec = scopedPduData.setComponentByPosition(0).getComponentByPosition(0)
            try:
                scopedPDU, rest = decoder.decode(decryptedData,
                                                 asn1Spec=scopedPduSpec)

            except PyAsn1Error:
                debug.logger & debug.flagSM and debug.logger(
                    'processIncomingMsg: scopedPDU decoder failed %s' % sys.exc_info()[0])
                raise error.StatusInformation(
                    errorIndication=errind.decryptionError
                )

            if eoo.endOfOctets.isSameTypeWith(scopedPDU):
                raise error.StatusInformation(
                    errorIndication=errind.decryptionError
                )
        else:
            # 3.2.8b
            scopedPDU = scopedPduData.getComponentByPosition(0)
            if scopedPDU is None:  # no plaintext
                raise error.StatusInformation(
                    errorIndication=errind.decryptionError
                )

        debug.logger & debug.flagSM and debug.logger(
            'processIncomingMsg: scopedPDU decoded %s' % scopedPDU.prettyPrint())

        # 3.2.10
        securityName = usmUserSecurityName

        debug.logger & debug.flagSM and debug.logger(
            'processIncomingMsg: cached msgUserName %s info by securityStateReference %s' % (
                msgUserName, securityStateReference))

        # Delayed to include details
        if not msgUserName and not msgAuthoritativeEngineId:
            usmStatsUnknownUserNames, = mibBuilder.importSymbols(
                '__SNMP-USER-BASED-SM-MIB', 'usmStatsUnknownUserNames')
            usmStatsUnknownUserNames.syntax += 1
            raise error.StatusInformation(
                errorIndication=errind.unknownSecurityName,
                oid=usmStatsUnknownUserNames.name,
                val=usmStatsUnknownUserNames.syntax,
                securityStateReference=securityStateReference,
                securityEngineID=msgAuthoritativeEngineId,
                securityLevel=securityLevel,
                contextEngineId=contextEngineId,
                contextName=contextName,
                maxSizeResponseScopedPDU=maxSizeResponseScopedPDU,
                PDU=scopedPDU
            )

        # 3.2.12
        return (msgAuthoritativeEngineId, securityName, scopedPDU,
                maxSizeResponseScopedPDU, securityStateReference)
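
A recurring idiom in the USM code above is to catch a low-level decoding exception, log only the exception class via sys.exc_info()[0], and re-raise a domain-specific error. A minimal, self-contained sketch of that pattern follows; DecryptionError and decode_scoped_pdu are illustrative stand-ins, not pysnmp API:

import sys


class DecryptionError(Exception):
    """Illustrative stand-in for pysnmp's errind.decryptionError."""


def decode_scoped_pdu(substrate):
    # Stand-in for decoder.decode(); rejects anything but bytes.
    if not isinstance(substrate, bytes):
        raise ValueError('substrate must be bytes')
    return substrate


def process_incoming(substrate):
    try:
        return decode_scoped_pdu(substrate)
    except ValueError:
        # sys.exc_info()[0] is the exception class -- enough for a terse log
        print('scopedPDU decoder failed %s' % sys.exc_info()[0])
        raise DecryptionError('scopedPDU decode failed')

Calling process_incoming('not bytes') logs the ValueError class and raises DecryptionError, mirroring how processIncomingMsg converts PyAsn1Error into a decryptionError status.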

Example 10

Project: pysnmp
Source File: service.py
View license
    def __generateRequestOrResponseMsg(self, snmpEngine,
                                       messageProcessingModel,
                                       globalData, maxMessageSize,
                                       securityModel, securityEngineID,
                                       securityName, securityLevel,
                                       scopedPDU, securityStateReference):
        mibBuilder = snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder
        snmpEngineID = mibBuilder.importSymbols('__SNMP-FRAMEWORK-MIB', 'snmpEngineID')[0].syntax

        # 3.1.1
        if securityStateReference is not None:
            # 3.1.1a
            cachedSecurityData = self._cache.pop(securityStateReference)
            usmUserName = cachedSecurityData['msgUserName']
            if 'usmUserSecurityName' in cachedSecurityData:
                usmUserSecurityName = cachedSecurityData['usmUserSecurityName']
            else:
                usmUserSecurityName = usmUserName
            if 'usmUserAuthProtocol' in cachedSecurityData:
                usmUserAuthProtocol = cachedSecurityData['usmUserAuthProtocol']
            else:
                usmUserAuthProtocol = noauth.NoAuth.serviceID
            if 'usmUserAuthKeyLocalized' in cachedSecurityData:
                usmUserAuthKeyLocalized = cachedSecurityData['usmUserAuthKeyLocalized']
            else:
                usmUserAuthKeyLocalized = None
            if 'usmUserPrivProtocol' in cachedSecurityData:
                usmUserPrivProtocol = cachedSecurityData['usmUserPrivProtocol']
            else:
                usmUserPrivProtocol = nopriv.NoPriv.serviceID
            if 'usmUserPrivKeyLocalized' in cachedSecurityData:
                usmUserPrivKeyLocalized = cachedSecurityData['usmUserPrivKeyLocalized']
            else:
                usmUserPrivKeyLocalized = None
            securityEngineID = snmpEngineID
            debug.logger & debug.flagSM and debug.logger('__generateRequestOrResponseMsg: user info read from cache')
        elif securityName:
            # 3.1.1b
            try:
                (usmUserName, usmUserSecurityName, usmUserAuthProtocol,
                 usmUserAuthKeyLocalized, usmUserPrivProtocol,
                 usmUserPrivKeyLocalized) = self.__getUserInfo(
                    snmpEngine.msgAndPduDsp.mibInstrumController,
                    securityEngineID,
                    self.__sec2usr(snmpEngine, securityName, securityEngineID)
                )
                debug.logger & debug.flagSM and debug.logger('__generateRequestOrResponseMsg: read user info')

            except NoSuchInstanceError:
                pysnmpUsmDiscovery, = mibBuilder.importSymbols('__PYSNMP-USM-MIB', 'pysnmpUsmDiscovery')
                reportUnknownName = not pysnmpUsmDiscovery.syntax
                if not reportUnknownName:
                    try:
                        (usmUserName, usmUserSecurityName,
                         usmUserAuthProtocol, usmUserAuthKeyLocalized,
                         usmUserPrivProtocol,
                         usmUserPrivKeyLocalized) = self.__cloneUserInfo(
                            snmpEngine.msgAndPduDsp.mibInstrumController,
                            securityEngineID,
                            self.__sec2usr(snmpEngine, securityName)
                        )

                    except NoSuchInstanceError:
                        reportUnknownName = True

                if reportUnknownName:
                    raise error.StatusInformation(
                        errorIndication=errind.unknownSecurityName
                    )

                debug.logger & debug.flagSM and debug.logger('__generateRequestOrResponseMsg: clone user info')

            except PyAsn1Error:
                debug.logger & debug.flagSM and debug.logger(
                    '__generateRequestOrResponseMsg: %s' % (sys.exc_info()[1],))
                snmpInGenErrs, = mibBuilder.importSymbols('__SNMPv2-MIB', 'snmpInGenErrs')
                snmpInGenErrs.syntax += 1
                raise error.StatusInformation(
                    errorIndication=errind.invalidMsg
                )
        else:
            # empty username used for engineID discovery
            usmUserName = usmUserSecurityName = null
            usmUserAuthProtocol = noauth.NoAuth.serviceID
            usmUserPrivProtocol = nopriv.NoPriv.serviceID
            usmUserAuthKeyLocalized = usmUserPrivKeyLocalized = None
            debug.logger & debug.flagSM and debug.logger('__generateRequestOrResponseMsg: use empty USM data')

        # noinspection PyUnboundLocalVariable
        debug.logger & debug.flagSM and debug.logger(
            '__generateRequestOrResponseMsg: local usmUserName %r usmUserSecurityName %r usmUserAuthProtocol %s usmUserPrivProtocol %s securityEngineID %r securityName %r' % (
                usmUserName, usmUserSecurityName, usmUserAuthProtocol, usmUserPrivProtocol, securityEngineID, securityName))

        msg = globalData

        # 3.1.2
        if securityLevel == 3:
            if usmUserAuthProtocol == noauth.NoAuth.serviceID or \
                    usmUserPrivProtocol == nopriv.NoPriv.serviceID:
                raise error.StatusInformation(
                    errorIndication=errind.unsupportedSecurityLevel
                )

        # 3.1.3
        if securityLevel == 3 or securityLevel == 2:
            if usmUserAuthProtocol == noauth.NoAuth.serviceID:
                raise error.StatusInformation(
                    errorIndication=errind.unsupportedSecurityLevel
                )

        securityParameters = self.__securityParametersSpec

        scopedPDUData = msg.setComponentByPosition(3).getComponentByPosition(3)
        scopedPDUData.setComponentByPosition(
            0, scopedPDU, verifyConstraints=False
        )

        # 3.1.6a
        if securityStateReference is None and securityLevel in (2, 3):
            if securityEngineID in self.__timeline:
                (snmpEngineBoots, snmpEngineTime, latestReceivedEngineTime,
                 latestUpdateTimestamp) = self.__timeline[securityEngineID]
                debug.logger & debug.flagSM and debug.logger(
                    '__generateRequestOrResponseMsg: read snmpEngineBoots, snmpEngineTime from timeline')
            else:
                # 2.3 XXX is this correct?
                snmpEngineBoots = snmpEngineTime = 0
                debug.logger & debug.flagSM and debug.logger(
                    '__generateRequestOrResponseMsg: no timeline for securityEngineID %r' % (securityEngineID,))
        # 3.1.6.b
        elif securityStateReference is not None:  # XXX Report?
            (snmpEngineBoots,
             snmpEngineTime) = mibBuilder.importSymbols('__SNMP-FRAMEWORK-MIB', 'snmpEngineBoots', 'snmpEngineTime')
            snmpEngineBoots = snmpEngineBoots.syntax
            snmpEngineTime = snmpEngineTime.syntax.clone()
            debug.logger & debug.flagSM and debug.logger(
                '__generateRequestOrResponseMsg: read snmpEngineBoots, snmpEngineTime from LCD')
        # 3.1.6.c
        else:
            snmpEngineBoots = snmpEngineTime = 0
            debug.logger & debug.flagSM and debug.logger(
                '__generateRequestOrResponseMsg: assuming zero snmpEngineBoots, snmpEngineTime')

        debug.logger & debug.flagSM and debug.logger(
            '__generateRequestOrResponseMsg: use snmpEngineBoots %s snmpEngineTime %s for securityEngineID %r' % (
                snmpEngineBoots, snmpEngineTime, securityEngineID))

        # 3.1.4a
        if securityLevel == 3:
            if usmUserPrivProtocol in self.privServices:
                privHandler = self.privServices[usmUserPrivProtocol]
            else:
                raise error.StatusInformation(
                    errorIndication=errind.encryptionError
                )

            debug.logger & debug.flagSM and debug.logger(
                '__generateRequestOrResponseMsg: scopedPDU %s' % scopedPDU.prettyPrint())

            try:
                dataToEncrypt = encoder.encode(scopedPDU)

            except PyAsn1Error:
                debug.logger & debug.flagSM and debug.logger(
                    '__generateRequestOrResponseMsg: scopedPDU serialization error: %s' % sys.exc_info()[1])
                raise error.StatusInformation(
                    errorIndication=errind.serializationError
                )

            debug.logger & debug.flagSM and debug.logger(
                '__generateRequestOrResponseMsg: scopedPDU encoded into %s' % debug.hexdump(dataToEncrypt))

            # noinspection PyUnboundLocalVariable
            (encryptedData,
             privParameters) = privHandler.encryptData(
                usmUserPrivKeyLocalized,
                (snmpEngineBoots, snmpEngineTime, None), dataToEncrypt
            )

            securityParameters.setComponentByPosition(
                5, privParameters, verifyConstraints=False
            )
            scopedPDUData.setComponentByPosition(
                1, encryptedData, verifyConstraints=False
            )

            debug.logger & debug.flagSM and debug.logger(
                '__generateRequestOrResponseMsg: scopedPDU ciphered into %s' % debug.hexdump(encryptedData))

        # 3.1.4b
        elif securityLevel == 1 or securityLevel == 2:
            securityParameters.setComponentByPosition(5, '')

        debug.logger & debug.flagSM and debug.logger('__generateRequestOrResponseMsg: %s' % scopedPDUData.prettyPrint())

        # 3.1.5
        securityParameters.setComponentByPosition(
            0, securityEngineID, verifyConstraints=False
        )
        securityParameters.setComponentByPosition(
            1, snmpEngineBoots, verifyConstraints=False
        )
        securityParameters.setComponentByPosition(
            2, snmpEngineTime, verifyConstraints=False
        )

        # 3.1.7
        securityParameters.setComponentByPosition(
            3, usmUserName, verifyConstraints=False
        )

        # 3.1.8a
        if securityLevel == 3 or securityLevel == 2:
            if usmUserAuthProtocol in self.authServices:
                authHandler = self.authServices[usmUserAuthProtocol]
            else:
                raise error.StatusInformation(
                    errorIndication=errind.authenticationFailure
                )

            # extra-wild hack to facilitate BER substrate in-place re-write
            securityParameters.setComponentByPosition(
                4, '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
            )

            debug.logger & debug.flagSM and debug.logger(
                '__generateRequestOrResponseMsg: %s' % (securityParameters.prettyPrint(),))

            try:
                msg.setComponentByPosition(2, encoder.encode(securityParameters), verifyConstraints=False)

            except PyAsn1Error:
                debug.logger & debug.flagSM and debug.logger(
                    '__generateRequestOrResponseMsg: securityParameters serialization error: %s' % sys.exc_info()[1])
                raise error.StatusInformation(
                    errorIndication=errind.serializationError
                )

            debug.logger & debug.flagSM and debug.logger(
                '__generateRequestOrResponseMsg: auth outgoing msg: %s' % msg.prettyPrint())

            try:
                wholeMsg = encoder.encode(msg)

            except PyAsn1Error:
                debug.logger & debug.flagSM and debug.logger(
                    '__generateRequestOrResponseMsg: msg serialization error: %s' % sys.exc_info()[1])
                raise error.StatusInformation(
                    errorIndication=errind.serializationError
                )

            # noinspection PyUnboundLocalVariable
            authenticatedWholeMsg = authHandler.authenticateOutgoingMsg(
                usmUserAuthKeyLocalized, wholeMsg
            )

        # 3.1.8b
        else:
            securityParameters.setComponentByPosition(
                4, '', verifyConstraints=False
            )

            debug.logger & debug.flagSM and debug.logger(
                '__generateRequestOrResponseMsg: %s' % (securityParameters.prettyPrint(),))

            try:
                msg.setComponentByPosition(2, encoder.encode(securityParameters), verifyConstraints=False)

            except PyAsn1Error:
                debug.logger & debug.flagSM and debug.logger(
                    '__generateRequestOrResponseMsg: securityParameters serialization error: %s' % sys.exc_info()[1])
                raise error.StatusInformation(
                    errorIndication=errind.serializationError
                )

            try:
                debug.logger & debug.flagSM and debug.logger(
                    '__generateRequestOrResponseMsg: plain outgoing msg: %s' % msg.prettyPrint())
                authenticatedWholeMsg = encoder.encode(msg)

            except PyAsn1Error:
                debug.logger & debug.flagSM and debug.logger(
                    '__generateRequestOrResponseMsg: msg serialization error: %s' % sys.exc_info()[1])
                raise error.StatusInformation(
                    errorIndication=errind.serializationError
                )

        debug.logger & debug.flagSM and debug.logger('__generateRequestOrResponseMsg: %s outgoing msg: %s' % (
            securityLevel > 1 and "authenticated" or "plain", debug.hexdump(authenticatedWholeMsg)))

        # 3.1.9
        return msg.getComponentByPosition(2), authenticatedWholeMsg
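
In the serialization branches above, sys.exc_info()[1] retrieves the active exception instance for the log message. On Python 3 the same object is also available by binding the exception directly; a sketch of the idiom, with SerializationError as a hypothetical stand-in for errind.serializationError:

import sys


class SerializationError(Exception):
    """Illustrative stand-in for pysnmp's errind.serializationError."""


def encode_msg(msg):
    try:
        return str(msg).encode('ascii')
    except UnicodeEncodeError:
        # sys.exc_info()[1] is the exception instance; equivalent to
        # writing "except UnicodeEncodeError as exc" and logging exc
        print('msg serialization error: %s' % sys.exc_info()[1])
        raise SerializationError('cannot serialize message')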

Example 11

Project: masakari
Source File: masakari_controller.py
View license
    @log_process_begin_and_end.output_log
    def _notification_reciever(self, env, start_response):

        try:
            content_length = int(env.get('CONTENT_LENGTH') or 0)
            if content_length > 0:
                body = env['wsgi.input'].read(content_length)
                json_data = json.loads(body)

                msg = "Recieved notification : " + body
                LOG.info(msg)

                ret = self._check_json_param(json_data)
                if ret == 1:
                    # Return Response
                    start_response(
                        '400 Bad Request', [('Content-Type', 'text/plain')])

                    msg = "Wsgi response: " \
                          "status=400 Bad Request, " \
                          "body=method _notification_reciever returned."
                    LOG.info(msg)

                    return ['method _notification_reciever returned.\r\n']

                # Insert notification into notification_list_db
                notification_list_dic = self._create_notification_list_db(
                    json_data)

                # Return Response
                start_response('200 OK', [('Content-Type', 'text/plain')])

                msg = "Wsgi response: " \
                    + "status=200 OK, " \
                    + "body=method _notification_reciever returned."
                LOG.info(msg)

                if notification_list_dic != {}:
                    # Start thread
                    if notification_list_dic.get("recover_by") == 0 and \
                       notification_list_dic.get("progress") == 0:

                        msg = "Run thread rc_worker.host_maintenance_mode." \
                            + " notification_id=" \
                            + notification_list_dic.get("notification_id") \
                            + " notification_hostname=" \
                            + notification_list_dic.get(
                                "notification_hostname") \
                            + "update_progress=False"
                        LOG.info(msg)
                        thread_name = self.rc_util.make_thread_name(
                            NOTIFICATION_LIST,
                            notification_list_dic.get("notification_id"))
                        th = threading.Thread(
                            target=self.rc_worker.host_maintenance_mode,
                            name=thread_name,
                            args=(notification_list_dic.get(
                                "notification_id"), notification_list_dic.get(
                                "notification_hostname"),
                                False, ))
                        th.start()

                        # Sleep until nova recognizes the node down.
                        dic = self.rc_config.get_value('recover_starter')
                        node_err_wait = dic.get("node_err_wait")
                        msg = ("Sleeping %s sec before starting recovery"
                               "thread until nova recognizes the node down..."
                               % (node_err_wait)
                               )
                        LOG.info(msg)
                        greenthread.sleep(int(node_err_wait))

                        retry_mode = False
                        msg = "Run thread rc_starter.add_failed_host." \
                            + " notification_id=" \
                            + notification_list_dic.get("notification_id") \
                            + " notification_hostname=" \
                            + notification_list_dic.get(
                                "notification_hostname") \
                            + " notification_cluster_port=" \
                            + notification_list_dic.get(
                                "notification_cluster_port") \
                            + " retry_mode=" + str(retry_mode)
                        LOG.info(msg)
                        thread_name = self.rc_util.make_thread_name(
                            NOTIFICATION_LIST,
                            notification_list_dic.get("notification_id"))
                        th = threading.Thread(
                            target=self.rc_starter.add_failed_host,
                            name=thread_name,
                            args=(notification_list_dic.get(
                                "notification_id"),
                                notification_list_dic.get(
                                "notification_hostname"),
                                notification_list_dic.get(
                                "notification_cluster_port"),
                                retry_mode, ))

                        th.start()
                    elif notification_list_dic.get("recover_by") == 0 and \
                            notification_list_dic.get("progress") == 3:
                        msg = "Run thread rc_worker.host_maintenance_mode." \
                            + " notification_id=" \
                            + notification_list_dic.get("notification_id") \
                            + " notification_hostname=" \
                            + notification_list_dic.get(
                                "notification_hostname") \
                            + "update_progress=False"
                        LOG.info(msg)
                        thread_name = self.rc_util.make_thread_name(
                            NOTIFICATION_LIST,
                            notification_list_dic.get("notification_id"))
                        th = threading.Thread(
                            target=self.rc_worker.host_maintenance_mode,
                            name=thread_name,
                            args=(notification_list_dic.get(
                                "notification_id"),
                                notification_list_dic.get(
                                "notification_hostname"),
                                False, ))
                        th.start()
                    elif notification_list_dic.get("recover_by") == 1:
                        retry_mode = False
                        msg = "Run thread rc_starter.add_failed_instance." \
                            + " notification_id=" \
                            + notification_list_dic.get("notification_id") \
                            + " notification_uuid=" \
                            + notification_list_dic.get("notification_uuid") \
                            + " retry_mode=" + str(retry_mode)
                        LOG.info(msg)
                        thread_name = self.rc_util.make_thread_name(
                            NOTIFICATION_LIST,
                            notification_list_dic.get("notification_id"))
                        th = threading.Thread(
                            target=self.rc_starter.add_failed_instance,
                            name=thread_name,
                            args=(
                                notification_list_dic.get("notification_id"),
                                notification_list_dic.get(
                                    "notification_uuid"), retry_mode, )
                        )
                        th.start()
                    elif notification_list_dic.get("recover_by") == 2:
                        msg = "Run thread rc_worker.host_maintenance_mode." \
                            + " notification_id=" \
                            + notification_list_dic.get("notification_id") \
                            + " notification_hostname=" \
                            + notification_list_dic.get(
                                "notification_hostname") \
                            + "update_progress=False"
                        LOG.info(msg)
                        thread_name = self.rc_util.make_thread_name(
                            NOTIFICATION_LIST,
                            notification_list_dic.get("notification_id"))
                        th = threading.Thread(
                            target=self.rc_worker.host_maintenance_mode,
                            name=thread_name,
                            args=(
                                notification_list_dic.get("notification_id"),
                                notification_list_dic.get(
                                    "notification_hostname"),
                                True, )
                        )
                        th.start()
                    else:
                        LOG.warning(
                            'Column "recover_by" on notification_list DB '
                            'has an invalid value.')

        except exc.SQLAlchemyError:
            error_type, error_value, traceback_ = sys.exc_info()
            tb_list = traceback.format_tb(traceback_)
            LOG.error(error_type)
            LOG.error(error_value)
            for tb in tb_list:
                LOG.error(tb)
            start_response(
                '500 Internal Server Error', [('Content-Type', 'text/plain')])

            msg = "Wsgi response: " \
                  "status=500 Internal Server Error, " \
                  "body=method _notification_reciever returned."
            LOG.info(msg)

        except KeyError:
            error_type, error_value, traceback_ = sys.exc_info()
            tb_list = traceback.format_tb(traceback_)
            LOG.error(error_type)
            LOG.error(error_value)
            for tb in tb_list:
                LOG.error(tb)
            start_response(
                '500 Internal Server Error', [('Content-Type', 'text/plain')])

            msg = "Wsgi response: " \
                  "status=500 Internal Server Error, " \
                  "body=method _notification_reciever returned."
            LOG.info(msg)

        except:
            error_type, error_value, traceback_ = sys.exc_info()
            tb_list = traceback.format_tb(traceback_)
            LOG.error(error_type)
            LOG.error(error_value)
            for tb in tb_list:
                LOG.error(tb)
            start_response(
                '500 Internal Server Error', [('Content-Type', 'text/plain')])

            msg = "Wsgi response: " \
                  "status=500 Internal Server Error, " \
                  "body=method _notification_reciever returned."
            LOG.info(msg)

        return ['method _notification_reciever returned.\r\n']
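
Each handler above unpacks the full (type, value, traceback) triple from sys.exc_info() and logs the formatted traceback line by line. That pattern can be reduced to a small helper; a Python 3 sketch, assuming any logging-style callable is available in place of LOG.error:

import sys
import traceback


def log_current_exception(log):
    """Log exception type, value and traceback, one line at a time."""
    error_type, error_value, traceback_ = sys.exc_info()
    log(error_type)
    log(error_value)
    for tb in traceback.format_tb(traceback_):
        log(tb)


try:
    {}['missing']
except KeyError:
    log_current_exception(print)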

Example 12

Project: scalyr-agent-2
Source File: service.py
View license
    def __generateRequestOrResponseMsg(
        self,
        snmpEngine,
        messageProcessingModel,
        globalData,
        maxMessageSize,
        securityModel,
        securityEngineID,
        securityName,
        securityLevel,
        scopedPDU,
        securityStateReference
        ):
        snmpEngineID = snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder.importSymbols('__SNMP-FRAMEWORK-MIB', 'snmpEngineID')[0].syntax
        # 3.1.1
        if securityStateReference is not None:
            # 3.1.1a
            cachedSecurityData = self._cache.pop(securityStateReference)
            usmUserName = cachedSecurityData['msgUserName']
            if 'usmUserSecurityName' in cachedSecurityData:
                usmUserSecurityName = cachedSecurityData['usmUserSecurityName']
            else:
                usmUserSecurityName = usmUserName
            if 'usmUserAuthProtocol' in cachedSecurityData:
                usmUserAuthProtocol = cachedSecurityData['usmUserAuthProtocol']
            else:
                usmUserAuthProtocol = noauth.NoAuth.serviceID
            if 'usmUserAuthKeyLocalized' in cachedSecurityData:
                usmUserAuthKeyLocalized = cachedSecurityData['usmUserAuthKeyLocalized']
            else:
                usmUserAuthKeyLocalized = None
            if 'usmUserPrivProtocol' in cachedSecurityData:
                usmUserPrivProtocol = cachedSecurityData['usmUserPrivProtocol']
            else:
                usmUserPrivProtocol = nopriv.NoPriv.serviceID
            if 'usmUserPrivKeyLocalized' in cachedSecurityData:
                usmUserPrivKeyLocalized = cachedSecurityData['usmUserPrivKeyLocalized']
            else:
                usmUserPrivKeyLocalized = None
            securityEngineID = snmpEngineID
            debug.logger & debug.flagSM and debug.logger('__generateRequestOrResponseMsg: user info read from cache')
        elif securityName:
            # 3.1.1b
            try:
                ( usmUserName,
                  usmUserSecurityName,
                  usmUserAuthProtocol,
                  usmUserAuthKeyLocalized,
                  usmUserPrivProtocol,
                  usmUserPrivKeyLocalized ) = self.__getUserInfo(
                    snmpEngine.msgAndPduDsp.mibInstrumController,
                    securityEngineID,
                    self.__sec2usr(snmpEngine, securityName, securityEngineID)
                )
                debug.logger & debug.flagSM and debug.logger('__generateRequestOrResponseMsg: read user info')
            except NoSuchInstanceError:
                pysnmpUsmDiscovery, = snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder.importSymbols('__PYSNMP-USM-MIB', 'pysnmpUsmDiscovery')
                __reportUnknownName = not pysnmpUsmDiscovery.syntax
                if not __reportUnknownName:
                    try:
                        ( usmUserName,
                          usmUserSecurityName,
                          usmUserAuthProtocol,
                          usmUserAuthKeyLocalized,
                          usmUserPrivProtocol,
                          usmUserPrivKeyLocalized ) = self.__cloneUserInfo(
                            snmpEngine.msgAndPduDsp.mibInstrumController,
                            securityEngineID,
                            self.__sec2usr(snmpEngine, securityName)
                        )
                    except NoSuchInstanceError:
                        __reportUnknownName = True

                if __reportUnknownName:
                    raise error.StatusInformation(
                        errorIndication = errind.unknownSecurityName
                        )
                debug.logger & debug.flagSM and debug.logger('__generateRequestOrResponseMsg: clone user info')
            except PyAsn1Error:
                debug.logger & debug.flagSM and debug.logger('processIncomingMsg: %s' % (sys.exc_info()[1],))
                snmpInGenErrs, = snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder.importSymbols('__SNMPv2-MIB', 'snmpInGenErrs')
                snmpInGenErrs.syntax = snmpInGenErrs.syntax + 1
                raise error.StatusInformation(
                    errorIndication=errind.invalidMsg
                )
        else:
            # empty username used for engineID discovery
            usmUserName = usmUserSecurityName = null
            usmUserAuthProtocol = noauth.NoAuth.serviceID
            usmUserPrivProtocol = nopriv.NoPriv.serviceID
            usmUserAuthKeyLocalized = usmUserPrivKeyLocalized = None
            debug.logger & debug.flagSM and debug.logger('__generateRequestOrResponseMsg: use empty USM data')
            
        debug.logger & debug.flagSM and debug.logger('__generateRequestOrResponseMsg: local usmUserName %r usmUserSecurityName %r usmUserAuthProtocol %s usmUserPrivProtocol %s securityEngineID %r securityName %r' % (usmUserName, usmUserSecurityName, usmUserAuthProtocol, usmUserPrivProtocol, securityEngineID, securityName))

        msg = globalData
        
        # 3.1.2
        if securityLevel == 3:
            if usmUserAuthProtocol == noauth.NoAuth.serviceID or \
               usmUserPrivProtocol == nopriv.NoPriv.serviceID:
                raise error.StatusInformation(
                    errorIndication = errind.unsupportedSecurityLevel
                    )

        # 3.1.3
        if securityLevel == 3 or securityLevel == 2:
            if usmUserAuthProtocol == noauth.NoAuth.serviceID:
                raise error.StatusInformation(
                    errorIndication = errind.unsupportedSecurityLevel
                    )

        securityParameters = self.__securityParametersSpec

        scopedPDUData = msg.setComponentByPosition(3).getComponentByPosition(3)
        scopedPDUData.setComponentByPosition(
            0, scopedPDU, verifyConstraints=False
            )
        
        # 3.1.6a
        if securityStateReference is None and (  # request type check added
            securityLevel == 3 or securityLevel == 2
            ):
            if securityEngineID in self.__timeline:
                ( snmpEngineBoots,
                  snmpEngineTime,
                  latestReceivedEngineTime,
                  latestUpdateTimestamp) = self.__timeline[
                    securityEngineID
                    ]
                debug.logger & debug.flagSM and debug.logger('__generateRequestOrResponseMsg: read snmpEngineBoots, snmpEngineTime from timeline')
            else:
                # 2.3 XXX is this correct?
                snmpEngineBoots = snmpEngineTime = 0
                debug.logger & debug.flagSM and debug.logger('__generateRequestOrResponseMsg: no timeline for securityEngineID %r' % (securityEngineID,))
        # 3.1.6.b
        elif securityStateReference is not None:  # XXX Report?
            ( snmpEngineBoots,
              snmpEngineTime ) = snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder.importSymbols('__SNMP-FRAMEWORK-MIB', 'snmpEngineBoots', 'snmpEngineTime')
            snmpEngineBoots = snmpEngineBoots.syntax
            snmpEngineTime = snmpEngineTime.syntax.clone()
            debug.logger & debug.flagSM and debug.logger('__generateRequestOrResponseMsg: read snmpEngineBoots, snmpEngineTime from LCD')
        # 3.1.6.c
        else:
            snmpEngineBoots = snmpEngineTime = 0
            debug.logger & debug.flagSM and debug.logger('__generateRequestOrResponseMsg: assuming zero snmpEngineBoots, snmpEngineTime')

        debug.logger & debug.flagSM and debug.logger('__generateRequestOrResponseMsg: use snmpEngineBoots %s snmpEngineTime %s for securityEngineID %r' % (snmpEngineBoots, snmpEngineTime, securityEngineID))

        # 3.1.4a
        if securityLevel == 3:
            if usmUserPrivProtocol in self.privServices:
                privHandler = self.privServices[usmUserPrivProtocol]
            else:
                raise error.StatusInformation(
                    errorIndication = errind.encryptionError
                )

            debug.logger & debug.flagSM and debug.logger('__generateRequestOrResponseMsg: scopedPDU %s' % scopedPDU.prettyPrint())

            try:
                dataToEncrypt = encoder.encode(scopedPDU)
            except PyAsn1Error:
                debug.logger & debug.flagSM and debug.logger('__generateRequestOrResponseMsg: scopedPDU serialization error: %s' % sys.exc_info()[1])
                raise error.StatusInformation(
                    errorIndication = errind.serializationError
                )
            
            debug.logger & debug.flagSM and debug.logger('__generateRequestOrResponseMsg: scopedPDU encoded into %s' % debug.hexdump(dataToEncrypt))

            ( encryptedData,
              privParameters ) = privHandler.encryptData(
                usmUserPrivKeyLocalized,
                ( snmpEngineBoots, snmpEngineTime, None ),
                dataToEncrypt
            )

            securityParameters.setComponentByPosition(
                5, privParameters, verifyConstraints=False
            )
            scopedPDUData.setComponentByPosition(
                1, encryptedData, verifyConstraints=False
            )

            debug.logger & debug.flagSM and debug.logger('__generateRequestOrResponseMsg: scopedPDU ciphered into %s' % debug.hexdump(encryptedData))

        # 3.1.4b
        elif securityLevel == 1 or securityLevel == 2:
            securityParameters.setComponentByPosition(5, '')

        debug.logger & debug.flagSM and debug.logger('__generateRequestOrResponseMsg: %s' % scopedPDUData.prettyPrint())
        
        # 3.1.5
        securityParameters.setComponentByPosition(
            0, securityEngineID, verifyConstraints=False
        )
        securityParameters.setComponentByPosition(
            1, snmpEngineBoots, verifyConstraints=False
        )
        securityParameters.setComponentByPosition(
            2, snmpEngineTime, verifyConstraints=False
        )
    
        # 3.1.7
        securityParameters.setComponentByPosition(
            3, usmUserName, verifyConstraints=False
        )

        # 3.1.8a
        if securityLevel == 3 or securityLevel == 2:
            if usmUserAuthProtocol in self.authServices:
                authHandler = self.authServices[usmUserAuthProtocol]
            else:
                raise error.StatusInformation(
                    errorIndication = errind.authenticationFailure
                    )

            # extra-wild hack to facilitate BER substrate in-place re-write
            securityParameters.setComponentByPosition(
                4, '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
                )

            debug.logger & debug.flagSM and debug.logger('__generateRequestOrResponseMsg: %s' % (securityParameters.prettyPrint(),))
            
            try:
                msg.setComponentByPosition(2, encoder.encode(securityParameters), verifyConstraints=False)
            except PyAsn1Error:
                debug.logger & debug.flagSM and debug.logger('__generateRequestOrResponseMsg: securityParameters serialization error: %s' % sys.exc_info()[1])
                raise error.StatusInformation(
                    errorIndication = errind.serializationError
                )

            debug.logger & debug.flagSM and debug.logger('__generateRequestOrResponseMsg: auth outgoing msg: %s' % msg.prettyPrint())

            try:
                wholeMsg = encoder.encode(msg)
            except PyAsn1Error:
                debug.logger & debug.flagSM and debug.logger('__generateRequestOrResponseMsg: msg serialization error: %s' % sys.exc_info()[1])
                raise error.StatusInformation(
                    errorIndication = errind.serializationError
                )

            authenticatedWholeMsg = authHandler.authenticateOutgoingMsg(
                usmUserAuthKeyLocalized, wholeMsg
                )

        # 3.1.8b
        else:
            securityParameters.setComponentByPosition(
                4, '', verifyConstraints=False
                )

            debug.logger & debug.flagSM and debug.logger('__generateRequestOrResponseMsg: %s' % (securityParameters.prettyPrint(),))

            try:
                msg.setComponentByPosition(2, encoder.encode(securityParameters), verifyConstraints=False)
            except PyAsn1Error:
                debug.logger & debug.flagSM and debug.logger('__generateRequestOrResponseMsg: secutiryParameters serialization error: %s' % sys.exc_info()[1])
                raise error.StatusInformation(
                    errorIndication = errind.serializationError
                )

            try:
                debug.logger & debug.flagSM and debug.logger('__generateRequestOrResponseMsg: plain outgoing msg: %s' % msg.prettyPrint())
                authenticatedWholeMsg = encoder.encode(msg)
            except PyAsn1Error:
                debug.logger & debug.flagSM and debug.logger('__generateRequestOrResponseMsg: msg serialization error: %s' % sys.exc_info()[1])
                raise error.StatusInformation(
                    errorIndication = errind.serializationError
                )

        debug.logger & debug.flagSM and debug.logger('__generateRequestOrResponseMsg: %s outgoing msg: %s' % (securityLevel > 1 and "authenticated" or "plain", debug.hexdump(authenticatedWholeMsg)))

        # 3.1.9
        return (
            msg.getComponentByPosition(2),
            authenticatedWholeMsg
            )

Example 13

Project: stock-logistics-barcode
Source File: sentinel.py
View license
    def main_loop(self):
        """
        Loops until the user asks for ending
        """
        code = False
        result = None
        value = None

        while True:
            try:
                try:
                    # No active scenario, select one
                    if not self.scenario_id:
                        (code, result, value) = self._select_scenario()
                    else:
                        # Search for a step title
                        title = None
                        title_key = '|'
                        if isinstance(result, (types.NoneType, bool)):
                            pass
                        elif (isinstance(result, dict) and
                              result.get(title_key, None)):
                            title = result[title_key]
                            del result[title_key]
                        elif (isinstance(result[0], (tuple, list)) and
                              result[0][0] == title_key):
                            title = result.pop(0)[1]
                        elif (isinstance(result[0], basestring) and
                              result[0].startswith(title_key)):
                            title = result.pop(0)[len(title_key):]

                        if title is None and self.scenario_name:
                            # If no title is defined, display the scenario name
                            title = self.scenario_name

                        if code == 'Q' or code == 'N':
                            # Quantity selection
                            quantity = self._select_quantity(
                                '\n'.join(result), '%g' % value,
                                integer=(code == 'N'), title=title)
                            (code, result, value) = self.oerp_call('action',
                                                                   quantity)
                        elif code == 'C':
                            # Confirmation query
                            confirm = self._confirm(
                                '\n'.join(result), title=title)
                            (code, result, value) = self.oerp_call('action',
                                                                   confirm)
                        elif code == 'T':
                            # Select arguments from value
                            default = ''
                            size = None
                            if isinstance(value, dict):
                                default = value.get('default', '')
                                size = value.get('size', None)
                            elif isinstance(value, str):
                                default = value

                            # Text input
                            text = self._input_text(
                                '\n'.join(result), default=default,
                                size=size, title=title)
                            (code, result, value) = self.oerp_call('action',
                                                                   text)
                        elif code == 'R':
                            # Critical error
                            self.scenario_id = False
                            self.scenario_name = False
                            self._display_error('\n'.join(result), title=title)
                        elif code == 'U':
                            # Unknown action : message with return back to the
                            # last state
                            self._display(
                                '\n'.join(result), clear=True, scroll=True,
                                title=title)
                            (code, result, value) = self.oerp_call('back')
                        elif code == 'E':
                            # Error message
                            self._display_error(
                                '\n'.join(result), title=title)
                            # Execute transition
                            if not value:
                                (code, result, value) = self.oerp_call(
                                    'action')
                            else:
                                # Back to the previous step required
                                (code, result, value) = self.oerp_call(
                                    'back')
                        elif code == 'M':
                            # Simple message
                            self._display(
                                '\n'.join(result), clear=True, scroll=True,
                                title=title)
                            # Execute transition
                            (code, result, value) = self.oerp_call('action',
                                                                   value)
                        elif code == 'L':
                            if result:
                                # Select a value in the list
                                choice = self._menu_choice(result, title=title)
                                # Send the result to Odoo
                                (code, result, value) = self.oerp_call(
                                    'action', choice)
                            else:
                                # Empty list supplied, display an error
                                (code, result, value) = (
                                    'E', [_('No value available')], True)

                            # Check if we are in a scenario (to retrieve the
                            # scenario name from a submenu)
                            self.scanner_check()
                            if not self.scenario_id:
                                self.scenario_id = True
                                self.scenario_name = False
                        elif code == 'F':
                            # End of scenario
                            self.scenario_id = False
                            self.scenario_name = False
                            self._display('\n'.join(result), clear=True,
                                          scroll=True, title=title)
                        else:
                            # Default call
                            (code, result, value) = self.oerp_call('restart')
                except SentinelBackException:
                    # Back to the previous step required
                    (code, result, value) = self.oerp_call('back')
                    # Do not display the termination message
                    if code == 'F':
                        self.ungetch(ord('\n'))
                    self.screen.bkgd(0, self._get_color('base'))
                except Exception:
                    # Generates log contents
                    log_contents = """%s
# %s
# Hardware code : %s
# Current scenario : %s (%s)
# Current values :
#\tcode : %s
#\tresult : %s
#\tvalue : %s
%s
%s
"""
                    log_contents = log_contents % (
                        '#' * 79, datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
                        self.hardware_code, str(self.scenario_id),
                        self.scenario_name, code, repr(result), repr(value),
                        '#' * 79, reduce(
                            lambda x, y: x + y, traceback.format_exception(
                                sys.exc_info()[0],
                                sys.exc_info()[1],
                                sys.exc_info()[2])))

                    # Writes traceback in log file
                    logfile = open(self.datadir + 'oerp_sentinel.log', 'a')
                    logfile.write(log_contents)
                    logfile.close()

                    # Display error message
                    (code, result, value) = (
                        'E', [_('An error occurred\n\nPlease contact your '
                                'administrator')], False)
            except KeyboardInterrupt:
                # If Ctrl+C, exit
                (code, result, value) = self.oerp_call('end')
                # Restore normal background colors
                self.screen.bkgd(0, self._get_color('base'))

Example 14

Project: scons
Source File: Taskmaster.py
View license
    def _find_next_ready_node(self):
        """
        Finds the next node that is ready to be built.

        This is *the* main guts of the DAG walk.  We loop through the
        list of candidates, looking for something that has no un-built
        children (i.e., that is a leaf Node or has dependencies that are
        all leaf Nodes or up-to-date).  Candidate Nodes are re-scanned
        (both the target Node itself and its sources, which are always
        scanned in the context of a given target) to discover implicit
        dependencies.  A Node that must wait for some children to be
        built will be put back on the candidates list after the children
        have finished building.  A Node that has been put back on the
        candidates list in this way may have itself (or its sources)
        re-scanned, in order to handle generated header files (e.g.) and
        the implicit dependencies therein.

        Note that this method does not do any signature calculation or
        up-to-date check itself.  All of that is handled by the Task
        class.  This is purely concerned with the dependency graph walk.
        """

        self.ready_exc = None

        T = self.trace
        if T: T.write(u'\n' + self.trace_message('Looking for a node to evaluate'))

        while True:
            node = self.next_candidate()
            if node is None:
                if T: T.write(self.trace_message('No candidate anymore.') + u'\n')
                return None

            node = node.disambiguate()
            state = node.get_state()

            # For debugging only:
            #
            # try:
            #     self._validate_pending_children()
            # except:
            #     self.ready_exc = sys.exc_info()
            #     return node

            if CollectStats:
                if not hasattr(node, 'stats'):
                    node.stats = Stats()
                    StatsNodes.append(node)
                S = node.stats
                S.considered = S.considered + 1
            else:
                S = None

            if T: T.write(self.trace_message(u'    Considering node %s and its children:' % self.trace_node(node)))

            if state == NODE_NO_STATE:
                # Mark this node as being on the execution stack:
                node.set_state(NODE_PENDING)
            elif state > NODE_PENDING:
                # Skip this node if it has already been evaluated:
                if S: S.already_handled = S.already_handled + 1
                if T: T.write(self.trace_message(u'       already handled (executed)'))
                continue

            executor = node.get_executor()

            try:
                children = executor.get_all_children()
            except SystemExit:
                exc_value = sys.exc_info()[1]
                e = SCons.Errors.ExplicitExit(node, exc_value.code)
                self.ready_exc = (SCons.Errors.ExplicitExit, e)
                if T: T.write(self.trace_message('       SystemExit'))
                return node
            except Exception, e:
                # We had a problem just trying to figure out the
                # children (like a child couldn't be linked in to a
                # VariantDir, or a Scanner threw something).  Arrange to
                # raise the exception when the Task is "executed."
                self.ready_exc = sys.exc_info()
                if S: S.problem = S.problem + 1
                if T: T.write(self.trace_message('       exception %s while scanning children.\n' % e))
                return node

            children_not_visited = []
            children_pending = set()
            children_not_ready = []
            children_failed = False

            for child in chain(executor.get_all_prerequisites(), children):
                childstate = child.get_state()

                if T: T.write(self.trace_message(u'       ' + self.trace_node(child)))

                if childstate == NODE_NO_STATE:
                    children_not_visited.append(child)
                elif childstate == NODE_PENDING:
                    children_pending.add(child)
                elif childstate == NODE_FAILED:
                    children_failed = True

                if childstate <= NODE_EXECUTING:
                    children_not_ready.append(child)


            # These nodes have not even been visited yet.  Add
            # them to the list so that on some next pass we can
            # take a stab at evaluating them (or their children).
            children_not_visited.reverse()
            self.candidates.extend(self.order(children_not_visited))
            #if T and children_not_visited:
            #    T.write(self.trace_message('     adding to candidates: %s' % map(str, children_not_visited)))
            #    T.write(self.trace_message('     candidates now: %s\n' % map(str, self.candidates)))

            # Skip this node if any of its children have failed.
            #
            # This catches the case where we're descending a top-level
            # target and one of our children failed while trying to be
            # built by a *previous* descent of an earlier top-level
            # target.
            #
            # It can also occur if a node is reused in multiple
            # targets. One first descends though the one of the
            # target, the next time occurs through the other target.
            #
            # Note that we can only have failed_children if the
            # --keep-going flag was used, because without it the build
            # will stop before diving in the other branch.
            #
            # Note that even if one of the children fails, we still
            # added the other children to the list of candidate nodes
            # to keep on building (--keep-going).
            if children_failed:
                for n in executor.get_action_targets():
                    n.set_state(NODE_FAILED)

                if S: S.child_failed = S.child_failed + 1
                if T: T.write(self.trace_message('****** %s\n' % self.trace_node(node)))
                continue

            if children_not_ready:
                for child in children_not_ready:
                    # We're waiting on one or more derived targets
                    # that have not yet finished building.
                    if S: S.not_built = S.not_built + 1

                    # Add this node to the waiting parents lists of
                    # anything we're waiting on, with a reference
                    # count so we can be put back on the list for
                    # re-evaluation when they've all finished.
                    node.ref_count =  node.ref_count + child.add_to_waiting_parents(node)
                    if T: T.write(self.trace_message(u'     adjusted ref count: %s, child %s' %
                                  (self.trace_node(node), repr(str(child)))))

                if T:
                    for pc in children_pending:
                        T.write(self.trace_message('       adding %s to the pending children set\n' %
                                self.trace_node(pc)))
                self.pending_children = self.pending_children | children_pending

                continue

            # Skip this node if it has side-effects that are
            # currently being built:
            wait_side_effects = False
            for se in executor.get_action_side_effects():
                if se.get_state() == NODE_EXECUTING:
                    se.add_to_waiting_s_e(node)
                    wait_side_effects = True

            if wait_side_effects:
                if S: S.side_effects = S.side_effects + 1
                continue

            # The default when we've gotten through all of the checks above:
            # this node is ready to be built.
            if S: S.build = S.build + 1
            if T: T.write(self.trace_message(u'Evaluating %s\n' %
                                             self.trace_node(node)))

            # For debugging only:
            #
            # try:
            #     self._validate_pending_children()
            # except:
            #     self.ready_exc = sys.exc_info()
            #     return node

            return node

        return None
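
Taskmaster stores sys.exc_info() in self.ready_exc so the exception can be re-raised later, when the Task is actually executed. A minimal Python 3 sketch of that deferral (the scons code itself is Python 2, where the three-argument raise statement serves the same purpose):

import sys

saved_exc = None


def scan():
    global saved_exc
    try:
        raise RuntimeError('scanner failed')
    except RuntimeError:
        saved_exc = sys.exc_info()  # (type, value, traceback)


def execute():
    if saved_exc is not None:
        # Re-raise with the original traceback attached
        raise saved_exc[1].with_traceback(saved_exc[2])


scan()
execute()  # raises RuntimeError with the traceback captured in scan()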

Example 15

Project: Nagstamon
Source File: Zabbix.py
View license
    def _get_status(self):
        """
            Get status from Zabbix Server
        """
        ret = Result()
        # create Nagios items dictionary with two lists for services and hosts
        # every list will contain a dictionary for every failed service/host
        # this dictionary is only temporary
        nagitems = {"services": [], "hosts": []}

        # Create URLs for the configured filters
        if self.zapi is None:
            self._login()

        try:
            hosts = []
            try:
                hosts = self.zapi.host.get(
                    {"output": ["host", "ip", "status", "available", "error", "errors_from"], "filter": {}})
            except (ZabbixError, ZabbixAPIException):
                # set checking flag back to False
                self.isChecking = False
                result, error = self.Error(sys.exc_info())
                return Result(result=result, error=error)

            for host in hosts:
                # if host is disabled on server safely ignore it
                if host['available'] != '0':
                    n = {
                        'host': host['host'],
                        'status': self.statemap.get(host['status'], host['status']),
                        'last_check': 'n/a',
                        'duration': HumanReadableDurationFromTimestamp(host['errors_from']),
                        'status_information': host['error'],
                        'attempt': '1/1',
                        'site': '',
                        'address': host['host'],
                    }

                    # Zabbix shows OK hosts too - kick 'em!
                    if not n['status'] == 'OK':

                        # add dictionary full of information about this host item to nagitems
                        nagitems["hosts"].append(n)
                        # after collecting data in nagitems, create objects from its information
                        # host objects contain service objects
                        if n["host"] not in self.new_hosts:
                            new_host = n["host"]
                            self.new_hosts[new_host] = GenericHost()
                            self.new_hosts[new_host].name = n["host"]
                            self.new_hosts[new_host].status = n["status"]
                            self.new_hosts[new_host].last_check = n["last_check"]
                            self.new_hosts[new_host].duration = n["duration"]
                            self.new_hosts[new_host].attempt = n["attempt"]
                            self.new_hosts[new_host].status_information = n["status_information"]
                            self.new_hosts[new_host].site = n["site"]
                            self.new_hosts[new_host].address = n["address"]
        except ZabbixError:
            self.isChecking = False
            result, error = self.Error(sys.exc_info())
            return Result(result=result, error=error)

        # services
        services = []
        # groupids = [] # never used - probably old code
        zabbix_triggers = []
        try:
            api_version = self.zapi.api_version()
        except ZabbixAPIException:
            # FIXME Is there a cleaner way to handle this? I just borrowed
            # this code from 80 lines ahead. -- AGV
            # set checking flag back to False

            self.isChecking = False
            result, error = self.Error(sys.exc_info())
            print(sys.exc_info())
            return Result(result=result, error=error)

        try:
            # response = [] # never used - probably old code
            try:
                triggers_list = []

                hostgroup_ids = [x['groupid'] for x in self.zapi.hostgroup.get(
                    {'output': 'extend', 'with_monitored_items': True})
                    if int(x['internal']) == 0]

                zabbix_triggers = self.zapi.trigger.get(
                    {'sortfield': 'lastchange', 'withLastEventUnacknowledged': True, 'groupids': hostgroup_ids,
                     "monitored": True, "filter": {'value': 1}})

                triggers_list = []

                for trigger in zabbix_triggers:
                    triggers_list.append(trigger.get('triggerid'))
                this_trigger = self.zapi.trigger.get(
                    {'triggerids': triggers_list,
                     'expandDescription': True,
                     'output': 'extend',
                     'select_items': 'extend',  # that's for Zabbix API 1.8
                     'selectItems': 'extend',  # that's for Zabbix API 2.0+
                     'expandData': True}
                )
                if type(this_trigger) is dict:
                    for triggerid in list(this_trigger.keys()):
                        services.append(this_trigger[triggerid])
                        this_item = self.zapi.item.get(
                            {'itemids': [this_trigger[triggerid]['items'][0]['itemid']],
                             'selectApplications': 'extend'}
                        )
                        last_app = len(this_item[0]['applications']) - 1
                        this_trigger[triggerid]['application'] = this_item[0]['applications'][last_app]['name']
                elif type(this_trigger) is list:
                    for trigger in this_trigger:
                        services.append(trigger)
                        this_item = self.zapi.item.get(
                            {'itemids': trigger['items'][0]['itemid'],
                             'selectApplications': 'extend'}
                        )
                        # last_app = 0  # use it to get the first application name
                        last_app = len(this_item[0]['applications']) - 1  # use it to get the last application name
                        trigger['application'] = this_item[0]['applications'][last_app]['name']

            except ZabbixAPIException:
                # FIXME Is there a cleaner way to handle this? I just borrowed
                # this code from 80 lines ahead. -- AGV
                # set checking flag back to False
                self.isChecking = False
                result, error = self.Error(sys.exc_info())
                print(sys.exc_info())
                return Result(result=result, error=error)

            except ZabbixError as e:
                if e.terminate:
                    return e.result
                else:
                    service = e.result.content
                    ret = e.result

            for service in services:
                # Zabbix probably shows OK services too - kick 'em!
                # UPDATE Zabbix API 3.0 doesn't, but I haven't tried older
                #        versions, so I left it
                status = self.statemap.get(service['priority'], service['priority'])
                if not status == 'OK':
                    if not service['description'].endswith('...'):
                        state = service['description']
                    else:
                        state = service['items'][0]['lastvalue']
                    lastcheck = 0
                    for item in service['items']:
                        if int(item['lastclock']) > lastcheck:
                            lastcheck = int(item['lastclock'])
                    if len(service['comments']) == 0:
                        srvc = service['application']
                    else:
                        srvc = self.nagiosify_service(service['comments'])
                    n = {
                        'service': srvc,
                        'status': status,
                        # 1/1 attempt looks at least like there has been any attempt
                        'attempt': '1/1',
                        'duration': HumanReadableDurationFromTimestamp(service['lastchange']),
                        'status_information': state,
                        'passiveonly': 'no',
                        'last_check': time.strftime("%d/%m/%Y %H:%M:%S", time.localtime(lastcheck)),
                        'notifications': 'yes',
                        'flapping': 'no',
                        'site': '',
                        'command': 'zabbix',
                        'triggerid': service['triggerid'],
                    }

                    if api_version >= '3.0':
                        n['host'] = self.zapi.host.get({"output": ["host"], "filter": {}, "triggerids": service['triggerid']})[0]['host']
                    else:
                        n['host'] = service['host']

                    nagitems["services"].append(n)
                    # after collecting data in nagitems, create objects from its information
                    # host objects contain service objects
                    if n["host"] not in self.new_hosts:
                        self.new_hosts[n["host"]] = GenericHost()
                        self.new_hosts[n["host"]].name = n["host"]
                        self.new_hosts[n["host"]].status = "UP"
                        self.new_hosts[n["host"]].site = n["site"]
                        self.new_hosts[n["host"]].address = n["host"]
                        # if a service does not exist create its object
                    if n["service"] not in self.new_hosts[n["host"]].services:
                        # workaround for non-existing (or not found) host status flag
                        if n["service"] == "Host is down %s" % (n["host"]):
                            self.new_hosts[n["host"]].status = "DOWN"
                            # also take duration from "service" aka trigger
                            self.new_hosts[n["host"]].duration = n["duration"]
                        else:
                            new_service = n["service"]
                            self.new_hosts[n["host"]].services[new_service] = GenericService()
                            self.new_hosts[n["host"]].services[new_service].host = n["host"]
                            self.new_hosts[n["host"]].services[new_service].name = n["service"]
                            self.new_hosts[n["host"]].services[new_service].status = n["status"]
                            self.new_hosts[n["host"]].services[new_service].last_check = n["last_check"]
                            self.new_hosts[n["host"]].services[new_service].duration = n["duration"]
                            self.new_hosts[n["host"]].services[new_service].attempt = n["attempt"]
                            self.new_hosts[n["host"]].services[new_service].status_information = n["status_information"]
                            self.new_hosts[n["host"]].services[new_service].passiveonly = False
                            self.new_hosts[n["host"]].services[new_service].flapping = False
                            self.new_hosts[n["host"]].services[new_service].site = n["site"]
                            self.new_hosts[n["host"]].services[new_service].address = n["host"]
                            self.new_hosts[n["host"]].services[new_service].command = n["command"]
                            self.new_hosts[n["host"]].services[new_service].triggerid = n["triggerid"]

        except (ZabbixError, ZabbixAPIException):
            # set checking flag back to False
            self.isChecking = False
            result, error = self.Error(sys.exc_info())
            print(sys.exc_info())
            return Result(result=result, error=error)

        return ret
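
Every handler in this example follows the same recipe: catch the backend exception, pass the full sys.exc_info() triple to self.Error(), and wrap the outcome in a Result. A condensed, self-contained sketch of that recipe, where Result and error_from_exc_info are simplified stand-ins for Nagstamon's actual classes rather than its real API:

import sys
import traceback

class Result(object):
    def __init__(self, result=None, error=""):
        self.result = result
        self.error = error

def error_from_exc_info(exc_info):
    # Stand-in for self.Error(): reduce (type, value, traceback)
    # to strings a GUI can display.
    exc_type, exc_value, tb = exc_info
    error = "".join(traceback.format_exception(exc_type, exc_value, tb))
    return "ERROR", error

def get_status(fetch):
    try:
        return Result(result=fetch())
    except Exception:
        result, error = error_from_exc_info(sys.exc_info())
        return Result(result=result, error=error)

# get_status(lambda: 1 / 0).error holds the formatted traceback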

Example 16

Project: masakari
Source File: masakari_controller.py
View license
    @log_process_begin_and_end.output_log
    def _notification_reciever(self, env, start_response):

        try:
            content_length = int(env.get('CONTENT_LENGTH') or 0)
            if content_length > 0:
                body = env['wsgi.input'].read(content_length)
                json_data = json.loads(body)

                msg = "Received notification : " + body
                LOG.info(msg)

                ret = self._check_json_param(json_data)
                if ret == 1:
                    # Return Response
                    start_response(
                        '400 Bad Request', [('Content-Type', 'text/plain')])

                    msg = "Wsgi response: " \
                          "status=400 Bad Request, " \
                          "body=method _notification_reciever returned."
                    LOG.info(msg)

                    return ['method _notification_reciever returned.\r\n']

                # Insert notification into notification_list_db
                notification_list_dic = {}
                notification_list_dic = self._create_notification_list_db(
                    json_data)

                # Return Response
                start_response('200 OK', [('Content-Type', 'text/plain')])

                msg = "Wsgi response: " \
                    + "status=200 OK, " \
                    + "body=method _notification_reciever returned."
                LOG.info(msg)

                if notification_list_dic != {}:
                    # Start thread
                    if notification_list_dic.get("recover_by") == 0 and \
                       notification_list_dic.get("progress") == 0:

                        msg = "Run thread rc_worker.host_maintenance_mode." \
                            + " notification_id=" \
                            + notification_list_dic.get("notification_id") \
                            + " notification_hostname=" \
                            + notification_list_dic.get(
                                "notification_hostname") \
                            + "update_progress=False"
                        LOG.info(msg)
                        thread_name = self.rc_util.make_thread_name(
                            NOTIFICATION_LIST,
                            notification_list_dic.get("notification_id"))
                        th = threading.Thread(
                            target=self.rc_worker.host_maintenance_mode,
                            name=thread_name,
                            args=(notification_list_dic.get(
                                "notification_id"), notification_list_dic.get(
                                "notification_hostname"),
                                False, ))
                        th.start()

                        # Sleep until nova recognizes the node down.
                        dic = self.rc_config.get_value('recover_starter')
                        node_err_wait = dic.get("node_err_wait")
                        msg = ("Sleeping %s sec before starting recovery"
                               "thread until nova recognizes the node down..."
                               % (node_err_wait)
                               )
                        LOG.info(msg)
                        greenthread.sleep(int(node_err_wait))

                        retry_mode = False
                        msg = "Run thread rc_starter.add_failed_host." \
                            + " notification_id=" \
                            + notification_list_dic.get("notification_id") \
                            + " notification_hostname=" \
                            + notification_list_dic.get(
                                "notification_hostname") \
                            + " notification_cluster_port=" \
                            + notification_list_dic.get(
                                "notification_cluster_port") \
                            + " retry_mode=" + str(retry_mode)
                        LOG.info(msg)
                        thread_name = self.rc_util.make_thread_name(
                            NOTIFICATION_LIST,
                            notification_list_dic.get("notification_id"))
                        th = threading.Thread(
                            target=self.rc_starter.add_failed_host,
                            name=thread_name,
                            args=(notification_list_dic.get(
                                "notification_id"),
                                notification_list_dic.get(
                                "notification_hostname"),
                                notification_list_dic.get(
                                "notification_cluster_port"),
                                retry_mode, ))

                        th.start()
                    elif notification_list_dic.get("recover_by") == 0 and \
                            notification_list_dic.get("progress") == 3:
                        msg = "Run thread rc_worker.host_maintenance_mode." \
                            + " notification_id=" \
                            + notification_list_dic.get("notification_id") \
                            + " notification_hostname=" \
                            + notification_list_dic.get(
                                "notification_hostname") \
                            + "update_progress=False"
                        LOG.info(msg)
                        thread_name = self.rc_util.make_thread_name(
                            NOTIFICATION_LIST,
                            notification_list_dic.get("notification_id"))
                        th = threading.Thread(
                            target=self.rc_worker.host_maintenance_mode,
                            name=thread_name,
                            args=(notification_list_dic.get(
                                "notification_id"),
                                notification_list_dic.get(
                                "notification_hostname"),
                                False, ))
                        th.start()
                    elif notification_list_dic.get("recover_by") == 1:
                        retry_mode = False
                        msg = "Run thread rc_starter.add_failed_instance." \
                            + " notification_id=" \
                            + notification_list_dic.get("notification_id") \
                            + " notification_uuid=" \
                            + notification_list_dic.get("notification_uuid") \
                            + " retry_mode=" + str(retry_mode)
                        LOG.info(msg)
                        thread_name = self.rc_util.make_thread_name(
                            NOTIFICATION_LIST,
                            notification_list_dic.get("notification_id"))
                        th = threading.Thread(
                            target=self.rc_starter.add_failed_instance,
                            name=thread_name,
                            args=(
                                notification_list_dic.get("notification_id"),
                                notification_list_dic.get(
                                    "notification_uuid"), retry_mode, )
                        )
                        th.start()
                    elif notification_list_dic.get("recover_by") == 2:
                        msg = "Run thread rc_worker.host_maintenance_mode." \
                            + " notification_id=" \
                            + notification_list_dic.get("notification_id") \
                            + " notification_hostname=" \
                            + notification_list_dic.get(
                                "notification_hostname") \
                            + "update_progress=False"
                        LOG.info(msg)
                        thread_name = self.rc_util.make_thread_name(
                            NOTIFICATION_LIST,
                            notification_list_dic.get("notification_id"))
                        th = threading.Thread(
                            target=self.rc_worker.host_maintenance_mode,
                            name=thread_name,
                            args=(
                                notification_list_dic.get("notification_id"),
                                notification_list_dic.get(
                                    "notification_hostname"),
                                True, )
                        )
                        th.start()
                    else:
                        LOG.warning(
                            "Column \"recover_by\" \
                            on notification_list DB is invalid value.")

        except exc.SQLAlchemyError:
            error_type, error_value, traceback_ = sys.exc_info()
            tb_list = traceback.format_tb(traceback_)
            LOG.error(error_type)
            LOG.error(error_value)
            for tb in tb_list:
                LOG.error(tb)
            start_response(
                '500 Internal Server Error', [('Content-Type', 'text/plain')])

            msg = "Wsgi response: " \
                  "status=500 Internal Server Error, " \
                  "body=method _notification_reciever returned."
            LOG.info(msg)

        except KeyError:
            error_type, error_value, traceback_ = sys.exc_info()
            tb_list = traceback.format_tb(traceback_)
            LOG.error(error_type)
            LOG.error(error_value)
            for tb in tb_list:
                LOG.error(tb)
            start_response(
                '500 Internal Server Error', [('Content-Type', 'text/plain')])

            msg = "Wsgi response: " \
                  "status=500 Internal Server Error, " \
                  "body=method _notification_reciever returned."
            LOG.info(msg)

        except:
            error_type, error_value, traceback_ = sys.exc_info()
            tb_list = traceback.format_tb(traceback_)
            LOG.error(error_type)
            LOG.error(error_value)
            for tb in tb_list:
                LOG.error(tb)
            start_response(
                '500 Internal Server Error', [('Content-Type', 'text/plain')])

            msg = "Wsgi response: " \
                  "status=500 Internal Server Error, " \
                  "body=method _notification_reciever returned."
            LOG.info(msg)

        return ['method _notification_reciever returned.\r\n']
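
All three handlers above repeat the same body: unpack sys.exc_info() into (type, value, traceback), then log the formatted traceback line by line. That block could be factored into a helper along these lines (log_current_exception is an illustrative name, not part of masakari):

import logging
import sys
import traceback

LOG = logging.getLogger(__name__)

def log_current_exception():
    # Call from inside an except block; mirrors the repeated handler body.
    error_type, error_value, traceback_ = sys.exc_info()
    LOG.error(error_type)
    LOG.error(error_value)
    for tb in traceback.format_tb(traceback_):
        LOG.error(tb)

try:
    {}["missing"]
except KeyError:
    log_current_exception()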

Example 17

Project: popcorn_maker
Source File: multiprocess.py
View license
    def run(self, test):
        """
        Execute the test (which may be a test suite). If the test is a suite,
        distribute it out among as many processes as have been configured, at
        as fine a level as is possible given the context fixtures defined in
        the suite or any sub-suites.

        """
        log.debug("%s.run(%s) (%s)", self, test, os.getpid())
        wrapper = self.config.plugins.prepareTest(test)
        if wrapper is not None:
            test = wrapper

        # plugins can decorate or capture the output stream
        wrapped = self.config.plugins.setOutputStream(self.stream)
        if wrapped is not None:
            self.stream = wrapped

        testQueue = Queue()
        resultQueue = Queue()
        tasks = []
        completed = []
        workers = []
        to_teardown = []
        shouldStop = Event()

        result = self._makeResult()
        start = time.time()

        # dispatch and collect results
        # put indexes only on queue because tests aren't picklable
        for case in self.nextBatch(test):
            log.debug("Next batch %s (%s)", case, type(case))
            if (isinstance(case, nose.case.Test) and
                isinstance(case.test, failure.Failure)):
                log.debug("Case is a Failure")
                case(result) # run here to capture the failure
                continue
            # handle shared fixtures
            if isinstance(case, ContextSuite) and case.context is failure.Failure:
                log.debug("Case is a Failure")
                case(result) # run here to capture the failure
                continue
            elif isinstance(case, ContextSuite) and self.sharedFixtures(case):
                log.debug("%s has shared fixtures", case)
                try:
                    case.setUp()
                except (KeyboardInterrupt, SystemExit):
                    raise
                except:
                    log.debug("%s setup failed", sys.exc_info())
                    result.addError(case, sys.exc_info())
                else:
                    to_teardown.append(case)
                    for _t in case:
                        test_addr = self.addtask(testQueue,tasks,_t)
                        log.debug("Queued shared-fixture test %s (%s) to %s",
                                  len(tasks), test_addr, testQueue)

            else:
                test_addr = self.addtask(testQueue,tasks,case)
                log.debug("Queued test %s (%s) to %s",
                          len(tasks), test_addr, testQueue)

        log.debug("Starting %s workers", self.config.multiprocess_workers)
        for i in range(self.config.multiprocess_workers):
            currentaddr = Value('c',bytes_(''))
            currentstart = Value('d',0.0)
            keyboardCaught = Event()
            p = Process(target=runner, args=(i, testQueue, resultQueue,
                                             currentaddr, currentstart,
                                             keyboardCaught, shouldStop,
                                             self.loaderClass,
                                             result.__class__,
                                             pickle.dumps(self.config)))
            p.currentaddr = currentaddr
            p.currentstart = currentstart
            p.keyboardCaught = keyboardCaught
            # p.setDaemon(True)
            p.start()
            workers.append(p)
            log.debug("Started worker process %s", i+1)

        total_tasks = len(tasks)
        # need to keep track of the next time to check for timeouts in case
        # more than one process times out at the same time.
        nexttimeout=self.config.multiprocess_timeout
        while tasks:
            log.debug("Waiting for results (%s/%s tasks), next timeout=%.3fs",
                      len(completed), total_tasks,nexttimeout)
            try:
                iworker, addr, newtask_addrs, batch_result = resultQueue.get(
                                                        timeout=nexttimeout)
                log.debug('Results received for worker %d, %s, new tasks: %d',
                          iworker,addr,len(newtask_addrs))
                try:
                    try:
                        tasks.remove(addr)
                    except ValueError:
                        log.warn('worker %s failed to remove from tasks: %s',
                                 iworker,addr)
                    total_tasks += len(newtask_addrs)
                    for newaddr in newtask_addrs:
                        tasks.append(newaddr)
                except KeyError:
                    log.debug("Got result for unknown task? %s", addr)
                    log.debug("current: %s",str(list(tasks)[0]))
                else:
                    completed.append([addr,batch_result])
                self.consolidate(result, batch_result)
                if (self.config.stopOnError
                    and not result.wasSuccessful()):
                    # set the stop condition
                    shouldStop.set()
                    break
                if self.config.multiprocess_restartworker:
                    log.debug('joining worker %s',iworker)
                    # wait for the worker; it is not critical if it cannot
                    # be joined since, for workers that add to testQueue,
                    # they will not terminate until all their items are read
                    workers[iworker].join(timeout=1)
                    if not shouldStop.is_set() and not testQueue.empty():
                        log.debug('starting new process on worker %s',iworker)
                        currentaddr = Value('c',bytes_(''))
                        currentstart = Value('d',time.time())
                        keyboardCaught = Event()
                        workers[iworker] = Process(target=runner,
                                                   args=(iworker, testQueue,
                                                         resultQueue,
                                                         currentaddr,
                                                         currentstart,
                                                         keyboardCaught,
                                                         shouldStop,
                                                         self.loaderClass,
                                                         result.__class__,
                                                         pickle.dumps(self.config)))
                        workers[iworker].currentaddr = currentaddr
                        workers[iworker].currentstart = currentstart
                        workers[iworker].keyboardCaught = keyboardCaught
                        workers[iworker].start()
            except Empty:
                log.debug("Timed out with %s tasks pending "
                          "(empty testQueue=%d): %s",
                          len(tasks),testQueue.empty(),str(tasks))
                any_alive = False
                for iworker, w in enumerate(workers):
                    if w.is_alive():
                        worker_addr = bytes_(w.currentaddr.value,'ascii')
                        timeprocessing = time.time() - w.currentstart.value
                        if ( len(worker_addr) == 0
                                and timeprocessing > self.config.multiprocess_timeout-0.1):
                            log.debug('worker %d has finished its work item, '
                                      'but is not exiting? do we wait for it?',
                                      iworker)
                        else:
                            any_alive = True
                        if (len(worker_addr) > 0
                            and timeprocessing > self.config.multiprocess_timeout-0.1):
                            log.debug('timed out worker %s: %s',
                                      iworker,worker_addr)
                            w.currentaddr.value = bytes_('')
                            # If the process is in C++ code, sending a SIGINT
                            # might not raise a Python KeyboardInterrupt
                            # exception; therefore, send multiple signals until
                            # an exception is caught. If this takes too long,
                            # then terminate the process
                            w.keyboardCaught.clear()
                            startkilltime = time.time()
                            while not w.keyboardCaught.is_set() and w.is_alive():
                                if time.time()-startkilltime > self.waitkilltime:
                                    # have to terminate...
                                    log.error("terminating worker %s",iworker)
                                    w.terminate()
                                    currentaddr = Value('c',bytes_(''))
                                    currentstart = Value('d',time.time())
                                    keyboardCaught = Event()
                                    workers[iworker] = Process(target=runner,
                                        args=(iworker, testQueue, resultQueue,
                                              currentaddr, currentstart,
                                              keyboardCaught, shouldStop,
                                              self.loaderClass,
                                              result.__class__,
                                              pickle.dumps(self.config)))
                                    workers[iworker].currentaddr = currentaddr
                                    workers[iworker].currentstart = currentstart
                                    workers[iworker].keyboardCaught = keyboardCaught
                                    workers[iworker].start()
                                    # there is a small probability that the
                                    # terminated process might send a result,
                                    # which has to be specially handled or
                                    # else processes might get orphaned.
                                    w = workers[iworker]
                                    break
                                os.kill(w.pid, signal.SIGINT)
                                time.sleep(0.1)
                if not any_alive and testQueue.empty():
                    log.debug("All workers dead")
                    break
            nexttimeout=self.config.multiprocess_timeout
            for w in workers:
                if w.is_alive() and len(w.currentaddr.value) > 0:
                    timeprocessing = time.time()-w.currentstart.value
                    if timeprocessing <= self.config.multiprocess_timeout:
                        nexttimeout = min(nexttimeout,
                            self.config.multiprocess_timeout-timeprocessing)

        log.debug("Completed %s tasks (%s remain)", len(completed), len(tasks))

        for case in to_teardown:
            log.debug("Tearing down shared fixtures for %s", case)
            try:
                case.tearDown()
            except (KeyboardInterrupt, SystemExit):
                raise
            except:
                result.addError(case, sys.exc_info())

        stop = time.time()

        # write results first, since shutting down processes can freeze
        result.printErrors()
        result.printSummary(start, stop)
        self.config.plugins.finalize(result)

        log.debug("Tell all workers to stop")
        for w in workers:
            if w.is_alive():
                testQueue.put('STOP', block=False)

        # wait for the workers to end
        try:
            for iworker,worker in enumerate(workers):
                if worker.is_alive():
                    log.debug('joining worker %s',iworker)
                    worker.join()#10)
                    if worker.is_alive():
                        log.debug('failed to join worker %s',iworker)
        except KeyboardInterrupt:
            log.info('parent received ctrl-c')
            for worker in workers:
                worker.terminate()
                worker.join()

        return result
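
Note the fixture handling above: KeyboardInterrupt and SystemExit are re-raised, and anything else is reported by handing the raw sys.exc_info() triple to result.addError(). That is the triple unittest-style result objects expect, so they can format the traceback themselves. A condensed sketch of the pattern:

import sys

def run_setup(case, result):
    # Run a shared fixture, reporting any failure through the result object.
    try:
        case.setUp()
    except (KeyboardInterrupt, SystemExit):
        raise  # never swallow process-control exceptions
    except:
        # TestResult.addError() takes the (type, value, traceback) triple
        # and formats the traceback itself.
        result.addError(case, sys.exc_info())
        return False
    return True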

Example 18

Project: jirash
Source File: Server.py
View license
    def do_POST(self):
        global _contexts
        
        status = 500
        try:
            if self.server.config.dumpHeadersIn:
                s = 'Incoming HTTP headers'
                debugHeader(s)
                print self.raw_requestline.strip()
                print "\n".join(map (lambda x: x.strip(),
                    self.headers.headers))
                debugFooter(s)

            data = self.rfile.read(int(self.headers["Content-length"]))

            if self.server.config.dumpSOAPIn:
                s = 'Incoming SOAP'
                debugHeader(s)
                print data,
                if data[-1] != '\n':
                    print
                debugFooter(s)

            (r, header, body, attrs) = \
                parseSOAPRPC(data, header = 1, body = 1, attrs = 1)

            method = r._name
            args   = r._aslist()
            kw     = r._asdict()

            if Config.simplify_objects:
                args = simplify(args)
                kw = simplify(kw)

            # Handle mixed named and unnamed arguments by assuming
            # that all arguments with names of the form "v[0-9]+"
            # are unnamed and should be passed in numeric order,
            # other arguments are named and should be passed using
            # this name.

            # This is a non-standard extension to the SOAP protocol,
            # but is supported by Apache AXIS.

            # It is enabled by default.  To disable, set
            # Config.specialArgs to False.


            ordered_args = {}
            named_args   = {}

            if Config.specialArgs: 
                
                for (k,v) in  kw.items():

                    if k[0]=="v":
                        try:
                            i = int(k[1:])
                            ordered_args[i] = v
                        except ValueError:
                            named_args[str(k)] = v

                    else:
                        named_args[str(k)] = v

            # We have to decide namespace precedence.
            # I'm happy with the following scenario:
            # if r._ns is specified, use it; if not, check for
            # a path, and if one is specified, convert it and use it as
            # the namespace. If both are specified, use r._ns.
            
            ns = r._ns

            if len(self.path) > 1 and not ns:
                ns = self.path.replace("/", ":")
                if ns[0] == ":": ns = ns[1:]
            
            # authorization method
            a = None

            keylist = ordered_args.keys()
            keylist.sort()

            # create list in proper order w/o names
            tmp = map( lambda x: ordered_args[x], keylist)
            ordered_args = tmp

            #print '<-> Argument Matching Yielded:'
            #print '<-> Ordered Arguments:' + str(ordered_args)
            #print '<-> Named Arguments  :' + str(named_args)
             
            resp = ""
            
            # For fault messages
            if ns:
                nsmethod = "%s:%s" % (ns, method)
            else:
                nsmethod = method

            try:
                # First look for registered functions
                if self.server.funcmap.has_key(ns) and \
                    self.server.funcmap[ns].has_key(method):
                    f = self.server.funcmap[ns][method]

                    # look for the authorization method
                    if self.server.config.authMethod != None:
                        authmethod = self.server.config.authMethod
                        if self.server.funcmap.has_key(ns) and \
                               self.server.funcmap[ns].has_key(authmethod):
                            a = self.server.funcmap[ns][authmethod]
                else:
                    # Now look at registered objects
                    # Check for nested attributes. This works even if
                    # there are none, because the split will return
                    # [method]
                    f = self.server.objmap[ns]
                    
                    # Look for the authorization method
                    if self.server.config.authMethod != None:
                        authmethod = self.server.config.authMethod
                        if hasattr(f, authmethod):
                            a = getattr(f, authmethod)

                    # then continue looking for the method
                    l = method.split(".")
                    for i in l:
                        f = getattr(f, i)
            except:
                info = sys.exc_info()
                try:
                    resp = buildSOAP(faultType("%s:Client" % NS.ENV_T,
                                               "Method Not Found",
                                               "%s : %s %s %s" % (nsmethod,
                                                                  info[0],
                                                                  info[1],
                                                                  info[2])),
                                     encoding = self.server.encoding,
                                     config = self.server.config)
                finally:
                    del info
                status = 500
            else:
                try:
                    if header:
                        x = HeaderHandler(header, attrs)

                    fr = 1

                    # call context bookkeeping
                    # We're stuffing the method into the soapaction if there
                    # isn't one; someday we'll set that on the client
                    # and it won't be necessary here.
                    # For now we're doing both.

                    if "SOAPAction".lower() not in self.headers.keys() or \
                       self.headers["SOAPAction"] == "\"\"":
                        self.headers["SOAPAction"] = method
                        
                    thread_id = thread.get_ident()
                    _contexts[thread_id] = SOAPContext(header, body,
                                                       attrs, data,
                                                       self.connection,
                                                       self.headers,
                                                       self.headers["SOAPAction"])

                    # Do an authorization check
                    if a != None:
                        if not apply(a, (), {"_SOAPContext" :
                                             _contexts[thread_id] }):
                            raise faultType("%s:Server" % NS.ENV_T,
                                            "Authorization failed.",
                                            "%s" % nsmethod)
                    
                    # If it's wrapped, some special action may be needed
                    if isinstance(f, MethodSig):
                        c = None
                    
                        if f.context:  # retrieve context object
                            c = _contexts[thread_id]

                        if Config.specialArgs:
                            if c:
                                named_args["_SOAPContext"] = c
                            fr = apply(f, ordered_args, named_args)
                        elif f.keywords:
                            # This is lame, but have to de-unicode
                            # keywords
                            
                            strkw = {}
                            
                            for (k, v) in kw.items():
                                strkw[str(k)] = v
                            if c:
                                strkw["_SOAPContext"] = c
                            fr = apply(f, (), strkw)
                        elif c:
                            fr = apply(f, args, {'_SOAPContext':c})
                        else:
                            fr = apply(f, args, {})

                    else:
                        if Config.specialArgs:
                            fr = apply(f, ordered_args, named_args)
                        else:
                            fr = apply(f, args, {})

                    
                    if type(fr) == type(self) and \
                        isinstance(fr, voidType):
                        resp = buildSOAP(kw = {'%sResponse' % method: fr},
                            encoding = self.server.encoding,
                            config = self.server.config)
                    else:
                        resp = buildSOAP(kw =
                            {'%sResponse' % method: {'Result': fr}},
                            encoding = self.server.encoding,
                            config = self.server.config)

                    # Clean up _contexts
                    if _contexts.has_key(thread_id):
                        del _contexts[thread_id]
                        
                except Exception, e:
                    import traceback
                    info = sys.exc_info()

                    try:
                        if self.server.config.dumpFaultInfo:
                            s = 'Method %s exception' % nsmethod
                            debugHeader(s)
                            traceback.print_exception(info[0], info[1],
                                                      info[2])
                            debugFooter(s)

                        if isinstance(e, faultType):
                            f = e
                        else:
                            f = faultType("%s:Server" % NS.ENV_T,
                                          "Method Failed",
                                          "%s" % nsmethod)

                        if self.server.config.returnFaultInfo:
                            f._setDetail("".join(traceback.format_exception(
                                info[0], info[1], info[2])))
                        elif not hasattr(f, 'detail'):
                            f._setDetail("%s %s" % (info[0], info[1]))
                    finally:
                        del info

                    resp = buildSOAP(f, encoding = self.server.encoding,
                       config = self.server.config)
                    status = 500
                else:
                    status = 200
        except faultType, e:
            import traceback
            info = sys.exc_info()
            try:
                if self.server.config.dumpFaultInfo:
                    s = 'Received fault exception'
                    debugHeader(s)
                    traceback.print_exception(info[0], info[1],
                        info[2])
                    debugFooter(s)

                if self.server.config.returnFaultInfo:
                    e._setDetail("".join(traceback.format_exception(
                            info[0], info[1], info[2])))
                elif not hasattr(e, 'detail'):
                    e._setDetail("%s %s" % (info[0], info[1]))
            finally:
                del info

            resp = buildSOAP(e, encoding = self.server.encoding,
                config = self.server.config)
            status = 500
        except Exception, e:
            # internal error, report as HTTP server error

            if self.server.config.dumpFaultInfo:
                s = 'Internal exception %s' % e
                import traceback
                debugHeader(s)
                info = sys.exc_info()
                try:
                    traceback.print_exception(info[0], info[1], info[2])
                finally:
                    del info

                debugFooter(s)

            self.send_response(500)
            self.end_headers()

            if self.server.config.dumpHeadersOut and \
                self.request_version != 'HTTP/0.9':
                s = 'Outgoing HTTP headers'
                debugHeader(s)
                if self.responses.has_key(status):
                    s = ' ' + self.responses[status][0]
                else:
                    s = ''
                print "%s %d%s" % (self.protocol_version, 500, s)
                print "Server:", self.version_string()
                print "Date:", self.__last_date_time_string
                debugFooter(s)
        else:
            # got a valid SOAP response
            self.send_response(status)

            t = 'text/xml'
            if self.server.encoding != None:
                t += '; charset=%s' % self.server.encoding
            self.send_header("Content-type", t)
            self.send_header("Content-length", str(len(resp)))
            self.end_headers()

            if self.server.config.dumpHeadersOut and \
                self.request_version != 'HTTP/0.9':
                s = 'Outgoing HTTP headers'
                debugHeader(s)
                if self.responses.has_key(status):
                    s = ' ' + self.responses[status][0]
                else:
                    s = ''
                print "%s %d%s" % (self.protocol_version, status, s)
                print "Server:", self.version_string()
                print "Date:", self.__last_date_time_string
                print "Content-type:", t
                print "Content-length:", len(resp)
                debugFooter(s)

            if self.server.config.dumpSOAPOut:
                s = 'Outgoing SOAP'
                debugHeader(s)
                print resp,
                if resp[-1] != '\n':
                    print
                debugFooter(s)

            self.wfile.write(resp)
            self.wfile.flush()

            # We should be able to shut down both a regular and an SSL
            # connection, but under Python 2.1, calling shutdown on an
            # SSL connection drops the output, hence this work-around.
            # This should be investigated more someday.

            if self.server.config.SSLserver and \
                isinstance(self.connection, SSL.Connection):
                self.connection.set_shutdown(SSL.SSL_SENT_SHUTDOWN |
                    SSL.SSL_RECEIVED_SHUTDOWN)
            else:
                self.connection.shutdown(1)

    def do_GET(self):

        #print 'command        ', self.command
        #print 'path           ', self.path
        #print 'request_version', self.request_version
        #print 'headers'
        #print '   type    ', self.headers.type
        #print '   maintype', self.headers.maintype
        #print '   subtype ', self.headers.subtype
        #print '   params  ', self.headers.plist

        path = self.path.lower()
        if path.endswith('wsdl'):
            method = 'wsdl'
            function = namespace = None
            if self.server.funcmap.has_key(namespace) \
                    and self.server.funcmap[namespace].has_key(method):
                function = self.server.funcmap[namespace][method]
            else:
                if namespace in self.server.objmap.keys():
                    function = self.server.objmap[namespace]
                    l = method.split(".")
                    for i in l:
                        function = getattr(function, i)

            if function:
                self.send_response(200)
                self.send_header("Content-type", 'text/plain')
                self.end_headers()
                response = apply(function, ())
                self.wfile.write(str(response))
                return

        # return error
        self.send_response(200)
        self.send_header("Content-type", 'text/html')
        self.end_headers()
        self.wfile.write('''\
<title>
<head>Error!</head>
</title>

<body>
<h1>Oops!</h1>

<p>
  This server supports HTTP GET requests only for the purpose of
  obtaining Web Services Description Language (WSDL) for a specific
  service.

  Either you requested a URL that does not end in "wsdl" or this
  server does not implement a wsdl method.
</p>


</body>''')

Example 19

View license
def callback_allhit(pages, **kwargs):

    if type(pages) != type([]):
        raise Exception('::callback_allhit() must be called with one list argument')

    def remove_newline_fields(list):
        while True:
            try:
                list.remove("\n")
            except:
                break
        return list
    
#    def is_soup(object):
#        soup = BeautifulSoup()
#        if type(object) == type(soup) or type(object) == type(ResultSet('')) or type(object) == type(Tag(soup, "div", [])):
#            return True
#        return False

    data = []
    errors = []

    # Processing every page
    for page_number in pages:
        try:
            # Downloading page
            log.info("Downloading page: %s" % page_number)
            page_url = get_allhit_url(page_number)
            log.debug("Downloading %s" % page_url)
            response = urllib2.urlopen(page_url)
            html = response.read()
            soup = BeautifulSoup(html)

            # Parsing HIT groups' list
            table = soup.find('table', cellpadding='0', cellspacing='5', border='0', width='100%')
            if type(table) == type(None):

                i = 0
                while i < 3:
                    log.warn("Soup returned an empty table for page %s. Trying once more" % page_number)
                    response = urllib2.urlopen(page_url)
                    html = response.read()
                    soup = BeautifulSoup(html)
                    table = soup.find('table', cellpadding='0', cellspacing='5', border='0', width='100%')
                    if type(table) != type(None):
                        break
                    else:
                        table = None
                        soup = None
                        html = None
                        i = i + 1

                if type(table) == type(None):
                    log.warn("Soup returned an empty table. This should not happen. Skipping page")
                    continue

            table.contents = remove_newline_fields(table.contents)

            # Parsing and fetching information about each group
            for i_group in range(0, len(table.contents)):
                log.debug("Processing group %s on page %s" % (i_group, page_number))
                try:
                    group_html = table.contents[i_group]

                    # Title
                    title = group_html.find('a', {'class':'capsulelink'})
                    if type(title) != type(None):
                        try:
                            title = str(title.contents[0])
                        except:
                            title = unicode(title.contents[0])
                        try:
                            title = unicode(remove_whitespaces(title))
                        except:
                            title = ''
                    
                    # Remove <span> in title
                    title = remove_whitespaces(strip_html(title))
                    
                    fields = group_html.findAll('td', {'align':'left','valign':'top','class':'capsule_field_text'})

                    if len(fields) == 7:

                        # Requester's name and ID
                        requester_html = remove_newline_fields(fields[0].contents)[0]
                        requester_name = remove_whitespaces(strip_html(unicode(requester_html.contents[0]))) # Remove <span> in requester name
                        requester_id = requester_html['href']
                        start = requester_id.index('requesterId=')+12
                        stop = requester_id.index('&state')
                        requester_id = requester_id[start:stop]

                        # HIT group expiration date
                        hit_expiration_date = remove_newline_fields(fields[1].contents)[0]
                        hit_expiration_date = remove_whitespaces(strip_html(hit_expiration_date))
                        hit_expiration_date = hit_expiration_date[:hit_expiration_date.index('(')-2]
                        hit_expiration_date = datetime.datetime.strptime(hit_expiration_date, '%b %d, %Y')

                        # Time alloted
                        time_alloted = remove_newline_fields(fields[2].contents)[0]
                        time_alloted = remove_whitespaces(strip_html(time_alloted))
                        time_alloted = int(time_alloted[:time_alloted.index(' ')])

                        # Reward
                        reward = float(remove_newline_fields(fields[3].contents)[0][1:])

                        # HITs available
                        hits_available = int(remove_newline_fields(fields[4].contents)[0])

                        # Description
                        description = unicode(remove_newline_fields(fields[5].contents)[0])

                        # Keywords
                        keywords_raw = remove_newline_fields(fields[6].contents)
                        keywords = []
                        for i in range(0, len(keywords_raw)):
                            try:
                                keyword = keywords_raw[i].contents[0]
                                keywords.append(keyword)
                            except:
                                continue
                        keywords = unicode(fuse(keywords, ','))

                        # Qualification
                        qualifications = ''
                        qfields = group_html.findAll('td', {'style':'padding-right: 2em; white-space: nowrap;'})

                        if len(qfields) > 0:
                            qfields = [remove_whitespaces(unicode(remove_newline_fields(qfield.contents)[0])) for qfield in qfields]
                            qualifications = fuse(qfields, ', ')
                        qfields = None

                        # Occurrence date
                        occurrence_date = datetime.datetime.now()

                        # Group ID
                        group_id = group_html.find('span', {'class':'capsulelink'})
                        group_id_hashed = False
                        if type(group_id) != type(None):
                            group_id = remove_newline_fields(group_id.contents)[0]
                            if 'href' in group_id._getAttrMap():
                                start = group_id['href'].index('groupId=')+8
                                stop = group_id['href'].index('&')
                                group_id = group_id['href'][start:stop]
                            else:
                                group_id_hashed = True
                                composition = "%s;%s;%s;%s;%s;%s;%s;" % (title,requester_id,
                                                                         time_alloted,reward,
                                                                         description,keywords,
                                                                         qualifications)
                                composition = smart_str(composition)
                                group_id = hashlib.md5(composition).hexdigest()

                        # Checking whether processed content is already stored in the database
                        hit_group_content = None
                        try:
                            log.debug("group_id=%s; requester=%s; title=%s; desc=%s; ta=%s; reward=%s" % (group_id, requester_id, title, description, time_alloted, reward))
                            hit_group_content = HitGroupContent.objects.get(group_id=group_id,
                                                                            requester_id=requester_id,
                                                                            title=title,
                                                                            description=description,
                                                                            time_alloted=time_alloted,
                                                                            reward=reward,
                                                                            )
                        except HitGroupContent.DoesNotExist:
                            hit_group_content = HitGroupContent(**{
                                    'title': title,
                                    'requester_id': requester_id,
                                    'requester_name': requester_name,
                                    'time_alloted': time_alloted,
                                    'reward': reward,
                                    'html': '',
                                    'description': description,
                                    'keywords': keywords,
                                    'qualifications': qualifications,
                                    'occurrence_date': occurrence_date,
                                    'group_id': group_id,
                                    'group_id_hashed': group_id_hashed
                                })

                        data.append({
                            'HitGroupStatus': {
                                'group_id': group_id,
                                'hits_available': hits_available,
                                'page_number': page_number,
                                'inpage_position': i_group+1,
                                'hit_expiration_date': hit_expiration_date,
                                'hit_group_content': hit_group_content
                            }
                        })

                    fields = None
                    group_html = None

                except:
                    log.error("Failed to process group %s on %s page (%s)" % (i_group, page_number, sys.exc_info()[0].__name__))
                    errors.append(grab_error(sys.exc_info()))
                    print grab_error(sys.exc_info())

            table = None
            soup = None
            html = None

        except:
            log.error("Failed to process page %d (%s)" % (page_number, sys.exc_info()[0].__name__))
            errors.append(grab_error(sys.exc_info()))
            print grab_error(sys.exc_info())

    return {'data':data,'errors':errors}
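
A note on the pattern above: both bare except blocks funnel everything through grab_error(sys.exc_info()), and the log line pulls the exception class name out of sys.exc_info()[0].__name__. A minimal sketch of that idea, with a hypothetical stand-in for the project's grab_error() helper:

import sys
import traceback

def grab_error_sketch(exc_info):
    # Hypothetical stand-in for the project's grab_error() helper:
    # pull the class name and a formatted traceback out of the tuple.
    exc_type, exc_value, exc_tb = exc_info
    return {'type': exc_type.__name__,
            'traceback': ''.join(
                traceback.format_exception(exc_type, exc_value, exc_tb))}

try:
    int('not a number')
except:
    error = grab_error_sketch(sys.exc_info())
    print(error['type'])  # ValueError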

Example 20

Project: scalarizr
Source File: gce_persistent.py
View license
    def _ensure(self):

        garbage_can = []
        zone = os.path.basename(__node__['gce']['zone'])
        project_id = __node__['gce']['project_id']
        server_name = __node__['server_id']

        try:
            connection = __node__['gce'].connect_compute()
        except:
            e = sys.exc_info()[1]
            LOG.debug('Can not get GCE connection: %s' % e)
            """ No connection, implicit check """
            try:
                self._check_attr('name')
            except:
                raise storage2.StorageError('Disk is not created yet, and GCE connection is unavailable')
            device = gce_util.devicename_to_device(self.name)
            if not device:
                raise storage2.StorageError("Disk is not attached and GCE connection is unavailable")

            self.device = device
        else:
            LOG.debug('Successfully created connection to cloud engine')
            try:
                create = False
                if not self.link:
                    # Disk does not exist, create it first
                    create_request_body = dict(name=self.name)
                    if self.snap:
                        snap_dict = dict(self.snap)
                        snap_dict['type'] = STORAGE_TYPE
                        self.snap = storage2.snapshot(snap_dict)
                        LOG.debug('Ensuring that snapshot is ready, before creating disk from it')
                        gce_util.wait_snapshot_ready(self.snap)
                        create_request_body['sourceSnapshot'] = to_current_api_version(self.snap.link)
                    else:
                        create_request_body['sizeGb'] = self.size

                    create = True
                else:
                    self._check_attr('zone')
                    LOG.debug('Checking that disk already exists')
                    try:
                        disk_dict = connection.disks().get(disk=self.name, project=project_id,
                                                                            zone=zone).execute()
                        self.link = disk_dict['selfLink']
                    except HttpError, e:
                        code = int(e.resp['status'])
                        if code == 404:
                            raise storage2.VolumeNotExistsError(self.name)
                        else:
                            raise

                    if self.zone != zone:
                        # Volume is in different zone, snapshot it,
                        # create new volume from this snapshot, then attach
                        temp_snap = self.snapshot('volume')
                        garbage_can.append(temp_snap)
                        new_name = self.name + zone
                        create_request_body = dict(
                            name=new_name, sourceSnapshot=to_current_api_version(temp_snap.link))
                        create = True

                attach = False
                if create:
                    disk_name = create_request_body['name']
                    if "pd-standard" != self.disk_type:
                        disk_type = gce_util.get_disktype(conn=connection,
                            project_id=project_id, zone=zone, disktype=self.disk_type)
                        create_request_body.update({'type': disk_type['selfLink']})

                    LOG.debug('Creating new GCE disk %s' % disk_name)
                    op = connection.disks().insert(project=project_id,
                                                   zone=zone,
                                                   body=create_request_body).execute()
                    gce_util.wait_for_operation(connection, project_id, op['name'], zone)
                    disk_dict = connection.disks().get(disk=disk_name,
                                                       project=project_id,
                                                       zone=zone).execute()
                    self.id = disk_dict['id']
                    self.link = disk_dict['selfLink']
                    self.zone = zone
                    self.name = disk_name
                    attach = True

                else:
                    if self.last_attached_to and self.last_attached_to != server_name:
                        LOG.debug("Making sure that disk %s detached from previous attachment place." % self.name)
                        try:
                            gce_util.ensure_disk_detached(connection,
                                                          project_id,
                                                          zone,
                                                          self.last_attached_to,
                                                          self.link)
                        except:
                            e = sys.exc_info()[1]
                            if 'resource was not found' in str(e):
                                raise storage2.VolumeNotExistsError(self.link)
                            raise
                        
                    attachment_inf = self._attachment_info(connection)
                    if attachment_inf:
                        disk_devicename = attachment_inf['deviceName']
                    else:
                        attach = True

                if attach:
                    for _ in range(10):
                        try:
                            LOG.debug('Attaching disk %s to current instance' % self.name)
                            try:
                                op = connection.instances().attachDisk(
                                    instance=server_name, 
                                    project=project_id,
                                    zone=zone, 
                                    body=dict(
                                        deviceName=self.name,
                                        source=self.link,
                                        mode="READ_WRITE",
                                        type="PERSISTENT")).execute()
                            except:
                                e = sys.exc_info()[1]
                                if 'resource was not found' in str(e):
                                    raise storage2.VolumeNotExistsError(self.link)
                                raise

                            gce_util.wait_for_operation(connection, project_id, op['name'], zone=zone)
                            disk_devicename = self.name
                            break
                        except:
                            e = sys.exc_info()[1]
                            #  sometimes occurs when volume was just detached from another instance
                            if 'is already being used by' in str(e):
                                LOG.debug('%s. Retrying in 1s', str(e))
                                time.sleep(1)
                            else:
                                raise

                if not linux.os.windows:
                    for i in range(10):
                        device = gce_util.devicename_to_device(disk_devicename)
                        if device:
                            break
                        LOG.debug('Device not found in system. Retrying in 1s.')
                        time.sleep(1)
                    else:
                        msg = "Disk should be attached, but corresponding device not found in system"
                        raise storage2.StorageError(msg)
                else:
                    device = disk_devicename

                self.device = device
                self.last_attached_to = server_name
                self.snap = None

            finally:
                # Perform cleanup
                for garbage in garbage_can:
                    try:
                        garbage.destroy(force=True)
                    except:
                        e = sys.exc_info()[1]
                        LOG.debug('Failed to destroy temporary storage object %s: %s', garbage, e)
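
Note how this example never writes except SomeError as e: every handler is a bare except followed by e = sys.exc_info()[1]. That idiom retrieves the in-flight exception instance without committing to either the Python 2 (except E, e) or Python 3 (except E as e) binding syntax. A minimal sketch, with a hypothetical risky() standing in for the GCE calls:

import sys

def risky():
    # Hypothetical stand-in for a GCE API call that can fail.
    raise RuntimeError('The resource was not found')

try:
    risky()
except:
    e = sys.exc_info()[1]  # current exception instance, syntax-agnostic
    if 'resource was not found' in str(e):
        print('volume does not exist; translate to a domain error')
    else:
        raise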

Example 21

Project: discord_feedbot
Source File: feed2discord.py
View license
@asyncio.coroutine
def background_check_feed(feed,asyncioloop):
    global timezone
    logger.info(feed+': Starting up background_check_feed')

    # Try to wait until Discord client has connected, etc:
    yield from client.wait_until_ready()
    # make sure debug output has this check run in the right order...
    yield from asyncio.sleep(1)

    # just a bit easier to use...
    FEED=config[feed]

    # pull config for this feed out:
    feed_url = FEED.get('feed_url')
    rss_refresh_time = FEED.getint('rss_refresh_time',3600)
    max_age = FEED.getint('max_age',86400)

    # loop through all the channels this feed is configured to send to
    channels = []
    for key in FEED.get('channels').split(','):
        logger.debug(feed+': adding channel '+key)
        # stick a dict in the channels array so we have more to work with
        channels.append(
            {
              'object': discord.Object(id=config['CHANNELS'][key]),
              'name': key,
              'id': config['CHANNELS'][key],
            }
        )

    # Basically run forever
    while not client.is_closed:
        # And tries to catch all the exceptions and just keep going
        # (but see list of except/finally stuff below)
        try:
            logger.info(feed+': processing feed')

            # If send_typing is on for the feed, send a little "typing ..."
            # whenever a feed is being worked on.  configurable per-room
            if FEED.getint(
                           feed+'.send_typing',
                           FEED.getint('send_typing',0)) >= 1:
                for channel in channels:
                    # Since this is first attempt to talk to this channel,
                    # be very verbose about failures to talk to channel
                    try:
                        yield from client.send_typing(channel['object'])
                    except discord.errors.Forbidden:
                        logger.error(feed+':discord.errors.Forbidden')
                        logger.error(sys.exc_info())
                        logger.error(
                            feed+
                            ":Perhaps bot isn't allowed in this channel?")
                        logger.error(channel)

            http_headers = {}
            http_headers['User-Agent'] = MAIN.get('UserAgent',
                                                  'feed2discord/1.0')

            ### Download the actual feed, if changed since last fetch

            # pull data about history of this *feed* from DB:
            cursor = conn.cursor()
            cursor.execute(
                "select lastmodified,etag from feed_info where feed=? OR url=?",
                [feed,feed_url])
            data=cursor.fetchone()

            # If we've handled this feed before,
            # and we have etag from last run, add etag to headers.
            # and if we have a last modified time from last run,
            # add "If-Modified-Since" to headers.
            if data is None: # never handled this feed before...
                logger.info(feed+':looks like updated version. saving info')
                cursor.execute(
                    "REPLACE INTO feed_info (feed,url) VALUES (?,?)",
                    [feed,feed_url])
                conn.commit()
                logger.debug(feed+':feed info saved')
            else:
                logger.debug(feed+
                             ':setting up extra headers for HTTP request.')
                logger.debug(data)
                lastmodified = data[0]
                etag = data[1]
                if lastmodified is not None and len(lastmodified):
                    logger.debug(feed+
                                 ':adding header If-Modified-Since: '+
                                 lastmodified)
                    http_headers['If-Modified-Since'] = lastmodified
                else:
                    logger.debug(feed+':no stored lastmodified')
                if etag is not None and len(etag):
                    logger.debug(feed+':adding header ETag: '+etag)
                    http_headers['ETag'] = etag
                else:
                    logger.debug(feed+':no stored ETag')

            logger.debug(feed+':sending http request for '+feed_url)
            # Send actual request.  yield from can yield control to another
            # instance.
            http_response = yield from httpclient.request('GET',
                                                          feed_url,
                                                          headers=http_headers)
            logger.debug(http_response)

            # Some feeds are smart enough to use that if-modified-since or
            # etag info, which gives us a 304 status.  If that happens,
            # assume no new items, fall through rest of this and try again
            # later.
            if http_response.status == 304:
                logger.debug(feed+':data is old; moving on')
                http_response.close()
                raise HTTPNotModified()
            elif http_response.status is None:
                logger.error(feed+':HTTP response code is NONE')
                raise HTTPError()
            # If we get anything but a 200, that's a problem and we don't
            # have good data, so give up and try later.
            # Mostly handled different than 304/not-modified to make logging
            # clearer.
            elif http_response.status != 200:
                logger.debug(feed+':HTTP error: '+str(http_response.status))
                raise HTTPError()
            else:
                logger.debug(feed+':HTTP success')


            # pull data out of the http response
            logger.debug(feed+':reading http response')
            http_data = yield from http_response.read()

            # parse the data from the http response with feedparser
            logger.debug(feed+':parsing http data')
            feed_data = feedparser.parse(http_data)
            logger.debug(feed+':done fetching')


            # If we got an ETAG back in headers, store that, so we can
            # include on next fetch
            if 'ETAG' in http_response.headers:
                etag = http_response.headers['ETAG']
                logger.debug(feed+':saving etag: '+etag)
                cursor.execute(
                    "UPDATE feed_info SET etag=? where feed=? or url=?",
                    [etag,feed,feed_url])
                conn.commit()
                logger.debug(feed+':etag saved')
            else:
                logger.debug(feed+':no etag')

            # If we got a Last-Modified header back, store that, so we can
            # include on next fetch
            if 'LAST-MODIFIED' in http_response.headers:
                modified = http_response.headers['LAST-MODIFIED']
                logger.debug(feed+':saving lastmodified: '+modified)
                cursor.execute(
                    "UPDATE feed_info SET lastmodified=? where feed=? or url=?",
                    [modified,feed,feed_url])
                conn.commit()
                logger.debug(feed+':saved lastmodified')
            else:
                logger.debug(feed+':no last modified date')

            http_response.close()

            # Process all of the entries in the feed
            # Use reversed to start with end, which is usually oldest
            logger.debug(feed+':processing entries')
            for item in reversed(feed_data.entries):
                logger.debug(feed+':item:processing this entry')
                if debug > 1:
                    logger.debug(item) # can be very noisy

                # Pull out the unique id, or just give up on this item.
                id = ''
                if 'id' in item:
                    id=item.id
                elif 'guid' in item:
                    id=item.guid
                elif 'link' in item:
                    id=item.link
                else:
                    logger.error(feed+':item:no id, skipping')
                    continue

                # Get our best date out, in both raw and parsed form
                pubDateDict = extract_best_item_date(item)
                pubDate = pubDateDict['date']
                pubDate_parsed = pubDateDict['date_parsed']

                logger.debug(feed+':item:id:'+id)
                logger.debug(feed+
                             ':item:checking database history for this item')
                # Check DB for this item
                cursor.execute(
                    "SELECT published,title,url,reposted FROM feed_items WHERE id=?",
                    [id])
                data=cursor.fetchone()

                # If we've never seen it before, then actually processing
                # this:
                if data is None:
                    logger.info(feed+':item '+id+' unseen, processing:')

                    # Store info about this item, so next time we skip it:
                    cursor.execute(
                        "INSERT INTO feed_items (id,published) VALUES (?,?)",
                        [id,pubDate])
                    conn.commit()

                    # Doing some crazy date math stuff...
                    # max_age is mostly so that first run doesn't spew too
                    # much stuff into a room, but is also a useful safety
                    # measure in case a feed suddenly reverts to something
                    # ancient or other weird problems...
                    time_since_published = timezone.localize(datetime.now()) - pubDate_parsed.astimezone(timezone)

                    if time_since_published.total_seconds() < max_age:
                        logger.info(feed+':item:fresh and ready for parsing')

                        # Loop over all channels for this particular feed
                        # and process appropriately:
                        for channel in channels:
                            include = True
                            filter_field = FEED.get(
                                                    channel['name']+'.filter_field',
                                                    FEED.get('filter_field',
                                                        'title'))
                            # Regex if channel exists
                            if (channel['name']+'.filter') in FEED or 'filter' in FEED:
                                logger.debug(feed+':item:running filter for '+channel['name'])
                                regexpat = FEED.get(
                                                    channel['name']+'.filter',
                                                    FEED.get('filter','^.*$'))
                                logger.debug(feed+':item:using filter:'+regexpat+' on '+item['title']+' field '+filter_field)
                                regexmatch = re.search(regexpat,item[filter_field])
                                if regexmatch is None:
                                    include = False
                                    logger.info(feed+':item:failed filter for '+channel['name'])
                            elif (channel['name']+'.filter_exclude') in FEED or 'filter_exclude' in FEED:
                                logger.debug(feed+':item:running exclude filter for '+channel['name'])
                                regexpat = FEED.get(
                                                    channel['name']+'.filter_exclude',
                                                    FEED.get('filter_exclude',
                                                    '^.*$'))
                                logger.debug(feed+':item:using filter_exclude:'+regexpat+' on '+item['title']+' field '+filter_field)
                                regexmatch = re.search(regexpat,item[filter_field])
                                if regexmatch is None:
                                    include = True
                                    logger.info(feed+':item:passed exclude filter for '+channel['name'])
                                else:
                                    include = False
                                    logger.info(feed+':item:failed exclude filter for '+channel['name'])
                            else:
                                include = True # redundant safety net
                                logger.debug(feed+':item:no filter configured for '+channel['name'])

                            if include is True:
                                logger.debug(feed+':item:building message for '+channel['name'])
                                message = build_message(FEED,item,channel)
                                logger.debug(feed+':item:sending message (eventually) to '+channel['name'])
                                yield from send_message_wrapper(asyncioloop,
                                                                FEED,
                                                                feed,
                                                                channel,
                                                                client,
                                                                message)
                            else:
                                logger.info(feed+':item:skipping item due to not passing filter for '+channel['name'])

                    else:
                        # Logs of debugging info for date handling stuff...
                        logger.info(feed+':too old; skipping')
                        logger.debug(feed+':now:'+str(time.time()))
                        logger.debug(feed+':now:gmtime:'+str(time.gmtime()))

                        logger.debug(feed+':now:localtime:'+str(time.localtime()))
                        logger.debug(feed+':timezone.localize(datetime.now()):'+str(timezone.localize(datetime.now())))
                        logger.debug(feed+':pubDate:'+str(pubDate))
                        logger.debug(feed+':pubDate_parsed:'+str(pubDate_parsed))
                        logger.debug(feed+':pubDate_parsed.astimezone(timezone):'+str(pubDate_parsed.astimezone(timezone)))
                        if debug >= 4:
                            logger.debug(item)
                # seen before, move on:
                else:
                    logger.debug(feed+':item:'+id+' seen before, skipping')
        # This is completely expected behavior for a well-behaved feed:
        except HTTPNotModified:
            logger.debug(feed+':Headers indicate feed unchanged since last time fetched:')
            logger.debug(sys.exc_info())
        # Many feeds have random periodic problems that shouldn't cause
        # permanent death:
        except HTTPError:
            logger.warn(feed+':Unexpected HTTP error:')
            logger.warn(sys.exc_info())
            logger.warn(feed+':Assuming error is transient and trying again later')
        # sqlite3 errors are probably really bad and we should just totally
        # give up on life
        except sqlite3.Error as sqlerr:
            logger.error(feed+':sqlite3 error: ')
            logger.error(sys.exc_info())
            logger.error(sqlerr)
            raise
        # Ideally we'd remove the specific channel or something...
        # But I guess just throw an error into the log and try again later...
        except discord.errors.Forbidden:
            logger.error(feed+':discord.errors.Forbidden')
            logger.error(sys.exc_info())
            logger.error(feed+":Perhaps bot isn't allowed in one of the channels for this feed?")
            # raise # or not? hmm...
        # unknown error: definitely give up and die and move on
        except:
            logger.error(feed+':Unexpected error:')
            # logger.error(sys.exc_info())
            logger.error(traceback.format_exc())
            logger.error(feed+':giving up')
            raise
        # No matter what goes wrong, wait same time and try again
        finally:
            logger.debug(feed+':sleeping for '+str(rss_refresh_time)+' seconds')
            yield from asyncio.sleep(rss_refresh_time)
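
Passing sys.exc_info() directly to logger.error(), as the handlers above do, logs the repr of the (type, value, traceback) tuple. The stdlib logging module also accepts that same tuple (or True) through the exc_info keyword, which renders a full traceback instead. A small sketch of both variants:

import logging
import sys

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger('feed')

try:
    raise ValueError('bad feed entry')
except ValueError:
    # As in the example: logs the tuple's repr as the message.
    logger.error(sys.exc_info())
    # Alternative: let logging format the traceback itself.
    logger.error('bad feed entry', exc_info=sys.exc_info())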

Example 22

Project: p2pool-n
Source File: Server.py
View license
    def do_POST(self):
        global _contexts
        
        status = 500
        try:
            if self.server.config.dumpHeadersIn:
                s = 'Incoming HTTP headers'
                debugHeader(s)
                print self.raw_requestline.strip()
                print "\n".join(map (lambda x: x.strip(),
                    self.headers.headers))
                debugFooter(s)

            data = self.rfile.read(int(self.headers["Content-length"]))

            if self.server.config.dumpSOAPIn:
                s = 'Incoming SOAP'
                debugHeader(s)
                print data,
                if data[-1] != '\n':
                    print
                debugFooter(s)

            (r, header, body, attrs) = \
                parseSOAPRPC(data, header = 1, body = 1, attrs = 1)

            method = r._name
            args   = r._aslist()
            kw     = r._asdict()

            if Config.simplify_objects:
                args = simplify(args)
                kw = simplify(kw)

            # Handle mixed named and unnamed arguments by assuming
            # that all arguments with names of the form "v[0-9]+"
            # are unnamed and should be passed in numeric order,
            # other arguments are named and should be passed using
            # this name.

            # This is a non-standard extension to the SOAP protocol,
            # but is supported by Apache AXIS.

            # It is enabled by default.  To disable, set
            # Config.specialArgs to False.


            ordered_args = {}
            named_args   = {}

            if Config.specialArgs: 
                
                for (k,v) in  kw.items():

                    if k[0]=="v":
                        try:
                            i = int(k[1:])
                            ordered_args[i] = v
                        except ValueError:
                            named_args[str(k)] = v

                    else:
                        named_args[str(k)] = v

            # We have to decide namespace precedence
            # I'm happy with the following scenario
            # if r._ns is specified use it, if not check for
            # a path, if it's specified convert it and use it as the
            # namespace. If both are specified, use r._ns.
            
            ns = r._ns

            if len(self.path) > 1 and not ns:
                ns = self.path.replace("/", ":")
                if ns[0] == ":": ns = ns[1:]
            
            # authorization method
            a = None

            keylist = ordered_args.keys()
            keylist.sort()

            # create list in proper order w/o names
            tmp = map( lambda x: ordered_args[x], keylist)
            ordered_args = tmp

            #print '<-> Argument Matching Yielded:'
            #print '<-> Ordered Arguments:' + str(ordered_args)
            #print '<-> Named Arguments  :' + str(named_args)
             
            resp = ""
            
            # For fault messages
            if ns:
                nsmethod = "%s:%s" % (ns, method)
            else:
                nsmethod = method

            try:
                # First look for registered functions
                if self.server.funcmap.has_key(ns) and \
                    self.server.funcmap[ns].has_key(method):
                    f = self.server.funcmap[ns][method]

                    # look for the authorization method
                    if self.server.config.authMethod != None:
                        authmethod = self.server.config.authMethod
                        if self.server.funcmap.has_key(ns) and \
                               self.server.funcmap[ns].has_key(authmethod):
                            a = self.server.funcmap[ns][authmethod]
                else:
                    # Now look at registered objects
                    # Check for nested attributes. This works even if
                    # there are none, because the split will return
                    # [method]
                    f = self.server.objmap[ns]
                    
                    # Look for the authorization method
                    if self.server.config.authMethod != None:
                        authmethod = self.server.config.authMethod
                        if hasattr(f, authmethod):
                            a = getattr(f, authmethod)

                    # then continue looking for the method
                    l = method.split(".")
                    for i in l:
                        f = getattr(f, i)
            except:
                info = sys.exc_info()
                try:
                    resp = buildSOAP(faultType("%s:Client" % NS.ENV_T,
                                               "Method Not Found",
                                               "%s : %s %s %s" % (nsmethod,
                                                                  info[0],
                                                                  info[1],
                                                                  info[2])),
                                     encoding = self.server.encoding,
                                     config = self.server.config)
                finally:
                    del info
                status = 500
            else:
                try:
                    if header:
                        x = HeaderHandler(header, attrs)

                    fr = 1

                    # call context book keeping
                    # We're stuffing the method into the soapaction if there
                    # isn't one, someday, we'll set that on the client
                    # and it won't be necessary here
                    # for now we're doing both

                    if "SOAPAction".lower() not in self.headers.keys() or \
                       self.headers["SOAPAction"] == "\"\"":
                        self.headers["SOAPAction"] = method
                        
                    thread_id = thread.get_ident()
                    _contexts[thread_id] = SOAPContext(header, body,
                                                       attrs, data,
                                                       self.connection,
                                                       self.headers,
                                                       self.headers["SOAPAction"])

                    # Do an authorization check
                    if a != None:
                        if not apply(a, (), {"_SOAPContext" :
                                             _contexts[thread_id] }):
                            raise faultType("%s:Server" % NS.ENV_T,
                                            "Authorization failed.",
                                            "%s" % nsmethod)
                    
                    # If it's wrapped, some special action may be needed
                    if isinstance(f, MethodSig):
                        c = None
                    
                        if f.context:  # retrieve context object
                            c = _contexts[thread_id]

                        if Config.specialArgs:
                            if c:
                                named_args["_SOAPContext"] = c
                            fr = apply(f, ordered_args, named_args)
                        elif f.keywords:
                            # This is lame, but have to de-unicode
                            # keywords
                            
                            strkw = {}
                            
                            for (k, v) in kw.items():
                                strkw[str(k)] = v
                            if c:
                                strkw["_SOAPContext"] = c
                            fr = apply(f, (), strkw)
                        elif c:
                            fr = apply(f, args, {'_SOAPContext':c})
                        else:
                            fr = apply(f, args, {})

                    else:
                        if Config.specialArgs:
                            fr = apply(f, ordered_args, named_args)
                        else:
                            fr = apply(f, args, {})

                    
                    if type(fr) == type(self) and \
                        isinstance(fr, voidType):
                        resp = buildSOAP(kw = {'%sResponse' % method: fr},
                            encoding = self.server.encoding,
                            config = self.server.config)
                    else:
                        resp = buildSOAP(kw =
                            {'%sResponse' % method: {'Result': fr}},
                            encoding = self.server.encoding,
                            config = self.server.config)

                    # Clean up _contexts
                    if _contexts.has_key(thread_id):
                        del _contexts[thread_id]
                        
                except Exception, e:
                    import traceback
                    info = sys.exc_info()

                    try:
                        if self.server.config.dumpFaultInfo:
                            s = 'Method %s exception' % nsmethod
                            debugHeader(s)
                            traceback.print_exception(info[0], info[1],
                                                      info[2])
                            debugFooter(s)

                        if isinstance(e, faultType):
                            f = e
                        else:
                            f = faultType("%s:Server" % NS.ENV_T,
                                          "Method Failed",
                                          "%s" % nsmethod)

                        if self.server.config.returnFaultInfo:
                            f._setDetail("".join(traceback.format_exception(
                                info[0], info[1], info[2])))
                        elif not hasattr(f, 'detail'):
                            f._setDetail("%s %s" % (info[0], info[1]))
                    finally:
                        del info

                    resp = buildSOAP(f, encoding = self.server.encoding,
                       config = self.server.config)
                    status = 500
                else:
                    status = 200
        except faultType, e:
            import traceback
            info = sys.exc_info()
            try:
                if self.server.config.dumpFaultInfo:
                    s = 'Received fault exception'
                    debugHeader(s)
                    traceback.print_exception(info[0], info[1],
                        info[2])
                    debugFooter(s)

                if self.server.config.returnFaultInfo:
                    e._setDetail("".join(traceback.format_exception(
                            info[0], info[1], info[2])))
                elif not hasattr(e, 'detail'):
                    e._setDetail("%s %s" % (info[0], info[1]))
            finally:
                del info

            resp = buildSOAP(e, encoding = self.server.encoding,
                config = self.server.config)
            status = 500
        except Exception, e:
            # internal error, report as HTTP server error

            if self.server.config.dumpFaultInfo:
                s = 'Internal exception %s' % e
                import traceback
                debugHeader(s)
                info = sys.exc_info()
                try:
                    traceback.print_exception(info[0], info[1], info[2])
                finally:
                    del info

                debugFooter(s)

            self.send_response(500)
            self.end_headers()

            if self.server.config.dumpHeadersOut and \
                self.request_version != 'HTTP/0.9':
                s = 'Outgoing HTTP headers'
                debugHeader(s)
                if self.responses.has_key(status):
                    s = ' ' + self.responses[status][0]
                else:
                    s = ''
                print "%s %d%s" % (self.protocol_version, 500, s)
                print "Server:", self.version_string()
                print "Date:", self.__last_date_time_string
                debugFooter(s)
        else:
            # got a valid SOAP response
            self.send_response(status)

            t = 'text/xml';
            if self.server.encoding != None:
                t += '; charset=%s' % self.server.encoding
            self.send_header("Content-type", t)
            self.send_header("Content-length", str(len(resp)))
            self.end_headers()

            if self.server.config.dumpHeadersOut and \
                self.request_version != 'HTTP/0.9':
                s = 'Outgoing HTTP headers'
                debugHeader(s)
                if self.responses.has_key(status):
                    s = ' ' + self.responses[status][0]
                else:
                    s = ''
                print "%s %d%s" % (self.protocol_version, status, s)
                print "Server:", self.version_string()
                print "Date:", self.__last_date_time_string
                print "Content-type:", t
                print "Content-length:", len(resp)
                debugFooter(s)

            if self.server.config.dumpSOAPOut:
                s = 'Outgoing SOAP'
                debugHeader(s)
                print resp,
                if resp[-1] != '\n':
                    print
                debugFooter(s)

            self.wfile.write(resp)
            self.wfile.flush()

            # We should be able to shut down both a regular and an SSL
            # connection, but under Python 2.1, calling shutdown on an
            # SSL connections drops the output, so this work-around.
            # This should be investigated more someday.

            if self.server.config.SSLserver and \
                isinstance(self.connection, SSL.Connection):
                self.connection.set_shutdown(SSL.SSL_SENT_SHUTDOWN |
                    SSL.SSL_RECEIVED_SHUTDOWN)
            else:
                self.connection.shutdown(1)

    def do_GET(self):

        #print 'command        ', self.command
        #print 'path           ', self.path
        #print 'request_version', self.request_version
        #print 'headers'
        #print '   type    ', self.headers.type
        #print '   maintype', self.headers.maintype
        #print '   subtype ', self.headers.subtype
        #print '   params  ', self.headers.plist

        path = self.path.lower()
        if path.endswith('wsdl'):
            method = 'wsdl'
            function = namespace = None
            if self.server.funcmap.has_key(namespace) \
                    and self.server.funcmap[namespace].has_key(method):
                function = self.server.funcmap[namespace][method]
            else:
                if namespace in self.server.objmap.keys():
                    function = self.server.objmap[namespace]
                    l = method.split(".")
                    for i in l:
                        function = getattr(function, i)

            if function:
                self.send_response(200)
                self.send_header("Content-type", 'text/plain')
                self.end_headers()
                response = apply(function, ())
                self.wfile.write(str(response))
                return

        # return error
        self.send_response(200)
        self.send_header("Content-type", 'text/html')
        self.end_headers()
        self.wfile.write('''\
<head>
<title>Error!</title>
</head>

<body>
<h1>Oops!</h1>

<p>
  This server supports HTTP GET requests only for the purpose of
  obtaining Web Services Description Language (WSDL) for a specific
  service.

  Either you requested a URL that does not end in "wsdl" or this
  server does not implement a wsdl method.
</p>


</body>''')
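
Every handler in this example wraps its use of info = sys.exc_info() in try/finally: del info. On Python 2 the traceback object holds the current frame, so binding the tuple to a local creates a frame -> local -> traceback -> frame reference cycle; deleting the name breaks it. A minimal sketch of the pattern:

import sys
import traceback

try:
    {}['missing']
except KeyError:
    info = sys.exc_info()
    try:
        # Use the tuple while it is bound, e.g. to build a fault detail.
        detail = ''.join(traceback.format_exception(info[0], info[1], info[2]))
        print(detail)
    finally:
        del info  # break the traceback/frame reference cycle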

Example 23

Project: capirca
Source File: aclgen.py
View license
def RenderFile(input_file, output_directory, definitions,
               exp_info, write_files):
  """Render a single file.

  Args:
    input_file: the name of the input policy file.
    output_directory: the directory in which we place the rendered file.
    definitions: the definitions from naming.Naming().
    exp_info: print a info message when a term is set to expire
              in that many weeks.
    write_files: a list of file tuples, (output_file, acl_text), to write
  """
  logging.debug('rendering file: %s into %s', input_file,
                output_directory)
  pol = None
  jcl = False
  acl = False
  asacl = False
  aacl = False
  bacl = False
  eacl = False
  gcefw = False
  ips = False
  ipt = False
  spd = False
  nsx = False
  pcap_accept = False
  pcap_deny = False
  pf = False
  srx = False
  jsl = False
  nft = False
  win_afw = False
  xacl = False

  try:
    conf = open(input_file).read()
    logging.debug('opened and read %s', input_file)
  except IOError as e:
    logging.warn('bad file: \n%s', e)
    raise

  try:
    pol = policy.ParsePolicy(
        conf, definitions, optimize=FLAGS.optimize,
        base_dir=FLAGS.base_directory, shade_check=FLAGS.shade_check)
  except policy.ShadingError as e:
    logging.warn('shading errors for %s:\n%s', input_file, e)
    return
  except (policy.Error, naming.Error):
    raise ACLParserError('Error parsing policy file %s:\n%s%s' % (
        input_file, sys.exc_info()[0], sys.exc_info()[1]))

  platforms = set()
  for header in pol.headers:
    platforms.update(header.platforms)

  if 'juniper' in platforms:
    jcl = copy.deepcopy(pol)
  if 'cisco' in platforms:
    acl = copy.deepcopy(pol)
  if 'ciscoasa' in platforms:
    asacl = copy.deepcopy(pol)
  if 'brocade' in platforms:
    bacl = copy.deepcopy(pol)
  if 'arista' in platforms:
    eacl = copy.deepcopy(pol)
  if 'aruba' in platforms:
    aacl = copy.deepcopy(pol)
  if 'ipset' in platforms:
    ips = copy.deepcopy(pol)
  if 'iptables' in platforms:
    ipt = copy.deepcopy(pol)
  if 'nsxv' in platforms:
    nsx = copy.deepcopy(pol)
  if 'packetfilter' in platforms:
    pf = copy.deepcopy(pol)
  if 'pcap' in platforms:
    pcap_accept = copy.deepcopy(pol)
    pcap_deny = copy.deepcopy(pol)
  if 'speedway' in platforms:
    spd = copy.deepcopy(pol)
  if 'srx' in platforms:
    srx = copy.deepcopy(pol)
  if 'srxlo' in platforms:
    jsl = copy.deepcopy(pol)
  if 'windows_advfirewall' in platforms:
    win_afw = copy.deepcopy(pol)
  if 'ciscoxr' in platforms:
    xacl = copy.deepcopy(pol)
  if 'nftables' in platforms:
    nft = copy.deepcopy(pol)
  if 'gce' in platforms:
    gcefw = copy.deepcopy(pol)

  if not output_directory.endswith('/'):
    output_directory += '/'

  try:
    if jcl:
      acl_obj = juniper.Juniper(jcl, exp_info)
      RenderACL(str(acl_obj), acl_obj.SUFFIX, output_directory,
                input_file, write_files)
    if srx:
      acl_obj = junipersrx.JuniperSRX(srx, exp_info)
      RenderACL(str(acl_obj), acl_obj.SUFFIX, output_directory,
                input_file, write_files)
    if acl:
      acl_obj = cisco.Cisco(acl, exp_info)
      RenderACL(str(acl_obj), acl_obj.SUFFIX, output_directory,
                input_file, write_files)
    if asacl:
      acl_obj = ciscoasa.CiscoASA(asacl, exp_info)
      RenderACL(str(acl_obj), acl_obj.SUFFIX, output_directory,
                input_file, write_files)
    if aacl:
      acl_obj = aruba.Aruba(aacl, exp_info)
      RenderACL(str(acl_obj), acl_obj.SUFFIX, output_directory,
                input_file, write_files)
    if bacl:
      acl_obj = brocade.Brocade(bacl, exp_info)
      RenderACL(str(acl_obj), acl_obj.SUFFIX, output_directory,
                input_file, write_files)
    if eacl:
      acl_obj = arista.Arista(eacl, exp_info)
      RenderACL(str(acl_obj), acl_obj.SUFFIX, output_directory,
                input_file, write_files)
    if ips:
      acl_obj = ipset.Ipset(ips, exp_info)
      RenderACL(str(acl_obj), acl_obj.SUFFIX, output_directory,
                input_file, write_files)
    if ipt:
      acl_obj = iptables.Iptables(ipt, exp_info)
      RenderACL(str(acl_obj), acl_obj.SUFFIX, output_directory,
                input_file, write_files)
    if nsx:
      acl_obj = nsxv.Nsxv(nsx, exp_info)
      RenderACL(str(acl_obj), acl_obj.SUFFIX, output_directory,
                input_file, write_files)
    if spd:
      acl_obj = speedway.Speedway(spd, exp_info)
      RenderACL(str(acl_obj), acl_obj.SUFFIX, output_directory,
                input_file, write_files)
    if pcap_accept:
      acl_obj = pcap.PcapFilter(pcap_accept, exp_info)
      RenderACL(str(acl_obj), '-accept' + acl_obj.SUFFIX, output_directory,
                input_file, write_files)
    if pcap_deny:
      acl_obj = pcap.PcapFilter(pcap_deny, exp_info, invert=True)
      RenderACL(str(acl_obj), '-deny' + acl_obj.SUFFIX, output_directory,
                input_file, write_files)
    if pf:
      acl_obj = packetfilter.PacketFilter(pf, exp_info)
      RenderACL(str(acl_obj), acl_obj.SUFFIX, output_directory,
                input_file, write_files)
    if win_afw:
      acl_obj = windows_advfirewall.WindowsAdvFirewall(win_afw, exp_info)
      RenderACL(str(acl_obj), acl_obj.SUFFIX, output_directory,
                input_file, write_files)
    if jsl:
      acl_obj = srxlo.SRXlo(jsl, exp_info)
      RenderACL(str(acl_obj), acl_obj.SUFFIX, output_directory,
                input_file, write_files)
    if xacl:
      acl_obj = ciscoxr.CiscoXR(xacl, exp_info)
      RenderACL(str(acl_obj), acl_obj.SUFFIX, output_directory,
                input_file, write_files)
    if nft:
      acl_obj = nftables.Nftables(nft, exp_info)
      RenderACL(str(acl_obj), acl_obj.SUFFIX, output_directory,
                input_file, write_files)
    if gcefw:
      acl_obj = gce.GCE(gcefw, exp_info)
      RenderACL(str(acl_obj), acl_obj.SUFFIX, output_directory,
                input_file, write_files)
  # TODO(robankeny) add additional errors.
  except (juniper.Error, junipersrx.Error, cisco.Error, ipset.Error,
          iptables.Error, speedway.Error, pcap.Error,
          aclgenerator.Error, aruba.Error, nftables.Error, gce.Error):
    raise ACLGeneratorError('Error generating target ACL for %s:\n%s%s' % (
        input_file, sys.exc_info()[0], sys.exc_info()[1]))
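
Both handlers above translate a lower-level failure into a domain error whose message embeds sys.exc_info()[0] (the exception class) and sys.exc_info()[1] (the instance). A minimal sketch of that translation, with a hypothetical parse() standing in for policy.ParsePolicy:

import sys

class ACLParserError(Exception):
    pass

def parse(text):
    # Hypothetical stand-in for policy.ParsePolicy().
    raise ValueError('unexpected token')

try:
    try:
        parse('term bad-term {')
    except ValueError:
        raise ACLParserError('Error parsing policy file %s:\n%s%s' % (
            'example.pol', sys.exc_info()[0], sys.exc_info()[1]))
except ACLParserError as err:
    print(err)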

Example 24

Project: xraylarch
Source File: builtins.py
View license
def _addplugin(plugin, _larch=None, **kws):
    """add plugin components from plugin directory"""
    if _larch is None:
        raise Warning("cannot add plugins. larch broken?")
    write = _larch.writer.write
    errmsg = 'is not a valid larch plugin\n'
    pjoin = os.path.join
    path = site_config.plugins_path
    _sysconf = _larch.symtable._sys.config
    if not hasattr(_larch.symtable._sys, 'import_ok'):
        _larch.symtable._sys.import_ok  = True
    if not _larch.symtable._sys.import_ok:
        return

    if not hasattr(_sysconf, 'plugins_path'):
        _sysconf.plugins_path = site_config.plugins_path

    def _find_plugin(plugin, p_path):
        """find the plugin from path
        returns True, package name for packages
                False, (fh, modpath, desc) for imported modules
                None, None for Not Found
        """
        if not _larch.symtable._sys.import_ok:
            return

        if plugin == '__init__':
            return None, None
        mod, is_pkg = None, False
        try:
            mod = imp.find_module(plugin, [p_path])
        except ImportError:
            is_pkg = os.path.isdir(pjoin(p_path, plugin))

        if is_pkg or (mod is not None and
                      mod[2][2] == imp.PKG_DIRECTORY):
            return True, pjoin(p_path, plugin)
        elif mod is not None:
            return False, mod
        else:
            return None, None

    def on_error(msg):
        _larch.raise_exception(None, exc=ImportError, msg=msg)
        _larch.symtable._sys.import_ok = False

    def _check_requirements(ppath):
        """check for requirements.txt, return True only if all
        requirements are met
        """
        req_file = os.path.abspath(os.path.join(ppath, PLUGINSREQ))
        if not os.path.exists(req_file):
            return True
        if os.path.exists(req_file):
            with open(req_file, 'r') as fh:
                for line in fh.readlines():
                    line = line[:-1]
                    if line.startswith('#'):  continue
                    match = REQMATCH(line)
                    if match is None:
                        continue
                    ok = False
                    modname, cmp, req_vers = match.groups()
                    try:
                        mod = __import__(modname)
                        vers = getattr(mod, '__version__', None)
                        if   cmp == '>':  ok = vers >  req_vers
                        elif cmp == '<':  ok = vers <  req_vers
                        elif cmp == '>=': ok = vers >= req_vers
                        elif cmp == '<=': ok = vers <= req_vers
                        elif cmp == '==': ok = vers == req_vers
                        elif cmp == '=':  ok = vers == req_vers
                        elif cmp == '!=': ok = vers != req_vers
                    except:
                        ok = False
                    if not ok:
                        return False
        return True

    def _plugin_file(plugin, path=None):
        "defined here to allow recursive imports for packages"
        fh = None
        if plugin == '__init__':
            return
        if not _larch.symtable._sys.import_ok:
            return

        if path is None:
            try:
                path = _larch.symtable._sys.config.plugins_path
            except:
                path = site_config.plugins_path

        for p_path in path:
            # print(" -- find_plugin ", plugin)
            is_pkg, mod = _find_plugin(plugin, p_path)
            if is_pkg is not None:
                break
        if is_pkg is None and mod is None:
            write('Warning: plugin %s not found\n' % plugin)
            return False

        retval = True
        if is_pkg:
            if _check_requirements(plugin):
                filelist = []
                if PLUGINSTXT in sorted(os.listdir(mod)):
                    pfile = os.path.abspath(os.path.join(mod, PLUGINSTXT))
                    try:
                        with open(pfile, 'r') as pluginsfile:
                            for name in pluginsfile:
                                name = name[:-1].strip()
                                if (not name.startswith('#') and
                                    name.endswith('.py') and len(name) > 3):
                                    filelist.append(name)
                    except:
                        write("Warning:: Error reading plugin file:\n %s\n" %
                              pfile)
                if len(filelist) == 0:
                    for fname in sorted(os.listdir(mod)):
                        if fname.endswith('.py') and len(fname) > 3:
                            filelist.append(fname)

                retvals = []
                for fname in filelist:
                    if not _larch.symtable._sys.import_ok:
                        return
                    try:
                        ret =  _plugin_file(fname[:-3], path=[mod])
                    except:
                        err, exc, tback = sys.exc_info()
                        write('Warning: %s is =not= a valid plugin\n' %
                              pjoin(mod, fname))
                        write("   error:  %s\n" % (repr(sys.exc_info()[1])))
                        ret = False
                        _larch.symtable._sys.import_ok = False
                    retvals.append(ret)
                retval = all(retvals)
        else:
            fh, modpath, desc = mod
            try:
                out = imp.load_module(plugin, fh, modpath, desc)
                _larch.symtable.add_plugin(out, on_error, **kws)
            except:
                err, exc, tback = sys.exc_info()
                lineno = getattr(exc, 'lineno', 0)
                offset = getattr(exc, 'offset', 0)
                etext  = getattr(exc, 'text', '')
                emsg   = getattr(exc, 'message', '')
                try:
                    write(traceback.print_tb(tback))
                except:
                    pass
                write("""Python Error at plugin '%s', line %d
  %s %s^
%s: %s\n""" % (modpath, lineno, etext, ' '*offset, err.__name__, emsg))
                retval = False
                _larch.symtable._sys.import_ok = False

        if _larch.error:
            retval = False
            err = _larch.error.pop(0)
            fname, lineno = err.fname, err.lineno
            output = ["Error Adding Plugin %s from file %s" % (plugin, fname),
                      "%s" % (err.get_error()[1])]

            for err in _larch.error:
                if ((err.fname != fname or err.lineno != lineno) and
                    err.lineno > 0 and lineno > 0):
                    output.append("%s" % (err.get_error()[1]))
            write('\n'.join(output))

        if fh is not None:
            fh.close()
        return retval

    return _plugin_file(plugin)
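
The loader above unpacks the tuple as err, exc, tback = sys.exc_info() and then reads SyntaxError attributes (lineno, offset, text) off the instance to build its error report. A minimal sketch using compile() to trigger a comparable failure:

import sys

try:
    compile('def broken(:', '<plugin>', 'exec')
except SyntaxError:
    err, exc, tback = sys.exc_info()
    # SyntaxError instances carry position info, as the plugin loader reads:
    print('%s at line %d, offset %d' % (
        err.__name__,
        getattr(exc, 'lineno', 0) or 0,
        getattr(exc, 'offset', 0) or 0))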

Example 25

Project: django-simple-import
Source File: views.py
View license
@staff_member_required
def do_import(request, import_log_id):
    """ Import the data! """
    import_log = get_object_or_404(ImportLog, id=import_log_id)
    if import_log.import_type == "N" and 'undo' in request.GET and request.GET['undo'] == "True":
        import_log.undo()
        return HttpResponseRedirect(reverse(
                    do_import,
                    kwargs={'import_log_id': import_log.id}) + '?success_undo=True')

    if 'success_undo' in request.GET and request.GET['success_undo'] == "True":
        success_undo = True
    else:
        success_undo = False

    model_class = import_log.import_setting.content_type.model_class()
    import_data = import_log.get_import_file_as_list()
    header_row = import_data.pop(0)
    header_row_field_names = []
    header_row_default = []
    header_row_null_on_empty = []
    error_data = [header_row + ['Error Type', 'Error Details']]
    create_count = 0
    update_count = 0
    fail_count = 0
    if 'commit' in request.GET and request.GET['commit'] == "True":
        commit = True
    else:
        commit = False

    key_column_name = None
    if import_log.update_key and import_log.import_type in ["U", "O"]:
        key_match = import_log.import_setting.columnmatch_set.get(column_name=import_log.update_key)
        key_column_name = key_match.column_name
        key_field_name = key_match.field_name
    for i, cell in enumerate(header_row):
        match = import_log.import_setting.columnmatch_set.get(column_name=cell)
        header_row_field_names += [match.field_name]
        header_row_default += [match.default_value]
        header_row_null_on_empty += [match.null_on_empty]
        if key_column_name is not None and key_column_name.lower() == cell.lower():
            key_index = i

    with transaction.atomic():
        sid = transaction.savepoint()
        for row in import_data:
            try:
                with transaction.atomic():
                    is_created = True
                    if import_log.import_type == "N":
                        new_object = model_class()
                    elif import_log.import_type == "O":
                        filters = {key_field_name: row[key_index]}
                        new_object = model_class.objects.get(**filters)
                        is_created = False
                    elif import_log.import_type == "U":
                        filters = {key_field_name: row[key_index]}
                        new_object = model_class.objects.filter(**filters).first()
                        if new_object == None:
                            new_object = model_class()
                            is_created = False

                    new_object.simple_import_m2ms = {} # Need to deal with these after saving
                    for i, cell in enumerate(row):
                        if header_row_field_names[i]: # skip blank
                            if not import_log.is_empty(cell) or header_row_null_on_empty[i]:
                                set_field_from_cell(import_log, new_object, header_row_field_names[i], cell)
                            elif header_row_default[i]:
                                set_field_from_cell(import_log, new_object, header_row_field_names[i], header_row_default[i])
                    new_object.save()

                    for i, cell in enumerate(row):
                        if header_row_field_names[i]: # skip blank
                            if not import_log.is_empty(cell) or header_row_null_on_empty[i]:
                                set_method_from_cell(import_log, new_object, header_row_field_names[i], cell)
                            elif header_row_default[i]:
                                set_method_from_cell(import_log, new_object, header_row_field_names[i], header_row_default[i])
                    new_object.save()

                    for key in new_object.simple_import_m2ms.keys():
                        value = new_object.simple_import_m2ms[key]
                        m2m = getattr(new_object, key)
                        m2m_model = type(m2m.model())
                        related_field_name = RelationalMatch.objects.get(import_log=import_log, field_name=key).related_field_name
                        m2m_object = m2m_model.objects.get(**{related_field_name:value})
                        m2m.add(m2m_object)

                    if is_created:
                        LogEntry.objects.log_action(
                            user_id         = request.user.pk,
                            content_type_id = ContentType.objects.get_for_model(new_object).pk,
                            object_id       = new_object.pk,
                            object_repr     = smart_text(new_object),
                            action_flag     = ADDITION
                        )
                        create_count += 1
                    else:
                        LogEntry.objects.log_action(
                            user_id         = request.user.pk,
                            content_type_id = ContentType.objects.get_for_model(new_object).pk,
                            object_id       = new_object.pk,
                            object_repr     = smart_text(new_object),
                            action_flag     = CHANGE
                        )
                        update_count += 1
                    ImportedObject.objects.create(
                        import_log = import_log,
                        object_id = new_object.pk,
                        content_type = import_log.import_setting.content_type)
            except IntegrityError:
                exc = sys.exc_info()
                error_data += [row + ["Integrity Error", smart_text(exc[1])]]
                fail_count += 1
            except ObjectDoesNotExist:
                exc = sys.exc_info()
                error_data += [row + ["No Record Found to Update", smart_text(exc[1])]]
                fail_count += 1
            except ValueError:
                exc = sys.exc_info()
                if str(exc[1]).startswith('invalid literal for int() with base 10'):
                    error_data += [row + ["Incompatible Data - A number was expected, but a character was used", smart_text(exc[1])]]
                else:
                    error_data += [row + ["Value Error", smart_text(exc[1])]]
                fail_count += 1
            except:
                error_data += [row + ["Unknown Error"]]
                fail_count += 1
        if not commit:
            transaction.savepoint_rollback(sid)


    if fail_count:
        from django.core.files.base import ContentFile
        from openpyxl.workbook import Workbook
        from openpyxl.writer.excel import save_virtual_workbook

        wb = Workbook()
        ws = wb.worksheets[0]
        ws.title = "Errors"
        filename = 'Errors.xlsx'
        for row in error_data:
            ws.append(row)
        import_log.error_file.save(filename, ContentFile(save_virtual_workbook(wb)))
        import_log.save()

    return render(
        request,
        'simple_import/do_import.html',
        {
            'error_data': error_data,
            'create_count': create_count,
            'update_count': update_count,
            'fail_count': fail_count,
            'import_log': import_log,
            'commit': commit,
            'success_undo': success_undo,},
    )
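
Note how do_import calls sys.exc_info() separately in each handler, so every failure type can be folded into the per-row error report without stopping the import. A stripped-down sketch of that collect-errors-per-row pattern follows; save_row is a hypothetical stand-in for the model-saving logic above.

import sys

def save_row(row):
    # Hypothetical stand-in for the real model-saving logic;
    # int() raises ValueError on non-numeric input.
    return int(row[0])

def import_rows(rows):
    error_data = []
    ok_count = 0
    for row in rows:
        try:
            save_row(row)
            ok_count += 1
        except ValueError:
            # sys.exc_info()[1] is the exception instance itself; str()
            # of it yields the message used in the error report.
            exc = sys.exc_info()
            error_data.append(row + ["Value Error", str(exc[1])])
        except Exception:
            error_data.append(row + ["Unknown Error"])
    return ok_count, error_data

print(import_rows([["1"], ["x"]]))
# -> (1, [['x', 'Value Error', "invalid literal for int() with base 10: 'x'"]])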

Example 26

Project: openwrt-mt7620
Source File: Taskmaster.py
View license
    def _find_next_ready_node(self):
        """
        Finds the next node that is ready to be built.

        This is *the* main guts of the DAG walk.  We loop through the
        list of candidates, looking for something that has no un-built
        children (i.e., that is a leaf Node or has dependencies that are
        all leaf Nodes or up-to-date).  Candidate Nodes are re-scanned
        (both the target Node itself and its sources, which are always
        scanned in the context of a given target) to discover implicit
        dependencies.  A Node that must wait for some children to be
        built will be put back on the candidates list after the children
        have finished building.  A Node that has been put back on the
        candidates list in this way may have itself (or its sources)
        re-scanned, in order to handle generated header files (e.g.) and
        the implicit dependencies therein.

        Note that this method does not do any signature calculation or
        up-to-date check itself.  All of that is handled by the Task
        class.  This is purely concerned with the dependency graph walk.
        """

        self.ready_exc = None

        T = self.trace
        if T: T.write(u'\n' + self.trace_message('Looking for a node to evaluate'))

        while True:
            node = self.next_candidate()
            if node is None:
                if T: T.write(self.trace_message('No candidate anymore.') + u'\n')
                return None

            node = node.disambiguate()
            state = node.get_state()

            # For debugging only:
            #
            # try:
            #     self._validate_pending_children()
            # except:
            #     self.ready_exc = sys.exc_info()
            #     return node

            if CollectStats:
                if not hasattr(node, 'stats'):
                    node.stats = Stats()
                    StatsNodes.append(node)
                S = node.stats
                S.considered = S.considered + 1
            else:
                S = None

            if T: T.write(self.trace_message(u'    Considering node %s and its children:' % self.trace_node(node)))

            if state == NODE_NO_STATE:
                # Mark this node as being on the execution stack:
                node.set_state(NODE_PENDING)
            elif state > NODE_PENDING:
                # Skip this node if it has already been evaluated:
                if S: S.already_handled = S.already_handled + 1
                if T: T.write(self.trace_message(u'       already handled (executed)'))
                continue

            executor = node.get_executor()

            try:
                children = executor.get_all_children()
            except SystemExit:
                exc_value = sys.exc_info()[1]
                e = SCons.Errors.ExplicitExit(node, exc_value.code)
                self.ready_exc = (SCons.Errors.ExplicitExit, e)
                if T: T.write(self.trace_message('       SystemExit'))
                return node
            except Exception as e:
                # We had a problem just trying to figure out the
                # children (like a child couldn't be linked in to a
                # VariantDir, or a Scanner threw something).  Arrange to
                # raise the exception when the Task is "executed."
                self.ready_exc = sys.exc_info()
                if S: S.problem = S.problem + 1
                if T: T.write(self.trace_message('       exception %s while scanning children.\n' % e))
                return node

            children_not_visited = []
            children_pending = set()
            children_not_ready = []
            children_failed = False

            for child in chain(executor.get_all_prerequisites(), children):
                childstate = child.get_state()

                if T: T.write(self.trace_message(u'       ' + self.trace_node(child)))

                if childstate == NODE_NO_STATE:
                    children_not_visited.append(child)
                elif childstate == NODE_PENDING:
                    children_pending.add(child)
                elif childstate == NODE_FAILED:
                    children_failed = True

                if childstate <= NODE_EXECUTING:
                    children_not_ready.append(child)


            # These nodes have not even been visited yet.  Add
            # them to the list so that on some next pass we can
            # take a stab at evaluating them (or their children).
            children_not_visited.reverse()
            self.candidates.extend(self.order(children_not_visited))
            #if T and children_not_visited:
            #    T.write(self.trace_message('     adding to candidates: %s' % map(str, children_not_visited)))
            #    T.write(self.trace_message('     candidates now: %s\n' % map(str, self.candidates)))

            # Skip this node if any of its children have failed.
            #
            # This catches the case where we're descending a top-level
            # target and one of our children failed while trying to be
            # built by a *previous* descent of an earlier top-level
            # target.
            #
            # It can also occur if a node is reused in multiple
            # targets. One first descends through one of the targets;
            # the next descent occurs through the other target.
            #
            # Note that we can only have children_failed if the
            # --keep-going flag was used, because without it the build
            # will stop before diving into the other branch.
            #
            # Note that even if one of the children fails, we still
            # added the other children to the list of candidate nodes
            # to keep on building (--keep-going).
            if children_failed:
                for n in executor.get_action_targets():
                    n.set_state(NODE_FAILED)

                if S: S.child_failed = S.child_failed + 1
                if T: T.write(self.trace_message('****** %s\n' % self.trace_node(node)))
                continue

            if children_not_ready:
                for child in children_not_ready:
                    # We're waiting on one or more derived targets
                    # that have not yet finished building.
                    if S: S.not_built = S.not_built + 1

                    # Add this node to the waiting parents lists of
                    # anything we're waiting on, with a reference
                    # count so we can be put back on the list for
                    # re-evaluation when they've all finished.
                    node.ref_count =  node.ref_count + child.add_to_waiting_parents(node)
                    if T: T.write(self.trace_message(u'     adjusted ref count: %s, child %s' %
                                  (self.trace_node(node), repr(str(child)))))

                if T:
                    for pc in children_pending:
                        T.write(self.trace_message('       adding %s to the pending children set\n' %
                                self.trace_node(pc)))
                self.pending_children = self.pending_children | children_pending

                continue

            # Skip this node if it has side-effects that are
            # currently being built:
            wait_side_effects = False
            for se in executor.get_action_side_effects():
                if se.get_state() == NODE_EXECUTING:
                    se.add_to_waiting_s_e(node)
                    wait_side_effects = True

            if wait_side_effects:
                if S: S.side_effects = S.side_effects + 1
                continue

            # The default when we've gotten through all of the checks above:
            # this node is ready to be built.
            if S: S.build = S.build + 1
            if T: T.write(self.trace_message(u'Evaluating %s\n' %
                                             self.trace_node(node)))

            # For debugging only:
            #
            # try:
            #     self._validate_pending_children()
            # except:
            #     self.ready_exc = sys.exc_info()
            #     return node

            return node

        return None
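
The interesting move here is self.ready_exc = sys.exc_info(): the whole (type, value, traceback) triple is stored where the scan fails and raised later, when the Task is "executed." A minimal sketch of that capture-now-raise-later idiom, using the Python 3 re-raise syntax (the Python 2 code above would use the three-argument raise statement):

import sys

class DeferredError:
    """Capture sys.exc_info() where the failure happens and re-raise it
    later, at a more convenient point in the control flow."""

    def __init__(self):
        self.ready_exc = None

    def scan(self):
        try:
            raise OSError("child could not be scanned")
        except Exception:
            # Save the full (type, value, traceback) triple for later.
            self.ready_exc = sys.exc_info()

    def execute(self):
        if self.ready_exc is not None:
            etype, value, tb = self.ready_exc
            # Python 3 re-raise; the original traceback is preserved.
            raise value.with_traceback(tb)

d = DeferredError()
d.scan()         # the error is captured here, nothing is raised yet
try:
    d.execute()  # ...and raised here, with the traceback from scan()
except OSError as e:
    print("deferred:", e)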

Example 27

Project: scalyr-agent-2
Source File: service.py
View license
    def processIncomingMsg(
        self,
        snmpEngine,
        messageProcessingModel,
        maxMessageSize,
        securityParameters,
        securityModel,
        securityLevel,
        wholeMsg,
        msg  # XXX 
        ):
        # 3.2.9 -- moved up here to be able to report
        # maxSizeResponseScopedPDU on error
        # (48 - maximum SNMPv3 header length)
        maxSizeResponseScopedPDU = int(maxMessageSize) - len(securityParameters) - 48

        debug.logger & debug.flagSM and debug.logger('processIncomingMsg: securityParameters %s' % debug.hexdump(securityParameters))

        # 3.2.1 
        try:
            securityParameters, rest = decoder.decode(
                securityParameters,
                asn1Spec=self.__securityParametersSpec
                )
        except PyAsn1Error:
            debug.logger & debug.flagSM and debug.logger('processIncomingMsg: %s' % (sys.exc_info()[1],))
            snmpInASNParseErrs, = snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder.importSymbols('__SNMPv2-MIB', 'snmpInASNParseErrs')
            snmpInASNParseErrs.syntax = snmpInASNParseErrs.syntax + 1
            raise error.StatusInformation(
                errorIndication=errind.parseError
            )

        debug.logger & debug.flagSM and debug.logger('processIncomingMsg: %s' % (securityParameters.prettyPrint(),))

        if eoo.endOfOctets.isSameTypeWith(securityParameters):
            raise error.StatusInformation(
                errorIndication=errind.parseError
            )

        # 3.2.2
        msgAuthoritativeEngineID = securityParameters.getComponentByPosition(0)
        securityStateReference = self._cache.push(
            msgUserName=securityParameters.getComponentByPosition(3)
            )

        debug.logger & debug.flagSM and debug.logger('processIncomingMsg: cache write securityStateReference %s by msgUserName %s' % (securityStateReference, securityParameters.getComponentByPosition(3)))
        
        scopedPduData = msg.getComponentByPosition(3)

        # Used for error reporting
        contextEngineId = snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder.importSymbols('__SNMP-FRAMEWORK-MIB', 'snmpEngineID')[0].syntax
        contextName = null

        # 3.2.3
        if msgAuthoritativeEngineID not in self.__timeline:
            debug.logger & debug.flagSM and debug.logger('processIncomingMsg: unknown securityEngineID %r' % (msgAuthoritativeEngineID,))
            if not msgAuthoritativeEngineID or  \
                    not 4 < len(msgAuthoritativeEngineID) < 33:
                # 3.2.3b
                usmStatsUnknownEngineIDs, = snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder.importSymbols('__SNMP-USER-BASED-SM-MIB', 'usmStatsUnknownEngineIDs')
                usmStatsUnknownEngineIDs.syntax = usmStatsUnknownEngineIDs.syntax+1
                debug.logger & debug.flagSM and debug.logger('processIncomingMsg: null or malformed msgAuthoritativeEngineID')
                pysnmpUsmDiscoverable, = snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder.importSymbols('__PYSNMP-USM-MIB', 'pysnmpUsmDiscoverable')
                if pysnmpUsmDiscoverable.syntax:
                    debug.logger & debug.flagSM and debug.logger('processIncomingMsg: starting snmpEngineID discovery procedure')

                    # Report original contextName
                    if scopedPduData.getName() != 'plaintext':
                        debug.logger & debug.flagSM and debug.logger('processIncomingMsg: scopedPduData not plaintext %s' % scopedPduData.prettyPrint())
                        raise error.StatusInformation(
                            errorIndication = errind.unknownEngineID
                            )

                    # 7.2.6.a.1 
                    scopedPdu = scopedPduData.getComponent()
                    contextEngineId = scopedPdu.getComponentByPosition(0)
                    contextName = scopedPdu.getComponentByPosition(1)

                    raise error.StatusInformation(
                        errorIndication = errind.unknownEngineID,
                        oid=usmStatsUnknownEngineIDs.name,
                        val=usmStatsUnknownEngineIDs.syntax,
                        securityStateReference=securityStateReference,
                        securityLevel=securityLevel,
                        contextEngineId=contextEngineId,
                        contextName=contextName,
                        scopedPDU=scopedPdu,
                        maxSizeResponseScopedPDU=maxSizeResponseScopedPDU
                        )
                else:
                    debug.logger & debug.flagSM and debug.logger('processIncomingMsg: will not discover EngineID')                    
                    # free securityStateReference XXX
                    raise error.StatusInformation(
                        errorIndication = errind.unknownEngineID
                        )

        snmpEngineID = snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder.importSymbols('__SNMP-FRAMEWORK-MIB', 'snmpEngineID')[0].syntax
 
        msgUserName = securityParameters.getComponentByPosition(3)

        debug.logger & debug.flagSM and debug.logger('processIncomingMsg: read from securityParams msgAuthoritativeEngineID %r msgUserName %r' % (msgAuthoritativeEngineID, msgUserName))
        
        if msgUserName:
            # 3.2.4
            try:
                ( usmUserName,
                  usmUserSecurityName,
                  usmUserAuthProtocol,
                  usmUserAuthKeyLocalized,
                  usmUserPrivProtocol,
                  usmUserPrivKeyLocalized ) = self.__getUserInfo(
                    snmpEngine.msgAndPduDsp.mibInstrumController,
                    msgAuthoritativeEngineID,
                    msgUserName
                )
                debug.logger & debug.flagSM and debug.logger('processIncomingMsg: read user info from LCD')
            except NoSuchInstanceError:
                pysnmpUsmDiscoverable, = snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder.importSymbols('__PYSNMP-USM-MIB', 'pysnmpUsmDiscoverable')
                __reportUnknownName = not pysnmpUsmDiscoverable.syntax
                if not __reportUnknownName:
                    try:
                        ( usmUserName,
                          usmUserSecurityName,
                          usmUserAuthProtocol,
                          usmUserAuthKeyLocalized,
                          usmUserPrivProtocol,
                          usmUserPrivKeyLocalized ) = self.__cloneUserInfo(
                            snmpEngine.msgAndPduDsp.mibInstrumController,
                            msgAuthoritativeEngineID,
                            msgUserName
                        )
                        debug.logger & debug.flagSM and debug.logger('processIncomingMsg: cloned user info')
                    except NoSuchInstanceError:
                        __reportUnknownName = 1
                debug.logger & debug.flagSM and debug.logger('processIncomingMsg: unknown securityEngineID %r msgUserName %r' % (msgAuthoritativeEngineID, msgUserName))
                if __reportUnknownName:
                    usmStatsUnknownUserNames, = snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder.importSymbols('__SNMP-USER-BASED-SM-MIB', 'usmStatsUnknownUserNames')
                    usmStatsUnknownUserNames.syntax = usmStatsUnknownUserNames.syntax+1
                    raise error.StatusInformation(
                        errorIndication = errind.unknownSecurityName,
                        oid = usmStatsUnknownUserNames.name,
                        val = usmStatsUnknownUserNames.syntax,
                        securityStateReference=securityStateReference,
                        securityLevel=securityLevel,
                        contextEngineId=contextEngineId,
                        contextName=contextName,
                        maxSizeResponseScopedPDU=maxSizeResponseScopedPDU
                        )
            except PyAsn1Error:
                debug.logger & debug.flagSM and debug.logger('processIncomingMsg: %s' % (sys.exc_info()[1],))
                snmpInGenErrs, = snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder.importSymbols('__SNMPv2-MIB', 'snmpInGenErrs')
                snmpInGenErrs.syntax = snmpInGenErrs.syntax + 1
                raise error.StatusInformation(
                    errorIndication=errind.invalidMsg
                )
        else:
            # empty username used for engineID discovery
            usmUserName = usmUserSecurityName = null
            usmUserAuthProtocol = noauth.NoAuth.serviceID
            usmUserPrivProtocol = nopriv.NoPriv.serviceID
            usmUserAuthKeyLocalized = usmUserPrivKeyLocalized = None

        debug.logger & debug.flagSM and debug.logger('processIncomingMsg: now have usmUserName %r usmUserSecurityName %r usmUserAuthProtocol %r usmUserPrivProtocol %r for msgUserName %r' % (usmUserName, usmUserSecurityName, usmUserAuthProtocol, usmUserPrivProtocol, msgUserName))

        # 3.2.11 (moved up here to let Reports be authenticated & encrypted)
        self._cache.pop(securityStateReference)
        securityStateReference = self._cache.push(
            msgUserName=securityParameters.getComponentByPosition(3),
            usmUserSecurityName=usmUserSecurityName,
            usmUserAuthProtocol=usmUserAuthProtocol,
            usmUserAuthKeyLocalized=usmUserAuthKeyLocalized,
            usmUserPrivProtocol=usmUserPrivProtocol,
            usmUserPrivKeyLocalized=usmUserPrivKeyLocalized
        )

        msgAuthoritativeEngineBoots = securityParameters.getComponentByPosition(1)
        msgAuthoritativeEngineTime = securityParameters.getComponentByPosition(2)

        # 3.2.5
        if msgAuthoritativeEngineID == snmpEngineID:
            # Authoritative SNMP engine: make sure securityLevel is sufficient
            __badSecIndication = None
            if securityLevel == 3:
                if usmUserAuthProtocol == noauth.NoAuth.serviceID:
                    __badSecIndication = 'authPriv wanted while auth not expected'
                if usmUserPrivProtocol == nopriv.NoPriv.serviceID:
                    __badSecIndication = 'authPriv wanted while priv not expected'
            elif securityLevel == 2:
                if usmUserAuthProtocol == noauth.NoAuth.serviceID:
                    __badSecIndication = 'authNoPriv wanted while auth not expected'
                if usmUserPrivProtocol != nopriv.NoPriv.serviceID:
                    # 4 (discovery phase always uses authenticated messages)
                    if msgAuthoritativeEngineBoots or msgAuthoritativeEngineTime:
                        __badSecIndication = 'authNoPriv wanted while priv expected'

            elif securityLevel == 1:
                if usmUserAuthProtocol != noauth.NoAuth.serviceID:
                    __badSecIndication = 'noAuthNoPriv wanted while auth expected'
                if usmUserPrivProtocol != nopriv.NoPriv.serviceID:
                    __badSecIndication = 'noAuthNoPriv wanted while priv expected'
            if __badSecIndication:
                usmStatsUnsupportedSecLevels, = snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder.importSymbols('__SNMP-USER-BASED-SM-MIB', 'usmStatsUnsupportedSecLevels')
                usmStatsUnsupportedSecLevels.syntax = usmStatsUnsupportedSecLevels.syntax + 1
                debug.logger & debug.flagSM and debug.logger('processIncomingMsg: reporting inappropriate security level for user %s: %s' % (msgUserName, __badSecIndication))
                raise error.StatusInformation(
                    errorIndication=errind.unsupportedSecurityLevel,
                    oid=usmStatsUnsupportedSecLevels.name,
                    val=usmStatsUnsupportedSecLevels.syntax,
                    securityStateReference=securityStateReference,
                    securityLevel=securityLevel,
                    contextEngineId=contextEngineId,
                    contextName=contextName,
                    maxSizeResponseScopedPDU=maxSizeResponseScopedPDU
                    )

        # 3.2.6
        if securityLevel == 3 or securityLevel == 2:
            if usmUserAuthProtocol in self.authServices:
                authHandler = self.authServices[usmUserAuthProtocol]
            else:
                raise error.StatusInformation(
                    errorIndication = errind.authenticationFailure
                    )
            try:
                authenticatedWholeMsg = authHandler.authenticateIncomingMsg(
                    usmUserAuthKeyLocalized,
                    securityParameters.getComponentByPosition(4),
                    wholeMsg
                    )
            except error.StatusInformation:
                usmStatsWrongDigests, = snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder.importSymbols('__SNMP-USER-BASED-SM-MIB', 'usmStatsWrongDigests')
                usmStatsWrongDigests.syntax = usmStatsWrongDigests.syntax+1
                raise error.StatusInformation(
                    errorIndication = errind.authenticationFailure,
                    oid=usmStatsWrongDigests.name,
                    val=usmStatsWrongDigests.syntax,
                    securityStateReference=securityStateReference,
                    securityLevel=securityLevel,
                    contextEngineId=contextEngineId,
                    contextName=contextName,
                    maxSizeResponseScopedPDU=maxSizeResponseScopedPDU
                    )
            
            debug.logger & debug.flagSM and debug.logger('processIncomingMsg: incoming msg authenticated')

            if msgAuthoritativeEngineID:
                # 3.2.3a moved down here to execute only for authed msg
                self.__timeline[msgAuthoritativeEngineID] = (
                    securityParameters.getComponentByPosition(1),
                    securityParameters.getComponentByPosition(2),
                    securityParameters.getComponentByPosition(2),
                    int(time.time())
                    )
                
                expireAt = int(self.__expirationTimer + 300 / snmpEngine.transportDispatcher.getTimerResolution())
                if expireAt not in self.__timelineExpQueue:
                    self.__timelineExpQueue[expireAt] = []
                self.__timelineExpQueue[expireAt].append(
                    msgAuthoritativeEngineID
                    )
                    
                debug.logger & debug.flagSM and debug.logger('processIncomingMsg: store timeline for securityEngineID %r' % (msgAuthoritativeEngineID,))
            
        # 3.2.7
        if securityLevel == 3 or securityLevel == 2:
            if msgAuthoritativeEngineID == snmpEngineID:
                # Authoritative SNMP engine: use local notion (SF bug #1649032)
                ( snmpEngineBoots,
                  snmpEngineTime ) = snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder.importSymbols('__SNMP-FRAMEWORK-MIB', 'snmpEngineBoots', 'snmpEngineTime')
                snmpEngineBoots = snmpEngineBoots.syntax
                snmpEngineTime = snmpEngineTime.syntax.clone()
                idleTime = 0
                debug.logger & debug.flagSM and debug.logger('processIncomingMsg: read snmpEngineBoots (%s), snmpEngineTime (%s) from LCD' % (snmpEngineBoots, snmpEngineTime))
            else:
                # Non-authoritative SNMP engine: use cached estimates
                if msgAuthoritativeEngineID in self.__timeline:
                    ( snmpEngineBoots,
                      snmpEngineTime,
                      latestReceivedEngineTime,
                      latestUpdateTimestamp ) = self.__timeline[
                        msgAuthoritativeEngineID
                        ]
                    # time passed since last talk with this SNMP engine
                    idleTime = int(time.time())-latestUpdateTimestamp
                    debug.logger & debug.flagSM and debug.logger('processIncomingMsg: read timeline snmpEngineBoots %s snmpEngineTime %s for msgAuthoritativeEngineID %r, idle time %s secs' % (snmpEngineBoots, snmpEngineTime, msgAuthoritativeEngineID, idleTime))
                else:
                    raise error.ProtocolError('Peer SNMP engine info missing')

            # 3.2.7a
            if msgAuthoritativeEngineID == snmpEngineID:
                if snmpEngineBoots == 2147483647 or \
                   snmpEngineBoots != msgAuthoritativeEngineBoots or \
                   abs(idleTime + int(snmpEngineTime) - \
                       int(msgAuthoritativeEngineTime)) > 150:
                    usmStatsNotInTimeWindows, = snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder.importSymbols('__SNMP-USER-BASED-SM-MIB', 'usmStatsNotInTimeWindows')
                    usmStatsNotInTimeWindows.syntax = usmStatsNotInTimeWindows.syntax+1
                    raise error.StatusInformation(
                        errorIndication = errind.notInTimeWindow,
                        oid=usmStatsNotInTimeWindows.name,
                        val=usmStatsNotInTimeWindows.syntax,
                        securityStateReference=securityStateReference,
                        securityLevel=2,
                        contextEngineId=contextEngineId,
                        contextName=contextName,
                        maxSizeResponseScopedPDU=maxSizeResponseScopedPDU
                        )
            # 3.2.7b
            else:
                # 3.2.7b.1
                if msgAuthoritativeEngineBoots > snmpEngineBoots or \
                   msgAuthoritativeEngineBoots == snmpEngineBoots and \
                   msgAuthoritativeEngineTime > latestReceivedEngineTime:
                    self.__timeline[msgAuthoritativeEngineID] = (
                        msgAuthoritativeEngineBoots,
                        msgAuthoritativeEngineTime,
                        msgAuthoritativeEngineTime,
                        int(time.time())
                        )
                    expireAt = int(self.__expirationTimer + 300 / snmpEngine.transportDispatcher.getTimerResolution())
                    if expireAt not in self.__timelineExpQueue:
                        self.__timelineExpQueue[expireAt] = []
                    self.__timelineExpQueue[expireAt].append(
                        msgAuthoritativeEngineID
                        )

                    debug.logger & debug.flagSM and debug.logger('processIncomingMsg: stored timeline msgAuthoritativeEngineBoots %s msgAuthoritativeEngineTime %s for msgAuthoritativeEngineID %r' % (msgAuthoritativeEngineBoots, msgAuthoritativeEngineTime, msgAuthoritativeEngineID))
                    
                # 3.2.7b.2
                if snmpEngineBoots == 2147483647 or \
                   msgAuthoritativeEngineBoots < snmpEngineBoots or \
                   msgAuthoritativeEngineBoots == snmpEngineBoots and \
                   abs(idleTime + int(snmpEngineTime) - \
                       int(msgAuthoritativeEngineTime)) > 150:
                    raise error.StatusInformation(
                        errorIndication = errind.notInTimeWindow
                        )

        # 3.2.8a
        if securityLevel == 3:
            if usmUserPrivProtocol in self.privServices:
                privHandler = self.privServices[usmUserPrivProtocol]
            else:
                raise error.StatusInformation(
                    errorIndication = errind.decryptionError
                    )
            encryptedPDU = scopedPduData.getComponentByPosition(1)
            if encryptedPDU is None: # no ciphertext
                raise error.StatusInformation(
                    errorIndication = errind.decryptionError
                    )

            try:
                decryptedData = privHandler.decryptData(
                    usmUserPrivKeyLocalized,
                        ( securityParameters.getComponentByPosition(1),
                          securityParameters.getComponentByPosition(2),
                          securityParameters.getComponentByPosition(5) ),
                        encryptedPDU
                    )
                debug.logger & debug.flagSM and debug.logger('processIncomingMsg: PDU deciphered into %s' % debug.hexdump(decryptedData))
            except error.StatusInformation:
                usmStatsDecryptionErrors, = snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder.importSymbols('__SNMP-USER-BASED-SM-MIB', 'usmStatsDecryptionErrors')
                usmStatsDecryptionErrors.syntax = usmStatsDecryptionErrors.syntax+1
                raise error.StatusInformation(
                    errorIndication = errind.decryptionError,
                    oid=usmStatsDecryptionErrors.name,
                    val=usmStatsDecryptionErrors.syntax,
                    securityStateReference=securityStateReference,
                    securityLevel=securityLevel,
                    contextEngineId=contextEngineId,
                    contextName=contextName,
                    maxSizeResponseScopedPDU=maxSizeResponseScopedPDU
                    )
            scopedPduSpec = scopedPduData.setComponentByPosition(0).getComponentByPosition(0)
            try:
                scopedPDU, rest = decoder.decode(
                    decryptedData, asn1Spec=scopedPduSpec
                    )
            except PyAsn1Error:
                debug.logger & debug.flagSM and debug.logger('processIncomingMsg: scopedPDU decoder failed %s' % sys.exc_info()[0])                
                raise error.StatusInformation(
                    errorIndication = errind.decryptionError
                    )

            if eoo.endOfOctets.isSameTypeWith(scopedPDU):
                raise error.StatusInformation(
                    errorIndication=errind.decryptionError
                )
        else:
            # 3.2.8b
            scopedPDU = scopedPduData.getComponentByPosition(0)
            if scopedPDU is None:  # no plaintext
                raise error.StatusInformation(
                    errorIndication = errind.decryptionError
                    )

        debug.logger & debug.flagSM and debug.logger('processIncomingMsg: scopedPDU decoded %s' % scopedPDU.prettyPrint()) 

        # 3.2.10
        securityName = usmUserSecurityName
        
        debug.logger & debug.flagSM and debug.logger('processIncomingMsg: cached msgUserName %s info by securityStateReference %s' % (msgUserName, securityStateReference))
        
        # Delayed to include details
        if not msgUserName and not msgAuthoritativeEngineID:
            usmStatsUnknownUserNames, = snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder.importSymbols('__SNMP-USER-BASED-SM-MIB', 'usmStatsUnknownUserNames')
            usmStatsUnknownUserNames.syntax = usmStatsUnknownUserNames.syntax+1
            raise error.StatusInformation(
                errorIndication=errind.unknownSecurityName,
                oid=usmStatsUnknownUserNames.name,
                val=usmStatsUnknownUserNames.syntax,
                securityStateReference=securityStateReference,
                securityEngineID=msgAuthoritativeEngineID,
                securityLevel=securityLevel,
                contextEngineId=contextEngineId,
                contextName=contextName,
                maxSizeResponseScopedPDU=maxSizeResponseScopedPDU,
                PDU=scopedPDU
                )

        # 3.2.12
        return ( msgAuthoritativeEngineID,
                 securityName,
                 scopedPDU,
                 maxSizeResponseScopedPDU,
                 securityStateReference )
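
This example never writes "except PyAsn1Error as e"; it reads sys.exc_info()[1] inside the handler instead, a style that also keeps one codebase valid under both the Python 2 and Python 3 except-clause syntaxes. A small sketch of that logging style; decode here is a hypothetical stand-in for decoder.decode.

import sys

def decode(data):
    # Hypothetical stand-in for decoder.decode(); rejects empty input.
    if not data:
        raise ValueError('empty message')
    return data

def process_incoming(data, logger=sys.stderr.write):
    try:
        return decode(data)
    except ValueError:
        # Reading sys.exc_info()[1] instead of binding the exception with
        # 'except ... as e' keeps the handler valid under both the Python 2
        # and Python 3 except-clause syntaxes.
        logger('processIncomingMsg: %s\n' % (sys.exc_info()[1],))
        raise

process_incoming(b'\x30\x00')   # decodes fine
try:
    process_incoming(b'')       # logs the error, then re-raises it
except ValueError:
    pass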

Example 28

Project: MAClient
Source File: player.py
View license
  def _readerLoop( self ):
    f= None
    try:
      while self.exitFlag== 0:
        if self.playingFile== None:
          time.sleep( 0.01 )
          continue
        
        self.length= self.frameNum= -1
        # Initialize the demuxer and read a small portion of the file to gather more info on the format
        self.clearError()
        if type( self.playingFile ) in ( str, unicode ):
          try:
            f= open( self.playingFile, 'rb' )
            format= self.playingFile.split( '.' )[ -1 ].lower()
          except:
            traceback.print_exc()
            self.err.append( sys.exc_info() )
            self.playingFile= None
            continue
        else:
          format= self.fileFormat
          f= self.playingFile
        
        try:
          dm= muxer.Demuxer( format )
          s= f.read( FILE_CHUNK )
          r= dm.parse( s )
        except:
          traceback.print_exc()
          self.err.append( sys.exc_info() )
          self.playingFile= None
          continue
        
        try: self.metaData= dm.getHeaderInfo()
        except: self.metaData= {}
        
        # This seek sets the seeking position already at the desired offset from the beginning
        if self.startPos:
          self.seekTo( self.startPos )
        
        # Setup video( only first matching stream will be used )
        self.clearError()
        self.vindex= -1
        streams= filter( lambda x: x, dm.streams )
        for st in streams:
          if st and st[ 'type' ]== muxer.CODEC_TYPE_VIDEO:
            self._initVideo( st )
            self.vindex= list( streams ).index( st )
            break
        
        # Setup audio( only first matching stream will be used )
        self.aindex= -1
        self.aPTS= None
        for st in streams:
          if st and st[ 'type' ]== muxer.CODEC_TYPE_AUDIO:
            self._initAudio( st )
            self.aindex= list( streams ).index( st )
            break
        
        # Open current file for playing
        currentFile= self.playingFile
        if self.vindex>= 0:
          self._getVStreamParams( self.vindex, streams[ self.vindex ], r )
        
        self._getStreamLength( format, dm, f, r )
        
        # Play until no exit flag, not eof, no errs and file still the same
        while len(s) and len( self.err )== 0 and \
            self.exitFlag== 0 and self.playingFile and len( streams ) and \
            self.playingFile== currentFile:
          
          if self.isPaused():
            time.sleep( PAUSE_SLEEP )
            continue
        
          for d in r:
            if self.playingFile!= currentFile:
              break
            
            # Seeking stuff
            if self.seek>= 0:
              # Find the file position first
              if self.length> 0 and self.fileSize> 0:
                #print self.seek, self.length, self.fileSize, ( float( self.seek ) / self.length )* self.fileSize
                f.seek( ( float( self.seek ) / self.length )* self.fileSize, 0 )
              else:
                f.seek( self.seek* self.getBitRate()/ 8, 0 )
                #print self.seek, self.getBitRate(), f.tell()
              
              #print 'seek to', self.seek, f.tell()
              seek= self.seek
              self.aDecodedFrames= []
              if self.ac:
                self.ac.reset()
                self.snd.stop()
              self.rawFrames= []
              self.decodedFrames= []
              if self.vc:
                self.vc.reset()
              
              dm.reset()
              if self.vindex== -1:
                # Seek immediately if only audio stream is available
                self.seek= -1
                self.aDelta= seek
              else:
                # Wait for a key video frame to arrive
                self.seek= SEEK_IN_PROGRESS
              break
            
            # See if we reached the end position of the video clip
            if self.endPos and self._getPTS()* 1000> self.endPos:
              # Seek at the end and close the reading loop instantly 
              f.seek( 0, 2 )
              break
            
            try:
              # Update length if not set already
              if self.getLength()== -1 and self.getBitRate()> 0:
                # Check file size against length and bitrates
                self.length= self.fileSize/ ( self.getBitRate()/ 8 )
              
              # Demux file into streams
              if d[ 0 ]== self.vindex:
                # Process video frame
                seek= self.seek
                self._processVideoFrame( d )
                if self.seek!= SEEK_IN_PROGRESS and seek== SEEK_IN_PROGRESS:
                  # If key frame was found, change the time position
                  self.videoPTS= ( float( f.tell() )/ self.fileSize )* self.length
                  self.aDelta= self.videoPTS+ self.initADelta
                  #print '---->position', f.tell(), self.aDelta
              elif d[ 0 ]== self.aindex and self.seek!= SEEK_IN_PROGRESS:
                # Decode and play audio frame
                self._processAudioFrame( d )
            except:
              traceback.print_exc()
              self.err.append( sys.exc_info() )
              self.playingFile= None
              break
          
          # Read next encoded chunk and demux it
          try:
            s= f.read( 512 )
            r= dm.parse( s )
          except:
            traceback.print_exc()
            self.err.append( sys.exc_info() )
            self.playingFile= None
            continue
        
        if f: f.close()

        # Close current file when error detected
        if len( self.err ):
          self.stopPlayback()
        
        # Wait until all frames are played
        while self._hasQueue()> 0 and self.isPlaying():
          self._processVideoFrame( None, True )
          self._processAudio()
        
        while self.aindex!= -1 and self._getSndLeft()> 0 and self.isPlaying():
          time.sleep( 0.01 )
        
        self._resetAudio()
        self._resetVideo()
        
        if self.loops> 0:
          self.loops-= 1
          continue
        
        # Report the file end
        try: f= self.callback.onPlaybackEnd
        except: f= None
        if f: 
          f( self )
        else:
          self.playingFile= None
    except:
      traceback.print_exc()
    
    self.exitFlag= 1
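
The reader thread above never lets an exception escape its loop: each failure is printed and its full sys.exc_info() triple appended to self.err, which the playback loops poll (len(self.err) == 0) to decide when to stop. A condensed sketch of that collect-and-poll pattern:

import sys
import traceback

def reader_loop(chunks):
    # Mirrors self.err above: print each failure, append the full
    # sys.exc_info() triple to an error list, and keep the loop alive
    # so the surrounding code can poll the list and decide what to do.
    errors = []
    for chunk in chunks:
        try:
            if chunk is None:
                raise IOError('demuxer failed on chunk')
        except Exception:
            traceback.print_exc()
            errors.append(sys.exc_info())
            continue
    return errors

errs = reader_loop([b'frame1', None, b'frame2'])
print(len(errs), errs[0][1])   # -> 1 demuxer failed on chunk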

Example 29

Project: popcorn_maker
Source File: multiprocess.py
View license
    def run(self, test):
        """
        Execute the test (which may be a test suite). If the test is a suite,
        distribute it out among as many processes as have been configured, at
        as fine a level as is possible given the context fixtures defined in
        the suite or any sub-suites.

        """
        log.debug("%s.run(%s) (%s)", self, test, os.getpid())
        wrapper = self.config.plugins.prepareTest(test)
        if wrapper is not None:
            test = wrapper

        # plugins can decorate or capture the output stream
        wrapped = self.config.plugins.setOutputStream(self.stream)
        if wrapped is not None:
            self.stream = wrapped

        testQueue = Queue()
        resultQueue = Queue()
        tasks = []
        completed = []
        workers = []
        to_teardown = []
        shouldStop = Event()

        result = self._makeResult()
        start = time.time()

        # dispatch and collect results
        # put indexes only on queue because tests aren't picklable
        for case in self.nextBatch(test):
            log.debug("Next batch %s (%s)", case, type(case))
            if (isinstance(case, nose.case.Test) and
                isinstance(case.test, failure.Failure)):
                log.debug("Case is a Failure")
                case(result) # run here to capture the failure
                continue
            # handle shared fixtures
            if isinstance(case, ContextSuite) and case.context is failure.Failure:
                log.debug("Case is a Failure")
                case(result) # run here to capture the failure
                continue
            elif isinstance(case, ContextSuite) and self.sharedFixtures(case):
                log.debug("%s has shared fixtures", case)
                try:
                    case.setUp()
                except (KeyboardInterrupt, SystemExit):
                    raise
                except:
                    log.debug("%s setup failed", sys.exc_info())
                    result.addError(case, sys.exc_info())
                else:
                    to_teardown.append(case)
                    for _t in case:
                        test_addr = self.addtask(testQueue,tasks,_t)
                        log.debug("Queued shared-fixture test %s (%s) to %s",
                                  len(tasks), test_addr, testQueue)

            else:
                test_addr = self.addtask(testQueue,tasks,case)
                log.debug("Queued test %s (%s) to %s",
                          len(tasks), test_addr, testQueue)

        log.debug("Starting %s workers", self.config.multiprocess_workers)
        for i in range(self.config.multiprocess_workers):
            currentaddr = Value('c',bytes_(''))
            currentstart = Value('d',0.0)
            keyboardCaught = Event()
            p = Process(target=runner, args=(i, testQueue, resultQueue,
                                             currentaddr, currentstart,
                                             keyboardCaught, shouldStop,
                                             self.loaderClass,
                                             result.__class__,
                                             pickle.dumps(self.config)))
            p.currentaddr = currentaddr
            p.currentstart = currentstart
            p.keyboardCaught = keyboardCaught
            # p.setDaemon(True)
            p.start()
            workers.append(p)
            log.debug("Started worker process %s", i+1)

        total_tasks = len(tasks)
        # need to keep track of the next time to check for timeouts in case
        # more than one process times out at the same time.
        nexttimeout=self.config.multiprocess_timeout
        while tasks:
            log.debug("Waiting for results (%s/%s tasks), next timeout=%.3fs",
                      len(completed), total_tasks,nexttimeout)
            try:
                iworker, addr, newtask_addrs, batch_result = resultQueue.get(
                                                        timeout=nexttimeout)
                log.debug('Results received for worker %d, %s, new tasks: %d',
                          iworker,addr,len(newtask_addrs))
                try:
                    try:
                        tasks.remove(addr)
                    except ValueError:
                        log.warn('worker %s failed to remove from tasks: %s',
                                 iworker,addr)
                    total_tasks += len(newtask_addrs)
                    for newaddr in newtask_addrs:
                        tasks.append(newaddr)
                except KeyError:
                    log.debug("Got result for unknown task? %s", addr)
                    log.debug("current: %s",str(list(tasks)[0]))
                else:
                    completed.append([addr,batch_result])
                self.consolidate(result, batch_result)
                if (self.config.stopOnError
                    and not result.wasSuccessful()):
                    # set the stop condition
                    shouldStop.set()
                    break
                if self.config.multiprocess_restartworker:
                    log.debug('joining worker %s',iworker)
                    # wait for the worker; it is not critical if it
                    # cannot be joined, since workers that add to
                    # testQueue will not terminate until all their
                    # items are read
                    workers[iworker].join(timeout=1)
                    if not shouldStop.is_set() and not testQueue.empty():
                        log.debug('starting new process on worker %s',iworker)
                        currentaddr = Value('c',bytes_(''))
                        currentstart = Value('d',time.time())
                        keyboardCaught = Event()
                        workers[iworker] = Process(target=runner,
                                                   args=(iworker, testQueue,
                                                         resultQueue,
                                                         currentaddr,
                                                         currentstart,
                                                         keyboardCaught,
                                                         shouldStop,
                                                         self.loaderClass,
                                                         result.__class__,
                                                         pickle.dumps(self.config)))
                        workers[iworker].currentaddr = currentaddr
                        workers[iworker].currentstart = currentstart
                        workers[iworker].keyboardCaught = keyboardCaught
                        workers[iworker].start()
            except Empty:
                log.debug("Timed out with %s tasks pending "
                          "(empty testQueue=%d): %s",
                          len(tasks),testQueue.empty(),str(tasks))
                any_alive = False
                for iworker, w in enumerate(workers):
                    if w.is_alive():
                        worker_addr = bytes_(w.currentaddr.value,'ascii')
                        timeprocessing = time.time() - w.currentstart.value
                        if ( len(worker_addr) == 0
                                and timeprocessing > self.config.multiprocess_timeout-0.1):
                            log.debug('worker %d has finished its work item, '
                                      'but is not exiting? do we wait for it?',
                                      iworker)
                        else:
                            any_alive = True
                        if (len(worker_addr) > 0
                            and timeprocessing > self.config.multiprocess_timeout-0.1):
                            log.debug('timed out worker %s: %s',
                                      iworker,worker_addr)
                            w.currentaddr.value = bytes_('')
                            # If the process is in C++ code, sending a SIGINT
                            # might not raise a Python KeyboardInterrupt
                            # exception; therefore, send multiple signals until an
                            # exception is caught. If this takes too long, then
                            # terminate the process
                            w.keyboardCaught.clear()
                            startkilltime = time.time()
                            while not w.keyboardCaught.is_set() and w.is_alive():
                                if time.time()-startkilltime > self.waitkilltime:
                                    # have to terminate...
                                    log.error("terminating worker %s",iworker)
                                    w.terminate()
                                    currentaddr = Value('c',bytes_(''))
                                    currentstart = Value('d',time.time())
                                    keyboardCaught = Event()
                                    workers[iworker] = Process(target=runner,
                                        args=(iworker, testQueue, resultQueue,
                                              currentaddr, currentstart,
                                              keyboardCaught, shouldStop,
                                              self.loaderClass,
                                              result.__class__,
                                              pickle.dumps(self.config)))
                                    workers[iworker].currentaddr = currentaddr
                                    workers[iworker].currentstart = currentstart
                                    workers[iworker].keyboardCaught = keyboardCaught
                                    workers[iworker].start()
                                    # there is a small probability that the
                                    # terminated process might send a result,
                                    # which has to be specially handled or
                                    # else processes might get orphaned.
                                    w = workers[iworker]
                                    break
                                os.kill(w.pid, signal.SIGINT)
                                time.sleep(0.1)
                if not any_alive and testQueue.empty():
                    log.debug("All workers dead")
                    break
            nexttimeout=self.config.multiprocess_timeout
            for w in workers:
                if w.is_alive() and len(w.currentaddr.value) > 0:
                    timeprocessing = time.time()-w.currentstart.value
                    if timeprocessing <= self.config.multiprocess_timeout:
                        nexttimeout = min(nexttimeout,
                            self.config.multiprocess_timeout-timeprocessing)

        log.debug("Completed %s tasks (%s remain)", len(completed), len(tasks))

        for case in to_teardown:
            log.debug("Tearing down shared fixtures for %s", case)
            try:
                case.tearDown()
            except (KeyboardInterrupt, SystemExit):
                raise
            except:
                result.addError(case, sys.exc_info())

        stop = time.time()

        # write the results first, since shutting down the processes can freeze
        result.printErrors()
        result.printSummary(start, stop)
        self.config.plugins.finalize(result)

        log.debug("Tell all workers to stop")
        for w in workers:
            if w.is_alive():
                testQueue.put('STOP', block=False)

        # wait for the workers to end
        try:
            for iworker,worker in enumerate(workers):
                if worker.is_alive():
                    log.debug('joining worker %s',iworker)
                    worker.join()#10)
                    if worker.is_alive():
                        log.debug('failed to join worker %s',iworker)
        except KeyboardInterrupt:
            log.info('parent received ctrl-c')
            for worker in workers:
                worker.terminate()
                worker.join()

        return result
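
The tail of this runner shows a pattern worth isolating: a worker stuck in C/C++ code may ignore a single SIGINT, so the parent keeps signalling until the worker acknowledges the KeyboardInterrupt through a shared Event, and only falls back to terminate() after a deadline. Below is a minimal, POSIX-only sketch of that handshake; the worker body and the WAIT_KILL_TIME value are illustrative stand-ins, not part of the runner above.

import os
import signal
import time
from multiprocessing import Event, Process

WAIT_KILL_TIME = 5.0  # assumed deadline before falling back to terminate()

def worker(ack):
    try:
        while True:            # stand-in for a long-running task
            time.sleep(0.5)
    except KeyboardInterrupt:
        ack.set()              # acknowledge: the signal arrived as an exception

if __name__ == '__main__':
    ack = Event()
    p = Process(target=worker, args=(ack,))
    p.start()
    time.sleep(0.2)            # give the worker time to reach its try block
    deadline = time.time() + WAIT_KILL_TIME
    while not ack.is_set() and p.is_alive():
        if time.time() > deadline:
            p.terminate()      # never acknowledged; hard stop, as above
            break
        os.kill(p.pid, signal.SIGINT)
        time.sleep(0.1)
    p.join()
    print('acknowledged:', ack.is_set())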

Example 30

Project: gramps
Source File: dateparserdisplaytest.py
View license
    def run_tool(self):
        self.progress = ProgressMeter(_('Running Date Test'), '',
                                      parent=self.parent_window)
        self.progress.set_pass(_('Generating dates'),
                               4)
        dates = []
        # first some valid dates
        calendar = Date.CAL_GREGORIAN
        for quality in (Date.QUAL_NONE, Date.QUAL_ESTIMATED,
                        Date.QUAL_CALCULATED):
            for modifier in (Date.MOD_NONE, Date.MOD_BEFORE,
                             Date.MOD_AFTER, Date.MOD_ABOUT):
                for slash1 in (False,True):
                    for month in range(0,13):
                        for day in (0,5,27):
                            if not month and day:
                                continue
                            d = Date()
                            d.set(quality,modifier,calendar,(day,month,1789,slash1),"Text comment")
                            dates.append( d)
            for modifier in (Date.MOD_RANGE, Date.MOD_SPAN):
                for slash1 in (False,True):
                    for slash2 in (False,True):
                        for month in range(0,13):
                            for day in (0,5,27):
                                if not month and day:
                                    continue

                                d = Date()
                                d.set(quality,modifier,calendar,(day,month,1789,slash1,day,month,1876,slash2),"Text comment")
                                dates.append( d)

                                if not month:
                                    continue

                                d = Date()
                                d.set(quality,modifier,calendar,(day,month,1789,slash1,day,13-month,1876,slash2),"Text comment")
                                dates.append( d)

                                if not day:
                                    continue

                                d = Date()
                                d.set(quality,modifier,calendar,(day,month,1789,slash1,32-day,month,1876,slash2),"Text comment")
                                dates.append( d)
                                d = Date()
                                d.set(quality,modifier,calendar,(day,month,1789,slash1,32-day,13-month,1876,slash2),"Text comment")
                                dates.append( d)
            modifier = Date.MOD_TEXTONLY
            d = Date()
            d.set(quality,modifier,calendar,Date.EMPTY,
                  "This is a textual date")
            dates.append( d)
            self.progress.step()

        # test invalid dates
        #dateval = (4,7,1789,False,5,8,1876,False)
        #for l in range(1,len(dateval)):
        #    d = Date()
        #    try:
        #        d.set(Date.QUAL_NONE,Date.MOD_NONE,
        #              Date.CAL_GREGORIAN,dateval[:l],"Text comment")
        #        dates.append( d)
        #    except DateError, e:
        #        d.set_as_text("Date identified value correctly as invalid.\n%s" % e)
        #        dates.append( d)
        #    except:
        #        d = Date()
        #        d.set_as_text("Date.set Exception %s" % ("".join(traceback.format_exception(*sys.exc_info())),))
        #        dates.append( d)
        #for l in range(1,len(dateval)):
        #    d = Date()
        #    try:
        #        d.set(Date.QUAL_NONE,Date.MOD_SPAN,Date.CAL_GREGORIAN,dateval[:l],"Text comment")
        #        dates.append( d)
        #    except DateError, e:
        #        d.set_as_text("Date identified value correctly as invalid.\n%s" % e)
        #        dates.append( d)
        #    except:
        #        d = Date()
        #        d.set_as_text("Date.set Exception %s" % ("".join(traceback.format_exception(*sys.exc_info())),))
        #        dates.append( d)
        #self.progress.step()
        #d = Date()
        #d.set(Date.QUAL_NONE,Date.MOD_NONE,
        #      Date.CAL_GREGORIAN,(44,7,1789,False),"Text comment")
        #dates.append( d)
        #d = Date()
        #d.set(Date.QUAL_NONE,Date.MOD_NONE,
        #      Date.CAL_GREGORIAN,(4,77,1789,False),"Text comment")
        #dates.append( d)
        #d = Date()
        #d.set(Date.QUAL_NONE,Date.MOD_SPAN,
        #      Date.CAL_GREGORIAN,
        #      (4,7,1789,False,55,8,1876,False),"Text comment")
        #dates.append( d)
        #d = Date()
        #d.set(Date.QUAL_NONE,Date.MOD_SPAN,
        #      Date.CAL_GREGORIAN,
        #      (4,7,1789,False,5,88,1876,False),"Text comment")
        #dates.append( d)

        with DbTxn(_("Date Test Plugin"), self.db, batch=True) as self.trans:
            self.db.disable_signals()
            self.progress.set_pass(_('Generating dates'),
                                   len(dates))

            # create pass and fail tags
            pass_handle = self.create_tag(_('Pass'), '#0000FFFF0000')
            fail_handle = self.create_tag(_('Fail'), '#FFFF00000000')

            # now add them as birth to new persons
            i = 1
            for dateval in dates:
                person = Person()
                surname = Surname()
                surname.set_surname("DateTest")
                name = Name()
                name.add_surname(surname)
                name.set_first_name("Test %d" % i)
                person.set_primary_name(name)
                self.db.add_person(person, self.trans)
                bevent = Event()
                bevent.set_type(EventType.BIRTH)
                bevent.set_date_object(dateval)
                bevent.set_description("Date Test %d (source)" % i)
                bevent_h = self.db.add_event(bevent, self.trans)
                bevent_ref = EventRef()
                bevent_ref.set_reference_handle(bevent_h)
                # for the death event display the date as text and parse it back to a new date
                ndate = None
                try:
                    datestr = _dd.display( dateval)
                    try:
                        ndate = _dp.parse( datestr)
                        if not ndate:
                            ndate = Date()
                            ndate.set_as_text("DateParser None")
                            person.add_tag(fail_handle)
                        else:
                            person.add_tag(pass_handle)
                    except:
                        ndate = Date()
                        ndate.set_as_text("DateParser Exception %s" % ("".join(traceback.format_exception(*sys.exc_info())),))
                        person.add_tag(fail_handle)
                    else:
                        person.add_tag(pass_handle)
                except:
                    ndate = Date()
                    ndate.set_as_text("DateDisplay Exception: %s" % ("".join(traceback.format_exception(*sys.exc_info())),))
                    person.add_tag(fail_handle)

                if dateval.get_modifier() != Date.MOD_TEXTONLY \
                       and ndate.get_modifier() == Date.MOD_TEXTONLY:
                    # parser was unable to correctly parse the string
                    ndate.set_as_text( "TEXTONLY: "+ndate.get_text())
                    person.add_tag(fail_handle)
                if dateval.get_modifier() == Date.MOD_TEXTONLY \
                        and dateval.get_text().count("Traceback") \
                        and pass_handle in person.get_tag_list():
                    person.add_tag(fail_handle)

                devent = Event()
                devent.set_type(EventType.DEATH)
                devent.set_date_object(ndate)
                devent.set_description("Date Test %d (result)" % i)
                devent_h = self.db.add_event(devent, self.trans)
                devent_ref = EventRef()
                devent_ref.set_reference_handle(devent_h)
                person.set_birth_ref(bevent_ref)
                person.set_death_ref(devent_ref)
                self.db.commit_person(person, self.trans)
                i = i + 1
                self.progress.step()
        self.db.enable_signals()
        self.db.request_rebuild()
        self.progress.close()
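
Stripped of the Gramps objects, the heart of this test is a display -> parse round trip whose failures are recorded as text built from traceback.format_exception(*sys.exc_info()). A hedged, generic sketch of the same idea (display and parse are arbitrary callables here, not the Gramps _dd/_dp singletons):

import sys
import traceback

def roundtrip(value, display, parse):
    """Return (ok, detail) for one display -> parse round trip."""
    try:
        text = display(value)
    except Exception:
        return False, 'display failed:\n' + ''.join(
            traceback.format_exception(*sys.exc_info()))
    try:
        parsed = parse(text)
    except Exception:
        return False, 'parse failed:\n' + ''.join(
            traceback.format_exception(*sys.exc_info()))
    return parsed == value, repr(parsed)

print(roundtrip(42, str, int))     # (True, '42')
print(roundtrip('x', str, int))    # (False, 'parse failed: ...ValueError...')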

Example 31

Project: bigcouch
Source File: Taskmaster.py
View license
    def _find_next_ready_node(self):
        """
        Finds the next node that is ready to be built.

        This is *the* main guts of the DAG walk.  We loop through the
        list of candidates, looking for something that has no un-built
        children (i.e., that is a leaf Node or has dependencies that are
        all leaf Nodes or up-to-date).  Candidate Nodes are re-scanned
        (both the target Node itself and its sources, which are always
        scanned in the context of a given target) to discover implicit
        dependencies.  A Node that must wait for some children to be
        built will be put back on the candidates list after the children
        have finished building.  A Node that has been put back on the
        candidates list in this way may have itself (or its sources)
        re-scanned, in order to handle generated header files (e.g.) and
        the implicit dependencies therein.

        Note that this method does not do any signature calculation or
        up-to-date check itself.  All of that is handled by the Task
        class.  This is purely concerned with the dependency graph walk.
        """

        self.ready_exc = None

        T = self.trace
        if T: T.write(u'\n' + self.trace_message('Looking for a node to evaluate'))

        while True:
            node = self.next_candidate()
            if node is None:
                if T: T.write(self.trace_message('No candidate anymore.') + u'\n')
                return None

            node = node.disambiguate()
            state = node.get_state()

            # For debugging only:
            #
            # try:
            #     self._validate_pending_children()
            # except:
            #     self.ready_exc = sys.exc_info()
            #     return node

            if CollectStats:
                if not hasattr(node, 'stats'):
                    node.stats = Stats()
                    StatsNodes.append(node)
                S = node.stats
                S.considered = S.considered + 1
            else:
                S = None

            if T: T.write(self.trace_message(u'    Considering node %s and its children:' % self.trace_node(node)))

            if state == NODE_NO_STATE:
                # Mark this node as being on the execution stack:
                node.set_state(NODE_PENDING)
            elif state > NODE_PENDING:
                # Skip this node if it has already been evaluated:
                if S: S.already_handled = S.already_handled + 1
                if T: T.write(self.trace_message(u'       already handled (executed)'))
                continue

            executor = node.get_executor()

            try:
                children = executor.get_all_children()
            except SystemExit:
                exc_value = sys.exc_info()[1]
                e = SCons.Errors.ExplicitExit(node, exc_value.code)
                self.ready_exc = (SCons.Errors.ExplicitExit, e)
                if T: T.write(self.trace_message('       SystemExit'))
                return node
            except Exception, e:
                # We had a problem just trying to figure out the
                # children (like a child couldn't be linked in to a
                # VariantDir, or a Scanner threw something).  Arrange to
                # raise the exception when the Task is "executed."
                self.ready_exc = sys.exc_info()
                if S: S.problem = S.problem + 1
                if T: T.write(self.trace_message('       exception %s while scanning children.\n' % e))
                return node

            children_not_visited = []
            children_pending = set()
            children_not_ready = []
            children_failed = False

            for child in chain(executor.get_all_prerequisites(), children):
                childstate = child.get_state()

                if T: T.write(self.trace_message(u'       ' + self.trace_node(child)))

                if childstate == NODE_NO_STATE:
                    children_not_visited.append(child)
                elif childstate == NODE_PENDING:
                    children_pending.add(child)
                elif childstate == NODE_FAILED:
                    children_failed = True

                if childstate <= NODE_EXECUTING:
                    children_not_ready.append(child)


            # These nodes have not even been visited yet.  Add
            # them to the list so that on some next pass we can
            # take a stab at evaluating them (or their children).
            children_not_visited.reverse()
            self.candidates.extend(self.order(children_not_visited))
            #if T and children_not_visited:
            #    T.write(self.trace_message('     adding to candidates: %s' % map(str, children_not_visited)))
            #    T.write(self.trace_message('     candidates now: %s\n' % map(str, self.candidates)))

            # Skip this node if any of its children have failed.
            #
            # This catches the case where we're descending a top-level
            # target and one of our children failed while trying to be
            # built by a *previous* descent of an earlier top-level
            # target.
            #
            # It can also occur if a node is reused in multiple
            # targets: the first descent reaches it through one
            # target, and a later descent reaches it through the other.
            #
            # Note that we can only have failed_children if the
            # --keep-going flag was used, because without it the build
            # will stop before diving into the other branch.
            #
            # Note that even if one of the children fails, we still
            # added the other children to the list of candidate nodes
            # to keep on building (--keep-going).
            if children_failed:
                for n in executor.get_action_targets():
                    n.set_state(NODE_FAILED)

                if S: S.child_failed = S.child_failed + 1
                if T: T.write(self.trace_message('****** %s\n' % self.trace_node(node)))
                continue

            if children_not_ready:
                for child in children_not_ready:
                    # We're waiting on one or more derived targets
                    # that have not yet finished building.
                    if S: S.not_built = S.not_built + 1

                    # Add this node to the waiting parents lists of
                    # anything we're waiting on, with a reference
                    # count so we can be put back on the list for
                    # re-evaluation when they've all finished.
                    node.ref_count =  node.ref_count + child.add_to_waiting_parents(node)
                    if T: T.write(self.trace_message(u'     adjusted ref count: %s, child %s' %
                                  (self.trace_node(node), repr(str(child)))))

                if T:
                    for pc in children_pending:
                        T.write(self.trace_message('       adding %s to the pending children set\n' %
                                self.trace_node(pc)))
                self.pending_children = self.pending_children | children_pending

                continue

            # Skip this node if it has side-effects that are
            # currently being built:
            wait_side_effects = False
            for se in executor.get_action_side_effects():
                if se.get_state() == NODE_EXECUTING:
                    se.add_to_waiting_s_e(node)
                    wait_side_effects = True

            if wait_side_effects:
                if S: S.side_effects = S.side_effects + 1
                continue

            # The default when we've gotten through all of the checks above:
            # this node is ready to be built.
            if S: S.build = S.build + 1
            if T: T.write(self.trace_message(u'Evaluating %s\n' %
                                             self.trace_node(node)))

            # For debugging only:
            #
            # try:
            #     self._validate_pending_children()
            # except:
            #     self.ready_exc = sys.exc_info()
            #     return node

            return node

        return None
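
The ready_exc attribute is the classic "defer the exception" use of sys.exc_info(): capture the (type, value, traceback) triple where the failure occurs, and re-raise it later where it can be reported against the right Task. A minimal sketch using the Python 3 re-raise spelling (the SCons code above is Python 2, where this step would read raise exc_type, exc_value, exc_tb):

import sys

def scan_children():
    raise ValueError('child could not be linked in')  # stand-in scanner error

ready_exc = None
try:
    scan_children()
except Exception:
    ready_exc = sys.exc_info()      # capture (type, value, traceback) now

# ... later, when the Task is "executed":
if ready_exc is not None:
    exc_type, exc_value, exc_tb = ready_exc
    try:
        raise exc_value.with_traceback(exc_tb)   # Python 3 spelling
    except ValueError as e:
        print('re-raised with the original traceback:', e)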

Example 32

Project: dt
Source File: dt.py
View license
def dt():
    '''darktable sqlite3 database maintenance'''
    
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description=textwrap.dedent(
            '''
            Darktable filmroll/image/metadata maintenance:
            
            mv <src> <dest>                             : rename film roll or image
            query <dir|file> [ <dir|file> ... ]         : dump details of an object
                                                          (best on 132+ column term)
            scan <dir|file> [ <dir|file> ... ]          : just report if the object 
                                                          doesn't exist in the db - 
                                                          .xmp and .meta extensions 
                                                          will be stripped before the 
                                                          check
            set <image> <var> <val> [ <var> <val> ... ] : modify metadata (Note 6)
            
            For the 'set' command, the valid var's are:
            
            image table:
                datetime_taken         "YYYY:MM:DD HH:MM:SS"
                caption                "text"
                description            "text"
                license                "text"
                longitude              float (-180.0 to +180.0, positive E) (Note 3)
                latitude               float (-90.0 to +90.0, positive N) (Note 3)
                                       
            meta_data table:
                creator                "text"
                publisher              "text"
                title                  "text"
                description            "text"
                rights                 "text"
            history table:
                crop                   "angle,cx,cy,cw,ch" (Note 4)
                                       
            tags and tagged_images tables:
                tag                    "text" (Note 5)
                                       
            NOTES:
              1) before modifying the database dt will check to make sure darktable 
                 is not running
              2) if a modification is going to be made to the database a backup will 
                 be made first: <library.db>.<timestamp>, unless you specify --no-backup
              3) to convert degrees minutes seconds to a float: 
                    f = degrees + ( minutes + seconds/60. ) / 60.
                 and change sign if W or S.
              4) angle is the rotation angle before crop, positive clockwise,
                 cx, cy, cw, ch are the normalized crop coordinates (0.0 to 1.0),
                 cx,cy is lower left(?) and cw,ch are new width and height.
              5) you may add tags but not modify or remove existing tags
              6) if any of the variables you want to use in the set command begin with
                 a "-", add a "--" option before the file name so that the python 
                 argparser doesn't try to interpret them 
                 (i.e. set -- file.jpg crop -1,0,0,1,1)
            
            '''))
    
    global dbVer
    
    parser.add_argument('-d', '--db', dest='dtdb', action='store',
        default=os.getenv("HOME")+os.sep+'.config'+os.sep+'darktable'+os.sep+'library.db',
        help='Darktable database path, default= ~/.config/darktable/library.db')
    
    parser.add_argument('--no-backup', dest='doBackup', default=True, action='store_false',
        help='don\'t backup the Darktable database before modifications')
    
    parser.add_argument('cmd', metavar='<command>', type=str,
        help='mv, query, set, etc')

    parser.parse_args(['--', '-f'])
    
    parser.add_argument('files', metavar='<parm>', type=str, nargs='+')
    
    args = parser.parse_args()
    
    # connect to the darktable library.db file
    
    try:
        if os.path.exists(args.dtdb):
            conn = sqlite3.connect(args.dtdb)
        else:
            raise Exception('%s does not exist'%args.dtdb)
        
        
        conn.row_factory = sqlite3.Row
        conn.text_factory = str
        c = conn.cursor()
        '''
            Here is the darktable settings blob:
            
            typedef struct dt_ctl_settings_t
                {
                  // TODO: remove most of these options, maybe the whole struct?
                  // global
                  int32_t version;
                  char dbname[512];
                
                  int32_t lib_image_mouse_over_id;
                
                  // synchronized navigation
                  float dev_zoom_x, dev_zoom_y, dev_zoom_scale;
                  dt_dev_zoom_t dev_zoom;
                  int dev_closeup;
                }
                dt_ctl_settings_t;
        '''
        try:
            vblob = c.execute('select settings from settings').fetchone()[0]
            dbVer = getVersion(vblob)
            if dbVer != 34 and dbVer != 36:
                raise Exception('%s '%args.dtdb + 'version %d is not 34 or 36!'%dbVer)
            
        except:
            #print 'error: ',sys.exc_info()
            raise Exception('%s '%args.dtdb + 'does not appear to be a Darktable database')

    except:
        print 'darktable db error: %s '%sys.exc_info()[1]
        sys.exit()
    
    
    
    if args.cmd == 'query':
        for qFile in args.files:
            try:
                query(conn,os.path.abspath(qFile))
            except:
                print 'error: [%s]'%sys.exc_info()[1]
                
    elif args.cmd == 'scan':
        for qFile in args.files:
            try:
                scan(conn,os.path.abspath(qFile))
            except:
                print sys.exc_info()[1]
                
    elif args.cmd == 'mv':
        try:
            if len(args.files) != 2:
                raise Exception('mv <from> <to>')
            else:
                mvSrc  = os.path.abspath(args.files[0])
                mvDest = os.path.abspath(args.files[1])             
                
            is_running('darktable',os.getenv('USER'))
            do_backup(args.dtdb,args.doBackup)
            dt_mv(conn,mvSrc,mvDest)
        except:
            print 'error: %s'%sys.exc_info()[1]
            sys.exit()
                
    elif args.cmd == 'set':
        try:
            if len(args.files) < 3:
                raise Exception('set <image> <variable> <value> ...')
            else:
                imRoll = os.path.dirname(os.path.abspath(args.files[0]))
                imName = os.path.basename(os.path.abspath(args.files[0]))
                frId = fr_getId(conn,imRoll)
                imId = im_getId(conn,frId,imName)
                # need to make each tag key unique so I can create a dict using it as key
                for i in range(1,len(args.files),2):
                    if args.files[i] == 'tag':
                        args.files[i] = 'tag---%d'%i 
       
                imVars = dict(zip(args.files[1::2], args.files[2::2]))
                # check that # of keys = # of values
                if len(imVars)*2 != len(args.files)-1:
                    raise Exception('Failed to convert %s to dict.  Missing value?'%args.files[1:])
            
            is_running('darktable',os.getenv('USER'))
            do_backup(args.dtdb,args.doBackup)
            
            metaKeys = [ 'creator', 'publisher', 'title', 'description', 'rights' ]
            imageKeys = [ 'datetime_taken', 'caption', 'description', 'license', 'longitude', 'latitude' ]
            historyKeys = [ 'crop' ]
            #print 'imVars: ',imVars
            for k,v in imVars.iteritems():
                if k in metaKeys:
                    im_setMeta(conn,imId,k,v)
                if k in imageKeys:
                    im_setImage(conn,imId,k,v)
                if k in historyKeys: # there is only one valid right now ....
                    im_setHistory(conn,imId,v)
                if k.startswith('tag---'):
                    im_setTag(conn,imId,v)
            
        except:
            print 'error: ',sys.exc_info()
            sys.exit()           
    else:
        print 'command??'          
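
Throughout dt.py, sys.exc_info()[1] is printed when only the exception value matters to the user, while the full tuple is printed for debugging (as in the 'set' branch). A tiny illustration of the difference:

import sys

try:
    int('not a number')
except ValueError:
    print('error: %s' % sys.exc_info()[1])  # terse, user-facing value only
    print('debug:', sys.exc_info())         # full (type, value, traceback)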

Example 33

Project: scalyr-agent-2
Source File: compiler.py
View license
    def compile(self, *mibnames, **options):
        """Transform requested and possibly referred MIBs.

        The *compile* method should be invoked when the *MibCompiler* object
        is operational, meaning at least *sources* are specified.
        
        Once called with a MIB module name, *compile* will:

        * fetch ASN.1 MIB module with given name by calling *sources*
        * make sure no such transformed MIB already exists (with *searchers*)
        * parse ASN.1 MIB text with *parser*
        * perform actual MIB transformation into target format with *code generator*
        * may attempt to borrow pre-transformed MIB through *borrowers*
        * write transformed MIB through *writer*

        The above sequence will be performed for each MIB name given in
        *mibnames* and may be performed for all MIBs referred to from
        MIBs being processed.

        Args:
            mibnames: list of ASN.1 MIBs names
            options: options that affect the way PySMI components work

        Returns:
            A dictionary of MIB module names processed (keys) and *MibStatus*
            class instances (values)

        """
        processed = {}
        parsedMibs = {}; failedMibs = {}; borrowedMibs = {}; builtMibs = {}
        symbolTableMap = {}
        mibsToParse = [x for x in mibnames]
        while mibsToParse:
            mibname = mibsToParse.pop(0)
            if mibname in parsedMibs:
                debug.logger & debug.flagCompiler and debug.logger('MIB %s already parsed' % mibname)
                continue
            if mibname in failedMibs:
                debug.logger & debug.flagCompiler and debug.logger('MIB %s already failed' % mibname)
                continue

            for source in self._sources:
                debug.logger & debug.flagCompiler and debug.logger('trying source %s' % source)
                try:
                    fileInfo, fileData = source.getData(mibname)
                    for mibTree in self._parser.parse(fileData):
                        mibInfo, symbolTable = self._symbolgen.genCode(
                            mibTree, symbolTableMap
                        )

                        symbolTableMap[mibInfo.name] = symbolTable

                        parsedMibs[mibInfo.name] = fileInfo, mibInfo, mibTree
                        if mibname in failedMibs:
                            del failedMibs[mibname]

                        mibsToParse.extend(mibInfo.imported)

                        debug.logger & debug.flagCompiler and debug.logger('%s (%s) read from %s, immediate dependencies: %s' % (mibInfo.name, mibname, fileInfo.path, ', '.join(mibInfo.imported) or '<none>'))

                    break

                except error.PySmiReaderFileNotFoundError:
                    debug.logger & debug.flagCompiler and debug.logger('no %s found at %s' % (mibname, source))
                    continue
                except error.PySmiError:
                    exc_class, exc, tb = sys.exc_info()
                    exc.source = source
                    exc.mibname = mibname
                    exc.msg += ' at MIB %s' % mibname
                    debug.logger & debug.flagCompiler and debug.logger('%serror %s from %s' % (options.get('ignoreErrors') and 'ignoring ' or 'failing on ', exc, source))
                    failedMibs[mibname] = exc
                    processed[mibname] = statusFailed.setOptions(error=exc)
            else:
                exc = error.PySmiError('MIB source %s not found' % mibname)
                exc.mibname = mibname
                debug.logger & debug.flagCompiler and debug.logger('no %s found anywhere' % mibname)
                if mibname not in failedMibs:
                    failedMibs[mibname] = exc
                if mibname not in processed:
                    processed[mibname] = statusMissing

        debug.logger & debug.flagCompiler and debug.logger('MIBs analyzed %s, MIBs failed %s' % (len(parsedMibs), len(failedMibs)))

        #
        # See what MIBs need generating
        #

        for mibname in parsedMibs.copy():
            fileInfo, mibInfo, mibTree = parsedMibs[mibname]
            debug.logger & debug.flagCompiler and debug.logger('checking if %s requires updating' % mibname)
            for searcher in self._searchers:
                try:
                    searcher.fileExists(mibname, fileInfo.mtime, rebuild=options.get('rebuild'))
                except error.PySmiFileNotFoundError:
                    debug.logger & debug.flagCompiler and debug.logger('no compiled MIB %s available through %s' % (mibname, searcher))
                    continue

                except error.PySmiFileNotModifiedError:
                    debug.logger & debug.flagCompiler and debug.logger('will be using existing compiled MIB %s found by %s' % (mibname, searcher))
                    del parsedMibs[mibname]
                    processed[mibname] = statusUntouched
                    break

                except error.PySmiError:
                    exc_class, exc, tb = sys.exc_info()
                    exc.searcher = searcher
                    exc.mibname = mibname
                    exc.msg += ' at MIB %s' % mibname
                    debug.logger & debug.flagCompiler and debug.logger('error from %s: %s' % (searcher, exc))
                    continue
            else:
                debug.logger & debug.flagCompiler and debug.logger('no suitable compiled MIB %s found anywhere' % mibname)

                if options.get('noDeps') and mibname not in mibnames:
                    debug.logger & debug.flagCompiler and debug.logger('excluding imported MIB %s from code generation' % mibname)
                    del parsedMibs[mibname]
                    processed[mibname] = statusUntouched
                    continue

        debug.logger & debug.flagCompiler and debug.logger('MIBs parsed %s, MIBs failed %s' % (len(parsedMibs), len(failedMibs)))

        #
        # Generate code for parsed MIBs
        #

        for mibname in parsedMibs.copy():
            fileInfo, mibInfo, mibTree = parsedMibs[mibname]

            comments = [
                'ASN.1 source %s' % fileInfo.path,
                'Produced by %s-%s at %s' % (packageName, packageVersion, time.asctime()),
                'On host %s platform %s version %s by user %s' % (hasattr(os, 'uname') and os.uname()[1] or '?', hasattr(os, 'uname') and os.uname()[0] or '?', hasattr(os, 'uname') and os.uname()[2] or '?', hasattr(os, 'getuid') and getpwuid(os.getuid())[0] or '?'),
                'Using Python version %s' % sys.version.split('\n')[0]
            ]

            try:
                mibInfo, mibData = self._codegen.genCode(
                        mibTree,
                        symbolTableMap,
                        comments=comments,
                        genTexts=options.get('genTexts')
                    )

                builtMibs[mibname] = fileInfo, mibInfo, mibData
                del parsedMibs[mibname]

                debug.logger & debug.flagCompiler and debug.logger('%s read from %s and compiled by %s' % (mibname, fileInfo.path, self._writer))

            except error.PySmiError:
                exc_class, exc, tb = sys.exc_info()
                exc.handler = self._codegen
                exc.mibname = mibname
                exc.msg += ' at MIB %s' % mibname
                debug.logger & debug.flagCompiler and debug.logger('error from %s: %s' % (self._codegen, exc))
                processed[mibname] = statusFailed.setOptions(error=exc)
                failedMibs[mibname] = exc
                del parsedMibs[mibname]

        debug.logger & debug.flagCompiler and debug.logger('MIBs built %s, MIBs failed %s' % (len(parsedMibs), len(failedMibs)))

        #
        # Try to borrow pre-compiled MIBs for failed ones
        #

        for mibname in failedMibs.copy():
            if options.get('noDeps') and mibname not in mibnames:
                debug.logger & debug.flagCompiler and debug.logger('excluding imported MIB %s from borrowing' % mibname)
                continue

            for borrower in self._borrowers:
                debug.logger & debug.flagCompiler and debug.logger('trying to borrow %s from %s' % (mibname, borrower))
                try:
                    fileInfo, fileData = borrower.getData(
                        mibname,
                        genTexts=options.get('genTexts')
                    )

                    borrowedMibs[mibname] = fileInfo, MibInfo(name=mibname, imported=[]), fileData

                    del failedMibs[mibname]

                    debug.logger & debug.flagCompiler and debug.logger('%s borrowed with %s' % (mibname, borrower))
                    break

                except error.PySmiError:
                    exc_class, exc, tb = sys.exc_info()
                    debug.logger & debug.flagCompiler and debug.logger('error from %s: %s' % (borrower, exc))

        debug.logger & debug.flagCompiler and debug.logger('MIBs available for borrowing %s, MIBs failed %s' % (len(borrowedMibs), len(failedMibs)))

        #
        # See what MIBs need borrowing
        #

        for mibname in borrowedMibs.copy():
            debug.logger & debug.flagCompiler and debug.logger('checking if failed MIB %s requires borrowing' % mibname)
            fileInfo, mibInfo, mibData = borrowedMibs[mibname]
            for searcher in self._searchers:
                try:
                    searcher.fileExists(mibname, fileInfo.mtime, rebuild=options.get('rebuild'))
                except error.PySmiFileNotFoundError:
                    debug.logger & debug.flagCompiler and debug.logger('no compiled MIB %s available through %s' % (mibname, searcher))
                    continue

                except error.PySmiFileNotModifiedError:
                    debug.logger & debug.flagCompiler and debug.logger('will be using existing compiled MIB %s found by %s' % (mibname, searcher))
                    del borrowedMibs[mibname]
                    processed[mibname] = statusUntouched
                    break

                except error.PySmiError:
                    exc_class, exc, tb = sys.exc_info()
                    exc.searcher = searcher
                    exc.mibname = mibname
                    exc.msg += ' at MIB %s' % mibname
                    debug.logger & debug.flagCompiler and debug.logger('error from %s: %s' % (searcher, exc))
                    continue
            else:
                debug.logger & debug.flagCompiler and debug.logger('no suitable compiled MIB %s found anywhere' % mibname)

                if options.get('noDeps') and mibname not in mibnames:
                    debug.logger & debug.flagCompiler and debug.logger('excluding imported MIB %s from borrowing' % mibname)
                    processed[mibname] = statusUntouched
                else:
                    debug.logger & debug.flagCompiler and debug.logger('will borrow MIB %s' % mibname)
                    builtMibs[mibname] = borrowedMibs[mibname]

                    processed[mibname] = statusBorrowed.setOptions(
                        path=fileInfo.path, file=fileInfo.file,
                        alias=fileInfo.name
                    )

                del borrowedMibs[mibname]

        debug.logger & debug.flagCompiler and debug.logger('MIBs built %s, MIBs failed %s' % (len(builtMibs), len(failedMibs)))

        #
        # We could attempt to ignore missing/failed MIBs
        #

        if failedMibs and not options.get('ignoreErrors'):
            debug.logger & debug.flagCompiler and debug.logger('failing with problem MIBs %s' % ', '.join(failedMibs))
            for mibname in builtMibs:
                processed[mibname] = statusUnprocessed
            return processed

        debug.logger & debug.flagCompiler and debug.logger('proceeding with built MIBs %s, failed MIBs %s' % (', '.join(builtMibs), ', '.join(failedMibs)))

        #
        # Store compiled MIBs
        #

        for mibname in builtMibs.copy():
            fileInfo, mibInfo, mibData = builtMibs[mibname]
            try:
                self._writer.putData(
                    mibname, mibData, dryRun=options.get('dryRun')
                )

                debug.logger & debug.flagCompiler and debug.logger('%s stored by %s' % (mibname, self._writer))

                del builtMibs[mibname]

                if mibname not in processed:
                    processed[mibname] = statusCompiled.setOptions(
                        path=fileInfo.path, file=fileInfo.file,
                        alias=fileInfo.name
                    )

            except error.PySmiError:
                exc_class, exc, tb = sys.exc_info()
                exc.handler = self._codegen
                exc.mibname = mibname
                exc.msg += ' at MIB %s' % mibname
                debug.logger & debug.flagCompiler and debug.logger('error %s from %s' % (exc, self._writer))
                processed[mibname] = statusFailed.setOptions(error=exc)
                failedMibs[mibname] = exc
                del builtMibs[mibname]

        debug.logger & debug.flagCompiler and debug.logger('MIBs modified: %s' % ', '.join([x for x in processed if processed[x] in ('compiled', 'borrowed')]))

        return processed
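
A recurring idiom in this compiler is "annotate and carry on": fetch the live exception with sys.exc_info(), attach context attributes to it (source, mibname, an extended msg), record it, and move on to the next MIB instead of aborting. A reduced sketch with the PySMI machinery replaced by a plain exception class:

import sys

class JobError(Exception):
    def __init__(self, msg):
        super().__init__(msg)
        self.msg = msg
        self.jobname = None

def run(jobname):
    raise JobError('step failed')

failed = {}
for jobname in ('alpha', 'beta'):
    try:
        run(jobname)
    except JobError:
        exc_class, exc, tb = sys.exc_info()
        exc.jobname = jobname              # attach context for reporting
        exc.msg += ' at job %s' % jobname  # extend the message, as above
        failed[jobname] = exc              # record the failure and keep going

for jobname, exc in failed.items():
    print('%s: %s' % (jobname, exc.msg))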

Example 34

Project: SickGear
Source File: connectionpool.py
View license
    def urlopen(self, method, url, body=None, headers=None, retries=None,
                redirect=True, assert_same_host=True, timeout=_Default,
                pool_timeout=None, release_conn=None, **response_kw):
        """
        Get a connection from the pool and perform an HTTP request. This is the
        lowest level call for making a request, so you'll need to specify all
        the raw details.

        .. note::

           More commonly, it's appropriate to use a convenience method provided
           by :class:`.RequestMethods`, such as :meth:`request`.

        .. note::

           `release_conn` will only behave as expected if
           `preload_content=False` because we want to make
           `preload_content=False` the default behaviour someday soon without
           breaking backwards compatibility.

        :param method:
            HTTP request method (such as GET, POST, PUT, etc.)

        :param body:
            Data to send in the request body (useful for creating
            POST requests, see HTTPConnectionPool.post_url for
            more convenience).

        :param headers:
            Dictionary of custom headers to send, such as User-Agent,
            If-None-Match, etc. If None, pool headers are used. If provided,
            these headers completely replace any pool-specific headers.

        :param retries:
            Configure the number of retries to allow before raising a
            :class:`~urllib3.exceptions.MaxRetryError` exception.

            Pass ``None`` to retry until you receive a response. Pass a
            :class:`~urllib3.util.retry.Retry` object for fine-grained control
            over different types of retries.
            Pass an integer number to retry connection errors that many times,
            but no other types of errors. Pass zero to never retry.

            If ``False``, then retries are disabled and any exception is raised
            immediately. Also, instead of raising a MaxRetryError on redirects,
            the redirect response will be returned.

        :type retries: :class:`~urllib3.util.retry.Retry`, False, or an int.

        :param redirect:
            If True, automatically handle redirects (status codes 301, 302,
            303, 307, 308). Each redirect counts as a retry. Disabling retries
            will disable redirect, too.

        :param assert_same_host:
            If ``True``, will make sure that the host of the pool requests is
            consistent, and will raise HostChangedError otherwise. When False,
            you can use the pool on an HTTP proxy and request foreign hosts.

        :param timeout:
            If specified, overrides the default timeout for this one
            request. It may be a float (in seconds) or an instance of
            :class:`urllib3.util.Timeout`.

        :param pool_timeout:
            If set and the pool is set to block=True, then this method will
            block for ``pool_timeout`` seconds and raise EmptyPoolError if no
            connection is available within the time period.

        :param release_conn:
            If False, then the urlopen call will not release the connection
            back into the pool once a response is received (but will release if
            you read the entire contents of the response such as when
            `preload_content=True`). This is useful if you're not preloading
            the response's content immediately. You will need to call
            ``r.release_conn()`` on the response ``r`` to return the connection
            back into the pool. If None, it takes the value of
            ``response_kw.get('preload_content', True)``.

        :param \**response_kw:
            Additional parameters are passed to
            :meth:`urllib3.response.HTTPResponse.from_httplib`
        """
        if headers is None:
            headers = self.headers

        if not isinstance(retries, Retry):
            retries = Retry.from_int(retries, redirect=redirect, default=self.retries)

        if release_conn is None:
            release_conn = response_kw.get('preload_content', True)

        # Check host
        if assert_same_host and not self.is_same_host(url):
            raise HostChangedError(self, url, retries)

        conn = None

        # Merge the proxy headers. Only do this in HTTP. We have to copy the
        # headers dict so we can safely change it without those changes being
        # reflected in anyone else's copy.
        if self.scheme == 'http':
            headers = headers.copy()
            headers.update(self.proxy_headers)

        # Must keep the exception bound to a separate variable or else Python 3
        # complains about UnboundLocalError.
        err = None

        try:
            # Request a connection from the queue.
            timeout_obj = self._get_timeout(timeout)
            conn = self._get_conn(timeout=pool_timeout)

            conn.timeout = timeout_obj.connect_timeout

            is_new_proxy_conn = self.proxy is not None and not getattr(conn, 'sock', None)
            if is_new_proxy_conn:
                self._prepare_proxy(conn)

            # Make the request on the httplib connection object.
            httplib_response = self._make_request(conn, method, url,
                                                  timeout=timeout_obj,
                                                  body=body, headers=headers)

            # If we're going to release the connection in ``finally:``, then
            # the request doesn't need to know about the connection. Otherwise
            # it will also try to release it and we'll have a double-release
            # mess.
            response_conn = not release_conn and conn

            # Import httplib's response into our own wrapper object
            response = HTTPResponse.from_httplib(httplib_response,
                                                 pool=self,
                                                 connection=response_conn,
                                                 **response_kw)

            # else:
            #     The connection will be put back into the pool when
            #     ``response.release_conn()`` is called (implicitly by
            #     ``response.read()``)

        except Empty:
            # Timed out by queue.
            raise EmptyPoolError(self, "No pool connections are available.")

        except (BaseSSLError, CertificateError) as e:
            # Close the connection. If a connection is reused on which there
            # was a Certificate error, the next request will certainly raise
            # another Certificate error.
            if conn:
                conn.close()
                conn = None
            raise SSLError(e)

        except SSLError:
            # Treat SSLError separately from BaseSSLError to preserve
            # traceback.
            if conn:
                conn.close()
                conn = None
            raise

        except (TimeoutError, HTTPException, SocketError, ConnectionError) as e:
            if conn:
                # Discard the connection for these exceptions. It will be
                # replaced during the next _get_conn() call.
                conn.close()
                conn = None

            if isinstance(e, SocketError) and self.proxy:
                e = ProxyError('Cannot connect to proxy.', e)
            elif isinstance(e, (SocketError, HTTPException)):
                e = ProtocolError('Connection aborted.', e)

            retries = retries.increment(method, url, error=e, _pool=self,
                                        _stacktrace=sys.exc_info()[2])
            retries.sleep()

            # Keep track of the error for the retry warning.
            err = e

        finally:
            if release_conn:
                # Put the connection back to be reused. If the connection is
                # expired then it will be None, which will get replaced with a
                # fresh connection during _get_conn.
                self._put_conn(conn)

        if not conn:
            # Try again
            log.warning("Retrying (%r) after connection "
                        "broken by '%r': %s" % (retries, err, url))
            return self.urlopen(method, url, body, headers, retries,
                                redirect, assert_same_host,
                                timeout=timeout, pool_timeout=pool_timeout,
                                release_conn=release_conn, **response_kw)

        # Handle redirect?
        redirect_location = redirect and response.get_redirect_location()
        if redirect_location:
            if response.status == 303:
                method = 'GET'

            try:
                retries = retries.increment(method, url, response=response, _pool=self)
            except MaxRetryError:
                if retries.raise_on_redirect:
                    raise
                return response

            log.info("Redirecting %s -> %s" % (url, redirect_location))
            return self.urlopen(method, redirect_location, body, headers,
                    retries=retries, redirect=redirect,
                    assert_same_host=assert_same_host,
                    timeout=timeout, pool_timeout=pool_timeout,
                    release_conn=release_conn, **response_kw)

        # Check if we should retry the HTTP response.
        if retries.is_forced_retry(method, status_code=response.status):
            retries = retries.increment(method, url, response=response, _pool=self)
            retries.sleep()
            log.info("Forced retry: %s" % url)
            return self.urlopen(method, url, body, headers,
                    retries=retries, redirect=redirect,
                    assert_same_host=assert_same_host,
                    timeout=timeout, pool_timeout=pool_timeout,
                    release_conn=release_conn, **response_kw)

        return response
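
One detail worth noting in urlopen: the traceback, sys.exc_info()[2], is handed to Retry.increment as _stacktrace, so that when retries are finally exhausted the resulting error can carry the original failure's stack. A reduced sketch of that bookkeeping (RetryState is an illustrative stand-in, not the urllib3 Retry class):

import sys

class RetryState:
    """Illustrative stand-in for urllib3's Retry bookkeeping."""
    def __init__(self, total):
        self.total = total

    def increment(self, error=None, _stacktrace=None):
        self.total -= 1
        if self.total < 0:
            # retries exhausted: surface the last failure with its own stack
            raise error.with_traceback(_stacktrace)
        return self

retries = RetryState(total=2)
while True:
    try:
        raise ConnectionError('connection aborted')  # stand-in for the request
    except ConnectionError as e:
        try:
            retries = retries.increment(error=e,
                                        _stacktrace=sys.exc_info()[2])
        except ConnectionError as final:
            print('gave up:', final)
            break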

Example 35

Project: rdflib
Source File: __init__.py
View license
def processURI(uri, outputFormat, form={}) :
	"""The standard processing of an RDFa uri options in a form; used as an entry point from a CGI call.

	The call accepts extra form options (i.e., HTTP GET options) as follows:

	 - C{graph=[output|processor|output,processor|processor,output]} specifying which graphs are returned. Default: C{output}
	 - C{space_preserve=[true|false]} means that plain literals are normalized in terms of white spaces. Default: C{false}
	 - C{rdfa_version} provides the RDFa version that should be used for distilling. The string should be of the form "1.0" or "1.1". Default is the highest version the current package implements, currently "1.1"
	 - C{host_language=[xhtml,html,xml]} : the host language. Used when files are uploaded or text is added verbatim, otherwise the HTTP return header should be used. Default C{xml}
	 - C{embedded_rdf=[true|false]} : whether embedded turtle or RDF/XML content should be added to the output graph. Default: C{false}
	 - C{vocab_expansion=[true|false]} : whether the vocabularies should be expanded through the restricted RDFS entailment. Default: C{false}
	 - C{vocab_cache=[true|false]} : whether vocab caching should be performed or whether it should be ignored and vocabulary files should be picked up every time. Default: C{false}
	 - C{vocab_cache_report=[true|false]} : whether vocab caching details should be reported. Default: C{false}
	 - C{vocab_cache_bypass=[true|false]} : whether vocab caches have to be regenerated every time. Default: C{false}
	 - C{rdfa_lite=[true|false]} : whether warnings should be generated for non RDFa Lite attribute usage. Default: C{false}

	@param uri: URI to access. Note that the C{text:} and C{uploaded:} fake URI values are treated separately; the former is for textual input (in which case a StringIO is used to get the data) and the latter is for an uploaded file, where the form gives access to the file directly.
	@param outputFormat: serialization format, as defined by the package. Currently "xml", "turtle", "nt", or "json". Default is "turtle", also used if any other string is given.
	@param form: extra call options (from the CGI call) to set up the local options
	@type form: cgi FieldStorage instance
	@return: serialized graph
	@rtype: string
	"""
	def _get_option(param, compare_value, default) :
		param_old = param.replace('_','-')
		if param in list(form.keys()) :
			val = form.getfirst(param).lower()
			return val == compare_value
		elif param_old in list(form.keys()) :
			# this is to ensure the old style parameters are still valid...
			# in the old days I used '-' in the parameters, the standard favours '_'
			val = form.getfirst(param_old).lower()
			return val == compare_value
		else :
			return default

	if uri == "uploaded:" :
		input	= form["uploaded"].file
		base	= ""
	elif uri == "text:" :
		input	= StringIO(form.getfirst("text"))
		base	= ""
	else :
		input	= uri
		base	= uri

	if "rdfa_version" in list(form.keys()) :
		rdfa_version = form.getfirst("rdfa_version")
	else :
		rdfa_version = None

	# working through the possible options
	# Host language: HTML, XHTML, or XML
	# Note that these options should be used for the upload and inline version only in case of a form
	# for real uris the returned content type should be used
	if "host_language" in list(form.keys()) :
		if form.getfirst("host_language").lower() == "xhtml" :
			media_type = MediaTypes.xhtml
		elif form.getfirst("host_language").lower() == "html" :
			media_type = MediaTypes.html
		elif form.getfirst("host_language").lower() == "svg" :
			media_type = MediaTypes.svg
		elif form.getfirst("host_language").lower() == "atom" :
			media_type = MediaTypes.atom
		else :
			media_type = MediaTypes.xml
	else :
		media_type = ""

	transformers = []

	check_lite = "rdfa_lite" in list(form.keys()) and form.getfirst("rdfa_lite").lower() == "true"

	# The code below is left for backward compatibility only. In fact, these options are not exposed any more,
	# they are not really in use
	if "extras" in list(form.keys()) and form.getfirst("extras").lower() == "true" :
		from .transform.metaname              	import meta_transform
		from .transform.OpenID                	import OpenID_transform
		from .transform.DublinCore            	import DC_transform
		for t in [OpenID_transform, DC_transform, meta_transform] :
			transformers.append(t)
	else :
		if "extra-meta" in list(form.keys()) and form.getfirst("extra-meta").lower() == "true" :
			from .transform.metaname import meta_transform
			transformers.append(meta_transform)
		if "extra-openid" in list(form.keys()) and form.getfirst("extra-openid").lower() == "true" :
			from .transform.OpenID import OpenID_transform
			transformers.append(OpenID_transform)
		if "extra-dc" in list(form.keys()) and form.getfirst("extra-dc").lower() == "true" :
			from .transform.DublinCore import DC_transform
			transformers.append(DC_transform)

	output_default_graph 	= True
	output_processor_graph 	= False
	# Note that I use the 'graph' and the 'rdfagraph' form keys here. Reason is that
	# I used 'graph' in the previous versions, including the RDFa 1.0 processor,
	# so if I removed that altogether that would create backward incompatibilities
	# On the other hand, the RDFa 1.1 doc clearly refers to 'rdfagraph' as the standard
	# key.
	a = None
	if "graph" in list(form.keys()) :
		a = form.getfirst("graph").lower()
	elif "rdfagraph" in list(form.keys()) :
		a = form.getfirst("rdfagraph").lower()
	if a != None :
		if a == "processor" :
			output_default_graph 	= False
			output_processor_graph 	= True
		elif a == "processor,output" or a == "output,processor" :
			output_processor_graph 	= True

	embedded_rdf        = _get_option( "embedded_rdf", "true", False)
	space_preserve      = _get_option( "space_preserve", "true", True)
	vocab_cache         = _get_option( "vocab_cache", "true", True)
	vocab_cache_report  = _get_option( "vocab_cache_report", "true", False)
	refresh_vocab_cache = _get_option( "vocab_cache_refresh", "true", False)
	vocab_expansion     = _get_option( "vocab_expansion", "true", False)
	if vocab_cache_report : output_processor_graph = True

	options = Options(output_default_graph   = output_default_graph,
					  output_processor_graph = output_processor_graph,
					  space_preserve         = space_preserve,
					  transformers           = transformers,
					  vocab_cache            = vocab_cache,
					  vocab_cache_report     = vocab_cache_report,
					  refresh_vocab_cache    = refresh_vocab_cache,
					  vocab_expansion        = vocab_expansion,
					  embedded_rdf           = embedded_rdf,
					  check_lite             = check_lite
					  )
	processor = pyRdfa(options = options, base = base, media_type = media_type, rdfa_version = rdfa_version)

	# Decide the output format; the issue is what should happen in case of a top level error like an inaccessibility of
	# the html source: should a graph be returned or an HTML page with an error message?

	# decide whether HTML or RDF should be sent.
	htmlOutput = False
	#if 'HTTP_ACCEPT' in os.environ :
	#	acc = os.environ['HTTP_ACCEPT']
	#	possibilities = ['text/html',
	#					 'application/rdf+xml',
	#					 'text/turtle; charset=utf-8',
	#					 'application/json',
	#					 'application/ld+json',
	#					 'text/rdf+n3']
	#
	#	# this nice module does content negotiation and returns the preferred format
	#	sg = acceptable_content_type(acc, possibilities)
	#	htmlOutput = (sg != None and sg[0] == content_type('text/html'))
	#	os.environ['rdfaerror'] = 'true'

	# This is really for testing purposes only: an unpublished flag to force RDF output
	# no matter what.
	try :
		graph = processor.rdf_from_source(input, outputFormat, rdfOutput = ("forceRDFOutput" in list(form.keys())) or not htmlOutput)
		if outputFormat == "n3" :
			retval = 'Content-Type: text/rdf+n3; charset=utf-8\n'
		elif outputFormat == "nt" or outputFormat == "turtle" :
			retval = 'Content-Type: text/turtle; charset=utf-8\n'
		elif outputFormat == "json-ld" or outputFormat == "json" :
			retval = 'Content-Type: application/ld+json; charset=utf-8\n'
		else :
			retval = 'Content-Type: application/rdf+xml; charset=utf-8\n'
		retval += '\n'
		retval += graph
		return retval
	except HTTPError :
		# Only the exception value is needed here; the names avoid shadowing
		# the 'type' builtin and the 'traceback' module.
		(exc_type, h, tb) = sys.exc_info()
		import cgi

		retval = 'Content-type: text/html; charset=utf-8\nStatus: %s\n\n' % h.http_code
		retval += "<html>\n"
		retval += "<head>\n"
		retval += "<title>HTTP Error in distilling RDFa content</title>\n"
		retval += "</head><body>\n"
		retval += "<h1>HTTP Error in distilling RDFa content</h1>\n"
		retval += "<p>HTTP Error: %s (%s)</p>\n" % (h.http_code,h.msg)
		retval += "<p>On URI: <code>'%s'</code></p>\n" % cgi.escape(uri)
		retval +="</body>\n"
		retval +="</html>\n"
		return retval
	except :
		# This branch is reached only if an exception is really raised, i.e., if it is not
		# turned into a graph value.
		# Names chosen so as not to shadow the 'traceback' module imported below.
		(exc_type, value, tb) = sys.exc_info()

		import traceback, cgi

		retval = 'Content-type: text/html; charset=utf-8\nStatus: %s\n\n' % processor.http_status
		retval += "<html>\n"
		retval += "<head>\n"
		retval += "<title>Exception in RDFa processing</title>\n"
		retval += "</head><body>\n"
		retval += "<h1>Exception in distilling RDFa</h1>\n"
		retval += "<pre>\n"
		strio  = StringIO()
		traceback.print_exc(file=strio)
		retval += strio.getvalue()
		retval +="</pre>\n"
		retval +="<pre>%s</pre>\n" % value
		retval +="<h1>Distiller request details</h1>\n"
		retval +="<dl>\n"
		if uri == "text:" and "text" in form and form["text"].value != None and len(form["text"].value.strip()) != 0 :
			retval +="<dt>Text input:</dt><dd>%s</dd>\n" % cgi.escape(form["text"].value).replace('\n','<br/>')
		elif uri == "uploaded:" :
			retval +="<dt>Uploaded file</dt>\n"
		else :
			retval +="<dt>URI received:</dt><dd><code>'%s'</code></dd>\n" % cgi.escape(uri)
		if "host_language" in list(form.keys()) :
			retval +="<dt>Media Type:</dt><dd>%s</dd>\n" % media_type
		if "graph" in list(form.keys()) :
			retval +="<dt>Requested graphs:</dt><dd>%s</dd>\n" % form.getfirst("graph").lower()
		else :
			retval +="<dt>Requested graphs:</dt><dd>default</dd>\n"
		retval +="<dt>Output serialization format:</dt><dd> %s</dd>\n" % outputFormat
		if "space_preserve" in form : retval +="<dt>Space preserve:</dt><dd> %s</dd>\n" % form["space_preserve"].value
		retval +="</dl>\n"
		retval +="</body>\n"
		retval +="</html>\n"
		return retval
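
Both handlers above rely on the same CGI-era pattern: unpack sys.exc_info() inside an except block and render the exception, and possibly its traceback, into the HTTP response. A self-contained sketch of that pattern follows; render_error_page is a hypothetical helper (not part of pyRdfa) and assumes Python 3's html module:

    import sys
    import traceback
    import html
    from io import StringIO

    def render_error_page(status="500 Internal Server Error"):
        """Format the exception currently being handled as an HTML error page."""
        # Inside an except block sys.exc_info() returns the active
        # (type, value, traceback) triple; elsewhere it is (None, None, None).
        exc_type, exc_value, tb = sys.exc_info()
        strio = StringIO()
        traceback.print_exception(exc_type, exc_value, tb, file=strio)
        retval = 'Content-type: text/html; charset=utf-8\nStatus: %s\n\n' % status
        retval += "<html><head><title>%s</title></head><body>\n" % html.escape(exc_type.__name__)
        retval += "<pre>%s</pre>\n" % html.escape(strio.getvalue())
        retval += "<pre>%s</pre>\n" % html.escape(str(exc_value))
        retval += "</body></html>\n"
        return retval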

Example 36

Project: SickRage
Source File: connectionpool.py
View license
    def urlopen(self, method, url, body=None, headers=None, retries=None,
                redirect=True, assert_same_host=True, timeout=_Default,
                pool_timeout=None, release_conn=None, **response_kw):
        """
        Get a connection from the pool and perform an HTTP request. This is the
        lowest level call for making a request, so you'll need to specify all
        the raw details.

        .. note::

           More commonly, it's appropriate to use a convenience method provided
           by :class:`.RequestMethods`, such as :meth:`request`.

        .. note::

           `release_conn` will only behave as expected if
           `preload_content=False` because we want to make
           `preload_content=False` the default behaviour someday soon without
           breaking backwards compatibility.

        :param method:
            HTTP request method (such as GET, POST, PUT, etc.)

        :param body:
            Data to send in the request body (useful for creating
            POST requests, see HTTPConnectionPool.post_url for
            more convenience).

        :param headers:
            Dictionary of custom headers to send, such as User-Agent,
            If-None-Match, etc. If None, pool headers are used. If provided,
            these headers completely replace any pool-specific headers.

        :param retries:
            Configure the number of retries to allow before raising a
            :class:`~urllib3.exceptions.MaxRetryError` exception.

            Pass ``None`` to retry until you receive a response. Pass a
            :class:`~urllib3.util.retry.Retry` object for fine-grained control
            over different types of retries.
            Pass an integer number to retry connection errors that many times,
            but no other types of errors. Pass zero to never retry.

            If ``False``, then retries are disabled and any exception is raised
            immediately. Also, instead of raising a MaxRetryError on redirects,
            the redirect response will be returned.

        :type retries: :class:`~urllib3.util.retry.Retry`, False, or an int.

        :param redirect:
            If True, automatically handle redirects (status codes 301, 302,
            303, 307, 308). Each redirect counts as a retry. Disabling retries
            will disable redirect, too.

        :param assert_same_host:
            If ``True``, will make sure that the host of the pool requests is
            consistent else will raise HostChangedError. When False, you can
            use the pool on an HTTP proxy and request foreign hosts.

        :param timeout:
            If specified, overrides the default timeout for this one
            request. It may be a float (in seconds) or an instance of
            :class:`urllib3.util.Timeout`.

        :param pool_timeout:
            If set and the pool is set to block=True, then this method will
            block for ``pool_timeout`` seconds and raise EmptyPoolError if no
            connection is available within the time period.

        :param release_conn:
            If False, then the urlopen call will not release the connection
            back into the pool once a response is received (but will release if
            you read the entire contents of the response such as when
            `preload_content=True`). This is useful if you're not preloading
            the response's content immediately. You will need to call
            ``r.release_conn()`` on the response ``r`` to return the connection
            back into the pool. If None, it takes the value of
            ``response_kw.get('preload_content', True)``.

        :param \**response_kw:
            Additional parameters are passed to
            :meth:`urllib3.response.HTTPResponse.from_httplib`
        """
        if headers is None:
            headers = self.headers

        if not isinstance(retries, Retry):
            retries = Retry.from_int(retries, redirect=redirect, default=self.retries)

        if release_conn is None:
            release_conn = response_kw.get('preload_content', True)

        # Check host
        if assert_same_host and not self.is_same_host(url):
            raise HostChangedError(self, url, retries)

        conn = None

        # Merge the proxy headers. Only do this in HTTP. We have to copy the
        # headers dict so we can safely change it without those changes being
        # reflected in anyone else's copy.
        if self.scheme == 'http':
            headers = headers.copy()
            headers.update(self.proxy_headers)

        # Must keep the exception bound to a separate variable or else Python 3
        # complains about UnboundLocalError.
        err = None

        try:
            # Request a connection from the queue.
            timeout_obj = self._get_timeout(timeout)
            conn = self._get_conn(timeout=pool_timeout)

            conn.timeout = timeout_obj.connect_timeout

            is_new_proxy_conn = self.proxy is not None and not getattr(conn, 'sock', None)
            if is_new_proxy_conn:
                self._prepare_proxy(conn)

            # Make the request on the httplib connection object.
            httplib_response = self._make_request(conn, method, url,
                                                  timeout=timeout_obj,
                                                  body=body, headers=headers)

            # If we're going to release the connection in ``finally:``, then
            # the request doesn't need to know about the connection. Otherwise
            # it will also try to release it and we'll have a double-release
            # mess.
            response_conn = not release_conn and conn

            # Import httplib's response into our own wrapper object
            response = HTTPResponse.from_httplib(httplib_response,
                                                 pool=self,
                                                 connection=response_conn,
                                                 **response_kw)

            # else:
            #     The connection will be put back into the pool when
            #     ``response.release_conn()`` is called (implicitly by
            #     ``response.read()``)

        except Empty:
            # Timed out by queue.
            raise EmptyPoolError(self, "No pool connections are available.")

        except (BaseSSLError, CertificateError) as e:
            # Close the connection. If a connection is reused on which there
            # was a Certificate error, the next request will certainly raise
            # another Certificate error.
            conn = conn and conn.close()
            release_conn = True
            raise SSLError(e)

        except SSLError:
            # Treat SSLError separately from BaseSSLError to preserve
            # traceback.
            conn = conn and conn.close()
            release_conn = True
            raise

        except (TimeoutError, HTTPException, SocketError, ProtocolError) as e:
            # Discard the connection for these exceptions. It will be
            # replaced during the next _get_conn() call.
            conn = conn and conn.close()
            release_conn = True

            if isinstance(e, (SocketError, NewConnectionError)) and self.proxy:
                e = ProxyError('Cannot connect to proxy.', e)
            elif isinstance(e, (SocketError, HTTPException)):
                e = ProtocolError('Connection aborted.', e)

            retries = retries.increment(method, url, error=e, _pool=self,
                                        _stacktrace=sys.exc_info()[2])
            retries.sleep()

            # Keep track of the error for the retry warning.
            err = e

        finally:
            if release_conn:
                # Put the connection back to be reused. If the connection is
                # expired then it will be None, which will get replaced with a
                # fresh connection during _get_conn.
                self._put_conn(conn)

        if not conn:
            # Try again
            log.warning("Retrying (%r) after connection "
                        "broken by '%r': %s" % (retries, err, url))
            return self.urlopen(method, url, body, headers, retries,
                                redirect, assert_same_host,
                                timeout=timeout, pool_timeout=pool_timeout,
                                release_conn=release_conn, **response_kw)

        # Handle redirect?
        redirect_location = redirect and response.get_redirect_location()
        if redirect_location:
            if response.status == 303:
                method = 'GET'

            try:
                retries = retries.increment(method, url, response=response, _pool=self)
            except MaxRetryError:
                if retries.raise_on_redirect:
                    # Release the connection for this response, since we're not
                    # returning it to be released manually.
                    response.release_conn()
                    raise
                return response

            log.info("Redirecting %s -> %s" % (url, redirect_location))
            return self.urlopen(
                method, redirect_location, body, headers,
                retries=retries, redirect=redirect,
                assert_same_host=assert_same_host,
                timeout=timeout, pool_timeout=pool_timeout,
                release_conn=release_conn, **response_kw)

        # Check if we should retry the HTTP response.
        if retries.is_forced_retry(method, status_code=response.status):
            retries = retries.increment(method, url, response=response, _pool=self)
            retries.sleep()
            log.info("Forced retry: %s" % url)
            return self.urlopen(
                method, url, body, headers,
                retries=retries, redirect=redirect,
                assert_same_host=assert_same_host,
                timeout=timeout, pool_timeout=pool_timeout,
                release_conn=release_conn, **response_kw)

        return response
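
Note what this code takes from sys.exc_info(): only element [2], the traceback object, which Retry.increment stores so that, once the retry budget is exhausted, the error can be re-raised with the stack trace of the original failure rather than one pointing at the retry machinery. The same idea in miniature (a sketch with a toy retry counter, not urllib3's Retry class):

    import sys

    def with_retries(fn, attempts=3):
        """Call fn() up to attempts times, preserving the last failure's traceback."""
        last_exc, last_tb = None, None
        for _ in range(attempts):
            try:
                return fn()
            except Exception as e:
                last_exc = e
                last_tb = sys.exc_info()[2]   # keep only the traceback object
        # Re-raise the final failure with its original traceback attached.
        raise last_exc.with_traceback(last_tb)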

Example 37

View license
    def run(self, test):
        """
        Execute the test (which may be a test suite). If the test is a suite,
        distribute it out among as many processes as have been configured, at
        as fine a level as is possible given the context fixtures defined in
        the suite or any sub-suites.

        """
        log.debug("%s.run(%s) (%s)", self, test, os.getpid())
        wrapper = self.config.plugins.prepareTest(test)
        if wrapper is not None:
            test = wrapper

        # plugins can decorate or capture the output stream
        wrapped = self.config.plugins.setOutputStream(self.stream)
        if wrapped is not None:
            self.stream = wrapped

        testQueue = Queue()
        resultQueue = Queue()
        tasks = []
        completed = []
        workers = []
        to_teardown = []
        shouldStop = Event()

        result = self._makeResult()
        start = time.time()

        self.collect(test, testQueue, tasks, to_teardown, result)

        log.debug("Starting %s workers", self.config.multiprocess_workers)
        for i in range(self.config.multiprocess_workers):
            p = self.startProcess(i, testQueue, resultQueue, shouldStop, result)
            workers.append(p)
            log.debug("Started worker process %s", i+1)

        total_tasks = len(tasks)
        # need to keep track of the next time to check for timeouts in case
        # more than one process times out at the same time.
        nexttimeout=self.config.multiprocess_timeout
        thrownError = None

        try:
            while tasks:
                log.debug("Waiting for results (%s/%s tasks), next timeout=%.3fs",
                          len(completed), total_tasks,nexttimeout)
                try:
                    iworker, addr, newtask_addrs, batch_result = resultQueue.get(
                                                            timeout=nexttimeout)
                    log.debug('Results received for worker %d, %s, new tasks: %d',
                              iworker,addr,len(newtask_addrs))
                    try:
                        try:
                            tasks.remove(addr)
                        except ValueError:
                            log.warn('worker %s failed to remove from tasks: %s',
                                     iworker,addr)
                        total_tasks += len(newtask_addrs)
                        tasks.extend(newtask_addrs)
                    except KeyError:
                        log.debug("Got result for unknown task? %s", addr)
                        log.debug("current: %s",str(list(tasks)[0]))
                    else:
                        completed.append([addr,batch_result])
                    self.consolidate(result, batch_result)
                    if (self.config.stopOnError
                        and not result.wasSuccessful()):
                        # set the stop condition
                        shouldStop.set()
                        break
                    if self.config.multiprocess_restartworker:
                        log.debug('joining worker %s',iworker)
                        # Wait for the worker; it is not critical if it cannot
                        # be joined, because workers that add to testQueue will
                        # not terminate until all of their items have been read.
                        workers[iworker].join(timeout=1)
                        if not shouldStop.is_set() and not testQueue.empty():
                            log.debug('starting new process on worker %s',iworker)
                            workers[iworker] = self.startProcess(iworker, testQueue, resultQueue, shouldStop, result)
                except Empty:
                    log.debug("Timed out with %s tasks pending "
                              "(empty testQueue=%r): %s",
                              len(tasks),testQueue.empty(),str(tasks))
                    any_alive = False
                    for iworker, w in enumerate(workers):
                        if w.is_alive():
                            worker_addr = bytes_(w.currentaddr.value,'ascii')
                            timeprocessing = time.time() - w.currentstart.value
                            if ( len(worker_addr) == 0
                                    and timeprocessing > self.config.multiprocess_timeout-0.1):
                                log.debug('worker %d has finished its work item, '
                                          'but is not exiting? do we wait for it?',
                                          iworker)
                            else:
                                any_alive = True
                            if (len(worker_addr) > 0
                                and timeprocessing > self.config.multiprocess_timeout-0.1):
                                log.debug('timed out worker %s: %s',
                                          iworker,worker_addr)
                                w.currentaddr.value = bytes_('')
                                # If the process is in C++ code, sending a SIGILL
                                # might not raise a Python KeyboardInterrupt
                                # exception; therefore, send multiple signals until
                                # an exception is caught. If this takes too long,
                                # terminate the process.
                                w.keyboardCaught.clear()
                                startkilltime = time.time()
                                while not w.keyboardCaught.is_set() and w.is_alive():
                                    if time.time()-startkilltime > self.waitkilltime:
                                        # have to terminate...
                                        log.error("terminating worker %s",iworker)
                                        w.terminate()
                                        # there is a small probability that the
                                        # terminated process might send a result,
                                        # which has to be specially handled or
                                        # else processes might get orphaned.
                                        workers[iworker] = w = self.startProcess(iworker, testQueue, resultQueue, shouldStop, result)
                                        break
                                    os.kill(w.pid, signal.SIGILL)
                                    time.sleep(0.1)
                    if not any_alive and testQueue.empty():
                        log.debug("All workers dead")
                        break
                nexttimeout=self.config.multiprocess_timeout
                for w in workers:
                    if w.is_alive() and len(w.currentaddr.value) > 0:
                        timeprocessing = time.time()-w.currentstart.value
                        if timeprocessing <= self.config.multiprocess_timeout:
                            nexttimeout = min(nexttimeout,
                                self.config.multiprocess_timeout-timeprocessing)
            log.debug("Completed %s tasks (%s remain)", len(completed), len(tasks))

        except (KeyboardInterrupt, SystemExit) as e:
            log.info('parent received ctrl-c when waiting for test results')
            thrownError = e
            #resultQueue.get(False)
                
            result.addError(test, sys.exc_info())

        try:
            for case in to_teardown:
                log.debug("Tearing down shared fixtures for %s", case)
                try:
                    case.tearDown()
                except (KeyboardInterrupt, SystemExit):
                    raise
                except:
                    result.addError(case, sys.exc_info())

            stop = time.time()

            # first write since can freeze on shutting down processes
            result.printErrors()
            result.printSummary(start, stop)
            self.config.plugins.finalize(result)

            if thrownError is None:
                log.debug("Tell all workers to stop")
                for w in workers:
                    if w.is_alive():
                        testQueue.put('STOP', block=False)

            # wait for the workers to end
            for iworker,worker in enumerate(workers):
                if worker.is_alive():
                    log.debug('joining worker %s',iworker)
                    worker.join()
                    if worker.is_alive():
                        log.debug('failed to join worker %s',iworker)
        except (KeyboardInterrupt, SystemExit):
            log.info('parent received ctrl-c when shutting down: stop all processes')
            for worker in workers:
                if worker.is_alive():
                    worker.terminate()

            if thrownError: raise thrownError
            else: raise

        return result
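
Both call sites in this runner pass sys.exc_info() directly to result.addError, which is exactly the shape unittest's TestResult API expects: an (exc_type, exc_value, traceback) triple. The surrounding teardown loop also shows the record-and-continue idiom, sketched below with illustrative names (not nose's):

    import sys

    def teardown_all(cases):
        """Run every teardown, recording failures instead of aborting on the first."""
        errors = []
        for case in cases:
            try:
                case.tearDown()
            except (KeyboardInterrupt, SystemExit):
                raise  # never swallow these
            except Exception:
                # The triple from sys.exc_info() is what reporting APIs
                # such as TestResult.addError consume.
                errors.append((case, sys.exc_info()))
        return errors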

Example 38

Project: tp-qemu
Source File: floppy.py
View license
@error.context_aware
def run(test, params, env):
    """
    Test virtual floppy of guest:

    1) Create a floppy disk image on host
    2) Start the guest with this floppy image.
    3) Make a file system on guest virtual floppy.
    4) Calculate md5sum value of a file and copy it into floppy.
    5) Verify whether the md5sum does match.

    :param test: QEMU test object.
    :param params: Dictionary with the test parameters.
    :param env: Dictionary with test environment.
    """
    source_file = params["source_file"]
    dest_file = params["dest_file"]
    login_timeout = int(params.get("login_timeout", 360))
    floppy_prepare_timeout = int(params.get("floppy_prepare_timeout", 360))
    guest_floppy_path = params["guest_floppy_path"]

    def create_floppy(params, prepare=True):
        """
        Creates a 'new', blank floppy disk image.

        :param params: parameters for test
        :param prepare: if True, prepare (zero-fill) the floppy image.

        :return: path to new floppy file.
        """
        error.context("creating test floppy", logging.info)
        floppy = params["floppy_name"]
        if not os.path.isabs(floppy):
            floppy = os.path.join(data_dir.get_data_dir(), floppy)
        if prepare:
            utils.run("dd if=/dev/zero of=%s bs=512 count=2880" % floppy)
        return floppy

    def cleanup_floppy(path):
        """ Removes created floppy """
        error.context("cleaning up temp floppy images", logging.info)
        os.remove("%s" % path)

    def lazy_copy(vm, dst_path, check_path, copy_timeout=None, dsize=None):
        """
        Start disk load: cyclically append data to dst_path while logging
        progress to check_path.

        :param vm: VM where to find a disk.
        :param dst_path: Destination path for the copied data.
        :param check_path: File used to verify that the copy is running.
        :param copy_timeout: Timeout for the copy.
        :param dsize: Size of the data block which is periodically copied.
        """
        if copy_timeout is None:
            copy_timeout = 120
        session = vm.wait_for_login(timeout=login_timeout)
        cmd = ('nohup bash -c "while [ true ]; do echo \"1\" | '
               'tee -a %s >> %s; sleep 0.1; done" 2> /dev/null &' %
               (check_path, dst_path))
        pid = re.search(r"\[.+\] (.+)",
                        session.cmd_output(cmd, timeout=copy_timeout))
        return pid.group(1)

    class MiniSubtest(object):

        def __new__(cls, *args, **kargs):
            self = super(MiniSubtest, cls).__new__(cls)
            ret = None
            exc_info = None
            if args is None:
                args = []
            try:
                try:
                    ret = self.test(*args, **kargs)
                except Exception:
                    exc_info = sys.exc_info()
            finally:
                if hasattr(self, "clean"):
                    try:
                        self.clean()
                    except Exception:
                        if exc_info is None:
                            raise
                    if exc_info:
                        raise exc_info[0], exc_info[1], exc_info[2]
            return ret

    class test_singlehost(MiniSubtest):

        def test(self):
            create_floppy(params)
            params["start_vm"] = "yes"
            vm_name = params.get("main_vm", "vm1")
            env_process.preprocess_vm(test, params, env, vm_name)
            vm = env.get_vm(vm_name)
            vm.verify_alive()
            self.session = vm.wait_for_login(timeout=login_timeout)

            self.dest_dir = params.get("mount_dir")
            # If mount_dir is specified, treat the guest as a Linux OS.
            # Some Linux distributions do not load the floppy driver at boot,
            # and Windows needs time to load and initialize it.
            if self.dest_dir:
                lsmod = self.session.cmd("lsmod")
                if 'floppy' not in lsmod:
                    self.session.cmd("modprobe floppy")
            else:
                time.sleep(20)

            error.context("Formating floppy disk before using it")
            format_cmd = params["format_floppy_cmd"]
            self.session.cmd(format_cmd, timeout=120)
            logging.info("Floppy disk formatted successfully")

            if self.dest_dir:
                error.context("Mounting floppy")
                self.session.cmd("mount %s %s" % (guest_floppy_path,
                                                  self.dest_dir))
            error.context("Testing floppy")
            self.session.cmd(params["test_floppy_cmd"])

            error.context("Copying file to the floppy")
            md5_cmd = params.get("md5_cmd")
            if md5_cmd:
                md5_source = self.session.cmd("%s %s" % (md5_cmd, source_file))
                try:
                    md5_source = md5_source.split(" ")[0]
                except IndexError:
                    raise error.TestError("Failed to get md5 from source file,"
                                          " output: '%s'" % md5_source)
            else:
                md5_source = None

            self.session.cmd("%s %s %s" % (params["copy_cmd"], source_file,
                                           dest_file))
            logging.info("Succeed to copy file '%s' into floppy disk" %
                         source_file)

            error.context("Checking if the file is unchanged after copy")
            if md5_cmd:
                md5_dest = self.session.cmd("%s %s" % (md5_cmd, dest_file))
                try:
                    md5_dest = md5_dest.split(" ")[0]
                except IndexError:
                    raise error.TestError("Failed to get md5 from dest file,"
                                          " output: '%s'" % md5_dest)
                if md5_source != md5_dest:
                    raise error.TestFail("File changed after copy to floppy")
            else:
                md5_dest = None
                self.session.cmd("%s %s %s" % (params["diff_file_cmd"],
                                               source_file, dest_file))

        def clean(self):
            clean_cmd = "%s %s" % (params["clean_cmd"], dest_file)
            self.session.cmd(clean_cmd)
            if self.dest_dir:
                self.session.cmd("umount %s" % self.dest_dir)
            self.session.close()

    class Multihost(MiniSubtest):

        def test(self):
            error.context("Preparing migration env and floppies.", logging.info)
            mig_protocol = params.get("mig_protocol", "tcp")
            self.mig_type = migration.MultihostMigration
            if mig_protocol == "fd":
                self.mig_type = migration.MultihostMigrationFd
            if mig_protocol == "exec":
                self.mig_type = migration.MultihostMigrationExec
            if "rdma" in mig_protocol:
                self.mig_type = migration.MultihostMigrationRdma

            self.vms = params.get("vms").split(" ")
            self.srchost = params["hosts"][0]
            self.dsthost = params["hosts"][1]
            self.is_src = params["hostid"] == self.srchost
            self.mig = self.mig_type(test, params, env, False, )

            if self.is_src:
                vm = env.get_vm(self.vms[0])
                vm.destroy()
                self.floppy = create_floppy(params)
                self.floppy_dir = os.path.dirname(self.floppy)
                params["start_vm"] = "yes"
                env_process.process(test, params, env,
                                    env_process.preprocess_image,
                                    env_process.preprocess_vm)
                vm = env.get_vm(self.vms[0])
                vm.wait_for_login(timeout=login_timeout)
            else:
                self.floppy = create_floppy(params, False)
                self.floppy_dir = os.path.dirname(self.floppy)

        def clean(self):
            self.mig.cleanup()
            if self.is_src:
                cleanup_floppy(self.floppy)

    class test_multihost_write(Multihost):

        def test(self):
            super(test_multihost_write, self).test()

            copy_timeout = int(params.get("copy_timeout", 480))
            self.mount_dir = params["mount_dir"]
            format_floppy_cmd = params["format_floppy_cmd"]
            check_copy_path = params["check_copy_path"]

            pid = None
            sync_id = {'src': self.srchost,
                       'dst': self.dsthost,
                       "type": "file_trasfer"}
            filename = "orig"
            src_file = os.path.join(self.mount_dir, filename)

            if self.is_src:  # Starts in source
                vm = env.get_vm(self.vms[0])
                session = vm.wait_for_login(timeout=login_timeout)

                if self.mount_dir:
                    session.cmd("rm -f %s" % (src_file))
                    session.cmd("rm -f %s" % (check_copy_path))
                # If mount_dir is specified, treat the guest as a Linux OS.
                # Some Linux distributions do not load the floppy driver at
                # boot, and Windows needs time to load and initialize it.
                error.context("Prepare floppy for writing.", logging.info)
                if self.mount_dir:
                    lsmod = session.cmd("lsmod")
                    if 'floppy' not in lsmod:
                        session.cmd("modprobe floppy")
                else:
                    time.sleep(20)

                session.cmd(format_floppy_cmd)

                error.context("Mount and copy data.", logging.info)
                if self.mount_dir:
                    session.cmd("mount %s %s" % (guest_floppy_path,
                                                 self.mount_dir),
                                timeout=30)

                error.context("File copying test.", logging.info)

                pid = lazy_copy(vm, src_file, check_copy_path, copy_timeout)

            sync = SyncData(self.mig.master_id(), self.mig.hostid,
                            self.mig.hosts, sync_id, self.mig.sync_server)

            pid = sync.sync(pid, timeout=floppy_prepare_timeout)[self.srchost]

            self.mig.migrate_wait([self.vms[0]], self.srchost, self.dsthost)

            if not self.is_src:  # Starts in destination
                vm = env.get_vm(self.vms[0])
                session = vm.wait_for_login(timeout=login_timeout)
                error.context("Wait for copy finishing.", logging.info)
                status = session.cmd_status("kill %s" % pid,
                                            timeout=copy_timeout)
                if status != 0:
                    raise error.TestFail("Copy process was terminatted with"
                                         " error code %s" % (status))

                session.cmd_status("kill -s SIGINT %s" % (pid),
                                   timeout=copy_timeout)

                error.context("Check floppy file checksum.", logging.info)
                md5_cmd = params.get("md5_cmd", "md5sum")
                if md5_cmd:
                    md5_floppy = session.cmd("%s %s" % (md5_cmd, src_file))
                    try:
                        md5_floppy = md5_floppy.split(" ")[0]
                    except IndexError:
                        raise error.TestError("Failed to get md5 from source file,"
                                              " output: '%s'" % md5_floppy)
                    md5_check = session.cmd("%s %s" % (md5_cmd, check_copy_path))
                    try:
                        md5_check = md5_check.split(" ")[0]
                    except IndexError:
                        raise error.TestError("Failed to get md5 from dst file,"
                                              " output: '%s'" % md5_check)
                    if md5_check != md5_floppy:
                        raise error.TestFail("There is mistake in copying, "
                                             "it is possible to check file on vm.")

                session.cmd("rm -f %s" % (src_file))
                session.cmd("rm -f %s" % (check_copy_path))

            self.mig._hosts_barrier(self.mig.hosts, self.mig.hosts,
                                    'finish_floppy_test', login_timeout)

        def clean(self):
            super(test_multihost_write, self).clean()

    class test_multihost_eject(Multihost):

        def test(self):
            super(test_multihost_eject, self).test()

            self.mount_dir = params.get("mount_dir", None)
            format_floppy_cmd = params["format_floppy_cmd"]
            floppy = params["floppy_name"]
            second_floppy = params["second_floppy_name"]
            if not os.path.isabs(floppy):
                floppy = os.path.join(data_dir.get_data_dir(), floppy)
            if not os.path.isabs(second_floppy):
                second_floppy = os.path.join(data_dir.get_data_dir(),
                                             second_floppy)
            if not self.is_src:
                self.floppy = create_floppy(params)

            pid = None
            sync_id = {'src': self.srchost,
                       'dst': self.dsthost,
                       "type": "file_trasfer"}
            filename = "orig"
            src_file = os.path.join(self.mount_dir, filename)

            if self.is_src:  # Starts in source
                vm = env.get_vm(self.vms[0])
                session = vm.wait_for_login(timeout=login_timeout)

                if self.mount_dir:   # If linux
                    session.cmd("rm -f %s" % (src_file))
                # If mount_dir is specified, treat the guest as a Linux OS.
                # Some Linux distributions do not load the floppy driver at
                # boot, and Windows needs time to load and initialize it.
                error.context("Prepare floppy for writing.", logging.info)
                if self.mount_dir:   # If linux
                    lsmod = session.cmd("lsmod")
                    if 'floppy' not in lsmod:
                        session.cmd("modprobe floppy")
                else:
                    time.sleep(20)

                if floppy not in vm.monitor.info("block"):
                    raise error.TestFail("Wrong floppy image is placed in vm.")

                try:
                    session.cmd(format_floppy_cmd)
                except aexpect.ShellCmdError, e:
                    if e.status == 1:
                        logging.error("First access to floppy failed, "
                                      " Trying a second time as a workaround")
                        session.cmd(format_floppy_cmd)

                error.context("Check floppy")
                if self.mount_dir:   # If linux
                    session.cmd("mount %s %s" % (guest_floppy_path,
                                                 self.mount_dir), timeout=30)
                    session.cmd("umount %s" % (self.mount_dir), timeout=30)

                written = None
                if self.mount_dir:
                    filepath = os.path.join(self.mount_dir, "test.txt")
                    session.cmd("echo 'test' > %s" % (filepath))
                    output = session.cmd("cat %s" % (filepath))
                    written = "test\n"
                else:   # Windows version.
                    filepath = "A:\\test.txt"
                    session.cmd("echo test > %s" % (filepath))
                    output = session.cmd("type %s" % (filepath))
                    written = "test \n\n"
                if output != written:
                    raise error.TestFail("Data read from the floppy differs"
                                         "from the data written to it."
                                         " EXPECTED: %s GOT: %s" %
                                         (repr(written), repr(output)))

                error.context("Change floppy.")
                vm.monitor.cmd("eject floppy0")
                vm.monitor.cmd("change floppy %s" % (second_floppy))
                session.cmd(format_floppy_cmd)

                error.context("Mount and copy data")
                if self.mount_dir:   # If linux
                    session.cmd("mount %s %s" % (guest_floppy_path,
                                                 self.mount_dir), timeout=30)

                if second_floppy not in vm.monitor.info("block"):
                    raise error.TestFail("Wrong floppy image is placed in vm.")

            sync = SyncData(self.mig.master_id(), self.mig.hostid,
                            self.mig.hosts, sync_id, self.mig.sync_server)

            pid = sync.sync(pid, timeout=floppy_prepare_timeout)[self.srchost]

            self.mig.migrate_wait([self.vms[0]], self.srchost, self.dsthost)

            if not self.is_src:  # Starts in destination
                vm = env.get_vm(self.vms[0])
                session = vm.wait_for_login(timeout=login_timeout)
                written = None
                if self.mount_dir:
                    filepath = os.path.join(self.mount_dir, "test.txt")
                    session.cmd("echo 'test' > %s" % (filepath))
                    output = session.cmd("cat %s" % (filepath))
                    written = "test\n"
                else:   # Windows version.
                    filepath = "A:\\test.txt"
                    session.cmd("echo test > %s" % (filepath))
                    output = session.cmd("type %s" % (filepath))
                    written = "test \n\n"
                if output != written:
                    raise error.TestFail("Data read from the floppy differs"
                                         "from the data written to it."
                                         " EXPECTED: %s GOT: %s" %
                                         (repr(written), repr(output)))

            self.mig._hosts_barrier(self.mig.hosts, self.mig.hosts,
                                    'finish_floppy_test', login_timeout)

        def clean(self):
            super(test_multihost_eject, self).clean()

    test_type = params.get("test_type", "test_singlehost")
    if test_type in locals():
        tests_group = locals()[test_type]
        tests_group()
    else:
        raise error.TestFail("Test group '%s' is not defined in"
                             " the floppy test" % test_type)

Example 39

Project: headphones
Source File: connectionpool.py
View license
    def urlopen(self, method, url, body=None, headers=None, retries=None,
                redirect=True, assert_same_host=True, timeout=_Default,
                pool_timeout=None, release_conn=None, **response_kw):
        """
        Get a connection from the pool and perform an HTTP request. This is the
        lowest level call for making a request, so you'll need to specify all
        the raw details.

        .. note::

           More commonly, it's appropriate to use a convenience method provided
           by :class:`.RequestMethods`, such as :meth:`request`.

        .. note::

           `release_conn` will only behave as expected if
           `preload_content=False` because we want to make
           `preload_content=False` the default behaviour someday soon without
           breaking backwards compatibility.

        :param method:
            HTTP request method (such as GET, POST, PUT, etc.)

        :param body:
            Data to send in the request body (useful for creating
            POST requests, see HTTPConnectionPool.post_url for
            more convenience).

        :param headers:
            Dictionary of custom headers to send, such as User-Agent,
            If-None-Match, etc. If None, pool headers are used. If provided,
            these headers completely replace any pool-specific headers.

        :param retries:
            Configure the number of retries to allow before raising a
            :class:`~urllib3.exceptions.MaxRetryError` exception.

            Pass ``None`` to retry until you receive a response. Pass a
            :class:`~urllib3.util.retry.Retry` object for fine-grained control
            over different types of retries.
            Pass an integer number to retry connection errors that many times,
            but no other types of errors. Pass zero to never retry.

            If ``False``, then retries are disabled and any exception is raised
            immediately. Also, instead of raising a MaxRetryError on redirects,
            the redirect response will be returned.

        :type retries: :class:`~urllib3.util.retry.Retry`, False, or an int.

        :param redirect:
            If True, automatically handle redirects (status codes 301, 302,
            303, 307, 308). Each redirect counts as a retry. Disabling retries
            will disable redirect, too.

        :param assert_same_host:
            If ``True``, will make sure that the host of the pool requests is
            consistent else will raise HostChangedError. When False, you can
            use the pool on an HTTP proxy and request foreign hosts.

        :param timeout:
            If specified, overrides the default timeout for this one
            request. It may be a float (in seconds) or an instance of
            :class:`urllib3.util.Timeout`.

        :param pool_timeout:
            If set and the pool is set to block=True, then this method will
            block for ``pool_timeout`` seconds and raise EmptyPoolError if no
            connection is available within the time period.

        :param release_conn:
            If False, then the urlopen call will not release the connection
            back into the pool once a response is received (but will release if
            you read the entire contents of the response such as when
            `preload_content=True`). This is useful if you're not preloading
            the response's content immediately. You will need to call
            ``r.release_conn()`` on the response ``r`` to return the connection
            back into the pool. If None, it takes the value of
            ``response_kw.get('preload_content', True)``.

        :param \**response_kw:
            Additional parameters are passed to
            :meth:`urllib3.response.HTTPResponse.from_httplib`
        """
        if headers is None:
            headers = self.headers

        if not isinstance(retries, Retry):
            retries = Retry.from_int(retries, redirect=redirect, default=self.retries)

        if release_conn is None:
            release_conn = response_kw.get('preload_content', True)

        # Check host
        if assert_same_host and not self.is_same_host(url):
            raise HostChangedError(self, url, retries)

        conn = None

        # Merge the proxy headers. Only do this in HTTP. We have to copy the
        # headers dict so we can safely change it without those changes being
        # reflected in anyone else's copy.
        if self.scheme == 'http':
            headers = headers.copy()
            headers.update(self.proxy_headers)

        # Must keep the exception bound to a separate variable or else Python 3
        # complains about UnboundLocalError.
        err = None

        try:
            # Request a connection from the queue.
            timeout_obj = self._get_timeout(timeout)
            conn = self._get_conn(timeout=pool_timeout)

            conn.timeout = timeout_obj.connect_timeout

            is_new_proxy_conn = self.proxy is not None and not getattr(conn, 'sock', None)
            if is_new_proxy_conn:
                self._prepare_proxy(conn)

            # Make the request on the httplib connection object.
            httplib_response = self._make_request(conn, method, url,
                                                  timeout=timeout_obj,
                                                  body=body, headers=headers)

            # If we're going to release the connection in ``finally:``, then
            # the request doesn't need to know about the connection. Otherwise
            # it will also try to release it and we'll have a double-release
            # mess.
            response_conn = not release_conn and conn

            # Import httplib's response into our own wrapper object
            response = HTTPResponse.from_httplib(httplib_response,
                                                 pool=self,
                                                 connection=response_conn,
                                                 **response_kw)

            # else:
            #     The connection will be put back into the pool when
            #     ``response.release_conn()`` is called (implicitly by
            #     ``response.read()``)

        except Empty:
            # Timed out by queue.
            raise EmptyPoolError(self, "No pool connections are available.")

        except (BaseSSLError, CertificateError) as e:
            # Close the connection. If a connection is reused on which there
            # was a Certificate error, the next request will certainly raise
            # another Certificate error.
            conn = conn and conn.close()
            release_conn = True
            raise SSLError(e)

        except SSLError:
            # Treat SSLError separately from BaseSSLError to preserve
            # traceback.
            conn = conn and conn.close()
            release_conn = True
            raise

        except (TimeoutError, HTTPException, SocketError, ProtocolError) as e:
            # Discard the connection for these exceptions. It will be
            # replaced during the next _get_conn() call.
            conn = conn and conn.close()
            release_conn = True

            if isinstance(e, (SocketError, NewConnectionError)) and self.proxy:
                e = ProxyError('Cannot connect to proxy.', e)
            elif isinstance(e, (SocketError, HTTPException)):
                e = ProtocolError('Connection aborted.', e)

            retries = retries.increment(method, url, error=e, _pool=self,
                                        _stacktrace=sys.exc_info()[2])
            retries.sleep()

            # Keep track of the error for the retry warning.
            err = e

        finally:
            if release_conn:
                # Put the connection back to be reused. If the connection is
                # expired then it will be None, which will get replaced with a
                # fresh connection during _get_conn.
                self._put_conn(conn)

        if not conn:
            # Try again
            log.warning("Retrying (%r) after connection "
                        "broken by '%r': %s" % (retries, err, url))
            return self.urlopen(method, url, body, headers, retries,
                                redirect, assert_same_host,
                                timeout=timeout, pool_timeout=pool_timeout,
                                release_conn=release_conn, **response_kw)

        # Handle redirect?
        redirect_location = redirect and response.get_redirect_location()
        if redirect_location:
            if response.status == 303:
                method = 'GET'

            try:
                retries = retries.increment(method, url, response=response, _pool=self)
            except MaxRetryError:
                if retries.raise_on_redirect:
                    # Release the connection for this response, since we're not
                    # returning it to be released manually.
                    response.release_conn()
                    raise
                return response

            log.info("Redirecting %s -> %s" % (url, redirect_location))
            return self.urlopen(method, redirect_location, body, headers,
                    retries=retries, redirect=redirect,
                    assert_same_host=assert_same_host,
                    timeout=timeout, pool_timeout=pool_timeout,
                    release_conn=release_conn, **response_kw)

        # Check if we should retry the HTTP response.
        if retries.is_forced_retry(method, status_code=response.status):
            retries = retries.increment(method, url, response=response, _pool=self)
            retries.sleep()
            log.info("Forced retry: %s" % url)
            return self.urlopen(method, url, body, headers,
                    retries=retries, redirect=redirect,
                    assert_same_host=assert_same_host,
                    timeout=timeout, pool_timeout=pool_timeout,
                    release_conn=release_conn, **response_kw)

        return response
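
As in Example 36, this copy of urllib3 keeps only element [2] of the sys.exc_info() triple. One caveat when a traceback object is stored anywhere longer-lived: it references every frame on the failing stack, and those frames keep their local variables alive. Code that needs only a detail from the traceback should extract it and drop the reference, as in this small sketch (all names are illustrative):

    import sys

    def record_failure(registry, key, fn):
        """Run fn(); on failure store only the line number, then drop the traceback."""
        try:
            fn()
        except Exception:
            tb = sys.exc_info()[2]
            try:
                registry[key] = tb.tb_lineno  # keep a number, not the frames
            finally:
                del tb  # avoid pinning the whole stack in memory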

Example 40

Project: spiderfoot
Source File: sfscan.py
View license
    def startScan(self):
        global globalScanStatus

        self.ts = threading.local()
        self.ts.moduleInstances = dict()
        self.ts.sf = SpiderFoot(self.temp['config'])
        self.ts.config = deepcopy(self.temp['config'])
        self.ts.dbh = SpiderFootDb(self.temp['config'])
        self.ts.targetValue = self.temp['targetValue']
        self.ts.targetType = self.temp['targetType']
        self.ts.moduleList = self.temp['moduleList']
        self.ts.modconfig = dict()
        self.ts.scanName = self.temp['scanName']
        self.ts.scanId = self.temp['scanId']
        aborted = False
        self.ts.sf.setDbh(self.ts.dbh)

        # Create a unique ID for this scan and create it in the back-end DB.
        self.ts.sf.setGUID(self.ts.scanId)
        self.ts.dbh.scanInstanceCreate(self.ts.scanId,
                                       self.ts.scanName, self.ts.targetValue)
        self.setStatus("STARTING", time.time() * 1000, None)
        # Create our target
        target = SpiderFootTarget(self.ts.targetValue, self.ts.targetType)

        # Save the config current set for this scan
        self.ts.config['_modulesenabled'] = self.ts.moduleList
        self.ts.dbh.scanConfigSet(self.ts.scanId,
                                  self.ts.sf.configSerialize(deepcopy(self.ts.config)))

        self.ts.sf.status("Scan [" + self.ts.scanId + "] initiated.")
        # moduleList = list of modules the user wants to run
        try:
            # Process global options that point to other places for data

            # Save default socket methods that will be overridden
            if not hasattr(socket, 'savedsocket'):
                socket.savedsocket = socket.socket
                socket.savedcreate_connection = socket.create_connection
                #socket.savedgetaddrinfo = socket.getaddrinfo

            # If a SOCKS server was specified, set it up
            if self.ts.config['_socks1type'] != '':
                socksType = socks.PROXY_TYPE_SOCKS4
                socksDns = self.ts.config['_socks6dns']
                socksAddr = self.ts.config['_socks2addr']
                socksPort = int(self.ts.config['_socks3port'])
                socksUsername = ''
                socksPassword = ''

                if self.ts.config['_socks1type'] == '4':
                    socksType = socks.PROXY_TYPE_SOCKS4
                if self.ts.config['_socks1type'] == '5':
                    socksType = socks.PROXY_TYPE_SOCKS5
                    socksUsername = self.ts.config['_socks4user']
                    socksPassword = self.ts.config['_socks5pwd']

                if self.ts.config['_socks1type'] == 'HTTP':
                    socksType = socks.PROXY_TYPE_HTTP

                if self.ts.config['_socks1type'] == 'TOR':
                    socksType = socks.PROXY_TYPE_SOCKS5

                self.ts.sf.debug("SOCKS: " + socksAddr + ":" + str(socksPort) + \
                                 "(" + socksUsername + ":" + socksPassword + ")")
                socks.setdefaultproxy(socksType, socksAddr, socksPort,
                                      socksDns, socksUsername, socksPassword)

                # Override the default socket and getaddrinfo calls with the
                # SOCKS ones. Just ensure we don't also try to SOCKS-proxy
                # connectivity to the TOR control port.
                def _create_connection(address, timeout=None, source_address=None):
                    if socksAddr not in address:
                        sock = socks.socksocket()
                        sock.setproxy(socks.PROXY_TYPE_SOCKS5, socksAddr, socksPort)
                        sock.connect(address)
                        return sock
                    else:
                        # Connect directly, instantiating the saved, non-SOCKS
                        # socket class, so the control port is not proxied.
                        sock = socket.savedsocket()
                        sock.connect(address)
                        return sock

                socket.socket = socks.socksocket
                socket.create_connection = _create_connection
                #socket.getaddrinfo = socks.getaddrinfo
                self.ts.sf.updateSocket(socket)
            else:
                # BUG: If the user had a SOCKS proxy set
                # and then decided to unset it, the original socket class
                # is not reverted to its default state - we still have
                # the SOCKS version of socket.
                socket.socket = socket.savedsocket
                socket.create_connection = socket.savedcreate_connection
                #socket.getaddrinfo = socket.savedgetaddrinfo
                self.ts.sf.revertSocket()

            # Override the default DNS server
            if self.ts.config['_dnsserver'] != "":
                res = dns.resolver.Resolver()
                res.nameservers = [self.ts.config['_dnsserver']]
                dns.resolver.override_system_resolver(res)
            else:
                dns.resolver.restore_system_resolver()

            # Set the user agent
            self.ts.config['_useragent'] = self.ts.sf.optValueToData(
                self.ts.config['_useragent'])

            # Get internet TLDs
            tlddata = self.ts.sf.cacheGet("internet_tlds",
                                          self.ts.config['_internettlds_cache'])
            # If it wasn't loadable from cache, load it from scratch
            if tlddata is None:
                self.ts.config['_internettlds'] = self.ts.sf.optValueToData(
                    self.ts.config['_internettlds'])
                self.ts.sf.cachePut("internet_tlds", self.ts.config['_internettlds'])
            else:
                self.ts.config["_internettlds"] = tlddata.splitlines()

            for modName in self.ts.moduleList:
                if modName == '':
                    continue

                module = __import__('modules.' + modName, globals(), locals(),
                                    [modName])
                mod = getattr(module, modName)()
                mod.__name__ = modName

                # Set up the module
                # Configuration is a combined global config with module-specific options
                self.ts.modconfig[modName] = deepcopy(self.ts.config['__modules__'][modName]['opts'])
                for opt in self.ts.config.keys():
                    self.ts.modconfig[modName][opt] = deepcopy(self.ts.config[opt])

                mod.clearListeners()  # clear any listener relationships from the past
                mod.setup(self.ts.sf, self.ts.modconfig[modName])
                mod.setDbh(self.ts.dbh)
                mod.setScanId(self.ts.scanId)

                # Give modules a chance to 'enrich' the original target with
                # aliases of that target.
                newTarget = mod.enrichTarget(target)
                if newTarget is not None:
                    target = newTarget
                self.ts.moduleInstances[modName] = mod

                # Override the module's local socket module
                # to be the SOCKS one.
                if self.ts.config['_socks1type'] != '':
                    mod._updateSocket(socket)

                self.ts.sf.status(modName + " module loaded.")

            # Register listener modules and then start all modules sequentially
            for module in self.ts.moduleInstances.values():
                # Register the target with the module
                module.setTarget(target)

                for listenerModule in self.ts.moduleInstances.values():
                    # Careful not to register twice or you will get duplicate events
                    if listenerModule in module._listenerModules:
                        continue
                    # Note the absence of a check for whether a module can register
                    # to itself. That is intentional because some modules will
                    # act on their own notifications (e.g. sfp_dns)!
                    if listenerModule.watchedEvents() is not None:
                        module.registerListener(listenerModule)

            # Now we are ready to roll..
            self.setStatus("RUNNING")

            # Create a pseudo module for the root event to originate from
            psMod = SpiderFootPlugin()
            psMod.__name__ = "SpiderFoot UI"
            psMod.setTarget(target)
            psMod.clearListeners()
            for mod in self.ts.moduleInstances.values():
                if mod.watchedEvents() is not None:
                    psMod.registerListener(mod)

            # Create the "ROOT" event which un-triggered modules will link events to
            rootEvent = SpiderFootEvent("ROOT", self.ts.targetValue, "", None)
            psMod.notifyListeners(rootEvent)
            firstEvent = SpiderFootEvent(self.ts.targetType, self.ts.targetValue,
                                         "SpiderFoot UI", rootEvent)
            psMod.notifyListeners(firstEvent)

            # Check in case the user requested to stop the scan while
            # modules were initializing
            for module in self.ts.moduleInstances.values():
                if module.checkForStop():
                    self.setStatus('ABORTING')
                    aborted = True
                    break

            if aborted:
                self.ts.sf.status("Scan [" + self.ts.scanId + "] aborted.")
                self.setStatus("ABORTED", None, time.time() * 1000)
            else:
                self.ts.sf.status("Scan [" + self.ts.scanId + "] completed.")
                self.setStatus("FINISHED", None, time.time() * 1000)
        except BaseException as e:
            exc_type, exc_value, exc_traceback = sys.exc_info()
            self.ts.sf.error("Unhandled exception (" + e.__class__.__name__ + ") " + \
                             "encountered during scan. Please report this as a bug: " + \
                             repr(traceback.format_exception(exc_type, exc_value, exc_traceback)), False)
            self.ts.sf.status("Scan [" + self.ts.scanId + "] failed: " + str(e))
            self.setStatus("ERROR-FAILED", None, time.time() * 1000)

        self.ts.dbh.close()
        del self.ts
        del self.temp
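
A minimal, self-contained sketch of the error-reporting pattern in the except clause above: sys.exc_info() captures the (type, value, traceback) triple of the active exception, and traceback.format_exception() turns it into a printable list of strings. The report_error() helper is hypothetical, standing in for self.ts.sf.error().

import sys
import traceback

def report_error(message):
    # hypothetical stand-in for the scanner's logging facility
    print(message)

try:
    raise ValueError("simulated scan failure")
except BaseException as e:
    exc_type, exc_value, exc_traceback = sys.exc_info()
    report_error("Unhandled exception (" + e.__class__.__name__ + "): " +
                 repr(traceback.format_exception(exc_type, exc_value,
                                                 exc_traceback)))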

Example 41

Project: quadcopterPi
Source File: MPU6050.py
View license
    def __init__(self, address=0x68):

        self.logger = logging.getLogger('myQ.MPU6050')

        self.logger.debug('IMU initializing...')
        try:
            self.i2c = I2C(address)
            self.address = address

            self.cal_iteration = 100

            self.roll_a_cal = 0
            self.pitch_a_cal = 0
            self.yaw_a_cal = 0
            self.roll_g_cal = 0
            self.pitch_g_cal = 0
            self.yaw_g_cal = 0
            self.gyro_scale = 0
            self.sensor_data = array('B', [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
            self.result_array = array('h', [0, 0, 0, 0, 0, 0, 0])

            #---------------------------------------------------------------------------
            # Reset all registers
            #---------------------------------------------------------------------------
            self.i2c.write8(self.__MPU6050_RA_PWR_MGMT_1, 0x80)
            time.sleep(5)

            #---------------------------------------------------------------------------
            # Sets clock source to gyro reference w/ PLL
            #---------------------------------------------------------------------------
            #SNT: changed 0x02 -> 0x03 (PLL with Z-axis gyro reference)
            self.i2c.write8(self.__MPU6050_RA_PWR_MGMT_1, 0x03)
            time.sleep(0.005)

            #---------------------------------------------------------------------------
            # Sets sample rate to 1000/(1+4) = 200Hz
            #---------------------------------------------------------------------------
            self.i2c.write8(self.__MPU6050_RA_SMPLRT_DIV, 0x04)
            time.sleep(0.005)

            #SoleNeroTech note: moved up this part of the code to solve a bug in MPU6050:
            #CONFIG has to be set just after PWR_MGMT_1
            #---------------------------------------------------------------------------
            # 0x02 => 98Hz  2ms delay
            # 0x03 => 40Hz  4
            # 0x04 => 20Hz  8
            # 0x05 => 10Hz  15
            self.i2c.write8(self.__MPU6050_RA_CONFIG, 0x05)
            time.sleep(0.005)

            #---------------------------------------------------------------------------
            # 0x00=+/-250  0x08=+/-500  0x10=+/-1000  0x18=+/-2000 (deg/s)
            #SoleNeroTech: modified to 0x00
            self.i2c.write8(self.__MPU6050_RA_GYRO_CONFIG, 0x00)
            self.gyro_scale = 250
            time.sleep(0.005)

            #---------------------------------------------------------------------------
            # 0x00=+/-2g  0x08=+/-4g  0x10=+/-8g  0x18=+/-16g
            self.i2c.write8(self.__MPU6050_RA_ACCEL_CONFIG, 0x00)
            time.sleep(0.005)

            #---------------------------------------------------------------------------
            # Disable FIFO and AUX I2C; set the FIFO and I2C reset bits to 0
            #---------------------------------------------------------------------------
            self.i2c.write8(self.__MPU6050_RA_USER_CTRL, 0x00)
            time.sleep(0.005)

            #---------------------------------------------------------------------------
            # Setup INT pin to latch and AUX I2C pass through
            #---------------------------------------------------------------------------
            ##logger.debug('Enable interrupt')
            #SNT: changed 0x20 -> 0x02
            self.i2c.write8(self.__MPU6050_RA_INT_PIN_CFG, 0x02)
            time.sleep(0.005)

            #---------------------------------------------------------------------------
            # Controls frequency of wakeups in accel low power mode plus the sensor standby modes
            #---------------------------------------------------------------------------
            ##logger.debug('Disable low-power')
            self.i2c.write8(self.__MPU6050_RA_PWR_MGMT_2, 0x00)
            time.sleep(0.005)

            #---------------------------------------------------------------------------
            # ********************************: Experimental :**************************
            # Enable data ready interrupt
            #---------------------------------------------------------------------------
            ##logger.debug('Interrupt data ready')
            self.i2c.write8(self.__MPU6050_RA_INT_ENABLE, 0x01)
            time.sleep(0.005)

            #---------------------------------------------------------------------------
            # Freefall threshold of |0mg|
            #---------------------------------------------------------------------------
            self.i2c.write8(self.__MPU6050_RA_FF_THR, 0x00)
            time.sleep(0.005)

            #---------------------------------------------------------------------------
            # Freefall duration limit of 0
            #---------------------------------------------------------------------------
            self.i2c.write8(self.__MPU6050_RA_FF_DUR, 0x00)
            time.sleep(0.005)

            #---------------------------------------------------------------------------
            # Motion threshold of 0mg
            #---------------------------------------------------------------------------
            self.i2c.write8(self.__MPU6050_RA_MOT_THR, 0x00)
            time.sleep(0.005)

            #---------------------------------------------------------------------------
            # Motion duration of 0s
            #---------------------------------------------------------------------------
            self.i2c.write8(self.__MPU6050_RA_MOT_DUR, 0x00)
            time.sleep(0.005)

            #---------------------------------------------------------------------------
            # Zero motion threshold
            #---------------------------------------------------------------------------
            self.i2c.write8(self.__MPU6050_RA_ZRMOT_THR, 0x00)
            time.sleep(0.005)

            #---------------------------------------------------------------------------
            # Zero motion duration threshold
            #---------------------------------------------------------------------------
            self.i2c.write8(self.__MPU6050_RA_ZRMOT_DUR, 0x00)
            time.sleep(0.005)

            #---------------------------------------------------------------------------
            # Disable sensor output to FIFO buffer
            #---------------------------------------------------------------------------
            self.i2c.write8(self.__MPU6050_RA_FIFO_EN, 0x00)
            time.sleep(0.005)

            #---------------------------------------------------------------------------
            # AUX I2C setup
            # Sets AUX I2C to single master control, plus other config
            #---------------------------------------------------------------------------
            self.i2c.write8(self.__MPU6050_RA_I2C_MST_CTRL, 0x00)
            time.sleep(0.005)

            #---------------------------------------------------------------------------
            # Setup AUX I2C slaves
            #---------------------------------------------------------------------------
            self.i2c.write8(self.__MPU6050_RA_I2C_SLV0_ADDR, 0x00)
            self.i2c.write8(self.__MPU6050_RA_I2C_SLV0_REG, 0x00)
            self.i2c.write8(self.__MPU6050_RA_I2C_SLV0_CTRL, 0x00)
            self.i2c.write8(self.__MPU6050_RA_I2C_SLV1_ADDR, 0x00)
            self.i2c.write8(self.__MPU6050_RA_I2C_SLV1_REG, 0x00)
            self.i2c.write8(self.__MPU6050_RA_I2C_SLV1_CTRL, 0x00)
            self.i2c.write8(self.__MPU6050_RA_I2C_SLV2_ADDR, 0x00)
            self.i2c.write8(self.__MPU6050_RA_I2C_SLV2_REG, 0x00)
            self.i2c.write8(self.__MPU6050_RA_I2C_SLV2_CTRL, 0x00)
            self.i2c.write8(self.__MPU6050_RA_I2C_SLV3_ADDR, 0x00)
            self.i2c.write8(self.__MPU6050_RA_I2C_SLV3_REG, 0x00)
            self.i2c.write8(self.__MPU6050_RA_I2C_SLV3_CTRL, 0x00)
            self.i2c.write8(self.__MPU6050_RA_I2C_SLV4_ADDR, 0x00)
            self.i2c.write8(self.__MPU6050_RA_I2C_SLV4_REG, 0x00)
            self.i2c.write8(self.__MPU6050_RA_I2C_SLV4_DO, 0x00)
            self.i2c.write8(self.__MPU6050_RA_I2C_SLV4_CTRL, 0x00)
            self.i2c.write8(self.__MPU6050_RA_I2C_SLV4_DI, 0x00)

            #---------------------------------------------------------------------------
            # Slave out, don't care
            #---------------------------------------------------------------------------
            self.i2c.write8(self.__MPU6050_RA_I2C_SLV0_DO, 0x00)
            self.i2c.write8(self.__MPU6050_RA_I2C_SLV1_DO, 0x00)
            self.i2c.write8(self.__MPU6050_RA_I2C_SLV2_DO, 0x00)
            self.i2c.write8(self.__MPU6050_RA_I2C_SLV3_DO, 0x00)

            #---------------------------------------------------------------------------
            # More slave config
            #---------------------------------------------------------------------------
            self.i2c.write8(self.__MPU6050_RA_I2C_MST_DELAY_CTRL, 0x00)
            time.sleep(0.005)

            #---------------------------------------------------------------------------
            # Reset sensor signal paths
            #---------------------------------------------------------------------------
            self.i2c.write8(self.__MPU6050_RA_SIGNAL_PATH_RESET, 0x00)
            time.sleep(0.005)

            #---------------------------------------------------------------------------
            # Motion detection control
            #---------------------------------------------------------------------------
            self.i2c.write8(self.__MPU6050_RA_MOT_DETECT_CTRL, 0x00)
            time.sleep(0.005)

            #---------------------------------------------------------------------------
            # Data transfer to and from the FIFO buffer
            #---------------------------------------------------------------------------
            self.i2c.write8(self.__MPU6050_RA_FIFO_R_W, 0x00)
            time.sleep(0.005)

            self.CheckSetting()
        except:
            self.logger.critical('Unexpected error: %s', sys.exc_info()[0])
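
The except clause above grabs the exception type via sys.exc_info()[0]. Note that logging methods take printf-style arguments rather than extra positional values, and that logger.exception() (or exc_info=True) makes the logging module call sys.exc_info() itself and record the full traceback. A minimal sketch, assuming only the standard library:

import logging
import sys

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger('myQ.MPU6050')

try:
    1 / 0
except Exception:
    # Log just the exception class, as the example above does...
    logger.critical('Unexpected error: %s', sys.exc_info()[0])
    # ...or let logging capture sys.exc_info() itself, traceback included.
    logger.exception('Unexpected error during IMU setup')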

Example 42

Project: kamaelia_
Source File: TwitterSearch.py
View license
    def main(self):
        twitterurl = "http://api.twitter.com/1/users/search.json"

        if self.proxy:
            proxyhandler = urllib2.ProxyHandler({"http" : self.proxy})
            twitopener = urllib2.build_opener(proxyhandler)
            urllib2.install_opener(twitopener)

        headers = {'User-Agent' : "BBC R&D Grabber"}
        postdata = None

        if self.keypair == False:
            # Perform OAuth authentication - as we don't have the secret key pair we need to request it
            # This will require some user input
            request_token_url = 'http://api.twitter.com/oauth/request_token'
            access_token_url = 'http://api.twitter.com/oauth/access_token'
            authorize_url = 'http://api.twitter.com/oauth/authorize'

            token = None
            consumer = oauth.Consumer(key=self.consumerkeypair[0],secret=self.consumerkeypair[1])

            params = {
                        'oauth_version': "1.0",
                        'oauth_nonce': oauth.generate_nonce(),
                        'oauth_timestamp': int(time.time()),
                    }

            params['oauth_consumer_key'] = consumer.key

            req = oauth.Request(method="GET",url=request_token_url,parameters=params)

            signature_method = oauth.SignatureMethod_HMAC_SHA1()
            req.sign_request(signature_method, consumer, token)

            requestheaders = req.to_header()
            requestheaders['User-Agent'] = "BBC R&D Grabber"

            # Connect to Twitter
            try:
                req = urllib2.Request(request_token_url,None,requestheaders) # Why won't this work?!? Is it trying to POST?
                conn1 = urllib2.urlopen(req)
            except httplib.BadStatusLine:
                e = sys.exc_info()[1]
                Print("PeopleSearch BadStatusLine error:", e )
                conn1 = False
            except urllib2.HTTPError:
                e = sys.exc_info()[1]
                Print("PeopleSearch HTTP error:", e.code)
#                sys.stderr.write('PeopleSearch HTTP error: ' + str(e.code) + '\n')
                conn1 = False
            except urllib2.URLError:
                e = sys.exc_info()[1]
                Print("PeopleSearch URL error: ", e.reason)
#                sys.stderr.write('PeopleSearch URL error: ' + str(e.reason) + '\n')
                conn1 = False

            if conn1:
                content = conn1.read()
                conn1.close()

                request_token = dict(urlparse.parse_qsl(content))

                Print( "Request Token:")
                Print("     - oauth_token        = " , request_token['oauth_token'])
                Print("     - oauth_token_secret = " , request_token['oauth_token_secret'])
                Print("")

                # The user must confirm authorisation so a URL is Printed here
                Print("Go to the following link in your browser:")
                Print("%s?oauth_token=%s" % (authorize_url, request_token['oauth_token']) )
                Print("")

                accepted = 'n'
                # Wait until the user has confirmed authorisation
                while accepted.lower() == 'n':
                    accepted = raw_input('Have you authorized me? (y/n) ')
                oauth_verifier = raw_input('What is the PIN? ')

                token = oauth.Token(request_token['oauth_token'],
                    request_token['oauth_token_secret'])
                token.set_verifier(oauth_verifier)

                params = {
                        'oauth_version': "1.0",
                        'oauth_nonce': oauth.generate_nonce(),
                        'oauth_timestamp': int(time.time()),
                    }

                params['oauth_token'] = token.key
                params['oauth_consumer_key'] = consumer.key

                req = oauth.Request(method="GET",url=access_token_url,parameters=params)

                signature_method = oauth.SignatureMethod_HMAC_SHA1()
                req.sign_request(signature_method, consumer, token)

                requestheaders = req.to_header()
                requestheaders['User-Agent'] = "BBC R&D Grabber"
                # Connect to Twitter
                try:
                    req = urllib2.Request(access_token_url,"oauth_verifier=%s" % oauth_verifier,requestheaders) # Why won't this work?!? Is it trying to POST?
                    conn1 = urllib2.urlopen(req)
                except httplib.BadStatusLine:
                    e = sys.exc_info()[1]
#                    sys.stderr.write('PeopleSearch BadStatusLine error: ' + str(e) + '\n')
                    Print('PeopleSearch BadStatusLine error: ', e)
                    conn1 = False
                except urllib2.HTTPError:
                    e = sys.exc_info()[1]
                    Print('PeopleSearch HTTP error: ', e.code)
                    conn1 = False
                except urllib2.URLError:
                    e = sys.exc_info()[1]
#                    sys.stderr.write('PeopleSearch URL error: ' + str(e.reason) + '\n')
                    Print('PeopleSearch URL error: ', e.reason)
                    conn1 = False

                if conn1:
                    content = conn1.read()
                    conn1.close()
                    access_token = dict(urlparse.parse_qsl(content))

                    # Access tokens retrieved from Twitter
                    Print("Access Token:")
                    Print("     - oauth_token        = " , access_token['oauth_token'])
                    Print("     - oauth_token_secret = " , access_token['oauth_token_secret'])
                    Print("")
                    Print("You may now access protected resources using the access tokens above.")
                    Print("")

                    save = False
                    # Load config to save OAuth keys
                    try:
                        homedir = os.path.expanduser("~")
                        file = open(homedir + "/twitter-login.conf",'r')
                        save = True
                    except IOError:
                        e = sys.exc_info()[1]
                        Print ("Failed to load config file - not saving oauth keys: " , e)

                    if save:
                        raw_config = file.read()

                        file.close()

                        # Read config and add new values
                        config = cjson.decode(raw_config)
                        config['key'] = access_token['oauth_token']

                        config['secret'] = access_token['oauth_token_secret']

                        raw_config = cjson.encode(config)

                        # Write out the new config file
                        try:
                            file = open(homedir + "/twitter-login.conf",'w')
                            file.write(raw_config)
                            file.close()
                        except IOError:
                            e = sys.exc_info()[1]
                            Print ("Failed to save oauth keys: " , e)

                    self.keypair = [access_token['oauth_token'], access_token['oauth_token_secret']]
        

        while not self.finished():
            # TODO: Implement backoff algorithm in case of connection failures - watch out for the fact this could delay the requester component
            if self.dataReady("inbox"):
                # Retrieve keywords to look up
                person = self.recv("inbox")

                # Ensure we're not rate limited during the first request - if so we'll wait for 15 mins before our next request
                if (datetime.today() - timedelta(minutes=15)) > self.ratelimited:
                    requesturl = twitterurl + "?q=" + urllib.quote(person) + "&per_page=5"

                    params = {
                        'oauth_version': "1.0",
                        'oauth_nonce': oauth.generate_nonce(),
                        'oauth_timestamp': int(time.time()),
                    }

                    token = oauth.Token(key=self.keypair[0],secret=self.keypair[1])
                    consumer = oauth.Consumer(key=self.consumerkeypair[0],secret=self.consumerkeypair[1])

                    params['oauth_token'] = token.key
                    params['oauth_consumer_key'] = consumer.key

                    req = oauth.Request(method="GET",url=requesturl,parameters=params)

                    signature_method = oauth.SignatureMethod_HMAC_SHA1()
                    req.sign_request(signature_method, consumer, token)

                    requestheaders = req.to_header()
                    requestheaders['User-Agent'] = "BBC R&D Grabber"

                    # Connect to Twitter
                    try:
                        req = urllib2.Request(requesturl,None,requestheaders) # Why won't this work?!? Is it trying to POST?
                        conn1 = urllib2.urlopen(req)
                    except httplib.BadStatusLine:
                        e = sys.exc_info()[1]
#                        sys.stderr.write('PeopleSearch BadStatusLine error: ' + str(e) + '\n')
                        Print('PeopleSearch BadStatusLine error: ', e)
                        conn1 = False
                    except urllib2.HTTPError:
                        e = sys.exc_info()[1]
#                        sys.stderr.write('PeopleSearch HTTP error: ' + str(e.code) + '\n')
                        Print('PeopleSearch HTTP error: ', e.code)
                        conn1 = False
                    except urllib2.URLError:
                        e = sys.exc_info()[1]
#                        sys.stderr.write('PeopleSearch URL error: ' + str(e.reason) + '\n')
                        Print('PeopleSearch URL error: ', e.reason)
                        conn1 = False

                    if conn1:
                        # Check rate limiting here and Print current limit
                        headers = conn1.info()
                        try:
                            headerlist = string.split(str(headers),"\n")
                        except UnicodeEncodeError: # str may fail...
                            headerlist = []
                        for line in headerlist:
                            if line != "":
                                splitheader = line.split()
                                if splitheader[0] == "X-FeatureRateLimit-Remaining:" or splitheader[0] == "X-RateLimit-Remaining:":
                                    Print(splitheader[0] , " " , splitheader[1] )
                                    if int(splitheader[1]) < 5:
                                        self.ratelimited = datetime.today()
                        # Grab json format result of people search here
                        try:
                            data = conn1.read()
                            try:
                                content = cjson.decode(data)
                                self.send(content,"outbox")
                            except cjson.DecodeError:
                                self.send(dict(),"outbox")
                        except IOError:
                            e = sys.exc_info()[1]
#                            sys.stderr.write('PeopleSearch IO error: ' + str(e) + '\n')
                            Print('PeopleSearch IO error: ', e)
                            self.send(dict(),"outbox")
                        conn1.close()
                    else:
                        self.send(dict(),"outbox")
                else:
                   Print("Twitter search paused - rate limited")
                   self.send(dict(),"outbox")
            self.pause()
            yield 1
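
Throughout this example the active exception is retrieved with sys.exc_info()[1] instead of being bound in the except clause. That was a common idiom for code that had to parse under both Python 2 and early Python 3, where "except X, e" and "except X as e" were mutually exclusive syntaxes. A minimal sketch of the idiom (the URL and the failure are simulated):

import sys

def fetch(url):
    try:
        raise IOError("connection to %s failed" % url)  # simulated failure
    except IOError:
        # sys.exc_info()[1] is the exception instance currently being
        # handled; fetching it this way avoids version-specific syntax.
        e = sys.exc_info()[1]
        print('fetch error: %s' % e)
        return None

fetch('http://api.twitter.com/1/users/search.json')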

Example 43

View license
    def urlopen(self, method, url, body=None, headers=None, retries=None,
                redirect=True, assert_same_host=True, timeout=_Default,
                pool_timeout=None, release_conn=None, **response_kw):
        """
        Get a connection from the pool and perform an HTTP request. This is the
        lowest level call for making a request, so you'll need to specify all
        the raw details.

        .. note::

           More commonly, it's appropriate to use a convenience method provided
           by :class:`.RequestMethods`, such as :meth:`request`.

        .. note::

           `release_conn` will only behave as expected if
           `preload_content=False` because we want to make
           `preload_content=False` the default behaviour someday soon without
           breaking backwards compatibility.

        :param method:
            HTTP request method (such as GET, POST, PUT, etc.)

        :param body:
            Data to send in the request body (useful for creating
            POST requests, see HTTPConnectionPool.post_url for
            more convenience).

        :param headers:
            Dictionary of custom headers to send, such as User-Agent,
            If-None-Match, etc. If None, pool headers are used. If provided,
            these headers completely replace any pool-specific headers.

        :param retries:
            Configure the number of retries to allow before raising a
            :class:`~urllib3.exceptions.MaxRetryError` exception.

            Pass ``None`` to retry until you receive a response. Pass a
            :class:`~urllib3.util.retry.Retry` object for fine-grained control
            over different types of retries.
            Pass an integer number to retry connection errors that many times,
            but no other types of errors. Pass zero to never retry.

            If ``False``, then retries are disabled and any exception is raised
            immediately. Also, instead of raising a MaxRetryError on redirects,
            the redirect response will be returned.

        :type retries: :class:`~urllib3.util.retry.Retry`, False, or an int.

        :param redirect:
            If True, automatically handle redirects (status codes 301, 302,
            303, 307, 308). Each redirect counts as a retry. Disabling retries
            will disable redirect, too.

        :param assert_same_host:
            If ``True``, will make sure that the host of the pool requests is
            consistent else will raise HostChangedError. When False, you can
            use the pool on an HTTP proxy and request foreign hosts.

        :param timeout:
            If specified, overrides the default timeout for this one
            request. It may be a float (in seconds) or an instance of
            :class:`urllib3.util.Timeout`.

        :param pool_timeout:
            If set and the pool is set to block=True, then this method will
            block for ``pool_timeout`` seconds and raise EmptyPoolError if no
            connection is available within the time period.

        :param release_conn:
            If False, then the urlopen call will not release the connection
            back into the pool once a response is received (but will release if
            you read the entire contents of the response such as when
            `preload_content=True`). This is useful if you're not preloading
            the response's content immediately. You will need to call
            ``r.release_conn()`` on the response ``r`` to return the connection
            back into the pool. If None, it takes the value of
            ``response_kw.get('preload_content', True)``.

        :param \**response_kw:
            Additional parameters are passed to
            :meth:`urllib3.response.HTTPResponse.from_httplib`
        """
        if headers is None:
            headers = self.headers

        if not isinstance(retries, Retry):
            retries = Retry.from_int(retries, redirect=redirect, default=self.retries)

        if release_conn is None:
            release_conn = response_kw.get('preload_content', True)

        # Check host
        if assert_same_host and not self.is_same_host(url):
            raise HostChangedError(self, url, retries)

        conn = None

        # Merge the proxy headers. Only do this in HTTP. We have to copy the
        # headers dict so we can safely change it without those changes being
        # reflected in anyone else's copy.
        if self.scheme == 'http':
            headers = headers.copy()
            headers.update(self.proxy_headers)

        # Must keep the exception bound to a separate variable or else Python 3
        # complains about UnboundLocalError.
        err = None

        try:
            # Request a connection from the queue.
            timeout_obj = self._get_timeout(timeout)
            conn = self._get_conn(timeout=pool_timeout)

            conn.timeout = timeout_obj.connect_timeout

            is_new_proxy_conn = self.proxy is not None and not getattr(conn, 'sock', None)
            if is_new_proxy_conn:
                self._prepare_proxy(conn)

            # Make the request on the httplib connection object.
            httplib_response = self._make_request(conn, method, url,
                                                  timeout=timeout_obj,
                                                  body=body, headers=headers)

            # If we're going to release the connection in ``finally:``, then
            # the request doesn't need to know about the connection. Otherwise
            # it will also try to release it and we'll have a double-release
            # mess.
            response_conn = not release_conn and conn

            # Import httplib's response into our own wrapper object
            response = HTTPResponse.from_httplib(httplib_response,
                                                 pool=self,
                                                 connection=response_conn,
                                                 **response_kw)

            # else:
            #     The connection will be put back into the pool when
            #     ``response.release_conn()`` is called (implicitly by
            #     ``response.read()``)

        except Empty:
            # Timed out by queue.
            raise EmptyPoolError(self, "No pool connections are available.")

        except (BaseSSLError, CertificateError) as e:
            # Close the connection. If a connection is reused on which there
            # was a Certificate error, the next request will certainly raise
            # another Certificate error.
            if conn:
                conn.close()
                conn = None
            raise SSLError(e)

        except SSLError:
            # Treat SSLError separately from BaseSSLError to preserve
            # traceback.
            if conn:
                conn.close()
                conn = None
            raise

        except (TimeoutError, HTTPException, SocketError, ConnectionError) as e:
            if conn:
                # Discard the connection for these exceptions. It will be
                # replaced during the next _get_conn() call.
                conn.close()
                conn = None

            if isinstance(e, SocketError) and self.proxy:
                e = ProxyError('Cannot connect to proxy.', e)
            elif isinstance(e, (SocketError, HTTPException)):
                e = ProtocolError('Connection aborted.', e)

            retries = retries.increment(method, url, error=e, _pool=self,
                                        _stacktrace=sys.exc_info()[2])
            retries.sleep()

            # Keep track of the error for the retry warning.
            err = e

        finally:
            if release_conn:
                # Put the connection back to be reused. If the connection is
                # expired then it will be None, which will get replaced with a
                # fresh connection during _get_conn.
                self._put_conn(conn)

        if not conn:
            # Try again
            log.warning("Retrying (%r) after connection "
                        "broken by '%r': %s" % (retries, err, url))
            return self.urlopen(method, url, body, headers, retries,
                                redirect, assert_same_host,
                                timeout=timeout, pool_timeout=pool_timeout,
                                release_conn=release_conn, **response_kw)

        # Handle redirect?
        redirect_location = redirect and response.get_redirect_location()
        if redirect_location:
            if response.status == 303:
                method = 'GET'

            try:
                retries = retries.increment(method, url, response=response, _pool=self)
            except MaxRetryError:
                if retries.raise_on_redirect:
                    raise
                return response

            log.info("Redirecting %s -> %s" % (url, redirect_location))
            return self.urlopen(method, redirect_location, body, headers,
                    retries=retries, redirect=redirect,
                    assert_same_host=assert_same_host,
                    timeout=timeout, pool_timeout=pool_timeout,
                    release_conn=release_conn, **response_kw)

        # Check if we should retry the HTTP response.
        if retries.is_forced_retry(method, status_code=response.status):
            retries = retries.increment(method, url, response=response, _pool=self)
            retries.sleep()
            log.info("Forced retry: %s" % url)
            return self.urlopen(method, url, body, headers,
                    retries=retries, redirect=redirect,
                    assert_same_host=assert_same_host,
                    timeout=timeout, pool_timeout=pool_timeout,
                    release_conn=release_conn, **response_kw)

        return response
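
The retry bookkeeping above passes sys.exc_info()[2] - the traceback of the exception currently being handled - into Retry.increment(), so that when retries are exhausted the final error can point back at the original failure site. A minimal sketch of the same idea with a hypothetical, much simplified Retry class (not the real urllib3 one):

import sys

class Retry(object):
    """Hypothetical stand-in for urllib3.util.retry.Retry."""

    def __init__(self, total=3):
        self.total = total
        self.last_traceback = None

    def increment(self, error=None, _stacktrace=None):
        # Keep the traceback of the most recent failure around so a
        # final error can reference where things actually went wrong.
        self.last_traceback = _stacktrace
        if self.total <= 0:
            raise error
        self.total -= 1
        return self

retries = Retry(total=2)
while True:
    try:
        raise IOError("connection aborted")  # simulated transient failure
    except IOError as e:
        try:
            retries = retries.increment(error=e,
                                        _stacktrace=sys.exc_info()[2])
        except IOError:
            print("giving up after the configured number of attempts")
            break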

Example 44

View license
@error.context_aware
def run(test, params, env):
    """
    KVM migration with destination problems.
    Contains group of test for testing qemu behavior if some
    problems happens on destination side.

    Tests are described right in test classes comments down in code.

    Test needs params: nettype = bridge.

    :param test: kvm test object.
    :param params: Dictionary with test parameters.
    :param env: Dictionary with the test environment.
    """
    login_timeout = int(params.get("login_timeout", 360))
    mig_timeout = float(params.get("mig_timeout", "3600"))
    mig_protocol = params.get("migration_protocol", "tcp")

    test_rand = None
    mount_path = None
    while mount_path is None or os.path.exists(mount_path):
        test_rand = utils.generate_random_string(3)
        mount_path = ("%s/ni_mount_%s" %
                      (data_dir.get_data_dir(), test_rand))

    mig_dst = os.path.join(mount_path, "mig_dst")

    migration_exec_cmd_src = params.get("migration_exec_cmd_src",
                                        "gzip -c > %s")
    migration_exec_cmd_src = (migration_exec_cmd_src % (mig_dst))

    class MiniSubtest(object):

        def __new__(cls, *args, **kargs):
            self = super(MiniSubtest, cls).__new__(cls)
            ret = None
            exc_info = None
            if args is None:
                args = []
            try:
                try:
                    ret = self.test(*args, **kargs)
                except Exception:
                    exc_info = sys.exc_info()
            finally:
                if hasattr(self, "clean"):
                    try:
                        self.clean()
                    except Exception:
                        if exc_info is None:
                            raise
                    if exc_info:
                        raise exc_info[0], exc_info[1], exc_info[2]
            return ret

    def control_service(session, service, init_service, action, timeout=60):
        """
        Control a service on the guest.

        :param session: session to the guest.
        :param service: service to control.
        :param init_service: name of service for old-style service control.
        :param action: action to take on the service (start|stop|restart)
        """
        status = utils_misc.get_guest_service_status(session, service,
                                                     service_former=init_service)
        if action == "start" and status == "active":
            logging.debug("%s already started, no need start it again.",
                          service)
            return
        if action == "stop" and status == "inactive":
            logging.debug("%s already stopped, no need stop it again.",
                          service)
            return
        try:
            session.cmd("systemctl --version", timeout=timeout)
            session.cmd("systemctl %s %s.service" % (action, service),
                        timeout=timeout)
        except:
            session.cmd("service %s %s" % (init_service, action),
                        timeout=timeout)

    def set_nfs_server(vm, share_cfg):
        """
        Start nfs server on guest.

        :param vm: Virtual machine on which to start the NFS server.
        :param share_cfg: share configuration written to /etc/exports.
        """
        session = vm.wait_for_login(timeout=login_timeout)
        cmd = "echo '%s' > /etc/exports" % (share_cfg)
        control_service(session, "nfs-server", "nfs", "stop")
        session.cmd(cmd)
        control_service(session, "nfs-server", "nfs", "start")
        session.cmd("iptables -F")
        session.close()

    def umount(mount_path):
        """
        Umount nfs server mount_path

        :param mount_path: path where nfs dir will be placed.
        """
        utils.run("umount -f %s" % (mount_path))

    def create_file_disk(dst_path, size):
        """
        Create a file of the given size and create an ext3 filesystem in it.

        :param dst_path: Path to file.
        :param size: Size of file in MB
        """
        utils.run("dd if=/dev/zero of=%s bs=1M count=%s" % (dst_path, size))
        utils.run("mkfs.ext3 -F %s" % (dst_path))

    def mount(disk_path, mount_path, options=None):
        """
        Mount Disk to path

        :param disk_path: Path to disk
        :param mount_path: Path where disk will be mounted.
        :param options: String with options for mount
        """
        if options is None:
            options = ""
        else:
            options = "%s" % options

        utils.run("mount %s %s %s" % (options, disk_path, mount_path))

    def find_disk_vm(vm, disk_serial):
        """
        Find disk on vm which ends with disk_serial

        :param vm: VM where to find a disk.
        :param disk_serial: suffix of disk id.

        :return: string Disk path
        """
        session = vm.wait_for_login(timeout=login_timeout)

        disk_path = os.path.join("/", "dev", "disk", "by-id")
        disks = session.cmd("ls %s" % disk_path).split("\n")
        session.close()
        disk = filter(lambda x: x.endswith(disk_serial), disks)
        if not disk:
            return None
        return os.path.join(disk_path, disk[0])

    def prepare_disk(vm, disk_path, mount_path):
        """
        Create ext3 on the disk and mount it to mount_path.

        :param vm: VM where to find a disk.
        :param disk_path: Path to disk in guest system.
        """
        session = vm.wait_for_login(timeout=login_timeout)
        session.cmd("mkfs.ext3 -F %s" % (disk_path))
        session.cmd("mount %s %s" % (disk_path, mount_path))
        session.close()

    def disk_load(vm, src_path, dst_path, copy_timeout=None, dsize=None):
        """
        Start disk load. Cyclic copy from src_path to dst_path.

        :param vm: VM where to find a disk.
        :param src_path: Source of data
        :param dst_path: Path to destination
        :param copy_timeout: Timeout for copy
        :param dsize: Size of data block which is periodically copied.
        """
        if dsize is None:
            dsize = 100
        session = vm.wait_for_login(timeout=login_timeout)
        cmd = ("nohup /bin/bash -c 'while true; do dd if=%s of=%s bs=1M "
               "count=%s; done;' 2> /dev/null &" % (src_path, dst_path, dsize))
        pid = re.search(r"\[.+\] (.+)",
                        session.cmd_output(cmd, timeout=copy_timeout))
        return pid.group(1)

    class IscsiServer_tgt(object):

        """
        Class to set up and start the iscsi server.
        """

        def __init__(self):
            self.server_name = "autotest_guest_" + test_rand
            self.user = "user1"
            self.passwd = "pass"
            self.config = """
<target %s:dev01>
    backing-store %s
    incominguser %s %s
</target>
"""

        def set_iscsi_server(self, vm_ds, disk_path, disk_size):
            """
            Set up the iscsi server.

            :param vm_ds: VM where the iscsi server should be started.
            :param disk_path: path where should be disk placed.
            :param disk_size: size of new disk.
            """
            session = vm_ds.wait_for_login(timeout=login_timeout)

            session.cmd("dd if=/dev/zero of=%s bs=1M count=%s" % (disk_path,
                                                                  disk_size))
            status, output = session.cmd_status_output("setenforce 0")
            if status not in [0, 127]:
                logging.warn("Function setenforce fails.\n %s" % (output))

            config = self.config % (self.server_name, disk_path,
                                    self.user, self.passwd)
            cmd = "cat > /etc/tgt/conf.d/virt.conf << EOF" + config + "EOF"
            control_service(session, "tgtd", "tgtd", "stop")
            session.sendline(cmd)
            control_service(session, "tgtd", "tgtd", "start")
            session.cmd("iptables -F")
            session.close()

        def find_disk(self):
            disk_path = os.path.join("/", "dev", "disk", "by-path")
            disks = utils.run("ls %s" % disk_path).stdout.split("\n")
            disk = filter(lambda x: self.server_name in x, disks)
            if not disk:
                return None
            return os.path.join(disk_path, disk[0].strip())

        def connect(self, vm_ds):
            """
            Connect to iscsi server on guest.

            :param vm_ds: Guest where the iscsi server is running.

            :return: path where disk is connected.
            """
            ip_dst = vm_ds.get_address()
            utils.run("iscsiadm -m discovery -t st -p %s" % (ip_dst))

            server_ident = ('iscsiadm -m node --targetname "%s:dev01"'
                            ' --portal %s' % (self.server_name, ip_dst))
            utils.run("%s --op update --name node.session.auth.authmethod"
                      " --value CHAP" % (server_ident))
            utils.run("%s --op update --name node.session.auth.username"
                      " --value %s" % (server_ident, self.user))
            utils.run("%s --op update --name node.session.auth.password"
                      " --value %s" % (server_ident, self.passwd))
            utils.run("%s --login" % (server_ident))
            time.sleep(1.0)
            return self.find_disk()

        def disconnect(self):
            server_ident = ('iscsiadm -m node --targetname "%s:dev01"' %
                            (self.server_name))
            utils.run("%s --logout" % (server_ident))

    class IscsiServer(object):

        """
        Iscsi server implementation interface.
        """

        def __init__(self, iscsi_type, *args, **kargs):
            if iscsi_type == "tgt":
                self.ic = IscsiServer_tgt(*args, **kargs)
            else:
                raise NotImplementedError()

        def __getattr__(self, name):
            if self.ic:
                return self.ic.__getattribute__(name)
            raise AttributeError("Cannot find attribute %s in class" % name)

    class test_read_only_dest(MiniSubtest):

        """
        Migration to read-only destination by using a migration to file.

        1) Start guest with NFS server.
        2) Config NFS server share for read-only.
        3) Mount the read-only share to host.
        4) Start second guest and try to migrate to read-only dest.

        result) Migration should fail with error message about read-only dst.
        """

        def test(self):
            if params.get("nettype") != "bridge":
                raise error.TestNAError("Unable start test without params"
                                        " nettype=bridge.")

            vm_ds = env.get_vm("virt_test_vm2_data_server")
            vm_guest = env.get_vm("virt_test_vm1_guest")
            ro_timeout = int(params.get("read_only_timeout", "480"))
            exp_str = r".*Read-only file system.*"
            utils.run("mkdir -p %s" % (mount_path))

            vm_ds.verify_alive()
            vm_guest.create()
            vm_guest.verify_alive()

            set_nfs_server(vm_ds, "/mnt *(ro,async,no_root_squash)")

            mount_src = "%s:/mnt" % (vm_ds.get_address())
            mount(mount_src, mount_path,
                  "-o hard,timeo=14,rsize=8192,wsize=8192")
            vm_guest.migrate(mig_timeout, mig_protocol,
                             not_wait_for_migration=True,
                             migration_exec_cmd_src=migration_exec_cmd_src,
                             env=env)

            if not utils_misc.wait_for(lambda: process_output_check(
                                       vm_guest.process, exp_str),
                                       timeout=ro_timeout, first=2):
                raise error.TestFail("The Read-only file system warning not"
                                     " come in time limit.")

        def clean(self):
            if os.path.exists(mig_dst):
                os.remove(mig_dst)
            if os.path.exists(mount_path):
                umount(mount_path)
                os.rmdir(mount_path)

    class test_low_space_dest(MiniSubtest):

        """
        Migrate to destination with low space.

        1) Start guest.
        2) Create disk with low space.
        3) Try to migrate to the disk.

        result) Migration should fail with a warning about no space left on device.
        """

        def test(self):
            self.disk_path = None
            while self.disk_path is None or os.path.exists(self.disk_path):
                self.disk_path = ("%s/disk_%s" %
                                  (test.tmpdir, utils.generate_random_string(3)))

            disk_size = utils.convert_data_size(params.get("disk_size", "10M"),
                                                default_sufix='M')
            disk_size /= 1024 * 1024    # To MB.

            exp_str = r".*gzip: stdout: No space left on device.*"
            vm_guest = env.get_vm("virt_test_vm1_guest")
            utils.run("mkdir -p %s" % (mount_path))

            vm_guest.verify_alive()
            vm_guest.wait_for_login(timeout=login_timeout)

            create_file_disk(self.disk_path, disk_size)
            mount(self.disk_path, mount_path, "-o loop")

            vm_guest.migrate(mig_timeout, mig_protocol,
                             not_wait_for_migration=True,
                             migration_exec_cmd_src=migration_exec_cmd_src,
                             env=env)

            if not utils_misc.wait_for(lambda: process_output_check(
                                       vm_guest.process, exp_str),
                                       timeout=60, first=1):
                raise error.TestFail("The migration to destination with low "
                                     "storage space didn't fail as it should.")

        def clean(self):
            if os.path.exists(mount_path):
                umount(mount_path)
                os.rmdir(mount_path)
            if os.path.exists(self.disk_path):
                os.remove(self.disk_path)

    class test_extensive_io(MiniSubtest):

        """
        Abstract class for migration after extensive I/O. This class only
        defines the basic functionality and the interface for the other tests.

        1) Start ds_guest which starts data server.
        2) Create disk for data stress in ds_guest.
        3) Share and prepare disk from ds_guest.
        4) Mount the disk to mount_path.
        5) Create disk for second guest in the mounted path.
        6) Start second guest with prepared disk.
        7) Start stress on the prepared disk on second guest.
        8) Wait a few seconds.
        9) Restart the data server.
        10) Migrate second guest.

        result) Migration should be successful.
        """

        def test(self):
            self.copier_pid = None
            if params.get("nettype") != "bridge":
                raise error.TestNAError("Unable start test without params"
                                        " nettype=bridge.")

            self.disk_serial = params.get("drive_serial_image2_vm1",
                                          "nfs-disk-image2-vm1")
            self.disk_serial_src = params.get("drive_serial_image1_vm1",
                                              "root-image1-vm1")
            self.guest_mount_path = params.get("guest_disk_mount_path", "/mnt")
            self.copy_timeout = int(params.get("copy_timeout", "1024"))

            self.copy_block_size = params.get("copy_block_size", "100M")
            self.copy_block_size = utils.convert_data_size(
                self.copy_block_size,
                "M")
            self.disk_size = "%s" % (self.copy_block_size * 1.4)
            self.copy_block_size /= 1024 * 1024

            self.server_recover_timeout = (
                int(params.get("server_recover_timeout", "240")))

            utils.run("mkdir -p %s" % (mount_path))

            self.test_params()
            self.config()

            self.vm_guest_params = params.copy()
            self.vm_guest_params["images_base_dir_image2_vm1"] = mount_path
            self.vm_guest_params["image_name_image2_vm1"] = "ni_mount_%s/test" % (test_rand)
            self.vm_guest_params["image_size_image2_vm1"] = self.disk_size
            self.vm_guest_params = self.vm_guest_params.object_params("vm1")
            self.image2_vm_guest_params = (self.vm_guest_params.
                                           object_params("image2"))

            env_process.preprocess_image(test,
                                         self.image2_vm_guest_params,
                                         env)
            self.vm_guest.create(params=self.vm_guest_params)

            self.vm_guest.verify_alive()
            self.vm_guest.wait_for_login(timeout=login_timeout)
            self.workload()

            self.restart_server()

            self.vm_guest.migrate(mig_timeout, mig_protocol, env=env)

            try:
                self.vm_guest.verify_alive()
                self.vm_guest.wait_for_login(timeout=login_timeout)
            except aexpect.ExpectTimeoutError:
                raise error.TestFail("Migration should be successful.")

        def test_params(self):
            """
            Test specific params. Could be implemented in inherited class.
            """
            pass

        def config(self):
            """
            Test specific config.
            """
            raise NotImplementedError()

        def workload(self):
            disk_path = find_disk_vm(self.vm_guest, self.disk_serial)
            if disk_path is None:
                raise error.TestFail("It was impossible to find disk on VM")

            prepare_disk(self.vm_guest, disk_path, self.guest_mount_path)

            disk_path_src = find_disk_vm(self.vm_guest, self.disk_serial_src)
            dst_path = os.path.join(self.guest_mount_path, "test.data")
            self.copier_pid = disk_load(self.vm_guest, disk_path_src, dst_path,
                                        self.copy_timeout, self.copy_block_size)

        def restart_server(self):
            raise NotImplementedError()

        def clean_test(self):
            """
            Test specific cleanup.
            """
            pass

        def clean(self):
            if self.copier_pid:
                try:
                    if self.vm_guest.is_alive():
                        session = self.vm_guest.wait_for_login(timeout=login_timeout)
                        session.cmd("kill -9 %s" % (self.copier_pid))
                except:
                    logging.warn("It was impossible to stop copier. Something "
                                 "probably happened with GUEST or NFS server.")

            if params.get("kill_vm") == "yes":
                if self.vm_guest.is_alive():
                    self.vm_guest.destroy()
                    utils_misc.wait_for(lambda: self.vm_guest.is_dead(), 30,
                                        2, 2, "Waiting for dying of guest.")
                qemu_img = qemu_storage.QemuImg(self.image2_vm_guest_params,
                                                mount_path,
                                                None)
                qemu_img.check_image(self.image2_vm_guest_params,
                                     mount_path)

            self.clean_test()

    class test_extensive_io_nfs(test_extensive_io):

        """
        Migrate after extensive io.

        1) Start ds_guest which starts NFS server.
        2) Create disk for data stress in ds_guest.
        3) Share disk over NFS.
        4) Mount the disk to mount_path
        5) Create disk for second guest in the mounted path.
        6) Start second guest with prepared disk.
        7) Start stress on the prepared disk on second guest.
        8) Wait a few seconds.
        9) Restart NFS server.
        10) Migrate second guest.

        result) Migration should be successful.
        """

        def config(self):
            vm_ds = env.get_vm("virt_test_vm2_data_server")
            self.vm_guest = env.get_vm("vm1")
            self.image2_vm_guest_params = None
            self.copier_pid = None
            self.qemu_img = None

            vm_ds.verify_alive()
            self.control_session_ds = vm_ds.wait_for_login(timeout=login_timeout)

            set_nfs_server(vm_ds, "/mnt *(rw,async,no_root_squash)")

            mount_src = "%s:/mnt" % (vm_ds.get_address())
            mount(mount_src, mount_path,
                  "-o hard,timeo=14,rsize=8192,wsize=8192")

        def restart_server(self):
            time.sleep(10)  # Wait a while until the copy starts working.
            control_service(self.control_session_ds, "nfs-server",
                            "nfs", "stop")  # Stop NFS server
            time.sleep(5)
            control_service(self.control_session_ds, "nfs-server",
                            "nfs", "start")  # Start NFS server

            """
            Touch waits until all previous requests are invalidated
            (NFS grace period). Without grace period qemu start takes
            to long and timers for machine creation dies.
            """
            qemu_img = qemu_storage.QemuImg(self.image2_vm_guest_params,
                                            mount_path,
                                            None)
            utils.run("touch %s" % (qemu_img.image_filename),
                      self.server_recover_timeout)

        def clean_test(self):
            if os.path.exists(mount_path):
                umount(mount_path)
                os.rmdir(mount_path)

    class test_extensive_io_iscsi(test_extensive_io):

        """
        Migrate after extensive io.

        1) Start ds_guest which starts iscsi server.
        2) Create disk for data stress in ds_guest.
        3) Share disk over iscsi.
        4) Join to disk on host.
        5) Prepare partition on the disk.
        6) Mount the disk to mount_path
        7) Create disk for second guest in the mounted path.
        8) Start second guest with prepared disk.
        9) Start stress on the prepared disk on second guest.
        10) Wait a few seconds.
        11) Restart iscsi server.
        12) Migrate second guest.

        result) Migration should be successful.
        """

        def test_params(self):
            self.iscsi_variant = params.get("iscsi_variant", "tgt")
            self.ds_disk_path = os.path.join(self.guest_mount_path, "test.img")

        def config(self):
            vm_ds = env.get_vm("virt_test_vm2_data_server")
            self.vm_guest = env.get_vm("vm1")
            self.image2_vm_guest_params = None
            self.copier_pid = None
            self.qemu_img = None

            vm_ds.verify_alive()
            self.control_session_ds = vm_ds.wait_for_login(timeout=login_timeout)

            self.isci_server = IscsiServer("tgt")
            disk_path = os.path.join(self.guest_mount_path, "disk1")
            self.isci_server.set_iscsi_server(vm_ds, disk_path,
                                              (int(float(self.disk_size) * 1.1) / (1024 * 1024)))
            self.host_disk_path = self.isci_server.connect(vm_ds)

            utils.run("mkfs.ext3 -F %s" % (self.host_disk_path))
            mount(self.host_disk_path, mount_path)

        def restart_server(self):
            time.sleep(10)  # Wait a while until the copy starts working.
            control_service(self.control_session_ds, "tgtd",
                            "tgtd", "stop", 240)  # Stop Iscsi server
            time.sleep(5)
            control_service(self.control_session_ds, "tgtd",
                            "tgtd", "start", 240)  # Start Iscsi server

            """
            Wait for iscsi server after restart and will be again
            accessible.
            """
            qemu_img = qemu_storage.QemuImg(self.image2_vm_guest_params,
                                            mount_path,
                                            None)
            utils.run("touch %s" % (qemu_img.image_filename),
                      self.server_recover_timeout)

        def clean_test(self):
            if os.path.exists(mount_path):
                umount(mount_path)
                os.rmdir(mount_path)
            if os.path.exists(self.host_disk_path):
                self.isci_server.disconnect()

    test_type = params.get("test_type")
    if (test_type in locals()):
        tests_group = locals()[test_type]
        tests_group()
    else:
        raise error.TestFail("Test group '%s' is not defined in"
                             " migration_with_dst_problem test" % test_type)

Example 45

Project: utter-pool
Source File: __init__.py
View license
def linkify(text, callbacks=DEFAULT_CALLBACKS, skip_pre=False,
            parse_email=False, tokenizer=HTMLSanitizer):
    """Convert URL-like strings in an HTML fragment to links.

    linkify() converts strings that look like URLs or domain names in a
    blob of text that may be an HTML fragment to links, while preserving
    (a) links already in the string, (b) urls found in attributes, and
    (c) email addresses.
    """
    text = force_unicode(text)

    if not text:
        return u''

    parser = html5lib.HTMLParser(tokenizer=tokenizer)

    forest = parser.parseFragment(text)

    def replace_nodes(tree, new_frag, node):
        new_tree = parser.parseFragment(new_frag)
        for n in new_tree.childNodes:
            # Prevent us from re-parsing new links as existing links.
            if n.name == 'a':
                n._seen = True
            tree.insertBefore(n, node)
        tree.removeChild(node)
        # Return the number of new nodes.
        return len(new_tree.childNodes) - 1

    def strip_wrapping_parentheses(fragment):
        """Strips wrapping parentheses.

        Returns a tuple of the following format::

            (string stripped from wrapping parentheses,
             count of stripped opening parentheses,
             count of stripped closing parentheses)
        """
        opening_parentheses = closing_parentheses = 0
        # Count consecutive opening parentheses
        # at the beginning of the fragment (string).
        for char in fragment:
            if char == '(':
                opening_parentheses += 1
            else:
                break

        if opening_parentheses:
            newer_frag = ''
            # Cut the consecutive opening parentheses from the fragment.
            fragment = fragment[opening_parentheses:]
            # Reverse the fragment for easier detection of parentheses
            # inside the URL.
            reverse_fragment = fragment[::-1]
            skip = False
            for char in reverse_fragment:
                # Remove the closing parenthesis if it has a matching
                # opening parenthesis (they are balanced).
                if (char == ')' and
                        closing_parentheses < opening_parentheses and
                        not skip):
                    closing_parentheses += 1
                    continue
                # Do not remove ')' from the URL itself.
                elif char != ')':
                    skip = True
                newer_frag += char
            fragment = newer_frag[::-1]

        return fragment, opening_parentheses, closing_parentheses

    def apply_callbacks(attrs, new):
        for cb in callbacks:
            attrs = cb(attrs, new)
            if attrs is None:
                return None
        return attrs

    def linkify_nodes(tree, parse_text=True):
        # I know this isn't Pythonic, but we're sometimes mutating
        # tree.childNodes, which ends up breaking the loop and causing us to
        # reparse code.
        children = len(tree.childNodes)
        current = 0  # A pointer to the "current" node.
        while current < children:
            node = tree.childNodes[current]
            if node.type == NODE_TEXT and parse_text:
                new_frag = _render(node)
                # Look for email addresses?
                if parse_email:
                    new_frag = re.sub(email_re, email_repl, new_frag)
                    if new_frag != _render(node):
                        adj = replace_nodes(tree, new_frag, node)
                        children += adj
                        current += adj
                        linkify_nodes(tree)
                        continue
                new_frag = re.sub(url_re, link_repl, new_frag)
                if new_frag != _render(node):
                    adj = replace_nodes(tree, new_frag, node)
                    children += adj
                    current += adj
            elif node.name == 'a' and not getattr(node, '_seen', False):
                if 'href' in node.attributes:
                    attrs = node.attributes
                    _text = attrs['_text'] = ''.join(_render(c) for
                                                     c in node.childNodes)
                    attrs = apply_callbacks(attrs, False)
                    if attrs is not None:
                        text = force_unicode(attrs.pop('_text'))
                        node.attributes = attrs
                        for n in node.childNodes:
                            node.removeChild(n)
                        node.insertText(text)
                        node._seen = True
                    else:
                        replace_nodes(tree, _text, node)
            elif skip_pre and node.name == 'pre':
                linkify_nodes(node, False)
            elif not getattr(node, '_seen', False):
                linkify_nodes(node)
            current += 1

    def email_repl(match):
        addr = match.group(0).replace('"', '&quot;')
        link = {
            '_text': addr,
            'href': 'mailto:%s' % addr,
        }
        link = apply_callbacks(link, True)

        if link is None:
            return addr

        _href = link.pop('href')
        _text = link.pop('_text')

        repl = '<a href="%s" %s>%s</a>'
        attribs = ' '.join('%s="%s"' % (k, v) for k, v in link.items())
        return repl % (_href, attribs, _text)

    def link_repl(match):
        url = match.group(0)
        open_brackets = close_brackets = 0
        if url.startswith('('):
            url, open_brackets, close_brackets = (
                    strip_wrapping_parentheses(url)
            )
        end = u''
        m = re.search(punct_re, url)
        if m:
            end = m.group(0)
            url = url[0:m.start()]
        if re.search(proto_re, url):
            href = url
        else:
            href = u''.join([u'http://', url])

        link = {
            '_text': url,
            'href': href,
        }

        link = apply_callbacks(link, True)

        if link is None:
            return url

        _text = link.pop('_text')
        _href = link.pop('href')

        repl = u'%s<a href="%s" %s>%s</a>%s%s'
        attribs = ' '.join('%s="%s"' % (k, v) for k, v in link.items())

        return repl % ('(' * open_brackets,
                       _href, attribs, _text, end,
                       ')' * close_brackets)

    try:
        linkify_nodes(forest)
    except (RECURSION_EXCEPTION), e:
        # If we hit the max recursion depth, just return what we've got.
        log.error('Probable recursion error: %r' % e, exc_info=sys.exc_info())

    return _render(forest)
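
The except clause above hands the full exception triple to the logger. A standalone sketch of the same logging pattern (the unbounded recurse() is a stand-in for linkify_nodes() overflowing the stack):

import logging
import sys

logging.basicConfig()
log = logging.getLogger("linkify-demo")

def recurse(node):
    # Deliberately unbounded recursion, standing in for linkify_nodes()
    # blowing the stack on a deeply nested fragment.
    return recurse(node)

try:
    recurse(object())
except RuntimeError as e:
    # exc_info=sys.exc_info() attaches the traceback to the log record;
    # inside an except block, exc_info=True is equivalent.
    log.error('Probable recursion error: %r' % e, exc_info=sys.exc_info())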

Example 46

Project: fwbackups
Source File: backup.py
View license
  def backupPaths(self, paths, command):
    """Does the actual copying dirty work"""
    # this is in common
    self._current = 0
    if len(paths) == 0:
      return True
    self._total = len(paths)
    self._status = STATUS_BACKING_UP
    wasAnError = False
    if self.options['Engine'] == 'tar':
      if MSWINDOWS:
        self.logger.logmsg('INFO', _('Using %s on Windows: Cancel function will only take effect after a path has been completed.' % self.options['Engine']))
        import tarfile
        fh = tarfile.open(self.dest, 'w')
        for i in paths:
          self.ifCancel()
          self._current += 1
          self.logger.logmsg('DEBUG', _('Backing up path %(a)i/%(b)i: %(c)s' % {'a': self._current, 'b': self._total, 'c': i}))
          fh.add(i, recursive=self.options['Recursive'])
        fh.close()
      else: # not MSWINDOWS
        for i in paths:
          i = fwbackups.escapeQuotes(i, 1)
          self.ifCancel()
          self._current += 1
          self.logger.logmsg('DEBUG', _("Running command: nice -n %(a)i %(b)s '%(c)s'" % {'a': self.options['Nice'], 'b': command, 'c': i}))
          sub = fwbackups.executeSub("nice -n %i %s '%s'" % (self.options['Nice'], command, i), env=self.environment, shell=True)
          self.pids.append(sub.pid)
          self.logger.logmsg('DEBUG', _('Starting subprocess with PID %s') % sub.pid)
          # track stdout
          errors = []
          # use nonblocking I/O
          fl = fcntl.fcntl(sub.stderr, fcntl.F_GETFL)
          fcntl.fcntl(sub.stderr, fcntl.F_SETFL, fl | os.O_NONBLOCK)
          while sub.poll() in ["", None]:
            time.sleep(0.01)
            try:
              errors += sub.stderr.readline()
            except IOError, description:
              pass
          self.pids.remove(sub.pid)
          retval = sub.poll()
          self.logger.logmsg('DEBUG', _('Subprocess with PID %(a)s exited with status %(b)s' % {'a': sub.pid, 'b': retval}))
          # Something wrong?
          if retval != EXIT_STATUS_OK and retval != 2:
            wasAnError = True
            self.logger.logmsg('ERROR', 'An error occurred while backing up path \'%s\'.\nPlease check the error output below to determine if any files are incomplete or missing.' % str(i))
            self.logger.logmsg('ERROR', _('Process exited with status %(a)s. Errors: %(b)s' % {'a': retval, 'b': ''.join(errors)}))

    elif self.options['Engine'] == 'tar.gz':
      self._total = 1
      if MSWINDOWS:
        self.logger.logmsg('INFO', _('Using %s on Windows: Cancel function will only take effect after a path has been completed.' % self.options['Engine']))
        import tarfile
        fh = tarfile.open(self.dest, 'w:gz')
        for i in paths:
          self._current += 1
          self.ifCancel()
          self.logger.logmsg('DEBUG', _('Backing up path %(a)i/%(b)i: %(c)s' % {'a': self._current, 'b': self._total, 'c': i}))
          fh.add(i, recursive=self.options['Recursive'])
          self.logger.logmsg('DEBUG', _('Adding path `%s\' to the archive' % i))
        fh.close()
      else: # not MSWINDOWS
        self._current = 1
        escapedPaths = [fwbackups.escapeQuotes(i, 1) for i in paths]
        # This is a fancy way of getting i = "'one' 'two' 'three'"
        i = "'%s'" % "' '".join(escapedPaths)
        self.logger.logmsg('INFO', _('Using %s: Must backup all paths at once - Progress notification will be disabled.' % self.options['Engine']))
        self.logger.logmsg('DEBUG', _('Backing up path %(a)i/%(b)i: %(c)s') % {'a': self._current, 'b': self._total, 'c': i.replace("'", '')})
        self.logger.logmsg('DEBUG', _("Running command: nice -n %(a)i %(b)s %(c)s" % {'a': self.options['Nice'], 'b': command, 'c': i}))
        # Don't wrap i in quotes; we did this above already when merging the paths
        sub = fwbackups.executeSub("nice -n %i %s %s" % (self.options['Nice'], command, i), env=self.environment, shell=True)
        self.pids.append(sub.pid)
        self.logger.logmsg('DEBUG', _('Starting subprocess with PID %s') % sub.pid)
        # track stdout
        errors = []
        # use nonblocking I/O
        fl = fcntl.fcntl(sub.stderr, fcntl.F_GETFL)
        fcntl.fcntl(sub.stderr, fcntl.F_SETFL, fl | os.O_NONBLOCK)
        while sub.poll() in ["", None]:
          time.sleep(0.01)
          try:
            errors += sub.stderr.readline()
          except IOError, description:
            pass
        self.pids.remove(sub.pid)
        retval = sub.poll()
        self.logger.logmsg('DEBUG', _('Subprocess with PID %(a)s exited with status %(b)s' % {'a': sub.pid, 'b': retval}))
        # Something wrong?
        if retval != EXIT_STATUS_OK and retval != 2:
          wasAnError = True
          self.logger.logmsg('ERROR', 'An error occurred while backing up path \'%s\'.\nPlease check the error output below to determine if any files are incomplete or missing.' % str(i))
          self.logger.logmsg('ERROR', _('Process exited with status %(a)s. Errors: %(b)s' % {'a': retval, 'b': ''.join(errors)}))

    elif self.options['Engine'] == 'tar.bz2':
      self._total = 1
      if MSWINDOWS:
        self.logger.logmsg('INFO', _('Using %s on Windows: Cancel function will only take effect after a path has been completed.' % self.options['Engine']))
        import tarfile
        fh = tarfile.open(self.dest, 'w:bz2')
        for i in paths:
          self._current += 1
          self.ifCancel()
          self.logger.logmsg('DEBUG', _('Backing up path %(a)i/%(b)i: %(c)s' % {'a': self._current, 'b': self._total, 'c': i}))
          fh.add(i, recursive=self.options['Recursive'])
          self.logger.logmsg('DEBUG', _('Adding path `%s\' to the archive' % i))
        fh.close()
      else: # not MSWINDOWS
        self._current = 1
        escapedPaths = [fwbackups.escapeQuotes(i, 1) for i in paths]
        # This is a fancy way of getting i = "'one' 'two' 'three'"
        i = "'%s'" % "' '".join(escapedPaths)
        self.logger.logmsg('INFO', _('Using %s: Must backup all paths at once - Progress notification will be disabled.' % self.options['Engine']))
        self.logger.logmsg('DEBUG', _('Backing up path %(a)i/%(b)i: %(c)s') % {'a': self._current, 'b': self._total, 'c': i})
        self.logger.logmsg('DEBUG', _("Running command: nice -n %(a)i %(b)s %(c)s" % {'a': self.options['Nice'], 'b': command, 'c': i}))
        # Don't wrap i in quotes; we did this above already when merging the paths
        sub = fwbackups.executeSub("nice -n %i %s %s" % (self.options['Nice'], command, i), env=self.environment, shell=True)
        self.pids.append(sub.pid)
        self.logger.logmsg('DEBUG', _('Starting subprocess with PID %s') % sub.pid)
        # track stdout
        errors = []
        # use nonblocking I/O
        fl = fcntl.fcntl(sub.stderr, fcntl.F_GETFL)
        fcntl.fcntl(sub.stderr, fcntl.F_SETFL, fl | os.O_NONBLOCK)
        while sub.poll() in ["", None]:
          time.sleep(0.01)
          try:
            errors += sub.stderr.readline()
          except IOError, description:
            pass
        self.pids.remove(sub.pid)
        retval = sub.poll()
        self.logger.logmsg('DEBUG', _('Subprocess with PID %(a)s exited with status %(b)s' % {'a': sub.pid, 'b': retval}))
        # Something wrong?
        if retval != EXIT_STATUS_OK and retval != 2:
          wasAnError = True
          self.logger.logmsg('ERROR', 'An error occurred while backing up path \'%s\'.\nPlease check the error output below to determine if any files are incomplete or missing.' % str(i))
          self.logger.logmsg('ERROR', _('Process exited with status %(a)s. Errors: %(b)s' % {'a': retval, 'b': ''.join(errors)}))

    elif self.options['Engine'] == 'rsync':
      # in this case, self.{folderdest,dest} both need to be created
      if self.options['DestinationType'] == 'remote (ssh)':
        client, sftpClient = sftp.connect(self.options['RemoteHost'], self.options['RemoteUsername'], self.options['RemotePassword'], self.options['RemotePort'])
        if not wasAnError:
          for i in paths:
            if self.toCancel:
              # Check if we need to cancel in between paths
              # If so, break and close the SFTP session
              # Immediately after, self.ifCancel() is run.
              break
            self._current += 1
            self.logger.logmsg('DEBUG', _('Backing up path %(a)i/%(b)i: %(c)s') % {'a': self._current, 'b': self._total, 'c': i})
            if not os.path.exists(encode(i)):
              self.logger.logmsg('WARNING', _("Path %s is missing or cannot be read and will be excluded from the backup.") % i)
            sftp.put(sftpClient, encode(i), encode(os.path.normpath(self.options['RemoteFolder']+os.sep+os.path.basename(self.dest)+os.sep+os.path.dirname(i))), symlinks=not self.options['FollowLinks'], excludes=encode(self.options['Excludes'].split('\n')))
        sftpClient.close()
        client.close()
      else: # destination is local
        for i in paths:
          self.ifCancel()
          self._current += 1
          if MSWINDOWS:
            # let's deal with real paths
            self.logger.logmsg('DEBUG', _('Backing up path %(a)i/%(b)i: %(c)s' % {'a': self._current, 'b': self._total, 'c': i}))
            shutil_modded.copytree_fullpaths(encode(i), encode(self.dest))
          else: # not MSWINDOWS; UNIX/OS X can call rsync binary
            i = fwbackups.escapeQuotes(i, 1)
            self.logger.logmsg('DEBUG', _("Running command: nice -n %(a)i %(b)s %(c)s '%(d)s'" % {'a': self.options['Nice'], 'b': command, 'c': i, 'd': fwbackups.escapeQuotes(self.dest, 1)}))
            sub = fwbackups.executeSub("nice -n %i %s '%s' '%s'" % (self.options['Nice'], command, i, fwbackups.escapeQuotes(self.dest, 1)), env=self.environment, shell=True)
            self.pids.append(sub.pid)
            self.logger.logmsg('DEBUG', _('Starting subprocess with PID %s') % sub.pid)
            # track stdout
            errors = []
            # use nonblocking I/O
            fl = fcntl.fcntl(sub.stderr, fcntl.F_GETFL)
            fcntl.fcntl(sub.stderr, fcntl.F_SETFL, fl | os.O_NONBLOCK)
            while sub.poll() in ["", None]:
              time.sleep(0.01)
              try:
                errors += sub.stderr.readline()
              except IOError, description:
                pass
            self.pids.remove(sub.pid)
            retval = sub.poll()
            self.logger.logmsg('DEBUG', _('Subprocess with PID %(a)s exited with status %(b)s' % {'a': sub.pid, 'b': retval}))
            # Something wrong?
            if retval not in [EXIT_STATUS_OK, 2]:
              wasAnError = True
              self.logger.logmsg('ERROR', 'An error occurred while backing up path \'%s\'.\nPlease check the error output below to determine if any files are incomplete or missing.' % str(i))
              self.logger.logmsg('ERROR', _('Process exited with status %(a)s. Errors: %(b)s' % {'a': retval, 'b': ''.join(errors)}))

    self.ifCancel()
    # A test is included to ensure the archive actually exists, since if
    # wasAnError is True the archive might not even exist.
    if self.options['Engine'].startswith('tar') and self.options['DestinationType'] == 'remote (ssh)' and os.path.exists(encode(self.dest)):
      self.logger.logmsg('DEBUG', _('Sending files to server via SFTP'))
      self._status = STATUS_SENDING_TO_REMOTE
      client, sftpClient = sftp.connect(self.options['RemoteHost'], self.options['RemoteUsername'], self.options['RemotePassword'], self.options['RemotePort'])
      try:
        sftp.putFile(sftpClient, self.dest, self.options['RemoteFolder'])
        os.remove(self.dest)
      except:
        import sys
        import traceback
        wasAnError = True
        self.logger.logmsg('DEBUG', _('Could not send file(s) or folder to server:'))
        (etype, value, tb) = sys.exc_info()
        self.logger.logmsg('DEBUG', ''.join(traceback.format_exception(etype, value, tb)))
      sftpClient.close()
      client.close()

    # finally, we do this
    self._current = self._total
    time.sleep(1)
    self.ifCancel()
    return (not wasAnError)
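
The SFTP failure handler above unpacks sys.exc_info() and runs the triple through traceback.format_exception to log the complete error text. The same pattern in isolation (logmsg is a stand-in for the logger used above):

import sys
import traceback

def logmsg(level, message):
    # Stand-in for self.logger.logmsg in backupPaths above.
    print("%s: %s" % (level, message))

try:
    open("/nonexistent/archive.tar")
except IOError:
    logmsg('DEBUG', 'Could not send file(s) or folder to server:')
    (etype, value, tb) = sys.exc_info()
    logmsg('DEBUG', ''.join(traceback.format_exception(etype, value, tb)))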

Example 47

View license
    def urlopen(self, method, url, body=None, headers=None, retries=None,
                redirect=True, assert_same_host=True, timeout=_Default,
                pool_timeout=None, release_conn=None, **response_kw):
        """
        Get a connection from the pool and perform an HTTP request. This is the
        lowest level call for making a request, so you'll need to specify all
        the raw details.

        .. note::

           More commonly, it's appropriate to use a convenience method provided
           by :class:`.RequestMethods`, such as :meth:`request`.

        .. note::

           `release_conn` will only behave as expected if
           `preload_content=False` because we want to make
           `preload_content=False` the default behaviour someday soon without
           breaking backwards compatibility.

        :param method:
            HTTP request method (such as GET, POST, PUT, etc.)

        :param body:
            Data to send in the request body (useful for creating
            POST requests, see HTTPConnectionPool.post_url for
            more convenience).

        :param headers:
            Dictionary of custom headers to send, such as User-Agent,
            If-None-Match, etc. If None, pool headers are used. If provided,
            these headers completely replace any pool-specific headers.

        :param retries:
            Configure the number of retries to allow before raising a
            :class:`~urllib3.exceptions.MaxRetryError` exception.

            Pass ``None`` to retry until you receive a response. Pass a
            :class:`~urllib3.util.retry.Retry` object for fine-grained control
            over different types of retries.
            Pass an integer number to retry connection errors that many times,
            but no other types of errors. Pass zero to never retry.

            If ``False``, then retries are disabled and any exception is raised
            immediately. Also, instead of raising a MaxRetryError on redirects,
            the redirect response will be returned.

        :type retries: :class:`~urllib3.util.retry.Retry`, False, or an int.

        :param redirect:
            If True, automatically handle redirects (status codes 301, 302,
            303, 307, 308). Each redirect counts as a retry. Disabling retries
            will disable redirect, too.

        :param assert_same_host:
            If ``True``, will make sure that the host of the pool requests is
            consistent else will raise HostChangedError. When False, you can
            use the pool on an HTTP proxy and request foreign hosts.

        :param timeout:
            If specified, overrides the default timeout for this one
            request. It may be a float (in seconds) or an instance of
            :class:`urllib3.util.Timeout`.

        :param pool_timeout:
            If set and the pool is set to block=True, then this method will
            block for ``pool_timeout`` seconds and raise EmptyPoolError if no
            connection is available within the time period.

        :param release_conn:
            If False, then the urlopen call will not release the connection
            back into the pool once a response is received (but will release if
            you read the entire contents of the response such as when
            `preload_content=True`). This is useful if you're not preloading
            the response's content immediately. You will need to call
            ``r.release_conn()`` on the response ``r`` to return the connection
            back into the pool. If None, it takes the value of
            ``response_kw.get('preload_content', True)``.

        :param \**response_kw:
            Additional parameters are passed to
            :meth:`urllib3.response.HTTPResponse.from_httplib`
        """
        if headers is None:
            headers = self.headers

        if not isinstance(retries, Retry):
            retries = Retry.from_int(retries, redirect=redirect, default=self.retries)

        if release_conn is None:
            release_conn = response_kw.get('preload_content', True)

        # Check host
        if assert_same_host and not self.is_same_host(url):
            raise HostChangedError(self, url, retries)

        conn = None

        # Merge the proxy headers. Only do this in HTTP. We have to copy the
        # headers dict so we can safely change it without those changes being
        # reflected in anyone else's copy.
        if self.scheme == 'http':
            headers = headers.copy()
            headers.update(self.proxy_headers)

        # Must keep the exception bound to a separate variable or else Python 3
        # complains about UnboundLocalError.
        err = None

        try:
            # Request a connection from the queue.
            timeout_obj = self._get_timeout(timeout)
            conn = self._get_conn(timeout=pool_timeout)

            conn.timeout = timeout_obj.connect_timeout

            is_new_proxy_conn = self.proxy is not None and not getattr(conn, 'sock', None)
            if is_new_proxy_conn:
                self._prepare_proxy(conn)

            # Make the request on the httplib connection object.
            httplib_response = self._make_request(conn, method, url,
                                                  timeout=timeout_obj,
                                                  body=body, headers=headers)

            # If we're going to release the connection in ``finally:``, then
            # the request doesn't need to know about the connection. Otherwise
            # it will also try to release it and we'll have a double-release
            # mess.
            response_conn = not release_conn and conn

            # Import httplib's response into our own wrapper object
            response = HTTPResponse.from_httplib(httplib_response,
                                                 pool=self,
                                                 connection=response_conn,
                                                 **response_kw)

            # else:
            #     The connection will be put back into the pool when
            #     ``response.release_conn()`` is called (implicitly by
            #     ``response.read()``)

        except Empty:
            # Timed out by queue.
            raise EmptyPoolError(self, "No pool connections are available.")

        except (BaseSSLError, CertificateError) as e:
            # Close the connection. If a connection is reused on which there
            # was a Certificate error, the next request will certainly raise
            # another Certificate error.
            if conn:
                conn.close()
                conn = None
            raise SSLError(e)

        except SSLError:
            # Treat SSLError separately from BaseSSLError to preserve
            # traceback.
            if conn:
                conn.close()
                conn = None
            raise

        except (TimeoutError, HTTPException, SocketError, ConnectionError) as e:
            if conn:
                # Discard the connection for these exceptions. It will
                # be replaced during the next _get_conn() call.
                conn.close()
                conn = None

            if isinstance(e, SocketError) and self.proxy:
                e = ProxyError('Cannot connect to proxy.', e)
            elif isinstance(e, (SocketError, HTTPException)):
                e = ProtocolError('Connection aborted.', e)

            retries = retries.increment(method, url, error=e, _pool=self,
                                        _stacktrace=sys.exc_info()[2])
            retries.sleep()

            # Keep track of the error for the retry warning.
            err = e

        finally:
            if release_conn:
                # Put the connection back to be reused. If the connection is
                # expired then it will be None, which will get replaced with a
                # fresh connection during _get_conn.
                self._put_conn(conn)

        if not conn:
            # Try again
            log.warning("Retrying (%r) after connection "
                        "broken by '%r': %s" % (retries, err, url))
            return self.urlopen(method, url, body, headers, retries,
                                redirect, assert_same_host,
                                timeout=timeout, pool_timeout=pool_timeout,
                                release_conn=release_conn, **response_kw)

        # Handle redirect?
        redirect_location = redirect and response.get_redirect_location()
        if redirect_location:
            if response.status == 303:
                method = 'GET'

            try:
                retries = retries.increment(method, url, response=response, _pool=self)
            except MaxRetryError:
                if retries.raise_on_redirect:
                    raise
                return response

            log.info("Redirecting %s -> %s" % (url, redirect_location))
            return self.urlopen(method, redirect_location, body, headers,
                    retries=retries, redirect=redirect,
                    assert_same_host=assert_same_host,
                    timeout=timeout, pool_timeout=pool_timeout,
                    release_conn=release_conn, **response_kw)

        # Check if we should retry the HTTP response.
        if retries.is_forced_retry(method, status_code=response.status):
            retries = retries.increment(method, url, response=response, _pool=self)
            retries.sleep()
            log.info("Forced retry: %s" % url)
            return self.urlopen(method, url, body, headers,
                    retries=retries, redirect=redirect,
                    assert_same_host=assert_same_host,
                    timeout=timeout, pool_timeout=pool_timeout,
                    release_conn=release_conn, **response_kw)

        return response
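
Note how the retry path above stores only sys.exc_info()[2], the traceback, and hands it to Retry.increment so the original stack can be re-raised once retries run out. A reduced Python 2 sketch of that idea (risky_request is hypothetical; the real library raises MaxRetryError with the stored stacktrace instead):

import socket
import sys

def risky_request():
    # Stand-in for _make_request() failing mid-flight.
    raise socket.error("Connection aborted.")

retries = 3
while True:
    try:
        risky_request()
        break
    except socket.error:
        etype, err, tb = sys.exc_info()
        retries -= 1
        if retries <= 0:
            # Out of retries: the three-argument raise re-raises with the
            # original traceback, so the stack still points at
            # risky_request() rather than at this line.
            raise etype, err, tb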

Example 48

Project: attention-lvcsr
Source File: link.py
View license
def raise_with_op(node, thunk=None, exc_info=None, storage_map=None):
    """
    Re-raise an exception while annotating the exception object with
    debug info.

    Parameters
    ----------
    node : Apply node
        The Apply node object that resulted in the raised exception.
    exc_info : tuple, optional
        A tuple containing the exception type, exception object and
        associated traceback, as would be returned by a call to
        `sys.exc_info()` (which is done if `None` is passed).
    storage_map: dict, optional
        storage map of the theano function that resulted in the
        raised exception.

    Notes
    -----
    This re-raises the exception described by `exc_info` (or the last
    one raised, if `exc_info` is omitted) and annotates the exception
    object with several new members which may be helpful for debugging
    Theano graphs. They are:

     * __op_instance__: The Op that is responsible for the exception
       being raised.
     * __thunk_trace__: A traceback corresponding to the code that
       actually generated the exception, if it is available.
     * __applynode_index__: The index of the Apply node corresponding
       to this op in `op.fgraph.toposort()`.

    The exception is not annotated if it is of type `KeyboardInterrupt`.

    """
    if exc_info is None:
        exc_info = sys.exc_info()
    exc_type, exc_value, exc_trace = exc_info
    if exc_type == KeyboardInterrupt:
        # print a simple traceback from KeyboardInterrupt
        reraise(exc_type, exc_value, exc_trace)
    try:
        trace = node.outputs[0].tag.trace
    except AttributeError:
        try:
            trace = node.op.tag.trace
        except AttributeError:
            trace = ()
    exc_value.__thunk_trace__ = trace
    exc_value.__op_instance__ = node
    topo = node.fgraph.toposort()
    if node in topo:
        node_index = topo.index(node)
    else:
        node_index = None
    exc_value.__applynode_index__ = node_index

    hints = []
    detailed_err_msg = "\nApply node that caused the error: " + str(node)
    if exc_value.__applynode_index__ is not None:
        detailed_err_msg += "\nToposort index: %d" % node_index

    types = [getattr(ipt, 'type', 'No type') for ipt in node.inputs]
    detailed_err_msg += "\nInputs types: %s\n" % types

    if thunk is not None:
        if hasattr(thunk, 'inputs'):
            shapes = [getattr(ipt[0], 'shape', 'No shapes')
                      for ipt in thunk.inputs]
            strides = [getattr(ipt[0], 'strides', 'No strides')
                       for ipt in thunk.inputs]
            scalar_values = []
            for ipt in thunk.inputs:
                if getattr(ipt[0], "size", -1) <= 5:
                    scalar_values.append(ipt[0])
                else:
                    scalar_values.append("not shown")
        else:
            shapes = "The thunk don't have an inputs attributes."
            strides = "So we can't access the strides of inputs values"
            scalar_values = "And can't print its inputs scalar value"
        clients = [[c[0] for c in var.clients] for var in node.outputs]
        detailed_err_msg += ("Inputs shapes: %s" % shapes +
                             "\nInputs strides: %s" % strides +
                             "\nInputs values: %s" % scalar_values)
        if hasattr(node.op, '__input_name__'):
            detailed_err_msg += "\nInputs name: %s\n" % str(node.op.__input_name__)

        detailed_err_msg += "\nOutputs clients: %s\n" % clients
    else:
        hints.append(
            "HINT: Use another linker then the c linker to"
            " have the inputs shapes and strides printed.")

    # Print node backtraces
    tr = getattr(node.outputs[0].tag, 'trace', [])
    if type(tr) is list and len(tr) > 0:
        detailed_err_msg += "\nBacktrace when the node is created(use Theano flag traceback.limit=N to make it longer):\n"

        # Print a separate message for each element in the list of backtraces
        sio = StringIO()
        for subtr in tr:
            traceback.print_list(subtr, sio)
        detailed_err_msg += str(sio.getvalue())
    else:
        hints.append(
            "HINT: Re-running with most Theano optimization disabled could"
            " give you a back-trace of when this node was created. This can"
            " be done with by setting the Theano flag"
            " 'optimizer=fast_compile'. If that does not work,"
            " Theano optimizations can be disabled with 'optimizer=None'.")

    if theano.config.exception_verbosity == 'high':

        f = StringIO()
        theano.printing.debugprint(node, file=f, stop_on_name=True,
                                   print_type=True)
        detailed_err_msg += "\nDebugprint of the apply node: \n"
        detailed_err_msg += f.getvalue()

    # Prints output_map
    if theano.config.exception_verbosity == 'high' and storage_map is not None:
        detailed_err_msg += "\nStorage map footprint:\n"
        shared_input_list = [
            item for item in node.fgraph.inputs
            if isinstance(item, theano.compile.SharedVariable)]
        nonshared_input_list = [
            item for item in node.fgraph.inputs
            if not isinstance(item, theano.compile.SharedVariable)]
        storage_map_list = []
        total_size = 0
        total_size_inputs = 0
        for k in storage_map:
            storage_map_item = []

            # storage_map_item[0]: the variable
            storage_map_item.append(str(k))

            # storage_map_item[1]: the shape
            shapeinfo = None
            if hasattr(storage_map[k][0], 'shape'):
                shapeinfo = storage_map[k][0].shape
                if len(shapeinfo) != 0:
                    storage_map_item.append(shapeinfo)
                else:
                    storage_map_item.append(tuple())
            else:
                storage_map_item.append(None)

            # storage_map_item[2]: itemsize
            # storage_map_item[3]: bytes
            if hasattr(storage_map[k][0], 'dtype'):
                dtype = storage_map[k][0].dtype
                storage_map_item.append(numpy.dtype(dtype).itemsize)
                if shapeinfo is None:
                    storage_map_item.append(-1)
                else:
                    sz = numpy.dtype(dtype).itemsize * numpy.prod(shapeinfo)
                    storage_map_item.append(sz)
                    total_size += sz
                    if not k.owner:
                        total_size_inputs += sz
                    else:
                        # If it is a view, don't count it twice.
                        if getattr(k.owner.op, 'view_map', None):
                            vmap = k.owner.op.view_map
                            out_idx = k.owner.outputs.index(k)
                            data = storage_map[k][0]
                            if out_idx in vmap:
                                assert len(vmap[out_idx]) == 1
                                input_data = storage_map[
                                    k.owner.inputs[vmap[out_idx][0]]][0]
                                if k.type.may_share_memory(data, input_data):
                                    total_size -= sz
                        # If it is a destroyed input, the input
                        # shouldn't be in the storage_map anymore,
                        # unless a special flag is used. So we
                        # still must check it.
                        if getattr(k.owner.op, 'destroy_map', None):
                            vmap = k.owner.op.destroy_map
                            out_idx = k.owner.outputs.index(k)
                            data = storage_map[k][0]
                            if out_idx in vmap:
                                assert len(vmap[out_idx]) == 1
                                input_data = storage_map[
                                    k.owner.inputs[vmap[out_idx][0]]][0]
                                if k.type.may_share_memory(data, input_data):
                                    total_size -= sz
            else:
                bytes = getsizeof(storage_map[k][0])
                storage_map_item.append(bytes)
                storage_map_item.append(-1)

            # Flag of shared val
            # storage_map_item[4]
            if k in shared_input_list:
                storage_map_item.append(True)
            elif k in nonshared_input_list:
                storage_map_item.append(False)
            else:
                storage_map_item.append(None)
            storage_map_list.append(storage_map_item)

        from operator import itemgetter
        storage_map_list.sort(key=itemgetter(3), reverse=True)
        for item in storage_map_list:
            if item[3] == -1:
                continue
            detailed_err_msg += " - " + item[0] + ", "
            if item[4] is True:
                detailed_err_msg += "Shared Input, "
            elif item[4] is False:
                detailed_err_msg += "Input, "
            if item[1] is not None:
                detailed_err_msg += "Shape: %s, " % str(item[1])
            detailed_err_msg += "ElemSize: %s Byte(s)" % item[2]
            if item[3] is not None:
                detailed_err_msg += ", TotalSize: %s Byte(s)\n" % item[3]
            else:
                detailed_err_msg += "\n"
        detailed_err_msg += " TotalSize: %s Byte(s) %.3f GB\n" % (
            total_size, total_size / 1024. / 1024 / 1024)
        detailed_err_msg += " TotalSize inputs: %s Byte(s) %.3f GB\n" % (
            total_size_inputs, total_size_inputs / 1024. / 1024 / 1024)

    else:
        hints.append(
            "HINT: Use the Theano flag 'exception_verbosity=high'"
            " for a debugprint and storage map footprint of this apply node.")

    try:
        exc_value = exc_type(str(exc_value) + detailed_err_msg +
                             '\n' + '\n'.join(hints))
    except TypeError:
        print("WARNING: %s error does not allow us to add extra error message" %
              str(exc_type))
        # Some exceptions need extra parameters as inputs, so forget
        # the extra-long error message in that case.
        pass
    reraise(exc_type, exc_value, exc_trace)
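
raise_with_op defaults its exc_info argument to sys.exc_info(), so it can be called bare from inside any except block. A stripped-down sketch of that calling convention (annotate_and_reraise and the fake node are illustrative only, not the Theano API):

import sys

def annotate_and_reraise(node, exc_info=None):
    # Default to the triple of the exception currently being handled,
    # exactly as raise_with_op does above.
    if exc_info is None:
        exc_info = sys.exc_info()
    exc_type, exc_value, exc_trace = exc_info
    exc_value.__op_instance__ = node  # attach debug info to the exception
    raise exc_type, exc_value, exc_trace  # Python 2 re-raise, keeps traceback

try:
    1 / 0
except ZeroDivisionError:
    try:
        annotate_and_reraise(node="<fake Apply node>")
    except ZeroDivisionError as e:
        print("annotated with: %r" % (e.__op_instance__,))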

Example 49

Project: peregrine
Source File: generate.py
View license
def generateSamples(outputFile,
                    sv_list,
                    encoder,
                    time0S,
                    nSamples,
                    outputConfig,
                    noiseSigma=None,
                    tcxo=None,
                    filterType="none",
                    groupDelays=None,
                    logFile=None,
                    threadCount=0,
                    pbar=None):
  '''
  Generates samples.

  Parameters
  ----------
  outputFile : string
    Output file name.
  sv_list : list
    List of configured satellite objects.
  encoder : Encoder
    Output encoder object.
  time0S : float
    Time epoch for the first sample.
  nSamples : long
    Total number of samples to generate.
  outputConfig : object
    Output parameters
  noiseSigma : float, optional
    When specified, adds random noise to the output.
  tcxo : object, optional
    When specified, controls TCXO drift
  filterType : string, optional
    Controls IIR/FIR signal post-processing. Disabled by default.
  groupDelays : bool
    Flag indicating whether group delays are enabled.
  logFile : object
    Debug information destination file.
  threadCount : int
    Number of parallel threads for multi-process computation.
  pbar : object
    Progress bar object
  '''

  #
  # Print out parameters
  #
  logger.info(
    "Generating samples, sample rate={} Hz, interval={} seconds".format(
      outputConfig.SAMPLE_RATE_HZ, nSamples / outputConfig.SAMPLE_RATE_HZ))
  logger.debug("Jobs: %d" % threadCount)

  _count = 0L

  # Check which bands are enabled, configure band-specific parameters
  bands = [outputConfig.GPS.L1,
           outputConfig.GPS.L2,
           outputConfig.GLONASS.L1,
           outputConfig.GLONASS.L2]  # Supported bands
  lpf = [None] * outputConfig.N_GROUPS
  lpfFA_db = [0.] * outputConfig.N_GROUPS  # Filter attenuation levels
  bandsEnabled = [False] * outputConfig.N_GROUPS

  bandPass = filterType == 'bandpass'
  lowPass = filterType == 'lowpass'

  for band in bands:
    for sv in sv_list:
      bandsEnabled[band.INDEX] |= sv.isBandEnabled(band, outputConfig)
    sv = None

    filterObject = None
    ifHz = 0.
    if hasattr(band, "INTERMEDIATE_FREQUENCY_HZ"):
      ifHz = band.INTERMEDIATE_FREQUENCY_HZ
    elif hasattr(band, "INTERMEDIATE_FREQUENCIES_HZ"):
      ifHz = band.INTERMEDIATE_FREQUENCIES_HZ[0]
    else:  # pragma: no coverage
      assert False

    if lowPass:
      filterObject = LowPassFilter(outputConfig, ifHz)
    elif bandPass:
      filterObject = BandPassFilter(outputConfig, ifHz)
    if filterObject:
      lpf[band.INDEX] = filterObject
      lpfFA_db[band.INDEX] = filterObject.getPassBandAtt()
      logger.debug("Band %d filter NBW is %s" %
                   (band.INDEX, str(filterObject)))

  if noiseSigma is not None:
    noiseVariance = noiseSigma * noiseSigma
    noiseParams = NoiseParameters(outputConfig.SAMPLE_RATE_HZ, noiseSigma)
    logger.info("Selected noise sigma %f (variance %f)" %
                (noiseSigma, noiseVariance))

  else:
    noiseVariance = 0.
    noiseSigma = 0.
    noiseParams = NoiseParameters(outputConfig.SAMPLE_RATE_HZ, 0.)
    logger.info("SNR is not provided, noise is not generated.")

  # Print out SV parameters
  printSvInfo(sv_list, outputConfig, lpfFA_db, noiseParams, encoder)

  userTime_s = float(time0S)

  deltaUserTime_s = (float(outputConfig.SAMPLE_BATCH_SIZE) /
                     float(outputConfig.SAMPLE_RATE_HZ))
  debugFlag = logFile is not None

  if debugFlag:
    logFile.write("Index,Time")
    for sv in sv_list:
      svName = sv.getName()
      for band in bands:
        if sv.isBandEnabled(band, outputConfig):
          logFile.write(",%s/%s" % (svName, band.NAME))
    # End of line
    logFile.write("\n")

  if threadCount > 0:
    # Parallel execution: create worker pool
    workerPool = [Worker(outputConfig,
                         sv_list,
                         noiseParams,
                         tcxo,
                         lpf,
                         groupDelays,
                         bands,
                         debugFlag) for _ in range(threadCount)]

    for worker in workerPool:
      worker.start()
    # Each worker in the pool permits 2 tasks in the queue.
    maxTaskListSize = threadCount * 2
  else:
    # Synchronous execution: single worker
    workerPool = None
    task = Task(outputConfig,
                sv_list,
                noiseParams=noiseParams,
                tcxo=tcxo,
                signalFilters=lpf,
                groupDelays=groupDelays,
                bands=bands,
                generateDebug=debugFlag)
    maxTaskListSize = 1

  workerPutIndex = 0  # Worker index for adding task parameters with RR policy
  workerGetIndex = 0  # Worker index for getting task results with RR policy
  activeTasks = 0     # Number of active generation tasks

  totalSampleCounter = 0L
  taskQueuedCounter = 0
  taskReceivedCounter = 0

  totalEncodeTime_s = 0.
  totalWaitTime_s = 0.

  while True:
    while activeTasks < maxTaskListSize and totalSampleCounter < nSamples:
      # We have space in the task backlog and not all batchIntervals are issued

      userTime0_s = userTime_s

      if totalSampleCounter + outputConfig.SAMPLE_BATCH_SIZE > nSamples:
        # Last interval may contain less than full batch size of samples
        sampleCount = nSamples - totalSampleCounter
        userTimeX_s = userTime0_s + (float(sampleCount) /
                                     float(outputConfig.SAMPLE_RATE_HZ))
      else:
        # Normal interval: full batch size
        userTimeX_s = userTime_s + deltaUserTime_s
        sampleCount = outputConfig.SAMPLE_BATCH_SIZE

      # Parameters: time interval start, number of samples, sample index
      # counter for debug output
      params = (userTime0_s, sampleCount, totalSampleCounter)
      if workerPool is not None:
        # Parallel execution: add the next task parameters into the worker's
        # pool queue. Worker pool uses RR policy.
        workerPool[workerPutIndex].queueIn.put(params)
        workerPutIndex = (workerPutIndex + 1) % threadCount
      else:
        # Synchronous execution: update task parameters for the next interval
        task.update(userTime0_s, sampleCount, totalSampleCounter)
      activeTasks += 1

      # Update parameters for the next batch interval
      userTime_s = userTimeX_s
      totalSampleCounter += sampleCount
      taskQueuedCounter += 1

    # Wait for the data only if we have something to wait for
    if taskReceivedCounter == taskQueuedCounter and \
       totalSampleCounter == nSamples:
      # No more tasks to issue to generator
      # No more tasks to wait
      break

    try:
      if workerPool is not None:
        # Parallel execution: wait for the next task result
        worker = workerPool[workerGetIndex]
        waitStartTime_s = time.time()
        result = worker.queueOut.get()
        workerGetIndex = (workerGetIndex + 1) % threadCount
        waitDuration_s = time.time() - waitStartTime_s
        totalWaitTime_s += waitDuration_s
      else:
        # Synchronous execution: execute task and get result
        result = task.perform()
    except:
      exType, exValue, exTraceback = sys.exc_info()
      traceback.print_exception(exType, exValue, exTraceback, file=sys.stderr)
      result = None
    taskReceivedCounter += 1
    activeTasks -= 1

    if result is None:
      logger.error("Error in processor; aborting.")
      break

    # Unpack result values.
    (inputParams, signalSamples, debugData) = result
    (_userTime0_s, _sampleCount, _firstSampleIndex) = inputParams

    if logFile is not None:
      # Data from all satellites is collected. Now we can dump the debug matrix

      userTimeAll_s = debugData['time']
      signalData = debugData['signalData']
      for smpl_idx in range(_sampleCount):
        logFile.write("{},{}".format(_firstSampleIndex + smpl_idx,
                                     userTimeAll_s[smpl_idx]))
        for svIdx in range(len(signalData)):
          signalSourceData = signalData[svIdx]['data']
          for band in signalSourceData:
            doppler = band['doppler']
            logFile.write(",{}".format(doppler[smpl_idx]))
        # End of line
        logFile.write("\n")

    encodeStartTime_s = time.time()
    # Feed data into encoder
    encodedSamples = encoder.addSamples(signalSamples)
    signalSamples = None

    if len(encodedSamples) > 0:
      _count += len(encodedSamples)
      encodedSamples.tofile(outputFile)
      encodedSamples = None

    totalEncodeTime_s += time.time() - encodeStartTime_s

    if pbar:
      pbar.update(_firstSampleIndex + _sampleCount)

  # Generation completed.

  # Flush any pending data in encoder
  encodedSamples = encoder.flush()
  if len(encodedSamples) > 0:
    encodedSamples.tofile(outputFile)

  # Close debug log file
  if debugFlag:
    logFile.close()

  # Terminate all worker processes
  if workerPool is not None:
    for worker in workerPool:
      worker.queueIn.put(None)
    for i, worker in enumerate(workerPool):
      try:
        statistics = worker.queueOut.get(timeout=2)
        logger.info("Worker [%d] statistics: %s" % (i, str(statistics)))
      except:
        exType, exValue, exTraceback = sys.exc_info()
        traceback.print_exception(
            exType, exValue, exTraceback, file=sys.stderr)
      worker.queueIn.close()
      worker.queueOut.close()
      worker.terminate()
      worker.join()

  # Print some statistical debug information
  logger.debug("MAIN: Encode duration: %f" % totalEncodeTime_s)
  logger.debug("MAIN: wait duration: %f" % totalWaitTime_s)

Example 50

Project: Canto
Source File: canto_fetch.py
View license
    def run(self):
        curfeed = self.get_curfeed()

        # Determine whether it's been long enough between
        # updates to warrant refetching the feed.

        if time.time() - curfeed["canto_update"] < self.fd.rate * 60 and\
                not self.force:
            return

        # Attempt to set the tag, if unspecified, by grabbing
        # it out of the previously downloaded info.

        if not self.fd.base_set:
            if "feed" in curfeed and "title" in curfeed["feed"]:
                replace = lambda x: x or curfeed["feed"]["title"]
                self.fd.tags = [ replace(x) for x in self.fd.tags]
                self.fd.base_set = 1
                self.log_func("Updating %s" % self.fd.tags[0])
            else:
                # This is the first time we've gotten this URL,
                # so just use the URL since we don't know the title.

                self.log_func("New feed %s" % self.fd.URL)
        else:
            self.log_func("Updating %s" % self.fd.tags[0])

        # This block sets newfeed to a parsed feed.

        try:
            # Feed from script
            if self.fd.URL.startswith("script:"):
                script = self.spath + "/" + self.fd.URL[7:]
                out = commands.getoutput(script)
                newfeed = feedparser.parse(out)
            # Feed from URL
            else:
                request = urllib2.Request(self.fd.URL)
                request.add_header('User-Agent',\
                    "Canto/%d.%d.%d + http://codezen.org/canto" %\
                    VERSION_TUPLE)

                # Feed from URL w/ password
                if self.fd.username or self.fd.password:
                    mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
                    domain = urlparse.urlparse(self.fd.URL)[1]
                    mgr.add_password(None, domain,\
                            self.fd.username, self.fd.password)

                    # First, we try Basic Authentication
                    auth = urllib2.HTTPBasicAuthHandler(mgr)
                    opener = urllib2.build_opener(auth)
                    try:
                        newfeed = feedparser.parse(opener.open(request))
                    except:
                        # And, failing that, try Digest Authentication
                        auth = urllib2.HTTPDigestAuthHandler(mgr)
                        opener = urllib2.build_opener(auth)
                        newfeed = feedparser.parse(opener.open(request))
                # Feed with no password.
                else:
                    newfeed = feedparser.parse(\
                            feedparser.urllib2.urlopen(request))
        except:
            # Generally an exception is a connection refusal, but in any
            # case we either won't get data or can't trust the data, so
            # just skip processing this feed.

            enc = locale.getpreferredencoding()
            self.log_func("Exception trying to get feed %s : %s" % \
                    (self.fd.URL.encode(enc, "ignore"), sys.exc_info()[1]))

            return

        # I don't know why feedparser doesn't actually throw this
        # since all URLErrors are basically unrecoverable.

        if "bozo_exception" in newfeed:
            if type(newfeed["bozo_exception"]) == urllib2.URLError:
                self.log_func(\
                    "Feedparser exception getting %s : %s, bailing." %\
                    (self.fd.URL, newfeed["bozo_exception"].reason))
                return
            if not len(newfeed["entries"]):
                self.log_func(\
                    "Feedparser exception, no content in %s : %s, bailing." %\
                    (self.fd.URL, newfeed["bozo_exception"]))
                return

        # Filter out "No Content" message since we apparently have real content

        curfeed["entries"] = [ x for x in curfeed["entries"] if x["id"] !=\
                "canto-internal"]

        # For new feeds whose base tag is still not set, attempt to get a title
        # again.

        if not self.fd.base_set:
            if "feed" not in newfeed or "title" not in newfeed["feed"]:
                self.log_func("Ugh. Defaulting to URL for tag. No guarantees.")
                newfeed["feed"]["title"] = self.fd.URL

            replace = lambda x: x or newfeed["feed"]["title"]
            self.fd.tags = [ replace(x) for x in self.fd.tags]

        # Feedparser returns a very nice dict of information.
        # If there was something wrong with the feed (usually encodings
        # being mis-declared or missing tags), it sets
        # bozo_exception.

        # These exceptions are recoverable and their objects are
        # un-Picklable so we log it and remove the value.

        if "bozo_exception" in newfeed:
            self.log_func("Recoverable error in feed %s: %s" % 
                        (self.fd.tags[0], newfeed["bozo_exception"]))
            newfeed["bozo_exception"] = None

        # Make state persist between feeds. Currently, this is completely
        # unused, as there's no state information that needs to be propagated.
        # This is a relic from when feeds and tags were the same thing;
        # however, it could be useful when integrating with another client /
        # website, so it hasn't been removed.

        newfeed["canto_state"] = curfeed["canto_state"]
        newfeed["canto_update"] = time.time()

        # We can set this here, without checking curfeed.
        # Any migration should be done in the get_curfeed function,
        # when the old data is first loaded.

        newfeed["canto_version"] = VERSION_TUPLE

        # For all content that we would usually use, we escape all of the
        # slashes and other potential escapes, except for the link item,
        # which is escaped in the reader when it is displayed. This is to
        # prevent sending garbled links to the external browser.

        for key in newfeed["feed"]:
            if type(newfeed["feed"][key]) in [unicode,str]:
                newfeed["feed"][key] = utility.stripchars(newfeed["feed"][key])

        for entry in newfeed["entries"]:
            for subitem in ["content","enclosures"]:
                if subitem in entry:
                    for e in entry[subitem]:
                        for k in e.keys():
                            if type(e[k]) in [unicode,str]:
                                e[k] = utility.stripchars(e[k])

            for key in entry.keys():
                if type(entry[key]) in [unicode,str] and key != "link":
                    entry[key] = utility.stripchars(entry[key])

        for entry in newfeed["entries"]:
            # If the item didn't come with a GUID, then
            # use link and then title as an identifier.

            if not "id" in entry:
                if "link" in entry:
                    entry["id"] = entry["link"]
                elif "title" in entry:
                    entry["id"] = entry["title"]
                else:
                    entry["id"] = None

        # Then search through the current feed to
        # make item state persistent, and loop until
        # it's safe to update on disk.

        new = []
        while 1:
            for entry in newfeed["entries"]:
                for centry in curfeed["entries"]:
                    if entry["id"] == centry["id"]:
                        entry["canto_state"] = centry["canto_state"]
                        
                        # The entry is removed so that later it's
                        # not a candidate for being appended to the
                        # end of the feed.

                        curfeed["entries"].remove(centry)
                        break
                else:
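                    # for/else: this branch only runs when the inner loop
                    # didn't break, i.e. the entry wasn't in the old feed
                    # and is genuinely new.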
                    new.append(entry)

                # Apply default state to genuinely new items.
                if "canto_state" not in entry:
                    entry["canto_state"] = self.fd.tags + [u"*"]

            # Tailor the list to the correct number of items. In canto < 0.7.0,
            # you could specify a keep that was lower than the number of items
            # in the feed. That was simple to implement, but ultimately it
            # caused too much "bounce" for social news feeds. Items get put
            # into the feed, are upvoted enough to be within the first n items,
            # you change their state, they move out of the first n items, are
            # forgotten, then are upvoted again into the first n items and (as
            # far as c-f knows) are treated like brand new items.

            # This will still be a problem if items get taken out of the feed
            # and put back into the feed (and the item isn't in the extra kept
            # items), but then it becomes a site problem, not a reader problem.

            if self.fd.keep and len(newfeed["entries"]) < self.fd.keep:
                newfeed["entries"] += curfeed["entries"]\
                        [:self.fd.keep - len(newfeed["entries"])]
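            # e.g. keep == 50 with 20 fresh entries pads the list with the
            # first 30 entries carried over from the stored feed.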

            # Enforce the "never_discard" setting
            # We iterate through the stories and then the tag so that
            # feed order is preserved.

            for e in curfeed["entries"]:
                for tag in self.cfg.never_discard:
                    if tag == "unread":
                        if "read" in e["canto_state"]:
                            continue
                    elif tag not in e["canto_state"]:
                        continue
                    if e not in newfeed["entries"]:
                        newfeed["entries"].append(e)

            if self.cfg.new_hook:
                for entry in [e for e in new if e in newfeed["entries"]]:
                    self.cfg.new_hook(newfeed, entry, entry == new[-1])
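                    # The last argument flags the final new item, letting the
                    # hook know the batch is complete.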

            # Dump the output to the new file.

            # Locking and writing with fcntl is counter-intuitive. If you open
            # with "w" and fail to get the lock, the data is still deleted. The
            # solution is to open with "a", get the lock and then truncate the
            # file.

            f = open(self.fpath, "a")
            fcntl.flock(f.fileno(), fcntl.LOCK_EX)

            # The feed was modified out from under us.
            if self.prevtime and self.prevtime != os.stat(self.fpath).st_mtime:
                # Unlock.
                fcntl.flock(f.fileno(), fcntl.LOCK_UN)
                f.close()

                # Reread the state from disk.
                newer_curfeed = self.get_curfeed()

                # There was an actual c-f update done, bail.
                if newer_curfeed["canto_update"] != curfeed["canto_update"]:
                    self.log_func("%s updated already, bailing" %
                            self.fd.tags[0])
                    break

                # Just a state modification by the client, update and continue.
                else:
                    curfeed = newer_curfeed
                    continue

            # Truncate the file now that we hold the lock: seek(0) followed
            # by truncate() empties it in place.
            f.seek(0, 0)
            f.truncate()

            try:
                # Dump the feed item. It's important to flush afterwards to
                # avoid unlocking the file before all the IO is finished.
                cPickle.dump(newfeed, f)
                f.flush()
            except:
                self.log_func("cPickle dump exception on %s" % self.fpath)
            finally:
                # Unlock.
                fcntl.flock(f.fileno(), fcntl.LOCK_UN)
                f.close()

            # If we managed to write to disk, break out of the while loop and
            # the thread will exit.

            break