logging.debug

Here are examples of the Python API logging.debug, taken from open source projects.

200 Examples
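
Before turning to the project examples, here is a minimal, self-contained sketch of the call itself (standard library only; the message text and its arguments are purely illustrative):

import logging

# Debug records are suppressed by default (the root logger level is WARNING),
# so configure the root logger first.
logging.basicConfig(level=logging.DEBUG, format="%(levelname)s:%(name)s:%(message)s")

# Positional arguments are substituted lazily, only if the record is emitted.
logging.debug("loaded %d samples from %s", 342, "usb_mouse.pcap")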

Example 151

Project: netzob
Source File: test_USBMouseProtocol.py
    def test_inferUSBMouseProtocol(self):
        """This method illustrates the very short script which
        allows to give some insights on the over USB protocol used
        by a traditionnal mouse."""

        # Put samples in an array
        samples = [
			"00ff1f000000",
			"00fe1f000000",
			"00fe1f000000",
			"00fe1f000000",
			"00ff1f000000",
			"00ff1f000000",
			"00ff0f000000",
			"00fe2f000000",
			"00fe1f000000",
			"00ff0f000000",
			"000010000000",
			"00fe1f000000",
			"00ff1f000000",
			"00ff0f000000",
			"00ff1f000000",
			"000010000000",
			"00ff0f000000",
			"000010000000",
			"00ff1f000000",
			"00ff0f000000",
			"00ff1f000000",
			"00fe0f000000",
			"00ff1f000000",
			"00ff1f000000",
			"00ff0f000000",
			"00fe1f000000",
			"00fe2f000000",
			"00fe1f000000",
			"00fe1f000000",
			"00fe1f000000",
			"000010000000",
			"00ff0f000000",
			"000010000000",
			"00fe0f000000",
			"00ff1f000000",
			"00fe1f000000",
			"00ff1f000000",
			"00ff1f000000",
			"00ff0f000000",
			"000010000000",
			"00fe1f000000",
			"00ff2f000000",
			"00fe1f000000",
			"00fe1f000000",
			"00ff1f000000",
			"00ff1f000000",
			"00ff1f000000",
			"00ff1f000000",
			"00fe1f000000",
			"00fe2f000000",
			"00fd2f000000",
			"00fe2f000000",
			"00fe1f000000",
			"00ff1f000000",
			"000010000000",
			"00ff1f000000",
			"00ff1f000000",
			"00ff1f000000",
			"00ff1f000000",
			"00ff0f000000",
			"000010000000",
			"00fe1f000000",
			"00ff0f000000",
			"00fe2f000000",
			"00fe2f000000",
			"00fd2f000000",
			"00ff2f000000",
			"00ff1f000000",
			"00fe0f000000",
			"000010000000",
			"00ff1f000000",
			"00fe1f000000",
			"00ff1f000000",
			"00ff1f000000",
			"00ff1f000000",
			"00fe1f000000",
			"00ff1f000000",
			"00ff2f000000",
			"00ff0f000000",
			"00fe1f000000",
			"00fe2f000000",
			"00ff1f000000",
			"00fe1f000000",
			"00ff1f000000",
			"00ff1f000000",
			"00fe1f000000",
			"00fe1f000000",
			"00fe1f000000",
			"00fe2f000000",
			"00ff1f000000",
			"00fe1f000000",
			"00ff1f000000",
			"00ff1f000000",
			"00ff1f000000",
			"00ff1f000000",
			"00fe1f000000",
			"00ff1f000000",
			"00ff0f000000",
			"000010000000",
			"00fe1f000000",
			"000010000000",
			"00fe1f000000",
			"00ff1f000000",
			"00ff1f000000",
			"00ff1f000000",
			"00ff0f000000",
			"00ff1f000000",
			"00ff1f000000",
			"00fe2f000000",
			"00fe1f000000",
			"00fe2f000000",
			"00fe1f000000",
			"00ff1f000000",
			"00fe1f000000",
			"00ff1f000000",
			"00fe1f000000",
			"00fe2f000000",
			"00ff1f000000",
			"00fe1f000000",
			"00fe1f000000",
			"00ff0f000000",
			"00fe2f000000",
			"00fe1f000000",
			"00fe1f000000",
			"00fe2f000000",
			"00fe1f000000",
			"00fe2f000000",
			"00fe2f000000",
			"00fd2f000000",
			"00fe1f000000",
			"00ff1f000000",
			"00fe1f000000",
			"00fe1f000000",
			"00ff1f000000",
			"00ff1f000000",
			"00fe0f000000",
			"00fe1f000000",
			"00fe2f000000",
			"00fd1f000000",
			"00fe1f000000",
			"00fe1f000000",
			"00fe1f000000",
			"00ff1f000000",
			"00ff0f000000",
			"00ff1f000000",
			"00ff0f000000",
			"00ff1f000000",
			"00ff0f000000",
			"00ff1f000000",
			"00ff0f000000",
			"00ff1f000000",
			"00fe1f000000",
			"00ff1f000000",
			"00ff1f000000",
			"00ff1f000000",
			"00ff0f000000",
			"00ff1f000000",
			"00ff1f000000",
			"00ff1f000000",
			"00fe1f000000",
			"00fe2f000000",
			"00fd1f000000",
			"00fe2f000000",
			"00fd1f000000",
			"00fe1f000000",
			"00fe2f000000",
			"00ff0f000000",
			"00fe1f000000",
			"00fe2f000000",
			"00ff1f000000",
			"00ff1f000000",
			"00ff0f000000",
			"00fe1f000000",
			"00ff1f000000",
			"00fe1f000000",
			"00fe2f000000",
			"00ff1f000000",
			"00fe1f000000",
			"00ff1f000000",
			"00ff1f000000",
			"00fe1f000000",
			"00fe1f000000",
			"00ff2f000000",
			"00fe1f000000",
			"00fe1f000000",
			"00fe2f000000",
			"00fe1f000000",
			"00fe2f000000",
			"00ff1f000000",
			"00fe1f000000",
			"00fe1f000000",
			"00fe2f000000",
			"00ff2f000000",
			"00fe1f000000",
			"00ff1f000000",
			"00ff2f000000",
			"00ff1f000000",
			"00ff1f000000",
			"000020000000",
			"00fe1f000000",
			"00ff1f000000",
			"00ff1f000000",
			"00ff1f000000",
			"00ff1f000000",
			"000010000000",
			"00fe1f000000",
			"00ff0f000000",
			"00ff1f000000",
			"00ff1f000000",
			"00ff1f000000",
			"00fe1f000000",
			"00fe2f000000",
			"00ff1f000000",
			"00fe2f000000",
			"00fe1f000000",
			"00fe2f000000",
			"00ff2f000000",
			"00fe1f000000",
			"00ff1f000000",
			"00ff1f000000",
			"00fe1f000000",
			"00ff1f000000",
			"00ff2f000000",
			"00ff1f000000",
			"00fd1f000000",
			"00fe2f000000",
			"00ff1f000000",
			"00ff0f000000",
			"00ff1f000000",
			"00fe1f000000",
			"000010000000",
			"00ff1f000000",
			"00ff2f000000",
			"00fe2f000000",
			"00fe1f000000",
			"00ff2f000000",
			"00fe1f000000",
			"00ff1f000000",
			"00fe2f000000",
			"00ff1f000000",
			"00fe1f000000",
			"00fe1f000000",
			"00ff2f000000",
			"00fe1f000000",
			"00ff1f000000",
			"00ff1f000000",
			"00ff1f000000",
			"00ff1f000000",
			"00ff1f000000",
			"00ff0f000000",
			"00fe1f000000",
			"00ff1f000000",
			"00ff1f000000",
			"00fe1f000000",
			"00ff1f000000",
			"00fe1f000000",
			"00ff1f000000",
			"00ff1f000000",
			"00ff1f000000",
			"00fe1f000000",
			"00fe1f000000",
			"00fe2f000000",
			"00fc2f000000",
			"00fe1f000000",
			"00fd2f000000",
			"00fe1f000000",
			"00ff1f000000",
			"00fe1f000000",
			"00ff1f000000",
			"00fe0f000000",
			"00ff1f000000",
			"00ff1f000000",
			"00fe1f000000",
			"00ff1f000000",
			"00ff1f000000",
			"00fe1f000000",
			"000010000000",
			"00fe1f000000",
			"00fe1f000000",
			"00ff1f000000",
			"00ff1f000000",
			"00ff1f000000",
			"00ff0f000000",
			"00ff1f000000",
			"00ff0f000000",
			"00ff1f000000",
			"00ff1f000000",
			"00fd2f000000",
			"00fe1f000000",
			"00fe1f000000",
			"00ff1f000000",
			"00ff1f000000",
			"00ff0f000000",
			"00ff1f000000",
			"00fe1f000000",
			"00fc2f000000",
			"00fb2f000000",
			"00fb2f000000",
			"00fa4f000000",
			"00f92f000000",
			"00fa3f000000",
			"00fe1f000000",
			"000010000000",
			"00ff1f000000",
			"00fd3f000000",
			"00fd3f000000",
			"00fe2f000000",
			"00fd2f000000",
			"00ff2f000000",
			"00fe0f000000",
			"000010000000",
			"000010000000",
			"00ff1f000000",
			"000010000000",
			"00ff1f000000",
			"00ff2f000000",
			"000010000000",
			"00fe1f000000",
			"000020000000",
			"00ff1f000000",
			"00ff1f000000",
			"00ff2f000000",
			"00fe1f000000",
			"00ff2f000000"
            ]

        # Create a message for each data
        messages = [RawMessage(data=binascii.unhexlify(sample)) for sample in samples]

        # Create a symbol to represent all the messages
        initialSymbol = Symbol(messages=messages)

        # Split following the value
        Format.splitStatic(initialSymbol, mergeAdjacentDynamicFields=False)

        initialSymbol.addEncodingFunction(TypeEncodingFunction(HexaString))

        logging.debug(initialSymbol)
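
Note that the final call passes the Symbol object itself rather than a pre-rendered string: logging converts the argument with str() only if a handler actually emits the record, so the cost of rendering the field table is skipped whenever DEBUG is disabled. (The excerpt also omits the imports it relies on: logging, binascii and the netzob classes RawMessage, Symbol, Format, TypeEncodingFunction and HexaString.) A stdlib-only sketch of the same deferral, using a hypothetical stand-in class:

import logging

class ExpensiveReport:
    """Stand-in for an object, like a Symbol, whose string rendering is costly."""
    def __str__(self):
        print("rendering report ...")      # visible only when the record is emitted
        return "field table ..."

logging.basicConfig(level=logging.INFO)
logging.debug(ExpensiveReport())           # below the threshold: __str__ never runs
logging.getLogger().setLevel(logging.DEBUG)
logging.debug(ExpensiveReport())           # emitted: __str__ runs and the text is logged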

Example 153

Project: pytrainer
Source File: gpx.py
    def _getValues(self):
        '''
        Migrated to eTree XML processing 26 Nov 2009 - jblance
        '''
        logging.debug(">>")
        tree  = self.tree
        # Calories data comes within laps. Maybe more than one, adding them together - dgranda 20100114
        # Distance data comes within laps where present as well - dgranda 20110204
        laps = tree.findall(lapTag)
        if laps is not None and laps != "":
            totalDistance = 0
            totalDuration = 0
            for lap in laps:
                lapCalories = lap.findtext(calorieTag)                
                self.calories += int(lapCalories)
                lapDistance = lap.findtext(distanceTag)
                totalDistance += float(lapDistance)
                lapDuration_tmp = lap.findtext(elapsedTimeTag)
                # When retrieving data from TCX file -> seconds (float)
                # When retrieving data from GPX+ file -> hh:mm:ss
                # EAFP -> http://docs.python.org/glossary.html
                try:
                    lapDuration = float(lapDuration_tmp)
                except ValueError:
                    hour,minu,sec = lapDuration_tmp.split(":")
                    lapDuration = float(sec) + int(minu)*60 + int(hour)*3600
                totalDuration += lapDuration 
                logging.info("Lap distance: %s m | Duration: %s s | Calories: %s kcal" % (lapDistance, lapDuration, lapCalories))
            self.total_dist = float(totalDistance/1000.0) # Returning km
            self.total_time = int(totalDuration) # Returning seconds
            logging.info("Laps - Distance: %.02f km | Duration: %d s | Calories: %s kcal" % (self.total_dist, self.total_time, self.calories))
        else:
            laps = []

        retorno = []
        his_vel = []
        last_lat = None
        last_lon = None
        last_time = None
        total_dist = 0
        dist_elapsed = 0 # distance since the last time found
        total_hr = 0
        tmp_alt = 0
        len_validhrpoints = 0
        trkpoints = tree.findall(trackPointTag)
        if trkpoints is None or len(trkpoints) == 0:
            logging.debug( "No trkpoints found in file")
            return retorno
        logging.debug("%d trkpoints in file" % len(trkpoints))

        date_ = tree.find(timeTag).text
        if date_ is None:
            logging.info("time tag is blank")
            self.date = None
        else:
            mk_time = self.getDateTime(date_)[1] #Local Date
            self.date = mk_time.strftime("%Y-%m-%d")
            self.start_time = mk_time.strftime("%H:%M:%S")
        waiting_points = []
        logging.debug("date: %s | start_time: %s | mk_time: %s" % (self.date, self.start_time, mk_time))

        for i, trkpoint in enumerate(trkpoints):
            #Get data from trkpoint
            try:
                lat = float(trkpoint.get("lat"))
                lon = float(trkpoint.get("lon"))
            except Exception as e:
                logging.debug(str(e))
                lat = lon = None
            if lat is None or lat == "" or lat == 0 or lon is None or lon == "" or lon == 0:
                logging.debug("lat or lon is blank or zero")
                continue
            #get the heart rate value from the gpx extended format file
            hrResult = trkpoint.find(hrTag)
            if hrResult is not None:
                hr = int(hrResult.text)
                len_validhrpoints += 1
                total_hr += hr          #TODO fix
                if hr>self.maxhr:
                    self.maxhr = hr
            else:
                hr = None
            #get the cadence (if present)
            cadResult = trkpoint.find(cadTag)
            if cadResult is not None:
                cadence = int(cadResult.text)
            else:
                cadence = None

            #get the time
            timeResult = trkpoint.find(timeTag)
            if timeResult is not None:
                date_ = timeResult.text
                mk_time = self.getDateTime(date_)[0]
                time_ = time.mktime(mk_time.timetuple()) #Convert date to seconds
                if i == 0:
                    time_elapsed = 0
                else:
                    time_elapsed = time_ - self.trkpoints[i-1]['time'] if self.trkpoints[i-1]['time'] is not None else 0
                    if time_elapsed > 10:
                        logging.debug("%d seconds from last trkpt, someone took a break!" % time_elapsed)
                        # Calculating average lapse between trackpoints to add it
                        average_lapse = round(self.total_time_trkpts/i)
                        logging.debug("Adding %d seconds (activity average) as lapse from last point" % average_lapse)
                        self.total_time_trkpts += average_lapse
                    else:
                        self.total_time_trkpts += time_elapsed
            else:
                time_ = None
                time_elapsed = None

            #get the elevation
            eleResult = trkpoint.find(elevationTag)
            rel_alt = 0
            if eleResult is not None:
                try:
                    ele = float(eleResult.text)
                    #Calculate elevation change
                    if i != 0:
                        rel_alt = ele - self.trkpoints[i-1]['ele'] if self.trkpoints[i-1]['ele'] is not None else 0
                except Exception as e:
                    logging.debug(str(e))
                    ele = None
            else:
                ele = None
                
            #Get corrected elevation if it exists
            correctedEleResult = trkpoint.find(pyt_eleTag)
            if correctedEleResult is not None:
                try:
                    corEle = float(correctedEleResult.text)
                    #Calculate elevation change
                except Exception as e:
                    logging.debug(str(e))
                    corEle = None
            else:
                corEle = None

            #Calculate climb or decent amount
            #Allow for some 'jitter' in height here
            JITTER_VALUE = 0  #Elevation changes less than this value are not counted in +-
            if abs(rel_alt) < JITTER_VALUE:
                rel_alt = 0
            if rel_alt > 0:
                self.upositive += rel_alt
            elif rel_alt < 0:
                self.unegative -= rel_alt

            #Calculate distance between two points
            if i == 0: #First point
                dist = None
            else:
                dist = self._distance_between_points(lat1=self.trkpoints[i-1]['lat'], lon1=self.trkpoints[i-1]['lon'], lat2=lat, lon2=lon)

            #Accumulate distances
            if dist is not None:
                dist_elapsed += dist #TODO fix
                self.total_dist_trkpts += dist

            #Calculate speed...
            vel = self._calculate_speed(dist, time_elapsed, smoothing_factor=3)
            if vel>self.maxvel:
                self.maxvel=vel

            #The waiting point stuff....
            #This 'fills in' the data for situations where some times are missing from the GPX file
            if time_ is not None:
                if len(waiting_points) > 0:
                    for ((w_total_dist, w_dist, w_alt, w_total_time, w_lat, w_lon, w_hr, w_cadence, w_corEle)) in waiting_points:
                        w_time = (w_dist/dist_elapsed) * time_elapsed
                        w_vel = w_dist/((w_time)/3600.0)
                        w_total_time += w_time
                        logging.info("Time added: %f" % w_time)
                        retorno.append((w_total_dist, w_alt, w_total_time, w_vel, w_lat, w_lon, w_hr, w_cadence, w_corEle))
                    waiting_points = []
                    dist_elapsed = 0
                else:
                    retorno.append((self.total_dist_trkpts,ele, self.total_time,vel,lat,lon,hr,cadence,corEle))
                    dist_elapsed = 0
            else: # time_ is None
                waiting_points.append((self.total_dist_trkpts, dist_elapsed, ele, self.total_time, lat, lon, hr, cadence, corEle))

            #Add to dict of values to trkpoint list
            self.trkpoints.append({ 'id': i,
                                    'lat':lat,
                                    'lon':lon,
                                    'hr':hr,
                                    'cadence':cadence,
                                    'time':time_,
                                    'time_since_previous': time_elapsed,
                                    'time_elapsed': self.total_time_trkpts,
                                    'ele':ele,
                                    'ele_change': rel_alt,
                                    'distance_from_previous': dist,
                                    'elapsed_distance': self.total_dist_trkpts,
                                    'velocity':vel,
                                    'correctedElevation':corEle,

                                })

        #end of for trkpoint in trkpoints loop

        #Calculate averages etc
        self.hr_average = 0
        if len_validhrpoints > 0:
            self.hr_average = total_hr/len_validhrpoints
        # In case there is no other way to calculate distance, we rely on trackpoints (number of trackpoints is configurable!)
        if self.total_dist is None or self.total_dist == 0:
            self.total_dist = self.total_dist_trkpts
        else:
            dist_diff = 1000*(self.total_dist_trkpts - self.total_dist)
            logging.debug("Distance difference between laps and trkpts calculation: %f m" % dist_diff)
        if self.total_time is None or self.total_time == 0:
            self.total_time = self.total_time_trkpts
        else:
            time_diff = self.total_time_trkpts - self.total_time
            logging.debug("Duration difference between laps and trkpts calculation: %d s" % time_diff)
        logging.info("Values - Distance: %.02f km | Duration: %d s | Calories: %s kcal" % (self.total_dist, self.total_time, self.calories))
        logging.debug("<<")
        return retorno
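
Two idioms stand out in this example: logging.debug(">>") and logging.debug("<<") as cheap entry/exit markers for the method, and messages pre-built with the % operator (for instance "%d trkpoints in file" % len(trkpoints)), which are formatted even when DEBUG is off. Passing the values as extra arguments lets logging defer the formatting instead. A short sketch of both patterns (the function and values are illustrative):

import logging
logging.basicConfig(level=logging.DEBUG)

def parse_track(distances_m):
    logging.debug(">>")                                      # entry marker
    logging.debug("%d trkpoints in file", len(distances_m))  # formatted only if emitted
    total = sum(distances_m)
    logging.debug("total distance: %.2f km", total / 1000.0)
    logging.debug("<<")                                      # exit marker
    return total

parse_track([120, 95, 240])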

Example 154

Project: BORIS
Source File: time_budget_widget.py
    def pbSave_clicked(self):
        """
        save time budget analysis results in TSV, CSV, ODS, XLS format
        """

        def complete(l, max):
            """
            complete list with empty string until len = max
            """
            while len(l) < max:
                l.append("")
            return l

        logging.debug("save time budget results to file")

        while True:
            if QT_VERSION_STR[0] == "4":
                fileName, filter_ = QFileDialog(self).getSaveFileNameAndFilter(self, "Save Time budget analysis", "", "Tab Separated Values (*.txt *.tsv);;Comma Separated Values (*.txt *.csv);;HTML (*.html);;Microsoft Excel XLS (*.xls);;Open Document Spreadsheet ODS (*.ods);;All files (*)")
            else:
                fileName, filter_ = QFileDialog(self).getSaveFileName(self, "Save Time budget analysis", "", "Tab Separated Values (*.txt *.tsv);;Comma Separated Values (*.txt *.csv);;HTML (*.html);;Microsoft Excel XLS (*.xls);;Open Document Spreadsheet ODS (*.ods);;All files (*)")

            if not fileName:
                return

            outputFormat = ""
            availableFormats = ("tsv", "csv", "xls", "ods", "html")
            for fileExtension in availableFormats:
                if fileExtension in filter_:
                    outputFormat = fileExtension
                    if not fileName.upper().endswith("." + fileExtension.upper()):
                        fileName += "." + fileExtension

                '''
                if fileExtension in filter_ and not fileName.upper().endswith("." + fileExtension.upper()):
                    fileName += "." + fileExtension
                if fileExtension in filter_:
                    outputFormat = fileExtension
                '''

            if not outputFormat:
                QMessageBox.warning(self, programName, "Choose a file format", QMessageBox.Ok | QMessageBox.Default, QMessageBox.NoButton)
            else:
                break

        if fileName:

            rows = []

            # observations list
            rows.append(["Observations:"])
            for idx in range(self.lw.count()):
                rows.append([""])
                rows.append([self.lw.item(idx).text()])

                if INDEPENDENT_VARIABLES in self.pj[OBSERVATIONS][self.lw.item(idx).text()]:
                    rows.append(["Independent variables:"])
                    for var in self.pj[OBSERVATIONS][self.lw.item(idx).text()][INDEPENDENT_VARIABLES]:
                        rows.append([var, self.pj[OBSERVATIONS][self.lw.item(idx).text()][INDEPENDENT_VARIABLES][var]])



            # check if only one observation was selected
            ''' added indep variables for each observation
            if self.lw.count() == 1:
                rows.append([""])

                # write independant variables to file
                if INDEPENDENT_VARIABLES in self.pj[OBSERVATIONS][self.lw.item(0).text()]:
                    rows.append(["Independent variables:"])
                    for var in self.pj[OBSERVATIONS][self.lw.item(0).text()][INDEPENDENT_VARIABLES]:
                        rows.append([var, self.pj[OBSERVATIONS][self.lw.item(0).text()][INDEPENDENT_VARIABLES][var]])
            '''

            rows.append([""])
            rows.append([""])
            rows.append(["Time budget:"])

            # write header
            cols = []
            for col in range(self.twTB.columnCount()):
                cols.append(self.twTB.horizontalHeaderItem(col).text())

            rows.append(cols)
            rows.append([""])

            for row in range(self.twTB.rowCount()):
                values = []
                for col in range(self.twTB.columnCount()):

                    values.append(intfloatstr(self.twTB.item(row,col).text()))

                    '''
                    try:
                        val = int(self.twTB.item(row,col).text())
                        values.append(val)
                    except:
                        try:
                            val = float(self.twTB.item(row,col).text())
                            values.append("{:0.3f}".format(val))
                        except:
                            values.append(self.twTB.item(row,col).text())
                    '''

                rows.append(values)

            maxLen = max([len(r) for r in rows])
            data = tablib.Dataset()
            data.title = "Time budget"

            for row in rows:
                data.append(complete(row, maxLen))

            if outputFormat == "tsv":
                with open(fileName, "wb") as f:
                    f.write(str.encode(data.tsv))
                return

            if outputFormat == "csv":
                with open(fileName, "wb") as f:
                    f.write(str.encode(data.csv))
                return

            if outputFormat == "html":
                with open(fileName, "wb") as f:
                    f.write(str.encode(data.html))
                return

            if outputFormat == "ods":
                with open(fileName, "wb") as f:
                    f.write(data.ods)
                return

            if outputFormat == "xls":
                with open(fileName, "wb") as f:
                    f.write(data.xls)
                return
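
When a debug message is expensive to assemble (for instance a dump of every table row before saving), Logger.isEnabledFor allows the preparation itself to be skipped. A generic sketch under that assumption; the logger name and row data are illustrative:

import logging
logging.basicConfig(level=logging.INFO)
log = logging.getLogger("time_budget")

rows = [["behaviour", "12.5"], ["duration", "37.0"]]

# Build the potentially large dump only when DEBUG is actually enabled.
if log.isEnabledFor(logging.DEBUG):
    log.debug("time budget rows:\n%s", "\n".join("\t".join(r) for r in rows))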

Example 155

Project: pycounter
Source File: sushi.py
def _raw_to_full(raw_report):
    """Convert a raw report to CounterReport.

    :param raw_report: raw XML report
    :return: a :class:`pycounter.report.CounterReport`
    """
    try:
        root = etree.fromstring(raw_report)
    except etree.XMLSyntaxError:
        logger.error("XML syntax error: %s", raw_report)
        raise pycounter.exceptions.SushiException(
            message="XML syntax error",
            raw=raw_report)
    o_root = objectify.fromstring(raw_report)
    rep = None
    try:
        rep = o_root.Body[_ns('sushicounter', "ReportResponse")]
        c_report = rep.Report[_ns('counter', 'Report')]
    except AttributeError:
        try:
            c_report = rep.Report[_ns('counter', 'Reports')].Report
        except AttributeError:
            logger.error("report not found in XML: %s", raw_report)
            raise pycounter.exceptions.SushiException(
                message="report not found in XML",
                raw=raw_report, xml=o_root)
    logger.debug("COUNTER report: %s", etree.tostring(c_report))
    start_date = datetime.datetime.strptime(
        root.find('.//%s' % _ns('sushi', 'Begin')).text,
        "%Y-%m-%d").date()

    end_date = datetime.datetime.strptime(
        root.find('.//%s' % _ns('sushi', 'End')).text,
        "%Y-%m-%d").date()

    report_data = {'period': (start_date, end_date)}

    rep_def = root.find('.//%s' % _ns('sushi', 'ReportDefinition'))
    report_data['report_version'] = int(rep_def.get('Release'))

    report_data['report_type'] = rep_def.get('Name')

    customer = root.find('.//%s' % _ns('counter', 'Customer'))
    try:
        report_data['customer'] = (customer.find('.//%s' %
                                                 _ns('counter', 'Name')).text)
    except AttributeError:
        report_data['customer'] = ""

    inst_id = customer.find('.//%s' % _ns('counter', 'ID')).text
    report_data['institutional_identifier'] = inst_id

    rep_root = root.find('.//%s' % _ns('counter', 'Report'))
    created_string = rep_root.get('Created')
    if created_string is not None:
        report_data['date_run'] = arrow.get(created_string)
    else:
        report_data['date_run'] = datetime.datetime.now()

    report = pycounter.report.CounterReport(**report_data)

    report.metric = pycounter.constants.METRICS.get(report_data['report_type'])

    for item in c_report.Customer.ReportItems:
        try:
            publisher_name = item.ItemPublisher.text
        except AttributeError:
            publisher_name = ""
        title = item.ItemName.text
        platform = item.ItemPlatform.text

        eissn = issn = isbn = ""

        try:
            for identifier in item.ItemIdentifier:
                if identifier.Type == "Print_ISSN":
                    issn = identifier.Value.text
                    if issn is None:
                        issn = ""
                elif identifier.Type == "Online_ISSN":
                    eissn = identifier.Value.text
                    if eissn is None:
                        eissn = ""
                elif identifier.Type == "Online_ISBN":
                    logging.debug("FOUND ISBN")
                    isbn = identifier.Value.text
                    if isbn is None:
                        isbn = ""

        except AttributeError:
            pass

        month_data = []
        html_usage = 0
        pdf_usage = 0

        metrics_for_db = collections.defaultdict(list)

        for perform_item in item.ItemPerformance:
            item_date = convert_date_run(perform_item.Period.Begin.text)
            logger.debug("perform_item date: %r", item_date)
            usage = None
            for inst in perform_item.Instance:
                if inst.MetricType == "ft_total":
                    usage = str(inst.Count)
                elif inst.MetricType == "ft_pdf":
                    pdf_usage += int(inst.Count)
                elif inst.MetricType == "ft_html":
                    html_usage += int(inst.Count)
                elif report.report_type.startswith('DB'):
                    metrics_for_db[inst.MetricType].append((item_date,
                                                            int(inst.Count)))
            if usage is not None:
                month_data.append((item_date, int(usage)))

        if report.report_type:
            if report.report_type.startswith('JR'):
                report.pubs.append(pycounter.report.CounterJournal(
                    title=title,
                    platform=platform,
                    publisher=publisher_name,
                    period=report.period,
                    metric=report.metric,
                    issn=issn,
                    eissn=eissn,
                    month_data=month_data,
                    html_total=html_usage,
                    pdf_total=pdf_usage
                ))
            elif report.report_type.startswith('BR'):
                report.pubs.append(
                    pycounter.report.CounterBook(
                        title=title,
                        platform=platform,
                        publisher=publisher_name,
                        period=report.period,
                        metric=report.metric,
                        issn=issn,
                        isbn=isbn,
                        month_data=month_data,
                    ))
            elif report.report_type.startswith('DB'):
                for metric_code, month_data in six.iteritems(metrics_for_db):
                    metric = pycounter.constants.DB_METRIC_MAP[metric_code]
                    report.pubs.append(
                        pycounter.report.CounterDatabase(
                            title=title,
                            platform=platform,
                            publisher=publisher_name,
                            period=report.period,
                            metric=metric,
                            month_data=month_data
                        ))

    return report
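
This example mostly uses a module-level logger (logger.debug, logger.error) and only falls back to the root-level logging.debug for the "FOUND ISBN" message. The usual convention is a per-module logger, which tags records with the module name and can be configured independently of the root logger. A brief sketch (function and values are illustrative):

import logging

# Conventional per-module logger: records carry the module name and obey
# per-module level and handler configuration, unlike calls on the root logger.
logger = logging.getLogger(__name__)

def record_identifier(id_type, value):
    if id_type == "Online_ISBN":
        logger.debug("found ISBN: %s", value)
    return value

logging.basicConfig(level=logging.DEBUG)
record_identifier("Online_ISBN", "978-3-16-148410-0")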

Example 156

Project: tp-libvirt
Source File: guest_numa.py
def run(test, params, env):
    """
    Test guest numa setting
    """
    vcpu_num = int(params.get("vcpu_num", 2))
    max_mem = int(params.get("max_mem", 1048576))
    max_mem_unit = params.get("max_mem_unit", 'KiB')
    vcpu_placement = params.get("vcpu_placement", 'static')
    bug_url = params.get("bug_url", "")
    status_error = "yes" == params.get("status_error", "no")
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    backup_xml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
    mode_dict = {'strict': 'bind', 'preferred': 'prefer',
                 'interleave': 'interleave'}

    # Prepare numatune memory parameter dict and list
    mem_tuple = ('memory_mode', 'memory_placement', 'memory_nodeset')
    numa_memory = {}
    for mem_param in mem_tuple:
        value = params.get(mem_param)
        if value:
            numa_memory[mem_param.split('_')[1]] = value

    memnode_tuple = ('memnode_cellid', 'memnode_mode', 'memnode_nodeset')
    numa_memnode = handle_param(memnode_tuple, params)

    if numa_memnode:
        if not libvirt_version.version_compare(1, 2, 7):
            raise error.TestNAError("Setting hugepages more specifically per "
                                    "numa node not supported on current "
                                    "version")

    # Prepare cpu numa cell parameter
    topology = {}
    topo_tuple = ('sockets', 'cores', 'threads')
    for key in topo_tuple:
        if params.get(key):
            topology[key] = params.get(key)

    cell_tuple = ('cell_id', 'cell_cpus', 'cell_memory')
    numa_cell = handle_param(cell_tuple, params)

    # Prepare qemu cmdline check parameter
    cmdline_tuple = ("qemu_cmdline",)
    cmdline_list = handle_param(cmdline_tuple, params)

    # Prepare hugepages parameter
    backup_list = []
    page_tuple = ('vmpage_size', 'vmpage_unit', 'vmpage_nodeset')
    page_list = handle_param(page_tuple, params)
    nr_pagesize_total = params.get("nr_pagesize_total")
    deallocate = False
    default_nr_hugepages_path = "/sys/kernel/mm/hugepages/hugepages-2048kB/"
    default_nr_hugepages_path += "nr_hugepages"

    if page_list:
        if not libvirt_version.version_compare(1, 2, 5):
            raise error.TestNAError("Setting hugepages more specifically per "
                                    "numa node not supported on current "
                                    "version")

    hp_cl = test_setup.HugePageConfig(params)
    default_hp_size = hp_cl.get_hugepage_size()
    supported_hp_size = hp_cl.get_multi_supported_hugepage_size()
    mount_path = []
    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    qemu_conf_restore = False

    def _update_qemu_conf():
        """
        Mount hugepage path, update qemu conf then restart libvirtd
        """
        size_dict = {'2048': '2M', '1048576': '1G', '16384': '16M'}
        for page in page_list:
            if page['size'] not in supported_hp_size:
                raise error.TestError("Hugepage size [%s] isn't supported, "
                                      "please verify kernel cmdline configuration."
                                      % page['size'])
            m_path = "/dev/hugepages%s" % size_dict[page['size']]
            hp_cl.hugepage_size = int(page['size'])
            hp_cl.hugepage_path = m_path
            hp_cl.mount_hugepage_fs()
            mount_path.append(m_path)
        if mount_path:
            qemu_conf.hugetlbfs_mount = mount_path
            libvirtd.restart()

    try:
        # Get host numa node list
        host_numa_node = utils_misc.NumaInfo()
        node_list = host_numa_node.online_nodes
        logging.debug("host node list is %s", node_list)
        used_node = []
        if numa_memory.get('nodeset'):
            used_node += utlv.cpus_parser(numa_memory['nodeset'])
        if numa_memnode:
            for i in numa_memnode:
                used_node += utlv.cpus_parser(i['nodeset'])
        if page_list:
            host_page_tuple = ("hugepage_size", "page_num", "page_nodenum")
            h_list = handle_param(host_page_tuple, params)
            h_nodenum = [h_list[p_size]['nodenum']
                         for p_size in range(len(h_list))]
            for i in h_nodenum:
                used_node += utlv.cpus_parser(i)
        if used_node and not status_error:
            logging.debug("set node list is %s", used_node)
            used_node = list(set(used_node))
            for i in used_node:
                if i not in node_list:
                    raise error.TestNAError("%s in nodeset out of range" % i)
                mem_size = host_numa_node.read_from_node_meminfo(i, 'MemTotal')
                logging.debug("the memory total in the node %s is %s", i, mem_size)
                if not int(mem_size):
                    raise error.TestNAError("node %s memory is empty" % i)

        # set hugepage with qemu.conf and mount path
        if default_hp_size == 2048:
            hp_cl.setup()
            deallocate = True
        else:
            _update_qemu_conf()
            qemu_conf_restore = True

        # set hugepage with total number or per-node number
        if nr_pagesize_total:
            # Only set total 2M size huge page number as total 1G size runtime
            # update not supported now.
            deallocate = True
            hp_cl.kernel_hp_file = default_nr_hugepages_path
            hp_cl.target_hugepages = int(nr_pagesize_total)
            hp_cl.set_hugepages()
        if page_list:
            hp_size = [h_list[p_size]['size'] for p_size in range(len(h_list))]
            multi_hp_size = hp_cl.get_multi_supported_hugepage_size()
            for size in hp_size:
                if size not in multi_hp_size:
                    raise error.TestNAError("The hugepage size %s not "
                                            "supported or not configured under"
                                            " current running kernel." % size)
            # backup node page setting and set new value
            for i in h_list:
                node_val = hp_cl.get_node_num_huge_pages(i['nodenum'],
                                                         i['size'])
                # set hugpege per node if current value not satisfied
                # kernel 1G hugepage runtime number update is supported now
                if int(i['num']) > node_val:
                    node_dict = i.copy()
                    node_dict['num'] = node_val
                    backup_list.append(node_dict)
                    hp_cl.set_node_num_huge_pages(i['num'],
                                                  i['nodenum'],
                                                  i['size'])

        vmxml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
        vmxml.vcpu = vcpu_num
        vmxml.max_mem = max_mem
        vmxml.max_mem_unit = max_mem_unit
        vmxml.current_mem = max_mem
        vmxml.current_mem_unit = max_mem_unit

        # numatune setting
        if numa_memnode:
            vmxml.numa_memory = numa_memory
            vmxml.numa_memnode = numa_memnode
            del vmxml.numa_memory
        if numa_memory:
            vmxml.numa_memory = numa_memory

        # vcpu placement setting
        vmxml.placement = vcpu_placement

        # guest numa cpu setting
        vmcpuxml = libvirt_xml.vm_xml.VMCPUXML()
        vmcpuxml.xml = "<cpu><numa/></cpu>"
        if topology:
            vmcpuxml.topology = topology
        logging.debug(vmcpuxml.numa_cell)
        vmcpuxml.numa_cell = numa_cell
        logging.debug(vmcpuxml.numa_cell)
        vmxml.cpu = vmcpuxml

        # hugepages setting
        if page_list:
            membacking = libvirt_xml.vm_xml.VMMemBackingXML()
            hugepages = libvirt_xml.vm_xml.VMHugepagesXML()
            pagexml_list = []
            for i in range(len(page_list)):
                pagexml = hugepages.PageXML()
                pagexml.update(page_list[i])
                pagexml_list.append(pagexml)
            hugepages.pages = pagexml_list
            membacking.hugepages = hugepages
            vmxml.mb = membacking

        logging.debug("vm xml is %s", vmxml)
        vmxml.sync()

        try:
            vm.start()
            session = vm.wait_for_login()
            vmxml_new = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
            logging.debug("vm xml after start is %s", vmxml_new)

        except virt_vm.VMStartError, e:
            # Starting VM failed.
            if status_error:
                return
            else:
                raise error.TestFail("Test failed in positive case.\n error:"
                                     " %s\n%s" % (e, bug_url))

        vm_pid = vm.get_pid()
        # numa hugepage check
        if page_list:
            numa_maps = open("/proc/%s/numa_maps" % vm_pid)
            numa_map_info = numa_maps.read()
            numa_maps.close()
            hugepage_info = re.findall(".*file=\S*hugepages.*", numa_map_info)
            if not hugepage_info:
                raise error.TestFail("Can't find hugepages usage info in vm "
                                     "numa maps")
            else:
                logging.debug("The hugepage info in numa_maps is %s" %
                              hugepage_info)
                map_dict = {}
                usage_dict = {}
                node_pattern = r"\s(\S+):(\S+)\s.*ram-node(\d+).*\s"
                node_pattern += "N(\d+)=(\d+)"
                for map_info in hugepage_info:
                    for (mem_mode, mem_num, cell_num, host_node_num,
                         vm_page_num) in re.findall(node_pattern, map_info):
                        usage_dict[mem_mode] = utlv.cpus_parser(mem_num)
                        usage_dict[host_node_num] = vm_page_num
                        map_dict[cell_num] = usage_dict.copy()
                logging.debug("huagepage info in vm numa maps is %s",
                              map_dict)
                memnode_dict = {}
                usage_dict = {}
                if numa_memnode:
                    for i in numa_memnode:
                        node = utlv.cpus_parser(i['nodeset'])
                        mode = mode_dict[i['mode']]
                        usage_dict[mode] = node
                        memnode_dict[i['cellid']] = usage_dict.copy()
                    logging.debug("memnode setting dict is %s", memnode_dict)
                    for k in memnode_dict.keys():
                        for mk in memnode_dict[k].keys():
                            if memnode_dict[k][mk] != map_dict[k][mk]:
                                raise error.TestFail("vm pid numa map dict %s"
                                                     " not expected" %
                                                     map_dict)

        # qemu command line check
        f_cmdline = open("/proc/%s/cmdline" % vm_pid)
        q_cmdline_list = f_cmdline.read().split("\x00")
        f_cmdline.close()
        logging.debug("vm qemu cmdline list is %s" % q_cmdline_list)
        for cmd in cmdline_list:
            logging.debug("checking '%s' in qemu cmdline", cmd['cmdline'])
            p_found = False
            for q_cmd in q_cmdline_list:
                if re.search(cmd['cmdline'], q_cmd):
                    p_found = True
                    break
                else:
                    continue
            if not p_found:
                raise error.TestFail("%s not found in vm qemu cmdline" %
                                     cmd['cmdline'])

        # vm inside check
        vm_cpu_info = utils_misc.get_cpu_info(session)
        logging.debug("lscpu output dict in vm is %s", vm_cpu_info)
        session.close()
        node_num = int(vm_cpu_info["NUMA node(s)"])
        if node_num != len(numa_cell):
            raise error.TestFail("node number %s in vm is not expected" %
                                 node_num)
        for i in range(len(numa_cell)):
            cpu_str = vm_cpu_info["NUMA node%s CPU(s)" % i]
            vm_cpu_list = utlv.cpus_parser(cpu_str)
            cpu_list = utlv.cpus_parser(numa_cell[i]["cpus"])
            if vm_cpu_list != cpu_list:
                raise error.TestFail("vm node %s cpu list %s not expected" %
                                     (i, vm_cpu_list))
        if topology:
            vm_topo_tuple = ("Socket(s)", "Core(s) per socket",
                             "Thread(s) per core")
            for i in range(len(topo_tuple)):
                topo_info = vm_cpu_info[vm_topo_tuple[i]]
                if topo_info != topology[topo_tuple[i]]:
                    raise error.TestFail("%s in vm topology not expected." %
                                         topo_tuple[i])
    finally:
        if vm.is_alive():
            vm.destroy(gracefully=False)
        backup_xml.sync()
        if page_list:
            for i in backup_list:
                hp_cl.set_node_num_huge_pages(i['num'],
                                              i['nodenum'], i['size'])
        if deallocate:
            hp_cl.deallocate = deallocate
            hp_cl.cleanup()
        if qemu_conf_restore:
            qemu_conf.restore()
            libvirtd.restart()
            for mt_path in mount_path:
                try:
                    process.run("umount %s" % mt_path, shell=True)
                except process.CmdError:
                    logging.warning("umount %s failed" % mt_path)

Example 157

Project: honeything
Source File: curl_httpclient.py
def _curl_setup_request(curl, request, buffer, headers):
    curl.setopt(pycurl.URL, utf8(request.url))

    # libcurl's magic "Expect: 100-continue" behavior causes delays
    # with servers that don't support it (which include, among others,
    # Google's OpenID endpoint).  Additionally, this behavior has
    # a bug in conjunction with the curl_multi_socket_action API
    # (https://sourceforge.net/tracker/?func=detail&atid=100976&aid=3039744&group_id=976),
    # which increases the delays.  It's more trouble than it's worth,
    # so just turn off the feature (yes, setting Expect: to an empty
    # value is the official way to disable this)
    if "Expect" not in request.headers:
        request.headers["Expect"] = ""

    # libcurl adds Pragma: no-cache by default; disable that too
    if "Pragma" not in request.headers:
        request.headers["Pragma"] = ""

    # Request headers may be either a regular dict or HTTPHeaders object
    if isinstance(request.headers, httputil.HTTPHeaders):
        curl.setopt(pycurl.HTTPHEADER,
                    [utf8("%s: %s" % i) for i in request.headers.get_all()])
    else:
        curl.setopt(pycurl.HTTPHEADER,
                    [utf8("%s: %s" % i) for i in request.headers.iteritems()])

    if request.header_callback:
        curl.setopt(pycurl.HEADERFUNCTION, request.header_callback)
    else:
        curl.setopt(pycurl.HEADERFUNCTION,
                    lambda line: _curl_header_callback(headers, line))
    if request.streaming_callback:
        curl.setopt(pycurl.WRITEFUNCTION, request.streaming_callback)
    else:
        curl.setopt(pycurl.WRITEFUNCTION, buffer.write)
    curl.setopt(pycurl.FOLLOWLOCATION, request.follow_redirects)
    curl.setopt(pycurl.MAXREDIRS, request.max_redirects)
    curl.setopt(pycurl.CONNECTTIMEOUT_MS, int(1000 * request.connect_timeout))
    curl.setopt(pycurl.TIMEOUT_MS, int(1000 * request.request_timeout))
    if request.user_agent:
        curl.setopt(pycurl.USERAGENT, utf8(request.user_agent))
    else:
        curl.setopt(pycurl.USERAGENT, "Mozilla/5.0 (compatible; pycurl)")
    if request.network_interface:
        curl.setopt(pycurl.INTERFACE, request.network_interface)
    if request.use_gzip:
        curl.setopt(pycurl.ENCODING, "gzip,deflate")
    else:
        curl.setopt(pycurl.ENCODING, "none")
    if request.proxy_host and request.proxy_port:
        curl.setopt(pycurl.PROXY, request.proxy_host)
        curl.setopt(pycurl.PROXYPORT, request.proxy_port)
        if request.proxy_username:
            credentials = '%s:%s' % (request.proxy_username,
                    request.proxy_password)
            curl.setopt(pycurl.PROXYUSERPWD, credentials)
    else:
        curl.setopt(pycurl.PROXY, '')
    if request.validate_cert:
        curl.setopt(pycurl.SSL_VERIFYPEER, 1)
        curl.setopt(pycurl.SSL_VERIFYHOST, 2)
    else:
        curl.setopt(pycurl.SSL_VERIFYPEER, 0)
        curl.setopt(pycurl.SSL_VERIFYHOST, 0)
    if request.ca_certs is not None:
        curl.setopt(pycurl.CAINFO, request.ca_certs)
    else:
        # There is no way to restore pycurl.CAINFO to its default value
        # (Using unsetopt makes it reject all certificates).
        # I don't see any way to read the default value from python so it
        # can be restored later.  We'll have to just leave CAINFO untouched
        # if no ca_certs file was specified, and require that if any
        # request uses a custom ca_certs file, they all must.
        pass

    if request.allow_ipv6 is False:
        # Curl behaves reasonably when DNS resolution gives an ipv6 address
        # that we can't reach, so allow ipv6 unless the user asks to disable.
        # (but see version check in _process_queue above)
        curl.setopt(pycurl.IPRESOLVE, pycurl.IPRESOLVE_V4)

    # Set the request method through curl's irritating interface which makes
    # up names for almost every single method
    curl_options = {
        "GET": pycurl.HTTPGET,
        "POST": pycurl.POST,
        "PUT": pycurl.UPLOAD,
        "HEAD": pycurl.NOBODY,
    }
    custom_methods = set(["DELETE"])
    for o in curl_options.values():
        curl.setopt(o, False)
    if request.method in curl_options:
        curl.unsetopt(pycurl.CUSTOMREQUEST)
        curl.setopt(curl_options[request.method], True)
    elif request.allow_nonstandard_methods or request.method in custom_methods:
        curl.setopt(pycurl.CUSTOMREQUEST, request.method)
    else:
        raise KeyError('unknown method ' + request.method)

    # Handle curl's cryptic options for every individual HTTP method
    if request.method in ("POST", "PUT"):
        request_buffer = cStringIO.StringIO(utf8(request.body))
        curl.setopt(pycurl.READFUNCTION, request_buffer.read)
        if request.method == "POST":
            def ioctl(cmd):
                if cmd == curl.IOCMD_RESTARTREAD:
                    request_buffer.seek(0)
            curl.setopt(pycurl.IOCTLFUNCTION, ioctl)
            curl.setopt(pycurl.POSTFIELDSIZE, len(request.body))
        else:
            curl.setopt(pycurl.INFILESIZE, len(request.body))

    if request.auth_username is not None:
        userpwd = "%s:%s" % (request.auth_username, request.auth_password or '')
        curl.setopt(pycurl.HTTPAUTH, pycurl.HTTPAUTH_BASIC)
        curl.setopt(pycurl.USERPWD, utf8(userpwd))
        logging.debug("%s %s (username: %r)", request.method, request.url,
                      request.auth_username)
    else:
        curl.unsetopt(pycurl.USERPWD)
        logging.debug("%s %s", request.method, request.url)

    if request.client_cert is not None:
        curl.setopt(pycurl.SSLCERT, request.client_cert)

    if request.client_key is not None:
        curl.setopt(pycurl.SSLKEY, request.client_key)

    if threading.activeCount() > 1:
        # libcurl/pycurl is not thread-safe by default.  When multiple threads
        # are used, signals should be disabled.  This has the side effect
        # of disabling DNS timeouts in some environments (when libcurl is
        # not linked against ares), so we don't do it when there is only one
        # thread.  Applications that use many short-lived threads may need
        # to set NOSIGNAL manually in a prepare_curl_callback since
        # there may not be any other threads running at the time we call
        # threading.activeCount.
        curl.setopt(pycurl.NOSIGNAL, 1)
    if request.prepare_curl_callback is not None:
        request.prepare_curl_callback(curl)
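
The debug calls in the authentication block pass several positional arguments and use %r for the username, so the value is logged as its repr (quoted), which makes unusual characters or an empty string easy to spot. A stdlib-only sketch of the same call shape (method, URL and username are illustrative):

import logging
logging.basicConfig(level=logging.DEBUG)

method, url, username = "GET", "http://example.com/feed", "alice smith"

# %r logs the repr(), so the username appears quoted in the output:
# DEBUG:root:GET http://example.com/feed (username: 'alice smith')
logging.debug("%s %s (username: %r)", method, url, username)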

Example 158

Project: avocado-vt
Source File: bootstrap.py
def create_subtests_cfg(t_type):
    specific_test_list = []
    specific_file_list = []
    specific_subdirs = asset.get_test_provider_subdirs(t_type)
    provider_names_specific = asset.get_test_provider_names(t_type)
    config_filter = get_config_filter()

    provider_info_specific = []
    for specific_provider in provider_names_specific:
        provider_info_specific.append(
            asset.get_test_provider_info(specific_provider))

    for subdir in specific_subdirs:
        specific_test_list += data_dir.SubdirGlobList(subdir,
                                                      '*.py',
                                                      test_filter)
        specific_file_list += data_dir.SubdirGlobList(subdir,
                                                      '*.cfg',
                                                      config_filter)

    shared_test_list = []
    shared_file_list = []
    shared_subdirs = asset.get_test_provider_subdirs('generic')
    provider_names_shared = asset.get_test_provider_names('generic')

    provider_info_shared = []
    for shared_provider in provider_names_shared:
        provider_info_shared.append(
            asset.get_test_provider_info(shared_provider))

    if not t_type == 'lvsb':
        for subdir in shared_subdirs:
            shared_test_list += data_dir.SubdirGlobList(subdir,
                                                        '*.py',
                                                        test_filter)
            shared_file_list += data_dir.SubdirGlobList(subdir,
                                                        '*.cfg',
                                                        config_filter)

    all_specific_test_list = []
    for test in specific_test_list:
        for p in provider_info_specific:
            provider_base_path = p['backends'][t_type]['path']
            if provider_base_path in test:
                provider_name = p['name']
                break

        basename = os.path.basename(test)
        if basename != "__init__.py":
            all_specific_test_list.append("%s.%s" %
                                          (provider_name,
                                           basename.split(".")[0]))
    all_shared_test_list = []
    for test in shared_test_list:
        for p in provider_info_shared:
            provider_base_path = p['backends']['generic']['path']
            if provider_base_path in test:
                provider_name = p['name']
                break

        basename = os.path.basename(test)
        if basename != "__init__.py":
            all_shared_test_list.append("%s.%s" %
                                        (provider_name,
                                         basename.split(".")[0]))

    all_specific_test_list.sort()
    all_shared_test_list.sort()

    first_subtest_file = []
    last_subtest_file = []
    non_dropin_tests = []
    tmp = []

    for shared_file in shared_file_list:
        provider_name = None
        for p in provider_info_shared:
            provider_base_path = p['backends']['generic']['path']
            if provider_base_path in shared_file:
                provider_name = p['name']
                break

        shared_file_obj = open(shared_file, 'r')
        for line in shared_file_obj.readlines():
            line = line.strip()
            if line.startswith("type"):
                cartesian_parser = cartesian_config.Parser()
                cartesian_parser.parse_string(line)
                td = cartesian_parser.get_dicts().next()
                values = td['type'].split(" ")
                for value in values:
                    if value not in non_dropin_tests:
                        non_dropin_tests.append("%s.%s" %
                                                (provider_name, value))

        shared_file_name = os.path.basename(shared_file)
        shared_file_name = shared_file_name.split(".")[0]
        if shared_file_name in first_subtest[t_type]:
            if [provider_name, shared_file] not in first_subtest_file:
                first_subtest_file.append([provider_name, shared_file])
        elif shared_file_name in last_subtest[t_type]:
            if [provider_name, shared_file] not in last_subtest_file:
                last_subtest_file.append([provider_name, shared_file])
        else:
            if [provider_name, shared_file] not in tmp:
                tmp.append([provider_name, shared_file])
    shared_file_list = tmp

    tmp = []
    for shared_file in specific_file_list:
        provider_name = None
        for p in provider_info_specific:
            provider_base_path = p['backends'][t_type]['path']
            if provider_base_path in shared_file:
                provider_name = p['name']
                break

        shared_file_obj = open(shared_file, 'r')
        for line in shared_file_obj.readlines():
            line = line.strip()
            if line.startswith("type"):
                cartesian_parser = cartesian_config.Parser()
                cartesian_parser.parse_string(line)
                td = cartesian_parser.get_dicts().next()
                values = td['type'].split(" ")
                for value in values:
                    if value not in non_dropin_tests:
                        non_dropin_tests.append("%s.%s" %
                                                (provider_name, value))

        shared_file_name = os.path.basename(shared_file)
        shared_file_name = shared_file_name.split(".")[0]
        if shared_file_name in first_subtest[t_type]:
            if [provider_name, shared_file] not in first_subtest_file:
                first_subtest_file.append([provider_name, shared_file])
        elif shared_file_name in last_subtest[t_type]:
            if [provider_name, shared_file] not in last_subtest_file:
                last_subtest_file.append([provider_name, shared_file])
        else:
            if [provider_name, shared_file] not in tmp:
                tmp.append([provider_name, shared_file])
    specific_file_list = tmp

    subtests_cfg = os.path.join(data_dir.get_backend_dir(t_type), 'cfg',
                                'subtests.cfg')
    subtests_file = open(subtests_cfg, 'w')
    subtests_file.write(
        "# Do not edit, auto generated file from subtests config\n")

    subtests_file.write("variants subtest:\n")
    write_subtests_files(first_subtest_file, subtests_file)
    write_subtests_files(specific_file_list, subtests_file, t_type)
    write_subtests_files(shared_file_list, subtests_file)
    write_subtests_files(last_subtest_file, subtests_file)

    subtests_file.close()
    logging.debug("Config file %s auto generated from subtest samples",
                  subtests_cfg)
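
The example ends with a logging.debug call that records where the auto-generated config file was written. A small hedged sketch of the same idea, with a throwaway temporary file standing in for subtests.cfg:

import logging
import os
import tempfile

logging.basicConfig(level=logging.DEBUG)

# Write a tiny generated file, then record its location at DEBUG level.
fd, cfg_path = tempfile.mkstemp(suffix=".cfg")
with os.fdopen(fd, "w") as cfg_file:
    cfg_file.write("# Do not edit, auto generated file\n")
logging.debug("Config file %s auto generated from subtest samples", cfg_path)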

Example 159

Project: couchbase-cli
Source File: pump_dcp.py
View license
    def provide_dcp_batch_actual(self):
        batch = pump.Batch(self)

        batch_max_size = self.opts.extra['batch_max_size']
        batch_max_bytes = self.opts.extra['batch_max_bytes']
        delta_ack_size = batch_max_bytes * 10 / 4 #ack every 25% of buffer size
        last_processed = 0
        total_bytes_read = 0

        vbid = 0
        cmd = 0
        start_seqno = 0
        end_seqno = 0
        vb_uuid = 0
        hi_seqno = 0
        ss_start_seqno = 0
        ss_end_seqno = 0
        try:
            while (not self.dcp_done and
                   batch.size() < batch_max_size and
                   batch.bytes < batch_max_bytes):

                if self.response.empty():
                    if len(self.stream_list) > 0:
                        logging.debug("no response while there are %s active streams" % len(self.stream_list))
                        time.sleep(.25)
                    else:
                        self.dcp_done = True
                    continue
                unprocessed_size = total_bytes_read - last_processed
                if unprocessed_size > delta_ack_size:
                    rv = self.ack_buffer_size(unprocessed_size)
                    if rv:
                        logging.error(rv)
                    else:
                        last_processed = total_bytes_read

                cmd, errcode, opaque, cas, keylen, extlen, data, datalen, dtype, bytes_read = \
                    self.response.get()
                total_bytes_read += bytes_read
                rv = 0
                metalen = flags = flg = exp = 0
                key = val = ext = ''
                need_ack = False
                seqno = 0
                if cmd == couchbaseConstants.CMD_DCP_REQUEST_STREAM:
                    if errcode == couchbaseConstants.ERR_SUCCESS:
                        pair_index = (self.source_bucket['name'], self.source_node['hostname'])
                        start = 0
                        step = DCPStreamSource.HIGH_SEQNO_BYTE + DCPStreamSource.UUID_BYTE
                        while start+step <= datalen:
                            uuid, seqno = struct.unpack(
                                            couchbaseConstants.DCP_VB_UUID_SEQNO_PKT_FMT, \
                                            data[start:start + step])
                            if pair_index not in self.cur['failoverlog']:
                                self.cur['failoverlog'][pair_index] = {}
                            if opaque not in self.cur['failoverlog'][pair_index] or \
                               not self.cur['failoverlog'][pair_index][opaque]:
                                self.cur['failoverlog'][pair_index][opaque] = [(uuid, seqno)]
                            else:
                                self.cur['failoverlog'][pair_index][opaque].append((uuid, seqno))
                            start = start + step
                    elif errcode == couchbaseConstants.ERR_KEY_ENOENT:
                        logging.warn("producer doesn't know about the vbucket uuid, rollback to 0")
                        vbid, flags, start_seqno, end_seqno, vb_uuid, ss_start_seqno, ss_end_seqno = \
                            self.stream_list[opaque]
                        del self.stream_list[opaque]
                    elif errcode == couchbaseConstants.ERR_KEY_EEXISTS:
                        logging.warn("a stream exists on the connection for vbucket:%s" % opaque)
                    elif errcode == couchbaseConstants.ERR_NOT_MY_VBUCKET:
                        logging.warn("Vbucket is not active anymore, skip it:%s" % vbid)
                        del self.stream_list[opaque]
                    elif errcode == couchbaseConstants.ERR_ERANGE:
                        logging.warn("Start or end sequence numbers specified incorrectly,(%s, %s)" % \
                                     (start_seqno, end_seqno))
                        del self.stream_list[opaque]
                    elif errcode == couchbaseConstants.ERR_ROLLBACK:
                        vbid, flags, start_seqno, end_seqno, vb_uuid, ss_start_seqno, ss_stop_seqno = \
                            self.stream_list[opaque]
                        start_seqno, = struct.unpack(couchbaseConstants.DCP_VB_SEQNO_PKT_FMT, data)
                        #find the latest uuid, hi_seqno that fits start_seqno
                        if self.cur['failoverlog']:
                            pair_index = (self.source_bucket['name'], self.source_node['hostname'])
                            if self.cur['failoverlog'][pair_index].get("vbid"):
                                for uuid, seqno in self.cur['failoverlog'][pair_index][vbid]:
                                    if start_seqno >= seqno:
                                        vb_uuid = uuid
                                        break
                        ss_start_seqno = start_seqno
                        ss_end_seqno = start_seqno
                        self.request_dcp_stream(vbid, flags, start_seqno, end_seqno, vb_uuid, ss_start_seqno, ss_end_seqno)

                        del self.stream_list[opaque]
                        self.stream_list[opaque] = \
                            (vbid, flags, start_seqno, end_seqno, vb_uuid, ss_start_seqno, ss_end_seqno)
                    else:
                        logging.error("unprocessed errcode:%s" % errcode)
                        del self.stream_list[opaque]
                elif cmd == couchbaseConstants.CMD_DCP_MUTATION:
                    vbucket_id = errcode
                    seqno, rev_seqno, flg, exp, locktime, metalen, nru = \
                        struct.unpack(couchbaseConstants.DCP_MUTATION_PKT_FMT, data[0:extlen])
                    key_start = extlen
                    val_start = key_start + keylen
                    val_len = datalen - keylen - metalen - extlen
                    meta_start = val_start + val_len
                    key = data[extlen:val_start]
                    val = data[val_start:meta_start]
                    conf_res = 0
                    if meta_start < datalen:
                        # handle extra conflict resolution fields
                        extra_meta = data[meta_start:]
                        extra_index = 0
                        version = extra_meta[extra_index]
                        extra_index += 1
                        while extra_index < metalen:
                            id, extlen = struct.unpack(couchbaseConstants.DCP_EXTRA_META_PKG_FMT, extra_meta[extra_index:extra_index+3])
                            extra_index += 3
                            if id == couchbaseConstants.DCP_EXTRA_META_CONFLICT_RESOLUTION:
                                if extlen == 1:
                                    conf_res, = struct.unpack(">B",extra_meta[extra_index:extra_index+1])
                                elif extlen == 2:
                                    conf_res, = struct.unpack(">H",extra_meta[extra_index:extra_index+2])
                                elif extlen == 4:
                                    conf_res, = struct.unpack(">I", extra_meta[extra_index:extra_index+4])
                                elif extlen == 8:
                                    conf_res, = struct.unpack(">Q", extra_meta[extra_index:extra_index+8])
                                else:
                                    logging.error("unsupported extra meta data format:%d" % extlen)
                                    conf_res = 0
                            extra_index += extlen

                    if not self.skip(key, vbucket_id):
                        msg = (cmd, vbucket_id, key, flg, exp, cas, rev_seqno, val, seqno, dtype, \
                               metalen, conf_res)
                        batch.append(msg, len(val))
                        self.num_msg += 1
                elif cmd == couchbaseConstants.CMD_DCP_DELETE or \
                     cmd == couchbaseConstants.CMD_DCP_EXPIRATION:
                    vbucket_id = errcode
                    seqno, rev_seqno, metalen = \
                        struct.unpack(couchbaseConstants.DCP_DELETE_PKT_FMT, data[0:extlen])
                    key_start = extlen
                    val_start = key_start + keylen
                    key = data[extlen:val_start]
                    if not self.skip(key, vbucket_id):
                        msg = (cmd, vbucket_id, key, flg, exp, cas, rev_seqno, val, seqno, dtype, \
                               metalen, 0)
                        batch.append(msg, len(val))
                        self.num_msg += 1
                    if cmd == couchbaseConstants.CMD_DCP_DELETE:
                        batch.adjust_size += 1
                elif cmd == couchbaseConstants.CMD_DCP_FLUSH:
                    logging.warn("stopping: saw CMD_DCP_FLUSH")
                    self.dcp_done = True
                    break
                elif cmd == couchbaseConstants.CMD_DCP_END_STREAM:
                    del self.stream_list[opaque]
                    if not len(self.stream_list):
                        self.dcp_done = True
                elif cmd == couchbaseConstants.CMD_DCP_SNAPSHOT_MARKER:
                    ss_start_seqno, ss_end_seqno, _ = \
                        struct.unpack(couchbaseConstants.DCP_SNAPSHOT_PKT_FMT, data[0:extlen])
                    pair_index = (self.source_bucket['name'], self.source_node['hostname'])
                    if not self.cur['snapshot']:
                        self.cur['snapshot'] = {}
                    if pair_index not in self.cur['snapshot']:
                        self.cur['snapshot'][pair_index] = {}
                    self.cur['snapshot'][pair_index][opaque] = (ss_start_seqno, ss_end_seqno)
                elif cmd == couchbaseConstants.CMD_DCP_NOOP:
                    need_ack = True
                elif cmd == couchbaseConstants.CMD_DCP_BUFFER_ACK:
                    if errcode != couchbaseConstants.ERR_SUCCESS:
                        logging.warning("buffer ack response errcode:%s" % errcode)
                    continue
                else:
                    logging.warn("warning: unexpected DCP message: %s" % cmd)
                    return "unexpected DCP message: %s" % cmd, batch

                if need_ack:
                    self.ack_last = True
                    try:
                        self.dcp_conn._sendMsg(cmd, '', '', opaque, vbucketId=0,
                                          fmt=couchbaseConstants.RES_PKT_FMT,
                                          magic=couchbaseConstants.RES_MAGIC_BYTE)
                    except socket.error:
                        return ("error: socket.error on send();"
                                " perhaps the source server: %s was rebalancing"
                                " or had connectivity/server problems" %
                                (self.source_node['hostname'])), batch
                    except EOFError:
                        self.dcp_done = True
                        return ("error: EOFError on socket send();"
                                " perhaps the source server: %s was rebalancing"
                                " or had connectivity/server problems" %
                                (self.source_node['hostname'])), batch

                    # Close the batch when there's an ACK handshake, so
                    # the server can concurrently send us the next batch.
                    # If we are slow, our slow ACK's will naturally slow
                    # down the server.
                    self.ack_buffer_size(total_bytes_read - last_processed)
                    return 0, batch

                self.ack_last = False
                self.cmd_last = cmd

        except EOFError:
            if batch.size() <= 0 and self.ack_last:
                # A closed conn after an ACK means clean end of TAP dump.
                self.dcp_done = True

        if batch.size() <= 0:
            return 0, None
        self.ack_buffer_size(total_bytes_read - last_processed)
        return 0, batch
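
The logging.debug call in this example builds its message eagerly with the % operator, so the string is constructed even when DEBUG output is filtered out. A small sketch contrasting that with the deferred-formatting spelling (the stream list is invented):

import logging

logging.basicConfig(level=logging.WARNING)  # DEBUG records are filtered out
stream_list = ["vb0", "vb1", "vb2"]

# Eager: the message is formatted with % before logging.debug runs,
# even though no DEBUG record will be emitted.
logging.debug("no response while there are %s active streams" % len(stream_list))

# Lazy: the same message, but formatting is deferred to the logging
# framework and skipped here because DEBUG is disabled.
logging.debug("no response while there are %s active streams", len(stream_list))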

Example 160

Project: openode
Source File: send_email_notifications.py
View license
    def get_updated_threads_for_user(self, user):
        """
        retrieve relevant question updates for the user
        according to their subscriptions and recorded question
        views
        """
        # set default language TODO - language per user - add user attribute
        activate(django_settings.LANGUAGE_CODE)

        user_feeds = EmailFeedSetting.objects.filter(
                                                subscriber=user
                                            ).exclude(
                                                frequency__in=('n', 'i')
                                            )

        should_proceed = False
        for feed in user_feeds:
            if feed.should_send_now() == True:
                should_proceed = True
                break

        #shortcircuit - if there is no ripe feed to work on for this user
        if should_proceed == False:
            logging.debug(u'Notification: %s not sent - should proceed = False' % user.screen_name)
            return {}

        #these are placeholders for separate query sets per question group
        #there are four groups - one for each EmailFeedSetting.feed_type
        #and each group has subtypes A and B
        #that's because of the strange thing commented below
        #see note on Q and F objects marked with todo tag
        q_sel_A = None
        q_sel_B = None

        # q_ask_A = None
        # q_ask_B = None

        # q_ans_A = None
        # q_ans_B = None

        # q_all_A = None
        # q_all_B = None

        #base question query set for this user
        #basic things - not deleted, not closed, not too old
        #not last edited by the same user
        base_qs = Post.objects.filter(post_type__in=[const.POST_TYPE_DISCUSSION, const.POST_TYPE_QUESTION, const.POST_TYPE_DOCUMENT]).exclude(
                                thread__last_activity_by=user
                            ).exclude(
                                thread__last_activity_at__lt=user.date_joined  # exclude old stuff
                            ).exclude(
                                deleted=True
                            ).exclude(
                                thread__closed=True
                            ).order_by('-thread__last_activity_at')
        #todo: for some reason filter on did not work as expected ~Q(viewed__who=user) |
        #      Q(viewed__who=user,viewed__when__lt=F('thread__last_activity_at'))
        #returns way more questions than you might think it should
        #so because of that I've created separate query sets Q_set2 and Q_set3
        #plus two separate queries run faster!

        #build two queries:
        #questions that are not seen by the user at all
        not_seen_qs = base_qs.filter(~Q(thread__viewed__user=user))
        #questions that were seen, but before last modification
        seen_before_last_mod_qs = base_qs.filter(
                                    Q(
                                        thread__viewed__user=user,
                                        thread__viewed__last_visit__lt=F('thread__last_activity_at')
                                    )
                                )

        #shorten variables for convenience
        Q_set_A = not_seen_qs
        Q_set_B = seen_before_last_mod_qs

        for feed in user_feeds:
            #each group of updates represented by the corresponding
            #query set has its own cutoff time
            #that cutoff time is computed for each user individually
            #and stored as a parameter "cutoff_time"

            #we won't send email for a given question if an email has been
            #sent after that cutoff_time
            if feed.should_send_now():
                feed.mark_reported_now()
                cutoff_time = feed.get_previous_report_cutoff_time()

                if feed.feed_type == 'q_sel':
                    q_sel_A = Q_set_A.filter(Q(thread__followed_by=user) | Q(thread__node__followed_by=user))
                    q_sel_A.cutoff_time = cutoff_time  # store cutoff time per query set
                    q_sel_B = Q_set_B.filter(thread__followed_by=user)
                    q_sel_B.cutoff_time = cutoff_time  # store cutoff time per query set
                    # print q_sel_A, q_sel_B
                # elif feed.feed_type == 'q_ask':
                #     q_ask_A = Q_set_A.filter(author=user)
                #     q_ask_A.cutoff_time = cutoff_time
                #     q_ask_B = Q_set_B.filter(author=user)
                #     q_ask_B.cutoff_time = cutoff_time

                # elif feed.feed_type == 'q_ans':
                #     q_ans_A = Q_set_A.filter(thread__posts__author=user, thread__posts__post_type='answer')
                #     q_ans_A = q_ans_A[:openode_settings.MAX_ALERTS_PER_EMAIL]
                #     q_ans_A.cutoff_time = cutoff_time

                #     q_ans_B = Q_set_B.filter(thread__posts__author=user, thread__posts__post_type='answer')
                #     q_ans_B = q_ans_B[:openode_settings.MAX_ALERTS_PER_EMAIL]
                #     q_ans_B.cutoff_time = cutoff_time

                # elif feed.feed_type == 'q_all':
                #     q_all_A = user.get_tag_filtered_questions(Q_set_A)
                #     q_all_B = user.get_tag_filtered_questions(Q_set_B)

                #     q_all_A = q_all_A[:openode_settings.MAX_ALERTS_PER_EMAIL]
                #     q_all_B = q_all_B[:openode_settings.MAX_ALERTS_PER_EMAIL]
                #     q_all_A.cutoff_time = cutoff_time
                #     q_all_B.cutoff_time = cutoff_time

        #build ordered list questions for the email report
        q_list = SortedDict()

        #todo: refactor q_list into a separate class?
        extend_question_list(q_sel_A, q_list)
        extend_question_list(q_sel_B, q_list)

        #build list of comment responses here
        #it is separate because posts are not marked as changed

        # extend_question_list(q_ask_A, q_list, limit=True)
        # extend_question_list(q_ask_B, q_list, limit=True)

        # extend_question_list(q_ans_A, q_list, limit=True)
        # extend_question_list(q_ans_B, q_list, limit=True)

        # if user.email_tag_filter_strategy == const.EXCLUDE_IGNORED:
        #     extend_question_list(q_all_A, q_list, limit=True)
        #     extend_question_list(q_all_B, q_list, limit=True)

        ctype = ContentType.objects.get_for_model(Post)
        EMAIL_UPDATE_ACTIVITY = const.TYPE_ACTIVITY_EMAIL_UPDATE_SENT

        #up to this point we still don't know if emails about
        #collected questions were sent recently
        #the next loop examines activity record and decides
        #for each question, whether it needs to be included or not
        #into the report

        for q, meta_data in q_list.items():
            #this loop edits meta_data for each question
            #so that the user will receive counts of new edits, new answers, etc
            #and marks questions that need to be skipped
            #because an email about them was sent recently enough

            #also it keeps a record of latest email activity per question per user
            try:
                #todo: is it possible to use content_object here, instead of
                #content type and object_id pair?
                update_info = Activity.objects.get(
                                            user=user,
                                            content_type=ctype,
                                            object_id=q.id,
                                            activity_type=EMAIL_UPDATE_ACTIVITY
                                        )
                emailed_at = update_info.active_at
            except Activity.DoesNotExist:
                update_info = Activity(
                                        user=user,
                                        content_object=q,
                                        activity_type=EMAIL_UPDATE_ACTIVITY
                                    )
                emailed_at = datetime.datetime(1970, 1, 1)  # long time ago
            except Activity.MultipleObjectsReturned:
                raise Exception(
                                'server error - multiple question email activities '
                                'found per user-question pair'
                                )

            cutoff_time = meta_data['cutoff_time']  # cutoff time for the question

            #skip question if we need to wait longer because
            #the delay before the next email has not yet elapsed
            #or if last email was sent after the most recent modification
            if emailed_at > cutoff_time or emailed_at > q.thread.last_activity_at:
                meta_data['skip'] = True
                continue

            #collect info on all sorts of news that happened after
            #the most recent emailing to the user about this question
            q_rev = q.revisions.filter(revised_at__gt=emailed_at).exclude(author=user)

            #now update all sorts of metadata per question
            meta_data['q_rev'] = len(q_rev)
            if len(q_rev) > 0 and q.added_at == q_rev[0].revised_at:
                meta_data['q_rev'] = 0
                meta_data['new_q'] = True
            else:
                meta_data['new_q'] = False

            new_ans = Post.objects.get_answers(user).filter(
                thread=q.thread,
                added_at__gt=emailed_at,
                deleted=False,
            ).exclude(author=user)
            meta_data['new_ans'] = len(new_ans)

            ans_rev = PostRevision.objects.filter(
                post__post_type=const.POST_TYPE_THREAD_POST,
                post__thread=q.thread,
                revised_at__gt=emailed_at,
                post__deleted=False,
                revision__gte=2,
            ).exclude(author=user)
            meta_data['ans_rev'] = len(ans_rev)

            comments = meta_data.get('comments', 0)

            #print meta_data
            #finally skip question if there are no news indeed
            if len(q_rev) + len(new_ans) + len(ans_rev) + comments == 0:
                meta_data['skip'] = True
                #print 'skipping'
            else:
                meta_data['skip'] = False
                #print 'not skipping'
                update_info.active_at = datetime.datetime.now()
                if DEBUG_THIS_COMMAND == False:
                    update_info.save()  # save question email update activity
        #q_list is actually an ordered dictionary
        #print 'user %s gets %d' % (user.username, len(q_list.keys()))
        #todo: sort question list by update time
        return q_list
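
When the data behind a debug message is costly to assemble, as with the per-user query sets built above, the logging module offers Logger.isEnabledFor as a guard. A minimal sketch, with an invented expensive_summary helper standing in for the real work:

import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

def expensive_summary():
    # Stand-in for something costly, such as counting rows in several query sets.
    return sum(range(1000000))

# Only compute the summary when a DEBUG record would actually be emitted.
if logger.isEnabledFor(logging.DEBUG):
    logger.debug("update summary: %s", expensive_summary())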

Example 161

Project: luci-py
Source File: bot_main.py
View license
def run_manifest(botobj, manifest, start):
  """Defers to task_runner.py.

  Return True if the task succeeded.
  """
  # Ensure the manifest is valid. This can throw a json decoding error. Also
  # raise if it is empty.
  if not manifest:
    raise ValueError('Empty manifest')

  # Necessary to signal an internal_failure. This occurs when task_runner fails
  # to execute the command. It is important to note that this data is extracted
  # before any I/O is done, like writing the manifest to disk.
  task_id = manifest['task_id']
  hard_timeout = manifest['hard_timeout'] or None
  # Default the grace period to 30s here, this doesn't affect the grace period
  # for the actual task.
  grace_period = manifest['grace_period'] or 30
  if manifest['hard_timeout']:
    # One for the child process, one for run_isolated, one for task_runner.
    hard_timeout += 3 * manifest['grace_period']
    # For isolated task, download time is not counted for hard timeout so add
    # more time.
    if not manifest['command']:
      hard_timeout += manifest['io_timeout'] or 600

  # Get the server info to pass to the task runner so it can provide updates.
  url = botobj.server
  is_grpc = botobj.remote.is_grpc()
  if not is_grpc and 'host' in manifest:
    # The URL in the manifest includes the version - eg not https://chromium-
    # swarm-dev.appspot.com, but https://<some-version>-dot-chromium-swarm-
    # dev.appspot.com. That way, if a new server version becomes the default,
    # old bots will continue to work with a server version that can manipulate
    # the old data (the new server will only ever have to read it, which is
    # much simpler) while new bots won't accidentally contact an old server
    # which the GAE engine hasn't gotten around to updating yet.
    #
    # With a gRPC proxy, we could theoretically run into the same problem
    # if we change the meaning of some data without changing the protos.
    # However, if we *do* change the protos, we already need to make the
    # change in a few steps:
    #    1. Modify the Swarming server to accept the new data
    #    2. Modify the protos and the proxy to accept the new data
    #       in gRPC calls and translate it to "native" Swarming calls.
    #    3. Update the bots to transmit the new protos.
    # Throughout all this, the proto format itself irons out minor differences
    # and additions. But because we deploy in three steps, the odds of a
    # newer bot contacting an older server is very low.
    #
    # None of this applies if we don't actually update the protos but just
    # change the semantics. If this becomes a significant problem, we could
    # start transmitting the expected server version using gRPC metadata.
    #    - aludwin, Nov 2016
    url = manifest['host']

  task_dimensions = manifest['dimensions']
  task_result = {}

  failure = False
  internal_failure = False
  msg = None
  auth_params_dumper = None
  # Use 'w' instead of 'work' because path length is precious on Windows.
  work_dir = os.path.join(botobj.base_dir, 'w')
  try:
    try:
      if os.path.isdir(work_dir):
        file_path.rmtree(work_dir)
    except OSError:
      # If a previous task created an undeleteable file/directory inside 'w',
      # make sure that following tasks are not affected. This is done by working
      # around the undeleteable directory by creating a temporary directory
      # instead. This is not normal behavior. The bot will report a failure on
      # start.
      work_dir = tempfile.mkdtemp(dir=botobj.base_dir, prefix='w')
    else:
      os.makedirs(work_dir)

    env = os.environ.copy()
    # Windows in particular does not tolerate unicode strings in environment
    # variables.
    env['SWARMING_TASK_ID'] = task_id.encode('ascii')
    env['SWARMING_SERVER'] = botobj.server.encode('ascii')

    task_in_file = os.path.join(work_dir, 'task_runner_in.json')
    with open(task_in_file, 'wb') as f:
      f.write(json.dumps(manifest))
    handle, bot_file = tempfile.mkstemp(
        prefix='bot_file', suffix='.json', dir=work_dir)
    os.close(handle)
    call_hook(botobj, 'on_before_task', bot_file)
    task_result_file = os.path.join(work_dir, 'task_runner_out.json')
    if os.path.exists(task_result_file):
      os.remove(task_result_file)

    # Start a thread that periodically puts authentication headers and other
    # authentication related information to a file on disk. task_runner reads it
    # from there before making authenticated HTTP calls.
    auth_params_file = os.path.join(work_dir, 'bot_auth_params.json')
    if botobj.remote.uses_auth:
      auth_params_dumper = file_refresher.FileRefresherThread(
          auth_params_file,
          lambda: bot_auth.prepare_auth_params_json(botobj, manifest))
      auth_params_dumper.start()

    command = [
      sys.executable, THIS_FILE, 'task_runner',
      '--swarming-server', url,
      '--in-file', task_in_file,
      '--out-file', task_result_file,
      '--cost-usd-hour', str(botobj.state.get('cost_usd_hour') or 0.),
      # Include the time taken to poll the task in the cost.
      '--start', str(start),
      '--min-free-space', str(get_min_free_space(botobj)),
      '--bot-file', bot_file,
    ]
    if botobj.remote.uses_auth:
      command.extend(['--auth-params-file', auth_params_file])
    if is_grpc:
      command.append('--is-grpc')
    logging.debug('Running command: %s', command)

    # Put the output file into the current working directory, which should be
    # the one containing swarming_bot.zip.
    log_path = os.path.join(botobj.base_dir, 'logs', 'task_runner_stdout.log')
    os_utilities.roll_log(log_path)
    os_utilities.trim_rolled_log(log_path)
    with open(log_path, 'a+b') as f:
      proc = subprocess42.Popen(
          command,
          detached=True,
          cwd=botobj.base_dir,
          env=env,
          stdin=subprocess42.PIPE,
          stdout=f,
          stderr=subprocess42.STDOUT,
          close_fds=sys.platform != 'win32')
      try:
        proc.wait(hard_timeout)
      except subprocess42.TimeoutExpired:
        # That's the last-ditch effort, as task_runner should have completed a
        # while ago and enforced the timeout itself (or run_isolated enforced
        # hard_timeout for an isolated task).
        logging.error('Sending SIGTERM to task_runner')
        proc.terminate()
        internal_failure = True
        msg = 'task_runner hung'
        try:
          proc.wait(grace_period)
        except subprocess42.TimeoutExpired:
          logging.error('Sending SIGKILL to task_runner')
          proc.kill()
        proc.wait()
        return False

    logging.info('task_runner exit: %d', proc.returncode)
    if os.path.exists(task_result_file):
      with open(task_result_file, 'rb') as fd:
        task_result = json.load(fd)

    if proc.returncode:
      msg = 'Execution failed: internal error (%d).' % proc.returncode
      internal_failure = True
    elif not task_result:
      logging.warning('task_runner failed to write metadata')
      msg = 'Execution failed: internal error (no metadata).'
      internal_failure = True
    elif task_result[u'must_signal_internal_failure']:
      msg = (
        'Execution failed: %s' % task_result[u'must_signal_internal_failure'])
      internal_failure = True

    failure = bool(task_result.get('exit_code')) if task_result else False
    return not internal_failure and not failure
  except Exception as e:
    # Failures include IOError when writing if the disk is full, OSError if
    # swarming_bot.zip doesn't exist anymore, etc.
    logging.exception('run_manifest failed')
    msg = 'Internal exception occurred: %s\n%s' % (
        e, traceback.format_exc()[-2048:])
    internal_failure = True
  finally:
    if auth_params_dumper:
      auth_params_dumper.stop()
    if internal_failure:
      post_error_task(botobj, msg, task_id)
    call_hook(
        botobj, 'on_after_task', failure, internal_failure, task_dimensions,
        task_result)
    if os.path.isdir(work_dir):
      try:
        file_path.rmtree(work_dir)
      except Exception as e:
        botobj.post_error(
            'Failed to delete work directory %s: %s' % (work_dir, e))
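
Two logging idioms in this example are worth isolating: logging.debug for the exact subprocess command and logging.exception for failures. A small self-contained sketch (the command is a trivial stand-in for the task_runner invocation):

import logging
import subprocess
import sys

logging.basicConfig(level=logging.DEBUG)

command = [sys.executable, "-c", "print('hello')"]
logging.debug("Running command: %s", command)
try:
    subprocess.check_call(command)
except Exception:
    # logging.exception logs at ERROR level and appends the current traceback.
    logging.exception("run_manifest failed")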

Example 162

Project: tp-libvirt
Source File: virsh_iface.py
View license
def run(test, params, env):
    """
    Test virsh interface related commands.

    (1) If using a given existing interface for testing (e.g. lo or ethX):
        1.1 Dumpxml for the interface(with --inactive option)
        1.2 Destroy the interface
        1.3 Undefine the interface
    (2) Define an interface from XML file
    (3) List interfaces with '--inactive' option
    (4) Start the interface
    (5) List interfaces with no option
    (6) Dumpxml for the interface
    (7) Get interface MAC address by interface name
    (8) Get interface name by interface MAC address
    (9) Delete the interface if not using an existing interface for testing
        9.1 Destroy the interface
        9.2 Undefine the interface

    Caveat: this test may affect the host network, so the loopback (lo)
    device is used by default. You can specify the interface you want, but be
    careful.
    """

    iface_name = params.get("iface_name", "ENTER.BRIDGE.NAME")
    iface_xml = params.get("iface_xml")
    iface_type = params.get("iface_type", "ethernet")
    iface_pro = params.get("iface_pro", "")
    iface_eth = params.get("iface_eth", "")
    iface_tag = params.get("iface_tag", "0")
    if iface_type == "vlan":
        iface_name = iface_eth + "." + iface_tag
    iface_eth_using = "yes" == params.get("iface_eth_using", "no")
    ping_ip = params.get("ping_ip", "localhost")
    use_exist_iface = "yes" == params.get("use_exist_iface", "no")
    status_error = "yes" == params.get("status_error", "no")
    net_restart = "yes" == params.get("iface_net_restart", "no")
    list_dumpxml_acl = "yes" == params.get("list_dumpxml_acl", "no")
    if ping_ip.count("ENTER"):
        raise error.TestNAError("Please input a valid ip address")
    if iface_name.count("ENTER"):
        raise error.TestNAError("Please input an existing bridge/ethernet name")

    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user', "EXAMPLE")
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = 'testacl'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current"
                                    " libvirt version.")

    virsh_dargs = {'debug': True}
    list_dumpxml_dargs = {'debug': True}
    if params.get('setup_libvirt_polkit') == 'yes':
        if not list_dumpxml_acl:
            virsh_dargs['uri'] = uri
            virsh_dargs['unprivileged_user'] = unprivileged_user
        else:
            list_dumpxml_dargs['uri'] = uri
            list_dumpxml_dargs['unprivileged_user'] = unprivileged_user
            list_dumpxml_dargs['ignore_status'] = False

    # acl api negative testing params
    write_save_status_error = "yes" == params.get("write_save_status_error",
                                                  "no")
    start_status_error = "yes" == params.get("start_status_error", "no")
    stop_status_error = "yes" == params.get("stop_status_error", "no")
    delete_status_error = "yes" == params.get("delete_status_error", "no")

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    if vm:
        xml_bak = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    iface_script = NETWORK_SCRIPT + iface_name
    iface_script_bk = os.path.join(test.tmpdir, "iface-%s.bk" % iface_name)
    net_bridge = utils_net.Bridge()
    if use_exist_iface:
        if iface_type == "bridge":
            if iface_name not in net_bridge.list_br():
                raise error.TestError("Bridge '%s' does not exist" % iface_name)
            ifaces = net_bridge.get_structure()[iface_name]
            if len(ifaces) < 1:
                # In this situation, dhcp may not be able to get an ip address;
                # unless static addressing is used, we'd better skip such a case
                raise error.TestNAError("Bridge '%s' has no interface"
                                        " bridged, perhaps cannot get"
                                        " ipaddress" % iface_name)
    net_iface = utils_net.Interface(name=iface_name)
    iface_is_up = True
    list_option = "--all"
    if use_exist_iface:
        if not libvirt.check_iface(iface_name, "exists", "--all"):
            raise error.TestError("Interface '%s' does not exist" % iface_name)
        iface_xml = os.path.join(test.tmpdir, "iface.xml.tmp")
        iface_is_up = net_iface.is_up()
    else:
        # Note: if not using an interface which already exists, iface_name must
        # be equal to the value specified in the XML file
        if libvirt.check_iface(iface_name, "exists", "--all"):
            raise error.TestError("Interface '%s' already exists" % iface_name)
        if not iface_xml:
            raise error.TestError("XML file is needed.")
        iface_xml = os.path.join(test.tmpdir, iface_xml)
        create_xml_file(iface_xml, params)

    # Stop NetworkManager, as it may conflict with virsh iface commands
    try:
        NM = utils_path.find_command("NetworkManager")
    except utils_path.CmdNotFoundError:
        logging.debug("No NetworkManager service.")
        NM = None
    NM_is_running = False
    if NM is not None:
        NM_service = service.Factory.create_service("NetworkManager")
        NM_is_running = NM_service.status()
        if NM_is_running:
            NM_service.stop()

    # run test cases
    try:
        if use_exist_iface:
            # back up the interface script
            utils.run("cp %s %s" % (iface_script, iface_script_bk))
            # step 1.1
            # dumpxml for interface
            if list_dumpxml_acl:
                virsh.iface_list(**list_dumpxml_dargs)
            xml = virsh.iface_dumpxml(iface_name, "--inactive",
                                      to_file=iface_xml,
                                      **list_dumpxml_dargs)
            # Step 1.2
            # Destroy interface
            if iface_is_up:
                result = virsh.iface_destroy(iface_name, **virsh_dargs)
                if (params.get('setup_libvirt_polkit') == 'yes' and
                        stop_status_error):
                    # acl_test negative test
                    libvirt.check_exit_status(result, stop_status_error)
                    virsh.iface_destroy(iface_name, debug=True)
                else:
                    libvirt.check_exit_status(result, status_error)

            # Step 1.3
            # Undefine interface
            result = virsh.iface_undefine(iface_name, **virsh_dargs)
            if (params.get('setup_libvirt_polkit') == 'yes' and
                    delete_status_error):
                # acl_test negative test
                libvirt.check_exit_status(result, delete_status_error)
                virsh.iface_undefine(iface_name, debug=True)
            else:
                libvirt.check_exit_status(result, status_error)
            if not status_error:
                if libvirt.check_iface(iface_name, "exists", list_option):
                    raise error.TestFail("%s is still present." % iface_name)

        # Step 2
        # Define interface
        result = virsh.iface_define(iface_xml, **virsh_dargs)
        if (params.get('setup_libvirt_polkit') == 'yes' and
                write_save_status_error):
            # acl_test negative test
            libvirt.check_exit_status(result, write_save_status_error)
            virsh.iface_define(iface_xml, debug=True)
        elif iface_type == "bond" and not ping_ip:
            libvirt.check_exit_status(result, True)
            return
        else:
            libvirt.check_exit_status(result, status_error)

        if net_restart:
            network = service.Factory.create_service("network")
            network.restart()

        # After a network restart, the (ethernet) interface will be started
        if (not net_restart and iface_type in ("bridge", "ethernet")) or\
           (not use_exist_iface and iface_type in ("vlan", "bond")):
            # Step 3
            # List inactive interfaces
            list_option = "--inactive"
            if not status_error:
                if not libvirt.check_iface(iface_name, "exists", list_option):
                    raise error.TestFail("Fail to find %s." % iface_name)

            # Step 4
            # Start interface
            result = virsh.iface_start(iface_name, **virsh_dargs)
            if (params.get('setup_libvirt_polkit') == 'yes' and
                    start_status_error):
                # acl_test negative test
                libvirt.check_exit_status(result, start_status_error)
                virsh.iface_start(iface_name, debug=True)
            elif (not net_restart and not use_exist_iface and
                    (iface_type == "ethernet" and iface_pro in ["", "dhcp"] or
                        iface_type == "bridge" and iface_pro == "dhcp")):
                libvirt.check_exit_status(result, True)
            else:
                libvirt.check_exit_status(result, status_error)
            if not status_error:
                iface_ip = net_iface.get_ip()
                ping_ip = ping_ip if not iface_ip else iface_ip
                if ping_ip:
                    if not libvirt.check_iface(iface_name, "ping", ping_ip):
                        raise error.TestFail("Ping %s fail." % ping_ip)

        # Step 5
        # List active interfaces
        if use_exist_iface or\
           (iface_pro != "dhcp" and iface_type == "bridge") or\
           (iface_eth_using and iface_type == "vlan"):
            list_option = ""
            if not status_error:
                if not libvirt.check_iface(iface_name, "exists", list_option):
                    raise error.TestFail("Fail to find %s in active "
                                         "interface list" % iface_name)
            if vm:
                if vm.is_alive():
                    vm.destroy()
                iface_index = 0
                iface_mac_list = vm_xml.VMXML.get_iface_dev(vm_name)
                # Before test, detach all interfaces in guest
                for mac in iface_mac_list:
                    iface_info = vm_xml.VMXML.get_iface_by_mac(vm_name, mac)
                    type = iface_info.get('type')
                    virsh.detach_interface(vm_name,
                                           "--type %s --mac %s"
                                           " --config" % (type, mac))
                    # After detaching an interface, vm.virtnet also needs updating;
                    # the easy way is to free these mac addresses before starting the VM
                    vm.free_mac_address(iface_index)
                    iface_index += 1
                virsh.attach_interface(vm_name,
                                       "--type %s --source %s"
                                       " --config" % (iface_type, iface_name))
                vm.start()
                try:
                    # Test if guest can be login
                    vm.wait_for_login()
                except remote.LoginError:
                    raise error.TestFail("Cannot login guest with %s" %
                                         iface_name)

        # Step 6
        # Dumpxml for interface
        if list_dumpxml_acl:
            virsh.iface_list(**list_dumpxml_dargs)
        xml = virsh.iface_dumpxml(iface_name, "", to_file="",
                                  **list_dumpxml_dargs)
        logging.debug("Interface '%s' XML:\n%s", iface_name, xml)

        # Step 7
        # Get interface MAC address by name
        result = virsh.iface_mac(iface_name, debug=True)
        libvirt.check_exit_status(result, status_error)
        if not status_error and result.stdout.strip():
            if not libvirt.check_iface(iface_name, "mac",
                                       result.stdout.strip()):
                raise error.TestFail("Mac address check fail")

        # Step 8
        # Get interface name by MAC address
        # A bridge's MAC equals the bridged interface's MAC
        if iface_type not in ("bridge", "vlan") and result.stdout.strip():
            iface_mac = net_iface.get_mac()
            result = virsh.iface_name(iface_mac, debug=True)
            libvirt.check_exit_status(result, status_error)

        # Step 9
        if not use_exist_iface:
            # Step 9.0
            # check if the interface's state is active before destroying it
            if libvirt.check_iface(iface_name, "state", "--all"):
                # Step 9.1
                # Destroy interface
                result = virsh.iface_destroy(iface_name, **virsh_dargs)
                if (params.get('setup_libvirt_polkit') == 'yes' and
                        stop_status_error):
                    # acl_test negative test
                    libvirt.check_exit_status(result, stop_status_error)
                    virsh.iface_destroy(iface_name, debug=True)
                elif (not net_restart and iface_type == "ethernet" and
                        iface_pro in ["", "dhcp"] or iface_type == "bridge" and
                        iface_pro == "dhcp"):
                    libvirt.check_exit_status(result, True)
                else:
                    libvirt.check_exit_status(result, status_error)

            # Step 9.2
            # Undefine interface
            result = virsh.iface_undefine(iface_name, **virsh_dargs)
            if (params.get('setup_libvirt_polkit') == 'yes' and
                    delete_status_error):
                # acl_test negative test
                libvirt.check_exit_status(result, delete_status_error)
                virsh.iface_undefine(iface_name, debug=True)
            else:
                libvirt.check_exit_status(result, status_error)
            list_option = "--all"
            if not status_error:
                if libvirt.check_iface(iface_name, "exists", list_option):
                    raise error.TestFail("%s is still present." % iface_name)
    finally:
        if os.path.exists(iface_xml):
            os.remove(iface_xml)
        if os.path.exists(iface_script):
            os.remove(iface_script)

        if use_exist_iface:
            if not os.path.exists(iface_script):
                utils.run("mv %s %s" % (iface_script_bk, iface_script))
            if iface_is_up and\
               not libvirt.check_iface(iface_name, "exists", ""):
                # Need to reload the script
                utils.run("ifup %s" % iface_name)
            elif not iface_is_up and libvirt.check_iface(iface_name,
                                                         "exists", ""):
                net_iface.down()
            if vm:
                xml_bak.sync()
        else:
            if libvirt.check_iface(iface_name, "exists", "--all"):
                # Remove the interface
                try:
                    utils_net.bring_down_ifname(iface_name)
                except utils_net.TAPBringDownError:
                    pass
            if iface_type == "bridge":
                if iface_name in net_bridge.list_br():
                    try:
                        net_bridge.del_bridge(iface_name)
                    except IOError:
                        pass
        if NM_is_running:
            NM_service.start()
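
The dumpxml step above pushes an entire XML document through logging.debug. A minimal sketch of that multi-line, multi-argument pattern, with a hand-written XML snippet in place of virsh output:

import logging

logging.basicConfig(level=logging.DEBUG)

iface_name = "lo"
xml = ("<interface type='ethernet' name='lo'>\n"
       "  <protocol family='ipv4'/>\n"
       "</interface>")
# Several positional arguments can feed one debug record, including a
# multi-line payload such as dumped XML.
logging.debug("Interface '%s' XML:\n%s", iface_name, xml)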

Example 163

Project: baidu-fuse
Source File: baidufuse.py
View license
    def _add_file_to_buffer(self, path,file_info):
        foo = File()
        foo['st_ctime'] = file_info['local_ctime']
        foo['st_mtime'] = file_info['local_mtime']
        foo['st_mode'] = (stat.S_IFDIR | 0777) if file_info['isdir'] \
            else (stat.S_IFREG | 0777)
        foo['st_nlink'] = 2 if file_info['isdir'] else 1
        foo['st_size'] = file_info['size']
        self.buffer[path] = foo

    def _del_file_from_buffer(self,path):
        self.buffer.pop(path)

    def getattr(self, path, fh=None):
        #print 'getattr *',path
        # First check whether the file already exists in the cache

        if not self.buffer.has_key(path):
            print path, 'cache miss'
            #print self.buffer
            #print self.traversed_folder
            jdata = json.loads(self.disk.meta([path]).content)
            try:
                if 'info' not in jdata:
                    raise FuseOSError(errno.ENOENT)
                if jdata['errno'] != 0:
                    raise FuseOSError(errno.ENOENT)
                file_info = jdata['info'][0]
                self._add_file_to_buffer(path,file_info)
                st = self.buffer[path].getDict()
                return st
            except:
                raise FuseOSError(errno.ENOENT)
        else:
            #print path, 'cache hit'
            return self.buffer[path].getDict()



    def readdir(self, path, offset):
        self.uploadLock.acquire()
        while True:
            try:
                foo = json.loads(self.disk.list_files(path).text)
                break
            except:
                print 'error'


        files = ['.', '..']
        abs_files = [] # absolute paths of the files in this folder
        for file in foo['list']:
            files.append(file['server_filename'])
            abs_files.append(file['path'])
        # Cache the info of the files in this folder; query meta info in batches

        # Update: work around the meta API limit of at most 100 records per query
        # split into ceil(file_num / 100.0) groups, assigned by quotient
        if not self.traversed_folder.has_key(path) or self.traversed_folder[path] == False:
            print 'caching', path
            file_num = len(abs_files)
            group = int(math.ceil(file_num / 100.0))
            for i in range(group):
                obj = [f for n,f in enumerate(abs_files) if n % group == i] # one group of data
                while 1:
                    try:
                        ret = json.loads(self.disk.meta(obj).text)
                        break
                    except:
                        print 'error'

                for file_info in ret['info']:
                    if not self.buffer.has_key(file_info['path']):
                        self._add_file_to_buffer(file_info['path'],file_info)
            #print self.buffer
            print 'caching of', path, 'finished'
            self.traversed_folder[path] = True
        for r in files:
            yield r
        self.uploadLock.release()

    def _update_file_manual(self,path):
        while 1:
            try:
                jdata = json.loads(self.disk.meta([path]).content)
                break
            except:
                print 'error'

        if 'info' not in jdata:
            raise FuseOSError(errno.ENOENT)
        if jdata['errno'] != 0:
            raise FuseOSError(errno.ENOENT)
        file_info = jdata['info'][0]
        self._add_file_to_buffer(path,file_info)


    def rename(self, old, new):
        #logging.debug('* rename',old,os.path.basename(new))
        print '*'*10,'RENAME CALLED',old,os.path.basename(new),type(old),type(new)
        while True:
            try:
                ret = self.disk.rename([(old,os.path.basename(new))]).content
                jdata = json.loads(ret)
                break
            except:
                print 'error'

        if jdata['errno'] != 0:
            # the file name already exists; delete the original file
            print self.disk.delete([new]).content
            print self.disk.rename([(old,os.path.basename(new))])
        self._update_file_manual(new)
        self.buffer.pop(old)


    def open(self, path, flags):
        self.readLock.acquire()
        print '*'*10,'OPEN CALLED',path,flags
        #print '[****]',path
        """
        Permission denied

        accmode = os.O_RDONLY | os.O_WRONLY | os.O_RDWR
        if (flags & accmode) != os.O_RDONLY:
            raise FuseOSError(errno.EACCES)
        """
        self.fd += 1
        self.readLock.release()
        
        return self.fd

    def create(self, path, mode,fh=None):
        # Create a file
        # Non-ASCII (Chinese) paths are problematic here
        print '*'*10,'CREATE CALLED',path,mode,type(path)
        #if 'outputstream' not in path:
        tmp_file = tempfile.TemporaryFile('r+w+b')
        foo = self.disk.upload(os.path.dirname(path),tmp_file,os.path.basename(path)).content
        ret = json.loads(foo)
        print ret
        print 'create-not-outputstream',ret
        if ret['path'] != path:
            # File already exists
            print 'file already exists'
            raise FuseOSError(errno.EEXIST)
        '''
        else:
            print 'create:',path
            foo = File()
            foo['st_ctime'] = int(time.time())
            foo['st_mtime'] = int(time.time())
            foo['st_mode'] = (stat.S_IFREG | 0777)
            foo['st_nlink'] = 1
            foo['st_size'] = 0
            self.buffer[path] = foo
        '''


        '''
        dict(st_mode=(stat.S_IFREG | mode), st_nlink=1,
                                st_size=0, st_ctime=time.time(), st_mtime=time.time(),
                                st_atime=time.time())
        '''
        self.fd += 1
        return 0

    def write(self, path, data, offset, fp):
        # Called while a file is being uploaded
        # 4kb (4096 bytes) per block; data holds the bytes of the current block
        # The last block is detected by len(data) < 4096
        # File size = offset of the last block + len(data)

        # Uploading 4kb at a time is too slow, so accumulate a larger block and upload it in one go

        #print '*'*10,path,offset, len(data)

        def _block_size(stream):
            stream.seek(0,2)
            return stream.tell()

        _BLOCK_SIZE = 16 * 2 ** 20
        # Work for the first block
        if offset == 0:
            #self.uploadLock.acquire()
            #self.readLock.acquire()
            # Initialize the list of block md5s
            self.upload_blocks[path] = {'tmp':None,
                                        'blocks':[]}
            # Create a temporary buffer file
            tmp_file = tempfile.TemporaryFile('r+w+b')
            self.upload_blocks[path]['tmp'] = tmp_file

        # Write the data into the temporary file; if it reaches _BLOCK_SIZE, upload the block and reset the buffer
        try:
            tmp = self.upload_blocks[path]['tmp']
        except KeyError:
            return 0
        tmp.write(data)

        if _block_size(tmp) > _BLOCK_SIZE:
            print path, 'upload triggered'
            tmp.seek(0)
            try:
                foo = self.disk.upload_tmpfile(tmp,callback=ProgressBar()).content
                foofoo = json.loads(foo)
                block_md5 = foofoo['md5']
            except:
                print foo



            # Append this block's md5 to upload_blocks
            self.upload_blocks[path]['blocks'].append(block_md5)
            # Create a fresh temporary buffer file
            self.upload_blocks[path]['tmp'].close()
            tmp_file = tempfile.TemporaryFile('r+w+b')
            self.upload_blocks[path]['tmp'] = tmp_file
            print 'created temporary file', tmp_file.name

        # Work for the last block
        if len(data) < 4096:
            # If a file with the same name already exists, delete it
            while True:
                try:
                    foo = self.disk.meta([path]).content
                    foofoo = json.loads(foo)
                    break
                except:
                    print 'error'


            if foofoo['errno'] == 0:
                logging.debug('Deleted the file which has same name.')
                self.disk.delete([path])
            # Check whether an upload is still needed
            if _block_size(tmp) != 0:
                # The temporary file still holds data that must be uploaded
                print path, 'uploading the final block, size', _block_size(tmp)
                tmp.seek(0)
                while True:
                    try:
                        foo = self.disk.upload_tmpfile(tmp,callback=ProgressBar()).content
                        foofoo = json.loads(foo)
                        break
                    except:
                        print 'exception, retry.'

                block_md5 = foofoo['md5']
                # Append this block's md5 to upload_blocks
                self.upload_blocks[path]['blocks'].append(block_md5)

            # Call upload_superfile to merge the block files
            print 'merging file', path, type(path)
            self.disk.upload_superfile(path,self.upload_blocks[path]['blocks'])
            # Remove this path's data from upload_blocks
            self.upload_blocks.pop(path)
            # Refresh the local file-list cache
            self._update_file_manual(path)
            #self.readLock.release()
            #self.uploadLock.release()
        return len(data)


    def mkdir(self, path, mode):
        logger.debug("mkdir is:" + path)
        self.disk.mkdir(path)

    def rmdir(self, path):
        logger.debug("rmdir is:" + path)
        self.disk.delete([path])

    def read(self, path, size, offset, fh):
        #print '*'*10,'READ CALLED',path,size,offset
        #logger.debug("read is: " + path)
        paras = {'Range': 'bytes=%s-%s' % (offset, offset + size - 1)}
        while True:
            try:
                foo = self.disk.download(path, headers=paras).content
                return foo
            except:
                pass

    access = None
    statfs = None
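
The write() method in the example above accumulates the 4 KB chunks that FUSE hands it and only uploads once a block threshold is reached. A minimal sketch of that buffering pattern, assuming a hypothetical upload_block() callable in place of the real network call:

import logging
import tempfile

_BLOCK_SIZE = 16 * 2 ** 20  # same 16 MiB threshold used in the example above

def buffered_write(state, data, upload_block):
    # state: dict holding the temporary buffer file and the list of block md5s
    # upload_block: hypothetical callable that uploads one block and returns its md5
    if state.get('tmp') is None:
        state['tmp'] = tempfile.TemporaryFile()
        state['blocks'] = []
    tmp = state['tmp']
    tmp.write(data)
    tmp.seek(0, 2)  # jump to the end to measure the buffer
    if tmp.tell() >= _BLOCK_SIZE:
        logging.debug('flushing %d buffered bytes as one block', tmp.tell())
        tmp.seek(0)
        state['blocks'].append(upload_block(tmp))
        tmp.close()
        state['tmp'] = tempfile.TemporaryFile()
    return len(data)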

Example 164

Project: reviewstats
Source File: utils.py
View license
def get_changes(projects, ssh_user, ssh_key, only_open=False, stable='',
                server='review.openstack.org'):
    """Get the changesets data list.

    :param projects: List of gerrit project names.
    :type projects: list of str
    :param str ssh_user: Gerrit username.
    :param str ssh_key: Filename of one SSH key registered at gerrit.
    :param bool only_open: If True, get only the not closed reviews.
    :param str stable:
        Name of the stable branch. If empty string, the changesets are not
        filtered by any branch.

    :return: List of de-serialized JSON changeset data as returned by gerrit.
    :rtype: list

    .. note::
        If all the issues are requested, whatever the branch is, a cache
        system overrides the gerrit request.
        Requesting only the open changes isn't nearly as big of a deal,
        so just get the current data.
        Also do not use the cache for stable stats as they cover different
        results.
        Cached results are pickled per project in the following filenames:
        ".{projectname}-changes.pickle".
    """
    all_changes = {}

    client = paramiko.SSHClient()
    client.load_system_host_keys()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    connected = False

    for project in projects:
        changes = {}
        new_count = 0
        logging.debug('Getting changes for project %s' % project['name'])

        if not only_open and not stable:
            # Only use the cache for *all* changes (the entire history).
            pickle_fn = '.%s-changes.pickle' % project['name']

            if os.path.isfile(pickle_fn):
                with open(pickle_fn, 'r') as f:
                    try:
                        changes = pickle.load(f)
                    except MemoryError:
                        changes = {}

        if not isinstance(changes, dict):
            # The cache is in the old list format
            changes = {}

        if changes:
            for k, v in changes.items():
                # The cache is only using the id as a key.  We now need both
                # id and branch.
                if not isinstance(k, tuple):
                    changes = {}
                break

        while True:
            connect_attempts = 3
            for attempt in range(connect_attempts):
                if connected:
                    break
                try:
                    client.connect(server, port=29418,
                                   key_filename=ssh_key,
                                   username=ssh_user)
                except paramiko.SSHException:
                    try:
                        client.connect(server, port=29418,
                                       key_filename=ssh_key,
                                       username=ssh_user,
                                       allow_agent=False)
                    except paramiko.SSHException:
                        if attempt == connect_attempts - 1:  # give up after the final attempt
                            raise
                        time.sleep(3)
                        continue
                connected = True
                break

            cmd = ('gerrit query %s --all-approvals --patch-sets '
                   '--format JSON' % projects_q(project))
            if only_open:
                cmd += ' status:open'
            if stable:
                cmd += ' branch:stable/%s' % stable
            if new_count:
                cmd += ' --start %d' % new_count
            else:
                # Get a small set the first time so we can get to checking
                # against the cache sooner
                cmd += ' limit:5'
            try:
                stdin, stdout, stderr = client.exec_command(cmd)
            except paramiko.SSHException:
                try:
                    client.close()
                except Exception:
                    pass
                connected = False
                time.sleep(5)
                continue
            end_of_changes = False
            for l in stdout:
                new_change = json.loads(l)
                if 'rowCount' in new_change:
                    if new_change['rowCount'] == 0:
                        # We've reached the end of all changes
                        end_of_changes = True
                        break
                    else:
                        break
                if changes.get((new_change['id'],
                                new_change['project'],
                                new_change['branch']), None) == new_change:
                    # Changes are ordered by latest to be updated.  As soon
                    # as we hit one that hasn't changed since our cached
                    # version, we're done.
                    end_of_changes = True
                    break
                changes[(new_change['id'],
                         new_change['project'],
                         new_change['branch'])] = new_change
                new_count += 1
            if end_of_changes:
                break

        if not only_open and not stable:
            with open(pickle_fn, 'w') as f:
                pickle.dump(changes, f)

        all_changes.update(changes)

    if connected:
        try:
            client.close()
        except Exception:
            pass

    # changes used to be a list, but is now a dict.  Convert it back to a list
    # for the sake of not having to change all the code that calls this
    # function (yet, anyway).

    all_changes = [value for value in all_changes.itervalues()]

    return all_changes
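
get_changes() above keeps a per-project pickle cache of all changesets so only new ones need to be fetched from gerrit. A minimal sketch of the load-or-fall-back-to-empty part of that pattern (the cache filename follows the docstring; the exact exception list is an assumption):

import logging
import os
import pickle

def load_cached_changes(project_name):
    # Returns the cached {(id, project, branch): change} dict, or {} when the
    # cache file is missing or unreadable.
    pickle_fn = '.%s-changes.pickle' % project_name
    changes = {}
    if os.path.isfile(pickle_fn):
        with open(pickle_fn, 'rb') as f:
            try:
                changes = pickle.load(f)
            except (MemoryError, EOFError, pickle.UnpicklingError):
                changes = {}
    logging.debug('Loaded %d cached changes for %s', len(changes), project_name)
    return changes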

Example 165

Project: pyqso
Source File: adif.py
View license
    def is_valid(self, field_name, data, data_type):
        """ Validate the data in a field with respect to the ADIF specification.

        :arg str field_name: The name of the ADIF field.
        :arg str data: The data of the ADIF field to validate.
        :arg str data_type: The type of data to be validated. See http://www.adif.org/304/ADIF_304.htm#Data_Types for the full list with descriptions.
        :returns: True or False to indicate whether the data is valid or not.
        :rtype: bool
        """

        logging.debug("Validating the following data in field '%s': %s" % (field_name, data))

        # Allow an empty string, in case the user doesn't want
        # to fill in this field.
        if(data == ""):
            return True

        if(data_type == "N"):
            # Allow a decimal point before and/or after any numbers,
            # but don't allow a decimal point on its own.
            m = re.match(r"-?(([0-9]+\.?[0-9]*)|([0-9]*\.?[0-9]+))", data)
            if(m is None):
                # Did not match anything.
                return False
            else:
                # Make sure we match the whole string,
                # otherwise there may be an invalid character after the match.
                return (m.group(0) == data)

        elif(data_type == "B"):
            # Boolean
            m = re.match(r"(Y|N)", data)
            if(m is None):
                return False
            else:
                return (m.group(0) == data)

        elif(data_type == "D"):
            # Date
            pattern = re.compile(r"([0-9]{4})")
            m_year = pattern.match(data, 0)
            if((m_year is None) or (int(m_year.group(0)) < 1930)):
                # Did not match anything.
                return False
            else:
                pattern = re.compile(r"([0-9]{2})")
                m_month = pattern.match(data, 4)
                if((m_month is None) or int(m_month.group(0)) > 12 or int(m_month.group(0)) < 1):
                    # Did not match anything.
                    return False
                else:
                    pattern = re.compile(r"([0-9]{2})")
                    m_day = pattern.match(data, 6)
                    days_in_month = calendar.monthrange(int(m_year.group(0)), int(m_month.group(0)))
                    if((m_day is None) or int(m_day.group(0)) > days_in_month[1] or int(m_day.group(0)) < 1):
                        # Did not match anything.
                        return False
                    else:
                        # Make sure we match the whole string,
                        # otherwise there may be an invalid character after the match.
                        return (len(data) == 8)

        elif(data_type == "T"):
            # Time
            pattern = re.compile(r"([0-9]{2})")
            m_hour = pattern.match(data, 0)
            if((m_hour is None) or (int(m_hour.group(0)) < 0) or (int(m_hour.group(0)) > 23)):
                # Did not match anything.
                return False
            else:
                pattern = re.compile(r"([0-9]{2})")
                m_minutes = pattern.match(data, 2)
                if((m_minutes is None) or int(m_minutes.group(0)) < 0 or int(m_minutes.group(0)) > 59):
                    # Did not match anything.
                    return False
                else:
                    if(len(data) == 4):
                        # HHMM format
                        return True
                    pattern = re.compile(r"([0-9]{2})")
                    m_seconds = pattern.match(data, 4)
                    if((m_seconds is None) or int(m_seconds.group(0)) < 0 or int(m_seconds.group(0)) > 59):
                        # Did not match anything.
                        return False
                    else:
                        # Make sure we match the whole string,
                        # otherwise there may be an invalid character after the match.
                        return (len(data) == 6)  # HHMMSS format

        # FIXME: Need to make sure that the "S" and "M" data types accept ASCII-only characters
        # in the range 32-126 inclusive.
        elif(data_type == "S"):
            # String
            m = re.match(r"(.+)", data)
            if(m is None):
                return False
            else:
                return (m.group(0) == data)

        elif(data_type == "I"):
            # IntlString
            m = re.match(r"(.+)", data, re.UNICODE)
            if(m is None):
                return False
            else:
                return (m.group(0) == data)

        elif(data_type == "G"):
            # IntlMultilineString
            m = re.match(r"(.+(\r\n)*.*)", data, re.UNICODE)
            if(m is None):
                return False
            else:
                return (m.group(0) == data)

        elif(data_type == "M"):
            # MultilineString
            # m = re.match(r"(.+(\r\n)*.*)", data)
            # if(m is None):
            #   return False
            # else:
            #   return (m.group(0) == data)
            return True

        elif(data_type == "L"):
            # Location
            pattern = re.compile(r"([EWNS]{1})", re.IGNORECASE)
            m_directional = pattern.match(data, 0)
            if(m_directional is None):
                # Did not match anything.
                return False
            else:
                pattern = re.compile(r"([0-9]{3})")
                m_degrees = pattern.match(data, 1)
                if((m_degrees is None) or int(m_degrees.group(0)) < 0 or int(m_degrees.group(0)) > 180):
                    # Did not match anything.
                    return False
                else:
                    pattern = re.compile(r"([0-9]{2}\.[0-9]{3})")
                    m_minutes = pattern.match(data, 4)
                    if((m_minutes is None) or float(m_minutes.group(0)) < 0 or float(m_minutes.group(0)) > 59.999):
                        # Did not match anything.
                        return False
                    else:
                        # Make sure we match the whole string,
                        # otherwise there may be an invalid character after the match.
                        return (len(data) == 10)

        elif(data_type == "E" or data_type == "A"):
            # Enumeration, AwardList.
            if(field_name == "MODE"):
                return (data in list(MODES.keys()))
            elif(field_name == "BAND"):
                return (data in BANDS)
            else:
                return True

        else:
            return True
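
is_valid() above logs its trace with logging.debug and then checks each ADIF data type with a regex that must cover the whole string. A small, self-contained sketch of the numeric ("N") branch with DEBUG output enabled so the trace is visible:

import logging
import re

logging.basicConfig(level=logging.DEBUG, format='%(levelname)s: %(message)s')

def is_valid_number(data):
    # Mirrors the "N" branch above: optional sign, digits with an optional decimal
    # point, and the match must cover the whole string.
    logging.debug("Validating numeric field data: %s", data)
    m = re.match(r"-?(([0-9]+\.?[0-9]*)|([0-9]*\.?[0-9]+))", data)
    return m is not None and m.group(0) == data

print(is_valid_number("145.500"))   # True
print(is_valid_number("1.2.3"))     # False: trailing characters after the match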

Example 166

Project: temci
Source File: cli.py
View license
@document_func(misc_commands_description["completion"]["zsh"], common_options)
def temci__completion__zsh():
    subcommands = "\n\t".join(['"{}:{}"'.format(cmd, command_docs[cmd])
                               for cmd in sorted(command_docs.keys())])

    def process_options(options: CmdOptionList, one_line=False):
        typecheck(options, CmdOptionList)
        strs = []
        for option in sorted(options):
            multiple = isinstance(option.type_scheme, List) or isinstance(option.type_scheme, ListOrTuple)
            rounds = 10 if multiple else 1 # hack to allow multiple applications of an option
            assert isinstance(option, CmdOption)
            descr = "{}".format(option.description) if option.description is not None else "Undoc"
            option_str = "--{}".format(option.option_name)
            if option.has_short:
                option_str = "{{-{},--{}}}".format(option.short, option.option_name)
            if option.is_flag:
                option_str = "{{--{o},--no-{o}}}".format(o=option.option_name)
            new_completion = ""
            if option.has_completion_hints and "zsh" in option.completion_hints:
                new_completion = '{option_str}\"[{descr}]: :{hint}"'.format(
                    option_str=option_str, descr=descr, hint=option.completion_hints["zsh"]
                )
            else:
                format_str = '{option_str}\"[{descr}]"' if option.is_flag else '{option_str}\"[{descr}]: :()"'
                new_completion = format_str.format(
                    option_str=option_str, descr=descr
                )
            for i in range(rounds):
                strs.append(new_completion)

        if one_line:
            return " ".join(strs)
        return "\n\t".join(strs)

    misc_cmds_wo_subcmds = list(filter(lambda x: isinstance(misc_commands[x], CmdOptionList), misc_commands.keys()))
    misc_cmds_w_subcmds = list(filter(lambda x: isinstance(misc_commands[x], dict), misc_commands.keys()))

    ret_str = """
# Auto generated tab completion for the temci ({version}) benchmarking tool.


#compdef temci
_temci(){{
    # printf '%s ' "${{words[@]}}" > /tmp/out
    local ret=11 state

    local -a common_opts
    common_opts=(
        {common_opts}
    )

    typeset -A opt_args
    _arguments   -C  ':subcommand:->subcommand' '2: :->second_level' '*::options:->options' && ret=0
    #echo $state > tmp_file

    local sub_cmd=""
    case $words[1] in
        temci)
            sub_cmd=$words[2]
            ;;
        *)
            sub_cmd=$words[1]
    esac

    #echo $words[@] >> tmp_file

    case $words[2] in
        ({misc_cmds_wo_subs})
            state="options"
            ;;
    esac


    case $state in
    subcommand)
        local -a subcommands
        subcommands=(
            {subcommands}
        )

        _describe -t subcommands 'temci subcommand' subcommands && ret=0
    ;;
    """.format(common_opts=process_options(common_options),
               subcommands=" ".join("\"{}:{}\"".format(cmd, command_docs[cmd]) for cmd in command_docs),
               misc_cmds_wo_subs="|".join(misc_cmds_wo_subcmds),
               version=temci.scripts.version.version)
    ret_str += """
    second_level)

        #echo $words[@] > tmp_file
        case $words[2] in
    """
    for misc_cmd in misc_cmds_w_subcmds:
        ret_str += """
            ({misc_cmd})
                #echo "here" > tmp_file
                local -a subcommands
                subcommands=(
                    {sub_cmds}
                )
                _describe -t subcommands 'temci subcommand' subcommands && ret=0 && return 0
                ;;
        """.format(misc_cmd=misc_cmd,
                   sub_cmds="\n\t".join("\"{}:{}\"".format(x, misc_commands_description[misc_cmd][x])
                                        for x in misc_commands_description[misc_cmd]))
    ret_str += """
            (build|report|{drivers})
                _arguments "2: :_files -g '*\.yaml' "\
            ;;
            (exec_package|run_package)
                _arguments "2: :_files -g '*\.temci' "\
            ;;
        esac
        ;;
        """.format(drivers="|".join(sorted(run_driver.RunDriverRegistry.registry.keys())))
    ret_str +="""
    (options)
        local -a args
        args=(
        $common_opts
        )
        #echo "options" $words[@] > tmp_file


        case $words[1] in

        """

    for driver in run_driver.RunDriverRegistry.registry.keys():
        ret_str += """
        {driver})
            case $words[2] in
                *.yaml)
                    args=(
                    $common_opts
                    {opts}
                    )
                    _arguments "1:: :echo 3" $args && ret=0
                ;;
                *)
                    _arguments "1:: :echo 3" && ret=0
            esac
        ;;
        """.format(driver=driver, opts=process_options(run_options["run_driver_specific"][driver]))

    cmds = {
        "report": {
            "pattern": "*.yaml",
            "options": report_options,
        },
        "build": {
            "pattern": "*.yaml",
            "options": build_options
        },
        "exec_package|run_package": {
            "pattern": "*.temci",
            "options": package_options
        }
    }

    for name in cmds:
        ret_str += """
            ({name})
                #echo "({name})" $words[2]
                case $words[2] in
                    {pattern})
                        args=(
                        $common_opts
                        {options}
                        )
                        _arguments "1:: :echo 3" $args && ret=0
                    ;;
                    *)
                        _arguments "1:: :echo 3" && ret=0
                esac
            ;;
        """.format(name=name, pattern=cmds[name]["pattern"], options=process_options(cmds[name]["options"]))

    for misc_cmd in misc_cmds_w_subcmds:
        ret_str += """
        ({misc_cmd})
            case $words[2] in
            """.format(misc_cmd=misc_cmd)
        for sub_cmd in misc_commands[misc_cmd]["sub_commands"]:
            ret_str +="""
                {sub_cmd})
                    #echo "{sub_cmd}" $words[@] > tmp_file
                    args+=(
                        {common_opts}
                        {opts}
                    )

                    #echo "sdf" $args[@] > tmp_file
                    _arguments "1:: :echo 3" $args && ret=0
                ;;
            """.format(sub_cmd=sub_cmd,
                       opts=process_options(misc_commands[misc_cmd]["sub_commands"][sub_cmd]),
                       common_opts=process_options(misc_commands[misc_cmd]["common"]))
        ret_str += """
            esac
            ;;
        """

    ret_str += """
    esac



        case $sub_cmd in
    """

    for misc_cmd in misc_cmds_wo_subcmds:
        ret_str += """
        {misc_cmd})
            # echo "{misc_cmd}" $words[@] >> tmp_file
            args+=(
                {opts}
            )
            case $words[2] in
                $sub_cmd)
                    _arguments "1:: :echo 3" $args && ret=0
                    ;;
                *)
                    # echo "Hi" >> tmp_file
                    _arguments $args && ret=0
                    ;;
            esac
        ;;
    """.format(misc_cmd=misc_cmd, opts=process_options(misc_commands[misc_cmd]))

    ret_str += """
        esac

        #_arguments $common_opts && ret=0 && return 0
    ;;
    esac
    }

    compdef _temci temci=temci
    """
    create_completion_dir()
    file_name = completion_file_name("zsh")
    if not os.path.exists(os.path.dirname(file_name)):
        os.mkdir(os.path.dirname(file_name))
    with open(file_name, "w") as f:
        f.write(ret_str)
        logging.debug("\n".join("{:>3}: {}".format(i, s) for (i, s) in enumerate(ret_str.split("\n"))))
        f.flush()
    os.chmod(file_name, 0o777)
    print(file_name)
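
The completion generator above logs the generated zsh script with line numbers, which makes parse errors much easier to locate. The same idiom in isolation, as a small reusable sketch:

import logging

logging.basicConfig(level=logging.DEBUG)

def debug_numbered(text):
    # Log a multi-line string with right-aligned line numbers, as done above
    # for the generated completion script.
    logging.debug("\n".join("{:>3}: {}".format(i, s)
                            for (i, s) in enumerate(text.split("\n"))))

debug_numbered("line one\nline two\nline three")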

Example 167

Project: thus
Source File: auto_partition.py
View license
    def run(self):
        key_files = ["/tmp/.keyfile-root", "/tmp/.keyfile-home"]

        # Partition sizes are expressed in MiB

        # Get just the disk size in MiB
        device = self.auto_device

        logging.debug(_("Following device detected: {0}".format(device)))

        device_name = check_output("basename {0}".format(device))
        base_path = os.path.join("/sys/block", device_name)
        size_path = os.path.join(base_path, "size")
        if os.path.exists(size_path):
            logical_path = os.path.join(base_path, "queue/logical_block_size")
            with open(logical_path, 'r') as f:
                logical_block_size = int(f.read())
            with open(size_path, 'r') as f:
                size = int(f.read())
            disk_size = ((logical_block_size * (size - 68)) / 1024) / 1024
        else:
            txt = _("Setup cannot detect size of your device, please use advanced "
                    "installation routine for partitioning and mounting devices.")
            logging.error(txt)
            raise InstallError(txt)

        if self.GPT:
            start_part_sizes = 0
        else:
            # We start with a 1MiB offset before the first partition in MBR mode
            start_part_sizes = 1

        part_sizes = self.get_part_sizes(disk_size, start_part_sizes)
        self.log_part_sizes(part_sizes)

        # Disable swap and all mounted partitions, umount / last!
        unmount_all(self.dest_dir)
        remove_lvm(device)
        close_luks_devices()

        printk(False)

        # WARNING:
        # Our computed sizes are all in mebibytes (MiB) i.e. powers of 1024, not metric megabytes.
        # These are 'M' in sgdisk and 'MiB' in parted.
        # If you use 'M' in parted you'll get MB instead of MiB, and you're gonna have a bad time.

        if self.GPT:
            # Clean partition table to avoid issues!
            sgdisk("zap-all", device)

            # Clear all magic strings/signatures - mdadm, lvm, partition tables etc.
            dd("/dev/zero", device, bs=512, count=2048)
            wipefs(device)

            # Create fresh GPT
            sgdisk("clear", device)

            # Inform the kernel of the partition change. Needed if the hard disk had an MBR partition table.
            try:
                subprocess.check_call(["partprobe", device])
            except subprocess.CalledProcessError as err:
                txt = _("Error informing the kernel of the partition change.")
                logging.error(txt)
                logging.error(_("Command {0} failed".format(err.cmd)))
                logging.error(_("Output: {0}".format(err.output)))
                raise InstallError(txt)

            part_num = 1

            if not self.UEFI:
                # We don't allow BIOS+GPT right now, so this code will be never executed
                # We leave here just for future reference
                # Create BIOS Boot Partition
                # GPT GUID: 21686148-6449-6E6F-744E-656564454649
                # This partition is not required if the system is UEFI based,
                # as there is no such embedding of the second-stage code in that case
                sgdisk_new(device, part_num, "BIOS_BOOT", gpt_bios_grub_part_size, "EF02")
                part_num += 1

            # Create EFI System Partition (ESP)
            # GPT GUID: C12A7328-F81F-11D2-BA4B-00A0C93EC93B
            if self.bootloader == "grub2":
                sgdisk_new(device, part_num, "UEFI_SYSTEM", part_sizes['efi'], "EF00")
                part_num += 1

            # Create Boot partition
            if self.bootloader == "systemd-boot":
                sgdisk_new(device, part_num, "MANJARO_BOOT", part_sizes['boot'], "EF00")
            else:
                sgdisk_new(device, part_num, "MANJARO_BOOT", part_sizes['boot'], "8300")
            part_num += 1

            if self.lvm:
                # Create partition for lvm (will store root, swap and home (if desired) logical volumes)
                sgdisk_new(device, part_num, "MANJARO_LVM", 0, "8E00")
            else:
                sgdisk_new(device, part_num, "MANJARO_ROOT", part_sizes['root'], "8300")
                part_num += 1
                if self.home:
                    sgdisk_new(device, part_num, "MANJARO_HOME", part_sizes['home'], "8302")
                    part_num += 1
                sgdisk_new(device, part_num, "MANJARO_SWAP", 0, "8200")

            logging.debug(check_output("sgdisk --print {0}".format(device)))
        else:
            # DOS MBR partition table
            # Start at sector 1 for 4k drive compatibility and correct alignment
            # Clean partition table to avoid issues!
            dd("/dev/zero", device, bs=512, count=2048)
            wipefs(device)

            # Create DOS MBR
            parted_mktable(device, "msdos")

            # Create boot partition (all sizes are in MiB)
            # if start is -1 parted_mkpart assumes that our partition starts at 1 (first partition in disk)
            start = -1
            end = part_sizes['boot']
            parted_mkpart(device, "primary", start, end)

            # Set boot partition as bootable
            parted_set(device, "1", "boot", "on")

            if self.lvm:
                # Create partition for lvm (will store root, swap and home (if desired) logical volumes)
                start = end
                parted_mkpart(device, "primary", start, "-1s")

                # Set lvm flag
                parted_set(device, "2", "lvm", "on")
            else:
                # Create root partition
                start = end
                end = start + part_sizes['root']
                parted_mkpart(device, "primary", start, end)

                if self.home:
                    # Create home partition
                    start = end
                    end = start + part_sizes['home']
                    parted_mkpart(device, "primary", start, end)

                # Create an extended partition where we will put our swap partition
                start = end
                parted_mkpart(device, "extended", start, "-1s")

                # Now create a logical swap partition
                start += 1
                parted_mkpart(device, "logical", start, "-1s", "linux-swap")

        printk(True)

        # Wait until /dev initialized correct devices
        subprocess.check_call(["udevadm", "settle"])

        devices = self.get_devices

        if self.GPT and self.bootloader == "grub2":
            logging.debug("EFI: {0}".format(devices['efi']))

        logging.debug("Boot: {0}".format(devices['boot']))
        logging.debug("Swap: {0}".format(devices['swap']))
        logging.debug("Root: {0}".format(devices['root']))

        if self.home:
            logging.debug("Home: {0}".format(devices['home']))

        if self.luks:
            setup_luks(devices['luks'], "cryptManjaro", self.luks_password, key_files[0])
            if self.home and not self.lvm:
                setup_luks(devices['luks2'], "cryptManjaroHome", self.luks_password, key_files[1])

        if self.lvm:
            logging.debug(_("Thus will setup LVM on device {0}".format(devices['lvm'])))

            try:
                subprocess.check_call(["pvcreate", "-f", "-y", devices['lvm']])
            except subprocess.CalledProcessError as err:
                txt = _("Error creating LVM physical volume")
                logging.error(txt)
                logging.error(_("Command {0} failed".format(err.cmd)))
                logging.error(_("Output: {0}".format(err.output)))
                raise InstallError(txt)

            try:
                subprocess.check_call(["vgcreate", "-f", "-y", "ManjaroVG", devices['lvm']])
            except subprocess.CalledProcessError as err:
                txt = _("Error creating LVM volume group")
                logging.error(txt)
                logging.error(_("Command {0} failed".format(err.cmd)))
                logging.error(_("Output: {0}".format(err.output)))
                raise InstallError(txt)

            # Fix issue 180
            # Check space we have now for creating logical volumes
            vg_info = check_output("vgdisplay -c ManjaroVG")
            # Get column number 12: Size of volume group in kilobytes
            vg_size = int(vg_info.split(":")[11]) / 1024
            if part_sizes['lvm_pv'] > vg_size:
                logging.debug("Real ManjaroVG volume group size: %d MiB", vg_size)
                logging.debug("Readjusting logical volume sizes")
                diff_size = part_sizes['lvm_pv'] - vg_size
                part_sizes = self.get_part_sizes(disk_size - diff_size, start_part_sizes)
                self.log_part_sizes(part_sizes)

            try:
                size = str(int(part_sizes['root']))
                cmd = ["lvcreate", "--name", "ManjaroRoot", "--yes", "--size", size, "ManjaroVG"]
                subprocess.check_call(cmd)

                if not self.home:
                    # Use the remaining space for our swap volume
                    cmd = ["lvcreate", "--name", "ManjaroSwap", "--yes", "--extents", "100%FREE", "ManjaroVG"]
                    subprocess.check_call(cmd)
                else:
                    size = str(int(part_sizes['swap']))
                    cmd = ["lvcreate", "--name", "ManjaroSwap", "--yes", "--size", size, "ManjaroVG"]
                    subprocess.check_call(cmd)
                    # Use the remaining space for our home volume
                    cmd = ["lvcreate", "--name", "ManjaroHome", "--yes", "--extents", "100%FREE", "ManjaroVG"]
                    subprocess.check_call(cmd)
            except subprocess.CalledProcessError as err:
                txt = _("Error creating LVM logical volume")
                logging.error(txt)
                logging.error(_("Command {0} failed".format(err.cmd)))
                logging.error(_("Output: {0}".format(err.output)))
                raise InstallError(txt)

        # We have all partitions and volumes created. Let's create its filesystems with mkfs.

        mount_points = {
            'efi': '/boot/efi',
            'boot': '/boot',
            'root': '/',
            'home': '/home',
            'swap': ''}

        labels = {
            'efi': 'UEFI_SYSTEM',
            'boot': 'ManjaroBoot',
            'root': 'ManjaroRoot',
            'home': 'ManjaroHome',
            'swap': 'ManjaroSwap'}

        fs_devices = self.get_fs_devices()

        # Note: Make sure the "root" partition is defined first!
        self.mkfs(devices['root'], fs_devices[devices['root']], mount_points['root'], labels['root'])
        self.mkfs(devices['swap'], fs_devices[devices['swap']], mount_points['swap'], labels['swap'])

        if self.GPT and self.bootloader == "systemd-boot":
            # Format EFI System Partition (ESP) with vfat (fat32)
            self.mkfs(devices['boot'], fs_devices[devices['boot']], mount_points['boot'], labels['boot'], "-F 32")
        else:
            self.mkfs(devices['boot'], fs_devices[devices['boot']], mount_points['boot'], labels['boot'])

        if self.GPT and self.bootloader == "grub2":
            # Format EFI System Partition (ESP) with vfat (fat32)
            self.mkfs(devices['efi'], fs_devices[devices['efi']], mount_points['efi'], labels['efi'], "-F 32")

        if self.home:
            self.mkfs(devices['home'], fs_devices[devices['home']], mount_points['home'], labels['home'])

        # NOTE: encrypted and/or lvm2 hooks will be added to mkinitcpio.conf in process.py if necessary
        # NOTE: /etc/default/grub, /etc/fstab and /etc/crypttab will be modified in process.py, too.

        if self.luks and self.luks_password == "":
            # Copy root keyfile to boot partition and home keyfile to root partition
            # user will choose what to do with it
            # THIS IS NONSENSE (BIG SECURITY HOLE), BUT WE TRUST THE USER TO FIX THIS
            # User shouldn't store the keyfiles unencrypted unless the medium itself is reasonably safe
            # (boot partition is not)

            try:
                os.chmod(key_files[0], 0o400)
                cmd = ['mv', key_files[0], os.path.join(self.dest_dir, "boot")]
                subprocess.check_call(cmd)
                if self.home and not self.lvm:
                    os.chmod(key_files[1], 0o400)
                    luks_dir = os.path.join(self.dest_dir, 'etc/luks-keys')
                    os.makedirs(luks_dir, mode=0o755, exist_ok=True)
                    subprocess.check_call(['mv', key_files[1], luks_dir])
            except subprocess.CalledProcessError as err:
                txt = _("Can't copy LUKS keyfile to the installation device")
                logging.warning(txt)
                logging.warning(_("Command {0} failed".format(err.cmd)))
                logging.warning(_("Output: {0}".format(err.output)))
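
Example 167 mixes two logging.debug styles: building the message eagerly with str.format() and passing the arguments separately so interpolation is deferred until the record is actually emitted. A short sketch contrasting the two (the sample values are placeholders):

import logging

logging.basicConfig(level=logging.DEBUG)

device = "/dev/sda"
vg_size = 10240

# Eager: the string is fully built with str.format() before logging.debug is called,
# even when the DEBUG level is disabled.
logging.debug("Following device detected: {0}".format(device))

# Deferred: the %-style arguments are only interpolated if the record is actually emitted.
logging.debug("Real ManjaroVG volume group size: %d MiB", vg_size)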

Example 168

Project: pathomx
Source File: runqueue.py
View license
    def __init__(self, tool, varsi, *args, **kwargs):
        """
        Generate an execution queue from the supplied singular tool
        current tool is in the head position, execution will start from there.

        As each tool is passed, its code and config are locked into the tool so they
        stay static for the whole of the execution, ignoring future changes.

        A set of tool objects will uniquely describe this job,
        and allow superset or == jobs to delete this job.
        """
        super(ToolJob, self).__init__(*args, **kwargs)

        # Global varsi must be passed at the start of each Exec job as they may not be present
        # otherwise. Stored here for re-use. n.b. within a Job these should not change (persistence);
        # may need to account for this by taking a copy of styles here?
        # self.global_varsi = {
        #    'rcParams': {k: v for k, v in rcParams.items() if k not in strip_rcParams},
        #    '_pathomx_database_path': os.path.join(utils.scriptdir, 'database'),
        #    'styles': styles,
        #    }

        global_varsi = varsi.copy()

        # Build the queue of Exec objects;
        self.execs_queue = []
        self.exec_tool_lookup = {}
        tool_task_lookup = {}

        process_queue = [tool]
        process_done = []

        tool_list = []
        exec_list = []

        previous_tool = None

        while len(process_queue) > 0:

            # Remove duplicates
            process_queue = list(set(process_queue))
            t = process_queue.pop(0)  # From front

            # Check what this tool depends on
            parents = t.get_parents()

            if len(parents) > 0 \
                and len( set(parents) & set(process_queue) ) > 0:
                # waiting on something here, push to the back of the list for later
                process_queue.append(t)
                continue

            # Build an exec object for the given tool: note that at this point we cannot determine whether the
            # vars are on the correct kernel. We can't seed at this point either, as the result of subsequent
            # calculations will not be reflected. The solution is to populate Exec.varsi{} at runtime dispatch.
            # In order to make the necessary data available at that time, we store it here via a lookup.
            varsi = {
                    'config': t.config.as_dict(),
                    '_pathomx_tool_path': t.plugin.path,
                    '_pathomx_expected_output_vars': list( t.data.o.keys() ),
                    }

            # Build the IO magic
            # - if the source did not run on the current runner we'll need to push the data over (later)
            io = {'input': {}, 'output': {}, }
            for i, sm in t.data.i.items():
                if sm:
                    mo, mi = sm
                    io['input'][i] = "_%s_%s" % (mi, id(mo.v))
                else:
                    io['input'][i] = None
            for o in t.data.o.keys():
                io['output'][o] = "_%s_%s" % (o, id(t))
            varsi['_io'] = io


            e = Execute(
                varsi=varsi,
                code=[
                      "from pathomx.kernel_helpers import pathomx_notebook_start, pathomx_notebook_stop, progress, open_with_progress; pathomx_notebook_start(vars());",
                      t.code,
                      "pathomx_notebook_stop(vars());",
                    ],
                varso=['varso'],
                language=t.language,
                metadata={'name': t.name, 'tool': t },
            )

            e.progress.connect(t.progress.emit)
            e.result.connect(t._worker_result_callback)

            # Store the tool for this Exec object; for dispatch calculation
            self.exec_tool_lookup[e] = tool


            watchers = [w.v for k, v in t.data.watchers.items() for w in v]
            for w in watchers:
                if w not in process_done and w not in process_queue:
                    # Put all watchers at the front of the list, this will iteratively ensure we can straight-line
                    # down at least one path once started
                    process_queue.insert(0, w)


            # Determine the position of the object
            # Is before fork; if there is more than 1 tool watching this one
            is_before_fork = len(watchers) > 1
            # Is the end of a fork (watchers have > 1 parent)
            is_end_of_fork = len([p for w in watchers for p in w.get_parents()]) > 1


            if previous_tool is not None and previous_tool not in parents:
                # We are not a direct descendant, so we're going to have to start a new Task.
                # There should be more effort to mitigate this in ordering of the task-queue
                task = Task(self, execute=exec_list)
                self.tasks_queued.append(task)

                for pt in tool_list:
                    tool_task_lookup[pt] = task

                exec_list = []
                tool_list = []

            # If this is the first execute object in the queue (list is empty), update it with the global vars for run
            # and store the head of branch tool for later dependencies
            if not exec_list:
                e.varsi.update( global_varsi )

            tool_list.append(t)
            exec_list.append(e)

            if is_before_fork or is_end_of_fork:
                # We've got >1 children, we need to create a split task again
                task = Task(self, execute=exec_list)
                self.tasks_queued.append(task)

                for pt in tool_list:
                    tool_task_lookup[pt] = task

                exec_list = []
                tool_list = []

            process_done.append(t)
            previous_tool = t

        if exec_list: # Remainders
            task = Task(self, execute=exec_list)
            self.tasks_queued.append( task )

        logging.debug("task_queue: %s" % self.tasks_queued)


        # Apply the dependencies to each task: we need to do this at the end to avoid missing due to order
        for t in self.tasks_queued:
            if t.execute:
                e0 = t.execute[0]
                dependencies = []
                for ti in e0.metadata['tool'].get_parents():
                    if ti in tool_task_lookup:
                        dependencies.append( tool_task_lookup[ti] )
                t.dependencies = dependencies

        self.tool_list = process_done

Example 169

Project: thus
Source File: process.py
View license
    def configure_system(self):
        """ Final install steps
            Set clock, language, timezone
            Run mkinitcpio
            Populate pacman keyring
            Setup systemd services
            ... and more """

        # The first and last thing we do here is mounting/unmounting special dirs.
        chroot.mount_special_dirs(DEST_DIR)

        self.queue_event('pulse', 'start')
        self.queue_event('action', _("Configuring your new system"))

        self.auto_fstab()
        logging.debug(_('fstab file generated.'))

        # Copy configured networks in Live medium to target system
        if self.network_manager == 'NetworkManager':
            self.copy_network_config()

        logging.debug(_('Network configuration copied.'))

        # enable services
        # self.enable_services([self.network_manager])

        # cups_service = os.path.join(DEST_DIR, "usr/lib/systemd/system/org.cups.cupsd.service")
        # if os.path.exists(cups_service):
        #    self.enable_services(['org.cups.cupsd'])"""

        # enable targets
        # self.enable_targets(['remote-fs.target'])

        # logging.debug('Enabled installed services.')

        # Wait FOREVER until the user sets the timezone
        while self.settings.get('timezone_done') is False:
            # wait five seconds and try again
            time.sleep(5)

        if self.settings.get("use_ntp"):
            self.enable_services(["ntpd"])

        # Set timezone
        zoneinfo_path = os.path.join("/usr/share/zoneinfo", self.settings.get("timezone_zone"))
        chroot_run(['ln', '-s', zoneinfo_path, "/etc/localtime"])

        logging.debug(_('Time zone set.'))

        # Wait FOREVER until the user sets his params
        while self.settings.get('user_info_done') is False:
            # wait five seconds and try again
            time.sleep(5)

        # Set user parameters
        username = self.settings.get('username')
        fullname = self.settings.get('fullname')
        password = self.settings.get('password')
        root_password = self.settings.get('root_password')
        hostname = self.settings.get('hostname')

        sudoers_path = os.path.join(DEST_DIR, "etc/sudoers.d/10-installer")

        with open(sudoers_path, "w") as sudoers:
            sudoers.write('{0} ALL=(ALL) ALL\n'.format(username))

        subprocess.check_call(["chmod", "440", sudoers_path])

        logging.debug(_('Sudo configuration for user {0} done.'.format(username)))

        default_groups = 'lp,video,network,storage,wheel,users'

        if self.settings.get('require_password') is False:
            chroot_run(['groupadd', 'autologin'])
            default_groups += ',autologin'

        chroot_run(['useradd', '-m', '-s', '/bin/bash', '-U', '-G', default_groups, username])

        logging.debug(_('User {0} added.'.format(username)))

        self.change_user_password(username, password)

        chroot_run(['chfn', '-f', fullname, username])

        chroot_run(['chown', '-R', '{0}:{1}'.format(username, username), "/home/{0}".format(username)])

        hostname_path = os.path.join(DEST_DIR, "etc/hostname")
        with open(hostname_path, "w") as hostname_file:
            hostname_file.write(hostname)

        logging.debug(_('Hostname  {0} set.'.format(hostname)))

        # Set root password
        if root_password != '':
            self.change_user_password('root', root_password)
            logging.debug(_('Set root password.'))
        else:
            self.change_user_password('root', password)
            logging.debug(_('Set the same password to root.'))

        # Generate locales
        locale = self.settings.get("locale")

        self.queue_event('info', _("Generating locales ..."))
        self.uncomment_locale_gen(locale)
        chroot_run(['locale-gen'])

        locale_conf_path = os.path.join(DEST_DIR, "etc/locale.conf")
        with open(locale_conf_path, "w") as locale_conf:
            locale_conf.write('LANG={0}\n'.format(locale))

        keyboard_layout = self.settings.get("keyboard_layout")
        keyboard_variant = self.settings.get("keyboard_variant")
        # Set /etc/vconsole.conf
        vconsole_conf_path = os.path.join(DEST_DIR, "etc/vconsole.conf")
        with open(vconsole_conf_path, "w") as vconsole_conf:
            vconsole_conf.write('KEYMAP={0}\n'.format(keyboard_layout))

        # Write xorg keyboard configuration
        xorg_conf_dir = os.path.join(DEST_DIR, "etc/X11/xorg.conf.d")
        os.makedirs(xorg_conf_dir, exist_ok=True)
        fname = "{0}/etc/X11/xorg.conf.d/00-keyboard.conf".format(DEST_DIR)
        with open(fname, 'w') as file:
            default_keyboard_layout = "us"
            default_keyboard_model = "pc105"
            if keyboard_layout == default_keyboard_layout:
                xkblayout = "{}".format(keyboard_layout)
                xkbvariant = "{}".format(keyboard_variant)
            else:
                xkblayout = "{},{}".format(keyboard_layout,
                                           default_keyboard_layout)
                xkbvariant = "{},".format(keyboard_variant)

            file.write("\n"
                       "Section \"InputClass\"\n"
                       " Identifier \"system-keyboard\"\n"
                       " MatchIsKeyboard \"on\"\n"
                       " Option \"XkbLayout\" \"{}\"\n"
                       " Option \"XkbModel\" \"{}\"\n"
                       " Option \"XkbVariant\" \"{}\"\n"
                       " Option \"XkbOptions\" \"{}\"\n"
                       "EndSection\n"
                       .format(xkblayout,
                               default_keyboard_model,
                               xkbvariant,
                               "terminate:ctrl_alt_bksp,grp:alt_shift_toggle"))

        self.queue_event('info', _("Adjusting hardware clock ..."))
        self.auto_timesetting()

        # Install configs for root
        # chroot_run(['cp', '-av', '/etc/skel/.', '/root/'])

        self.queue_event('info', _("Configuring hardware ..."))

        # Configure ALSA
        self.alsa_mixer_setup()
        logging.debug(_("Updated Alsa mixer settings"))

        '''# Set pulse
        if os.path.exists(os.path.join(DEST_DIR, "usr/bin/pulseaudio-ctl")):
            chroot_run(['pulseaudio-ctl', 'set', '75%'])'''

        if os.path.exists("/opt/livecd"):
            repo_path="/opt/livecd/pacman-gfx.conf"
        else:
            repo_path="/opt/live/pacman-gfx.conf"

        # Install xf86-video driver
        if os.path.exists(repo_path):
            self.queue_event('info', _("Installing drivers ..."))
            mhwd_script_path = os.path.join(self.settings.get("thus"), "scripts", MHWD_SCRIPT)
            try:
                subprocess.check_call(["/usr/bin/bash", mhwd_script_path])
                logging.debug("Finished installing drivers.")
            except subprocess.CalledProcessError as e:
                txt = "CalledProcessError.output = {0}".format(e.output)
                logging.error(txt)
                self.queue_fatal_event(txt)
                return False

        self.queue_event('info', _("Configure display manager ..."))
        # Setup slim
        if os.path.exists("/usr/bin/slim"):
            self.desktop_manager = 'slim'

        # Setup sddm
        if os.path.exists("/usr/bin/sddm"):
            self.desktop_manager = 'sddm'

        # setup lightdm
        if os.path.exists("{0}/usr/bin/lightdm".format(DEST_DIR)):
            default_desktop_environment = self.find_desktop_environment()
            if default_desktop_environment is not None:
                os.system("sed -i -e 's/^.*user-session=.*/user-session={0}/' \
		{1}/etc/lightdm/lightdm.conf".format(default_desktop_environment.desktop_file, DEST_DIR))
                os.system("ln -s /usr/lib/lightdm/lightdm/gdmflexiserver {0}/usr/bin/gdmflexiserver".format(DEST_DIR))
            os.system("chmod +r {0}/etc/lightdm/lightdm.conf".format(DEST_DIR))
            self.desktop_manager = 'lightdm'

        # Setup gdm
        if os.path.exists("{0}/usr/bin/gdm".format(DEST_DIR)):
            default_desktop_environment = self.find_desktop_environment()
            os.system("echo \"[User]\" > {0}/var/lib/AccountsService/users/{1}".format(DEST_DIR, username))
            if default_desktop_environment is not None:
                os.system("echo \"XSession={0}\" >> \
                {1}/var/lib/AccountsService/users/{2}".format(default_desktop_environment.desktop_file, DEST_DIR, username))
                os.system("echo \"Icon=\" >> {0}/var/lib/AccountsService/users/{1}".format(DEST_DIR, username))
            self.desktop_manager = 'gdm'

        # Setup mdm
        if os.path.exists("{0}/usr/bin/mdm".format(DEST_DIR)):
            default_desktop_environment = self.find_desktop_environment()
            if default_desktop_environment is not None:
                os.system("sed -i 's|default.desktop|{0}.desktop|g' \
                {1}/etc/mdm/custom.conf".format(default_desktop_environment.desktop_file, DEST_DIR))
            self.desktop_manager = 'mdm'

        # Setup lxdm
        if os.path.exists("{0}/usr/bin/lxdm".format(DEST_DIR)):
            default_desktop_environment = self.find_desktop_environment()
            if default_desktop_environment is not None:
                os.system("sed -i -e 's|^.*session=.*|session={0}|' \
                {1}/etc/lxdm/lxdm.conf".format(default_desktop_environment.executable, DEST_DIR))
            self.desktop_manager = 'lxdm'

        # Setup kdm
        if os.path.exists("{0}/usr/bin/kdm".format(DEST_DIR)):
            self.desktop_manager = 'kdm'

        self.queue_event('info', _("Configure System ..."))

        # Adjust Steam-Native when libudev.so.0 is available
        if (os.path.exists("{0}/usr/lib/libudev.so.0".format(DEST_DIR)) or
                os.path.exists("{0}/usr/lib32/libudev.so.0".format(DEST_DIR))):
            os.system("echo -e \"STEAM_RUNTIME=0\nSTEAM_FRAME_FORCE_CLOSE=1\" >> {0}/etc/environment".format(DEST_DIR))

        # Remove thus
        if os.path.exists("{0}/usr/bin/thus".format(DEST_DIR)):
            self.queue_event('info', _("Removing live configuration (packages)"))
            chroot_run(['pacman', '-R', '--noconfirm', 'thus'])

        # Remove virtualbox driver on real hardware
        p1 = subprocess.Popen(["mhwd"], stdout=subprocess.PIPE)
        p2 = subprocess.Popen(["grep", "0300:80ee:beef"], stdin=p1.stdout, stdout=subprocess.PIPE)
        num_res = p2.communicate()[0]
        if num_res == "0":
            chroot_run(['sh', '-c', 'pacman -Rsc --noconfirm $(pacman -Qq | grep virtualbox-guest-modules)'])

        # Set unique machine-id
        chroot_run(['dbus-uuidgen', '--ensure=/etc/machine-id'])
        chroot_run(['dbus-uuidgen', '--ensure=/var/lib/dbus/machine-id'])

        # Setup pacman
        self.queue_event("action", _("Configuring package manager"))

        # Copy mirror list
        shutil.copy2('/etc/pacman.d/mirrorlist',
                     os.path.join(DEST_DIR, 'etc/pacman.d/mirrorlist'))

        # Copy random generated keys by pacman-init to target
        if os.path.exists("{0}/etc/pacman.d/gnupg".format(DEST_DIR)):
            os.system("rm -rf {0}/etc/pacman.d/gnupg".format(DEST_DIR))
        os.system("cp -a /etc/pacman.d/gnupg {0}/etc/pacman.d/".format(DEST_DIR))
        chroot_run(['pacman-key', '--populate', 'archlinux', 'manjaro'])
        self.queue_event('info', _("Finished configuring package manager."))

        # Workaround for pacman-key bug FS#45351 https://bugs.archlinux.org/task/45351
        # We have to kill gpg-agent because if it stays around we can't reliably unmount
        # the target partition.
        chroot_run(['killall', '-9', 'gpg-agent'])

        # Let's start without using hwdetect for mkinitcpio.conf.
        # I think it should work out of the box most of the time.
        # This way we don't have to fix deprecated hooks.
        # NOTE: With LUKS or LVM maybe we'll have to fix deprecated hooks.
        self.queue_event('info', _("Running mkinitcpio ..."))
        mkinitcpio.run(DEST_DIR, self.settings, self.mount_devices, self.blvm)
        self.queue_event('info', _("Running mkinitcpio - done"))

        # Set autologin if selected
        # In openbox "desktop", the post-install script writes /etc/slim.conf
        # so we always have to call set_autologin AFTER the post-install script.
        if self.settings.get('require_password') is False:
            self.set_autologin()

        # Encrypt user's home directory if requested
        # FIXME: This is not working atm
        if self.settings.get('encrypt_home'):
            logging.debug(_("Encrypting user home dir..."))
            encfs.setup(username, DEST_DIR)
            logging.debug(_("User home dir encrypted"))

        # Install boot loader (always after running mkinitcpio)
        if self.settings.get('bootloader_install'):
            try:
                self.queue_event('info', _("Installing bootloader..."))
                from installation import bootloader

                boot_loader = bootloader.Bootloader(DEST_DIR,
                                                    self.settings,
                                                    self.mount_devices)
                boot_loader.install()
            except Exception as error:
                logging.error(_("Couldn't install boot loader: {0}"
                                .format(error)))

        self.queue_event('pulse', 'stop')
        chroot.umount_special_dirs(DEST_DIR)
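
The installer above wraps its logging.debug messages in gettext's _() so they can be translated. Below is a minimal sketch of that pattern with hypothetical names (the "thus" translation domain, encrypt_home_dir and its arguments are illustrative, not the project's actual API):

import gettext
import logging

# Install gettext so _() is available, then emit translatable debug messages
# around a step, as the installer does for the home-directory encryption.
gettext.install("thus")  # hypothetical translation domain
logging.basicConfig(level=logging.DEBUG)

def encrypt_home_dir(username, dest_dir):
    """Stand-in for encfs.setup(); only the logging pattern matters here."""
    logging.debug(_("Encrypting user home dir..."))
    # ... the real work would happen here ...
    logging.debug(_("User home dir encrypted"))

encrypt_home_dir("demo", "/install")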

Example 170

Project: bsdpy
Source File: bsdpserver.py
View license
def getSysIdEntitlement(nbisources, clientsysid, clientmacaddr, bsdpmsgtype):
    """
        The getSysIdEntitlement function takes a list of previously compiled NBI
        sources and a clientsysid parameter to determine which of the entries in
        nbisources the clientsysid is entitled to.

        The function:
        - Initializes the 'hasdupes' variable as False.
        - Checks for an enabledmacaddrs value:
            - If an empty list, no filtering is performed
            - It will otherwise contain one or more MAC addresses, and thisnbi
              will be skipped if the client's MAC address is not in this list.
            - Apple's NetInstall service also may create a "DisabledMACAddresses"
              blacklist, but this never seems to be used.
        - Checks for duplicate clientsysid entries in enabled/disabledsysids:
            - If found, there is a configuration issue with
              NBImageInfo.plist and thisnbi is skipped; a warning
              is thrown for the admin to act on. The hasdupes variable will be
              set to True.
        - Checks if hasdupes is False:
            - If True, continue with the tests below, otherwise iterate next.
        - Checks for empty disabledsysids and enabledsysids lists:
            - If both lists are zero length thisnbi is added to nbientitlements.
        - Checks for a missing clientsysid entry in enabledsysids OR a matching
          clientsysid entry in disabledsysids:
            - If either is True thisnbi is skipped.
        - Checks for matching clientsysid entry in enabledsysids AND a missing
          clientsysid entry in disabledsysids:
            - If both are True thisnbi is added to nbientitlements.
    """

    # Globals are used to give other functions access to these later
    global defaultnbi
    global imagenameslist
    global hasdefault

    logging.debug('Determining image list for system ID ' + clientsysid)

    # Initialize lists for nbientitlements and imagenameslist, both will
    #   contain a series of dicts
    nbientitlements = []
    imagenameslist = []

    try:
        # Iterate over the NBI list
        for thisnbi in nbisources:

            # First a sanity check for duplicate system ID entries
            hasdupes = False

            if clientsysid in thisnbi['disabledsysids'] and \
               clientsysid in thisnbi['enabledsysids']:

                # Duplicate entries are bad mkay, so skip this NBI and warn
                logging.debug('!!! Image "' + thisnbi['description'] +
                        '" has duplicate system ID entries '
                        'for model "' + clientsysid + '" - skipping !!!')
                hasdupes = True

            # Check whether both disabledsysids and enabledsysids are empty and
            #   if so add the NBI to the list, there are no restrictions.
            if not hasdupes:
                # If the NBI had a non-empty EnabledMACAddresses array present,
                # skip this image if this client's MAC is not in the list.
                if thisnbi['enabledmacaddrs'] and \
                    clientmacaddr not in thisnbi['enabledmacaddrs']:
                    logging.debug('MAC address ' + clientmacaddr + ' is not '
                                  'in the enabled MAC list - skipping "' +
                                  thisnbi['description'] + '"')
                    continue

                if len(thisnbi['disabledsysids']) == 0 and \
                   len(thisnbi['enabledsysids']) == 0:
                    logging.debug('Image "' + thisnbi['description'] +
                            '" has no restrictions, adding to list')
                    nbientitlements.append(thisnbi)

                # Check for an entry in disabledsysids; if present we skip this NBI
                elif clientsysid in thisnbi['disabledsysids']:
                    logging.debug('System ID "' + clientsysid + '" is disabled'
                                    ' - skipping "' + thisnbi['description'] + '"')

                # Check for an entry in enabledsysids
                elif clientsysid not in thisnbi['enabledsysids'] or \
                     (clientsysid in thisnbi['enabledsysids'] and
                     clientsysid not in thisnbi['disabledsysids']):
                    logging.debug('Found enabled system ID ' + clientsysid +
                          ' - adding "' + thisnbi['description'] + '" to list')
                    nbientitlements.append(thisnbi)

    except:
        logging.debug("Unexpected error filtering image entitlements: %s" %
                        sys.exc_info()[1])
        raise

    try:
        # Now we iterate through the entitled NBIs in search of a default
        #   image, as determined by its "IsDefault" key
        for image in nbientitlements:

            # Check for an isdefault entry in the current NBI
            if image['isdefault'] is True:
                logging.debug('Found default image ID ' + str(image['id']))

                # By default defaultnbi is 0, so change it to the matched NBI's
                #   id. If more than one is found (which shouldn't happen) we use
                #   the highest id found. This behavior may be changed if it proves
                #   to be problematic, such as breaking out of the for loop instead.
                if defaultnbi < image['id']:
                    defaultnbi = image['id']
                    hasdefault = True
                    # logging.debug('Setting default image ID ' + str(defaultnbi))
                    # logging.debug('hasdefault is: ' + str(hasdefault))

            # This matches the case where no default image is found, which is
            #   possible. In that case we use the highest id found as the
            #   default. This too could be changed at a later time.
            elif not hasdefault:
                if defaultnbi < image['id']:
                    defaultnbi = image['id']
                    # logging.debug('Changing default image ID ' + str(defaultnbi))

            # Next we construct our imagenameslist which is a list of ints that
            #   encodes the image id, total name length and its name for use
            #   by the packet encoder

            # The imageid should be a zero-padded 4 byte string represented as
            #   ints
            imageid = '%04X' % image['id']

            # Our skip interval within the list; each image ID requires a "[129,0]"
            #   header which we don't want to count toward the length
            n = 2

            # Construct the list by iterating over the imageid, converting to a
            #   16 bit string as we go, for proper packet encoding
            imageid = [int(imageid[i:i+n], 16) \
                for i in range(0, len(imageid), n)]
            imagenameslist += [129,0] + imageid + [image['length']] + \
                              strlist(image['name']).list()
    except:
        logging.debug("Unexpected error setting default image: %s" %
                        sys.exc_info()[1])
        raise

    # print 'Entitlements: ' + str(len(nbientitlements)) + '\n' + str(nbientitlements) + '\n'
    # print imagenameslist

    # All done, pass the finalized list of NBIs for the given clientsysid back
    return nbientitlements
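
The docstring above lays out the entitlement rules in prose. The sketch below restates just the enabled/disabled system-ID checks in a condensed, hypothetical form (the dict keys mirror the ones used above; everything else is illustrative, not bsdpy's code):

import logging

logging.basicConfig(level=logging.DEBUG)

def filter_entitlements(nbisources, clientsysid):
    """Condensed restatement of the sysid rules described in the docstring above."""
    entitlements = []
    for nbi in nbisources:
        enabled, disabled = nbi['enabledsysids'], nbi['disabledsysids']
        if clientsysid in enabled and clientsysid in disabled:
            logging.debug('Duplicate sysid entries for %s - skipping %s',
                          clientsysid, nbi['description'])
        elif not enabled and not disabled:
            entitlements.append(nbi)      # no restrictions at all
        elif clientsysid in disabled:
            logging.debug('System ID %s is disabled - skipping %s',
                          clientsysid, nbi['description'])
        elif clientsysid in enabled:
            entitlements.append(nbi)      # explicitly enabled
    return entitlements

nbis = [{'description': 'Default NBI', 'enabledsysids': [], 'disabledsysids': []}]
print(filter_entitlements(nbis, 'MacBookPro11,1'))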

Example 171

Project: FaST-LMM
Source File: single_snp.py
View license
def single_snp(test_snps, pheno, K0=None,
                 K1=None, mixing=None,
                 covar=None, covar_by_chrom=None, leave_out_one_chrom=True, output_file_name=None, h2=None, log_delta=None,
                 cache_file = None, GB_goal=None, interact_with_snp=None, force_full_rank=False, force_low_rank=False, G0=None, G1=None, runner=None,
                 count_A1=None):
    """
    Function performing single SNP GWAS using cross validation over the chromosomes and REML. Will reorder and intersect IIDs as needed.
    (For backwards compatibility, you may use 'leave_out_one_chrom=False' to skip cross validation, but that is not recommended.)

    :param test_snps: SNPs to test. Can be any :class:`.SnpReader`. If you give a string, it should be the base name of a set of PLINK Bed-formatted files.
           (For backwards compatibility can also be dictionary with keys 'vals', 'iid', 'header')
    :type test_snps: a :class:`.SnpReader` or a string

    :param pheno: A single phenotype: Can be any :class:`.SnpReader`, for example, :class:`.Pheno` or :class:`.SnpData`.
           If you give a string, it should be the file name of a PLINK phenotype-formatted file.
           Any IIDs with missing values will be removed.
           (For backwards compatibility can also be dictionary with keys 'vals', 'iid', 'header')
    :type pheno: a :class:`.SnpReader` or a string

    :param K0: SNPs from which to create a similarity matrix. If not given, will use test_snps.
           Can be any :class:`.SnpReader`. If you give a string, it should be the base name of a set of PLINK Bed-formatted files.
           (When leave_out_one_chrom is False, can be a :class:`.KernelReader` or a :class:`.KernelNpz`-formatted file name.)
    :type K0: :class:`.SnpReader` or a string (or :class:`.KernelReader`)

    :param K1: SNPs from which to create a second similarity matrix, optional. (Also, see 'mixing').
           Can be any :class:`.SnpReader`. If you give a string, it should be the base name of a set of PLINK Bed-formatted files.
           (When leave_out_one_chrom is False, can be a :class:`.KernelReader` or a :class:`.KernelNpz`-formatted file name.)
    :type K1: :class:`.SnpReader` or a string (or :class:`.KernelReader`)

    :param mixing: Weight between 0.0 (inclusive, default) and 1.0 (inclusive) given to K1 relative to K0.
            If you give no mixing number and a K1 is given, the best weight will be learned.
    :type mixing: number

    :param covar: covariate information, optional: Can be any :class:`.SnpReader`, for example, :class:`.Pheno` or :class:`.SnpData`.
           If you give a string, it should be the file name of a PLINK phenotype-formatted file.
           (For backwards compatibility can also be dictionary with keys 'vals', 'iid', 'header')
    :type covar: a :class:`.SnpReader` or a string

    :param leave_out_one_chrom: Perform single SNP GWAS via cross validation over the chromosomes. Default to True.
           (Warning: setting False can cause proximal contamination.)
    :type leave_out_one_chrom: boolean
    

    :param output_file_name: Name of file to write results to, optional. If not given, no output file will be created. The output format is tab-delimited text.
    :type output_file_name: file name

    :param h2: A parameter to LMM learning, optional
            If not given will search for best value.
            If mixing is unspecified, then h2 must also be unspecified.
    :type h2: number

    :param log_delta: a re-parameterization of h2 provided for backwards compatibility. h2 is 1./(exp(log_delta)+1)
    :type log_delta: number

    :param cache_file: Name of  file to read or write cached precomputation values to, optional.
                If not given, no cache file will be used.
                If given and file does not exist, will write precomputation values to file.
                If given and file does exist, will read precomputation values from file.
                The file contains the U and S matrix from the decomposition of the training matrix. It is in Python's np.savez (\*.npz) format.
                Calls using the same cache file should have the same 'K0' and 'K1'
                If given and the file does exist then K0 and K1 need not be given.
    :type cache_file: file name

    :param GB_goal: gigabytes of memory the run should use, optional. If not given, will read the test_snps in blocks the same size as the kernel,
        which is memory efficient with little overhead on computation time.
    :type GB_goal: number

    :param interact_with_snp: index of a covariate to perform an interaction test with. 
            Allows for interaction testing (interact_with_snp x snp will be tested)
            default: None

    :param force_full_rank: Even if kernels are defined with fewer SNPs than IIDs, create an explicit iid_count x iid_count kernel. Cannot be True if force_low_rank is True.
    :type force_full_rank: Boolean

    :param force_low_rank: Even if kernels are defined with fewer IIDs than SNPs, create a low-rank iid_count x sid_count kernel. Cannot be True if force_full_rank is True.
    :type force_low_rank: Boolean

    :param G0: Same as K0. Provided for backwards compatibility. Cannot be given if K0 is given.
    :type G0: :class:`.SnpReader` or a string (or :class:`.KernelReader`)

    :param G1: Same as K1. Provided for backwards compatibility. Cannot be given if K1 is given.
    :type G1: :class:`.SnpReader` or a string (or :class:`.KernelReader`)

    :param runner: a runner, optional: Tells how to run locally, multi-processor, or on a cluster.
        If not given, the function is run locally.
    :type runner: a runner.

    :param count_A1: If it needs to read SNP data from a BED-formatted file, tells if it should count the number of A1
         alleles (the PLINK standard) or the number of A2 alleles. False is the current default, but in the future the default will change to True.
    :type count_A1: bool


    :rtype: Pandas dataframe with one row per test SNP. Columns include "PValue"



    :Example:

    >>> import logging
    >>> import numpy as np
    >>> from fastlmm.association import single_snp
    >>> from pysnptools.snpreader import Bed
    >>> logging.basicConfig(level=logging.INFO)
    >>> pheno_fn = "../feature_selection/examples/toydata.phe"
    >>> results_dataframe = single_snp(test_snps="../feature_selection/examples/toydata.5chrom", pheno=pheno_fn)
    >>> print results_dataframe.iloc[0].SNP,round(results_dataframe.iloc[0].PValue,7),len(results_dataframe)
    null_576 1e-07 10000


    """
    t0 = time.time()
    if force_full_rank and force_low_rank:
        raise Exception("Can't force both full rank and low rank")

    assert test_snps is not None, "test_snps must be given as input"
    test_snps = _snps_fixup(test_snps, count_A1=count_A1)
    pheno = _pheno_fixup(pheno, count_A1=count_A1).read()
    assert pheno.sid_count == 1, "Expect pheno to be just one variable"
    pheno = pheno[(pheno.val==pheno.val)[:,0],:]
    covar = _pheno_fixup(covar, iid_if_none=pheno.iid, count_A1=count_A1)

    if not leave_out_one_chrom:
        assert covar_by_chrom is None, "When 'leave_out_one_chrom' is False, 'covar_by_chrom' must be None"
        K0 = _kernel_fixup(K0 or G0 or test_snps, iid_if_none=test_snps.iid, standardizer=Unit(),count_A1=count_A1)
        K1 = _kernel_fixup(K1 or G1, iid_if_none=test_snps.iid, standardizer=Unit(),count_A1=count_A1)
        K0, K1, test_snps, pheno, covar  = pstutil.intersect_apply([K0, K1, test_snps, pheno, covar])
        logging.debug("# of iids now {0}".format(K0.iid_count))
        K0, K1, block_size = _set_block_size(K0, K1, mixing, GB_goal, force_full_rank, force_low_rank)

        frame =  _internal_single(K0=K0, test_snps=test_snps, pheno=pheno,
                                    covar=covar, K1=K1,
                                    mixing=mixing, h2=h2, log_delta=log_delta,
                                    cache_file = cache_file, force_full_rank=force_full_rank,force_low_rank=force_low_rank,
                                    output_file_name=output_file_name,block_size=block_size, interact_with_snp=interact_with_snp,
                                    runner=runner)
        sid_index_range = IntRangeSet(frame['sid_index'])
        assert sid_index_range == (0,test_snps.sid_count), "Some SNP rows are missing from the output"
    else: 
        chrom_list = list(set(test_snps.pos[:,0])) # find the set of all chroms mentioned in test_snps, the main testing data
        assert not np.isnan(chrom_list).any(), "chrom list should not contain NaN"
        input_files = [test_snps, pheno, K0, G0, K1, G1, covar] + ([] if covar_by_chrom is None else covar_by_chrom.values())

        def nested_closure(chrom):
            test_snps_chrom = test_snps[:,test_snps.pos[:,0]==chrom]
            covar_chrom = _create_covar_chrom(covar, covar_by_chrom, chrom)
            cache_file_chrom = None if cache_file is None else cache_file + ".{0}".format(chrom)

            K0_chrom = _K_per_chrom(K0 or G0 or test_snps, chrom, test_snps.iid)
            K1_chrom = _K_per_chrom(K1 or G1, chrom, test_snps.iid)

            K0_chrom, K1_chrom, test_snps_chrom, pheno_chrom, covar_chrom  = pstutil.intersect_apply([K0_chrom, K1_chrom, test_snps_chrom, pheno, covar_chrom])
            logging.debug("# of iids now {0}".format(K0_chrom.iid_count))
            K0_chrom, K1_chrom, block_size = _set_block_size(K0_chrom, K1_chrom, mixing, GB_goal, force_full_rank, force_low_rank)

            distributable = _internal_single(K0=K0_chrom, test_snps=test_snps_chrom, pheno=pheno_chrom,
                                        covar=covar_chrom, K1=K1_chrom,
                                        mixing=mixing, h2=h2, log_delta=log_delta, cache_file=cache_file_chrom,
                                        force_full_rank=force_full_rank,force_low_rank=force_low_rank,
                                        output_file_name=None, block_size=block_size, interact_with_snp=interact_with_snp,
                                        runner=Local())
            
            return distributable

        def reducer_closure(frame_sequence):
            frame = pd.concat(frame_sequence)
            frame.sort_values(by="PValue", inplace=True)
            frame.index = np.arange(len(frame))
            if output_file_name is not None:
                frame.to_csv(output_file_name, sep="\t", index=False)
            logging.info("PhenotypeName\t{0}".format(pheno.sid[0]))
            logging.info("SampleSize\t{0}".format(test_snps.iid_count))
            logging.info("SNPCount\t{0}".format(test_snps.sid_count))
            logging.info("Runtime\t{0}".format(time.time()-t0))

            return frame

        frame = map_reduce(chrom_list,
                   mapper = nested_closure,
                   reducer = reducer_closure,
                   input_files = input_files,
                   output_files = [output_file_name],
                   name = "single_snp (leave_out_one_chrom), out='{0}'".format(output_file_name),
                   runner = runner)

    return frame
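
Both branches above report progress through logging.debug (for example the "# of iids now ..." messages inside the per-chromosome closure), so those lines only appear when the caller raises the log level. A minimal, hypothetical call pattern (file names are placeholders):

import logging

# DEBUG instead of the INFO level used in the docstring example makes the
# per-chromosome logging.debug() messages visible.
logging.basicConfig(level=logging.DEBUG,
                    format="%(asctime)s %(levelname)s %(message)s")

# from fastlmm.association import single_snp
# results = single_snp(test_snps="toydata.5chrom", pheno="toydata.phe")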

Example 172

Project: pytrainer
Source File: waypointeditor.py
View license
	def createHtml(self,default_waypoint=None):
		logging.debug(">>")
		tmpdir = self.pytrainer_main.profile.tmpdir
		filename = tmpdir+"/waypointeditor.html"
	
		points = self.waypoint.getAllWaypoints()
		londef = 0
		latdef = 0
		content = """

<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
    "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml"  xmlns:v="urn:schemas-microsoft-com:vml">
  <head>
    <meta http-equiv="content-type" content="text/html; charset=utf-8"/>
    <title>edit waypoints</title>

    <script id="googleapiimport" src="http://maps.google.com/maps/api/js?sensor=false"
            type="text/javascript"></script>
    <script type="text/javascript">
"""
		i = 0
		arrayjs = ""
		if default_waypoint is None and points: 
			default_waypoint = points[0][0]
		for point in points:
			if point[0] == default_waypoint:
				londef = point[2]
				latdef = point[1]
			content += "lon = '%f';\n"%point[2]
			content += "lat = '%f';\n"%point[1]
			content += "name = '%s';\n"%point[6]
			content += "description = '%s';\n"%point[4]
			content += "sym = '%s';\n"%point[7]
			content += "id = '%d';\n"%point[0]
			content += """waypoint%d = Array (lon,lat,name,description,sym,id);\n"""%i
			if i>0:
				arrayjs+=","
			arrayjs +="waypoint%d"%i
			i = i+1
		content += """waypointList = Array (%s);\n""" %arrayjs
		content += """ 
	is_addmode = 0;
    //<![CDATA[

	function addWaypoint(lon,lat) {
		document.title = "call:addWaypoint(" + lon + "," + lat + ")";
  		}  	
	
	function updateWaypoint(lon,lat,id) {
		document.title = "call:updateWaypoint(" + lon + "," + lat + "," + id + ")"; 
  		}  	

	function createMarker(waypoint) {
		var lon = waypoint[0];
		var lat = waypoint[1];
		var id = waypoint[5];
		var sym = waypoint[4];
		
		var point = new GLatLng(lat,lon);
		var text = "<b>"+waypoint[2]+"</b><br/>"+waypoint[3];

		var icon = new GIcon();
		if (sym=="Summit") {
			icon.image = \""""+os.path.abspath(self.data_path)+"""/glade/summit.png\";
			}
		else {
			icon.image = \""""+os.path.abspath(self.data_path)+"""/glade/waypoint.png\";
			}
		icon.iconSize = new GSize(32, 32);
		icon.iconAnchor = new GPoint(16, 16);
		icon.infoWindowAnchor = new GPoint(5, 1);
		
		var markerD = new GMarker(point, {icon:icon, draggable: true}); 
		map.addOverlay(markerD);

		markerD.enableDragging();

		GEvent.addListener(markerD, "mouseup", function(){
			position = markerD.getPoint();
			updateWaypoint(position.lng(),position.lat(),id);
		});
  		return markerD;
		}

	function load() {
		if (GBrowserIsCompatible()) {
			//Dibujamos el mapa
			map = new GMap2(document.getElementById("map"));
        		map.addControl(new GLargeMapControl());
        		map.addControl(new GMapTypeControl());
			map.addControl(new GScaleControl());
	"""
		if londef != 0:
        		content +="""
				lon = %s;
				lat = %s;
				""" %(londef,latdef)
		else:
			 content += """
				lon = 0;
				lat = 0;
				"""
		content +="""
			map.setCenter(new GLatLng(lat, lon), 11);

			//Dibujamos el minimapa
			ovMap=new GOverviewMapControl();
			map.addControl(ovMap);
			mini=ovMap.getOverviewMap();

			//Dibujamos los waypoints
			for (i=0; i<waypointList.length; i++){
  				createMarker(waypointList[i]);
				map.enableDragging();
				}

			//Preparamos los eventos para anadir nuevos waypoints
			GEvent.addListener(map, "click", function(marker, point) {
    				if (is_addmode==1){
					map.enableDragging();
					//map.addOverlay(new GMarker(point));
					var lon = point.lng();
					var lat = point.lat();
				
					var waypoint_id = addWaypoint(lon,lat);
					var waypoint = Array (lon,lat,"","","",waypoint_id);
  					createMarker(waypoint);
					is_addmode = 0;
					}
				});
      			}
    		}	

	function addmode(){
		is_addmode = 1;
		map.disableDragging();
		}

    //]]>
    </script>
<style>
.form {
	position: absolute;
	top: 200px;
	left: 300px;
	background: #ffffff;
	}
</style>

  </head>
  <body onload="load()" onunload="GUnload()" style="cursor:crosshair" border=0>
    		<div id="map" style="width: 100%; height: 460px; top: 0px; left: 0px"></div>
    		<div id="addButton" style="position: absolute; top: 32px;left: 86px;">
			<input type="button" value="New Waypoint" onclick="javascript:addmode();">
		</div>


  </body>
</html>
"""
		file = fileUtils(filename,content)
		file.run()
		logging.debug("<<")
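
createHtml brackets its work with logging.debug(">>") on entry and logging.debug("<<") on exit, a tracing convention used throughout pytrainer. A small, hypothetical decorator gives the same effect without repeating the calls in every method:

import functools
import logging

logging.basicConfig(level=logging.DEBUG)

def trace(func):
    """Log '>>' on entry and '<<' on exit, mirroring the convention above."""
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        logging.debug(">> %s", func.__name__)
        try:
            return func(*args, **kwargs)
        finally:
            logging.debug("<< %s", func.__name__)
    return wrapper

@trace
def create_html():
    return "<html></html>"

create_html()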

Example 173

Project: tp-libvirt
Source File: virsh_volume.py
View license
def run(test, params, env):
    """
    1. Create a pool
    2. Create n number of volumes(vol-create-as)
    3. Check the volume details from the following commands
       vol-info
       vol-key
       vol-list
       vol-name
       vol-path
       vol-pool
       qemu-img info
    4. Delete the volume and check in vol-list
    5. Repeat the steps for number of volumes given
    6. Delete the pool and target
    TODO: Handle negative testcases
    """

    def delete_volume(expected_vol):
        """
        Deletes Volume
        """
        pool_name = expected_vol['pool_name']
        vol_name = expected_vol['name']
        pv = libvirt_storage.PoolVolume(pool_name)
        if not pv.delete_volume(vol_name):
            raise error.TestFail("Delete volume failed: %s" % vol_name)
        else:
            logging.debug("Volume: %s successfully deleted on pool: %s",
                          vol_name, pool_name)

    def get_vol_list(pool_name, vol_name):
        """
        Parse the volume list
        """
        output = virsh.vol_list(pool_name, "--details")
        rg = re.compile(
            r'^(\S+)\s+(\S+)\s+(\S+)\s+(\d+.\d+\s\S+)\s+(\d+.\d+.*)')
        vol = {}
        vols = []
        volume_detail = None
        for line in output.stdout.splitlines():
            match = re.search(rg, line.lstrip())
            if match is not None:
                vol['name'] = match.group(1)
                vol['path'] = match.group(2)
                vol['type'] = match.group(3)
                vol['capacity'] = match.group(4)
                vol['allocation'] = match.group(5)
                vols.append(vol)
                vol = {}
        for volume in vols:
            if volume['name'] == vol_name:
                volume_detail = volume
        return volume_detail

    def norm_capacity(capacity):
        """
        Normalize the capacity values to bytes
        """
        # Normalize all values to bytes
        norm_capacity = {}
        des = {'B': 'B', 'bytes': 'B', 'b': 'B', 'kib': 'K',
               'KiB': 'K', 'K': 'K', 'k': 'K', 'KB': 'K',
               'mib': 'M', 'MiB': 'M', 'M': 'M', 'm': 'M',
               'MB': 'M', 'gib': 'G', 'GiB': 'G', 'G': 'G',
               'g': 'G', 'GB': 'G', 'Gb': 'G', 'tib': 'T',
               'TiB': 'T', 'TB': 'T', 'T': 'T', 't': 'T'
               }
        val = {'B': 1,
               'K': 1024,
               'M': 1048576,
               'G': 1073741824,
               'T': 1099511627776
               }

        reg_list = re.compile(r'(\S+)\s(\S+)')
        match_list = re.search(reg_list, capacity['list'])
        if match_list is not None:
            mem_value = float(match_list.group(1))
            norm = val[des[match_list.group(2)]]
            norm_capacity['list'] = int(mem_value * norm)
        else:
            raise error.TestFail("Error in parsing capacity value in"
                                 " virsh vol-list")

        match_info = re.search(reg_list, capacity['info'])
        if match_info is not None:
            mem_value = float(match_info.group(1))
            norm = val[des[match_info.group(2)]]
            norm_capacity['info'] = int(mem_value * norm)
        else:
            raise error.TestFail("Error in parsing capacity value "
                                 "in virsh vol-info")

        norm_capacity['qemu_img'] = capacity['qemu_img']
        norm_capacity['xml'] = int(capacity['xml'])

        return norm_capacity

    def check_vol(expected, avail=True):
        """
        Checks the expected volume details with actual volume details from
        vol-dumpxml
        vol-list
        vol-info
        vol-key
        vol-path
        qemu-img info
        """
        error_count = 0

        pv = libvirt_storage.PoolVolume(expected['pool_name'])
        vol_exists = pv.volume_exists(expected['name'])
        if vol_exists:
            if not avail:
                error_count += 1
                logging.error("Expect volume %s not exists but find it",
                              expected['name'])
                return error_count
        else:
            if avail:
                error_count += 1
                logging.error("Expect volume %s exists but not find it",
                              expected['name'])
                return error_count
            else:
                logging.info("Volume %s checked successfully for deletion",
                             expected['name'])
                return error_count

        actual_list = get_vol_list(expected['pool_name'], expected['name'])
        actual_info = pv.volume_info(expected['name'])
        # Get values from vol-dumpxml
        volume_xml = vol_xml.VolXML.new_from_vol_dumpxml(expected['name'],
                                                         expected['pool_name'])

        # Check against virsh vol-key
        vol_key = virsh.vol_key(expected['name'], expected['pool_name'])
        if vol_key.stdout.strip() != volume_xml.key:
            logging.error("Volume key is mismatch \n%s"
                          "Key from xml: %s\nKey from command: %s",
                          expected['name'], volume_xml.key, vol_key)
            error_count += 1
        else:
            logging.debug("virsh vol-key for volume: %s successfully"
                          " checked against vol-dumpxml", expected['name'])

        # Check against virsh vol-name
        get_vol_name = virsh.vol_name(expected['path'])
        if get_vol_name.stdout.strip() != expected['name']:
            logging.error("Volume name mismatch\n"
                          "Expected name: %s\nOutput of vol-name: %s",
                          expected['name'], get_vol_name)

        # Check against virsh vol-path
        vol_path = virsh.vol_path(expected['name'], expected['pool_name'])
        if expected['path'] != vol_path.stdout.strip():
            logging.error("Volume path mismatch for volume: %s\n"
                          "Expected path: %s\nOutput of vol-path: %s\n",
                          expected['name'],
                          expected['path'], vol_path)
            error_count += 1
        else:
            logging.debug("virsh vol-path for volume: %s successfully checked"
                          " against created volume path", expected['name'])

        # Check path against virsh vol-list
        if expected['path'] != actual_list['path']:
            logging.error("Volume path mismatch for volume:%s\n"
                          "Expected Path: %s\nPath from virsh vol-list: %s",
                          expected['name'], expected['path'],
                          actual_list['path'])
            error_count += 1
        else:
            logging.debug("Path of volume: %s from virsh vol-list "
                          "successfully checked against created "
                          "volume path", expected['name'])

        # Check path against virsh vol-dumpxml
        if expected['path'] != volume_xml.path:
            logging.error("Volume path mismatch for volume: %s\n"
                          "Expected Path: %s\nPath from virsh vol-dumpxml: %s",
                          expected['name'], expected['path'], volume_xml.path)
            error_count += 1

        else:
            logging.debug("Path of volume: %s from virsh vol-dumpxml "
                          "successfully checked against created volume path",
                          expected['name'])

        # Check type against virsh vol-list
        if expected['type'] != actual_list['type']:
            logging.error("Volume type mismatch for volume: %s\n"
                          "Expected Type: %s\n Type from vol-list: %s",
                          expected['name'], expected['type'],
                          actual_list['type'])
            error_count += 1
        else:
            logging.debug("Type of volume: %s from virsh vol-list "
                          "successfully checked against the created "
                          "volume type", expected['name'])

        # Check type against virsh vol-info
        if expected['type'] != actual_info['Type']:
            logging.error("Volume type mismatch for volume: %s\n"
                          "Expected Type: %s\n Type from vol-info: %s",
                          expected['name'], expected['type'],
                          actual_info['Type'])
            error_count += 1
        else:
            logging.debug("Type of volume: %s from virsh vol-info successfully"
                          " checked against the created volume type",
                          expected['name'])

        # Check name against virsh vol-info
        if expected['name'] != actual_info['Name']:
            logging.error("Volume name mismatch for volume: %s\n"
                          "Expected name: %s\n Name from vol-info: %s",
                          expected['name'],
                          expected['name'], actual_info['Name'])
            error_count += 1
        else:
            logging.debug("Name of volume: %s from virsh vol-info successfully"
                          " checked against the created volume name",
                          expected['name'])

        # Check format from against qemu-img info
        img_info = utils_misc.get_image_info(expected['path'])
        if expected['format']:
            if expected['format'] != img_info['format']:
                logging.error("Volume format mismatch for volume: %s\n"
                              "Expected format: %s\n"
                              "Format from qemu-img info: %s",
                              expected['name'], expected['format'],
                              img_info['format'])
                error_count += 1
            else:
                logging.debug("Format of volume: %s from qemu-img info "
                              "checked successfully against the created "
                              "volume format", expected['name'])

        # Check format against vol-dumpxml
        if expected['format']:
            if expected['format'] != volume_xml.format:
                logging.error("Volume format mismatch for volume: %s\n"
                              "Expected format: %s\n"
                              "Format from vol-dumpxml: %s",
                              expected['name'], expected['format'],
                              volume_xml.format)
                error_count += 1
            else:
                logging.debug("Format of volume: %s from virsh vol-dumpxml "
                              "checked successfully against the created"
                              " volume format", expected['name'])

        logging.info(expected['encrypt_format'])
        # Check encrypt against vol-dumpxml
        if expected['encrypt_format']:
            # As the 'default' format will change to a specific value (qcow),
            # just output it here
            logging.debug("Encryption format of volume '%s' is: %s",
                          expected['name'], volume_xml.encryption.format)
            # And also output encryption secret uuid
            secret_uuid = volume_xml.encryption.secret['uuid']
            logging.debug("Encryption secret of volume '%s' is: %s",
                          expected['name'], secret_uuid)
            if expected['encrypt_secret']:
                if expected['encrypt_secret'] != secret_uuid:
                    logging.error("Encryption secret mismatch for volume: %s\n"
                                  "Expected secret uuid: %s\n"
                                  "Secret uuid from vol-dumpxml: %s",
                                  expected['name'], expected['encrypt_secret'],
                                  secret_uuid)
                    error_count += 1
                else:
                    # If no encryption secret value was set, one is automatically
                    # generated at the time of volume creation
                    logging.debug("Volume encryption secret is %s", secret_uuid)

        # Check pool name against vol-pool
        vol_pool = virsh.vol_pool(expected['path'])
        if expected['pool_name'] != vol_pool.stdout.strip():
            logging.error("Pool name mismatch for volume: %s against"
                          "virsh vol-pool", expected['name'])
            error_count += 1
        else:
            logging.debug("Pool name of volume: %s checked successfully"
                          " against the virsh vol-pool", expected['name'])

        norm_cap = {}
        capacity = {}
        capacity['list'] = actual_list['capacity']
        capacity['info'] = actual_info['Capacity']
        capacity['xml'] = volume_xml.capacity
        capacity['qemu_img'] = img_info['vsize']
        norm_cap = norm_capacity(capacity)
        delta_size = params.get('delta_size', "1024")
        if abs(expected['capacity'] - norm_cap['list']) > delta_size:
            logging.error("Capacity mismatch for volume: %s against virsh"
                          " vol-list\nExpected: %s\nActual: %s",
                          expected['name'], expected['capacity'],
                          norm_cap['list'])
            error_count += 1
        else:
            logging.debug("Capacity value checked successfully against"
                          " virsh vol-list for volume %s", expected['name'])

        if abs(expected['capacity'] - norm_cap['info']) > delta_size:
            logging.error("Capacity mismatch for volume: %s against virsh"
                          " vol-info\nExpected: %s\nActual: %s",
                          expected['name'], expected['capacity'],
                          norm_cap['info'])
            error_count += 1
        else:
            logging.debug("Capacity value checked successfully against"
                          " virsh vol-info for volume %s", expected['name'])

        if abs(expected['capacity'] - norm_cap['xml']) > delta_size:
            logging.error("Capacity mismatch for volume: %s against virsh"
                          " vol-dumpxml\nExpected: %s\nActual: %s",
                          expected['name'], expected['capacity'],
                          norm_cap['xml'])
            error_count += 1
        else:
            logging.debug("Capacity value checked successfully against"
                          " virsh vol-dumpxml for volume: %s",
                          expected['name'])

        if abs(expected['capacity'] - norm_cap['qemu_img']) > delta_size:
            logging.error("Capacity mismatch for volume: %s against "
                          "qemu-img info\nExpected: %s\nActual: %s",
                          expected['name'], expected['capacity'],
                          norm_cap['qemu_img'])
            error_count += 1
        else:
            logging.debug("Capacity value checked successfully against"
                          " qemu-img info for volume: %s",
                          expected['name'])
        return error_count

    def get_all_secrets():
        """
        Return all exist libvirt secrets uuid in a list
        """
        secret_list = []
        secrets = virsh.secret_list().stdout.strip()
        for secret in secrets.splitlines()[2:]:
            secret_list.append(secret.strip().split()[0])
        return secret_list

    # Initialize the variables
    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    pool_target = params.get("pool_target")
    if os.path.dirname(pool_target) == "":
        pool_target = os.path.join(test.tmpdir, pool_target)
    vol_name = params.get("volume_name")
    vol_number = int(params.get("number_of_volumes", "2"))
    capacity = params.get("volume_size", "1048576")
    allocation = params.get("volume_allocation", "1048576")
    vol_format = params.get("volume_format")
    source_name = params.get("gluster_source_name", "gluster-vol1")
    source_path = params.get("gluster_source_path", "/")
    encrypt_format = params.get("vol_encrypt_format")
    encrypt_secret = params.get("encrypt_secret")
    emulated_image = params.get("emulated_image")
    emulated_image_size = params.get("emulated_image_size")
    if not libvirt_version.version_compare(1, 0, 0):
        if pool_type == "gluster":
            raise error.TestNAError("Gluster pool is not supported in current"
                                    " libvirt version.")

    try:
        str_capa = utils_misc.normalize_data_size(capacity, "B")
        int_capa = int(str(str_capa).split('.')[0])
    except ValueError:
        raise error.TestError("Translate size %s to 'B' failed" % capacity)
    try:
        str_capa = utils_misc.normalize_data_size(allocation, "B")
        int_allo = int(str(str_capa).split('.')[0])
    except ValueError:
        raise error.TestError("Translate size %s to 'B' failed" % allocation)

    # Stop multipathd to avoid pool start failures (for fs-like pools, the newly
    # added disk may be in use by device-mapper, so starting the pool would
    # report a "disk already mounted" error).
    multipathd = service.Factory.create_service("multipathd")
    multipathd_status = multipathd.status()
    if multipathd_status:
        multipathd.stop()

    # Get exists libvirt secrets before test
    ori_secrets = get_all_secrets()
    expected_vol = {}
    vol_type = 'file'
    if pool_type in ['disk', 'logical']:
        vol_type = 'block'
    if pool_type == 'gluster':
        vol_type = 'network'
    logging.debug("Debug:\npool_name:%s\npool_type:%s\npool_target:%s\n"
                  "vol_name:%s\nvol_number:%s\ncapacity:%s\nallocation:%s\n"
                  "vol_format:%s", pool_name, pool_type, pool_target,
                  vol_name, vol_number, capacity, allocation, vol_format)

    libv_pvt = utlv.PoolVolumeTest(test, params)
    # Run Testcase
    total_err_count = 0
    try:
        # Create a new pool
        libv_pvt.pre_pool(pool_name=pool_name,
                          pool_type=pool_type,
                          pool_target=pool_target,
                          emulated_image=emulated_image,
                          image_size=emulated_image_size,
                          source_name=source_name,
                          source_path=source_path)
        for i in range(vol_number):
            volume_name = "%s_%d" % (vol_name, i)
            expected_vol['pool_name'] = pool_name
            expected_vol['pool_type'] = pool_type
            expected_vol['pool_target'] = pool_target
            expected_vol['capacity'] = int_capa
            expected_vol['allocation'] = int_allo
            expected_vol['format'] = vol_format
            expected_vol['name'] = volume_name
            expected_vol['type'] = vol_type
            expected_vol['encrypt_format'] = encrypt_format
            expected_vol['encrypt_secret'] = encrypt_secret
            # Creates volume
            if pool_type != "gluster":
                expected_vol['path'] = pool_target + '/' + volume_name
                new_volxml = vol_xml.VolXML()
                new_volxml.name = volume_name
                new_volxml.capacity = int_capa
                new_volxml.allocation = int_allo
                if vol_format:
                    new_volxml.format = vol_format
                encrypt_dict = {}
                if encrypt_format:
                    encrypt_dict.update({"format": encrypt_format})
                if encrypt_secret:
                    encrypt_dict.update({"secret": {'uuid': encrypt_secret}})
                if encrypt_dict:
                    new_volxml.encryption = new_volxml.new_encryption(**encrypt_dict)
                logging.debug("Volume XML for creation:\n%s", str(new_volxml))
                virsh.vol_create(pool_name, new_volxml.xml, debug=True)
            else:
                ip_addr = utlv.get_host_ipv4_addr()
                expected_vol['path'] = "gluster://%s/%s/%s" % (ip_addr,
                                                               source_name,
                                                               volume_name)
                utils.run("qemu-img create -f %s %s %s" % (vol_format,
                                                           expected_vol['path'],
                                                           capacity))
            virsh.pool_refresh(pool_name)
            # Check volumes
            total_err_count += check_vol(expected_vol)
            # Delete volume and check for results
            delete_volume(expected_vol)
            total_err_count += check_vol(expected_vol, False)
        if total_err_count > 0:
            raise error.TestFail("Get %s errors when checking volume" % total_err_count)
    finally:
        # Clean up
        for sec in get_all_secrets():
            if sec not in ori_secrets:
                virsh.secret_undefine(sec)
        try:
            libv_pvt.cleanup_pool(pool_name, pool_type, pool_target,
                                  emulated_image, source_name=source_name)
        except error.TestFail, detail:
            logging.error(str(detail))
        if multipathd_status:
            multipathd.start()
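
norm_capacity above maps unit suffixes to byte multipliers so that the sizes reported by virsh vol-list, vol-info, vol-dumpxml and qemu-img can be compared in one unit. A stripped-down, hypothetical version of that conversion:

import logging
import re

logging.basicConfig(level=logging.DEBUG)

UNITS = {'B': 1, 'K': 1024, 'M': 1024 ** 2, 'G': 1024 ** 3, 'T': 1024 ** 4}
ALIASES = {'bytes': 'B', 'KiB': 'K', 'KB': 'K', 'MiB': 'M', 'MB': 'M',
           'GiB': 'G', 'GB': 'G', 'TiB': 'T', 'TB': 'T'}

def to_bytes(text):
    """Convert strings such as '1.00 GiB' to bytes (sketch of the logic above)."""
    match = re.search(r'(\S+)\s+(\S+)', text)
    if match is None:
        raise ValueError("cannot parse capacity: %r" % text)
    value = float(match.group(1))
    unit = ALIASES.get(match.group(2), match.group(2))
    result = int(value * UNITS[unit])
    logging.debug("normalized %r to %d bytes", text, result)
    return result

print(to_bytes("1.00 GiB"))  # 1073741824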

Example 174

Project: allianceauth
Source File: authenticator.py
View license
def do_main_program():
    #
    # --- Authenticator implementation
    #    All of this has to go in here so we can correctly daemonize the tool
    #    without losing the file descriptors opened by the Ice module
    slicedir = Ice.getSliceDir()
    if not slicedir:
        slicedir = ["-I/usr/share/Ice/slice", "-I/usr/share/slice"]
    else:
        slicedir = ['-I' + slicedir]
    Ice.loadSlice('', slicedir + [cfg.ice.slice])
    import Murmur

    class allianceauthauthenticatorApp(Ice.Application):
        def run(self, args):
            self.shutdownOnInterrupt()

            if not self.initializeIceConnection():
                return 1

            if cfg.ice.watchdog > 0:
                self.failedWatch = True
                self.checkConnection()

            # Serve till we are stopped
            self.communicator().waitForShutdown()
            self.watchdog.cancel()

            if self.interrupted():
                warning('Caught interrupt, shutting down')

            threadDB.disconnect()
            return 0

        def initializeIceConnection(self):
            """
            Establishes the two-way Ice connection and adds the authenticator to the
            configured servers
            """
            ice = self.communicator()

            if cfg.ice.secret:
                debug('Using shared ice secret')
                ice.getImplicitContext().put("secret", cfg.ice.secret)
            elif not cfg.glacier.enabled:
                warning('Consider using an ice secret to improve security')

            if cfg.glacier.enabled:
                # info('Connecting to Glacier2 server (%s:%d)', glacier_host, glacier_port)
                error('Glacier support not implemented yet')
                # TODO: Implement this

            info('Connecting to Ice server (%s:%d)', cfg.ice.host, cfg.ice.port)
            base = ice.stringToProxy('Meta:tcp -h %s -p %d' % (cfg.ice.host, cfg.ice.port))
            self.meta = Murmur.MetaPrx.uncheckedCast(base)

            adapter = ice.createObjectAdapterWithEndpoints('Callback.Client', 'tcp -h %s' % cfg.ice.host)
            adapter.activate()

            metacbprx = adapter.addWithUUID(metaCallback(self))
            self.metacb = Murmur.MetaCallbackPrx.uncheckedCast(metacbprx)

            authprx = adapter.addWithUUID(allianceauthauthenticator())
            self.auth = Murmur.ServerUpdatingAuthenticatorPrx.uncheckedCast(authprx)

            return self.attachCallbacks()

        def attachCallbacks(self, quiet=False):
            """
            Attaches all callbacks for meta and authenticators
            """

            # Ice.ConnectionRefusedException
            # debug('Attaching callbacks')
            try:
                if not quiet: info('Attaching meta callback')

                self.meta.addCallback(self.metacb)

                for server in self.meta.getBootedServers():
                    if not cfg.murmur.servers or server.id() in cfg.murmur.servers:
                        if not quiet: info('Setting authenticator for virtual server %d', server.id())
                        server.setAuthenticator(self.auth)

            except (Murmur.InvalidSecretException, Ice.UnknownUserException, Ice.ConnectionRefusedException) as e:
                if isinstance(e, Ice.ConnectionRefusedException):
                    error('Server refused connection')
                elif isinstance(e, Murmur.InvalidSecretException) or \
                                isinstance(e, Ice.UnknownUserException) and (
                                    e.unknown == 'Murmur::InvalidSecretException'):
                    error('Invalid ice secret')
                else:
                    # We do not actually want to handle this one, re-raise it
                    raise e

                self.connected = False
                return False

            self.connected = True
            return True

        def checkConnection(self):
            """
            Tries to reapply all callbacks to make sure the authenticator
            survives server restarts and disconnects.
            """
            # debug('Watchdog run')

            try:
                if not self.attachCallbacks(quiet=not self.failedWatch):
                    self.failedWatch = True
                else:
                    self.failedWatch = False
            except Ice.Exception as e:
                error('Failed connection check, will retry in next watchdog run (%ds)', cfg.ice.watchdog)
                debug(str(e))
                self.failedWatch = True

            # Renew the timer
            self.watchdog = Timer(cfg.ice.watchdog, self.checkConnection)
            self.watchdog.start()

    def checkSecret(func):
        """
        Decorator that checks whether the server transmitted the right secret
        if a secret is supposed to be used.
        """
        if not cfg.ice.secret:
            return func

        def newfunc(*args, **kws):
            if 'current' in kws:
                current = kws["current"]
            else:
                current = args[-1]

            if not current or 'secret' not in current.ctx or current.ctx['secret'] != cfg.ice.secret:
                error('Server transmitted invalid secret. Possible injection attempt.')
                raise Murmur.InvalidSecretException()

            return func(*args, **kws)

        return newfunc

    def fortifyIceFu(retval=None, exceptions=(Ice.Exception,)):
        """
        Decorator that catches exceptions, logs them and returns a safe retval
        value. This helps prevent the authenticator from getting stuck in
        critical code paths. Only exceptions that are instances of classes
        given in the exceptions list are not caught.
        
        The default is to catch all non-Ice exceptions.
        """

        def newdec(func):
            def newfunc(*args, **kws):
                try:
                    return func(*args, **kws)
                except Exception as e:
                    catch = True
                    for ex in exceptions:
                        if isinstance(e, ex):
                            catch = False
                            break

                    if catch:
                        critical('Unexpected exception caught')
                        exception(e)
                        return retval
                    raise

            return newfunc

        return newdec

    class metaCallback(Murmur.MetaCallback):
        def __init__(self, app):
            Murmur.MetaCallback.__init__(self)
            self.app = app

        @fortifyIceFu()
        @checkSecret
        def started(self, server, current=None):
            """
            This function is called when a virtual server is started
            and makes sure an authenticator gets attached if needed.
            """
            if not cfg.murmur.servers or server.id() in cfg.murmur.servers:
                info('Setting authenticator for virtual server %d', server.id())
                try:
                    server.setAuthenticator(app.auth)
                # Apparently this server was restarted without us noticing
                except (Murmur.InvalidSecretException, Ice.UnknownUserException) as e:
                    if hasattr(e, "unknown") and e.unknown != "Murmur::InvalidSecretException":
                        # Special handling for Murmur 1.2.2 servers with invalid slice files
                        raise e

                    error('Invalid ice secret')
                    return
            else:
                debug('Virtual server %d got started', server.id())

        @fortifyIceFu()
        @checkSecret
        def stopped(self, server, current=None):
            """
            This function is called when a virtual server is stopped
            """
            if self.app.connected:
                # Only try to output the server id if we think we are still connected to prevent
                # flooding of our thread pool
                try:
                    if not cfg.murmur.servers or server.id() in cfg.murmur.servers:
                        info('Authenticated virtual server %d got stopped', server.id())
                    else:
                        debug('Virtual server %d got stopped', server.id())
                    return
                except Ice.ConnectionRefusedException:
                    self.app.connected = False

            debug('Server shutdown stopped a virtual server')

    if cfg.user.reject_on_error:  # Python 2.4 compat
        authenticateFortifyResult = (-1, None, None)
    else:
        authenticateFortifyResult = (-2, None, None)

    class allianceauthauthenticator(Murmur.ServerUpdatingAuthenticator):
        texture_cache = {}

        def __init__(self):
            Murmur.ServerUpdatingAuthenticator.__init__(self)

        @fortifyIceFu(authenticateFortifyResult)
        @checkSecret
        def authenticate(self, name, pw, certlist, certhash, strong, current=None):
            """
            This function is called to authenticate a user
            """

            # Search for the user in the database
            FALL_THROUGH = -2
            AUTH_REFUSED = -1

            if name == 'SuperUser':
                debug('Forced fall through for SuperUser')
                return (FALL_THROUGH, None, None)

            try:
                sql = 'SELECT id, pwhash, groups FROM %sservices_mumbleuser WHERE username = %%s' % cfg.database.prefix
                cur = threadDB.execute(sql, [name])
            except threadDbException:
                return (FALL_THROUGH, None, None)

            res = cur.fetchone()
            cur.close()
            if not res:
                info('Fall through for unknown user "%s"', name)
                return (FALL_THROUGH, None, None)

            uid, upwhash, ugroups = res

            if ugroups:
                groups = ugroups.split(',')
            else:
                groups = []

            if allianceauth_check_hash(pw, upwhash):
                info('User authenticated: "%s" (%d)', name, uid + cfg.user.id_offset)
                debug('Group memberships: %s', str(groups))
                return (uid + cfg.user.id_offset, entity_decode(name), groups)

            info('Failed authentication attempt for user: "%s" (%d)', name, uid + cfg.user.id_offset)
            return (AUTH_REFUSED, None, None)

        @fortifyIceFu((False, None))
        @checkSecret
        def getInfo(self, id, current=None):
            """
            Gets called to fetch user specific information
            """

            # We do not expose any additional information so always fall through
            debug('getInfo for %d -> denied', id)
            return (False, None)

        @fortifyIceFu(-2)
        @checkSecret
        def nameToId(self, name, current=None):
            """
            Gets called to get the id for a given username
            """

            FALL_THROUGH = -2
            if name == 'SuperUser':
                debug('nameToId SuperUser -> forced fall through')
                return FALL_THROUGH

            try:
                sql = 'SELECT id FROM %sservices_mumbleuser WHERE username = %%s' % cfg.database.prefix
                cur = threadDB.execute(sql, [name])
            except threadDbException:
                return FALL_THROUGH

            res = cur.fetchone()
            cur.close()
            if not res:
                debug('nameToId %s -> ?', name)
                return FALL_THROUGH

            debug('nameToId %s -> %d', name, (res[0] + cfg.user.id_offset))
            return res[0] + cfg.user.id_offset

        @fortifyIceFu("")
        @checkSecret
        def idToName(self, id, current=None):
            """
            Gets called to get the username for a given id
            """

            FALL_THROUGH = ""
            # Make sure the ID is in our range and transform it to the actual smf user id
            if id < cfg.user.id_offset:
                return FALL_THROUGH
            bbid = id - cfg.user.id_offset

            # Fetch the user from the database
            try:
                sql = 'SELECT username FROM %sservices_mumbleuser WHERE id = %%s' % cfg.database.prefix
                cur = threadDB.execute(sql, [bbid])
            except threadDbException:
                return FALL_THROUGH

            res = cur.fetchone()
            cur.close()
            if res:
                if res[0] == 'SuperUser':
                    debug('idToName %d -> "SuperUser" caught', id)
                    return FALL_THROUGH

                debug('idToName %d -> "%s"', id, res[0])
                return res[0]

            debug('idToName %d -> ?', id)
            return FALL_THROUGH

        @fortifyIceFu("")
        @checkSecret
        def idToTexture(self, id, current=None):
            """
            Gets called to get the corresponding texture for a user
            """

            FALL_THROUGH = ""

            debug('idToTexture "%s" -> fall through', id)
            return FALL_THROUGH

        @fortifyIceFu(-2)
        @checkSecret
        def registerUser(self, name, current=None):
            """
            Gets called when the server is asked to register a user.
            """

            FALL_THROUGH = -2
            debug('registerUser "%s" -> fall through', name)
            return FALL_THROUGH

        @fortifyIceFu(-1)
        @checkSecret
        def unregisterUser(self, id, current=None):
            """
            Gets called when the server is asked to unregister a user.
            """

            FALL_THROUGH = -1
            # Return -1 to fall through to the internal server database; we will not modify
            # the smf database, but we can make Murmur delete all additional information it
            # got this way.
            debug('unregisterUser %d -> fall through', id)
            return FALL_THROUGH

        @fortifyIceFu({})
        @checkSecret
        def getRegisteredUsers(self, filter, current=None):
            """
            Returns a list of usernames in the AllianceAuth database which contain
            filter as a substring.
            """

            if not filter:
                filter = '%'

            try:
                sql = 'SELECT id, username FROM %sservices_mumbleuser WHERE username LIKE %%s' % cfg.database.prefix
                cur = threadDB.execute(sql, [filter])
            except threadDbException:
                return {}

            res = cur.fetchall()
            cur.close()
            if not res:
                debug('getRegisteredUsers -> empty list for filter "%s"', filter)
                return {}
            debug('getRegisteredUsers -> %d results for filter "%s"', len(res), filter)
            return dict([(a + cfg.user.id_offset, b) for a, b in res])

        @fortifyIceFu(-1)
        @checkSecret
        def setInfo(self, id, info, current=None):
            """
            Gets called when the server is supposed to save additional information
            about a user to his database
            """

            FALL_THROUGH = -1
            # Return -1 to fall through to the internal server handler. We must not modify
            # the smf database, so the additional information is stored in Murmur's database.
            debug('setInfo %d -> fall through', id)
            return FALL_THROUGH

        @fortifyIceFu(-1)
        @checkSecret
        def setTexture(self, id, texture, current=None):
            """
            Gets called when the server is asked to update the user texture of a user
            """

            FALL_THROUGH = -1

            debug('setTexture %d -> fall through', id)
            return FALL_THROUGH

    class CustomLogger(Ice.Logger):
        """
        Logger implementation to pipe Ice log messages into
        our own log
        """

        def __init__(self):
            Ice.Logger.__init__(self)
            self._log = getLogger('Ice')

        def _print(self, message):
            self._log.info(message)

        def trace(self, category, message):
            self._log.debug('Trace %s: %s', category, message)

        def warning(self, message):
            self._log.warning(message)

        def error(self, message):
            self._log.error(message)

    #
    # --- Start of authenticator
    #
    info('Starting AllianceAuth mumble authenticator')
    initdata = Ice.InitializationData()
    initdata.properties = Ice.createProperties([], initdata.properties)
    for prop, val in cfg.iceraw:
        initdata.properties.setProperty(prop, val)

    initdata.properties.setProperty('Ice.ImplicitContext', 'Shared')
    initdata.properties.setProperty('Ice.Default.EncodingVersion', '1.0')
    initdata.logger = CustomLogger()

    app = allianceauthauthenticatorApp()
    state = app.main(sys.argv[:1], initData=initdata)
    info('Shutdown complete')
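
All of the calls above pass their arguments separately, e.g. debug('Virtual server %d got started', server.id()), instead of pre-formatting the message; logging only interpolates the arguments if a handler actually emits the record. A minimal standalone sketch of that convention (the 'authenticator' logger name and the server_id value are hypothetical stand-ins):

import logging

logging.basicConfig(level=logging.DEBUG,
                    format='%(asctime)s %(levelname)s %(name)s: %(message)s')
log = logging.getLogger('authenticator')

server_id = 1  # hypothetical stand-in for server.id()

# The '%d' placeholder is only interpolated when the record is emitted,
# so the call stays cheap when DEBUG logging is disabled.
log.debug('Virtual server %d got started', server_id)
log.info('Authenticated virtual server %d got stopped', server_id)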

Example 175

Project: pyqso
Source File: preferences_dialog.py
View license
    def __init__(self):
        logging.debug("Setting up the Records page of the preferences dialog...")

        Gtk.VBox.__init__(self, spacing=2)

        # Remember that the have_config conditional in the PyQSO class may be out-of-date the next time the user opens up the preferences dialog
        # because a configuration file may have been created after launching the application. Let's check to see if one exists again...
        config = configparser.ConfigParser()
        have_config = (config.read(PREFERENCES_FILE) != [])

        self.sources = {}

        # Autocomplete frame
        frame = Gtk.Frame()
        frame.set_label("Autocomplete")
        vbox = Gtk.VBox()
        self.sources["AUTOCOMPLETE_BAND"] = Gtk.CheckButton("Autocomplete the Band field")
        (section, option) = ("records", "autocomplete_band")
        if(have_config and config.has_option(section, option)):
            self.sources["AUTOCOMPLETE_BAND"].set_active(config.get(section, option) == "True")
        else:
            self.sources["AUTOCOMPLETE_BAND"].set_active(True)
        vbox.pack_start(self.sources["AUTOCOMPLETE_BAND"], False, False, 2)

        self.sources["USE_UTC"] = Gtk.CheckButton("Use UTC when autocompleting the Date and Time")
        (section, option) = ("records", "use_utc")
        if(have_config and config.has_option(section, option)):
            self.sources["USE_UTC"].set_active(config.get(section, option) == "True")
        else:
            self.sources["USE_UTC"].set_active(True)
        vbox.pack_start(self.sources["USE_UTC"], False, False, 2)

        frame.add(vbox)
        self.pack_start(frame, False, False, 2)

        # Default values frame
        frame = Gtk.Frame()
        frame.set_label("Default values")
        vbox = Gtk.VBox()

        # Mode
        hbox_temp = Gtk.HBox()
        label = Gtk.Label("Mode: ")
        label.set_width_chars(17)
        label.set_alignment(0, 0.5)
        hbox_temp.pack_start(label, False, False, 2)

        self.sources["DEFAULT_MODE"] = Gtk.ComboBoxText()
        for mode in sorted(MODES.keys()):
            self.sources["DEFAULT_MODE"].append_text(mode)
        (section, option) = ("records", "default_mode")
        if(have_config and config.has_option(section, option)):
            mode = config.get(section, option)
        else:
            mode = ""
        self.sources["DEFAULT_MODE"].set_active(sorted(MODES.keys()).index(mode))
        self.sources["DEFAULT_MODE"].connect("changed", self._on_mode_changed)
        hbox_temp.pack_start(self.sources["DEFAULT_MODE"], False, False, 2)
        vbox.pack_start(hbox_temp, False, False, 2)

        # Submode
        hbox_temp = Gtk.HBox()
        label = Gtk.Label("Submode: ")
        label.set_width_chars(17)
        label.set_alignment(0, 0.5)
        hbox_temp.pack_start(label, False, False, 2)

        self.sources["DEFAULT_SUBMODE"] = Gtk.ComboBoxText()
        for submode in MODES[mode]:
            self.sources["DEFAULT_SUBMODE"].append_text(submode)
        (section, option) = ("records", "default_submode")
        if(have_config and config.has_option(section, option)):
            submode = config.get(section, option)
        else:
            submode = ""
        self.sources["DEFAULT_SUBMODE"].set_active(MODES[mode].index(submode))
        hbox_temp.pack_start(self.sources["DEFAULT_SUBMODE"], False, False, 2)
        vbox.pack_start(hbox_temp, False, False, 2)

        # Power
        hbox_temp = Gtk.HBox()
        label = Gtk.Label("TX Power (W): ")
        label.set_width_chars(17)
        label.set_alignment(0, 0.5)
        hbox_temp.pack_start(label, False, False, 2)

        self.sources["DEFAULT_POWER"] = Gtk.Entry()
        (section, option) = ("records", "default_power")
        if(have_config and config.has_option(section, option)):
            self.sources["DEFAULT_POWER"].set_text(config.get(section, option))
        else:
            self.sources["DEFAULT_POWER"].set_text("")
        hbox_temp.pack_start(self.sources["DEFAULT_POWER"], False, False, 2)
        vbox.pack_start(hbox_temp, False, False, 2)

        frame.add(vbox)
        self.pack_start(frame, False, False, 2)

        # Callsign lookup frame
        frame = Gtk.Frame()
        frame.set_label("Callsign lookup")
        vbox = Gtk.VBox()

        # Callsign database
        hbox_temp = Gtk.HBox()
        label = Gtk.Label("Database: ")
        label.set_width_chars(17)
        label.set_alignment(0, 0.5)
        hbox_temp.pack_start(label, False, False, 2)

        self.sources["CALLSIGN_DATABASE"] = Gtk.ComboBoxText()
        callsign_database = ["", "qrz.com", "hamqth.com"]
        for database in callsign_database:
            self.sources["CALLSIGN_DATABASE"].append_text(database)
        (section, option) = ("records", "callsign_database")
        if(have_config and config.has_option(section, option)):
            self.sources["CALLSIGN_DATABASE"].set_active(callsign_database.index(config.get(section, option)))
        else:
            self.sources["CALLSIGN_DATABASE"].set_active(callsign_database.index(""))
        hbox_temp.pack_start(self.sources["CALLSIGN_DATABASE"], False, False, 2)
        vbox.pack_start(hbox_temp, False, False, 2)

        # Login details
        subframe = Gtk.Frame()
        subframe.set_label("Login details")
        inner_vbox = Gtk.VBox()

        hbox = Gtk.HBox()
        label = Gtk.Label("Username: ")
        label.set_width_chars(9)
        label.set_alignment(0, 0.5)
        hbox.pack_start(label, False, False, 2)
        self.sources["CALLSIGN_DATABASE_USERNAME"] = Gtk.Entry()
        (section, option) = ("records", "callsign_database_username")
        if(have_config and config.has_option(section, option)):
            self.sources["CALLSIGN_DATABASE_USERNAME"].set_text(config.get(section, option))
        hbox.pack_start(self.sources["CALLSIGN_DATABASE_USERNAME"], False, False, 2)
        inner_vbox.pack_start(hbox, False, False, 2)

        hbox = Gtk.HBox()
        label = Gtk.Label("Password: ")
        label.set_width_chars(9)
        label.set_alignment(0, 0.5)
        hbox.pack_start(label, False, False, 2)
        self.sources["CALLSIGN_DATABASE_PASSWORD"] = Gtk.Entry()
        self.sources["CALLSIGN_DATABASE_PASSWORD"].set_visibility(False)  # Mask the password with the "*" character.
        (section, option) = ("records", "callsign_database_password")
        if(have_config and config.has_option(section, option)):
            password = base64.b64decode(config.get(section, option)).decode("utf-8")
            self.sources["CALLSIGN_DATABASE_PASSWORD"].set_text(password)
        hbox.pack_start(self.sources["CALLSIGN_DATABASE_PASSWORD"], False, False, 2)
        inner_vbox.pack_start(hbox, False, False, 2)

        label = Gtk.Label("Warning: Login details are currently stored as\nBase64-encoded plain text in the configuration file.")
        inner_vbox.pack_start(label, False, False, 2)

        subframe.add(inner_vbox)
        vbox.pack_start(subframe, False, False, 2)

        self.sources["IGNORE_PREFIX_SUFFIX"] = Gtk.CheckButton("Ignore callsign prefixes and/or suffixes")
        (section, option) = ("records", "ignore_prefix_suffix")
        if(have_config and config.has_option(section, option)):
            self.sources["IGNORE_PREFIX_SUFFIX"].set_active(config.get(section, option) == "True")
        else:
            self.sources["IGNORE_PREFIX_SUFFIX"].set_active(True)
        vbox.pack_start(self.sources["IGNORE_PREFIX_SUFFIX"], False, False, 2)

        frame.add(vbox)
        self.pack_start(frame, False, False, 2)

        logging.debug("Records page of the preferences dialog ready!")
        return
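
The preferences page above repeats the same have_config / config.has_option check for every widget before falling back to a default. A minimal sketch of that fallback pattern, bracketed by the same logging.debug messages; the file name and the chosen default are hypothetical:

import configparser
import logging

logging.basicConfig(level=logging.DEBUG)

PREFERENCES_FILE = "preferences.ini"  # hypothetical path for this sketch

logging.debug("Setting up the Records page of the preferences dialog...")
config = configparser.ConfigParser()
have_config = (config.read(PREFERENCES_FILE) != [])

def get_option(section, option, default):
    # Fall back to a default when the file or the option is missing,
    # mirroring the per-widget checks above.
    if have_config and config.has_option(section, option):
        return config.get(section, option)
    return default

autocomplete_band = (get_option("records", "autocomplete_band", "True") == "True")
logging.debug("Records page of the preferences dialog ready! (autocomplete_band=%s)",
              autocomplete_band)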

Example 176

Project: pyqso
Source File: record_dialog.py
View license
    def __init__(self, parent, log, index=None):
        """ Set up the layout of the record dialog, populate the various fields with the QSO details (if the record already exists), and show the dialog to the user.

        :arg parent: The parent Gtk window.
        :arg log: The log to which the record belongs (or will belong).
        :arg int index: If specified, then the dialog turns into 'edit record mode' and fills the data sources (e.g. the Gtk.Entry boxes) with the existing data in the log. If not specified (i.e. index is None), then the dialog starts off with nothing in the data sources.
        """

        logging.debug("Setting up the record dialog...")

        if(index is not None):
            title = "Edit Record %d" % index
        else:
            title = "Add Record"
        Gtk.Dialog.__init__(self, title=title, parent=parent, flags=Gtk.DialogFlags.DESTROY_WITH_PARENT, buttons=(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL, Gtk.STOCK_OK, Gtk.ResponseType.OK))

        # Check if a configuration file is present, since we might need it to set up the rest of the dialog.
        config = configparser.ConfigParser()
        have_config = (config.read(expanduser('~/.config/pyqso/preferences.ini')) != [])

        # QSO DATA FRAME
        qso_frame = Gtk.Frame()
        qso_frame.set_label("QSO Information")
        self.vbox.add(qso_frame)

        hbox_inner = Gtk.HBox(spacing=2)

        vbox_inner = Gtk.VBox(spacing=2)
        hbox_inner.pack_start(vbox_inner, True, True, 2)

        # Create label:entry pairs and store them in a dictionary
        self.sources = {}

        # CALL
        hbox_temp = Gtk.HBox(spacing=0)
        label = Gtk.Label(AVAILABLE_FIELD_NAMES_FRIENDLY["CALL"], halign=Gtk.Align.START)
        label.set_width_chars(15)
        label.set_alignment(0, 0.5)
        hbox_temp.pack_start(label, False, False, 2)
        self.sources["CALL"] = Gtk.Entry()
        self.sources["CALL"].set_width_chars(15)
        hbox_temp.pack_start(self.sources["CALL"], False, False, 2)
        icon = Gtk.Image()
        icon.set_from_stock(Gtk.STOCK_INFO, Gtk.IconSize.MENU)
        button = Gtk.Button()
        button.add(icon)
        button.connect("clicked", self.lookup_callback)  # Looks up the callsign using an online database, for callsign and station information.
        button.set_tooltip_text("Callsign lookup")
        hbox_temp.pack_start(button, True, True, 2)
        vbox_inner.pack_start(hbox_temp, False, False, 2)

        # DATE
        hbox_temp = Gtk.HBox(spacing=0)
        label = Gtk.Label(AVAILABLE_FIELD_NAMES_FRIENDLY["QSO_DATE"], halign=Gtk.Align.START)
        label.set_width_chars(15)
        label.set_alignment(0, 0.5)
        hbox_temp.pack_start(label, False, False, 2)
        self.sources["QSO_DATE"] = Gtk.Entry()
        self.sources["QSO_DATE"].set_width_chars(15)
        hbox_temp.pack_start(self.sources["QSO_DATE"], False, False, 2)
        icon = Gtk.Image()
        icon.set_from_stock(Gtk.STOCK_GO_BACK, Gtk.IconSize.MENU)
        button = Gtk.Button()
        button.add(icon)
        button.connect("clicked", self.calendar_callback)
        button.set_tooltip_text("Select date from calendar")
        hbox_temp.pack_start(button, True, True, 2)
        vbox_inner.pack_start(hbox_temp, False, False, 2)

        # TIME
        hbox_temp = Gtk.HBox(spacing=0)
        label = Gtk.Label(AVAILABLE_FIELD_NAMES_FRIENDLY["TIME_ON"], halign=Gtk.Align.START)
        label.set_alignment(0, 0.5)
        label.set_width_chars(15)
        hbox_temp.pack_start(label, False, False, 2)
        self.sources["TIME_ON"] = Gtk.Entry()
        self.sources["TIME_ON"].set_width_chars(15)
        hbox_temp.pack_start(self.sources["TIME_ON"], False, False, 2)
        icon = Gtk.Image()
        icon.set_from_stock(Gtk.STOCK_MEDIA_PLAY, Gtk.IconSize.MENU)
        button = Gtk.Button()
        button.add(icon)
        button.connect("clicked", self.set_current_datetime_callback)
        button.set_tooltip_text("Use the current time and date")
        hbox_temp.pack_start(button, True, True, 2)
        vbox_inner.pack_start(hbox_temp, False, False, 2)

        # FREQ
        hbox_temp = Gtk.HBox(spacing=0)
        label = Gtk.Label(AVAILABLE_FIELD_NAMES_FRIENDLY["FREQ"], halign=Gtk.Align.START)
        label.set_alignment(0, 0.5)
        label.set_width_chars(15)
        hbox_temp.pack_start(label, False, False, 2)
        self.sources["FREQ"] = Gtk.Entry()
        self.sources["FREQ"].set_width_chars(15)
        hbox_temp.pack_start(self.sources["FREQ"], False, False, 2)
        vbox_inner.pack_start(hbox_temp, False, False, 2)

        # BAND
        hbox_temp = Gtk.HBox(spacing=0)
        label = Gtk.Label(AVAILABLE_FIELD_NAMES_FRIENDLY["BAND"], halign=Gtk.Align.START)
        label.set_alignment(0, 0.5)
        label.set_width_chars(15)
        hbox_temp.pack_start(label, False, False, 2)

        self.sources["BAND"] = Gtk.ComboBoxText()
        for band in BANDS:
            self.sources["BAND"].append_text(band)
        self.sources["BAND"].set_active(0)  # Set an empty string as the default option.
        hbox_temp.pack_start(self.sources["BAND"], False, False, 2)
        vbox_inner.pack_start(hbox_temp, False, False, 2)

        # MODE
        hbox_temp = Gtk.HBox(spacing=0)
        label = Gtk.Label(AVAILABLE_FIELD_NAMES_FRIENDLY["MODE"])
        label.set_alignment(0, 0.5)
        label.set_width_chars(15)
        hbox_temp.pack_start(label, False, False, 2)

        self.sources["MODE"] = Gtk.ComboBoxText()
        for mode in sorted(MODES.keys()):
            self.sources["MODE"].append_text(mode)
        self.sources["MODE"].set_active(0)  # Set an empty string as the default option.
        self.sources["MODE"].connect("changed", self._on_mode_changed)
        hbox_temp.pack_start(self.sources["MODE"], False, False, 2)
        vbox_inner.pack_start(hbox_temp, False, False, 2)

        # SUBMODE
        hbox_temp = Gtk.HBox(spacing=0)
        label = Gtk.Label(AVAILABLE_FIELD_NAMES_FRIENDLY["SUBMODE"])
        label.set_alignment(0, 0.5)
        label.set_width_chars(15)
        hbox_temp.pack_start(label, False, False, 2)

        self.sources["SUBMODE"] = Gtk.ComboBoxText()
        self.sources["SUBMODE"].append_text("")
        self.sources["SUBMODE"].set_active(0)  # Set an empty string initially. As soon as the user selects a particular MODE, the available SUBMODES will appear.
        hbox_temp.pack_start(self.sources["SUBMODE"], False, False, 2)
        vbox_inner.pack_start(hbox_temp, False, False, 2)

        # POWER
        hbox_temp = Gtk.HBox(spacing=0)
        label = Gtk.Label(AVAILABLE_FIELD_NAMES_FRIENDLY["TX_PWR"], halign=Gtk.Align.START)
        label.set_alignment(0, 0.5)
        label.set_width_chars(15)
        hbox_temp.pack_start(label, False, False, 2)
        self.sources["TX_PWR"] = Gtk.Entry()
        self.sources["TX_PWR"].set_width_chars(15)
        hbox_temp.pack_start(self.sources["TX_PWR"], False, False, 2)
        vbox_inner.pack_start(hbox_temp, False, False, 2)

        vbox_inner = Gtk.VBox(spacing=2)
        hbox_inner.pack_start(Gtk.SeparatorToolItem(), False, False, 0)
        hbox_inner.pack_start(vbox_inner, True, True, 2)

        # RST_SENT
        hbox_temp = Gtk.HBox(spacing=0)
        label = Gtk.Label(AVAILABLE_FIELD_NAMES_FRIENDLY["RST_SENT"])
        label.set_alignment(0, 0.5)
        label.set_width_chars(15)
        hbox_temp.pack_start(label, False, False, 2)
        self.sources["RST_SENT"] = Gtk.Entry()
        self.sources["RST_SENT"].set_width_chars(15)
        hbox_temp.pack_start(self.sources["RST_SENT"], False, False, 2)
        vbox_inner.pack_start(hbox_temp, False, False, 2)

        # RST_RCVD
        hbox_temp = Gtk.HBox(spacing=0)
        label = Gtk.Label(AVAILABLE_FIELD_NAMES_FRIENDLY["RST_RCVD"])
        label.set_alignment(0, 0.5)
        label.set_width_chars(15)
        hbox_temp.pack_start(label, False, False, 2)
        self.sources["RST_RCVD"] = Gtk.Entry()
        self.sources["RST_RCVD"].set_width_chars(15)
        hbox_temp.pack_start(self.sources["RST_RCVD"], False, False, 2)
        vbox_inner.pack_start(hbox_temp, False, False, 2)

        # QSL_SENT
        hbox_temp = Gtk.HBox(spacing=0)
        label = Gtk.Label(AVAILABLE_FIELD_NAMES_FRIENDLY["QSL_SENT"])
        label.set_alignment(0, 0.5)
        label.set_width_chars(15)
        hbox_temp.pack_start(label, False, False, 2)
        qsl_options = ["", "Y", "N", "R", "I"]
        self.sources["QSL_SENT"] = Gtk.ComboBoxText()
        for option in qsl_options:
            self.sources["QSL_SENT"].append_text(option)
        self.sources["QSL_SENT"].set_active(0)  # Set an empty string as the default option.
        hbox_temp.pack_start(self.sources["QSL_SENT"], False, False, 2)
        vbox_inner.pack_start(hbox_temp, False, False, 2)

        # QSL_RCVD
        hbox_temp = Gtk.HBox(spacing=0)
        label = Gtk.Label(AVAILABLE_FIELD_NAMES_FRIENDLY["QSL_RCVD"])
        label.set_alignment(0, 0.5)
        label.set_width_chars(15)
        hbox_temp.pack_start(label, False, False, 2)
        qsl_options = ["", "Y", "N", "R", "I"]
        self.sources["QSL_RCVD"] = Gtk.ComboBoxText()
        for option in qsl_options:
            self.sources["QSL_RCVD"].append_text(option)
        self.sources["QSL_RCVD"].set_active(0)  # Set an empty string as the default option.
        hbox_temp.pack_start(self.sources["QSL_RCVD"], False, False, 2)
        vbox_inner.pack_start(hbox_temp, False, False, 2)

        # NOTES
        hbox_temp = Gtk.HBox(spacing=0)
        label = Gtk.Label(AVAILABLE_FIELD_NAMES_FRIENDLY["NOTES"])
        label.set_alignment(0, 0.5)
        label.set_width_chars(15)
        hbox_temp.pack_start(label, False, False, 2)
        self.textview = Gtk.TextView()
        sw = Gtk.ScrolledWindow()
        sw.set_shadow_type(Gtk.ShadowType.ETCHED_IN)
        sw.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
        sw.add(self.textview)
        self.sources["NOTES"] = self.textview.get_buffer()
        hbox_temp.pack_start(sw, True, True, 2)
        vbox_inner.pack_start(hbox_temp, True, True, 2)

        qso_frame.add(hbox_inner)

        # STATION INFORMATION FRAME
        station_frame = Gtk.Frame()
        station_frame.set_label("Station Information")
        self.vbox.add(station_frame)

        hbox_inner = Gtk.HBox(spacing=2)

        vbox_inner = Gtk.VBox(spacing=2)
        hbox_inner.pack_start(vbox_inner, True, True, 2)

        # NAME
        hbox_temp = Gtk.HBox(spacing=0)
        label = Gtk.Label(AVAILABLE_FIELD_NAMES_FRIENDLY["NAME"], halign=Gtk.Align.START)
        label.set_width_chars(15)
        label.set_alignment(0, 0.5)
        hbox_temp.pack_start(label, False, False, 2)
        self.sources["NAME"] = Gtk.Entry()
        self.sources["NAME"].set_width_chars(15)
        hbox_temp.pack_start(self.sources["NAME"], False, False, 2)
        vbox_inner.pack_start(hbox_temp, False, False, 2)

        # ADDRESS
        hbox_temp = Gtk.HBox(spacing=0)
        label = Gtk.Label(AVAILABLE_FIELD_NAMES_FRIENDLY["ADDRESS"], halign=Gtk.Align.START)
        label.set_width_chars(15)
        label.set_alignment(0, 0.5)
        hbox_temp.pack_start(label, False, False, 2)
        self.sources["ADDRESS"] = Gtk.Entry()
        self.sources["ADDRESS"].set_width_chars(15)
        hbox_temp.pack_start(self.sources["ADDRESS"], False, False, 2)
        vbox_inner.pack_start(hbox_temp, False, False, 2)

        # STATE
        hbox_temp = Gtk.HBox(spacing=0)
        label = Gtk.Label(AVAILABLE_FIELD_NAMES_FRIENDLY["STATE"], halign=Gtk.Align.START)
        label.set_width_chars(15)
        label.set_alignment(0, 0.5)
        hbox_temp.pack_start(label, False, False, 2)
        self.sources["STATE"] = Gtk.Entry()
        self.sources["STATE"].set_width_chars(15)
        hbox_temp.pack_start(self.sources["STATE"], False, False, 2)
        vbox_inner.pack_start(hbox_temp, False, False, 2)

        # COUNTRY
        hbox_temp = Gtk.HBox(spacing=0)
        label = Gtk.Label(AVAILABLE_FIELD_NAMES_FRIENDLY["COUNTRY"], halign=Gtk.Align.START)
        label.set_width_chars(15)
        label.set_alignment(0, 0.5)
        hbox_temp.pack_start(label, False, False, 2)
        self.sources["COUNTRY"] = Gtk.Entry()
        self.sources["COUNTRY"].set_width_chars(15)
        hbox_temp.pack_start(self.sources["COUNTRY"], False, False, 2)
        vbox_inner.pack_start(hbox_temp, False, False, 2)

        vbox_inner = Gtk.VBox(spacing=2)
        hbox_inner.pack_start(Gtk.SeparatorToolItem(), False, False, 0)
        hbox_inner.pack_start(vbox_inner, True, True, 2)

        # DXCC
        hbox_temp = Gtk.HBox(spacing=0)
        label = Gtk.Label(AVAILABLE_FIELD_NAMES_FRIENDLY["DXCC"], halign=Gtk.Align.START)
        label.set_width_chars(15)
        label.set_alignment(0, 0.5)
        hbox_temp.pack_start(label, False, False, 2)
        self.sources["DXCC"] = Gtk.Entry()
        self.sources["DXCC"].set_width_chars(15)
        hbox_temp.pack_start(self.sources["DXCC"], False, False, 2)
        vbox_inner.pack_start(hbox_temp, False, False, 2)

        # CQZ
        hbox_temp = Gtk.HBox(spacing=0)
        label = Gtk.Label(AVAILABLE_FIELD_NAMES_FRIENDLY["CQZ"], halign=Gtk.Align.START)
        label.set_width_chars(15)
        label.set_alignment(0, 0.5)
        hbox_temp.pack_start(label, False, False, 2)
        self.sources["CQZ"] = Gtk.Entry()
        self.sources["CQZ"].set_width_chars(15)
        hbox_temp.pack_start(self.sources["CQZ"], False, False, 2)
        vbox_inner.pack_start(hbox_temp, False, False, 2)

        # ITUZ
        hbox_temp = Gtk.HBox(spacing=0)
        label = Gtk.Label(AVAILABLE_FIELD_NAMES_FRIENDLY["ITUZ"], halign=Gtk.Align.START)
        label.set_width_chars(15)
        label.set_alignment(0, 0.5)
        hbox_temp.pack_start(label, False, False, 2)
        self.sources["ITUZ"] = Gtk.Entry()
        self.sources["ITUZ"].set_width_chars(15)
        hbox_temp.pack_start(self.sources["ITUZ"], False, False, 2)
        vbox_inner.pack_start(hbox_temp, False, False, 2)

        # IOTA
        hbox_temp = Gtk.HBox(spacing=0)
        label = Gtk.Label(AVAILABLE_FIELD_NAMES_FRIENDLY["IOTA"], halign=Gtk.Align.START)
        label.set_width_chars(15)
        label.set_alignment(0, 0.5)
        hbox_temp.pack_start(label, False, False, 2)
        self.sources["IOTA"] = Gtk.Entry()
        self.sources["IOTA"].set_width_chars(15)
        hbox_temp.pack_start(self.sources["IOTA"], False, False, 2)
        vbox_inner.pack_start(hbox_temp, False, False, 2)

        station_frame.add(hbox_inner)

        # Populate various fields, if possible.
        if(index is not None):
            # The record already exists, so display its current data in the input boxes.
            record = log.get_record_by_index(index)
            field_names = AVAILABLE_FIELD_NAMES_ORDERED
            for i in range(0, len(field_names)):
                data = record[field_names[i].lower()]
                if(data is None):
                    data = ""
                if(field_names[i] == "BAND"):
                    self.sources[field_names[i]].set_active(BANDS.index(data))
                elif(field_names[i] == "MODE"):
                    self.sources[field_names[i]].set_active(sorted(MODES.keys()).index(data))

                    submode_data = record["submode"]
                    if(submode_data is None):
                        submode_data = ""
                    self.sources["SUBMODE"].set_active(MODES[data].index(submode_data))
                elif(field_names[i] == "SUBMODE"):
                    continue
                elif(field_names[i] == "QSL_SENT" or field_names[i] == "QSL_RCVD"):
                    self.sources[field_names[i]].set_active(qsl_options.index(data))
                elif(field_names[i] == "NOTES"):
                    # Remember to put the new line escape characters back in when displaying the data in a Gtk.TextView
                    text = data.replace("\\n", "\n")
                    self.sources[field_names[i]].set_text(text)
                else:
                    self.sources[field_names[i]].set_text(data)
        else:
            # Automatically fill in the current date and time
            self.set_current_datetime_callback()

            # Set up default field values
            # Mode
            (section, option) = ("records", "default_mode")
            if(have_config and config.has_option(section, option)):
                mode = config.get(section, option)
            else:
                mode = ""
            self.sources["MODE"].set_active(sorted(MODES.keys()).index(mode))

            # Submode
            (section, option) = ("records", "default_submode")
            if(have_config and config.has_option(section, option)):
                submode = config.get(section, option)
            else:
                submode = ""
            self.sources["SUBMODE"].set_active(MODES[mode].index(submode))

            # Power
            (section, option) = ("records", "default_power")
            if(have_config and config.has_option(section, option)):
                power = config.get(section, option)
            else:
                power = ""
            self.sources["TX_PWR"].set_text(power)

            if(have_hamlib):
                # If the Hamlib module is present, then use it to fill in the Frequency field if desired.
                if(have_config and config.has_option("hamlib", "autofill") and config.has_option("hamlib", "rig_model") and config.has_option("hamlib", "rig_pathname")):
                    autofill = (config.get("hamlib", "autofill") == "True")
                    rig_model = config.get("hamlib", "rig_model")
                    rig_pathname = config.get("hamlib", "rig_pathname")
                    if(autofill):
                        # Use Hamlib (if available) to get the frequency
                        try:
                            Hamlib.rig_set_debug(Hamlib.RIG_DEBUG_NONE)
                            rig = Hamlib.Rig(Hamlib.__dict__[rig_model])  # Look up the model's numerical index in Hamlib's symbol dictionary
                            rig.set_conf("rig_pathname", rig_pathname)
                            rig.open()
                            frequency = "%.6f" % (rig.get_freq()/1.0e6)  # Converting to MHz here
                            self.sources["FREQ"].set_text(frequency)
                            rig.close()
                        except:
                            logging.error("Could not obtain Frequency data via Hamlib!")

        # Do we want PyQSO to autocomplete the Band field based on the Frequency field?
        (section, option) = ("records", "autocomplete_band")
        if(have_config and config.has_option(section, option)):
            autocomplete_band = (config.get(section, option) == "True")
            if(autocomplete_band):
                self.sources["FREQ"].connect("changed", self._autocomplete_band)
        else:
            # If no configuration file exists, autocomplete the Band field by default.
            self.sources["FREQ"].connect("changed", self._autocomplete_band)

        self.show_all()

        logging.debug("Record dialog ready!")

        return
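
The record dialog brackets its long constructor with a 'Setting up...' debug message and a '...ready!' message, and downgrades the optional Hamlib lookup failure to logging.error rather than aborting. A short sketch of that style, with a hypothetical build step standing in for the widget construction:

import logging

logging.basicConfig(level=logging.DEBUG)

def build_record_dialog(index=None):
    # Bracket a slow setup routine with debug messages so start-up can be
    # traced from the log, as the dialog above does.
    logging.debug("Setting up the record dialog...")
    title = "Edit Record %d" % index if index is not None else "Add Record"
    try:
        pass  # hypothetical rig/frequency lookup would go here
    except Exception:
        # Report optional failures without aborting, like the Hamlib branch above.
        logging.error("Could not obtain Frequency data via Hamlib!")
    logging.debug("Record dialog ready!")
    return title

build_record_dialog(index=3)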

Example 177

Project: rf_helicopter
Source File: Helicopter.py
View license
    def update(self):
        """
        Advance the Agent in the World by one step

        :return: Boolean
        """
        # Get the Current State
        location = self.current_location
        world_val = self.world.check_location(location[0],
                                              location[1])
        state = self.find_states(self.current_location)
        # Record State
        self.state_record.append(state)

        # Is the Current State an Obstacle?
        if world_val == -1:
            logging.debug(
                "------------Helicopter Crashed on the Course-----------")
            self.crashed += 1
            self.reward_sum += self.reward_crashed
            self.prev_reward = self.reward_crashed

            if self.model_version == 3:  # Neural Network
                self.ai.update_train(p_state=self.lastState,
                                     action=self.lastAction,
                                     p_reward=self.reward_no_obstacle,
                                     new_state=state,
                                     terminal=[self.reward_completed,
                                               self.reward_crashed])

            if self.lastState is not None and self.model_version != 3:
                self.ai.learn(
                    self.lastState,
                    self.lastAction,
                    self.reward_crashed,
                    state)

            self.final_location.append([self.current_location[0],
                                        self.trial_n,
                                        self.current_location[1],
                                        self.reward_sum])
            self.r_matrix.append([self.lastState,
                                  self.lastAction,
                                  self.reward_crashed])
            self.q_matrix.append([self.lastState,
                                  state,
                                  self.reward_crashed])
            self.trial_n += 1
            # Agent Crashed - Reset the world
            return False

        # Is the Current State on the Finish Line?
        if world_val == 10:
            logging.debug("-----------Helicopter Completed Course-----------")
            self.completed += 1
            self.reward_sum += self.reward_completed
            self.prev_reward = self.reward_completed

            if self.model_version == 3:  # Neural Network
                self.ai.update_train(p_state=self.lastState,
                                     action=self.lastAction,
                                     p_reward=self.reward_no_obstacle,
                                     new_state=state,
                                     terminal=[self.reward_completed,
                                               self.reward_crashed])

            if self.lastState is not None and self.model_version != 3:
                self.ai.learn(self.lastState,
                              self.lastAction,
                              self.reward_completed,
                              state)

            self.final_location.append([self.current_location[0],
                                        self.trial_n,
                                        self.current_location[1],
                                        self.reward_sum])
            self.r_matrix.append([self.lastState,
                                  self.lastAction,
                                  self.reward_completed])
            self.trial_n += 1
            # Agent Completed Course - Reset the world
            return False

        # Is the Current State in the Open? - Continue Journey
        self.reward_sum += self.reward_no_obstacle
        self.prev_reward = self.reward_no_obstacle

        if self.lastState is not None and self.model_version != 3:
            self.ai.learn(self.lastState,
                          self.lastAction,
                          self.reward_no_obstacle,
                          state)

        # Select an Action
        if self.model_version < 3:
            action = self.ai.choose_Action(state)
        else:
            action = self.ai.choose_Action(state=state,
                                           pstate=self.lastState,
                                           paction=self.lastAction,
                                           preward=self.reward_no_obstacle)

        self.r_matrix.append([self.lastState,
                              self.lastAction,
                              self.reward_no_obstacle])
        self.q_matrix.append([self.lastState,
                              state,
                              self.reward_no_obstacle])
        self.lastState = state
        self.lastAction = action
        # Move Depending on the Wind at the current location
        self.current_location = self.action_wind(world_val,
                                                 self.current_location)

        if self.current_location is None:
            return False

        # Move Depending on the Action from Q-Learning
        self.current_location = self.action_move(action,
                                                 self.current_location)
        self.new_state = state

        if self.model_version == 3:  # Neural Network
            self.ai.update_train(p_state=self.lastState,
                                 action=self.lastAction,
                                 p_reward=self.reward_no_obstacle,
                                 new_state=state,
                                 terminal=[self.completed,
                                           self.crashed])
        return True
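
update() only emits logging.debug for the two terminal transitions (crash and course completion) and signals a world reset by returning False. A stripped-down sketch of that contract, with hypothetical world values standing in for self.world.check_location():

import logging

logging.basicConfig(level=logging.DEBUG)

CRASH, FINISH = -1, 10  # hypothetical world values mirroring the example

def step(world_val):
    # Return False to ask the caller to reset the world, True to continue
    # the episode - the same contract as update() above.
    if world_val == CRASH:
        logging.debug("------------Helicopter Crashed on the Course-----------")
        return False
    if world_val == FINISH:
        logging.debug("-----------Helicopter Completed Course-----------")
        return False
    return True

for value in (0, 0, CRASH):
    if not step(value):
        break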

Example 178

Project: rbtools
Source File: git.py
View license
    def get_repository_info(self):
        """Get repository information for the current Git working tree.

        This function changes the directory to the top level directory of the
        current working tree.
        """
        if not check_install(['git', '--help']):
            # CreateProcess (launched via subprocess, used by check_install)
            # does not automatically append .cmd for things it finds in PATH.
            # If we're on Windows, and this works, save it for further use.
            if (sys.platform.startswith('win') and
                check_install(['git.cmd', '--help'])):
                self.git = 'git.cmd'
            else:
                logging.debug('Unable to execute "git --help" or "git.cmd '
                              '--help": skipping Git')
                return None

        git_dir = execute([self.git, "rev-parse", "--git-dir"],
                          ignore_errors=True).rstrip("\n")

        if git_dir.startswith("fatal:") or not os.path.isdir(git_dir):
            return None

        # Sometimes core.bare is not set, and generates an error, so ignore
        # errors. Valid values are 'true' or '1'.
        bare = execute([self.git, 'config', 'core.bare'],
                       ignore_errors=True).strip()
        self.bare = bare in ('true', '1')

        # If we are not working in a bare repository, then we will change
        # directory to the top level of the working tree and lose our original position.
        # However, we need the original working directory for file exclusion
        # patterns, so we save it here.
        if self._original_cwd is None:
            self._original_cwd = os.getcwd()

        # Running in directories other than the top level
        # of a work-tree would result in broken diffs on the server
        if not self.bare:
            git_top = execute([self.git, "rev-parse", "--show-toplevel"],
                              ignore_errors=True).rstrip("\n")

            # Top level might not work on old git versions, so we use git dir
            # to find it.
            if (git_top.startswith('fatal:') or not os.path.isdir(git_dir)
                or git_top.startswith('cygdrive')):
                git_top = git_dir

            os.chdir(os.path.abspath(git_top))

        self.head_ref = execute([self.git, 'symbolic-ref', '-q',
                                 'HEAD'], ignore_errors=True).strip()

        # We know we have something we can work with. Let's find out
        # what it is. We'll try SVN first, but only if there's a .git/svn
        # directory. Otherwise, it may attempt to create one and scan
        # revisions, which can be slow. Also skip SVN detection if the git
        # repository was specified on command line.
        git_svn_dir = os.path.join(git_dir, 'svn')

        if (not getattr(self.options, 'repository_url', None) and
            os.path.isdir(git_svn_dir) and len(os.listdir(git_svn_dir)) > 0):
            data = execute([self.git, "svn", "info"], ignore_errors=True)

            m = re.search(r'^Repository Root: (.+)$', data, re.M)

            if m:
                path = m.group(1)
                m = re.search(r'^URL: (.+)$', data, re.M)

                if m:
                    base_path = m.group(1)[len(path):] or "/"
                    m = re.search(r'^Repository UUID: (.+)$', data, re.M)

                    if m:
                        uuid = m.group(1)
                        self.type = "svn"

                        # Get SVN tracking branch
                        if getattr(self.options, 'tracking', None):
                            self.upstream_branch = self.options.tracking
                        else:
                            data = execute([self.git, "svn", "rebase", "-n"],
                                           ignore_errors=True)
                            m = re.search(r'^Remote Branch:\s*(.+)$', data,
                                          re.M)

                            if m:
                                self.upstream_branch = m.group(1)
                            else:
                                sys.stderr.write('Failed to determine SVN '
                                                 'tracking branch. Defaulting '
                                                 'to "master"\n')
                                self.upstream_branch = 'master'

                        return SVNRepositoryInfo(path=path,
                                                 base_path=base_path,
                                                 uuid=uuid,
                                                 supports_parent_diffs=True)
            else:
                # Versions of git-svn before 1.5.4 don't (appear to) support
                # 'git svn info'.  If we fail because of an older git install,
                # here, figure out what version of git is installed and give
                # the user a hint about what to do next.
                version = execute([self.git, "svn", "--version"],
                                  ignore_errors=True)
                version_parts = re.search('version (\d+)\.(\d+)\.(\d+)',
                                          version)
                svn_remote = execute(
                    [self.git, "config", "--get", "svn-remote.svn.url"],
                    ignore_errors=True)

                if (version_parts and svn_remote and
                    not is_valid_version((int(version_parts.group(1)),
                                          int(version_parts.group(2)),
                                          int(version_parts.group(3))),
                                         (1, 5, 4))):
                    die("Your installation of git-svn must be upgraded to "
                        "version 1.5.4 or later")

        # Okay, maybe Perforce (git-p4).
        git_p4_ref = os.path.join(git_dir, 'refs', 'remotes', 'p4', 'master')
        if os.path.exists(git_p4_ref):
            data = execute([self.git, 'config', '--get', 'git-p4.port'],
                           ignore_errors=True)
            m = re.search(r'(.+)', data)
            if m:
                port = m.group(1)
            else:
                port = os.getenv('P4PORT')

            if port:
                self.type = 'perforce'
                self.upstream_branch = 'remotes/p4/master'
                return RepositoryInfo(path=port,
                                      base_path='',
                                      supports_parent_diffs=True)

        # Nope, it's git then.
        # Check for a tracking branch and determine merge-base
        self.upstream_branch = ''
        if self.head_ref:
            short_head = self._strip_heads_prefix(self.head_ref)
            merge = execute([self.git, 'config', '--get',
                             'branch.%s.merge' % short_head],
                            ignore_errors=True).strip()
            remote = execute([self.git, 'config', '--get',
                              'branch.%s.remote' % short_head],
                             ignore_errors=True).strip()

            merge = self._strip_heads_prefix(merge)

            if remote and remote != '.' and merge:
                self.upstream_branch = '%s/%s' % (remote, merge)

        url = None
        if getattr(self.options, 'repository_url', None):
            url = self.options.repository_url
            self.upstream_branch = self.get_origin(self.upstream_branch,
                                                   True)[0]
        else:
            self.upstream_branch, origin_url = \
                self.get_origin(self.upstream_branch, True)

            if not origin_url or origin_url.startswith("fatal:"):
                self.upstream_branch, origin_url = self.get_origin()

            url = origin_url.rstrip('/')

            # Central bare repositories don't have origin URLs.
            # We return git_dir instead and hope for the best.
            if not url:
                url = os.path.abspath(git_dir)

                # There is no remote, so skip this part of upstream_branch.
                self.upstream_branch = self.upstream_branch.split('/')[-1]

        if url:
            self.type = "git"
            return RepositoryInfo(path=url, base_path='',
                                  supports_parent_diffs=True)
        return None
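
get_repository_info() probes for a usable git before doing any work and logs a debug explanation when it falls through with None. A hedged sketch of that probe-and-fall-through pattern using only the standard library (shutil.which and subprocess stand in for rbtools' check_install() and execute() helpers):

import logging
import shutil
import subprocess

logging.basicConfig(level=logging.DEBUG)

def detect_git_dir():
    # Probe for the tool first and explain the fall-through in the log,
    # as the rbtools client above does.
    if shutil.which('git') is None:
        logging.debug('Unable to execute "git --help": skipping Git')
        return None
    result = subprocess.run(['git', 'rev-parse', '--git-dir'],
                            capture_output=True, text=True)
    git_dir = result.stdout.rstrip('\n')
    if result.returncode != 0 or git_dir.startswith('fatal:'):
        return None
    return git_dir

print(detect_git_dir())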

Example 179

View license
def run(test, params, env):
    """
    Test disk attachment of multiple disks.

    1.Prepare test environment, destroy VMs.
    2.Perform 'qemu-img create' operation.
    3.Edit disks xml and start the domains.
    4.Perform test operation.
    5.Recover test environment.
    6.Confirm the test result.
    """

    def set_vm_controller_xml(vmxml):
        """
        Set VM scsi controller xml.

        :param vmxml. Domain xml object.
        """
        # Add disk scsi controller
        scsi_controller = Controller("controller")
        scsi_controller.type = "scsi"
        scsi_controller.index = "0"
        scsi_controller.model = "virtio-scsi"
        vmxml.add_device(scsi_controller)

        # Redefine domain
        vmxml.sync()

    def get_vm_disk_xml(dev_type, dev_name, **options):
        """
        Create a disk xml object and return it.

        :param dev_type. Disk type.
        :param dev_name. Disk device name.
        :param options. Disk options.
        :return: Disk xml object.
        """
        # Create disk xml
        disk_xml = Disk(type_name=dev_type)
        disk_xml.device = options["disk_device"]
        if options.has_key("sgio") and options["sgio"] != "":
            disk_xml.sgio = options["sgio"]
            disk_xml.device = "lun"
            disk_xml.rawio = "no"

        if dev_type == "block":
            disk_attr = "dev"
        else:
            disk_attr = "file"

        disk_xml.target = {'dev': options["target"],
                           'bus': options["bus"]}
        disk_xml.source = disk_xml.new_disk_source(
            **{'attrs': {disk_attr: dev_name}})

        # Add driver options from parameters.
        driver_dict = {"name": "qemu"}
        if options.has_key("driver"):
            for driver_option in options["driver"].split(','):
                if driver_option != "":
                    d = driver_option.split('=')
                    logging.debug("disk driver option: %s=%s", d[0], d[1])
                    driver_dict.update({d[0].strip(): d[1].strip()})

        disk_xml.driver = driver_dict
        if options.has_key("share"):
            if options["share"] == "shareable":
                disk_xml.share = True

        if options.has_key("readonly"):
            if options["readonly"] == "readonly":
                disk_xml.readonly = True

        logging.debug("The disk xml is: %s" % disk_xml.xmltreefile)

        return disk_xml

    vm_names = params.get("vms").split()
    if len(vm_names) < 2:
        raise error.TestNAError("No multi vms provided.")

    # Disk specific attributes.
    vms_sgio = params.get("virt_disk_vms_sgio", "").split()
    vms_share = params.get("virt_disk_vms_share", "").split()
    vms_readonly = params.get("virt_disk_vms_readonly", "").split()
    disk_bus = params.get("virt_disk_bus", "virtio")
    disk_target = params.get("virt_disk_target", "vdb")
    disk_type = params.get("virt_disk_type", "file")
    disk_device = params.get("virt_disk_device", "disk")
    disk_format = params.get("virt_disk_format", "")
    scsi_options = params.get("scsi_options", "")
    disk_driver_options = params.get("disk_driver_options", "")
    hotplug = "yes" == params.get("virt_disk_vms_hotplug", "no")
    status_error = params.get("status_error").split()
    test_error_policy = "yes" == params.get("virt_disk_test_error_policy",
                                            "no")
    test_shareable = "yes" == params.get("virt_disk_test_shareable", "no")
    test_readonly = "yes" == params.get("virt_disk_test_readonly", "no")
    disk_source_path = test.tmpdir
    disk_path = ""
    tmp_filename = "cdrom_te.tmp"
    tmp_readonly_file = ""

    # Backup vm xml files.
    vms_backup = []
    # We just use 2 VMs for testing.
    for i in range(2):
        vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_names[i])
        vms_backup.append(vmxml_backup)

    try:
        # Create disk images if needed.
        disks = []
        if disk_format == "scsi":
            disk_source = libvirt.create_scsi_disk(scsi_options)
            if not disk_source:
                raise error.TestNAError("Get scsi disk failed.")
            disks.append({"format": "scsi", "source": disk_source})

        elif disk_format == "iscsi":
            # Create iscsi device if needed.
            image_size = params.get("image_size", "100M")
            disk_source = libvirt.setup_or_cleanup_iscsi(
                is_setup=True, is_login=True, image_size=image_size)
            logging.debug("iscsi dev name: %s", disk_source)
            # Format the disk and make the file system.
            libvirt.mk_part(disk_source, size="10M")
            libvirt.mkfs("%s1" % disk_source, "ext3")
            disk_source += "1"
            disks.append({"format": disk_format,
                          "source": disk_source})
        elif disk_format in ["raw", "qcow2"]:
            disk_path = "%s/test.%s" % (disk_source_path, disk_format)
            disk_source = libvirt.create_local_disk("file", disk_path, "1",
                                                    disk_format=disk_format)
            libvirt.mkfs(disk_source, "ext3")
            disks.append({"format": disk_format,
                          "source": disk_source})

        if disk_device == "cdrom":
            tmp_readonly_file = "/root/%s" % tmp_filename
            with open(tmp_readonly_file, 'w') as f:
                f.write("teststring\n")
            disk_path = "%s/test.iso" % disk_source_path
            disk_source = libvirt.create_local_disk("iso", disk_path, "1")
            disks.append({"source": disk_source})

        # Compose the new domain xml
        vms_list = []
        for i in range(2):
            vm = env.get_vm(vm_names[i])
            # Destroy domain first.
            if vm.is_alive():
                vm.destroy(gracefully=False)

            # Configure vm disk options and define vm
            vmxml = vm_xml.VMXML.new_from_dumpxml(vm_names[i])
            if disk_bus == "scsi":
                set_vm_controller_xml(vmxml)
            disk_sgio = ""
            if len(vms_sgio) > i:
                disk_sgio = vms_sgio[i]
            shareable = ""
            if len(vms_share) > i:
                shareable = vms_share[i]
            readonly = ""
            if len(vms_readonly) > i:
                readonly = vms_readonly[i]
            disk_xml = get_vm_disk_xml(disk_type, disk_source,
                                       sgio=disk_sgio, share=shareable,
                                       target=disk_target, bus=disk_bus,
                                       driver=disk_driver_options,
                                       disk_device=disk_device,
                                       readonly=readonly)
            if not hotplug:
                # If we are not testing hotplug,
                # add disks to domain xml and sync.
                vmxml.add_device(disk_xml)
                vmxml.sync()
            vms_list.append({"name": vm_names[i], "vm": vm,
                             "status": "yes" == status_error[i],
                             "disk": disk_xml})
            logging.debug("vms_list %s" % vms_list)

        for i in range(len(vms_list)):
            try:
                # Try to start the domain.
                vms_list[i]['vm'].start()
                # Check if VM is started as expected.
                if not vms_list[i]['status']:
                    raise error.TestFail('VM started unexpectedly.')

                session = vms_list[i]['vm'].wait_for_login()
                # If we are testing hotplug, the domain needs to be started
                # first and then the virsh attach-device command is run.
                if hotplug:
                    vms_list[i]['disk'].xmltreefile.write()
                    result = virsh.attach_device(vms_list[i]['name'],
                                                 vms_list[i]['disk'].xml).exit_status
                    os.remove(vms_list[i]['disk'].xml)

                    # Check if the return code of attach-device
                    # command is as expected.
                    if 0 != result and vms_list[i]['status']:
                        raise error.TestFail('Failed to hotplug disk device')
                    elif 0 == result and not vms_list[i]['status']:
                        raise error.TestFail('Hotplug disk device unexpectedly.')

                # Check disk error_policy option in VMs.
                if test_error_policy:
                    error_policy = vms_list[i]['disk'].driver["error_policy"]
                    if i == 0:
                        # If we are testing the enospace error policy, only one VM is used
                        if error_policy == "enospace":
                            cmd = ("mount /dev/%s /mnt && dd if=/dev/zero of=/mnt/test"
                                   " bs=1M count=2000 2>&1 | grep 'No space left'"
                                   % disk_target)
                            s, o = session.cmd_status_output(cmd)
                            logging.debug("error_policy in vm0 exit %s; output: %s", s, o)
                            if 0 != s:
                                raise error.TestFail("Test error_policy %s: cann't see"
                                                     " error messages")
                            session.close()
                            break

                        if session.cmd_status("fdisk -l /dev/%s && mount /dev/%s /mnt; ls /mnt"
                                              % (disk_target, disk_target)):
                            session.close()
                            raise error.TestFail("Test error_policy: "
                                                 "failed to mount disk")
                    if i == 1:
                        try:
                            session0 = vms_list[0]['vm'].wait_for_login(timeout=10)
                            cmd = ("fdisk -l /dev/%s && mkfs.ext3 -F /dev/%s "
                                   % (disk_target, disk_target))
                            s, o = session.cmd_status_output(cmd)
                            logging.debug("error_policy in vm1 exit %s; output: %s", s, o)
                            session.close()
                            cmd = ("dd if=/dev/zero of=/mnt/test bs=1M count=100 && dd if="
                                   "/mnt/test of=/dev/null bs=1M;dmesg | grep 'I/O error'")
                            s, o = session0.cmd_status_output(cmd)
                            logging.debug("session in vm0 exit %s; output: %s", s, o)
                            if error_policy == "report":
                                if s:
                                    raise error.TestFail("Test error_policy %s: cann't report"
                                                         " error" % error_policy)
                            elif error_policy == "ignore":
                                if 0 == s:
                                    raise error.TestFail("Test error_policy %s: error cann't"
                                                         " be ignored" % error_policy)
                            session0.close()
                        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError), e:
                            if error_policy == "stop":
                                if not vms_list[0]['vm'].is_paused():
                                    raise error.TestFail("Test error_policy %s: cann't stop"
                                                         " VM" % error_policy)
                            else:
                                logging.error(str(e))
                                raise error.TestFail("Test error_policy %s: login failed"
                                                     % error_policy)

                if test_shareable:
                    # Check shared file selinux label with type and MCS as
                    # svirt_image_t:s0
                    if disk_path:
                        se_label = utils_selinux.get_context_of_file(disk_path)
                        logging.debug("Context of shared img '%s' is '%s'" %
                                      (disk_path, se_label))
                        if "svirt_image_t:s0" not in se_label:
                            raise error.TestFail("Context of shared img is not"
                                                 " expected.")
                    if i == 1:
                        try:
                            test_str = "teststring"
                            # Try to write on vm0.
                            session0 = vms_list[0]['vm'].wait_for_login(timeout=10)
                            cmd = ("fdisk -l /dev/%s && mount /dev/%s /mnt && echo '%s' "
                                   "> /mnt/test && umount /mnt"
                                   % (disk_target, disk_target, test_str))
                            s, o = session0.cmd_status_output(cmd)
                            logging.debug("session in vm0 exit %s; output: %s", s, o)
                            if s:
                                raise error.TestFail("Test disk shareable on VM0 failed")
                            session0.close()
                            # Try to read on vm1.
                            cmd = ("fdisk -l /dev/%s && mount /dev/%s /mnt && grep %s"
                                   " /mnt/test && umount /mnt"
                                   % (disk_target, disk_target, test_str))
                            s, o = session.cmd_status_output(cmd)
                            logging.debug("session in vm1 exit %s; output: %s", s, o)
                            if s:
                                raise error.TestFail("Test disk shareable on VM1 failed")
                        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError), e:
                            logging.error(str(e))
                            raise error.TestFail("Test disk shareable: login failed")

                if test_readonly:
                    # Check shared file selinux label with type and MCS as
                    # virt_content_t:s0
                    if disk_path:
                        se_label = utils_selinux.get_context_of_file(disk_path)
                        logging.debug("Context of shared iso '%s' is '%s'" %
                                      (disk_path, se_label))
                        if "virt_content_t:s0" not in se_label:
                            raise error.TestFail("Context of shared iso is not"
                                                 " expected.")
                    if i == 1:
                        try:
                            test_str = "teststring"
                            # Try to read on vm0.
                            session0 = vms_list[0]['vm'].wait_for_login(timeout=10)
                            cmd = "mount -o ro /dev/cdrom /mnt && grep "
                            cmd += "%s /mnt/%s" % (test_str, tmp_filename)
                            s, o = session0.cmd_status_output(cmd)
                            logging.debug("session in vm0 exit %s; output: %s", s, o)
                            session0.close()
                            if s:
                                raise error.TestFail("Test file not found in VM0 cdrom")
                            # Try to read on vm1.
                            s, o = session.cmd_status_output(cmd)
                            logging.debug("session in vm1 exit %s; output: %s", s, o)
                            if s:
                                raise error.TestFail("Test file not found in VM1 cdrom")
                        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError), e:
                            logging.error(str(e))
                            raise error.TestFail("Test disk shareable: login failed")
                session.close()
            except virt_vm.VMStartError:
                if vms_list[i]['status']:
                    raise error.TestFail('VM Failed to start'
                                         ' for some reason!')
    finally:
        # Stop VMs.
        for i in range(len(vms_list)):
            if vms_list[i]['vm'].is_alive():
                vms_list[i]['vm'].destroy(gracefully=False)

        # Recover VMs.
        for vmxml_backup in vms_backup:
            vmxml_backup.sync()

        # Remove disks.
        for img in disks:
            if img.has_key('format'):
                if img["format"] == "scsi":
                    libvirt.delete_scsi_disk()
                elif img["format"] == "iscsi":
                    libvirt.setup_or_cleanup_iscsi(is_setup=False)
            elif img.has_key("source"):
                os.remove(img["source"])

        if tmp_readonly_file:
            if os.path.exists(tmp_readonly_file):
                os.remove(tmp_readonly_file)
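
The test above calls logging.debug in two styles: logging.debug("vms_list %s" % vms_list) builds the message eagerly with the % operator, while logging.debug("error_policy in vm0 exit %s; output: %s", s, o) passes the values as arguments so the logging module only interpolates them when a DEBUG record is actually emitted. A minimal sketch of the difference, using stand-in data rather than the test's objects:

import logging

logging.basicConfig(level=logging.INFO)  # DEBUG records are filtered out here

vms_list = [{"name": "vm0", "status": True}]  # stand-in data, not the test's list

# Eager: the message string is always built, then discarded by the filter.
logging.debug("vms_list %s" % vms_list)

# Deferred: interpolation only happens if a handler accepts DEBUG records.
logging.debug("vms_list %s", vms_list)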

Example 180

Project: rbtools
Source File: post.py
View license
    def post_request(self, repository_info, repository, server_url, api_root,
                     review_request_id=None, changenum=None, diff_content=None,
                     parent_diff_content=None, commit_id=None,
                     base_commit_id=None,
                     submit_as=None, retries=3, base_dir=None):
        """Creates or updates a review request, and uploads a diff.

        On success the review request id and url are returned.
        """
        supports_posting_commit_ids = \
            self.tool.capabilities.has_capability('review_requests',
                                                  'commit_ids')

        if review_request_id:
            review_request = get_review_request(
                review_request_id, api_root,
                only_fields='absolute_url,bugs_closed,id,status,public',
                only_links='diffs,draft')

            if review_request.status == 'submitted':
                raise CommandError(
                    'Review request %s is marked as %s. In order to update '
                    'it, please reopen the review request and try again.'
                    % (review_request_id, review_request.status))
        else:
            # No review_request_id, so we will create a new review request.
            try:
                # Until we are Python 2.7+ only, the keys in request_data have
                # to be bytes. See bug 3753 for details.
                request_data = {
                    b'repository': repository
                }

                if changenum:
                    request_data[b'changenum'] = changenum
                elif commit_id and supports_posting_commit_ids:
                    request_data[b'commit_id'] = commit_id

                if submit_as:
                    request_data[b'submit_as'] = submit_as

                if self.tool.can_bookmark:
                    bookmark = self.tool.get_current_bookmark()
                    request_data[b'extra_data__local_bookmark'] = bookmark
                elif self.tool.can_branch:
                    branch = self.tool.get_current_branch()
                    request_data[b'extra_data__local_branch'] = branch

                review_requests = api_root.get_review_requests(
                    only_fields='',
                    only_links='create')
                review_request = review_requests.create(**request_data)
            except APIError as e:
                if e.error_code == 204 and changenum:
                    # The change number is already in use. Get the review
                    # request for that change and update it instead.
                    rid = e.rsp['review_request']['id']
                    review_request = api_root.get_review_request(
                        review_request_id=rid,
                        only_fields='absolute_url,bugs_closed,id,status',
                        only_links='diffs,draft')
                else:
                    raise CommandError('Error creating review request: %s' % e)

        if (not repository_info.supports_changesets or
            not self.options.change_only):
            try:
                diff_kwargs = {
                    'parent_diff': parent_diff_content,
                    'base_dir': base_dir,
                }

                if (base_commit_id and
                    self.tool.capabilities.has_capability('diffs',
                                                          'base_commit_ids')):
                    # Both the Review Board server and SCMClient support
                    # base commit IDs, so pass that along when creating
                    # the diff.
                    diff_kwargs['base_commit_id'] = base_commit_id

                review_request.get_diffs(only_fields='').upload_diff(
                    diff_content, **diff_kwargs)
            except APIError as e:
                error_msg = [
                    u'Error uploading diff\n\n',
                ]

                if e.error_code == 101 and e.http_status == 403:
                    error_msg.append(
                        u'You do not have permissions to modify '
                        u'this review request\n')
                elif e.error_code == 219:
                    error_msg.append(
                        u'The generated diff file was empty. This '
                        u'usually means no files were\n'
                        u'modified in this change.\n')
                else:
                    error_msg.append(str(e).decode('utf-8') + u'\n')

                error_msg.append(
                    u'Your review request still exists, but the diff is '
                    u'not attached.\n')

                error_msg.append(u'%s\n' % review_request.absolute_url)

                raise CommandError(u'\n'.join(error_msg))

        try:
            draft = review_request.get_draft(only_fields='commit_id')
        except APIError as e:
            raise CommandError('Error retrieving review request draft: %s' % e)

        # Stamp the commit message with the review request URL before posting
        # the review, so that we can use the stamped commit message when
        # guessing the description. This enables the stamped message to be
        # present on the review if the user has chosen to publish immediately
        # upon posting.
        if self.options.stamp_when_posting:
            if not self.tool.can_amend_commit:
                print('Cannot stamp review URL onto the commit message; '
                      'stamping is not supported with %s.' % self.tool.name)

            else:
                try:
                    stamp_commit_with_review_url(self.revisions,
                                                 review_request.absolute_url,
                                                 self.tool)
                    print('Stamped review URL onto the commit message.')
                except AlreadyStampedError:
                    print('Commit message has already been stamped')
                except Exception as e:
                    logging.debug('Caught exception while stamping the '
                                  'commit message. Proceeding to post '
                                  'without stamping.', exc_info=True)
                    print('Could not stamp review URL onto the commit '
                          'message.')

        # If the user has requested to guess the summary or description,
        # get the commit message and override the summary and description
        # options. The guessing takes place after stamping so that the
        # guessed description matches the commit when rbt exits.
        if not self.options.diff_filename:
            self.check_guess_fields()

        # Update the review request draft fields based on options set
        # by the user, or configuration.
        update_fields = {}

        update_fields.update(self.options.extra_fields)

        if self.options.target_groups:
            update_fields['target_groups'] = self.options.target_groups

        if self.options.target_people:
            update_fields['target_people'] = self.options.target_people

        if self.options.depends_on:
            update_fields['depends_on'] = self.options.depends_on

        if self.options.summary:
            update_fields['summary'] = self.options.summary

        if self.options.branch:
            update_fields['branch'] = self.options.branch

        if self.options.bugs_closed:
            # Append to the existing list of bugs.
            self.options.bugs_closed = self.options.bugs_closed.strip(', ')
            bug_set = (set(re.split('[, ]+', self.options.bugs_closed)) |
                       set(review_request.bugs_closed))
            self.options.bugs_closed = ','.join(bug_set)
            update_fields['bugs_closed'] = self.options.bugs_closed

        if self.options.description:
            update_fields['description'] = self.options.description

        if self.options.testing_done:
            update_fields['testing_done'] = self.options.testing_done

        if ((self.options.description or self.options.testing_done) and
            self.options.markdown and
            self.tool.capabilities.has_capability('text', 'markdown')):
            # The user specified that their Description/Testing Done are
            # valid Markdown, so tell the server so it won't escape the text.
            update_fields['text_type'] = 'markdown'

        if self.options.publish:
            update_fields['public'] = True

            if (self.options.trivial_publish and
                self.tool.capabilities.has_capability('review_requests',
                                                      'trivial_publish')):
                update_fields['trivial'] = True

        if self.options.change_description is not None:
            if review_request.public:
                update_fields['changedescription'] = \
                    self.options.change_description

                if (self.options.markdown and
                    self.tool.capabilities.has_capability('text', 'markdown')):
                    update_fields['changedescription_text_type'] = 'markdown'
                else:
                    update_fields['changedescription_text_type'] = 'plain'
            else:
                logging.error(
                    'The change description field can only be set when '
                    'publishing an update. Use --description instead.')

        if supports_posting_commit_ids and commit_id != draft.commit_id:
            update_fields['commit_id'] = commit_id or ''

        if update_fields:
            try:
                draft = draft.update(**update_fields)
            except APIError as e:
                raise CommandError(
                    'Error updating review request draft: %s\n\n'
                    'Your review request still exists, but the diff is not '
                    'attached.\n\n'
                    '%s\n'
                    % (e, review_request.absolute_url))

        return review_request.id, review_request.absolute_url
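
post_request() above pairs a short print() for the user with logging.debug(..., exc_info=True), which attaches the full traceback to a debug-level record. A minimal sketch of that pattern; stamp_commit() here is a placeholder, not an rbtools function:

import logging

logging.basicConfig(level=logging.DEBUG)

def stamp_commit():
    # Placeholder failure standing in for stamp_commit_with_review_url().
    raise RuntimeError("nothing to stamp")

try:
    stamp_commit()
except Exception:
    # exc_info=True records the active traceback with the debug message,
    # while the console output for the user stays short.
    logging.debug("Caught exception while stamping the commit message.",
                  exc_info=True)
    print("Could not stamp review URL onto the commit message.")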

Example 181

Project: tp-libvirt
Source File: iface_options.py
View license
def run(test, params, env):
    """
    Test interface xml options.

    1. Prepare test environment, destroy or suspend a VM.
    2. Edit xml and start the domain.
    3. Perform test operation.
    4. Recover test environment.
    5. Confirm the test result.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': False}

    def create_iface_xml(iface_mac):
        """
        Create interface xml file
        """
        iface = Interface(type_name=iface_type)
        source = ast.literal_eval(iface_source)
        if source:
            iface.source = source
        iface.model = iface_model if iface_model else "virtio"
        iface.mac_address = iface_mac
        driver_dict = {}
        driver_host = {}
        driver_guest = {}
        if iface_driver:
            driver_dict = ast.literal_eval(iface_driver)
        if iface_driver_host:
            driver_host = ast.literal_eval(iface_driver_host)
        if iface_driver_guest:
            driver_guest = ast.literal_eval(iface_driver_guest)
        iface.driver = iface.new_driver(driver_attr=driver_dict,
                                        driver_host=driver_host,
                                        driver_guest=driver_guest)
        logging.debug("Create new interface xml: %s", iface)
        return iface

    def modify_iface_xml(update, status_error=False):
        """
        Modify interface xml options
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        xml_devices = vmxml.devices
        iface_index = xml_devices.index(
            xml_devices.by_device_tag("interface")[0])
        iface = xml_devices[iface_index]
        if iface_model:
            iface.model = iface_model
        else:
            del iface.model
        if iface_type:
            iface.type_name = iface_type
        del iface.source
        source = ast.literal_eval(iface_source)
        if source:
            net_ifs = utils_net.get_net_if(state="UP")
            # Check whether the source device is valid; if it's not
            # in the host interface list, try to set the source
            # device to the first active interface of the host.
            if (iface.type_name == "direct" and
                    source.has_key('dev') and
                    source['dev'] not in net_ifs):
                logging.warn("Source device %s is not a interface"
                             " of host, reset to %s",
                             source['dev'], net_ifs[0])
                source['dev'] = net_ifs[0]
            iface.source = source
        backend = ast.literal_eval(iface_backend)
        if backend:
            iface.backend = backend
        driver_dict = {}
        driver_host = {}
        driver_guest = {}
        if iface_driver:
            driver_dict = ast.literal_eval(iface_driver)
        if iface_driver_host:
            driver_host = ast.literal_eval(iface_driver_host)
        if iface_driver_guest:
            driver_guest = ast.literal_eval(iface_driver_guest)
        iface.driver = iface.new_driver(driver_attr=driver_dict,
                                        driver_host=driver_host,
                                        driver_guest=driver_guest)
        if iface.address:
            del iface.address

        logging.debug("New interface xml file: %s", iface)
        if unprivileged_user:
            # Create disk image for unprivileged user
            disk_index = xml_devices.index(
                xml_devices.by_device_tag("disk")[0])
            disk_xml = xml_devices[disk_index]
            logging.debug("source: %s", disk_xml.source)
            disk_source = disk_xml.source.attrs["file"]
            cmd = ("cp -fZ {0} {1} && chown {2}:{2} {1}"
                   "".format(disk_source, dst_disk, unprivileged_user))
            utils.run(cmd)
            disk_xml.source = disk_xml.new_disk_source(
                attrs={"file": dst_disk})
            vmxml.devices = xml_devices
            # Remove all channels to avoid permission problems
            channels = vmxml.get_devices(device_type="channel")
            for channel in channels:
                vmxml.del_device(channel)

            vmxml.xmltreefile.write()
            logging.debug("New VM xml: %s", vmxml)
            utils.run("chmod a+rw %s" % vmxml.xml)
            virsh.define(vmxml.xml, **virsh_dargs)
        # Try to modify interface xml by update-device or edit xml
        elif update:
            iface.xmltreefile.write()
            ret = virsh.update_device(vm_name, iface.xml,
                                      ignore_status=True)
            libvirt.check_exit_status(ret, status_error)
        else:
            vmxml.devices = xml_devices
            vmxml.xmltreefile.write()
            vmxml.sync()

    def check_offloads_option(if_name, driver_options, session=None):
        """
        Check interface offloads by ethtool output
        """
        offloads = {"csum": "tx-checksumming",
                    "gso": "generic-segmentation-offload",
                    "tso4": "tcp-segmentation-offload",
                    "tso6": "tx-tcp6-segmentation",
                    "ecn": "tx-tcp-ecn-segmentation",
                    "ufo": "udp-fragmentation-offload"}
        if session:
            ret, output = session.cmd_status_output("ethtool -k %s | head"
                                                    " -18" % if_name)
        else:
            out = utils.run("ethtool -k %s | head -18" % if_name)
            ret, output = out.exit_status, out.stdout
        if ret:
            raise error.TestFail("ethtool return error code")
        logging.debug("ethtool output: %s", output)
        for offload in driver_options.keys():
            if offloads.has_key(offload):
                if (output.count(offloads[offload]) and
                    not output.count("%s: %s" % (
                        offloads[offload], driver_options[offload]))):
                    raise error.TestFail("offloads option %s: %s isn't"
                                         " correct in ethtool output" %
                                         (offloads[offload],
                                          driver_options[offload]))

    def run_xml_test(iface_mac):
        """
        Test for interface options in vm xml
        """
        # Get the interface object according to the mac address
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        iface_devices = vmxml.get_devices(device_type="interface")
        iface = None
        for iface_dev in iface_devices:
            if iface_dev.mac_address == iface_mac:
                iface = iface_dev
        if not iface:
            raise error.TestFail("Can't find interface with mac"
                                 " '%s' in vm xml" % iface_mac)
        driver_dict = {}
        if iface_driver:
            driver_dict = ast.literal_eval(iface_driver)
        for driver_opt in driver_dict.keys():
            if not driver_dict[driver_opt] == iface.driver.driver_attr[driver_opt]:
                raise error.TestFail("Can't see driver option %s=%s in vm xml"
                                     % (driver_opt, driver_dict[driver_opt]))
        if iface_target:
            if (not iface.target.has_key("dev") or
                    not iface.target["dev"].startswith(iface_target)):
                raise error.TestFail("Can't see device target dev in vm xml")
            # Check macvtap mode by ip link command
            if iface_target == "macvtap" and iface.source.has_key("mode"):
                cmd = "ip -d link show %s" % iface.target["dev"]
                output = utils.run(cmd).stdout
                logging.debug("ip link output: %s", output)
                mode = iface.source["mode"]
                if mode == "passthrough":
                    mode = "passthru"
                if not output.count("macvtap  mode %s" % mode):
                    raise error.TestFail("Failed to verify macvtap mode")

    def run_cmdline_test(iface_mac):
        """
        Test for qemu-kvm command line options
        """
        cmd = ("ps -ef | grep %s | grep -v grep " % vm_name)
        ret = utils.run(cmd)
        logging.debug("Command line %s", ret.stdout)
        if test_vhost_net:
            if not ret.stdout.count("vhost=on") and not rm_vhost_driver:
                raise error.TestFail("Can't see vhost options in"
                                     " qemu-kvm command line")

        if iface_model == "virtio":
            model_option = "device virtio-net-pci"
        else:
            model_option = "device rtl8139"
        iface_cmdline = re.findall(r"%s,(.+),mac=%s" %
                                   (model_option, iface_mac), ret.stdout)
        if not iface_cmdline:
            raise error.TestFail("Can't see %s with mac %s in command"
                                 " line" % (model_option, iface_mac))

        cmd_opt = {}
        for opt in iface_cmdline[0].split(','):
            tmp = opt.rsplit("=")
            cmd_opt[tmp[0]] = tmp[1]
        logging.debug("Command line options %s", cmd_opt)

        driver_dict = {}
        # Test <driver> xml options.
        if iface_driver:
            iface_driver_dict = ast.literal_eval(iface_driver)
            for driver_opt in iface_driver_dict.keys():
                if driver_opt == "name":
                    continue
                elif driver_opt == "txmode":
                    if iface_driver_dict["txmode"] == "iothread":
                        driver_dict["tx"] = "bh"
                    else:
                        driver_dict["tx"] = iface_driver_dict["txmode"]
                elif driver_opt == "queues":
                    driver_dict["mq"] = "on"
                    driver_dict["vectors"] = str(int(
                        iface_driver_dict["queues"]) * 2 + 2)
                else:
                    driver_dict[driver_opt] = iface_driver_dict[driver_opt]
        # Test <driver><host/></driver> xml options.
        if iface_driver_host:
            driver_dict.update(ast.literal_eval(iface_driver_host))
        # Test <driver><guest/></driver> xml options.
        if iface_driver_guest:
            driver_dict.update(ast.literal_eval(iface_driver_guest))

        for driver_opt in driver_dict.keys():
            if (not cmd_opt.has_key(driver_opt) or
                    not cmd_opt[driver_opt] == driver_dict[driver_opt]):
                raise error.TestFail("Can't see option '%s=%s' in qemu-kvm "
                                     " command line" %
                                     (driver_opt, driver_dict[driver_opt]))
        if test_backend:
            guest_pid = ret.stdout.rsplit()[1]
            cmd = "lsof %s | grep %s" % (backend["tap"], guest_pid)
            if utils.system(cmd, ignore_status=True):
                raise error.TestFail("Guest process didn't open backend file"
                                     " %s" % backend["tap"])
            cmd = "lsof %s | grep %s" % (backend["vhost"], guest_pid)
            if utils.system(cmd, ignore_status=True):
                raise error.TestFail("Guest process didn't open backend file"
                                     " %s" % backend["tap"])

    def get_guest_ip(session, mac):
        """
        Wrapper function to get guest ip address
        """
        utils_net.restart_guest_network(session, mac)
        # Wait for the IP address to be ready
        utils_misc.wait_for(
            lambda: utils_net.get_guest_ip_addr(session, mac), 10)
        return utils_net.get_guest_ip_addr(session, mac)

    def check_user_network(session):
        """
        Check user network ip address on guest
        """
        vm_ips = []
        vm_ips.append(get_guest_ip(session, iface_mac_old))
        if attach_device:
            vm_ips.append(get_guest_ip(session, iface_mac))
        logging.debug("IP address on guest: %s", vm_ips)
        if len(vm_ips) != len(set(vm_ips)):
            raise error.TestFail("Duplicated IP address on guest. "
                                 "Check bug: https://bugzilla.redhat."
                                 "com/show_bug.cgi?id=1147238")

        for vm_ip in vm_ips:
            if vm_ip is None or not vm_ip.startswith("10.0.2."):
                raise error.TestFail("Found wrong IP address"
                                     " on guest")
        # Check gateway address
        gateway = utils_net.get_net_gateway(session.cmd_output)
        if gateway != "10.0.2.2":
            raise error.TestFail("The gateway on guest is not"
                                 " right")
        # Check dns server address
        ns_list = utils_net.get_net_nameserver(session.cmd_output)
        if "10.0.2.3" not in ns_list:
            raise error.TestFail("The dns server can't be found"
                                 " on guest")

    def check_mcast_network(session):
        """
        Check multicast ip address on guests
        """
        username = params.get("username")
        password = params.get("password")
        src_addr = ast.literal_eval(iface_source)['address']
        add_session = additional_vm.wait_for_serial_login(username=username,
                                                          password=password)
        vms_sess_dict = {vm_name: session,
                         additional_vm.name: add_session}

        # Check mcast address on host
        cmd = "netstat -g | grep %s" % src_addr
        if utils.run(cmd, ignore_status=True).exit_status:
            raise error.TestFail("Can't find multicast ip address"
                                 " on host")
        vms_ip_dict = {}
        # Get ip address on each guest
        for vms in vms_sess_dict.keys():
            vm_mac = vm_xml.VMXML.get_first_mac_by_name(vms)
            vm_ip = get_guest_ip(vms_sess_dict[vms], vm_mac)
            if not vm_ip:
                raise error.TestFail("Can't get multicast ip"
                                     " address on guest")
            vms_ip_dict.update({vms: vm_ip})
        if len(set(vms_ip_dict.values())) != len(vms_sess_dict):
            raise error.TestFail("Got duplicated multicast ip address")
        logging.debug("Found ips on guest: %s", vms_ip_dict)

        # Run omping server on host
        if not utils_misc.yum_install(["omping"]):
            raise error.TestError("Failed to install omping"
                                  " on host")
        cmd = ("iptables -F;omping -m %s %s" %
               (src_addr, "192.168.122.1 %s" %
                ' '.join(vms_ip_dict.values())))
        # Run a background job waiting for connections from the clients
        bgjob = utils.AsyncJob(cmd)

        # Run omping client on guests
        for vms in vms_sess_dict.keys():
            # omping should be installed first
            if not utils_misc.yum_install(["omping"], vms_sess_dict[vms]):
                raise error.TestError("Failed to install omping"
                                      " on guest")
            cmd = ("iptables -F; omping -c 5 -T 5 -m %s %s" %
                   (src_addr, "192.168.122.1 %s" %
                    vms_ip_dict[vms]))
            ret, output = vms_sess_dict[vms].cmd_status_output(cmd)
            logging.debug("omping ret: %s, output: %s", ret, output)
            if (not output.count('multicast, xmt/rcv/%loss = 5/5/0%') or
                    not output.count('unicast, xmt/rcv/%loss = 5/5/0%')):
                raise error.TestFail("omping failed on guest")
        # Kill the background job
        bgjob.kill_func()

    status_error = "yes" == params.get("status_error", "no")
    start_error = "yes" == params.get("start_error", "no")
    unprivileged_user = params.get("unprivileged_user")

    # Interface specific attributes.
    iface_type = params.get("iface_type", "network")
    iface_source = params.get("iface_source", "{}")
    iface_driver = params.get("iface_driver")
    iface_model = params.get("iface_model", "virtio")
    iface_target = params.get("iface_target")
    iface_backend = params.get("iface_backend", "{}")
    iface_driver_host = params.get("iface_driver_host")
    iface_driver_guest = params.get("iface_driver_guest")
    attach_device = params.get("attach_iface_device")
    change_option = "yes" == params.get("change_iface_options", "no")
    update_device = "yes" == params.get("update_iface_device", "no")
    additional_guest = "yes" == params.get("additional_guest", "no")
    serial_login = "yes" == params.get("serial_login", "no")
    rm_vhost_driver = "yes" == params.get("rm_vhost_driver", "no")
    test_option_cmd = "yes" == params.get(
                      "test_iface_option_cmd", "no")
    test_option_xml = "yes" == params.get(
                      "test_iface_option_xml", "no")
    test_vhost_net = "yes" == params.get(
                     "test_vhost_net", "no")
    test_option_offloads = "yes" == params.get(
                           "test_option_offloads", "no")
    test_iface_user = "yes" == params.get(
                      "test_iface_user", "no")
    test_iface_mcast = "yes" == params.get(
                       "test_iface_mcast", "no")
    test_libvirtd = "yes" == params.get("test_libvirtd", "no")
    test_guest_ip = "yes" == params.get("test_guest_ip", "no")
    test_backend = "yes" == params.get("test_backend", "no")

    if iface_driver_host or iface_driver_guest or test_backend:
        if not libvirt_version.version_compare(1, 2, 8):
            raise error.TestNAError("Offloading/backend options not "
                                    "supported in this libvirt version")
    if iface_driver and "queues" in ast.literal_eval(iface_driver):
        if not libvirt_version.version_compare(1, 0, 6):
            raise error.TestNAError("Queues options not supported"
                                    " in this libvirt version")

    if unprivileged_user:
        if not libvirt_version.version_compare(1, 1, 1):
            raise error.TestNAError("qemu-bridge-helper not supported"
                                    " on this host")
        virsh_dargs["unprivileged_user"] = unprivileged_user
        # Create unprivileged user if needed
        cmd = ("grep {0} /etc/passwd || "
               "useradd {0}".format(unprivileged_user))
        utils.run(cmd)
        # Need another disk image for unprivileged user to access
        dst_disk = "/tmp/%s.img" % unprivileged_user

    # Destroy VM first
    if vm.is_alive():
        vm.destroy(gracefully=False)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    iface_mac_old = vm_xml.VMXML.get_first_mac_by_name(vm_name)
    # iface_mac will be updated if a new interface is attached
    iface_mac = iface_mac_old
    # Additional vm for test
    additional_vm = None
    libvirtd = utils_libvirtd.Libvirtd()

    try:
        # Build the xml and run test.
        try:
            # Prepare interface backend files
            if test_backend:
                if not os.path.exists("/dev/vhost-net"):
                    utils.run("modprobe vhost-net")
                backend = ast.literal_eval(iface_backend)
                backend_tap = "/dev/net/tun"
                backend_vhost = "/dev/vhost-net"
                if not backend:
                    backend["tap"] = backend_tap
                    backend["vhost"] = backend_vhost
                if not start_error:
                    # Create backend files for normal test
                    if not os.path.exists(backend["tap"]):
                        os.rename(backend_tap, backend["tap"])
                    if not os.path.exists(backend["vhost"]):
                        os.rename(backend_vhost, backend["vhost"])
            # Edit the interface xml.
            if change_option:
                modify_iface_xml(update=False)

            if rm_vhost_driver:
                # Check vhost driver.
                kvm_version = os.uname()[2]
                driver_path = ("/lib/modules/%s/kernel/drivers/vhost/"
                               "vhost_net.ko" % kvm_version)
                driver_backup = driver_path + ".bak"
                cmd = ("modprobe -r {0}; lsmod | "
                       "grep {0}".format("vhost_net"))
                if not utils.system(cmd, ignore_status=True):
                    raise error.TestError("Failed to remove vhost_net driver")
                # Move the vhost_net driver
                if os.path.exists(driver_path):
                    os.rename(driver_path, driver_backup)
            else:
                # Load vhost_net driver by default
                cmd = "modprobe vhost_net"
                utils.system(cmd)

            # Attach an interface when the vm is shut off
            if attach_device == 'config':
                iface_mac = utils_net.generate_mac_address_simple()
                iface_xml_obj = create_iface_xml(iface_mac)
                iface_xml_obj.xmltreefile.write()
                ret = virsh.attach_device(vm_name, iface_xml_obj.xml,
                                          flagstr="--config",
                                          ignore_status=True)
                libvirt.check_exit_status(ret)

            # Clone additional vm
            if additional_guest:
                guest_name = "%s_%s" % (vm_name, '1')
                # Clone additional guest
                timeout = params.get("clone_timeout", 360)
                utils_libguestfs.virt_clone_cmd(vm_name, guest_name,
                                                True, timeout=timeout)
                additional_vm = vm.clone(guest_name)
                additional_vm.start()
                # additional_vm.wait_for_login()

            # Start the VM.
            if unprivileged_user:
                virsh.start(vm_name, **virsh_dargs)
                cmd = ("su - %s -c 'virsh console %s'"
                       % (unprivileged_user, vm_name))
                session = aexpect.ShellSession(cmd)
                session.sendline()
                remote.handle_prompts(session, params.get("username"),
                                      params.get("password"), "[\#\$]", 30)
                # Get ip address on guest
                if not get_guest_ip(session, iface_mac):
                    raise error.TestError("Can't get ip address on guest")
            else:
                # Will raise VMStartError exception if start fails
                vm.start()
                if serial_login:
                    session = vm.wait_for_serial_login()
                else:
                    session = vm.wait_for_login()
            if start_error:
                raise error.TestFail("VM started unexpectedly")

            # Attach an interface when the vm is running
            if attach_device == 'live':
                iface_mac = utils_net.generate_mac_address_simple()
                iface_xml_obj = create_iface_xml(iface_mac)
                iface_xml_obj.xmltreefile.write()
                ret = virsh.attach_device(vm_name, iface_xml_obj.xml,
                                          flagstr="--live",
                                          ignore_status=True)
                libvirt.check_exit_status(ret)
                # Need to sleep here for the attachment to take effect
                time.sleep(5)

            # Update interface options
            if update_device:
                modify_iface_xml(update=True, status_error=status_error)

            # Run tests for qemu-kvm command line options
            if test_option_cmd:
                run_cmdline_test(iface_mac)
            # Run tests for vm xml
            if test_option_xml:
                run_xml_test(iface_mac)
            # Run tests for offloads options
            if test_option_offloads:
                if iface_driver_host:
                    ifname_guest = utils_net.get_linux_ifname(
                        session, iface_mac)
                    check_offloads_option(
                        ifname_guest, ast.literal_eval(
                            iface_driver_host), session)
                if iface_driver_guest:
                    ifname_host = libvirt.get_ifname_host(vm_name,
                                                          iface_mac)
                    check_offloads_option(
                        ifname_host, ast.literal_eval(iface_driver_guest))

            if test_iface_user:
                # Test user type network
                check_user_network(session)
            if test_iface_mcast:
                # Test mcast type network
                check_mcast_network(session)
            # Check guest ip address
            if test_guest_ip:
                if not get_guest_ip(session, iface_mac):
                    raise error.TestFail("Guest can't get a"
                                         " valid ip address")

            session.close()
            # Restart libvirtd and guest, then test again
            if test_libvirtd:
                libvirtd.restart()
                vm.destroy()
                vm.start()
                if test_option_xml:
                    run_xml_test(iface_mac)

            # Detach hot/cold-plugged interface at last
            if attach_device:
                ret = virsh.detach_device(vm_name, iface_xml_obj.xml,
                                          flagstr="", ignore_status=True)
                libvirt.check_exit_status(ret)

        except virt_vm.VMStartError as e:
            logging.info(str(e))
            if not start_error:
                raise error.TestFail('VM failed to start\n%s' % e)

    finally:
        # Recover VM.
        logging.info("Restoring vm...")
        # Restore interface backend files
        if test_backend:
            if not os.path.exists(backend_tap):
                os.rename(backend["tap"], backend_tap)
            if not os.path.exists(backend_vhost):
                os.rename(backend["vhost"], backend_vhost)
        if rm_vhost_driver:
            # Restore vhost_net driver
            if os.path.exists(driver_backup):
                os.rename(driver_backup, driver_path)
        if unprivileged_user:
            virsh.remove_domain(vm_name, "--remove-all-storage",
                                **virsh_dargs)
        if additional_vm:
            virsh.remove_domain(additional_vm.name,
                                "--remove-all-storage")
            # Kill all omping server processes on host
            utils.system("pidof omping && killall omping",
                         ignore_status=True)
        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml_backup.sync()
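
Several debug calls in the example above pass whole XML wrapper objects as arguments, for instance logging.debug("Create new interface xml: %s", iface). Because the arguments are stored on the log record unformatted, str(iface) is only computed when a DEBUG record is actually rendered. A rough illustration with a stand-in class instead of the avocado-vt Interface type:

import logging

logging.basicConfig(level=logging.DEBUG)

class FakeIface(object):
    """Stand-in for an XML wrapper whose string form is costly to build."""
    def __str__(self):
        return "<interface type='network'><model type='virtio'/></interface>"

iface = FakeIface()
# __str__ runs only when the record is formatted for an enabled handler.
logging.debug("Create new interface xml: %s", iface)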

Example 182

Project: fdroidserver
Source File: checkupdates.py
View license
def checkupdates_app(app, first=True):

    # If a change is made, commitmsg should be set to a description of it.
    # Only if this is set will changes be written back to the metadata.
    commitmsg = None

    tag = None
    msg = None
    vercode = None
    noverok = False
    mode = app.UpdateCheckMode
    if mode.startswith('Tags'):
        pattern = mode[5:] if len(mode) > 4 else None
        (version, vercode, tag) = check_tags(app, pattern)
        if version == 'Unknown':
            version = tag
        msg = vercode
    elif mode == 'RepoManifest':
        (version, vercode) = check_repomanifest(app)
        msg = vercode
    elif mode.startswith('RepoManifest/'):
        tag = mode[13:]
        (version, vercode) = check_repomanifest(app, tag)
        msg = vercode
    elif mode == 'RepoTrunk':
        (version, vercode) = check_repotrunk(app)
        msg = vercode
    elif mode == 'HTTP':
        (version, vercode) = check_http(app)
        msg = vercode
    elif mode in ('None', 'Static'):
        version = None
        msg = 'Checking disabled'
        noverok = True
    else:
        version = None
        msg = 'Invalid update check method'

    if version and vercode and app.VercodeOperation:
        oldvercode = str(int(vercode))
        op = app.VercodeOperation.replace("%c", oldvercode)
        vercode = str(eval(op))
        logging.debug("Applied vercode operation: %s -> %s" % (oldvercode, vercode))

    if version and any(version.startswith(s) for s in [
            '${',  # Gradle variable names
            '@string/',  # Strings we could not resolve
            ]):
        version = "Unknown"

    updating = False
    if version is None:
        logmsg = "...{0} : {1}".format(app.id, msg)
        if noverok:
            logging.info(logmsg)
        else:
            logging.warn(logmsg)
    elif vercode == app.CurrentVersionCode:
        logging.info("...up to date")
    else:
        app.CurrentVersion = version
        app.CurrentVersionCode = str(int(vercode))
        updating = True

    commitmsg = fetch_autoname(app, tag)

    if updating:
        name = common.getappname(app)
        ver = common.getcvname(app)
        logging.info('...updating to version %s' % ver)
        commitmsg = 'Update CV of %s to %s' % (name, ver)

    if options.auto:
        mode = app.AutoUpdateMode
        if not app.CurrentVersionCode:
            logging.warn("Can't auto-update app with no current version code: " + app.id)
        elif mode in ('None', 'Static'):
            pass
        elif mode.startswith('Version '):
            pattern = mode[8:]
            if pattern.startswith('+'):
                try:
                    suffix, pattern = pattern.split(' ', 1)
                except ValueError:
                    raise MetaDataException("Invalid AUM: " + mode)
            else:
                suffix = ''
            gotcur = False
            latest = None
            for build in app.builds:
                if int(build.vercode) >= int(app.CurrentVersionCode):
                    gotcur = True
                if not latest or int(build.vercode) > int(latest.vercode):
                    latest = build

            if int(latest.vercode) > int(app.CurrentVersionCode):
                logging.info("Refusing to auto update, since the latest build is newer")

            if not gotcur:
                newbuild = copy.deepcopy(latest)
                newbuild.disable = False
                newbuild.vercode = app.CurrentVersionCode
                newbuild.version = app.CurrentVersion + suffix
                logging.info("...auto-generating build for " + newbuild.version)
                commit = pattern.replace('%v', newbuild.version)
                commit = commit.replace('%c', newbuild.vercode)
                newbuild.commit = commit
                app.builds.append(newbuild)
                name = common.getappname(app)
                ver = common.getcvname(app)
                commitmsg = "Update %s to %s" % (name, ver)
        else:
            logging.warn('Invalid auto update mode "' + mode + '" on ' + app.id)

    if commitmsg:
        metadatapath = os.path.join('metadata', app.id + '.txt')
        metadata.write_metadata(metadatapath, app)
        if options.commit:
            logging.info("Commiting update for " + metadatapath)
            gitcmd = ["git", "commit", "-m", commitmsg]
            if 'auto_author' in config:
                gitcmd.extend(['--author', config['auto_author']])
            gitcmd.extend(["--", metadatapath])
            if subprocess.call(gitcmd) != 0:
                logging.error("Git commit failed")
                sys.exit(1)
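
checkupdates_app() above interpolates its debug message eagerly: logging.debug("Applied vercode operation: %s -> %s" % (oldvercode, vercode)). Passing the values as separate arguments produces the same output but defers the formatting until it is needed; a small sketch with made-up version codes:

import logging

logging.basicConfig(level=logging.INFO)  # DEBUG is disabled in this sketch

oldvercode, vercode = "41", "42"  # made-up values

# As written in the example: the string is built whether or not DEBUG is enabled.
logging.debug("Applied vercode operation: %s -> %s" % (oldvercode, vercode))

# Deferred equivalent: formatting is skipped when DEBUG is filtered out.
logging.debug("Applied vercode operation: %s -> %s", oldvercode, vercode)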

Example 183

Project: Reactor-3
Source File: rawparse.py
View license
def create_function_map():
	FUNCTION_MAP.update({'is_family': stats.is_family,
		'name': lambda life: ' '.join(life['name']),
		'is_same_species': stats.is_same_species,
		'can_trust': judgement.can_trust,
		'is_dangerous': judgement.is_target_dangerous,
		'can_bite': stats.can_bite,
		'can_scratch': stats.can_scratch,
		'weapon_equipped_and_ready': combat.weapon_equipped_and_ready,
		'prepare_for_ranged': combat.prepare_for_ranged,
		'explore_unknown_chunks': survival.explore_unknown_chunks,
		'is_nervous': stats.is_nervous,
		'is_aggravated': stats.is_aggravated,
		'is_scared': judgement.is_scared,
		'is_safe': judgement.is_safe,
		'is_healthy': None,
		'is_intimidated': stats.is_intimidated,
		'is_intimidating': lambda life, life_id: stats.is_intimidated_by(LIFE[life_id], life['id']),
		'is_confident': stats.is_confident,
		'is_situation_tense': lambda life: judgement.get_tension(life)>=10,
		'is_combat_ready': lambda life, life_id: not LIFE[life_id]['state'] in ['hiding', 'hidden'],
		'is_surrendering': lambda life, life_id: LIFE[life_id]['state'] == 'surrender',
		'is_being_surrendered_to': lambda life: len(judgement.get_combat_targets(life, ignore_escaped=True, filter_func=lambda life, life_id: LIFE[life_id]['state'] == 'surrender'))>0,
		'closest': None,
		'kill': lambda life: lfe.kill(life, 'their own dumb self'),
		'has_attacked_trusted': stats.has_attacked_trusted,
		'has_attacked_self': stats.has_attacked_self,
		'distance_to_pos': stats.distance_from_pos_to_pos,
		'current_chunk_has_flag': lambda life, flag: chunks.get_flag(life, lfe.get_current_chunk_id(life), flag)>0,
		'is_idle': lambda life: life['state'] == 'idle',
		'is_relaxed': lambda life: life['state_tier'] == TIER_RELAXED,
		'is_child_of': stats.is_child_of,
		'is_parent_of': stats.is_parent_of,
		'has_parent': stats.has_parent,
		'has_child': stats.has_child,
		'is_night': logic.is_night,
		'is_born_leader': stats.is_born_leader,
		'is_psychotic': stats.is_psychotic,
		'is_safe_in_shelter': stats.is_safe_in_shelter,
		'is_incapacitated': stats.is_incapacitated,
		'is_target': lambda life, life_id: life_id in judgement.get_targets(life) or life_id in judgement.get_combat_targets(life),
		'seen_target_recently': lambda life, life_id: brain.knows_alife_by_id(life, life_id)['last_seen_time']<=150,
		'is_combat_target': lambda life, life_id: life_id in judgement.get_combat_targets(life),
		'is_traitor': lambda life, life_id: len(lfe.get_memory(life, matches={'text': 'traitor', 'target': life_id}))>0,
		'is_awake': judgement.is_target_awake,
		'is_dead': judgement.is_target_dead,
		'is_target_dead': judgement.is_target_dead,
		'is_raiding': lambda life: (life['group'] and groups.get_stage(life, life['group'])==STAGE_RAIDING)==True,
		'find_and_announce_shelter': groups.find_and_announce_shelter,
		'desires_shelter': stats.desires_shelter,
		'travel_to_position': movement.travel_to_position,
		'find_target': movement.find_target,
		'can_see_target': sight.can_see_target,
		'has_threats': lambda life: len(judgement.get_threats(life, ignore_escaped=1))>0,
		'has_visible_threat': lambda life: len(judgement.get_visible_threats(life))>0,
		'has_combat_targets': lambda life: len(judgement.get_combat_targets(life))>0,
		'has_lost_threat': lambda life: len(judgement.get_threats(life, escaped_only=True, ignore_escaped=2))>0,
		'has_ready_combat_targets': lambda life: len(judgement.get_ready_combat_targets(life, recent_only=True, limit_distance=sight.get_vision(life)+10))>0,
		'danger_close': stats.is_threat_too_close,
		'number_of_alife_in_chunk_matching': lambda life, chunk_key, matching, amount: len(chunks.get_alife_in_chunk_matching(chunk_key, matching))>amount,
		'number_of_alife_in_reference_matching': lambda life, reference_id, matching, amount: len(references.get_alife_in_reference_matching(reference_id, matching))>amount,
		'announce_to_group': groups.announce,
		'is_in_chunk': chunks.is_in_chunk,
		'is_in_shelter': lfe.is_in_shelter,
		'has_shelter': lambda life: len(judgement.get_known_shelters(life))>0,
		'has_completed_job': lambda life, job_id: job_id in life['completed_jobs'],
		'has_completed_task': lambda life, job_id: job_id in life['completed_jobs'],
		'retrieve_from_memory': brain.retrieve_from_memory,
		'pick_up_and_hold_item': lfe.pick_up_and_hold_item,
		'has_usable_weapon': lambda life: not combat.has_ready_weapon(life) == False,
		'has_potentially_usable_weapon': lambda life: combat.has_potentially_usable_weapon(life) == True,
		'target_is_combat_ready': judgement.target_is_combat_ready,
		'create_item_need': survival.add_needed_item,
		'group_needs_resources': lambda life, group_id: groups.needs_resources(group_id),
		'has_needs_to_meet': survival.has_needs_to_meet,
		'has_unmet_needs': survival.has_unmet_needs,
		'has_needs_to_satisfy': survival.has_needs_to_satisfy,
		'has_needs': lambda life: survival.has_needs_to_meet(life) or survival.has_unmet_needs(life) or survival.has_needs_to_satisfy(life),
		'has_number_of_items_matching': lambda life, matching, amount: len(lfe.get_all_inventory_items(life, matches=matching))>=amount,
		'flag_item_matching': lambda life, matching, flag: lfe.get_all_inventory_items(life, matches=[matching]) and brain.flag_item(life, lfe.get_all_inventory_items(life, matches=[matching])[0], flag)>0,
		'drop_item_matching': lambda life, matching: lfe.get_all_inventory_items(life, matches=[matching]) and lfe.drop_item(life, lfe.get_all_inventory_items(life, matches=[matching])[0]['uid'])>0,
		'has_target_to_follow': lambda life: judgement.get_target_to_follow(life)>0,
		'has_target_to_guard': lambda life: judgement.get_target_to_guard(life)>0,
		'get_recent_events': speech.get_recent_events,
		'get_target': lambda life, life_id: speech.get_target(life,
	                                                           lfe.has_dialog_with(life, life_id),
	                                                           dialog.get_flag(lfe.has_dialog_with(life, life_id),
	                                                                           'NEXT_GIST')),
		'get_needs': lambda life, life_id: speech.get_needs(life,
	                                                         lfe.has_dialog_with(life, life_id),
	                                                         dialog.get_flag(lfe.has_dialog_with(life, life_id),
	                                                                         'NEXT_GIST')),
		'get_location': lambda life: '%s, %s' % (life['pos'][0], life['pos'][1]),
		'join_group': lambda life, **kwargs: groups.join_group(life, kwargs['group_id']),
		'add_group_member': lambda life, life_id: groups.add_member(life, life['group'], life_id),
		'claim_to_be_group_leader': lambda life, life_id: groups.set_leader(life, life['group'], life['id']),
		'is_group_leader': lambda life: groups.is_leader_of_any_group(life)==True,
		'is_in_same_group': lambda life, life_id: (life['group'] and LIFE[life_id]['group'] == life['group'])>0,
		'is_target_group_leader': lambda life, life_id: (groups.is_leader_of_any_group(LIFE[life_id]))==True,
		'is_in_group': lambda life: life['group']>0,
		'is_target_hostile': lambda life, life_id: brain.knows_alife_by_id(life, life_id) and brain.knows_alife_by_id(life, life_id)['alignment'] == 'hostile',
		'is_target_in_group': lambda life, life_id, **kwargs: brain.knows_alife_by_id(life, life_id) and brain.knows_alife_by_id(life, life_id)['group']==kwargs['group'],
		'is_target_in_any_group': lambda life, life_id: brain.knows_alife_by_id(life, life_id) and brain.knows_alife_by_id(life, life_id)['group']>0,
		'is_target_group_friendly': lambda life, life_id: brain.knows_alife_by_id(life, life_id) and brain.knows_alife_by_id(life, life_id)['group'] and groups.get_group_memory(life, brain.knows_alife_by_id(life, life_id)['group'], 'alignment')=='trust',
		'is_target_group_hostile': groups.is_target_group_hostile,
		'is_target_group_neutral': lambda life, life_id: brain.knows_alife_by_id(life, life_id) and brain.knows_alife_by_id(life, life_id)['group'] and groups.get_group_memory(life, brain.knows_alife_by_id(life, life_id)['group'], 'alignment')=='neutral',
		'is_group_hostile': lambda life, **kwargs: groups.get_group_memory(life, kwargs['group_id'], 'alignment')=='hostile',
		'is_injured': lambda life: len(lfe.get_cut_limbs(life)) or len(lfe.get_bleeding_limbs(life)),
		'inform_of_group_members': speech.inform_of_group_members,
		'update_group_members': speech.update_group_members,
		'get_group_flag': groups.get_flag,
		'get_flag': brain.get_flag,
		'get_group': lambda life: life['group'],
		'discover_group': lambda life, **kwargs: groups.discover_group(life, kwargs['group_id']),
		'add_target_to_known_group': lambda life, life_id, **kwargs: groups.add_member(life, kwargs['group_id'], life_id),
		'knows_about_group': lambda life, **kwargs: groups.group_exists(life, kwargs['group_id']),
		'group_has_shelter': lambda life: groups.get_shelter(life, life['group'])>0,
		'declare_group_hostile': lambda life, **kwargs: stats.declare_group_hostile(life, kwargs['group_id']),
		'declare_group_trusted': lambda life, **kwargs: stats.declare_group_trusted(life, kwargs['group_id']),
		'declare_group_target': lambda life, life_id: stats.declare_group_target(life, life_id, 'hostile'),
		'get_group_shelter': lambda life: groups.get_shelter(life, life['group']),
		'set_group_shelter': lambda life, **kwargs: groups.set_shelter(life, kwargs['group_id'], kwargs['shelter']),
		'get_group_stage': lambda life: groups.get_stage(life, life['group']),
		'get_group_stage_message': speech.get_group_stage_message,
		'set_group_stage': lambda life, **kwargs: groups.set_stage(life, kwargs['group_id'], kwargs['stage']),
		'is_group_motivated_for_crime': lambda life: life['group'] and groups.get_motive(life, life['group']) == 'crime',
		'wants_to_leave_group_for_group': lambda life: stats.wants_to_abandon_group(life, life['group']),
		'knows_items_matching': lambda life, **kwargs: len(brain.get_multi_matching_remembered_items(life, kwargs['items'], no_owner=True))>0,
		'get_known_group': speech.get_known_group,
		'inform_of_group': speech.inform_of_group,
		'force_inform_of_group': speech.force_inform_of_group,
		'inform_of_items': lambda life, life_id, **kwargs: speech.inform_of_items(life, life_id, kwargs['items']),
		'update_location': lambda life, life_id: brain.update_known_life(life, life_id, 'last_seen_at', LIFE[life_id]['pos'][:]),
		'has_questions_for_target': lambda life, life_id: len(memory.get_questions_for_target(life, life_id))>0,
		'has_orders_for_target': lambda life, life_id: len(memory.get_orders_for_target(life, life_id))>0,
		'ask_target_question': memory.ask_target_question,
		'give_target_order_message': memory.give_target_order_message,
		'give_target_order': memory.give_target_order,
		'take_order': memory.take_order,
		'reject_order': memory.reject_order,
		'get_introduction_message': speech.get_introduction_message,
		'get_introduction_gist': speech.get_introduction_gist,
		'establish_trust': stats.establish_trust,
		'establish_feign_trust': stats.establish_feign_trust,
		'establish_aggressive': stats.establish_aggressive,
		'establish_hostile': stats.establish_hostile,
		'establish_scared': stats.establish_scared,
		'claim_hostile': lambda life, target, **kwargs: stats.establish_hostile(life, kwargs['target_id']),
		'describe_target': lambda life, life_id, **kwargs: speech.describe_target(life, kwargs['target']),
		'consume': lfe.consume,
		'explode': items.explode,
		'is_player': lambda life: 'player' in life,
		'is_neutral': lambda life, life_id: brain.knows_alife_by_id(life, life_id)['alignment'] == 'neutral',
		'reset_think_timer': lfe.reset_think_rate,
		'mark_target_as_combat_ready': lambda life, life_id: brain.flag_alife(life, life_id, 'combat_ready'),
		'mark_target_as_not_combat_ready': lambda life, life_id: brain.flag_alife(life, life_id, 'combat_ready', value=False),
		'saw_target_recently': lambda life, **kwargs: brain.knows_alife_by_id(life, kwargs['target_id']) and -1<brain.knows_alife_by_id(life, kwargs['target_id'])['last_seen_time']<6000,
		'update_location_of_target_from_target': speech.update_location_of_target_from_target,
		'ping': lambda life: logging.debug('%s: Ping!' % ' '.join(life['name'])),
		'wander': lambda life: alife_discover.tick(life),
		'pick_up_item': lambda life: alife_needs.tick(life),
		'take_shelter': lambda life: alife_shelter.tick(life),
		'has_non_relaxed_goal': lambda life: life['state_tier']>TIER_RELAXED,
		'needs_to_manage_inventory': lambda life: alife_manage_items.conditions(life) == True,
		'manage_inventory': lambda life: alife_manage_items.tick(life),
		'cover_exposed': lambda life: len(combat.get_exposed_positions(life))>0,
		'ranged_ready': lambda life: lfe.execute_raw(life, 'combat', 'ranged_ready'),
		'ranged_attack': lambda life: alife_combat.ranged_attack(life),
		'melee_ready': lambda life: lfe.execute_raw(life, 'combat', 'melee_ready'),
		'melee_attack': lambda life: alife_combat.melee_attack(life),
		'take_cover': lambda life: alife_cover.tick(life),
		'hide': lambda life: alife_escape.tick(life),
		'stop': lfe.stop,
		'search_for_threat': lambda life: alife_search.tick(life),
		'has_low_recoil': lambda life: life['recoil']>=.25,
		'has_medium_recoil': lambda life: life['recoil']>=.5,
		'has_high_recoil': lambda life: life['recoil']>=.75,
		'has_focus_point': lambda life: len(lfe.get_memory(life, matches={'text': 'focus_on_chunk'}))>0,
		'walk_to': lambda life: movement.travel_to_chunk(life, lfe.get_memory(life, matches={'text': 'focus_on_chunk'})[len(lfe.get_memory(life, matches={'text': 'focus_on_chunk'}))-1]['chunk_key']),
		'follow_target': alife_follow.tick,
		'guard_focus_point': lambda life: movement.guard_chunk(life, lfe.get_memory(life, matches={'text': 'focus_on_chunk'})[0]['chunk_key']),
		'disarm': lambda life, life_id: brain.flag_alife(life, life_id, 'disarm', value=WORLD_INFO['ticks']),
		'drop_weapon': lambda life: lfe.drop_item(life, lfe.get_held_items(life, matches={'type': 'gun'})[0]),
		'is_disarming': lambda life, life_id: brain.get_alife_flag(life, life_id, 'disarm')>0,
		'set_raid_location': lambda life, **kwargs: lfe.memory(life, 'focus_on_chunk', chunk_key=kwargs['chunk_key']),
		'move_to_chunk': lambda life, **kwargs:  movement.set_focus_point(life, kwargs['chunk_key']),
		'move_to_chunk_key': movement.set_focus_point,
		'recruiting': lambda life, life_id: speech.send(life, life_id, 'recruit'),
		'is_raiding': lambda life: life['group'] and groups.get_stage(life, life['group']) == STAGE_ATTACKING,
		'is_in_target_chunk': lambda life, target_id: lfe.get_current_chunk_id(life) == lfe.get_current_chunk_id(LIFE[target_id]),
		'get_chunk_key': lfe.get_current_chunk_id,
		'has_threat_in_combat_range': stats.has_threat_in_combat_range,
		'find_nearest_chunk_in_reference': references.find_nearest_chunk_key_in_reference_of_type,
		'has_item_type': lambda life, item_match: not lfe.get_inventory_item_matching(life, item_match) == None,
		'move_to_target': lambda life, target_id: movement.travel_to_position(life, LIFE[target_id]['pos']),
		'is_in_range_of_target': lambda life, target_id, distance: numbers.distance(life['pos'], LIFE[target_id]['pos'])<=int(distance),
		'track_target': lambda life, target_id: brain.meet_alife(life, LIFE[target_id]) and judgement.track_target(life, target_id),
		'untrack_target': judgement.untrack_target,
		'clear_tracking': lambda life: brain.flag(life, 'tracking_targets', []),
		'can_see_item': lambda life, item_uid: item_uid in life['seen_items'],
		'has_item': lambda life, item_uid: item_uid in life['inventory'],
		'pick_up_item': movement.pick_up_item,
		'create_mission': missions.create_mission_for_self,
		'give_mission': missions.create_mission_and_give,
		'do_mission': alife_work.tick,
		'has_mission': lambda life: len(life['missions'])>0,
		'drop_item': lfe.drop_item,
		'get_id': lambda life: life['id'],
		'always': lambda life: 1==1,
		'pass': lambda life, *a, **k: True,
		'never': lambda life: 1==2})
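
The 'ping' entry above is the one place in this dispatch table that calls logging.debug, formatting the life's name into the message with the % operator. A minimal, self-contained sketch of that pattern follows; the ACTIONS table and the life record are made-up stand-ins, not the game's real structures:

import logging

logging.basicConfig(level=logging.DEBUG)

# Hypothetical dispatch table with a 'ping' entry mirroring the one above.
ACTIONS = {
	'ping': lambda life: logging.debug('%s: Ping!' % ' '.join(life['name'])),
}

life = {'name': ['Test', 'Subject']}  # stand-in record, not the game's real data
ACTIONS['ping'](life)                 # -> DEBUG:root:Test Subject: Ping!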

Example 184

Project: s3cmd
Source File: FileLists.py
View license
def fetch_remote_list(args, require_attribs = False, recursive = None, uri_params = {}):
    def _get_remote_attribs(uri, remote_item):
        response = S3(cfg).object_info(uri)
        if not response.get('headers'):
            return

        remote_item.update({
        'size': int(response['headers']['content-length']),
        'md5': response['headers']['etag'].strip('"\''),
        'timestamp' : dateRFC822toUnix(response['headers']['last-modified'])
        })
        try:
            md5 = response['s3cmd-attrs']['md5']
            remote_item.update({'md5': md5})
            debug(u"retreived md5=%s from headers" % md5)
        except KeyError:
            pass

    def _get_filelist_remote(remote_uri, recursive = True):
        ## If remote_uri ends with '/' then all remote files will have
        ## the remote_uri prefix removed in the relative path.
        ## If, on the other hand, the remote_uri ends with something else
        ## (probably alphanumeric symbol) we'll use the last path part
        ## in the relative path.
        ##
        ## Complicated, eh? See an example:
        ## _get_filelist_remote("s3://bckt/abc/def") may yield:
        ## { 'def/file1.jpg' : {}, 'def/xyz/blah.txt' : {} }
        ## _get_filelist_remote("s3://bckt/abc/def/") will yield:
        ## { 'file1.jpg' : {}, 'xyz/blah.txt' : {} }
        ## Furthermore a prefix-magic can restrict the return list:
        ## _get_filelist_remote("s3://bckt/abc/def/x") yields:
        ## { 'xyz/blah.txt' : {} }

        info(u"Retrieving list of remote files for %s ..." % remote_uri)
        empty_fname_re = re.compile(r'\A\s*\Z')

        total_size = 0

        s3 = S3(Config())
        response = s3.bucket_list(remote_uri.bucket(), prefix = remote_uri.object(),
                                  recursive = recursive, uri_params = uri_params)

        rem_base_original = rem_base = remote_uri.object()
        remote_uri_original = remote_uri
        if rem_base != '' and rem_base[-1] != '/':
            rem_base = rem_base[:rem_base.rfind('/')+1]
            remote_uri = S3Uri(u"s3://%s/%s" % (remote_uri.bucket(), rem_base))
        rem_base_len = len(rem_base)
        rem_list = FileDict(ignore_case = False)
        break_now = False
        for object in response['list']:
            if object['Key'] == rem_base_original and object['Key'][-1] != "/":
                ## We asked for one file and we got that file :-)
                key = unicodise(os.path.basename(deunicodise(object['Key'])))
                object_uri_str = remote_uri_original.uri()
                break_now = True
                rem_list = FileDict(ignore_case = False)   ## Remove whatever has already been put to rem_list
            else:
                key = object['Key'][rem_base_len:]      ## Beware - this may be '' if object['Key']==rem_base !!
                object_uri_str = remote_uri.uri() + key
            if empty_fname_re.match(key):
                # Objects may exist on S3 with empty names (''), which don't map so well to common filesystems.
                warning(u"Empty object name on S3 found, ignoring.")
                continue
            rem_list[key] = {
                'size' : int(object['Size']),
                'timestamp' : dateS3toUnix(object['LastModified']), ## Sadly it's upload time, not our lastmod time :-(
                'md5' : object['ETag'].strip('"\''),
                'object_key' : object['Key'],
                'object_uri_str' : object_uri_str,
                'base_uri' : remote_uri,
                'dev' : None,
                'inode' : None,
            }
            if '-' in rem_list[key]['md5']: # always get it for multipart uploads
                _get_remote_attribs(S3Uri(object_uri_str), rem_list[key])
            md5 = rem_list[key]['md5']
            rem_list.record_md5(key, md5)
            total_size += int(object['Size'])
            if break_now:
                break
        return rem_list, total_size

    cfg = Config()
    remote_uris = []
    remote_list = FileDict(ignore_case = False)

    if type(args) not in (list, tuple, set):
        args = [args]

    if recursive == None:
        recursive = cfg.recursive

    for arg in args:
        uri = S3Uri(arg)
        if not uri.type == 's3':
            raise ParameterError("Expecting S3 URI instead of '%s'" % arg)
        remote_uris.append(uri)

    total_size = 0

    if recursive:
        for uri in remote_uris:
            objectlist, tmp_total_size = _get_filelist_remote(uri, recursive = True)
            total_size += tmp_total_size
            for key in objectlist:
                remote_list[key] = objectlist[key]
                remote_list.record_md5(key, objectlist.get_md5(key))
    else:
        for uri in remote_uris:
            uri_str = uri.uri()
            ## Wildcards used in remote URI?
            ## If yes we'll need a bucket listing...
            wildcard_split_result = re.split("\*|\?", uri_str, maxsplit=1)
            if len(wildcard_split_result) == 2: # wildcards found
                prefix, rest = wildcard_split_result
                ## Only request recursive listing if the 'rest' of the URI,
                ## i.e. the part after first wildcard, contains '/'
                need_recursion = '/' in rest
                objectlist, tmp_total_size = _get_filelist_remote(S3Uri(prefix), recursive = need_recursion)
                total_size += tmp_total_size
                for key in objectlist:
                    ## Check whether the 'key' matches the requested wildcards
                    if glob.fnmatch.fnmatch(objectlist[key]['object_uri_str'], uri_str):
                        remote_list[key] = objectlist[key]
            else:
                ## No wildcards - simply append the given URI to the list
                key = unicodise(os.path.basename(deunicodise(uri.object())))
                if not key:
                    raise ParameterError(u"Expecting S3 URI with a filename or --recursive: %s" % uri.uri())
                remote_item = {
                    'base_uri': uri,
                    'object_uri_str': uri.uri(),
                    'object_key': uri.object()
                }
                if require_attribs:
                    _get_remote_attribs(uri, remote_item)

                remote_list[key] = remote_item
                md5 = remote_item.get('md5')
                if md5:
                    remote_list.record_md5(key, md5)
                total_size += remote_item.get('size', 0)

    remote_list, exclude_list = filter_exclude_include(remote_list)
    return remote_list, exclude_list, total_size
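
In this example the md5 recovered from the s3cmd-attrs headers is reported through s3cmd's own debug() helper rather than logging.debug directly. A small sketch of the same idea using only the standard logging module; the header keys and the function name below are illustrative, not s3cmd's actual API:

import logging

logging.basicConfig(level=logging.DEBUG)

def get_remote_md5(headers):
    # Prefer an md5 recorded in custom attributes, falling back to the ETag;
    # the header keys here are made up for illustration.
    md5 = headers.get('s3cmd-attrs-md5') or headers.get('etag', '').strip('"\'')
    logging.debug(u"retrieved md5=%s from headers" % md5)
    return md5

get_remote_md5({'etag': '"d41d8cd98f00b204e9800998ecf8427e"'})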

Example 185

Project: GrepBugs
Source File: grepbugs.py
View license
def html_report(scan_id):
	"""
	Create html report for a given scan_id
	"""
	
	if 'mysql' == gbconfig.get('database', 'database'):
		try:
			import MySQLdb
			mysqldb  = MySQLdb.connect(host=gbconfig.get('database', 'host'), user=gbconfig.get('database', 'dbuname'), passwd=gbconfig.get('database', 'dbpword'), db=gbconfig.get('database', 'dbname'))
			mysqlcur = mysqldb.cursor()
		except Exception as e:
			print 'Error connecting to MySQL! See log file for details.'
			logging.debug('Error connecting to MySQL: ' + str(e))
			sys.exit(1)

	else:
		try:
			import sqlite3 as lite
			db  = lite.connect(dbfile)
			cur = db.cursor()

		except lite.Error as e:
			print 'Error connecting to db file! See log file for details.'
			logging.debug('Error connecting to db file: ' + str(e))
			sys.exit(1)
		except Exception as e:
			print 'CRITICAL: Unhandled exception occurred! Quitters gonna quit! See log file for details.'
			logging.critical('Unhandled exception: ' + str(e))
			sys.exit(1)

	html   = ''
	h      = 'ICAgX19fX19fICAgICAgICAgICAgICAgIF9fX18KICAvIF9fX18vX19fX19fXyAgX19fXyAgLyBfXyApX18gIF9fX19fXyBfX19fX18KIC8gLyBfXy8gX19fLyBfIFwvIF9fIFwvIF9fICAvIC8gLyAvIF9fIGAvIF9fXy8KLyAvXy8gLyAvICAvICBfXy8gL18vIC8gL18vIC8gL18vIC8gL18vIChfXyAgKQpcX19fXy9fLyAgIFxfX18vIC5fX18vX19fX18vXF9fLF8vXF9fLCAvX19fXy8KICAgICAgICAgICAgICAvXy8gICAgICAgICAgICAgICAgL19fX18v'
	params = [scan_id]

	if 'mysql' == gbconfig.get('database', 'database'):
		mysqlcur.execute("SELECT a.repo, a.account, a.project, b.scan_id, b.date_time, b.cloc_out FROM projects a, scans b WHERE a.project_id=b.project_id AND b.scan_id=%s LIMIT 1;", params)
		rows = mysqlcur.fetchall()
	else:
		cur.execute("SELECT a.repo, a.account, a.project, b.scan_id, b.date_time, b.cloc_out FROM projects a, scans b WHERE a.project_id=b.project_id AND b.scan_id=? LIMIT 1;", params)
		rows = cur.fetchall()

	# for loop on rows, but only one row
	for row in rows:
		print 'writing report...'
		htmlfile = os.path.dirname(os.path.abspath(__file__)) + '/out/' + row[0] + '.' + row[1] + '.' + row[2].replace("/", "_") + '.' + row[3] + '.html'
		tabfile  = os.path.dirname(os.path.abspath(__file__)) + '/out/' + row[0] + '.' + row[1] + '.' + row[2].replace("/", "_") + '.' + row[3] + tabsext

		if not os.path.exists(os.path.dirname(htmlfile)):
			os.makedirs(os.path.dirname(htmlfile))
		
		# include repo/account/project link
		if 'github' == row[0]:
			project_base_url = 'https://github.com/' + row[1] + '/' + row[2]
			link             = '(<a href="' + project_base_url + '" target="_new">' + project_base_url + '</a>)'
		else:
			project_base_url = ''
			link             = '';

		o = open(htmlfile, 'w')
		o.write("""<!DOCTYPE html><head>
<style>
	pre { font-size: 90%; }
	.t { color: darkgreen;  font-size: 150%;  font-weight: 900; text-shadow: 3px 3px darkgreen; }    /* title */
	h3 { margin-left: 15px;    font-variant: small-caps; } /* language */
	.d { margin-left:15px;   color: darkred; }    /* descriptive problem */
	.r { font-weight:bold;      margin-left:15px; }    /* regex */
	pre.f { margin-left: 50px; }  /* finding */
	pre.f span {color: grey; }  /* finding title */
</style></head><body>""")
		o.write("<pre class=\"t\">\n" + h.decode('base64') + "</pre>")
		o.write("\n\n<pre>"
				+ "\nrepo:     " + row[0]
				+ "\naccount:  " + row[1]
				+ "\nproject:  " + row[2] + "   " + link
				+ "\nscan id:  " + row[3]
				+ "\ndate:     " + str(row[4]) + "</pre>\n")
		#o.write("<pre>\n" + str(row[5]).replace("\n", "<br>") + "</pre>")
		o.write("<pre>\n" + row[5] + "</pre>")
		o.close()
		
		t = open(tabfile, 'w')
		t.write("GrepBugs\n")
		t.write("repo:\t" + row[0] + "\naccount:\t" + row[1] + "\nproject:\t" + row[2] + " " + link + "\nscan id:\t" + row[3] + "\ndate:\t" + str(row[4]) + "\n")
		t.close()

		if 'mysql' == gbconfig.get('database', 'database'):
			mysqlcur.execute("SELECT b.language, b.regex_text, b.description, c.result_detail_id, c.file, c.line, c.code FROM scans a, results b, results_detail c WHERE a.scan_id=%s AND a.scan_id=b.scan_id AND b.result_id=c.result_id ORDER BY b.language, b.regex_id, c.file;", params)
			rs = mysqlcur.fetchall()
		else:
			cur.execute("SELECT b.language, b.regex_text, b.description, c.result_detail_id, c.file, c.line, c.code FROM scans a, results b, results_detail c WHERE a.scan_id=? AND a.scan_id=b.scan_id AND b.result_id=c.result_id ORDER BY b.language, b.regex_id, c.file;", params)
			rs = cur.fetchall()

		o        = open(htmlfile, 'a')
		t        = open(tabfile, 'a')
		html     = "\n\n"
		tabs     = "\n\nlang\tdescription\tfile\tline\tc.code\n"
		language = ''
		regex    = ''
		count    = 0
		
		# loop through all results, do some fancy coordination for output
		for r in rs:
			tab_lang = r[0].replace("\t"," ").replace("\n","  ").replace("\r","  ")
			#tab_regex = r[1].replace("\t"," ").replace("\n","  ").replace("\r","  ")
			tab_desc = r[2].replace("\t"," ").replace("\n","  ").replace("\r","  ")
			#tab_id = r[3].replace("\t"," ").replace("\n","  ").replace("\r","  ")
			tab_file = r[4].replace("\t"," ").replace("\n","  ").replace("\r","  ")
			tab_line = str(r[5])
			tab_code = r[6].replace("\t"," ").replace("\n","  ").replace("\r","  ")
		
			tabs += tab_lang +"\t"+ tab_desc +"\t"+ tab_file +"\t"+ tab_line +"\t"+ tab_code +"\n"
			
			if regex != r[1]:
				if 0 != count:
					html += '	</div>' + "\n"; # end result set for regex

			if language != r[0]:
				html += '<h3>' + r[0] + '</h3>' + "\n"

			if regex != r[1]:
				html += '	<div class="d"><a style="cursor: pointer;" onclick="javascript:o=document.getElementById(\'r' + str(r[3]) + '\');if(o.style.display==\'none\'){ o.style.display=\'block\';} else {o.style.display=\'none\';}">+ ' + r[2] + "</a></div>\n"
				html += '	<div id="r' + str(r[3]) + '" style="display:none;margin-left:15px;">' + "\n" # description
				html += '		<div class="r"><pre>' +  cgi.escape(r[1]) + '</pre></div>' + "\n" #regex

			# include repo/account/project/file link
			if 'github' == row[0]:
				begin       = r[4].index('GrepBugs/remotesrc') + len('GrepBugs/remotesrc') # determine beginning position of repo path 
				file_link   = '<a href="https://github.com/' + r[4][r[4].index(row[1], begin):].replace(row[1] + '/' + row[2] + '/', row[1] + '/' + row[2] + '/blob/master/') + '#L' + str(r[5]) + '" target="_new">' + str(r[5]) + '</a>'
				ltrim_by    = row[1]
				ltrim_begin = begin
			else:
				file_link   = str(r[5]);
				ltrim_by    = row[2]
				ltrim_begin = 0

			html += '		<pre class="f"><span>' + r[4][r[4].index(ltrim_by, ltrim_begin):] + ' ' + file_link + ':</span> &nbsp; ' + cgi.escape(r[6]) + '</pre>' + "\n" # finding

			try:
				html += '                              <pre class="f"><span>' + r[4][r[4].index(ltrim_by, ltrim_begin):] + ' ' + file_link + ':</span> &nbsp; ' + "\n                                       " + cgi.escape(r[6]) + '</pre>' + "\n" # finding

			except ValueError:
				html += 'Exception ValueError: Got a value error on a substring for ' + r[4] + '/' + str(r[5]) + "\n"
				logging.error('Using grep binary ' + grepbin)

			count   += 1
			language = r[0]
			regex    = r[1]

		if 0 == count:
			html += '<h3>No bugs found!</h3><div>Contribute regular expressions to find bugs in this code at <a href="https://grepbugs.com">GrepBugs.com</a></div>';
			tabs += "No bugs found\n\nContribute regular expressions to find bugs in this code at https://GrepBugs.com\n";
		else:
			html += '	</div>' + "\n"
			tabs += "\n"
		
		html += '</html>'
		o.write(html)
		o.close()
		t.write(tabs)
		t.close()

	if 'mysql' == gbconfig.get('database', 'database'):
		mysqldb.close()
	else:
		db.close()
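
Here logging.debug captures the database connection error before the program prints a short notice and exits. A minimal sketch of that error path, limited to the sqlite branch and assuming a made-up log file name:

import logging
import sqlite3
import sys

logging.basicConfig(filename='grepbugs.log', level=logging.DEBUG)  # hypothetical log file

def connect(dbfile):
    try:
        return sqlite3.connect(dbfile)
    except sqlite3.Error as e:
        print('Error connecting to db file! See log file for details.')
        logging.debug('Error connecting to db file: ' + str(e))
        sys.exit(1)

db = connect(':memory:')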

Example 186

Project: ganeti
Source File: bootstrap.py
View license
def InitCluster(cluster_name, mac_prefix, # pylint: disable=R0913, R0914
                master_netmask, master_netdev, file_storage_dir,
                shared_file_storage_dir, gluster_storage_dir,
                candidate_pool_size, ssh_key_type, ssh_key_bits,
                secondary_ip=None, vg_name=None, beparams=None, nicparams=None,
                ndparams=None, hvparams=None, diskparams=None,
                enabled_hypervisors=None, modify_etc_hosts=True,
                modify_ssh_setup=True, maintain_node_health=False,
                drbd_helper=None, uid_pool=None, default_iallocator=None,
                default_iallocator_params=None, primary_ip_version=None,
                ipolicy=None, prealloc_wipe_disks=False,
                use_external_mip_script=False, hv_state=None, disk_state=None,
                enabled_disk_templates=None, install_image=None,
                zeroing_image=None, compression_tools=None,
                enabled_user_shutdown=False):
  """Initialise the cluster.

  @type candidate_pool_size: int
  @param candidate_pool_size: master candidate pool size

  @type enabled_disk_templates: list of string
  @param enabled_disk_templates: list of disk_templates to be used in this
    cluster

  @type enabled_user_shutdown: bool
  @param enabled_user_shutdown: whether user shutdown is enabled cluster
                                wide

  """
  # TODO: complete the docstring
  if config.ConfigWriter.IsCluster():
    raise errors.OpPrereqError("Cluster is already initialised",
                               errors.ECODE_STATE)

  data_dir = vcluster.AddNodePrefix(pathutils.DATA_DIR)
  queue_dir = vcluster.AddNodePrefix(pathutils.QUEUE_DIR)
  archive_dir = vcluster.AddNodePrefix(pathutils.JOB_QUEUE_ARCHIVE_DIR)
  for ddir in [queue_dir, data_dir, archive_dir]:
    if os.path.isdir(ddir):
      for entry in os.listdir(ddir):
        if not os.path.isdir(os.path.join(ddir, entry)):
          raise errors.OpPrereqError(
            "%s contains non-directory entries like %s. Remove left-overs of an"
            " old cluster before initialising a new one" % (ddir, entry),
            errors.ECODE_STATE)

  if not enabled_hypervisors:
    raise errors.OpPrereqError("Enabled hypervisors list must contain at"
                               " least one member", errors.ECODE_INVAL)
  invalid_hvs = set(enabled_hypervisors) - constants.HYPER_TYPES
  if invalid_hvs:
    raise errors.OpPrereqError("Enabled hypervisors contains invalid"
                               " entries: %s" % invalid_hvs,
                               errors.ECODE_INVAL)

  _InitCheckEnabledDiskTemplates(enabled_disk_templates)

  try:
    ipcls = netutils.IPAddress.GetClassFromIpVersion(primary_ip_version)
  except errors.ProgrammerError:
    raise errors.OpPrereqError("Invalid primary ip version: %d." %
                               primary_ip_version, errors.ECODE_INVAL)

  hostname = netutils.GetHostname(family=ipcls.family)
  if not ipcls.IsValid(hostname.ip):
    raise errors.OpPrereqError("This host's IP (%s) is not a valid IPv%d"
                               " address." % (hostname.ip, primary_ip_version),
                               errors.ECODE_INVAL)

  if ipcls.IsLoopback(hostname.ip):
    raise errors.OpPrereqError("This host's IP (%s) resolves to a loopback"
                               " address. Please fix DNS or %s." %
                               (hostname.ip, pathutils.ETC_HOSTS),
                               errors.ECODE_ENVIRON)

  if not ipcls.Own(hostname.ip):
    raise errors.OpPrereqError("Inconsistency: this host's name resolves"
                               " to %s,\nbut this ip address does not"
                               " belong to this host" %
                               hostname.ip, errors.ECODE_ENVIRON)

  clustername = netutils.GetHostname(name=cluster_name, family=ipcls.family)

  if netutils.TcpPing(clustername.ip, constants.DEFAULT_NODED_PORT, timeout=5):
    raise errors.OpPrereqError("Cluster IP already active",
                               errors.ECODE_NOTUNIQUE)

  if not secondary_ip:
    if primary_ip_version == constants.IP6_VERSION:
      raise errors.OpPrereqError("When using a IPv6 primary address, a valid"
                                 " IPv4 address must be given as secondary",
                                 errors.ECODE_INVAL)
    secondary_ip = hostname.ip

  if not netutils.IP4Address.IsValid(secondary_ip):
    raise errors.OpPrereqError("Secondary IP address (%s) has to be a valid"
                               " IPv4 address." % secondary_ip,
                               errors.ECODE_INVAL)

  if not netutils.IP4Address.Own(secondary_ip):
    raise errors.OpPrereqError("You gave %s as secondary IP,"
                               " but it does not belong to this host." %
                               secondary_ip, errors.ECODE_ENVIRON)

  if master_netmask is not None:
    if not ipcls.ValidateNetmask(master_netmask):
      raise errors.OpPrereqError("CIDR netmask (%s) not valid for IPv%s " %
                                  (master_netmask, primary_ip_version),
                                 errors.ECODE_INVAL)
  else:
    master_netmask = ipcls.iplen

  if vg_name:
    # Check if volume group is valid
    vgstatus = utils.CheckVolumeGroupSize(utils.ListVolumeGroups(), vg_name,
                                          constants.MIN_VG_SIZE)
    if vgstatus:
      raise errors.OpPrereqError("Error: %s" % vgstatus, errors.ECODE_INVAL)

  drbd_enabled = constants.DT_DRBD8 in enabled_disk_templates
  _InitCheckDrbdHelper(drbd_helper, drbd_enabled)

  logging.debug("Stopping daemons (if any are running)")
  result = utils.RunCmd([pathutils.DAEMON_UTIL, "stop-all"])
  if result.failed:
    raise errors.OpExecError("Could not stop daemons, command %s"
                             " had exitcode %s and error '%s'" %
                             (result.cmd, result.exit_code, result.output))

  file_storage_dir = _PrepareFileStorage(enabled_disk_templates,
                                         file_storage_dir)
  shared_file_storage_dir = _PrepareSharedFileStorage(enabled_disk_templates,
                                                      shared_file_storage_dir)
  gluster_storage_dir = _PrepareGlusterStorage(enabled_disk_templates,
                                               gluster_storage_dir)

  if not re.match("^[0-9a-z]{2}:[0-9a-z]{2}:[0-9a-z]{2}$", mac_prefix):
    raise errors.OpPrereqError("Invalid mac prefix given '%s'" % mac_prefix,
                               errors.ECODE_INVAL)

  if not nicparams.get('mode', None) == constants.NIC_MODE_OVS:
    # Do not do this check if mode=openvswitch, since the openvswitch is not
    # created yet
    result = utils.RunCmd(["ip", "link", "show", "dev", master_netdev])
    if result.failed:
      raise errors.OpPrereqError("Invalid master netdev given (%s): '%s'" %
                                 (master_netdev,
                                  result.output.strip()), errors.ECODE_INVAL)

  dirs = [(pathutils.RUN_DIR, constants.RUN_DIRS_MODE)]
  utils.EnsureDirs(dirs)

  objects.UpgradeBeParams(beparams)
  utils.ForceDictType(beparams, constants.BES_PARAMETER_TYPES)
  utils.ForceDictType(nicparams, constants.NICS_PARAMETER_TYPES)

  objects.NIC.CheckParameterSyntax(nicparams)

  full_ipolicy = objects.FillIPolicy(constants.IPOLICY_DEFAULTS, ipolicy)
  _RestrictIpolicyToEnabledDiskTemplates(full_ipolicy, enabled_disk_templates)

  if ndparams is not None:
    utils.ForceDictType(ndparams, constants.NDS_PARAMETER_TYPES)
  else:
    ndparams = dict(constants.NDC_DEFAULTS)

  # This is ugly, as we modify the dict itself
  # FIXME: Make utils.ForceDictType pure functional or write a wrapper
  # around it
  if hv_state:
    for hvname, hvs_data in hv_state.items():
      utils.ForceDictType(hvs_data, constants.HVSTS_PARAMETER_TYPES)
      hv_state[hvname] = objects.Cluster.SimpleFillHvState(hvs_data)
  else:
    hv_state = dict((hvname, constants.HVST_DEFAULTS)
                    for hvname in enabled_hypervisors)

  # FIXME: disk_state has no default values yet
  if disk_state:
    for storage, ds_data in disk_state.items():
      if storage not in constants.DS_VALID_TYPES:
        raise errors.OpPrereqError("Invalid storage type in disk state: %s" %
                                   storage, errors.ECODE_INVAL)
      for ds_name, state in ds_data.items():
        utils.ForceDictType(state, constants.DSS_PARAMETER_TYPES)
        ds_data[ds_name] = objects.Cluster.SimpleFillDiskState(state)

  # hvparams is a mapping of hypervisor->hvparams dict
  for hv_name, hv_params in hvparams.iteritems():
    utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
    hv_class = hypervisor.GetHypervisor(hv_name)
    hv_class.CheckParameterSyntax(hv_params)

  # diskparams is a mapping of disk-template->diskparams dict
  for template, dt_params in diskparams.items():
    param_keys = set(dt_params.keys())
    default_param_keys = set(constants.DISK_DT_DEFAULTS[template].keys())
    if not (param_keys <= default_param_keys):
      unknown_params = param_keys - default_param_keys
      raise errors.OpPrereqError("Invalid parameters for disk template %s:"
                                 " %s" % (template,
                                          utils.CommaJoin(unknown_params)),
                                 errors.ECODE_INVAL)
    utils.ForceDictType(dt_params, constants.DISK_DT_TYPES)
    if template == constants.DT_DRBD8 and vg_name is not None:
      # The default METAVG value is equal to the VG name set at init time,
      # if provided
      dt_params[constants.DRBD_DEFAULT_METAVG] = vg_name

  try:
    utils.VerifyDictOptions(diskparams, constants.DISK_DT_DEFAULTS)
  except errors.OpPrereqError, err:
    raise errors.OpPrereqError("While verify diskparam options: %s" % err,
                               errors.ECODE_INVAL)

  # set up ssh config and /etc/hosts
  rsa_sshkey = ""
  dsa_sshkey = ""
  if os.path.isfile(pathutils.SSH_HOST_RSA_PUB):
    sshline = utils.ReadFile(pathutils.SSH_HOST_RSA_PUB)
    rsa_sshkey = sshline.split(" ")[1]
  if os.path.isfile(pathutils.SSH_HOST_DSA_PUB):
    sshline = utils.ReadFile(pathutils.SSH_HOST_DSA_PUB)
    dsa_sshkey = sshline.split(" ")[1]
  if not rsa_sshkey and not dsa_sshkey:
    raise errors.OpPrereqError("Failed to find SSH public keys",
                               errors.ECODE_ENVIRON)

  if modify_etc_hosts:
    utils.AddHostToEtcHosts(hostname.name, hostname.ip)

  if modify_ssh_setup:
    ssh.InitSSHSetup(ssh_key_type, ssh_key_bits)

  if default_iallocator is not None:
    alloc_script = utils.FindFile(default_iallocator,
                                  constants.IALLOCATOR_SEARCH_PATH,
                                  os.path.isfile)
    if alloc_script is None:
      raise errors.OpPrereqError("Invalid default iallocator script '%s'"
                                 " specified" % default_iallocator,
                                 errors.ECODE_INVAL)
  else:
    # default to htools
    if utils.FindFile(constants.IALLOC_HAIL,
                      constants.IALLOCATOR_SEARCH_PATH,
                      os.path.isfile):
      default_iallocator = constants.IALLOC_HAIL

  # check if we have all the users we need
  try:
    runtime.GetEnts()
  except errors.ConfigurationError, err:
    raise errors.OpPrereqError("Required system user/group missing: %s" %
                               err, errors.ECODE_ENVIRON)

  candidate_certs = {}

  now = time.time()

  if compression_tools is not None:
    cluster.CheckCompressionTools(compression_tools)

  initial_dc_config = dict(active=True,
                           interval=int(constants.MOND_TIME_INTERVAL * 1e6))
  data_collectors = dict(
      (name, initial_dc_config.copy())
      for name in constants.DATA_COLLECTOR_NAMES)

  # init of cluster config file
  cluster_config = objects.Cluster(
    serial_no=1,
    rsahostkeypub=rsa_sshkey,
    dsahostkeypub=dsa_sshkey,
    highest_used_port=(constants.FIRST_DRBD_PORT - 1),
    mac_prefix=mac_prefix,
    volume_group_name=vg_name,
    tcpudp_port_pool=set(),
    master_ip=clustername.ip,
    master_netmask=master_netmask,
    master_netdev=master_netdev,
    cluster_name=clustername.name,
    file_storage_dir=file_storage_dir,
    shared_file_storage_dir=shared_file_storage_dir,
    gluster_storage_dir=gluster_storage_dir,
    enabled_hypervisors=enabled_hypervisors,
    beparams={constants.PP_DEFAULT: beparams},
    nicparams={constants.PP_DEFAULT: nicparams},
    ndparams=ndparams,
    hvparams=hvparams,
    diskparams=diskparams,
    candidate_pool_size=candidate_pool_size,
    modify_etc_hosts=modify_etc_hosts,
    modify_ssh_setup=modify_ssh_setup,
    uid_pool=uid_pool,
    ctime=now,
    mtime=now,
    maintain_node_health=maintain_node_health,
    data_collectors=data_collectors,
    drbd_usermode_helper=drbd_helper,
    default_iallocator=default_iallocator,
    default_iallocator_params=default_iallocator_params,
    primary_ip_family=ipcls.family,
    prealloc_wipe_disks=prealloc_wipe_disks,
    use_external_mip_script=use_external_mip_script,
    ipolicy=full_ipolicy,
    hv_state_static=hv_state,
    disk_state_static=disk_state,
    enabled_disk_templates=enabled_disk_templates,
    candidate_certs=candidate_certs,
    osparams={},
    osparams_private_cluster={},
    install_image=install_image,
    zeroing_image=zeroing_image,
    compression_tools=compression_tools,
    enabled_user_shutdown=enabled_user_shutdown,
    ssh_key_type=ssh_key_type,
    ssh_key_bits=ssh_key_bits,
    )
  master_node_config = objects.Node(name=hostname.name,
                                    primary_ip=hostname.ip,
                                    secondary_ip=secondary_ip,
                                    serial_no=1,
                                    master_candidate=True,
                                    offline=False, drained=False,
                                    ctime=now, mtime=now,
                                    )
  InitConfig(constants.CONFIG_VERSION, cluster_config, master_node_config)
  cfg = config.ConfigWriter(offline=True)
  ssh.WriteKnownHostsFile(cfg, pathutils.SSH_KNOWN_HOSTS_FILE)
  cfg.Update(cfg.GetClusterInfo(), logging.error)
  ssconf.WriteSsconfFiles(cfg.GetSsconfValues())

  master_uuid = cfg.GetMasterNode()
  if modify_ssh_setup:
    ssh.InitPubKeyFile(master_uuid, ssh_key_type)
  # set up the inter-node password and certificate
  _InitGanetiServerSetup(hostname.name, cfg)

  logging.debug("Starting daemons")
  result = utils.RunCmd([pathutils.DAEMON_UTIL, "start-all"])
  if result.failed:
    raise errors.OpExecError("Could not start daemons, command %s"
                             " had exitcode %s and error %s" %
                             (result.cmd, result.exit_code, result.output))

  _WaitForMasterDaemon()
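
InitCluster brackets its external commands with logging.debug calls ("Stopping daemons...", "Starting daemons"), so the last debug line identifies which step was in flight if something fails. A hedged sketch of the same bracketing pattern, using the standard subprocess module in place of Ganeti's utils.RunCmd:

import logging
import subprocess

logging.basicConfig(level=logging.DEBUG)

def run_step(description, cmd):
    # Log before running so a hang or crash can be attributed to this step.
    logging.debug("%s (running: %s)", description, cmd)
    result = subprocess.run(cmd, capture_output=True, text=True)
    if result.returncode != 0:
        raise RuntimeError("command %s had exitcode %s and error '%s'"
                           % (cmd, result.returncode, result.stderr.strip()))
    return result

run_step("Stopping daemons (if any are running)", ["true"])  # placeholder command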

Example 187

Project: BigJob
Source File: bigjob_agent.py
View license
    def execute_job(self, job_url, job_dict):
        """ obtain job attributes from c&c and execute process """
        state=str(job_dict["state"])
        
        if(state==str(bigjob.state.Unknown) or
            state==str(bigjob.state.New)):
            try:
                #job_dict["state"]=str(saga.job.New)  
                job_id = job_dict["job-id"]              
                logger.debug("Start job id %s specification %s: "%(job_id, str(job_dict)))        
                numberofprocesses = "1"
                try:
                    if (job_dict.has_key("NumberOfProcesses") == True):
                        numberofprocesses = job_dict["NumberOfProcesses"]
                except:
                    pass # ignore in particular if Bliss is used
                
                spmdvariation="single"
                try:
                    if (job_dict.has_key("SPMDVariation") == True):
                        spmdvariation = job_dict["SPMDVariation"]
                except:
                    pass  # ignore in particular if Bliss is used
                
                arguments = ""
                if (job_dict.has_key("Arguments") == True):
                    arguments_raw = job_dict['Arguments'];
                    if type(arguments_raw) == types.ListType:
                        arguments_list = arguments_raw
                    else:
                        arguments_list = eval(job_dict["Arguments"])                    
                    for i in arguments_list:
                        arguments = arguments + " " + str(i)
                        
                environment = os.environ
                envi = ""
                self.number_subjobs=1
                if (job_dict.has_key("Environment") == True):
                    env_raw = job_dict['Environment']
                    if type(env_raw) == types.ListType:
                        env_list = env_raw
                    else:
                        env_list = eval(job_dict["Environment"])

                    logger.debug("Environment: " + str(env_list))
                    for i in env_list:
                        logger.debug("Eval " + i)
                        # Hack for conducting experiments on Kraken
                        # Kraken specific support for running n sub-jobs at a time
                        if i.startswith("NUMBER_SUBJOBS"):
                            self.number_subjobs=int(i.split("=")[1].strip())
                            logger.debug("NUMBER_SUBJOBS: " + str(self.number_subjobs))
                        else:
                            envi_1 = "export " + i +"; "
                            envi = envi + envi_1
                            logger.debug(envi) 
                
                executable = job_dict["Executable"]
                executable = self.__expand_directory(executable)
                
                workingdirectory = os.path.join(os.getcwd(), job_id)  
                if (job_dict.has_key("WorkingDirectory") == True):
                        workingdirectory =  job_dict["WorkingDirectory"]
                        workingdirectory = self.__expand_directory(workingdirectory)
                try:
                    os.makedirs(workingdirectory)
                except:
                    logger.debug("Directory %s already exists."%workingdirectory)
                logging.debug("Sub-Job: %s, Working_directory: %s"%(job_id, workingdirectory))
                
                output="stdout"
                if (job_dict.has_key("Output") == True):
                    output = job_dict["Output"]
                if not os.path.isabs(output):
                    output=os.path.join(workingdirectory, output)
                    
                error=os.path.join(workingdirectory,"stderr")
                if (job_dict.has_key("Error") == True):
                    error = job_dict["Error"]
                if not os.path.isabs(error):
                    error=os.path.join(workingdirectory, error)
                
                                
                # append job to job list
                self.jobs.append(job_url)

                ####################################################################################################### 
                # special setup for MPI NAMD jobs
                machinefile = self.allocate_nodes(job_dict)
                host = "localhost"
                try:
                    machine_file_handler = open(machinefile, "r")
                    node= machine_file_handler.readlines()
                    machine_file_handler.close()
                    host = node[0].strip()
                except:
                    pass


                if(machinefile==None):
                    logger.debug("Not enough resources to run: " + job_url)
                    self.coordination.set_job_state(job_url, str(bigjob.state.New))
                    self.coordination.queue_job(self.base_url, job_url)
                    return # job cannot be run at the moment
                
                ####################################################################################################### 
                # File Stage-In of dependent data units
                if job_dict.has_key("InputData"):
                    self.coordination.set_job_state(job_url, str(bigjob.state.Staging))
                    self.__stage_in_data_units(eval(job_dict["InputData"]), workingdirectory)
                
                # File Stage-In - Move pilot-level files to working directory of sub-job
                if self.pilot_description!=None:
                    try:
                        if self.pilot_description.has_key("description"):
                            file_list = eval(self.pilot_description["description"])
                            if file_list != None and len(file_list)>0:
                                logger.debug("Copy %d files to SJ work dir"%len(file_list)>0)
                                for i in file_list:
                                    logger.debug("Process file: %s"%i)
                                    if i.find(">")>0:
                                        base_filename = os.path.basename(i[:i.index(">")].strip())
                                        if environment.has_key("_CONDOR_SCRATCH_DIR"):
                                            source_filename = os.path.join(environment["_CONDOR_SCRATCH_DIR"], base_filename)
                                        else:
                                            source_filename = os.path.join(self.work_dir, base_filename)
                                        target_filename = os.path.join(workingdirectory, base_filename)
                                        try:
                                            logger.debug("Copy: %s to %s"%(source_filename, target_filename))
                                            shutil.copyfile(source_filename, target_filename)                
                                        except:
                                            logger.error("Error copy: %s to %s"%(source_filename, target_filename))
                    except:
                        logger.debug("Moving of stage-in files failed.")
                
                # create stdout/stderr file descriptors
                output_file = os.path.abspath(output)
                error_file = os.path.abspath(error)
                logger.debug("stdout: " + output_file + " stderr: " + error_file)
                stdout = open(output_file, "w")
                stderr = open(error_file, "w")
                # build execution command
                if self.LAUNCH_METHOD=="aprun":                    
                    if (spmdvariation.lower()=="mpi"):
                        command = envi + "aprun  -n " + str(numberofprocesses) + " " + executable + " " + arguments                   
                    else:
                        #env_strip = envi.strip()
                        #env_command = env_strip[:(len(env_strip)-1)]
                        command = envi + "aprun  -n " + str(self.number_subjobs) + " -d " + numberofprocesses + " " + executable + " " + arguments

                    # MPMD Mode => all subjobs on Kraken fail because aprun returns 1 as returncode
                    #command = "aprun"
                    #for i in range(0, self.number_subjobs):
                    #    command = command +   " -d " + numberofprocesses + " " + executable + " " + arguments  
                    #    # + " 1 > "+ str(i)+ "-out.txt " + " 2 > "+ str(i)+ "-err.txt"
                    #    if i != self.number_subjobs-1:
                    #        command = command + " : "
                elif self.LAUNCH_METHOD=="ibrun" and spmdvariation.lower()=="mpi": 
                    # Non MPI launch is handled via standard SSH
                    command = envi + "mpirun_rsh   -np " +str(numberofprocesses) + " -hostfile " + machinefile + "  `build_env.pl` " + executable + " " + arguments
                elif (spmdvariation.lower()!="mpi"):
                    command =  envi + executable + " " + arguments
                    # In particular for Condor - if executable is staged x flag is not set
                    #command ="chmod +x " + executable +";export PATH=$PATH:" + workingdirectory + ";" +command                    
                else:
                    # Environment variables need to be handled later!
                    command =  envi + executable + " " + arguments

                # add working directory and ssh command
                if self.LAUNCH_METHOD == "aprun" or (self.LAUNCH_METHOD== "ibrun" and spmdvariation.lower()=="mpi"):
                    command ="cd " + workingdirectory + "; " + command
                elif self.LAUNCH_METHOD == "local":
                    command ="cd " + workingdirectory + "; " + command
                else: # ssh launch is default
                    if (spmdvariation.lower( )=="mpi"):
                        command = "cd " + workingdirectory + "; " + envi +  self.MPIRUN + " -np " + numberofprocesses + " -machinefile " + machinefile + " " + executable + " " + arguments
                    elif host == "localhost":
                        command ="cd " + workingdirectory + "; " + command
                    else:    
                        command ="ssh  " + host + " \'cd " + workingdirectory + "; " + command +"\'"
                        
                
                # start application process                    
                shell = self.SHELL 
                logger.debug("execute: " + command + " in " + workingdirectory + " from: " + str(socket.gethostname()) + " (Shell: " + shell +")")
                # bash works fine for launching on QB but fails for Abe :-(
                p = subprocess.Popen(args=command, executable=shell, stderr=stderr,
                                     stdout=stdout, cwd=workingdirectory, 
                                     env=environment, shell=True)
                logger.debug("started " + command)
                self.processes[job_url] = p
                self.coordination.set_job_state(job_url, str(bigjob.state.Running))
            except:
                traceback.print_exc(file=sys.stderr)
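
The agent records each sub-job's working directory with logging.debug right after creating it, and falls back to a debug message when the directory already exists. A stripped-down sketch of just that step; the job id and directory layout are invented:

import logging
import os

logging.basicConfig(level=logging.DEBUG)

def prepare_workdir(job_id):
    workingdirectory = os.path.join(os.getcwd(), job_id)
    try:
        os.makedirs(workingdirectory)
    except OSError:
        logging.debug("Directory %s already exists." % workingdirectory)
    logging.debug("Sub-Job: %s, Working_directory: %s" % (job_id, workingdirectory))
    return workingdirectory

prepare_workdir("sj-42")  # hypothetical job id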

Example 188

Project: capirca
Source File: aclgen.py
View license
def RenderFile(input_file, output_directory, definitions,
               exp_info, write_files):
  """Render a single file.

  Args:
    input_file: the name of the input policy file.
    output_directory: the directory in which we place the rendered file.
    definitions: the definitions from naming.Naming().
    exp_info: print a info message when a term is set to expire
              in that many weeks.
    write_files: a list of file tuples, (output_file, acl_text), to write
  """
  logging.debug('rendering file: %s into %s', input_file,
                output_directory)
  pol = None
  jcl = False
  acl = False
  asacl = False
  aacl = False
  bacl = False
  eacl = False
  gcefw = False
  ips = False
  ipt = False
  spd = False
  nsx = False
  pcap_accept = False
  pcap_deny = False
  pf = False
  srx = False
  jsl = False
  nft = False
  win_afw = False
  xacl = False

  try:
    conf = open(input_file).read()
    logging.debug('opened and read %s', input_file)
  except IOError as e:
    logging.warn('bad file: \n%s', e)
    raise

  try:
    pol = policy.ParsePolicy(
        conf, definitions, optimize=FLAGS.optimize,
        base_dir=FLAGS.base_directory, shade_check=FLAGS.shade_check)
  except policy.ShadingError as e:
    logging.warn('shading errors for %s:\n%s', input_file, e)
    return
  except (policy.Error, naming.Error):
    raise ACLParserError('Error parsing policy file %s:\n%s%s' % (
        input_file, sys.exc_info()[0], sys.exc_info()[1]))

  platforms = set()
  for header in pol.headers:
    platforms.update(header.platforms)

  if 'juniper' in platforms:
    jcl = copy.deepcopy(pol)
  if 'cisco' in platforms:
    acl = copy.deepcopy(pol)
  if 'ciscoasa' in platforms:
    asacl = copy.deepcopy(pol)
  if 'brocade' in platforms:
    bacl = copy.deepcopy(pol)
  if 'arista' in platforms:
    eacl = copy.deepcopy(pol)
  if 'aruba' in platforms:
    aacl = copy.deepcopy(pol)
  if 'ipset' in platforms:
    ips = copy.deepcopy(pol)
  if 'iptables' in platforms:
    ipt = copy.deepcopy(pol)
  if 'nsxv' in platforms:
    nsx = copy.deepcopy(pol)
  if 'packetfilter' in platforms:
    pf = copy.deepcopy(pol)
  if 'pcap' in platforms:
    pcap_accept = copy.deepcopy(pol)
    pcap_deny = copy.deepcopy(pol)
  if 'speedway' in platforms:
    spd = copy.deepcopy(pol)
  if 'srx' in platforms:
    srx = copy.deepcopy(pol)
  if 'srxlo' in platforms:
    jsl = copy.deepcopy(pol)
  if 'windows_advfirewall' in platforms:
    win_afw = copy.deepcopy(pol)
  if 'ciscoxr' in platforms:
    xacl = copy.deepcopy(pol)
  if 'nftables' in platforms:
    nft = copy.deepcopy(pol)
  if 'gce' in platforms:
    gcefw = copy.deepcopy(pol)

  if not output_directory.endswith('/'):
    output_directory += '/'

  try:
    if jcl:
      acl_obj = juniper.Juniper(jcl, exp_info)
      RenderACL(str(acl_obj), acl_obj.SUFFIX, output_directory,
                input_file, write_files)
    if srx:
      acl_obj = junipersrx.JuniperSRX(srx, exp_info)
      RenderACL(str(acl_obj), acl_obj.SUFFIX, output_directory,
                input_file, write_files)
    if acl:
      acl_obj = cisco.Cisco(acl, exp_info)
      RenderACL(str(acl_obj), acl_obj.SUFFIX, output_directory,
                input_file, write_files)
    if asacl:
      acl_obj = ciscoasa.CiscoASA(acl, exp_info)
      RenderACL(str(acl_obj), acl_obj.SUFFIX, output_directory,
                input_file, write_files)
    if aacl:
      acl_obj = aruba.Aruba(aacl, exp_info)
      RenderACL(str(acl_obj), acl_obj.SUFFIX, output_directory,
                input_file, write_files)
    if bacl:
      acl_obj = brocade.Brocade(bacl, exp_info)
      RenderACL(str(acl_obj), acl_obj.SUFFIX, output_directory,
                input_file, write_files)
    if eacl:
      acl_obj = arista.Arista(eacl, exp_info)
      RenderACL(str(acl_obj), acl_obj.SUFFIX, output_directory,
                input_file, write_files)
    if ips:
      acl_obj = ipset.Ipset(ips, exp_info)
      RenderACL(str(acl_obj), acl_obj.SUFFIX, output_directory,
                input_file, write_files)
    if ipt:
      acl_obj = iptables.Iptables(ipt, exp_info)
      RenderACL(str(acl_obj), acl_obj.SUFFIX, output_directory,
                input_file, write_files)
    if nsx:
      acl_obj = nsxv.Nsxv(nsx, exp_info)
      RenderACL(str(acl_obj), acl_obj.SUFFIX, output_directory,
                input_file, write_files)
    if spd:
      acl_obj = speedway.Speedway(spd, exp_info)
      RenderACL(str(acl_obj), acl_obj.SUFFIX, output_directory,
                input_file, write_files)
    if pcap_accept:
      acl_obj = pcap.PcapFilter(pcap_accept, exp_info)
      RenderACL(str(acl_obj), '-accept' + acl_obj.SUFFIX, output_directory,
                input_file, write_files)
    if pcap_deny:
      acl_obj = pcap.PcapFilter(pcap_deny, exp_info, invert=True)
      RenderACL(str(acl_obj), '-deny' + acl_obj.SUFFIX, output_directory,
                input_file, write_files)
    if pf:
      acl_obj = packetfilter.PacketFilter(pf, exp_info)
      RenderACL(str(acl_obj), acl_obj.SUFFIX, output_directory,
                input_file, write_files)
    if win_afw:
      acl_obj = windows_advfirewall.WindowsAdvFirewall(win_afw, exp_info)
      RenderACL(str(acl_obj), acl_obj.SUFFIX, output_directory,
                input_file, write_files)
    if jsl:
      acl_obj = srxlo.SRXlo(jsl, exp_info)
      RenderACL(str(acl_obj), acl_obj.SUFFIX, output_directory,
                input_file, write_files)
    if xacl:
      acl_obj = ciscoxr.CiscoXR(xacl, exp_info)
      RenderACL(str(acl_obj), acl_obj.SUFFIX, output_directory,
                input_file, write_files)
    if nft:
      acl_obj = nftables.Nftables(nft, exp_info)
      RenderACL(str(acl_obj), acl_obj.SUFFIX, output_directory,
                input_file, write_files)
    if gcefw:
      acl_obj = gce.GCE(gcefw, exp_info)
      RenderACL(str(acl_obj), acl_obj.SUFFIX, output_directory,
                input_file, write_files)
  # TODO(robankeny) add additional errors.
  except (juniper.Error, junipersrx.Error, cisco.Error, ipset.Error,
          iptables.Error, speedway.Error, pcap.Error,
          aclgenerator.Error, aruba.Error, nftables.Error, gce.Error):
    raise ACLGeneratorError('Error generating target ACL for %s:\n%s%s' % (
        input_file, sys.exc_info()[0], sys.exc_info()[1]))
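
RenderFile passes the format string and its arguments to logging.debug separately, so the interpolation is deferred until the logging framework knows the record will actually be emitted. A short sketch contrasting that lazy style with eager %-formatting:

import logging

logging.basicConfig(level=logging.INFO)  # DEBUG records are discarded here

input_file, output_directory = 'policies/sample.pol', 'filters/'

# Lazy %-style arguments: the string is only built if DEBUG is enabled.
logging.debug('rendering file: %s into %s', input_file, output_directory)

# Eager formatting: the string is built even though the record is then dropped.
logging.debug('rendering file: %s into %s' % (input_file, output_directory))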

Example 189

Project: avocado-vt
Source File: qemu_virtio_port.py
View license
    def run_debug(self):
        """
        Like run_normal, but additionally it stores the last n verified
        characters and, in case of failure, quickly receives enough data to
        verify the failure or allowed loss and then analyzes this data. It
        provides more info about the situation.
        Unlike the normal run, this one supports both loss and duplication.
        It's not friendly to data corruption.
        """
        logging.debug("ThRecvCheck %s: run", self.getName())
        attempt = 10
        max_loss = 0
        sum_loss = 0
        verif_buf = deque(maxlen=max(self.blocklen, self.sendlen))
        while not self.exitevent.isSet():
            ret = select.select([self.port.sock], [], [], 1.0)
            if ret[0] and (not self.exitevent.isSet()):
                buf = self.port.sock.recv(self.blocklen)
                if buf:
                    # Compare the received data with the control data
                    for idx_char in xrange(len(buf)):
                        _char = self.buff.popleft()
                        if buf[idx_char] == _char:
                            self.idx += 1
                            verif_buf.append(_char)
                        else:
                            # Detect the duplicated/lost characters.
                            logging.debug("ThRecvCheck %s: fail to receive "
                                          "%dth character.", self.getName(),
                                          self.idx)
                            buf = buf[idx_char:]
                            for i in xrange(100):
                                if len(self.buff) < self.sendidx:
                                    time.sleep(0.01)
                                else:
                                    break
                            sendidx = min(self.sendidx, len(self.buff))
                            if sendidx < self.sendidx:
                                logging.debug("ThRecvCheck %s: sendidx was "
                                              "lowered as there is not enough "
                                              "data after 1s. Using sendidx="
                                              "%s.", self.getName(), sendidx)
                            for _ in xrange(sendidx / self.blocklen):
                                if self.exitevent.isSet():
                                    break
                                buf += self.port.sock.recv(self.blocklen)
                            queue = _char
                            for _ in xrange(sendidx):
                                queue += self.buff[_]
                            offset_a = None
                            offset_b = None
                            for i in xrange(sendidx):
                                length = min(len(buf[i:]), len(queue))
                                if buf[i:] == queue[:length]:
                                    offset_a = i
                                    break
                            for i in xrange(sendidx):
                                length = min(len(queue[i:]), len(buf))
                                if queue[i:][:length] == buf[:length]:
                                    offset_b = i
                                    break

                            if (offset_b and offset_b < offset_a) or offset_a:
                                # Data duplication
                                self.sendidx -= offset_a
                                max_loss = max(max_loss, offset_a)
                                sum_loss += offset_a
                                logging.debug("ThRecvCheck %s: DUP %s (out of "
                                              "%s)", self.getName(), offset_a,
                                              sendidx)
                                buf = buf[offset_a + 1:]
                                for _ in xrange(len(buf)):
                                    self.buff.popleft()
                                verif_buf.extend(buf)
                                self.idx += len(buf)
                            elif offset_b:  # Data loss
                                max_loss = max(max_loss, offset_b)
                                sum_loss += offset_b
                                logging.debug("ThRecvCheck %s: LOST %s (out of"
                                              " %s)", self.getName(), offset_b,
                                              sendidx)
                                # Pop-out the lost characters from verif_queue
                                # (first one is already out)
                                self.sendidx -= offset_b
                                for i in xrange(offset_b - 1):
                                    self.buff.popleft()
                                for _ in xrange(len(buf)):
                                    self.buff.popleft()
                                self.idx += len(buf)
                                verif_buf.extend(buf)
                            else:   # Too big data loss or duplication
                                verif = ""
                                for _ in xrange(-min(sendidx, len(verif_buf)),
                                                0):
                                    verif += verif_buf[_]
                                logging.error("ThRecvCheck %s: mismatched data"
                                              ":\nverified: ..%s\nreceived:   "
                                              "%s\nsent:       %s",
                                              self.getName(), repr(verif),
                                              repr(buf), repr(queue))
                                raise exceptions.TestFail("Recv and sendqueue "
                                                          "don't match with any offset.")
                            # buf was changed, break from this loop
                            attempt = 10
                            break
                    attempt = 10
                else:   # ! buf
                    # Broken socket
                    if attempt > 0:
                        attempt -= 1
                        if self.migrate_event is None:
                            self.exitevent.set()
                            raise exceptions.TestFail("ThRecvCheck %s: Broken pipe."
                                                      " If this is expected behavior set migrate"
                                                      "_event to support reconnection." %
                                                      self.getName())
                        logging.debug("ThRecvCheck %s: Broken pipe "
                                      ", reconnecting. ", self.getName())
                        self.reload_loss_idx()
                        # Wait until main thread sets the new self.port
                        while not (self.exitevent.isSet() or
                                   self.migrate_event.wait(1)):
                            pass
                        if self.exitevent.isSet():
                            break
                        logging.debug("ThRecvCheck %s: Broken pipe resumed, "
                                      "reconnecting...", self.getName())

                        self.port.sock = False
                        self.port.open()
        if self.sendidx >= 0:
            self.minsendidx = min(self.minsendidx, self.sendidx)
        if (self.sendlen - self.minsendidx):
            logging.debug("ThRecvCheck %s: Data loss occurred during socket"
                          "reconnection. Maximal loss was %d per one "
                          "migration.", self.getName(),
                          (self.sendlen - self.minsendidx))
        if sum_loss > 0:
            logging.debug("ThRecvCheck %s: Data offset detected, cumulative "
                          "err: %d, max err: %d(%d)", self.getName(), sum_loss,
                          max_loss, float(max_loss) / self.blocklen)
        logging.debug("ThRecvCheck %s: exit(%d)", self.getName(),
                      self.idx)
        self.ret_code = 0
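
The debug calls in run_debug pass their format arguments separately (logging.debug("... %s ...", value)) instead of pre-formatting the string, so interpolation only happens when the DEBUG level is actually enabled. A minimal, self-contained sketch of that convention (check_byte is a hypothetical helper, not part of avocado-vt):

import logging

logging.basicConfig(level=logging.DEBUG)

def check_byte(name, idx, received, expected):
    # The arguments are interpolated only if DEBUG is enabled, so the call
    # costs almost nothing in non-debug runs.
    if received != expected:
        logging.debug("ThRecvCheck %s: fail to receive %dth character.", name, idx)
        return False
    return True

check_byte("vs1", 42, b"a", b"b")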

Example 190

Project: temci
Source File: cli.py
View license
@document_func(misc_commands_description["completion"]["bash"], common_options)
def temci__completion__bash():
    subcommands = "\n\t".join(sorted(command_docs.keys()))

    def process_options(options: CmdOptionList) -> str:
        typecheck(options, CmdOptionList)
        strs = []
        for option in sorted(options.options):
            strs.append("--" + option.option_name)
            if option.short is not None:
                strs.append("-" + option.short)
            if option.is_flag:
                strs.append("--no-" + option.option_name)
        return "\n\t".join(strs)

    def process_misc_commands():
        ret_str = ""
        for misc_cmd in misc_commands:
            if "sub_commands" not in misc_commands[misc_cmd]:
                continue
            ret_str += """
                case ${{COMP_WORDS[1]}} in
                {misc_cmd})
                    case ${{COMP_WORDS[2]}} in
            """.format(misc_cmd=misc_cmd)
            for sub_cmd in misc_commands[misc_cmd]["sub_commands"].keys():
                ret_str += """
                        {sub_cmd})
                            args=(
                                ${{common_opts[@]}}
                                {common_opts}
                                {cmd_ops}
                            )
                            # printf '   _%s ' "${{args[@]}}" >> /tmp/out
                            # printf '   __%s ' "${{args[*]}}" >> /tmp/out
                            COMPREPLY=( $(compgen -W "${{args[*]}}" -- $cur) ) && return 0
                        ;;
                """.format(sub_cmd=sub_cmd,
                           cmd_ops=process_options(misc_commands[misc_cmd]["sub_commands"][sub_cmd]),
                           common_opts=process_options(misc_commands[misc_cmd]["common"]))
            ret_str += """
                        *)
                            local args=( )
                            COMPREPLY=( $(compgen -W "" -- $cur) ) && return 0
                    esac
                    ;;
                *)
                ;;
              esac
            """
        return ret_str

    def process_misc_commands_case():
        ret_str = ""
        for misc_cmd in misc_commands:
            args = []
            if "sub_commands" in misc_commands[misc_cmd]:
                args = " ".join(sorted(misc_commands[misc_cmd]["sub_commands"].keys()))
            else:
                typecheck(misc_commands[misc_cmd], CmdOptionList)
                args = process_options(misc_commands[misc_cmd].append(common_options))
            ret_str += """
            {misc_cmd})
                args=({sub_cmds})
                ;;
            """.format(misc_cmd=misc_cmd, sub_cmds=args)
        return ret_str

    run_cmd_file_code = ""
    for driver in run_driver.RunDriverRegistry.registry:
        run_cmd_file_code += """
            {driver})
                case ${{COMP_WORDS[2]}} in
                *.yaml)
                    args=(
                        $common_opts
                        $run_common_opts
                        {driver_opts}
                    )
                    COMPREPLY=( $(compgen -W "${{args[*]}}" -- $cur) ) && return 0
                ;;
                esac
                ;;
        """.format(driver=driver, driver_opts=process_options(run_options["run_driver_specific"][driver]))

    file_structure = """
    # Auto generated tab completion for the temci ({version}) benchmarking tool.


    _temci(){{
        local cur=${{COMP_WORDS[COMP_CWORD]}}
        local prev=${{COMP_WORDS[COMP_CWORD-1]}}

        local common_opts=(
            {common_opts}
        )
        local args=(
            {common_opts}
        )
        local run_common_opts=(
            {run_common_opts}
        )
        local report_common_opts=(
            {report_common_opts}
        )
        local build_common_opts=(
            {build_common_opts}
        )

        {misc_commands_code}

        case ${{COMP_WORDS[1]}} in
            report)
                case ${{COMP_WORDS[2]}} in
                *.yaml)
                    args=(
                        $common_opts
                        $report_common_opts
                    )
                    COMPREPLY=( $(compgen -W "${{args[*]}}" -- $cur) ) && return 0
                ;;
                esac
                ;;
            build)
                case ${{COMP_WORDS[2]}} in
                *.yaml)
                    args=(
                        $common_opts
                        $build_common_opts
                    )
                    COMPREPLY=( $(compgen -W "${{args[*]}}" -- $cur) ) && return 0
                ;;
                esac
                ;;
            run_package|exec_package)
                case ${{COMP_WORDS[2]}} in
                *.temci)
                    args=(
                        $common_opts
                        {package_opts}
                    )
                    COMPREPLY=( $(compgen -W "${{args[*]}}" -- $cur) ) && return 0
                ;;
                esac
                ;;
            {run_cmd_file_code}
            *)
            ;;
        esac

        case ${{COMP_WORDS[1]}} in
            (report|build|{run_drivers})
                local IFS=$'\n'
                local LASTCHAR=' '
                COMPREPLY=($(compgen -o plusdirs -o nospace -f -X '!*.yaml' -- "${{COMP_WORDS[COMP_CWORD]}}"))

                if [ ${{#COMPREPLY[@]}} = 1 ]; then
                    [ -d "$COMPREPLY" ] && LASTCHAR=/
                    COMPREPLY=$(printf %q%s "$COMPREPLY" "$LASTCHAR")
                else
                    for ((i=0; i < ${{#COMPREPLY[@]}}; i++)); do
                        [ -d "${{COMPREPLY[$i]}}" ] && COMPREPLY[$i]=${{COMPREPLY[$i]}}/
                    done
                fi
                return 0
                ;;
            (run_package|exec_package)
                local IFS=$'\n'
                local LASTCHAR=' '
                COMPREPLY=($(compgen -o plusdirs -o nospace -f -X '!*.temci' -- "${{COMP_WORDS[COMP_CWORD]}}"))

                if [ ${{#COMPREPLY[@]}} = 1 ]; then
                    [ -d "$COMPREPLY" ] && LASTCHAR=/
                    COMPREPLY=$(printf %q%s "$COMPREPLY" "$LASTCHAR")
                else
                    for ((i=0; i < ${{#COMPREPLY[@]}}; i++)); do
                        [ -d "${{COMPREPLY[$i]}}" ] && COMPREPLY[$i]=${{COMPREPLY[$i]}}/
                    done
                fi
                return 0
                ;;
            {misc_commands_case_code}
            *)
                args=({commands})
        esac
        COMPREPLY=( $(compgen -W "${{args[*]}}" -- $cur) )
    }}
    shopt -s extglob
    complete -F _temci temci
    """.format(common_opts=process_options(common_options),
               run_common_opts=process_options(run_options["common"]),
               report_common_opts=process_options(report_options),
               commands=" ".join(sorted(command_docs.keys())),
               run_drivers="|".join(run_options["run_driver_specific"].keys()),
               misc_commands_case_code=process_misc_commands_case(),
               misc_commands_code=process_misc_commands(),
               build_common_opts=process_options(build_options),
               run_cmd_file_code=run_cmd_file_code,
               version=temci.scripts.version.version,
               package_opts=process_options(package_options))
    create_completion_dir()
    file_name = completion_file_name("bash")
    with open(file_name, "w") as f:
        f.write(file_structure)
        logging.debug("\n".join("{:>3}: {}".format(i, s) for (i, s) in enumerate(file_structure.split("\n"))))
        f.flush()
    os.chmod(file_name, 0o777)
    print(file_name)
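
The final logging.debug call above dumps the generated completion script with right-aligned line numbers, which makes template errors easy to locate in the debug log. A short, self-contained sketch of that numbered-dump idiom (the script text here is a placeholder):

import logging

logging.basicConfig(level=logging.DEBUG)

# Placeholder for the generated completion script.
file_structure = "# Auto generated tab completion\n_temci(){\n    :\n}\ncomplete -F _temci temci"

logging.debug("\n".join("{:>3}: {}".format(i, s)
                        for i, s in enumerate(file_structure.split("\n"))))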

Example 191

Project: tp-qemu
Source File: whql_submission.py
View license
def run(test, params, env):
    """
    WHQL submission test:
    1) Log into the client machines and into a DTM server machine
    2) Copy the automation program binary (dsso_test_binary) to the server machine
    3) Run the automation program
    4) Pass the program all relevant parameters (e.g. device_data)
    5) Wait for the program to terminate
    6) Parse and report job results
    (logs and HTML reports are placed in test.debugdir)

    :param test: kvm test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    # Log into all client VMs
    login_timeout = int(params.get("login_timeout", 360))
    vms = []
    sessions = []
    for vm_name in params.objects("vms"):
        vms.append(env.get_vm(vm_name))
        vms[-1].verify_alive()
        sessions.append(vms[-1].wait_for_login(timeout=login_timeout))

    # Make sure all NICs of all client VMs are up
    for vm in vms:
        nics = vm.params.objects("nics")
        for nic_index in range(len(nics)):
            s = vm.wait_for_login(nic_index, 600)
            s.close()

    # Collect parameters
    server_address = params.get("server_address")
    server_shell_port = int(params.get("server_shell_port"))
    server_file_transfer_port = int(params.get("server_file_transfer_port"))
    server_studio_path = params.get("server_studio_path", "%programfiles%\\ "
                                    "Microsoft Driver Test Manager\\Studio")
    dsso_test_binary = params.get("dsso_test_binary",
                                  "deps/whql_submission_15.exe")
    dsso_test_binary = utils_misc.get_path(test.virtdir, dsso_test_binary)
    dsso_delete_machine_binary = params.get("dsso_delete_machine_binary",
                                            "deps/whql_delete_machine_15.exe")
    dsso_delete_machine_binary = utils_misc.get_path(test.virtdir,
                                                     dsso_delete_machine_binary)
    test_timeout = float(params.get("test_timeout", 600))

    # Copy dsso binaries to the server
    for filename in dsso_test_binary, dsso_delete_machine_binary:
        rss_client.upload(server_address, server_file_transfer_port,
                          filename, server_studio_path, timeout=60)

    # Open a shell session with the server
    server_session = remote.remote_login("nc", server_address,
                                         server_shell_port, "", "",
                                         sessions[0].prompt,
                                         sessions[0].linesep)
    server_session.set_status_test_command(sessions[0].status_test_command)

    # Get the computer names of the server and clients
    cmd = "echo %computername%"
    server_name = server_session.cmd_output(cmd).strip()
    client_names = [session.cmd_output(cmd).strip() for session in sessions]

    # Delete all client machines from the server's data store
    server_session.cmd("cd %s" % server_studio_path)
    for client_name in client_names:
        cmd = "%s %s %s" % (os.path.basename(dsso_delete_machine_binary),
                            server_name, client_name)
        server_session.cmd(cmd, print_func=logging.debug)

    # Reboot the client machines
    sessions = utils_misc.parallel((vm.reboot, (session,))
                                   for vm, session in zip(vms, sessions))

    # Check the NICs again
    for vm in vms:
        nics = vm.params.objects("nics")
        for nic_index in range(len(nics)):
            s = vm.wait_for_login(nic_index, 600)
            s.close()

    # Run whql_pre_command and close the sessions
    if params.get("whql_pre_command"):
        for session in sessions:
            session.cmd(params.get("whql_pre_command"),
                        int(params.get("whql_pre_command_timeout", 600)))
            session.close()

    # Run the automation program on the server
    pool_name = "%s_pool" % client_names[0]
    submission_name = "%s_%s" % (client_names[0],
                                 params.get("submission_name"))
    cmd = "%s %s %s %s %s %s" % (os.path.basename(dsso_test_binary),
                                 server_name, pool_name, submission_name,
                                 test_timeout, " ".join(client_names))
    server_session.sendline(cmd)

    # Helper function: wait for a given prompt and raise an exception if an
    # error occurs
    def find_prompt(prompt):
        m, o = server_session.read_until_last_line_matches(
            [prompt, server_session.prompt], print_func=logging.info,
            timeout=600)
        if m != 0:
            errors = re.findall("^Error:.*$", o, re.I | re.M)
            if errors:
                raise error.TestError(errors[0])
            else:
                raise error.TestError("Error running automation program: "
                                      "could not find '%s' prompt" % prompt)

    # Tell the automation program which device to test
    find_prompt("Device to test:")
    server_session.sendline(params.get("test_device"))

    # Tell the automation program which jobs to run
    find_prompt("Jobs to run:")
    server_session.sendline(params.get("job_filter", ".*"))

    # Set submission DeviceData
    find_prompt("DeviceData name:")
    for dd in params.objects("device_data"):
        dd_params = params.object_params(dd)
        if dd_params.get("dd_name") and dd_params.get("dd_data"):
            server_session.sendline(dd_params.get("dd_name"))
            server_session.sendline(dd_params.get("dd_data"))
    server_session.sendline()

    # Set submission descriptors
    find_prompt("Descriptor path:")
    for desc in params.objects("descriptors"):
        desc_params = params.object_params(desc)
        if desc_params.get("desc_path"):
            server_session.sendline(desc_params.get("desc_path"))
    server_session.sendline()

    # Set machine dimensions for each client machine
    for vm_name in params.objects("vms"):
        vm_params = params.object_params(vm_name)
        find_prompt(r"Dimension name\b.*:")
        for dp in vm_params.objects("dimensions"):
            dp_params = vm_params.object_params(dp)
            if dp_params.get("dim_name") and dp_params.get("dim_value"):
                server_session.sendline(dp_params.get("dim_name"))
                server_session.sendline(dp_params.get("dim_value"))
        server_session.sendline()

    # Set extra parameters for tests that require them (e.g. NDISTest)
    for vm_name in params.objects("vms"):
        vm_params = params.object_params(vm_name)
        find_prompt(r"Parameter name\b.*:")
        for dp in vm_params.objects("device_params"):
            dp_params = vm_params.object_params(dp)
            if dp_params.get("dp_name") and dp_params.get("dp_regex"):
                server_session.sendline(dp_params.get("dp_name"))
                server_session.sendline(dp_params.get("dp_regex"))
                # Make sure the prompt appears again (if the device isn't found
                # the automation program will terminate)
                find_prompt(r"Parameter name\b.*:")
        server_session.sendline()

    # Wait for the automation program to terminate
    try:
        o = server_session.read_up_to_prompt(print_func=logging.info,
                                             timeout=test_timeout + 300)
        # (test_timeout + 300 is used here because the automation program is
        # supposed to terminate cleanly on its own when test_timeout expires)
        done = True
    except aexpect.ExpectError, e:
        o = e.output
        done = False
    server_session.close()

    # Look for test results in the automation program's output
    result_summaries = re.findall(r"---- \[.*?\] ----", o, re.DOTALL)
    if not result_summaries:
        raise error.TestError("The automation program did not return any "
                              "results")
    results = result_summaries[-1].strip("-")
    results = eval("".join(results.splitlines()))

    # Download logs and HTML reports from the server
    for r in results:
        if "report" in r:
            try:
                rss_client.download(server_address,
                                    server_file_transfer_port,
                                    r["report"], test.debugdir)
            except rss_client.FileTransferNotFoundError:
                pass
        if "logs" in r:
            try:
                rss_client.download(server_address,
                                    server_file_transfer_port,
                                    r["logs"], test.debugdir)
            except rss_client.FileTransferNotFoundError:
                pass
            else:
                try:
                    # Create symlinks to test log dirs to make it easier
                    # to access them (their original names are not human
                    # readable)
                    link_name = "logs_%s" % r["report"].split("\\")[-1]
                    link_name = link_name.replace(" ", "_")
                    link_name = link_name.replace("/", "_")
                    os.symlink(r["logs"].split("\\")[-1],
                               os.path.join(test.debugdir, link_name))
                except (KeyError, OSError):
                    pass

    # Print result summary (both to the regular logs and to a file named
    # 'summary' in test.debugdir)
    def print_summary_line(f, line):
        logging.info(line)
        f.write(line + "\n")
    if results:
        # Make sure all results have the required keys
        for r in results:
            r["id"] = str(r.get("id"))
            r["job"] = str(r.get("job"))
            r["status"] = str(r.get("status"))
            r["pass"] = int(r.get("pass", 0))
            r["fail"] = int(r.get("fail", 0))
            r["notrun"] = int(r.get("notrun", 0))
            r["notapplicable"] = int(r.get("notapplicable", 0))
        # Sort the results by failures and total test count in descending order
        results = [(r["fail"],
                    r["pass"] + r["fail"] + r["notrun"] + r["notapplicable"],
                    r) for r in results]
        results.sort(reverse=True)
        results = [r[-1] for r in results]
        # Print results
        logging.info("")
        logging.info("Result summary:")
        name_length = max(len(r["job"]) for r in results)
        fmt = "%%-6s %%-%ds %%-15s %%-8s %%-8s %%-8s %%-15s" % name_length
        f = open(os.path.join(test.debugdir, "summary"), "w")
        print_summary_line(f, fmt % ("ID", "Job", "Status", "Pass", "Fail",
                                     "NotRun", "NotApplicable"))
        print_summary_line(f, fmt % ("--", "---", "------", "----", "----",
                                     "------", "-------------"))
        for r in results:
            print_summary_line(f, fmt % (r["id"], r["job"], r["status"],
                                         r["pass"], r["fail"], r["notrun"],
                                         r["notapplicable"]))
        f.close()
        logging.info("(see logs and HTML reports in %s)", test.debugdir)

    # Kill the client VMs and fail if the automation program did not terminate
    # on time
    if not done:
        utils_misc.parallel(vm.destroy for vm in vms)
        raise error.TestFail("The automation program did not terminate "
                             "on time")

    # Fail if there are failed or incomplete jobs (kill the client VMs if there
    # are incomplete jobs)
    failed_jobs = [r["job"] for r in results
                   if r["status"].lower() == "investigate"]
    running_jobs = [r["job"] for r in results
                    if r["status"].lower() == "inprogress"]
    errors = []
    if failed_jobs:
        errors += ["Jobs failed: %s." % failed_jobs]
    if running_jobs:
        for vm in vms:
            vm.destroy()
        errors += ["Jobs did not complete on time: %s." % running_jobs]
    if errors:
        raise error.TestFail(" ".join(errors))
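
One logging.debug usage in this example is indirect: server_session.cmd(cmd, print_func=logging.debug) hands the function itself to the session as a callback, so every line of remote output is echoed to the debug log. A minimal sketch of that callback idiom; run_and_echo is a hypothetical stand-in, not the aexpect session API:

import logging

logging.basicConfig(level=logging.DEBUG)

def run_and_echo(output_lines, print_func=None):
    # Echo each line of "command output" through whatever callable was supplied.
    for line in output_lines:
        if print_func:
            print_func("%s", line)
    return output_lines

run_and_echo(["dsso_delete_machine_15.exe: machine removed"],
             print_func=logging.debug)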

Example 192

Project: scikit-beam
Source File: spectroscopy.py
View license
def integrate_ROI(x, y, x_min, x_max):
    """Integrate region(s) of input data.

    If `x_min` and `x_max` are arrays/lists they must be equal in
    length. The values contained in `x` must be monotonic (up or
    down).  The returned value is the sum over all regions, returned
    as a single scalar.  Each region is computed independently; if
    regions overlap, the overlapped area will be included multiple
    times in the final sum.

    This function assumes that `y` is a function of
    `x` sampled at `x`.

    Parameters
    ----------
    x : array
        Independent variable, any unit

    y : array
        Dependent variable, any units

    x_min : float or array
        The lower edge of the integration region(s)
        in units of x.

    x_max : float or array
        The upper edge of the integration region(s)
        in units of x.

    Returns
    -------
    float
        The total integrated value, in the same units as `y`
    """
    # make sure x (x-values) and y (y-values) are arrays
    x = np.asarray(x)
    y = np.asarray(y)

    if x.shape != y.shape:
        raise ValueError("Inputs (x and y) must be the same "
                         "size. x.shape = {0} and y.shape = "
                         "{1}".format(x.shape, y.shape))

    # use np.sign() on np.diff(x) to obtain an array with the sign of each
    # step in the input x array. Checks and tests are then run on this
    # sign array.
    eval_x_arr_sign = np.sign(np.diff(x))

    # check to make sure no outliers exist which violate the monotonicity
    # requirement; if any exist, the error points to the locations within
    # the source array where the exceptions occur.
    if not np.all(eval_x_arr_sign == eval_x_arr_sign[0]):
        error_locations = np.where(eval_x_arr_sign != eval_x_arr_sign[0])[0]
        raise ValueError("Independent variable must be monotonically "
                         "increasing. Erroneous values found at x-value "
                         "array index locations:\n" +
                         _formatter_array_regions(x, error_locations))

    # check whether the sign of the diffs in x is negative, i.e. whether x
    # is monotonically decreasing. If so, both x and y are reversed so that
    # x monotonically increases in value.
    if eval_x_arr_sign[0] == -1:
        x = x[::-1]
        y = y[::-1]
        logging.debug("Input values for 'x' were found to be "
                      "monotonically decreasing. The 'x' and "
                      "'y' arrays have been reversed prior to "
                      "integration.")

    # up-cast to 1d and make sure it is flat
    x_min = np.atleast_1d(x_min).ravel()
    x_max = np.atleast_1d(x_max).ravel()

    # verify that the number of minimum and maximum boundary values are equal
    if len(x_min) != len(x_max):
        raise ValueError("integration bounds must have same lengths")

    # verify that the specified minimum values are actually less than the
    # sister maximum value, and raise error if any minimum value is actually
    # greater than the sister maximum value.
    if np.any(x_min >= x_max):
        raise ValueError("All lower integration bounds must be less than "
                         "upper integration bounds.")

    # check to make sure that all specified minimum and maximum values are
    # actually contained within the extents of the independent variable array
    if np.any(x_min < x[0]):
        error_locations = np.where(x_min < x[0])[0]
        raise ValueError("Specified lower integration boundary values are "
                         "outside the spectrum range. All minimum integration "
                         "boundaries must be greater than, or equal to the "
                         "lowest value in spectrum range. The erroneous x_min_"
                         "array indices are:\n" +
                         _formatter_array_regions(x_min,
                                                  error_locations, window=0))

    if np.any(x_max > x[-1]):
        error_locations = np.where(x_max > x[-1])[0]
        raise ValueError("Specified upper integration boundary values "
                         "are outside the spectrum range. All maximum "
                         "integration boundary values must be less "
                         "than, or equal to the highest value in the spectrum "
                         "range. The erroneous x_max array indices are: "
                         "\n" +
                         _formatter_array_regions(x_max,
                                                  error_locations, window=0))

    # find the bottom index of each integration bound
    bottom_indx = x.searchsorted(x_min)
    # find the top index of each integration bound
    # NOTE: +1 required for correct slicing for integration function
    top_indx = x.searchsorted(x_max) + 1

    # set up temporary variables
    accum = 0
    # integrate each region
    for bot, top in zip(bottom_indx, top_indx):
        # Note: If an odd number of intervals is specified, then the
        # even='avg' setting calculates and averages first AND last
        # N-2 intervals using trapezoidal rule.
        # If calculation speed become an issue, then consider changing
        # setting to 'first', or 'last' in which case trap rule is only
        # applied to either first or last N-2 intervals.
        accum += simps(y[bot:top], x[bot:top], even='avg')

    return accum
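
The numeric core of integrate_ROI is a composite Simpson integration over each [x_min, x_max] slice of the sampled data. A self-contained sketch of that single step using SciPy directly (note that recent SciPy releases rename simps to simpson and drop the even keyword):

import numpy as np
from scipy.integrate import simps

x = np.linspace(0.0, 10.0, 101)
y = np.sin(x)

x_min, x_max = 2.0, 5.0
bot = x.searchsorted(x_min)
top = x.searchsorted(x_max) + 1   # +1 so the slice includes the upper bound

area = simps(y[bot:top], x[bot:top], even='avg')
print(area)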

Example 193

Project: capirca
Source File: nsxv.py
View license
  def __str__(self):
    """Convert term to a rule string.

    Returns:
      A rule as a string.

    Raises:
      NsxvAclTermError: When unknown icmp-types are specified

    """
    # Verify platform specific terms. Skip whole term if platform does not
    # match.
    if self.term.platform:
      if 'nsxv' not in self.term.platform:
        return ''
    if self.term.platform_exclude:
      if 'nsxv' in self.term.platform_exclude:
        return ''

    ret_str = ['']

    # Don't render icmpv6 protocol terms under inet, or icmp under inet6
    if ((self.af == 6 and 'icmp' in self.term.protocol) or
        (self.af == 4 and 'icmpv6' in self.term.protocol)):
      logging.debug(self.NO_AF_LOG_PROTO.substitute(term=self.term.name,
                                                    proto=self.term.protocol,
                                                    af=self.filter_type))
      return ''

    # Term verbatim is not supported
    if self.term.verbatim:
      raise NsxvAclTermError(
          'Verbatim are not implemented in standard ACLs')

    # Term option is not supported
    if self.term.option:
      raise NsxvAclTermError(
          'Option are not implemented in standard ACLs')

    # check for keywords Nsxv does not support
    term_keywords = self.term.__dict__
    unsupported_keywords = []
    for key  in term_keywords:
      if term_keywords[key]:
        # translated is obj attribute not keyword
        if ('translated' not in key) and (key not in _NSXV_SUPPORTED_KEYWORDS):
          unsupported_keywords.append(key)
    if unsupported_keywords:
      logging.warn('WARNING: The keywords %s in Term %s are not supported in '
                   'Nsxv ', unsupported_keywords, self.term.name)

    name = '%s%s%s' % (_XML_TABLE.get('nameStart'), self.term.name,
                       _XML_TABLE.get('nameEnd'))

    notes = ''
    if self.term.comment:
      for comment in self.term.comment:
        notes = '%s%s' %(notes, comment)
      notes = '%s%s%s' % (_XML_TABLE.get('noteStart'), notes,
                          _XML_TABLE.get('noteEnd'))

    # protocol
    protocol = None

    if self.term.protocol:
      protocol = map(self.PROTO_MAP.get, self.term.protocol, self.term.protocol)

      # icmp-types
      icmp_types = ['']
      if self.term.icmp_type:
        icmp_types = self.NormalizeIcmpTypes(self.term.icmp_type,
                                             self.term.protocol,
                                             self.af)

    # for mixed filter type get both IPV4address and IPv6Address
    af_list = []
    if self.filter_type == 'mixed':
      af_list = [4, 6]
    else:
      af_list = [self.af]

    source_address = None
    destination_address = None
    source_addr = []
    destination_addr = []

    for af in af_list:
      # source address
      if self.term.source_address:
        source_address = self.term.GetAddressOfVersion('source_address', af)
        source_address_exclude = self.term.GetAddressOfVersion(
            'source_address_exclude', af)
        if source_address_exclude:
          source_address = nacaddr.ExcludeAddrs(
              source_address,
              source_address_exclude)
        if not source_address:
          logging.warn(self.NO_AF_LOG_ADDR.substitute(term=self.term.name,
                                                      direction='source',
                                                      af=self.filter_type))
          return ''
        if not source_addr:
          source_addr.extend(source_address)
        else:
          source_addr = source_address

      # destination address
      if self.term.destination_address:
        destination_address = self.term.GetAddressOfVersion(
            'destination_address', af)
        destination_address_exclude = self.term.GetAddressOfVersion(
            'destination_address_exclude', af)
        if destination_address_exclude:
          destination_address = nacaddr.ExcludeAddrs(
              destination_address,
              destination_address_exclude)
        if not destination_address:
          logging.warn(self.NO_AF_LOG_ADDR.substitute(term=self.term.name,
                                                      direction='destination',
                                                      af=self.filter_type))
          return ''
        destination_addr.extend(destination_address)

    # ports
    source_port = None
    destination_port = None
    if self.term.source_port:
      source_port = self.term.source_port
    if self.term.destination_port:
      destination_port = self.term.destination_port

    # logging
    log = 'false'
    if self.term.logging:
      log = 'true'

    sources = ''
    if source_addr:
      sources = '<sources excluded="false">'
      for saddr in source_addr:

        # inet4
        if type(saddr) is nacaddr.IPv4:
          if saddr.numhosts > 1:
            saddr = '%s%s%s' % (_XML_TABLE.get('srcIpv4Start'),
                                saddr.with_prefixlen,
                                _XML_TABLE.get('srcIpv4End'),)
          else:
            saddr = '%s%s%s' % (_XML_TABLE.get('srcIpv4Start'),
                                saddr.ip,
                                _XML_TABLE.get('srcIpv4End'))
          sources = '%s%s' %(sources, saddr)
        # inet6
        if type(saddr) is nacaddr.IPv6:
          if saddr.numhosts > 1:
            saddr = '%s%s%s' % (_XML_TABLE.get('srcIpv6Start'),
                                saddr.with_prefixlen,
                                _XML_TABLE.get('srcIpv6End'),)
          else:
            saddr = '%s%s%s' % (_XML_TABLE.get('srcIpv6Start'),
                                saddr.ip, _XML_TABLE.get('srcIpv6End'))
          sources = '%s%s' %(sources, saddr)
      sources = '%s%s' %(sources, '</sources>')

    destinations = ''
    if destination_addr:
      destinations = '<destinations excluded="false">'
      for daddr in destination_addr:
        # inet4
        if type(daddr) is nacaddr.IPv4:
          if daddr.numhosts > 1:
            daddr = '%s%s%s' % (_XML_TABLE.get('destIpv4Start'),
                                daddr.with_prefixlen,
                                _XML_TABLE.get('destIpv4End'),)
          else:
            daddr = '%s%s%s' % (_XML_TABLE.get('destIpv4Start'),
                                daddr.ip,
                                _XML_TABLE.get('destIpv4End'))
          destinations = '%s%s' %(destinations, daddr)
        # inet6
        if type(daddr) is nacaddr.IPv6:
          if daddr.numhosts > 1:
            daddr = '%s%s%s' % (_XML_TABLE.get('destIpv6Start'),
                                daddr.with_prefixlen,
                                _XML_TABLE.get('destIpv6End'),)
          else:
            daddr = '%s%s%s' % (_XML_TABLE.get('destIpv6Start'),
                                daddr.ip,
                                _XML_TABLE.get('destIpv6End'))
          destinations = '%s%s' %(destinations, daddr)
      destinations = '%s%s' %(destinations, '</destinations>')

    services = []
    if protocol:
      services.append('<services>')
      for proto in protocol:
        if proto != 'any':
          services.append(self._ServiceToString(proto,
                                                source_port,
                                                destination_port,
                                                icmp_types))
      services.append('</services>')

    service = ''
    for s in services:
      service = '%s%s' % (service, s)

    # action
    action = '%s%s%s' % (_XML_TABLE.get('actionStart'),
                         _ACTION_TABLE.get(str(self.term.action[0])),
                         _XML_TABLE.get('actionEnd'))

    ret_lines = []
    ret_lines.append('<rule logged="%s"> %s %s %s %s %s %s </rule>' %
                     (log, name, action, sources, destinations, service, notes))

    # remove any trailing spaces and replace multiple spaces with singles
    stripped_ret_lines = [re.sub(r'\s+', ' ', x).rstrip() for x in ret_lines]
    ret_str.extend(stripped_ret_lines)
    return '\n'.join(ret_str)
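
The skip message near the top of __str__ is built with a string.Template (NO_AF_LOG_PROTO.substitute(...)) and only then handed to logging.debug. A small sketch of that pattern; the template text below is an illustrative stand-in for capirca's actual NO_AF_LOG_PROTO:

import logging
from string import Template

logging.basicConfig(level=logging.DEBUG)

NO_AF_LOG_PROTO = Template('Term $term will not be rendered: protocol $proto '
                           'does not match the address family of filter $af.')

logging.debug(NO_AF_LOG_PROTO.substitute(term='allow-icmp',
                                         proto=['icmpv6'],
                                         af='inet'))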

Example 194

Project: cauliflowervest
Source File: __init__.py
View license
def create_fancy_connection(tunnel_host=None, key_file=None,
                            cert_file=None, ca_certs=None,
                            proxy_authorization=None):
  # This abomination brought to you by the fact that
  # the HTTPHandler creates the connection instance in the middle
  # of do_open so we need to add the tunnel host to the class.

  class PresetProxyHTTPSConnection(httplib.HTTPSConnection):
    """An HTTPS connection that uses a proxy defined by the enclosing scope."""

    def __init__(self, *args, **kwargs):
      httplib.HTTPSConnection.__init__(self, *args, **kwargs)

      self._tunnel_host = tunnel_host
      if tunnel_host:
        logging.debug("Creating preset proxy https conn: %s", tunnel_host)

      self.key_file = key_file
      self.cert_file = cert_file
      self.ca_certs = ca_certs
      if can_validate_certs():
        if self.ca_certs:
          self.cert_reqs = ssl.CERT_REQUIRED
        else:
          self.cert_reqs = ssl.CERT_NONE

    def _get_hostport(self, host, port):
      # Python 2.7.7rc1 (hg r90728:568041fd8090), 3.4.1 and 3.5 rename
      # _set_hostport to _get_hostport and changes its functionality.  The
      # Python 2.7.7rc1 version of this method is included here for
      # compatibility with earlier versions of Python.  Without this, HTTPS over
      # HTTP CONNECT proxies cannot be used.

      # This method may be removed if compatibility with Python <2.7.7rc1 is not
      # required.

      # Python bug: http://bugs.python.org/issue7776
      if port is None:
        i = host.rfind(":")
        j = host.rfind("]")         # ipv6 addresses have [...]
        if i > j:
          try:
            port = int(host[i+1:])
          except ValueError:
            if host[i+1:] == "":  # http://foo.com:/ == http://foo.com/
              port = self.default_port
            else:
              raise httplib.InvalidURL("nonnumeric port: '%s'" % host[i+1:])
          host = host[:i]
        else:
          port = self.default_port
        if host and host[0] == "[" and host[-1] == "]":
          host = host[1:-1]

      return (host, port)

    def _tunnel(self):
      self.host, self.port = self._get_hostport(self._tunnel_host, None)
      logging.info("Connecting through tunnel to: %s:%d",
                   self.host, self.port)

      self.send("CONNECT %s:%d HTTP/1.0\r\n" % (self.host, self.port))

      if proxy_authorization:
        self.send("Proxy-Authorization: %s\r\n" % proxy_authorization)

      # blank line
      self.send("\r\n")

      response = self.response_class(self.sock, strict=self.strict,
                                     method=self._method)
      # pylint: disable=protected-access
      (_, code, message) = response._read_status()

      if code != 200:
        self.close()
        raise socket.error("Tunnel connection failed: %d %s" %
                           (code, message.strip()))

      while True:
        line = response.fp.readline()
        if line == "\r\n":
          break

    def _get_valid_hosts_for_cert(self, cert):
      """Returns a list of valid host globs for an SSL certificate.

      Args:
        cert: A dictionary representing an SSL certificate.
      Returns:
        list: A list of valid host globs.
      """
      if "subjectAltName" in cert:
        return [x[1] for x in cert["subjectAltName"] if x[0].lower() == "dns"]
      else:
        # Return a list of commonName fields
        return [x[0][1] for x in cert["subject"]
                if x[0][0].lower() == "commonname"]

    def _validate_certificate_hostname(self, cert, hostname):
      """Perform RFC2818/6125 validation against a cert and hostname.

      Args:
        cert: A dictionary representing an SSL certificate.
        hostname: The hostname to test.
      Returns:
        bool: Whether or not the hostname is valid for this certificate.
      """
      hosts = self._get_valid_hosts_for_cert(cert)
      for host in hosts:
        # Wildcards are only valid when the * exists at the end of the last
        # (left-most) label, and there are at least 3 labels in the expression.
        if ("*." in host and host.count("*") == 1 and
            host.count(".") > 1 and "." in hostname):
          left_expected, right_expected = host.split("*.")
          left_hostname, right_hostname = hostname.split(".", 1)
          if (left_hostname.startswith(left_expected) and
              right_expected == right_hostname):
            return True
        elif host == hostname:
          return True
      return False

    def connect(self):
      # TODO(frew): When we drop support for <2.6 (in the far distant future),
      # change this to socket.create_connection.
      self.sock = _create_connection((self.host, self.port))

      if self._tunnel_host:
        self._tunnel()

      # ssl and FakeSocket got deprecated. Try for the new hotness of wrap_ssl,
      # with fallback. Note: Since can_validate_certs() just checks for the
      # ssl module, it's equivalent to attempting to import ssl from
      # the function, but doesn't require a dynamic import, which doesn't
      # play nicely with dev_appserver.
      if can_validate_certs():
        self.sock = ssl.wrap_socket(self.sock,
                                    keyfile=self.key_file,
                                    certfile=self.cert_file,
                                    ca_certs=self.ca_certs,
                                    cert_reqs=self.cert_reqs)

        if self.cert_reqs & ssl.CERT_REQUIRED:
          cert = self.sock.getpeercert()
          hostname = self.host.split(":", 0)[0]
          if not self._validate_certificate_hostname(cert, hostname):
            raise InvalidCertificateException(hostname, cert,
                                              "hostname mismatch")
      else:
        ssl_socket = socket.ssl(self.sock,
                                keyfile=self.key_file,
                                certfile=self.cert_file)
        self.sock = httplib.FakeSocket(self.sock, ssl_socket)

  return PresetProxyHTTPSConnection
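
PresetProxyHTTPSConnection implements HTTPS over a CONNECT proxy by hand because it targets old Python 2 httplib. On Python 3 the same tunnel setup is available through http.client's set_tunnel; a minimal sketch with placeholder host names (no request is actually sent):

import logging
import http.client

logging.basicConfig(level=logging.DEBUG)

proxy_host, proxy_port = "proxy.example.com", 3128   # placeholder proxy
tunnel_host = "www.example.com"                      # placeholder target host

conn = http.client.HTTPSConnection(proxy_host, proxy_port)
conn.set_tunnel(tunnel_host, 443)
logging.debug("Creating preset proxy https conn: %s", tunnel_host)
# conn.request("GET", "/")   # would send CONNECT to the proxy, then the GET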

Example 195

Project: ScratchABit
Source File: elf.py
View license
def load_segments(aspace, elffile):
    log.debug("Loading ELF segments")

    wordsz = elffile.elfclass // 8

    for seg in elffile.iter_segments():
        #print(seg)
        #print(seg.header)
        #print("p_vaddr=%x p_memsz=%x" % (seg["p_vaddr"], seg["p_memsz"]))
        #print()
        if seg["p_type"] == "PT_LOAD":
            if seg["p_memsz"]:
                access = p_flags_to_access(seg["p_flags"])
                aspace.add_area(seg["p_vaddr"], seg["p_vaddr"] + seg["p_memsz"] - 1, {"access": access})
                seg.stream.seek(seg['p_offset'])
                aspace.load_content(seg.stream, seg["p_vaddr"], seg["p_filesz"])
            else:
                log.warning("Skipping empty ELF segment: %s", seg.header)
        elif seg["p_type"] == "PT_DYNAMIC":
            aspace.set_label(seg["p_vaddr"], "ELF.DYNAMIC")

            symtab = {}
            for i, s in enumerate(seg.iter_symbols()):
                #print(s.name, hex(s["st_value"]), s.entry)
                symtab[i] = s
                if s["st_shndx"] != "SHN_UNDEF":
                    aspace.make_unique_label(s["st_value"], str(s.name, "utf-8"))

                    if s["st_info"]["type"] == "STT_FUNC":
                        aspace.analisys_stack_push(s["st_value"])
                    if s["st_info"]["type"] == "STT_OBJECT":
                        # TODO: Set as data of given s["st_size"]
                        pass

            rel = relsz = relent = None
            pltrel = pltrelsz = pltenttype = None

            for tag in seg.iter_tags():
                d_ptr = tag["d_ptr"]
                #print(tag, hex(d_ptr))
                if tag['d_tag'] == 'DT_PLTGOT':
                    aspace.set_label(d_ptr, "ELF.PLTGOT")
                    aspace.make_data(d_ptr, wordsz)
                    aspace.make_arg_offset(d_ptr, 0, aspace.get_data(d_ptr, wordsz))

                    aspace.set_label(d_ptr + wordsz, "ELF.CUR_OBJ")
                    aspace.make_data(d_ptr + wordsz, wordsz)
                    aspace.append_comment(d_ptr + wordsz, "Identifier of this ELF object")

                    aspace.set_label(d_ptr + wordsz * 2, "ELF.SYM_LOOKUP")
                    aspace.make_data(d_ptr + wordsz * 2, wordsz)
                    aspace.append_comment(d_ptr + wordsz * 2, "Dynamic linker routine for symbol lookup")

                elif tag['d_tag'] == 'DT_JMPREL':
                    aspace.set_label(d_ptr, "ELF.JMPREL")
                    pltrel = d_ptr
                elif tag['d_tag'] == 'DT_PLTRELSZ':
                    pltrelsz = d_ptr
                elif tag['d_tag'] == 'DT_PLTREL':
                    pltenttype = d_ptr

                elif tag['d_tag'] == 'DT_REL':
                    rel = d_ptr
                    aspace.set_label(d_ptr, "ELF.REL")
                elif tag['d_tag'] == 'DT_RELSZ':
                    relsz = d_ptr
                elif tag['d_tag'] == 'DT_RELENT':
                    relent = d_ptr

                elif tag['d_tag'] == 'DT_RELA':
                    aspace.set_label(d_ptr, "ELF.RELA")

                elif tag['d_tag'] == 'DT_INIT_ARRAY':
                    aspace.set_label(d_ptr, "ELF.INIT_ARRAY")
                elif tag['d_tag'] == 'DT_FINI_ARRAY':
                    aspace.set_label(d_ptr, "ELF.FINI_ARRAY")
                elif tag['d_tag'] == 'DT_INIT':
                    aspace.set_label(d_ptr, "ELF.INIT")
                    aspace.analisys_stack_push(d_ptr)
                elif tag['d_tag'] == 'DT_FINI':
                    aspace.set_label(d_ptr, "ELF.FINI")
                    aspace.analisys_stack_push(d_ptr)

            if rel is not None:
                aspace.make_data_array(rel, wordsz, relsz // wordsz)

            if pltrel is not None:
                aspace.make_data_array(pltrel, wordsz, pltrelsz // wordsz)

                if pltenttype == ENUM_D_TAG["DT_RELA"]:
                    entry_struct = elffile.structs.Elf_Rela
                else:
                    entry_struct = elffile.structs.Elf_Rel

                end = pltrel + pltrelsz
                while pltrel < end:
                    data = aspace.get_bytes(pltrel, entry_struct.sizeof())
                    entry = entry_struct.parse(data)
                    reloc = Relocation(entry, elffile)
                    sym = symtab[reloc['r_info_sym']]
                    #print(reloc, sym.name, sym.entry)
                    symname = str(sym.name, "utf-8")
                    aspace.append_comment(pltrel, symname + ".plt")
                    aspace.make_arg_offset(pltrel, 0, aspace.get_data(pltrel, wordsz))

                    got_addr = reloc["r_offset"]
                    aspace.set_label(got_addr, symname + ".got")
                    aspace.make_data(got_addr, wordsz)
                    lazy_code = aspace.get_data(got_addr, wordsz)
                    aspace.make_arg_offset(got_addr, 0, lazy_code)

                    aspace.set_label(lazy_code, symname + ".lazy")
                    aspace.analisys_stack_push(lazy_code)

                    real_func = adjust_plt_addr(lazy_code)
                    aspace.make_unique_label(real_func, symname)
                    aspace.analisys_stack_push(real_func)

                    pltrel += entry_struct.sizeof()

    return elffile["e_entry"]
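
load_segments walks the program headers with pyelftools and logs progress through a module-level logger. A minimal sketch of the same PT_LOAD walk, assuming pyelftools is installed; the binary path is a placeholder:

import logging
from elftools.elf.elffile import ELFFile

logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger(__name__)

with open("/bin/true", "rb") as f:          # placeholder ELF binary
    elf = ELFFile(f)
    log.debug("Loading ELF segments")
    for seg in elf.iter_segments():
        if seg["p_type"] == "PT_LOAD":
            log.debug("PT_LOAD p_vaddr=%#x p_memsz=%#x",
                      seg["p_vaddr"], seg["p_memsz"])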

Example 196

Project: pygp
Source File: optimize_base.py
View license
def opt_hyper(gpr,hyperparams,Ifilter=None,maxiter=1000,gradcheck=False,bounds = None,optimizer=OPT.fmin_tnc,gradient_tolerance=1E-4,*args,**kw_args):
    """
    Optimize hyperparameters of :py:class:`pygp.gp.basic_gp.GP` ``gpr`` starting from given hyperparameters ``hyperparams``.

    **Parameters:**

    gpr : :py:class:`pygp.gp.basic_gp`
        GP regression class
    hyperparams : {'covar':logtheta, ...}
        Dictionary filled with starting hyperparameters
        for optimization. logtheta are the CF hyperparameters.
    Ifilter : [boolean]
        Index vector, indicating which hyperparameters shall
        be optimized. For instance::

            logtheta = [1,2,3]
            Ifilter = [0,1,0]

        means that only the second entry (which equals 2 in
        this example) of logtheta will be optimized
        and the others remain untouched.

    bounds : [[min,max]]
        Array with min and max value that can be attained for any hyperparameter

    maxiter: int
        maximum number of function evaluations
    gradcheck: boolean 
        check gradients comparing the analytical gradients to their approximations
    optimizer: :py:class:`scipy.optimize`
        which scipy optimizer to use? (standard lbfgsb)

    **Arguments passed on to LML**

    priors : [:py:class:`pygp.priors`]
        non-default prior, otherwise assume
        first index amplitude, last noise, rest:lengthscales
    """

    def f(x):
        x_ = X0
        x_[Ifilter_x] = x
        rv =  gpr.LML(param_list_to_dict(x_,param_struct,skeys),*args,**kw_args)
        #LG.debug("L("+str(x_)+")=="+str(rv))
        if SP.isnan(rv):
            return 1E6
        return rv
    
    def df(x):
        x_ = X0
        x_[Ifilter_x] = x
        rv =  gpr.LMLgrad(param_list_to_dict(x_,param_struct,skeys),*args,**kw_args)
        rv = param_dict_to_list(rv,skeys)
        #LG.debug("dL("+str(x_)+")=="+str(rv))
        if not SP.isfinite(rv).all(): #SP.isnan(rv).any():
            In = SP.isnan(rv)
            rv[In] = 1E6
        return rv[Ifilter_x]

    #0. store parameter structure
    skeys = SP.sort(hyperparams.keys())
    param_struct = dict([(name,hyperparams[name].shape) for name in skeys])

    
    #1. convert the dictionaries to parameter lists
    X0 = param_dict_to_list(hyperparams,skeys)
    if Ifilter is not None:
        Ifilter_x = SP.array(param_dict_to_list(Ifilter,skeys),dtype='bool')
    else:
        Ifilter_x = SP.ones(len(X0),dtype='bool')

    #2. bounds
    if bounds is not None:
        #go through all hyperparams and build bound array (flattened)
        _b = []
        for key in skeys:
            if key in bounds.keys():
                _b.extend(bounds[key])
            else:
                _b.extend([(-SP.inf,+SP.inf)]*hyperparams[key].size)
        bounds = SP.array(_b)
        bounds = bounds[Ifilter_x]
        pass
       
        
    #3. set starting point of optimization, truncate the non-used dimensions
    x  = X0.copy()[Ifilter_x]
        
    LG.debug("startparameters for opt:"+str(x))
    
    if gradcheck:
        checkgrad(f, df, x)
        LG.info("check_grad (pre) (Enter to continue):" + str(OPT.check_grad(f,df,x)))
        raw_input()

    LG.debug("start optimization")

    #general optimizer interface
    #note: x is a subset of X, indexing the parameters that are optimized over
    # Ifilter_x picks the subset of X, yielding x
    opt_RV=optimizer(f, x, fprime=df, maxfun=int(maxiter),pgtol=gradient_tolerance, messages=False, bounds=bounds)
    # optimizer = OPT.fmin_l_bfgs_b
    # opt_RV=optimizer(f, x, fprime=df, maxfun=int(maxiter),iprint =1, bounds=bounds, factr=10.0, pgtol=1e-10)
    opt_x = opt_RV[0]
    
    #relate back to X
    Xopt = X0.copy()
    Xopt[Ifilter_x] = opt_x
    #convert into dictionary
    opt_hyperparams = param_list_to_dict(Xopt,param_struct,skeys)
    #get the log marginal likelihood at the optimum:
    opt_lml = gpr.LML(opt_hyperparams,**kw_args)

    if gradcheck:
        checkgrad(f, df, opt_RV[0])
        LG.info("check_grad (post) (Enter to continue):" + str(OPT.check_grad(f,df,opt_RV[0])))

        pdb.set_trace()
        # raw_input()

    LG.debug("old parameters:")
    LG.debug(str(hyperparams))
    LG.debug("optimized parameters:")
    LG.debug(str(opt_hyperparams))
    LG.debug("grad:"+str(df(opt_x)))
    
    return [opt_hyperparams,opt_lml]
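
opt_hyper brackets the optimizer call with LG.debug messages (start parameters, optimized parameters, final gradient), which is often all the tracing a long-running fit needs. A compact sketch of that log-before-and-after pattern around scipy.optimize.minimize, with a toy quadratic objective standing in for pygp's LML:

import logging
import numpy as np
from scipy import optimize

logging.basicConfig(level=logging.DEBUG)
LG = logging.getLogger(__name__)

def f(x):
    # Toy objective standing in for the negative log marginal likelihood.
    return float(np.sum((x - 3.0) ** 2))

def df(x):
    return 2.0 * (x - 3.0)

x0 = np.zeros(2)
LG.debug("startparameters for opt: %s", x0)
res = optimize.minimize(f, x0, jac=df, method="TNC")
LG.debug("optimized parameters: %s", res.x)
LG.debug("grad: %s", df(res.x))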

Example 197

Project: autotest
Source File: version_0.py
View license
    def state_iterator(self, buffer):
        new_tests = []
        boot_count = 0
        group_subdir = None
        sought_level = 0
        stack = status_lib.status_stack()
        current_kernel = kernel(self.job)
        boot_in_progress = False
        alert_pending = None
        started_time = None

        while not self.finished or buffer.size():
            # stop processing once the buffer is empty
            if buffer.size() == 0:
                yield new_tests
                new_tests = []
                continue

            # parse the next line
            line = buffer.get()
            logging.debug('STATUS: %s', line.strip())
            line = status_line.parse_line(line)
            if line is None:
                logging.debug('non-status line, ignoring')
                continue  # ignore non-status lines

            # have we hit the job start line?
            if (line.type == "START" and not line.subdir and
                    not line.testname):
                sought_level = 1
                logging.debug("found job level start "
                              "marker, looking for level "
                              "1 groups now")
                continue

            # have we hit the job end line?
            if (line.type == "END" and not line.subdir and
                    not line.testname):
                logging.debug("found job level end "
                              "marker, looking for level "
                              "0 lines now")
                sought_level = 0

            # START line, just push another layer on to the stack
            # and grab the start time if this is at the job level
            # we're currently seeking
            if line.type == "START":
                group_subdir = None
                stack.start()
                if line.indent == sought_level:
                    started_time = \
                        tko_utils.get_timestamp(
                            line.optional_fields, "timestamp")
                logging.debug("start line, ignoring")
                continue
            # otherwise, update the status on the stack
            else:
                logging.debug("GROPE_STATUS: %s" %
                              [stack.current_status(),
                                  line.status, line.subdir,
                                  line.testname, line.reason])
                stack.update(line.status)

            if line.status == "ALERT":
                logging.debug("job level alert, recording")
                alert_pending = line.reason
                continue

            # ignore Autotest.install => GOOD lines
            if (line.testname == "Autotest.install" and
                    line.status == "GOOD"):
                logging.debug("Successful Autotest "
                              "install, ignoring")
                continue

            # ignore END lines for a reboot group
            if (line.testname == "reboot" and line.type == "END"):
                logging.debug("reboot group, ignoring")
                continue

            # convert job-level ABORTs into a 'CLIENT_JOB' test, and
            # ignore other job-level events
            if line.testname is None:
                if (line.status == "ABORT" and
                        line.type != "END"):
                    line.testname = "CLIENT_JOB"
                else:
                    logging.debug("job level event, "
                                  "ignoring")
                    continue

            # use the group subdir for END lines
            if line.type == "END":
                line.subdir = group_subdir

            # are we inside a block group?
            if (line.indent != sought_level and
                line.status != "ABORT" and
                    not line.testname.startswith('reboot.')):
                if line.subdir:
                    logging.debug("set group_subdir: %s", line.subdir)
                    group_subdir = line.subdir
                logging.debug("ignoring incorrect indent "
                              "level %d != %d," %
                              (line.indent, sought_level))
                continue

            # use the subdir as the testname, except for
            # boot.* and kernel.* tests
            if (line.testname is None or
                not re.search(r"^(boot(\.\d+)?$|kernel\.)",
                              line.testname)):
                if line.subdir and '.' in line.subdir:
                    line.testname = line.subdir

            # has a reboot started?
            if line.testname == "reboot.start":
                started_time = tko_utils.get_timestamp(
                    line.optional_fields, "timestamp")
                logging.debug("reboot start event, "
                              "ignoring")
                boot_in_progress = True
                continue

            # has a reboot finished?
            if line.testname == "reboot.verify":
                line.testname = "boot.%d" % boot_count
                logging.debug("reboot verified")
                boot_in_progress = False
                verify_ident = line.reason.strip()
                current_kernel = kernel(self.job, verify_ident)
                boot_count += 1

            if alert_pending:
                line.status = "ALERT"
                line.reason = alert_pending
                alert_pending = None

            # create the actual test object
            finished_time = tko_utils.get_timestamp(
                line.optional_fields, "timestamp")
            final_status = stack.end()
            logging.debug("Adding: "
                          "%s\nSubdir:%s\nTestname:%s\n%s" %
                          (final_status, line.subdir,
                           line.testname, line.reason))
            new_test = test.parse_test(self.job, line.subdir,
                                       line.testname,
                                       final_status, line.reason,
                                       current_kernel,
                                       started_time,
                                       finished_time)
            started_time = None
            new_tests.append(new_test)

        # the job is finished, but we never came back from reboot
        if boot_in_progress:
            testname = "boot.%d" % boot_count
            reason = "machine did not return from reboot"
            logging.debug(("Adding: ABORT\nSubdir:----\n"
                           "Testname:%s\n%s")
                          % (testname, reason))
            new_test = test.parse_test(self.job, None, testname,
                                       "ABORT", reason,
                                       current_kernel, None, None)
            new_tests.append(new_test)
        yield new_tests
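
The parser above uses logging.debug to trace every decision it makes while walking the status log. A much smaller, self-contained sketch of the same generator-plus-tracing pattern (with a hypothetical tab-separated line format, not autotest's status_line parser):

import logging

logging.basicConfig(level=logging.DEBUG)

def state_iterator(lines):
    """Yield (testname, status) tuples parsed from simple status lines."""
    for raw in lines:
        line = raw.rstrip("\n")
        logging.debug('STATUS: %s', line.strip())
        parts = line.strip().split("\t")
        if len(parts) < 3:
            logging.debug('non-status line, ignoring')
            continue
        status, testname, reason = parts[0], parts[1], parts[2]
        indent = len(line) - len(line.lstrip("\t"))
        logging.debug("indent %d, adding Testname:%s Status:%s Reason:%s",
                      indent, testname, status, reason)
        yield (testname, status)

demo = [
    "GOOD\tboot.0\tcompleted successfully",
    "a comment line without any tabs",
    "\tABORT\tCLIENT_JOB\tmachine did not return from reboot",
]
for result in state_iterator(demo):
    print(result)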

Example 198

View license
def run(test, params, env):
    """
    Test command: virsh change-media.

    The command changes the media used by CD or floppy drives.

    Test steps:
    1. Prepare test environment.
    2. Perform virsh change-media operation.
    3. Recover test environment.
    4. Confirm the test result.
    """

    def is_attached(vmxml_devices, disk_type, source_file, target_dev):
        """
        Check attached device and disk exist or not.

        :param vmxml_devices: VMXMLDevices instance
        :param disk_type: disk's device type: cdrom or floppy
        :param source_file : disk's source file to check
        :param target_dev : target device name
        :return: True/False if backing file and device found
        """
        disks = vmxml_devices.by_device_tag('disk')
        for disk in disks:
            if disk.device != disk_type:
                continue
            if disk.target['dev'] != target_dev:
                continue
            if disk.xmltreefile.find('source') is not None:
                if disk.source.attrs['file'] != source_file:
                    continue
            else:
                continue
            # All three conditions met
            logging.debug("Find %s in given disk XML", source_file)
            return True
        logging.debug("Not find %s in gievn disk XML", source_file)
        return False

    def check_result(vm_name, disk_source, disk_type, disk_target,
                     flags, vm_state, attach=True):
        """
        Check the test result of attach/detach-device command.
        """
        active_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        active_attached = is_attached(active_vmxml.devices, disk_type,
                                      disk_source, disk_target)
        if vm_state != "transient":
            inactive_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name,
                                                           options="--inactive")
            inactive_attached = is_attached(inactive_vmxml.devices, disk_type,
                                            disk_source, disk_target)

        if flags.count("config") and not flags.count("live"):
            if vm_state != "transient":
                if attach:
                    if not inactive_attached:
                        raise exceptions.TestFail("Inactive domain XML not updated"
                                                  " when --config options used for"
                                                  " attachment")
                    if vm_state != "shutoff":
                        if active_attached:
                            raise exceptions.TestFail("Active domain XML updated"
                                                      " when --config options used"
                                                      " for attachment")
                else:
                    if inactive_attached:
                        raise exceptions.TestFail("Inactive domain XML not updated"
                                                  " when --config options used for"
                                                  " detachment")
                    if vm_state != "shutoff":
                        if not active_attached:
                            raise exceptions.TestFail("Active domain XML updated"
                                                      " when --config options used"
                                                      " for detachment")
        elif flags.count("live") and not flags.count("config"):
            if attach:
                if vm_state in ["paused", "running", "transient"]:
                    if not active_attached:
                        raise exceptions.TestFail("Active domain XML not updated"
                                                  " when --live options used for"
                                                  " attachment")
                if vm_state in ["paused", "running"]:
                    if inactive_attached:
                        raise exceptions.TestFail("Inactive domain XML updated"
                                                  " when --live options used for"
                                                  " attachment")
            else:
                if vm_state in ["paused", "running", "transient"]:
                    if active_attached:
                        raise exceptions.TestFail("Active domain XML not updated"
                                                  " when --live options used for"
                                                  " detachment")
                if vm_state in ["paused", "running"]:
                    if not inactive_attached:
                        raise exceptions.TestFail("Inactive domain XML updated"
                                                  " when --live options used for"
                                                  " detachment")
        elif flags.count("live") and flags.count("config"):
            if attach:
                if vm_state in ["paused", "running"]:
                    if not active_attached:
                        raise exceptions.TestFail("Active domain XML not updated"
                                                  " when --live --config options"
                                                  " used for attachment")
                    if not inactive_attached:
                        raise exceptions.TestFail("Inactive domain XML not updated"
                                                  " when --live --config options "
                                                  "used for attachment")
            else:
                if vm_state in ["paused", "running"]:
                    if active_attached:
                        raise exceptions.TestFail("Active domain XML not updated "
                                                  "when --live --config options "
                                                  "used for detachment")
                    if inactive_attached:
                        raise exceptions.TestFail("Inactive domain XML not updated"
                                                  " when --live --config options "
                                                  "used for detachment")
        elif flags.count("current") or flags == "":
            if attach:
                if vm_state in ["paused", "running", "transient"]:
                    if not active_attached:
                        raise exceptions.TestFail("Active domain XML not updated"
                                                  " when --current options used "
                                                  "for attachment")
                if vm_state in ["paused", "running"]:
                    if inactive_attached:
                        raise exceptions.TestFail("Inactive domain XML updated "
                                                  "when --current options used "
                                                  "for live attachment")
                if vm_state == "shutoff" and not inactive_attached:
                    raise exceptions.TestFail("Inactive domain XML not updated "
                                              "when --current options used for "
                                              "attachment")
            else:
                if vm_state in ["paused", "running", "transient"]:
                    if active_attached:
                        raise exceptions.TestFail("Active domain XML not updated"
                                                  " when --current options used "
                                                  "for detachment")
                if vm_state in ["paused", "running"]:
                    if not inactive_attached:
                        raise exceptions.TestFail("Inactive domain XML updated "
                                                  "when --current options used "
                                                  "for live detachment")
                if vm_state == "shutoff" and inactive_attached:
                    raise exceptions.TestFail("Inactive domain XML not updated "
                                              "when --current options used for "
                                              "detachment")

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vm_ref = params.get("change_media_vm_ref")
    action = params.get("change_media_action")
    action_twice = params.get("change_media_action_twice", "")
    pre_vm_state = params.get("pre_vm_state")
    options = params.get("change_media_options")
    options_twice = params.get("change_media_options_twice", "")
    device_type = params.get("change_media_device_type", "cdrom")
    target_device = params.get("change_media_target_device", "hdc")
    init_iso_name = params.get("change_media_init_iso")
    old_iso_name = params.get("change_media_old_iso")
    new_iso_name = params.get("change_media_new_iso")
    virsh_dargs = {"debug": True, "ignore_status": True}

    if device_type not in ['cdrom', 'floppy']:
        raise exceptions.TestSkipError("Got a invalid device type:/n%s"
                                       % device_type)

    # Backup for recovery.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    old_iso = os.path.join(data_dir.get_tmp_dir(), old_iso_name)
    new_iso = os.path.join(data_dir.get_tmp_dir(), new_iso_name)

    if vm_ref == "name":
        vm_ref = vm_name

    if vm.is_alive():
        vm.destroy(gracefully=False)

    try:
        if not init_iso_name:
            init_iso = ""
        else:
            init_iso = os.path.join(data_dir.get_tmp_dir(),
                                    init_iso_name)

        # Prepare test files.
        libvirt.create_local_disk("iso", old_iso)
        libvirt.create_local_disk("iso", new_iso)

        # Check domain's disk device
        disk_blk = vm_xml.VMXML.get_disk_blk(vm_name)
        logging.info("disk_blk %s", disk_blk)
        if target_device not in disk_blk:
            if vm.is_alive():
                virsh.destroy(vm_name)
            logging.info("Adding device")
            libvirt.create_local_disk("iso", init_iso)
            disk_params = {"disk_type": "file", "device_type": device_type,
                           "driver_name": "qemu", "driver_type": "raw",
                           "target_bus": "ide", "readonly": "yes"}
            libvirt.attach_additional_device(vm_name, target_device,
                                             init_iso, disk_params)

        vmxml_for_test = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        # Turn VM into certain state.
        if pre_vm_state == "running":
            logging.info("Starting %s..." % vm_name)
            if vm.is_dead():
                vm.start()
                vm.wait_for_login().close()
        elif pre_vm_state == "shutoff":
            logging.info("Shuting down %s..." % vm_name)
            if vm.is_alive():
                vm.destroy(gracefully=False)
        elif pre_vm_state == "paused":
            logging.info("Pausing %s..." % vm_name)
            if vm.is_dead():
                vm.start()
                vm.wait_for_login().close()
            if not vm.pause():
                raise exceptions.TestSkipError("Cann't pause the domain")
            time.sleep(5)
        elif pre_vm_state == "transient":
            logging.info("Creating %s..." % vm_name)
            vm.undefine()
            if virsh.create(vmxml_for_test.xml, **virsh_dargs).exit_status:
                vmxml_backup.define()
                raise exceptions.TestSkipError("Cann't create the domain")

        # Libvirt will ignore --source when action is eject
        attach = True
        device_source = old_iso
        if action == "--eject ":
            source = ""
            attach = False
        else:
            source = device_source

        all_options = action + options + " " + source
        ret = virsh.change_media(vm_ref, target_device,
                                 all_options, ignore_status=True, debug=True)
        status_error = False
        if pre_vm_state == "shutoff":
            if options.count("live"):
                status_error = True
        elif pre_vm_state == "transient":
            if options.count("config"):
                status_error = True

        if vm.is_paused():
            vm.resume()
            vm.wait_for_login().close()
            # For a paused vm, change_media for an eject/update operation
            # must be executed again so that it takes effect
            if ret.exit_status:
                if not action.count("insert") and not options.count("force"):
                    ret = virsh.change_media(vm_ref, target_device, all_options,
                                             ignore_status=True, debug=True)
        if not status_error and ret.exit_status:
            raise exceptions.TestFail("Please check: Bug 1289069 - Ejecting "
                                      "locked cdrom tray using update-device"
                                      " fails but next try succeeds")
        libvirt.check_exit_status(ret, status_error)
        if not ret.exit_status:
            check_result(vm_name, device_source, device_type, target_device,
                         options, pre_vm_state, attach)

        if action_twice:
            if pre_vm_state == "paused":
                if not vm.pause():
                    raise exceptions.TestFail("Cann't pause the domain")
                time.sleep(5)
            attach = True
            device_source = new_iso
            if action_twice == "--eject ":
                #options_twice += " --force "
                source = ""
                attach = False
            else:
                source = device_source
            all_options = action_twice + options_twice + " " + source
            time.sleep(5)
            ret = virsh.change_media(vm_ref, target_device, all_options,
                                     ignore_status=True, debug=True)
            status_error = False
            if pre_vm_state == "shutoff":
                if options_twice.count("live"):
                    status_error = True
            elif pre_vm_state == "transient":
                if options_twice.count("config"):
                    status_error = True

            if action_twice == "--insert ":
                if pre_vm_state in ["running", "paused"]:
                    if options in ["--force", "--current", "", "--live"]:
                        if options_twice.count("config"):
                            status_error = True
                    elif options == "--config":
                        if options_twice in ["--force", "--current", ""]:
                            status_error = True
                        elif options_twice.count("live"):
                            status_error = True
                elif pre_vm_state == "transient":
                    if ret.exit_status:
                        status_error = True
                elif pre_vm_state == "shutoff":
                    if options.count("live"):
                        status_error = True
            if vm.is_paused():
                vm.resume()
                vm.wait_for_login().close()
                # For a paused vm, change_media for an eject/update operation
                # must be executed again so that it takes effect
                if ret.exit_status and not action_twice.count("insert"):
                    ret = virsh.change_media(vm_ref, target_device, all_options,
                                             ignore_status=True, debug=True)
            if not status_error and ret.exit_status:
                raise exceptions.TestFail("Please check: Bug 1289069 - Ejecting "
                                          "locked cdrom tray using update-device"
                                          " fails but next try succeeds")
            libvirt.check_exit_status(ret, status_error)
            if not ret.exit_status:
                check_result(vm_name, device_source, device_type, target_device,
                             options_twice, pre_vm_state, attach)

        # Try to start vm.
        if vm.is_dead():
            vm.start()
            vm.wait_for_login().close()
    finally:
        if vm.is_alive():
            vm.destroy(gracefully=False)
        # Recover xml of vm.
        vmxml_backup.sync()
        # Remove disks
        if os.path.exists(init_iso):
            os.remove(init_iso)
        if os.path.exists(old_iso):
            os.remove(old_iso)
        if os.path.exists(new_iso):
            os.remove(new_iso)
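
Most of check_result above boils down to a mapping from the virsh flag set (--live / --config / --current) and the domain state to which XML, active or inactive, is expected to change. A rough standalone sketch of that rule of thumb (simplified; not the avocado-vt helpers), with logging.debug tracing the decision:

import logging

logging.basicConfig(level=logging.DEBUG)

def expected_xml_updates(flags, vm_state):
    """Return (active_updated, inactive_updated) for a change-media call.

    Simplified rule: --live touches the running (active) XML, --config
    touches the persistent (inactive) XML, and --current picks whichever
    matches the current domain state.
    """
    active = "live" in flags or ("current" in flags and vm_state == "running")
    inactive = "config" in flags or ("current" in flags and vm_state == "shutoff")
    logging.debug("flags=%s state=%s -> active=%s inactive=%s",
                  flags, vm_state, active, inactive)
    return active, inactive

# A running domain changed with --live --config should update both XMLs.
print(expected_xml_updates("--live --config", "running"))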

Example 199

Project: tp-qemu
Source File: cpuflags.py
View license
def run(test, params, env):
    """
    Boot guest with different cpu flags and check if guest works correctly.

    :param test: kvm test object.
    :param params: Dictionary with the test parameters.
    :param env: Dictionary with test environment.
    """
    utils_misc.Flag.aliases = utils_misc.kvm_map_flags_aliases
    qemu_binary = utils_misc.get_qemu_binary(params)

    cpuflags_src = os.path.join(data_dir.get_deps_dir("cpu_flags"), "src")
    cpuflags_def = os.path.join(data_dir.get_deps_dir("cpu_flags"),
                                "cpu_map.xml")
    smp = int(params.get("smp", 1))

    all_host_supported_flags = params.get("all_host_supported_flags", "no")

    mig_timeout = float(params.get("mig_timeout", "3600"))
    mig_protocol = params.get("migration_protocol", "tcp")
    mig_speed = params.get("mig_speed", "1G")

    cpu_model_black_list = params.get("cpu_model_blacklist", "").split(" ")

    multi_host_migration = params.get("multi_host_migration", "no")

    class HgFlags(object):

        def __init__(self, cpu_model, extra_flags=set([])):
            virtual_flags = set(map(utils_misc.Flag,
                                    params.get("guest_spec_flags", "").split()))
            self.hw_flags = set(map(utils_misc.Flag,
                                    params.get("host_spec_flags", "").split()))
            self.qemu_support_flags = get_all_qemu_flags()
            self.host_support_flags = set(map(utils_misc.Flag,
                                              utils_misc.get_cpu_flags()))
            self.quest_cpu_model_flags = (get_guest_host_cpuflags(cpu_model) -
                                          virtual_flags)

            self.supported_flags = (self.qemu_support_flags &
                                    self.host_support_flags)
            self.cpumodel_unsupport_flags = (self.supported_flags -
                                             self.quest_cpu_model_flags)

            self.host_unsupported_flags = (self.quest_cpu_model_flags -
                                           self.host_support_flags)

            self.all_possible_guest_flags = (self.quest_cpu_model_flags -
                                             self.host_unsupported_flags)
            self.all_possible_guest_flags |= self.cpumodel_unsupport_flags

            self.guest_flags = (self.quest_cpu_model_flags -
                                self.host_unsupported_flags)
            self.guest_flags |= extra_flags

            self.host_all_unsupported_flags = set([])
            self.host_all_unsupported_flags |= self.qemu_support_flags
            self.host_all_unsupported_flags -= (self.host_support_flags |
                                                virtual_flags)

    def start_guest_with_cpuflags(cpuflags, smp=None, migration=False,
                                  wait=True):
        """
        Try to boot a guest with the given cpu flags and try to log in to it.
        """
        params_b = params.copy()
        params_b["cpu_model"] = cpuflags
        if smp is not None:
            params_b["smp"] = smp

        vm_name = "vm1-cpuflags"
        vm = qemu_vm.VM(vm_name, params_b, test.bindir, env['address_cache'])
        env.register_vm(vm_name, vm)
        if (migration is True):
            vm.create(migration_mode=mig_protocol)
        else:
            vm.create()

        session = None
        try:
            vm.verify_alive()

            if wait:
                session = vm.wait_for_login()
        except qemu_vm.ImageUnbootableError:
            vm.destroy(gracefully=False)
            raise

        return (vm, session)

    def get_guest_system_cpuflags(vm_session):
        """
        Get guest system cpuflags.

        :param vm_session: session to the vm being checked.
        :return: set of corresponding flags
        """
        flags_re = re.compile(r'^flags\s*:(.*)$', re.MULTILINE)
        out = vm_session.cmd_output("cat /proc/cpuinfo")

        flags = flags_re.search(out).groups()[0].split()
        return set(map(utils_misc.Flag, flags))

    def get_guest_host_cpuflags_legacy(cpumodel):
        """
        Get cpu flags corresponding to the cpumodel parameter.

        :param cpumodel: Cpumodel parameter sent to <qemu-kvm-cmd>.
        :return: set of corresponding flags
        """
        cmd = qemu_binary + " -cpu ?dump"
        output = utils.run(cmd).stdout
        re.escape(cpumodel)
        pattern = (r".+%s.*\n.*\n +feature_edx .+ \((.*)\)\n +feature_"
                   "ecx .+ \((.*)\)\n +extfeature_edx .+ \((.*)\)\n +"
                   "extfeature_ecx .+ \((.*)\)\n" % (cpumodel))
        flags = []
        model = re.search(pattern, output)
        if model is None:
            raise error.TestFail("Cannot find %s cpu model." % (cpumodel))
        for flag_group in model.groups():
            flags += flag_group.split()
        return set(map(utils_misc.Flag, flags))

    class ParseCpuFlags(object):

        def __init__(self, encoding=None):
            self.cpus = {}
            self.parser = expat.ParserCreate(encoding)
            self.parser.StartElementHandler = self.start_element
            self.parser.EndElementHandler = self.end_element
            self.last_arch = None
            self.last_model = None
            self.sub_model = False
            self.all_flags = []

        def start_element(self, name, attrs):
            if name == "cpus":
                self.cpus = {}
            elif name == "arch":
                self.last_arch = self.cpus[attrs['name']] = {}
            elif name == "model":
                if self.last_model is None:
                    self.last_model = self.last_arch[attrs['name']] = []
                else:
                    self.last_model += self.last_arch[attrs['name']]
                    self.sub_model = True
            elif name == "feature":
                if self.last_model is not None:
                    self.last_model.append(attrs['name'])
                else:
                    self.all_flags.append(attrs['name'])

        def end_element(self, name):
            if name == "arch":
                self.last_arch = None
            elif name == "model":
                if self.sub_model is False:
                    self.last_model = None
                else:
                    self.sub_model = False

        def parse_file(self, file_path):
            self.parser.ParseFile(open(file_path, 'r'))
            return self.cpus

    def get_guest_host_cpuflags_1350(cpumodel):
        """
        Get cpu flags corresponding to the cpumodel parameter.

        :param cpumodel: Cpumodel parameter sent to <qemu-kvm-cmd>.
        :return: set of corresponding flags
        """
        p = ParseCpuFlags()
        cpus = p.parse_file(cpuflags_def)
        for arch in cpus.values():
            if cpumodel in arch.keys():
                flags = arch[cpumodel]
        return set(map(utils_misc.Flag, flags))

    get_guest_host_cpuflags_BAD = get_guest_host_cpuflags_1350

    def get_all_qemu_flags_legacy():
        cmd = qemu_binary + " -cpu ?cpuid"
        output = utils.run(cmd).stdout

        flags_re = re.compile(r".*\n.*f_edx:(.*)\n.*f_ecx:(.*)\n"
                              ".*extf_edx:(.*)\n.*extf_ecx:(.*)")
        m = flags_re.search(output)
        flags = []
        for a in m.groups():
            flags += a.split()

        return set(map(utils_misc.Flag, flags))

    def get_all_qemu_flags_1350():
        cmd = qemu_binary + " -cpu ?"
        output = utils.run(cmd).stdout

        flags_re = re.compile(r".*Recognized CPUID flags:\n(.*)", re.DOTALL)
        m = flags_re.search(output)
        flags = []
        for a in m.groups():
            flags += a.split()

        return set(map(utils_misc.Flag, flags))

    def get_all_qemu_flags_BAD():
        """
        Get all cpu flags known to qemu, parsed from the cpu map definition
        file.

        :return: set of flags
        """
        p = ParseCpuFlags()
        p.parse_file(cpuflags_def)
        return set(map(utils_misc.Flag, p.all_flags))

    def get_cpu_models_legacy():
        """
        Get all cpu models from qemu.

        :return: cpu models.
        """
        cmd = qemu_binary + " -cpu ?"
        output = utils.run(cmd).stdout

        cpu_re = re.compile(r"\w+\s+\[?(\w+)\]?")
        return cpu_re.findall(output)

    def get_cpu_models_1350():
        """
        Get all cpu models from qemu.

        :return: cpu models.
        """
        cmd = qemu_binary + " -cpu ?"
        output = utils.run(cmd).stdout

        cpu_re = re.compile(r"x86\s+\[?(\w+)\]?")
        return cpu_re.findall(output)

    get_cpu_models_BAD = get_cpu_models_1350

    def get_qemu_cpu_cmd_version():
        cmd = qemu_binary + " -cpu ?cpuid"
        try:
            utils.run(cmd).stdout
            return "legacy"
        except:
            cmd = qemu_binary + " -cpu ?"
            output = utils.run(cmd).stdout
            if "CPUID" in output:
                return "1350"
            else:
                return "BAD"

    qcver = get_qemu_cpu_cmd_version()

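    # Pick the flag/model parsing helpers that match the detected "-cpu ?"
    # output syntax ("legacy", "1350" or "BAD") by looking them up by name.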
    get_guest_host_cpuflags = locals()["get_guest_host_cpuflags_%s" % qcver]
    get_all_qemu_flags = locals()["get_all_qemu_flags_%s" % qcver]
    get_cpu_models = locals()["get_cpu_models_%s" % qcver]

    def get_flags_full_name(cpu_flag):
        """
        Get the full name of a Flag.

        :param cpu_flag: Flag
        :return: the matching Flag from qemu's flag list, or [] if not found.
        """
        cpu_flag = utils_misc.Flag(cpu_flag)
        for f in get_all_qemu_flags():
            if f == cpu_flag:
                return utils_misc.Flag(f)
        return []

    def parse_qemu_cpucommand(cpumodel):
        """
        Parse qemu cpu params.

        :param cpumodel: Cpu model command.
        :return: All flags which guest must have.
        """
        flags = cpumodel.split(",")
        cpumodel = flags[0]

        qemu_model_flag = get_guest_host_cpuflags(cpumodel)
        host_support_flag = set(map(utils_misc.Flag,
                                    utils_misc.get_cpu_flags()))
        real_flags = qemu_model_flag & host_support_flag

        for f in flags[1:]:
            if f[0].startswith("+"):
                real_flags |= set([get_flags_full_name(f[1:])])
            if f[0].startswith("-"):
                real_flags -= set([get_flags_full_name(f[1:])])

        return real_flags

    def check_cpuflags(cpumodel, vm_session):
        """
        Check if vm flags are same like flags select by cpumodel.

        :param cpumodel: params for -cpu param in qemu-kvm
        :param vm_session: session to vm to check flags.

        :return: ([excess], [missing]) flags
        """
        gf = get_guest_system_cpuflags(vm_session)
        rf = parse_qemu_cpucommand(cpumodel)

        logging.debug("Guest flags: %s", gf)
        logging.debug("Host flags: %s", rf)
        logging.debug("Flags on guest not defined by host: %s", (gf - rf))
        return rf - gf

    def get_cpu_models_supported_by_host():
        """
        Get all cpu models whose set of flags is a subset of the host's flags.

        :return: [cpumodels]
        """
        cpumodels = []
        for cpumodel in get_cpu_models():
            flags = HgFlags(cpumodel)
            if flags.host_unsupported_flags == set([]):
                cpumodels.append(cpumodel)
        return cpumodels

    def disable_cpu(vm_session, cpu, disable=True):
        """
        Disable cpu in guest system.

        :param cpu: CPU id to disable.
        :param disable: if True disable cpu else enable cpu.
        """
        system_cpu_dir = "/sys/devices/system/cpu/"
        cpu_online = system_cpu_dir + "cpu%d/online" % (cpu)
        cpu_state = vm_session.cmd_output("cat %s" % cpu_online).strip()
        if disable and cpu_state == "1":
            vm_session.cmd("echo 0 > %s" % cpu_online)
            logging.debug("Guest cpu %d is disabled.", cpu)
        elif cpu_state == "0":
            vm_session.cmd("echo 1 > %s" % cpu_online)
            logging.debug("Guest cpu %d is enabled.", cpu)

    def check_online_cpus(vm_session, smp, disabled_cpu):
        """
        Check which guest CPUs are online.

        :param smp: Number of cpu cores in the system.
        :param disabled_cpu: List of disabled cpus.

        :return: List of CPUs that are still enabled after disable procedure.
        """
        online = [0]
        for cpu in range(1, smp):
            system_cpu_dir = "/sys/devices/system/cpu/"
            cpu_online = system_cpu_dir + "cpu%d/online" % (cpu)
            cpu_state = vm_session.cmd_output("cat %s" % cpu_online).strip()
            if cpu_state == "1":
                online.append(cpu)
        cpu_proc = vm_session.cmd_output("cat /proc/cpuinfo")
        cpu_state_proc = map(lambda x: int(x),
                             re.findall(r"processor\s+:\s*(\d+)\n", cpu_proc))
        if set(online) != set(cpu_state_proc):
            raise error.TestError("Some cpus are disabled but %s are still "
                                  "visible like online in /proc/cpuinfo." %
                                  (set(cpu_state_proc) - set(online)))

        return set(online) - set(disabled_cpu)

    def install_cpuflags_test_on_vm(vm, dst_dir):
        """
        Install the cpuflags stress test on the vm.

        :param vm: virtual machine.
        :param dst_dir: Installation path.
        """
        session = vm.wait_for_login()
        vm.copy_files_to(cpuflags_src, dst_dir)
        session.cmd("sync")
        session.cmd("cd %s; make EXTRA_FLAGS='';" %
                    os.path.join(dst_dir, "cpu_flags"))
        session.cmd("sync")
        session.close()

    def check_cpuflags_work(vm, path, flags):
        """
        Check which flags work.

        :param vm: Virtual machine.
        :param path: Path of cpuflags_test
        :param flags: Flags to test.
        :return: Tuple (Working, not working, not tested) flags.
        """
        pass_Flags = []
        not_tested = []
        not_working = []
        session = vm.wait_for_login()
        for f in flags:
            try:
                for tc in utils_misc.kvm_map_flags_to_test[f]:
                    session.cmd("%s/cpuflags-test --%s" %
                                (os.path.join(path, "cpu_flags"), tc))
                pass_Flags.append(f)
            except aexpect.ShellCmdError:
                not_working.append(f)
            except KeyError:
                not_tested.append(f)
        return (set(map(utils_misc.Flag, pass_Flags)),
                set(map(utils_misc.Flag, not_working)),
                set(map(utils_misc.Flag, not_tested)))

    def run_stress(vm, timeout, guest_flags):
        """
        Run stress on vm for timeout time.
        """
        ret = False
        install_path = "/tmp"
        install_cpuflags_test_on_vm(vm, install_path)
        flags = check_cpuflags_work(vm, install_path, guest_flags)
        dd_session = vm.wait_for_login()
        stress_session = vm.wait_for_login()
        dd_session.sendline("dd if=/dev/[svh]da of=/tmp/stressblock"
                            " bs=10MB count=100 &")
        try:
            stress_session.cmd("%s/cpuflags-test --stress %s%s" %
                               (os.path.join(install_path, "cpu_flags"), smp,
                                utils_misc.kvm_flags_to_stresstests(flags[0])),
                               timeout=timeout)
        except aexpect.ShellTimeoutError:
            ret = True
        stress_session.close()
        dd_session.close()
        return ret

    def separe_cpu_model(cpu_model):
        try:
            (cpu_model, _) = cpu_model.split(":")
        except ValueError:
            cpu_model = cpu_model
        return cpu_model

    def parse_cpu_model():
        """
        Parse cpu_models from config file.

        :return: [(cpumodel, extra_flags)]
        """
        cpu_model = params.get("cpu_model", "")
        logging.debug("CPU model found: %s", str(cpu_model))

        try:
            (cpu_model, extra_flags) = cpu_model.split(":")
            extra_flags = set(map(utils_misc.Flag, extra_flags.split(",")))
        except ValueError:
            cpu_model = cpu_model
            extra_flags = set([])
        return (cpu_model, extra_flags)

    class MiniSubtest(object):

        def __new__(cls, *args, **kargs):
            self = super(MiniSubtest, cls).__new__(cls)
            ret = None
            if args is None:
                args = []
            try:
                ret = self.test(*args, **kargs)
            finally:
                if hasattr(self, "clean"):
                    self.clean()
            return ret

    def print_exception(called_object):
        exc_type, exc_value, exc_traceback = sys.exc_info()
        logging.error("In function (" + called_object.__name__ + "):")
        logging.error("Call from:\n" +
                      traceback.format_stack()[-2][:-1])
        logging.error("Exception from:\n" +
                      "".join(traceback.format_exception(
                              exc_type, exc_value,
                              exc_traceback.tb_next)))

    class Test_temp(MiniSubtest):

        def clean(self):
            logging.info("cleanup")
            if (hasattr(self, "vm")):
                vm = getattr(self, "vm")
                vm.destroy(gracefully=False)

    # 1) <qemu-kvm-cmd> -cpu ?model
    class test_qemu_cpu_model(MiniSubtest):

        def test(self):
            if qcver == "legacy":
                cpu_models = params.get("cpu_models", "core2duo").split()
                cmd = qemu_binary + " -cpu ?model"
                result = utils.run(cmd)
                missing = []
                cpu_models = map(separe_cpu_model, cpu_models)
                for cpu_model in cpu_models:
                    if cpu_model not in result.stdout:
                        missing.append(cpu_model)
                if missing:
                    raise error.TestFail("CPU models %s are not in output "
                                         "'%s' of command \n%s" %
                                         (missing, cmd, result.stdout))
            elif qcver == "1350":
                raise error.TestNAError("New qemu use new -cpu ? cmd.")

    # 2) <qemu-kvm-cmd> -cpu ?dump
    class test_qemu_dump(MiniSubtest):

        def test(self):
            if qcver == "legacy":
                cpu_models = params.get("cpu_models", "core2duo").split()
                cmd = qemu_binary + " -cpu ?dump"
                result = utils.run(cmd)
                cpu_models = map(separe_cpu_model, cpu_models)
                missing = []
                for cpu_model in cpu_models:
                    if cpu_model not in result.stdout:
                        missing.append(cpu_model)
                if missing:
                    raise error.TestFail("CPU models %s are not in output "
                                         "'%s' of command \n%s" %
                                         (missing, cmd, result.stdout))
            elif qcver == "1350":
                raise error.TestNAError(
                    "New qemu does not support -cpu ?dump.")

    # 3) <qemu-kvm-cmd> -cpu ?cpuid
    class test_qemu_cpuid(MiniSubtest):

        def test(self):
            if qcver == "legacy":
                cmd = qemu_binary + " -cpu ?cpuid"
                result = utils.run(cmd)
                if result.stdout == "":
                    raise error.TestFail("There aren't any cpu Flag in output"
                                         " '%s' of command \n%s" %
                                         (cmd, result.stdout))
            elif qcver == "1350":
                raise error.TestNAError("New qemu use new -cpu ? cmd.")

    # 1) boot with cpu_model
    class test_boot_cpu_model(Test_temp):

        def test(self):
            cpu_model, _ = parse_cpu_model()
            logging.debug("Run tests with cpu model %s", cpu_model)
            flags = HgFlags(cpu_model)
            (self.vm, session) = start_guest_with_cpuflags(cpu_model)
            not_enable_flags = (check_cpuflags(cpu_model, session) -
                                flags.hw_flags)
            if not_enable_flags != set([]):
                raise error.TestFail("Flags defined on host but not found "
                                     "on guest: %s" % (not_enable_flags))

    # 2) success boot with supported flags
    class test_boot_cpu_model_and_additional_flags(Test_temp):

        def test(self):
            cpu_model, extra_flags = parse_cpu_model()

            flags = HgFlags(cpu_model, extra_flags)

            logging.debug("Cpu mode flags %s.",
                          str(flags.quest_cpu_model_flags))
            cpuf_model = cpu_model

            if all_host_supported_flags == "yes":
                for fadd in flags.cpumodel_unsupport_flags:
                    cpuf_model += ",+" + str(fadd)
            else:
                for fadd in extra_flags:
                    cpuf_model += ",+" + str(fadd)

            for fdel in flags.host_unsupported_flags:
                cpuf_model += ",-" + str(fdel)

            if all_host_supported_flags == "yes":
                guest_flags = flags.all_possible_guest_flags
            else:
                guest_flags = flags.guest_flags

            (self.vm, session) = start_guest_with_cpuflags(cpuf_model)

            not_enable_flags = (check_cpuflags(cpuf_model, session) -
                                flags.hw_flags)
            if not_enable_flags != set([]):
                logging.info("Model unsupported flags: %s",
                             str(flags.cpumodel_unsupport_flags))
                logging.error("Flags defined on host but not on found "
                              "on guest: %s", str(not_enable_flags))
            logging.info("Check main instruction sets.")

            install_path = "/tmp"
            install_cpuflags_test_on_vm(self.vm, install_path)

            Flags = check_cpuflags_work(self.vm, install_path,
                                        flags.all_possible_guest_flags)
            logging.info("Woking CPU flags: %s", str(Flags[0]))
            logging.info("Not working CPU flags: %s", str(Flags[1]))
            logging.warning("Flags works even if not defined on guest cpu "
                            "flags: %s", str(Flags[0] - guest_flags))
            logging.warning("Not tested CPU flags: %s", str(Flags[2]))

            if Flags[1] & guest_flags:
                raise error.TestFail("Some flags do not work: %s" %
                                     (str(Flags[1])))

    # 3) fail boot unsupported flags
    class test_boot_warn_with_host_unsupported_flags(MiniSubtest):

        def test(self):
            # This is virtual cpu flags which are supported by
            # qemu but no with host cpu.
            cpu_model, extra_flags = parse_cpu_model()

            flags = HgFlags(cpu_model, extra_flags)

            logging.debug("Unsupported flags %s.",
                          str(flags.host_all_unsupported_flags))
            cpuf_model = cpu_model + ",check"

            # Add unsupported flags.
            for fadd in flags.host_all_unsupported_flags:
                cpuf_model += ",+" + str(fadd)

            vnc_port = utils_misc.find_free_port(5900, 6100) - 5900
            cmd = "%s -cpu %s -vnc :%d -enable-kvm" % (qemu_binary,
                                                       cpuf_model,
                                                       vnc_port)
            out = None

            try:
                try:
                    out = utils.run(cmd, timeout=5, ignore_status=True).stderr
                    raise error.TestFail("Guest not boot with unsupported "
                                         "flags.")
                except error.CmdError, e:
                    out = e.result_obj.stderr
            finally:
                uns_re = re.compile(r"^warning:.*flag '(.+)'", re.MULTILINE)
                nf_re = re.compile(
                    r"^CPU feature (.+) not found", re.MULTILINE)
                warn_flags = set([utils_misc.Flag(x)
                                  for x in uns_re.findall(out)])
                not_found = set([utils_misc.Flag(x)
                                 for x in nf_re.findall(out)])
                fwarn_flags = flags.host_all_unsupported_flags - warn_flags
                fwarn_flags -= not_found
                if fwarn_flags:
                    raise error.TestFail("Qemu did not warn the use of "
                                         "flags %s" % str(fwarn_flags))

    # 3) fail boot unsupported flags
    class test_fail_boot_with_host_unsupported_flags(MiniSubtest):

        def test(self):
            # This is virtual cpu flags which are supported by
            # qemu but no with host cpu.
            cpu_model, extra_flags = parse_cpu_model()

            flags = HgFlags(cpu_model, extra_flags)
            cpuf_model = cpu_model + ",enforce"

            logging.debug("Unsupported flags %s.",
                          str(flags.host_all_unsupported_flags))

            # Add unsupported flags.
            for fadd in flags.host_all_unsupported_flags:
                cpuf_model += ",+" + str(fadd)

            vnc_port = utils_misc.find_free_port(5900, 6100) - 5900
            cmd = "%s -cpu %s -vnc :%d -enable-kvm" % (qemu_binary,
                                                       cpuf_model,
                                                       vnc_port)
            out = None
            try:
                try:
                    out = utils.run(cmd, timeout=5, ignore_status=True).stderr
                except error.CmdError:
                    logging.error("Host boot with unsupported flag")
            finally:
                uns_re = re.compile(r"^warning:.*flag '(.+)'", re.MULTILINE)
                nf_re = re.compile(
                    r"^CPU feature (.+) not found", re.MULTILINE)
                warn_flags = set([utils_misc.Flag(x)
                                  for x in uns_re.findall(out)])
                not_found = set([utils_misc.Flag(x)
                                 for x in nf_re.findall(out)])
                fwarn_flags = flags.host_all_unsupported_flags - warn_flags
                fwarn_flags -= not_found
                if fwarn_flags:
                    raise error.TestFail("Qemu did not warn the use of "
                                         "flags %s" % str(fwarn_flags))

    # 4) check guest flags under load cpu, stress and system (dd)
    class test_boot_guest_and_try_flags_under_load(Test_temp):

        def test(self):
            logging.info("Check guest working cpuflags under load "
                         "cpu and stress and system (dd)")
            cpu_model, extra_flags = parse_cpu_model()

            flags = HgFlags(cpu_model, extra_flags)

            cpuf_model = cpu_model

            logging.debug("Cpu mode flags %s.",
                          str(flags.quest_cpu_model_flags))

            if all_host_supported_flags == "yes":
                logging.debug("Added flags %s.",
                              str(flags.cpumodel_unsupport_flags))

                # Add unsupported flags.
                for fadd in flags.cpumodel_unsupport_flags:
                    cpuf_model += ",+" + str(fadd)

                for fdel in flags.host_unsupported_flags:
                    cpuf_model += ",-" + str(fdel)

            (self.vm, _) = start_guest_with_cpuflags(cpuf_model, smp)

            if (not run_stress(self.vm, 60, flags.guest_flags)):
                raise error.TestFail("Stress test ended before"
                                     " end of test.")

        def clean(self):
            logging.info("cleanup")
            self.vm.destroy(gracefully=False)

    # 5) Online/offline CPU
    class test_online_offline_guest_CPUs(Test_temp):

        def test(self):
            cpu_model, extra_flags = parse_cpu_model()

            logging.debug("Run tests with cpu model %s.", (cpu_model))
            flags = HgFlags(cpu_model, extra_flags)

            (self.vm, session) = start_guest_with_cpuflags(cpu_model, smp)

            def encap(timeout):
                random.seed()
                begin = time.time()
                end = begin
                if smp > 1:
                    while end - begin < 60:
                        cpu = random.randint(1, smp - 1)
                        if random.randint(0, 1):
                            disable_cpu(session, cpu, True)
                        else:
                            disable_cpu(session, cpu, False)
                        end = time.time()
                    return True
                else:
                    logging.warning("For this test is necessary smp > 1.")
                    return False
            timeout = 60

            test_flags = flags.guest_flags
            if all_host_supported_flags == "yes":
                test_flags = flags.all_possible_guest_flags

            result = utils_misc.parallel([(encap, [timeout]),
                                          (run_stress, [self.vm, timeout,
                                                        test_flags])])
            if not (result[0] and result[1]):
                raise error.TestFail("Stress tests failed before"
                                     " end of testing.")

    # 6) migration test
    class test_migration_with_additional_flags(Test_temp):

        def test(self):
            cpu_model, extra_flags = parse_cpu_model()

            flags = HgFlags(cpu_model, extra_flags)

            logging.debug("Cpu mode flags %s.",
                          str(flags.quest_cpu_model_flags))
            logging.debug("Added flags %s.",
                          str(flags.cpumodel_unsupport_flags))
            cpuf_model = cpu_model

            # Add unsupported flags.
            for fadd in flags.cpumodel_unsupport_flags:
                cpuf_model += ",+" + str(fadd)

            for fdel in flags.host_unsupported_flags:
                cpuf_model += ",-" + str(fdel)

            (self.vm, _) = start_guest_with_cpuflags(cpuf_model, smp)

            install_path = "/tmp"
            install_cpuflags_test_on_vm(self.vm, install_path)
            flags = check_cpuflags_work(self.vm, install_path,
                                        flags.guest_flags)
            dd_session = self.vm.wait_for_login()
            stress_session = self.vm.wait_for_login()

            dd_session.sendline("nohup dd if=/dev/[svh]da of=/tmp/"
                                "stressblock bs=10MB count=100 &")
            cmd = ("nohup %s/cpuflags-test --stress  %s%s &" %
                   (os.path.join(install_path, "cpu_flags"), smp,
                    utils_misc.kvm_flags_to_stresstests(flags[0])))
            stress_session.sendline(cmd)

            time.sleep(5)

            self.vm.monitor.migrate_set_speed(mig_speed)
            self.clone = self.vm.migrate(
                mig_timeout, mig_protocol, offline=False,
                not_wait_for_migration=True)

            time.sleep(5)

            try:
                self.vm.wait_for_migration(10)
            except virt_vm.VMMigrateTimeoutError:
                self.vm.monitor.migrate_set_downtime(1)
                self.vm.wait_for_migration(mig_timeout)

            # Swap due to test cleaning.
            temp = self.vm.clone(copy_state=True)
            self.vm.__dict__ = self.clone.__dict__
            self.clone = temp

            self.vm.resume()
            self.clone.destroy(gracefully=False)

            stress_session = self.vm.wait_for_login()

            # If cpuflags-test hangs during the migration test, raise an exception
            try:
                stress_session.cmd('killall cpuflags-test')
            except aexpect.ShellCmdError:
                raise error.TestFail("Cpuflags-test should work after"
                                     " migration.")

    def net_send_object(socket, obj):
        """
        Send python object over network.

        :param socket: connected socket to send the data over.
        :param obj: object to send
        """
        data = pickle.dumps(obj, pickle.HIGHEST_PROTOCOL)
        socket.sendall("%6d" % len(data))
        socket.sendall(data)

    def net_recv_object(socket, timeout=60):
        """
        Receive python object over network.

        :param socket: connected socket to receive the data from.
        :param timeout: seconds to wait for the data.
        :return: object from network
        """
        try:
            time_start = time.time()
            data = ""
            d_len = int(socket.recv(6))

            while (len(data) < d_len and (time.time() - time_start) < timeout):
                data += socket.recv(d_len - len(data))

            data = pickle.loads(data)
            return data
        except:
            error.TestFail("Failed to receive python object over the network")
            raise

    class test_multi_host_migration(Test_temp):

        def test(self):
            """
            Test migration between multiple hosts.
            """
            cpu_model, extra_flags = parse_cpu_model()

            flags = HgFlags(cpu_model, extra_flags)

            logging.debug("Cpu mode flags %s.",
                          str(flags.quest_cpu_model_flags))
            logging.debug("Added flags %s.",
                          str(flags.cpumodel_unsupport_flags))
            cpuf_model = cpu_model

            for fadd in extra_flags:
                cpuf_model += ",+" + str(fadd)

            for fdel in flags.host_unsupported_flags:
                cpuf_model += ",-" + str(fdel)

            install_path = "/tmp"

            class testMultihostMigration(migration.MultihostMigration):

                def __init__(self, test, params, env):
                    migration.MultihostMigration.__init__(self, test, params,
                                                          env)

                def migration_scenario(self):
                    srchost = self.params.get("hosts")[0]
                    dsthost = self.params.get("hosts")[1]

                    def worker(mig_data):
                        vm = env.get_vm("vm1")
                        session = vm.wait_for_login(timeout=self.login_timeout)

                        install_cpuflags_test_on_vm(vm, install_path)

                        Flags = check_cpuflags_work(vm, install_path,
                                                    flags.all_possible_guest_flags)
                        logging.info("Working CPU flags: %s", str(Flags[0]))
                        logging.info("Not working CPU flags: %s",
                                     str(Flags[1]))
                        logging.warning("Flags that work even though not"
                                        " defined in guest cpu flags: %s",
                                        str(Flags[0] - flags.guest_flags))
                        logging.warning("Not tested CPU flags: %s",
                                        str(Flags[2]))
                        session.sendline("nohup dd if=/dev/[svh]da of=/tmp/"
                                         "stressblock bs=10MB count=100 &")

                        cmd = ("nohup %s/cpuflags-test --stress  %s%s &" %
                               (os.path.join(install_path, "cpu_flags"),
                                smp,
                                utils_misc.kvm_flags_to_stresstests(Flags[0] &
                                                                    flags.guest_flags)))
                        logging.debug("Guest_flags: %s",
                                      str(flags.guest_flags))
                        logging.debug("Working_flags: %s", str(Flags[0]))
                        logging.debug("Start stress on guest: %s", cmd)
                        session.sendline(cmd)

                    def check_worker(mig_data):
                        vm = env.get_vm("vm1")

                        vm.verify_illegal_instruction()

                        session = vm.wait_for_login(timeout=self.login_timeout)

                        try:
                            session.cmd('killall cpuflags-test')
                        except aexpect.ShellCmdError:
                            raise error.TestFail("The cpuflags-test program"
                                                 " should be active after"
                                                 " migration and it's not.")

                        Flags = check_cpuflags_work(vm, install_path,
                                                    flags.all_possible_guest_flags)
                        logging.info("Working CPU flags: %s",
                                     str(Flags[0]))
                        logging.info("Not working CPU flags: %s",
                                     str(Flags[1]))
                        logging.warning("Flags that work even though not"
                                        " defined in guest cpu flags: %s",
                                        str(Flags[0] - flags.guest_flags))
                        logging.warning("Not tested CPU flags: %s",
                                        str(Flags[2]))

                    self.migrate_wait(["vm1"], srchost, dsthost,
                                      worker, check_worker)

            params_b = params.copy()
            params_b["cpu_model"] = cpu_model
            mig = testMultihostMigration(test, params_b, env)
            mig.run()

    class test_multi_host_migration_onoff_cpu(Test_temp):

        def test(self):
            """
            Test migration between multiple hosts.
            """
            cpu_model, extra_flags = parse_cpu_model()

            flags = HgFlags(cpu_model, extra_flags)

            logging.debug("Cpu model flags %s.",
                          str(flags.quest_cpu_model_flags))
            logging.debug("Added flags %s.",
                          str(flags.cpumodel_unsupport_flags))
            cpuf_model = cpu_model

            for fadd in extra_flags:
                cpuf_model += ",+" + str(fadd)

            for fdel in flags.host_unsupported_flags:
                cpuf_model += ",-" + str(fdel)

            smp = int(params["smp"])
            disable_cpus = [int(cpu) for cpu in
                            params.get("disable_cpus", "").split()]

            install_path = "/tmp"

            class testMultihostMigration(migration.MultihostMigration):

                def __init__(self, test, params, env):
                    migration.MultihostMigration.__init__(self, test, params,
                                                          env)
                    self.srchost = self.params.get("hosts")[0]
                    self.dsthost = self.params.get("hosts")[1]
                    self.id = {'src': self.srchost,
                               'dst': self.dsthost,
                               "type": "disable_cpu"}
                    self.migrate_count = int(self.params.get('migrate_count',
                                                             '2'))

                def ping_pong_migrate(self, sync, worker, check_worker):
                    for _ in range(self.migrate_count):
                        logging.info("Starting a round of ping-pong"
                                     " migration...")
                        sync.sync(True, timeout=mig_timeout)
                        if self.hostid == self.srchost:
                            self.migrate_wait(["vm1"],
                                              self.srchost,
                                              self.dsthost,
                                              start_work=worker)
                        elif self.hostid == self.dsthost:
                            self.migrate_wait(["vm1"],
                                              self.srchost,
                                              self.dsthost,
                                              check_work=check_worker)
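                        # Swap source and destination so the next round
                        # migrates the VM back the other way.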
                        tmp = self.dsthost
                        self.dsthost = self.srchost
                        self.srchost = tmp

                def migration_scenario(self):

                    sync = SyncData(self.master_id(), self.hostid, self.hosts,
                                    self.id, self.sync_server)

                    def worker(mig_data):
                        vm = env.get_vm("vm1")
                        session = vm.wait_for_login(timeout=self.login_timeout)

                        install_cpuflags_test_on_vm(vm, install_path)

                        Flags = check_cpuflags_work(vm, install_path,
                                                    flags.all_possible_guest_flags)
                        logging.info("Working CPU flags: %s", str(Flags[0]))
                        logging.info("Not working CPU flags: %s",
                                     str(Flags[1]))
                        logging.warning("Flags that work even though not"
                                        " defined in guest cpu flags: %s",
                                        str(Flags[0] - flags.guest_flags))
                        logging.warning("Not tested CPU flags: %s",
                                        str(Flags[2]))
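                        # Take the requested vCPUs offline in the guest before
                        # migration; check_worker verifies they stay offline
                        # on the destination.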
                        for cpu in disable_cpus:
                            if cpu < smp:
                                disable_cpu(session, cpu, True)
                            else:
                                logging.warning("There are not enough cpus"
                                                " in the guest; cannot disable"
                                                " cpu:%s on a guest with"
                                                " smp:%s.", cpu, smp)
                        logging.debug("Guest_flags: %s",
                                      str(flags.guest_flags))
                        logging.debug("Working_flags: %s", str(Flags[0]))

                    def check_worker(mig_data):
                        vm = env.get_vm("vm1")

                        vm.verify_illegal_instruction()

                        session = vm.wait_for_login(timeout=self.login_timeout)

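                        # Make sure the vCPUs disabled on the source are still
                        # offline after the migration.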
                        really_disabled = check_online_cpus(session, smp,
                                                            disable_cpus)

                        not_disabled = set(really_disabled) & set(disable_cpus)
                        if not_disabled:
                            raise error.TestFail("Some of the disabled cpus "
                                                 "are still online. This "
                                                 "shouldn't happen. Cpus "
                                                 "disabled on srchost: %s, "
                                                 "cpus still online on "
                                                 "dsthost: %s" %
                                                 (disable_cpus, not_disabled))

                        Flags = check_cpuflags_work(vm, install_path,
                                                    flags.all_possible_guest_flags)
                        logging.info("Working CPU flags: %s",
                                     str(Flags[0]))
                        logging.info("Not working CPU flags: %s",
                                     str(Flags[1]))
                        logging.warning("Flags that work even though not"
                                        " defined in guest cpu flags: %s",
                                        str(Flags[0] - flags.guest_flags))
                        logging.warning("Not tested CPU flags: %s",
                                        str(Flags[2]))

                    self.ping_pong_migrate(sync, worker, check_worker)

            params_b = params.copy()
            params_b["cpu_model"] = cpu_model
            mig = testMultihostMigration(test, params_b, env)
            mig.run()

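    # Pick the requested test class by name and run it, either with the
    # configured cpu_model or once for every CPU model supported by the host
    # (minus the black list).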
    test_type = params.get("test_type")
    if test_type in locals():
        tests_group = locals()[test_type]
        if params.get("cpu_model"):
            tests_group()
        else:
            cpu_models = (set(get_cpu_models_supported_by_host()) -
                          set(cpu_model_black_list))
            logging.info("Starting test with cpu models %s", str(cpu_models))
            failed = []
            for cpumodel in cpu_models:
                params["cpu_model"] = cpumodel
                try:
                    tests_group()
                except:
                    print_exception(tests_group)
                    failed.append(cpumodel)
            if failed:
                raise error.TestFail("Test of cpu models %s failed." %
                                     (str(failed)))
    else:
        raise error.TestFail("Test group '%s' is not defined in"
                             " cpuflags test" % test_type)

Example 200

Project: Snoopy
Source File: fetchClients.py
View license
def main():

    print "Content-type: xml\n\n";
    MaltegoXML_in = sys.stdin.read()
    if MaltegoXML_in <> '':
        m = MaltegoMsg(MaltegoXML_in)
	logging.debug(m)    
	cursor=stawk_db.dbconnect()
        TRX = MaltegoTransform()

#	logging.debug(m.AdditionalFields['end_time'])

        logging.info("Fetching victims")


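        # Default drone (sensor) filter, overridden by the 'properties.drone'
        # or 'drone' additional fields when present.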
        drone='%'
        if 'properties.drone' in m.AdditionalFields:
            drone=m.AdditionalFields['properties.drone']

        if 'drone' in m.AdditionalFields:
            drone=m.AdditionalFields['drone']

        # drone=m.AdditionalFields['drone']

        # If no start / end times are specified, we default to lookback
        now=datetime.datetime.now()
        if 'start_time' in m.AdditionalFields and 'end_time' in m.AdditionalFields:
            start_time=m.AdditionalFields['start_time']
            end_time=m.AdditionalFields['end_time']
        else:
            start_time=now+datetime.timedelta(seconds=-lookback)
            end_time=now+datetime.timedelta(seconds=lookback)

            # Maltego requires the format e.g. 2012-10-23 22:37:12.0
            now=now.strftime("%Y-%m-%d %H:%M:%S.0")
            start_time=start_time.strftime("%Y-%m-%d %H:%M:%S.0")
            end_time=end_time.strftime("%Y-%m-%d %H:%M:%S.0")

        logging.debug("1. S,E - %s / %s"%(start_time,end_time))

        if 'location' in m.AdditionalFields:
            location=m.AdditionalFields['location']
            # I'm a dirty hacker, short and stout.
            logging.debug("SELECT MIN(timestamp),MAX(timestamp) FROM probes WHERE location LIKE %s AND monitor_id=%s AND timestamp >= %s AND timestamp <= %s"%(location,drone,start_time,end_time))
            cursor.execute("SELECT MIN(timestamp),MAX(timestamp) FROM probes WHERE location LIKE %s AND monitor_id=%s AND timestamp >= %s AND timestamp <= %s",(location,drone,start_time,end_time))
            result=cursor.fetchone()
            start_time=result[0]
            end_time=result[1]
        else:
            location="%"


        logging.debug("2. S,E - %s / %s"%(start_time,end_time))
        logging.debug(drone)


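        # One UNION query pulls clients seen via Wi-Fi probes
        # (proximity_sessions) and via the web proxy logs (squid_logs); the
        # two sources are merged per MAC address below.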
        try:

            logging.info("SELECT DISTINCT device_mac,vendor_short,monitor_id AS drone_id,'probes' AS source, IFNULL(hostname,'') AS hostname,location FROM proximity_sessions LEFT OUTER JOIN dhcp_leases ON proximity_sessions.device_mac = dhcp_leases.mac WHERE monitor_id='%s' AND location LIKE '%s' AND last_probe > '%s' AND last_probe < '%s' UNION SELECT DISTINCT dhcp_leases.mac,mac_vendor.vendor_short,drone_conf.id AS drone_id, 'web' AS source, dhcp_leases.hostname, '' AS location from dhcp_leases inner join mac_vendor on mac_prefix=mac_vendor.mac inner join squid_logs on client_ip=dhcp_leases.ip inner join drone_conf on drone_conf.ip_prefix=dhcp_leases.ip_prefix WHERE drone_conf.id='%s' AND timestamp > '%s' AND timestamp < '%s'"%(drone,location,start_time,end_time,drone,start_time,end_time))
            cursor.execute("SELECT DISTINCT device_mac,vendor_short,monitor_id AS drone_id,'probes' AS source, IFNULL(hostname,'') AS hostname,location FROM proximity_sessions LEFT OUTER JOIN dhcp_leases ON proximity_sessions.device_mac = dhcp_leases.mac WHERE monitor_id=%s AND location LIKE %s AND last_probe >= %s AND last_probe <= %s UNION SELECT DISTINCT dhcp_leases.mac,mac_vendor.vendor_short,drone_conf.id AS drone_id, 'web' AS source, dhcp_leases.hostname, '' AS location from dhcp_leases inner join mac_vendor on mac_prefix=mac_vendor.mac inner join squid_logs on client_ip=dhcp_leases.ip inner join drone_conf on drone_conf.ip_prefix=dhcp_leases.ip_prefix WHERE drone_conf.id=%s AND timestamp >= %s AND timestamp <= %s",(drone,location,start_time,end_time,drone,start_time,end_time))
            results=cursor.fetchall()
            logging.debug("Observed %d clients" % len(results))

            dataz={}
            for row in results:
                logging.debug(row)
                mac=row[0]
                vendor=row[1]
                drone=row[2]
                source=row[3]
                hostname=row[4]
                obs_location=row[5]
                tmp={'vendor':vendor,'hostname':hostname}
                if source=='web':
                    tmp['from_web']="True"
                elif source == 'probes':
                    tmp['from_probes']="True"

                if mac not in dataz:
                    dataz[mac]=tmp
                    dataz[mac]['obs_location']=obs_location
                else:
                    dataz[mac] = dict(dataz[mac].items() + tmp.items())
                    dataz[mac]['obs_location'] = dataz[mac]['obs_location'] + ", " + obs_location



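            # Emit one Maltego entity per client MAC, carrying the merged
            # probe/web metadata as additional fields.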
            for k,v in dataz.iteritems():
                mac=k
                vendor=v['vendor']
                hostname=v['hostname']
                obs_location=v['obs_location']
                from_web,from_probes="False","False"
                if 'from_web' in v:
                    from_web="True"
                if 'from_probes' in v:
                    from_probes="True"

                # if from_web == "False":
                if len(hostname) < 1:
                    NewEnt=TRX.addEntity("snoopy.Client", "%s"%(vendor))
                else:
                    NewEnt=TRX.addEntity("snoopy.Client", "%s (%s)"%(vendor,hostname))
                NewEnt.addAdditionalFields("mac","mac address", "strict",mac)
                NewEnt.addAdditionalFields("vendor","vendor","strict",vendor)
                NewEnt.addAdditionalFields("hostname","hostname","hostname",hostname)

                NewEnt.addAdditionalFields("from_web","from_web","nostrict",from_web)
                NewEnt.addAdditionalFields("from_probes","from_probes","nostrict",from_probes)

                NewEnt.addAdditionalFields("drone","drone","nostrict",drone)

                NewEnt.addAdditionalFields("start_time", "start_time", "nostrict",start_time)
                NewEnt.addAdditionalFields("end_time","end_time", "nostrict",end_time)
                NewEnt.addAdditionalFields("location","Location","nostrict",location)
                NewEnt.addAdditionalFields("obs_location","Observed Locations","nostrict",obs_location)

                # Add something to the icon to distinguish probes and web?

        except Exception, e:
            logging.debug("Exception from fetchClients.py:")
            logging.debug(e)


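        # Hand the assembled entities back to Maltego.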
        TRX.returnOutput()