logging.debug

Here are examples of the Python API logging.debug, taken from open source projects.
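
For orientation, here is a minimal, self-contained sketch of typical logging.debug usage; the basicConfig call, the function name, and the message below are illustrative and not taken from any of the listed projects:

import logging

# Configure the root logger; DEBUG messages are only emitted when the level allows them.
logging.basicConfig(level=logging.DEBUG,
                    format="%(asctime)s %(levelname)s %(message)s")

def add_contact(name, phonebook_id):
    # Lazy %-style arguments: the message string is only built if DEBUG is enabled.
    logging.debug("Adding contact %s to phonebook %d", name, phonebook_id)
    return {"name": name, "phonebook": phonebook_id}

add_contact("alice", 42)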

200 Examples

Example 101

Project: newfies-dialer
Source File: views.py
@permission_required('dialer_campaign.view_dashboard', login_url='/')
@login_required
def customer_dashboard(request, on_index=None):
    """Customer dashboard gives the following information

        * Total campaign contacts
        * Number of contacts reached today
        * Disposition of calls via pie chart
        * Call records & call durations shown on a graph by day/hour

    **Attributes**:

        * ``template`` - frontend/dashboard.html
        * ``form`` - DashboardForm
    """
    logging.debug('Start Dashboard')
    # All campaigns for the logged-in user
    campaign_id_list = Campaign.objects.values_list('id', flat=True).filter(user=request.user).order_by('id')

    # Count of active contacts belonging to the phonebook(s) associated
    # with these campaigns
    pb_active_contact_count = Contact.objects\
        .filter(phonebook__campaign__in=campaign_id_list, status=CONTACT_STATUS.ACTIVE).count()

    form = DashboardForm(request.user, request.POST or None)
    logging.debug('Got Campaign list')

    total_record = dict()
    total_duration_sum = 0
    total_billsec_sum = 0
    total_call_count = 0
    total_answered = 0
    total_not_answered = 0
    total_busy = 0
    total_cancel = 0
    total_congestion = 0
    total_failed = 0
    search_type = SEARCH_TYPE.D_Last_24_hours  # default Last 24 hours
    selected_campaign = ''

    if campaign_id_list:
        selected_campaign = campaign_id_list[0]  # default campaign id

    # selected_campaign should not be empty
    if selected_campaign:
        if form.is_valid():
            selected_campaign = request.POST['campaign']
            search_type = request.POST['search_type']

        end_date = datetime.utcnow().replace(tzinfo=utc)
        start_date = calculate_date(search_type)

        # date_length is used to do group by starting_date
        if int(search_type) >= SEARCH_TYPE.B_Last_7_days:  # all options except 30 days
            date_length = 13
            if int(search_type) == SEARCH_TYPE.C_Yesterday:  # yesterday
                tday = datetime.utcnow().replace(tzinfo=utc)
                start_date = datetime(tday.year, tday.month, tday.day, 0, 0, 0, 0)\
                    .replace(tzinfo=utc) - relativedelta(days=1)
                end_date = datetime(tday.year, tday.month, tday.day, 23, 59, 59, 999999)\
                    .replace(tzinfo=utc) - relativedelta(days=1)
            if int(search_type) >= SEARCH_TYPE.E_Last_12_hours:
                date_length = 16
        else:
            date_length = 10  # Last 30 days option

        select_data = {
            "starting_date": "SUBSTR(CAST(starting_date as CHAR(30)),1,%s)" % str(date_length)
        }

        # This calls list is used by pie chart
        calls = VoIPCall.objects\
            .filter(callrequest__campaign=selected_campaign,
                    duration__isnull=False,
                    user=request.user,
                    starting_date__range=(start_date, end_date))\
            .extra(select=select_data)\
            .values('starting_date', 'disposition')\
            .annotate(Count('starting_date'))\
            .order_by('starting_date')

        logging.debug('Aggregate VoIPCall')

        for i in calls:
            total_call_count += i['starting_date__count']
            if i['disposition'] == CALL_DISPOSITION.ANSWER or i['disposition'] == 'NORMAL_CLEARING':
                total_answered += i['starting_date__count']
            elif i['disposition'] == CALL_DISPOSITION.BUSY or i['disposition'] == 'USER_BUSY':
                total_busy += i['starting_date__count']
            elif i['disposition'] == CALL_DISPOSITION.NOANSWER or i['disposition'] == 'NO_ANSWER':
                total_not_answered += i['starting_date__count']
            elif i['disposition'] == CALL_DISPOSITION.CANCEL or i['disposition'] == 'ORIGINATOR_CANCEL':
                total_cancel += i['starting_date__count']
            elif i['disposition'] == CALL_DISPOSITION.CONGESTION or i['disposition'] == 'NORMAL_CIRCUIT_CONGESTION':
                total_congestion += i['starting_date__count']
            else:
                # VOIP CALL FAILED
                total_failed += i['starting_date__count']

        # following calls list is without disposition & group by call date
        calls = VoIPCall.objects\
            .filter(callrequest__campaign=selected_campaign,
                    duration__isnull=False,
                    user=request.user,
                    starting_date__range=(start_date, end_date))\
            .extra(select=select_data)\
            .values('starting_date')\
            .annotate(Sum('duration'))\
            .annotate(Sum('billsec'))\
            .annotate(Avg('duration'))\
            .annotate(Count('starting_date'))\
            .order_by('starting_date')

        logging.debug('Aggregate VoIPCall (2)')

        mintime = start_date
        maxtime = end_date
        calls_dict = {}
        calls_dict_with_min = {}

        for call in calls:
            total_duration_sum += call['duration__sum']
            total_billsec_sum += call['billsec__sum']
            if int(search_type) >= SEARCH_TYPE.B_Last_7_days:
                ctime = datetime(int(call['starting_date'][0:4]),
                                 int(call['starting_date'][5:7]),
                                 int(call['starting_date'][8:10]),
                                 int(call['starting_date'][11:13]),
                                 0,
                                 0,
                                 0).replace(tzinfo=utc)
                if int(search_type) >= SEARCH_TYPE.E_Last_12_hours:
                    ctime = datetime(int(call['starting_date'][0:4]),
                                     int(call['starting_date'][5:7]),
                                     int(call['starting_date'][8:10]),
                                     int(call['starting_date'][11:13]),
                                     int(call['starting_date'][14:16]),
                                     0,
                                     0).replace(tzinfo=utc)
            else:
                ctime = datetime(int(call['starting_date'][0:4]),
                                 int(call['starting_date'][5:7]),
                                 int(call['starting_date'][8:10]),
                                 0,
                                 0,
                                 0,
                                 0).replace(tzinfo=utc)
            if ctime > maxtime:
                maxtime = ctime
            elif ctime < mintime:
                mintime = ctime

            # all options except 30 days
            if int(search_type) >= SEARCH_TYPE.B_Last_7_days:
                calls_dict[int(ctime.strftime("%Y%m%d%H"))] =\
                    {
                        'call_count': call['starting_date__count'],
                        'duration_sum': call['duration__sum'],
                        'duration_avg': float(call['duration__avg']),
                }

                calls_dict_with_min[int(ctime.strftime("%Y%m%d%H%M"))] =\
                    {
                        'call_count': call['starting_date__count'],
                        'duration_sum': call['duration__sum'],
                        'duration_avg': float(call['duration__avg']),
                }
            else:
                # Last 30 days option
                calls_dict[int(ctime.strftime("%Y%m%d"))] =\
                    {
                        'call_count': call['starting_date__count'],
                        'duration_sum': call['duration__sum'],
                        'duration_avg': float(call['duration__avg']),
                }

        logging.debug('After Call Loops')

        dateList = date_range(mintime, maxtime, q=search_type)

        for date in dateList:
            inttime = int(date.strftime("%Y%m%d"))

            # last 7 days | yesterday | last 24 hrs
            if int(search_type) == SEARCH_TYPE.B_Last_7_days \
                or int(search_type) == SEARCH_TYPE.C_Yesterday \
                    or int(search_type) == SEARCH_TYPE.D_Last_24_hours:

                for option in range(0, 24):
                    day_time = int(str(inttime) + str(option).zfill(2))

                    graph_day = datetime(int(date.strftime("%Y")),
                                         int(date.strftime("%m")),
                                         int(date.strftime("%d")),
                                         int(str(option).zfill(2))).replace(tzinfo=utc)

                    dt = int(1000 * time.mktime(graph_day.timetuple()))
                    total_record[dt] = {
                        'call_count': 0,
                        'duration_sum': 0,
                        'duration_avg': 0.0,
                    }

                    if day_time in calls_dict.keys():
                        total_record[dt]['call_count'] += calls_dict[day_time]['call_count']
                        total_record[dt]['duration_sum'] += calls_dict[day_time]['duration_sum']
                        total_record[dt]['duration_avg'] += float(calls_dict[day_time]['duration_avg'])

            # last 12 hrs | last 6 hrs | last 1 hr
            elif (int(search_type) == SEARCH_TYPE.E_Last_12_hours
                  or int(search_type) == SEARCH_TYPE.F_Last_6_hours
                  or int(search_type) == SEARCH_TYPE.G_Last_hour):

                for hour in range(0, 24):
                    for minute in range(0, 60):
                        hr_time = int(str(inttime) + str(hour).zfill(2) + str(minute).zfill(2))

                        graph_day = datetime(int(date.strftime("%Y")),
                                             int(date.strftime("%m")),
                                             int(date.strftime("%d")),
                                             int(str(hour).zfill(2)),
                                             int(str(minute).zfill(2))).replace(tzinfo=utc)

                        dt = int(1000 * time.mktime(graph_day.timetuple()))
                        total_record[dt] = {
                            'call_count': 0,
                            'duration_sum': 0,
                            'duration_avg': 0.0,
                        }

                        if hr_time in calls_dict_with_min.keys():
                            total_record[dt]['call_count'] += calls_dict_with_min[hr_time]['call_count']
                            total_record[dt]['duration_sum'] += calls_dict_with_min[hr_time]['duration_sum']
                            total_record[dt]['duration_avg'] += float(calls_dict_with_min[hr_time]['duration_avg'])
            else:
                # Default: Last 30 days option
                graph_day = datetime(int(date.strftime("%Y")),
                                     int(date.strftime("%m")),
                                     int(date.strftime("%d"))).replace(tzinfo=utc)
                dt = int(1000 * time.mktime(graph_day.timetuple()))
                total_record[dt] = {
                    'call_count': 0,
                    'duration_sum': 0,
                    'duration_avg': 0,
                }
                if inttime in calls_dict.keys():
                    total_record[dt]['call_count'] += calls_dict[inttime]['call_count']
                    total_record[dt]['duration_sum'] += calls_dict[inttime]['duration_sum']
                    total_record[dt]['duration_avg'] += float(calls_dict[inttime]['duration_avg'])

    logging.debug('After dateList Loops')

    # sorting on date col
    total_record = total_record.items()
    total_record = sorted(total_record, key=lambda k: k[0])

    # lineplusbarwithfocuschart
    final_charttype = "linePlusBarChart"
    xdata = []
    ydata = []
    ydata2 = []
    for i in total_record:
        xdata.append(i[0])
        ydata.append(i[1]['call_count'])
        ydata2.append(i[1]['duration_sum'])

    tooltip_date = "%d %b %y %H:%M %p"
    kwargs1 = {}
    kwargs1['bar'] = True
    extra_serie1 = {"tooltip": {"y_start": "", "y_end": " calls"}, "date_format": tooltip_date}
    extra_serie2 = {"tooltip": {"y_start": "", "y_end": " sec"}, "date_format": tooltip_date}

    final_chartdata = {
        'x': xdata,
        'name1': 'Calls', 'y1': ydata, 'extra1': extra_serie1, 'kwargs1': kwargs1,
        'name2': 'Duration', 'y2': ydata2, 'extra2': extra_serie2,
    }

    # Contacts which are successfully called for running campaign
    reached_contact = 0
    if campaign_id_list:
        tday = datetime.utcnow().replace(tzinfo=utc)
        start_date = datetime(tday.year, tday.month, tday.day, 0, 0, 0, 0).replace(tzinfo=utc)
        end_date = datetime(tday.year, tday.month, tday.day, 23, 59, 59, 999999).replace(tzinfo=utc)
        reached_contact = Subscriber.objects\
            .filter(campaign_id__in=campaign_id_list,  # status=5,
                    updated_date__range=(start_date, end_date))\
            .count()

    # PieChart
    hangup_analytic_charttype = "pieChart"
    xdata = []
    ydata = []
    hangup_analytic_chartdata = {'x': xdata, 'y1': ydata}
    if total_call_count != 0:
        for i in CALL_DISPOSITION:
            xdata.append(i[0])

        # Y-axis order depends on CALL_DISPOSITION
        # 'ANSWER', 'BUSY', 'CANCEL', 'CONGESTION', 'FAILED', 'NOANSWER'
        ydata = [percentage(total_answered, total_call_count),
                 percentage(total_busy, total_call_count),
                 percentage(total_cancel, total_call_count),
                 percentage(total_congestion, total_call_count),
                 percentage(total_failed, total_call_count),
                 percentage(total_not_answered, total_call_count)]

        color_list = [
            COLOR_DISPOSITION['ANSWER'],
            COLOR_DISPOSITION['BUSY'],
            COLOR_DISPOSITION['CANCEL'],
            COLOR_DISPOSITION['CONGESTION'],
            COLOR_DISPOSITION['FAILED'],
            COLOR_DISPOSITION['NOANSWER'],
        ]

        extra_serie = {"tooltip": {"y_start": "", "y_end": " %"},
                       "color_list": color_list}
        hangup_analytic_chartdata = {'x': xdata, 'y1': ydata, 'extra1': extra_serie}

    data = {
        'form': form,
        'campaign_phonebook_active_contact_count': pb_active_contact_count,
        'reached_contact': reached_contact,
        'total_duration_sum': total_duration_sum,
        'total_billsec_sum': total_billsec_sum,
        'total_call_count': total_call_count,
        'total_answered': total_answered,
        'total_not_answered': total_not_answered,
        'total_busy': total_busy,
        'total_cancel': total_cancel,
        'total_congestion': total_congestion,
        'total_failed': total_failed,
        'answered_color': COLOR_DISPOSITION['ANSWER'],
        'busy_color': COLOR_DISPOSITION['BUSY'],
        'not_answered_color': COLOR_DISPOSITION['NOANSWER'],
        'cancel_color': COLOR_DISPOSITION['CANCEL'],
        'congestion_color': COLOR_DISPOSITION['CONGESTION'],
        'failed_color': COLOR_DISPOSITION['FAILED'],
        'CALL_DISPOSITION': CALL_DISPOSITION,
        'hangup_analytic_chartdata': hangup_analytic_chartdata,
        'hangup_analytic_charttype': hangup_analytic_charttype,
        'hangup_chartcontainer': 'piechart_container',
        'hangup_extra': {
            'x_is_date': False,
            'x_axis_format': '',
            'tag_script_js': True,
            'jquery_on_ready': True,
        },
        'final_chartdata': final_chartdata,
        'final_charttype': final_charttype,
        'final_chartcontainer': 'lineplusbarwithfocuschart_container',
        'final_extra': {
            'x_is_date': True,
            'x_axis_format': '%d %b %Y',
            'tag_script_js': True,
            'jquery_on_ready': True,
            'resize': True,
            'focus_enable': True,
        }
    }
    if on_index == 'yes':
        return data
    return render_to_response('frontend/dashboard.html', data, context_instance=RequestContext(request))

Example 102

Project: modrana
Source File: weather_stations.py
    def import_locations(self, data, index="WMO"):
        """Parse NOAA weather station data files

        ``import_locations()`` returns a dictionary with keys containing either
        the WMO or ICAO identifier, and values that are ``Station`` objects that
        describe the large variety of data exported by NOAA_.

        It expects data files in one of the following formats::

            00;000;PABL;Buckland, Buckland Airport;AK;United States;4;65-58-56N;161-09-07W;;;7;;
            01;001;ENJA;Jan Mayen;;Norway;6;70-56N;008-40W;70-56N;008-40W;10;9;P
            01;002;----;Grahuken;;Norway;6;79-47N;014-28E;;;;15;

        or::

            AYMD;94;014;Madang;;Papua New Guinea;5;05-13S;145-47E;05-13S;145-47E;3;5;P
            AYMO;--;---;Manus Island/Momote;;Papua New Guinea;5;02-03-43S;147-25-27E;;;4;;
            AYPY;94;035;Moresby;;Papua New Guinea;5;09-26S;147-13E;09-26S;147-13E;38;49;P

        Files containing the data in this format can be downloaded from
        the :abbr:`NOAA (National Oceanic and Atmospheric
        Administration)`'s site in their `station location page`_.

        WMO indexed files downloaded from the :abbr:`NOAA (National
        Oceanic and Atmospheric Administration)` site when processed by
        ``import_locations()`` will return ``dict`` object of the following
        style::

            {'00000': Station('PABL', 'Buckland, Buckland Airport', 'AK',
                              'United States', 4, 65.982222, -160.848055, None,
                              None, 7, False),
             '01001': Station('ENJA', 'Jan Mayen', None, 'Norway', 6, 70.933333,
                              -7.333333, 70.933333, -7.333333, 10, 9, True),
             '01002': Station(None, 'Grahuken', None, 'Norway', 6, 79.783333,
                              13.533333, None, None, 15, False)}

        And ``dict`` objects such as the following will be created when ICAO
        indexed data files are processed::

            {'AYMD': Station("94", "014", "Madang", None, "Papua New Guinea",
                             5, -5.216666, 145.783333, -5.216666,
                             145.78333333333333, 3, 5, True),
             'AYMO': Station(None, None, "Manus Island/Momote", None,
                             "Papua New Guinea", 5, -2.061944, 147.424166,
                             None, None, 4, False),
             'AYPY': Station("94", "035", "Moresby", None, "Papua New Guinea",
                             5, -9.433333, 147.216667, -9.433333, 147.216667,
                             38, 49, True)}

        >>> stations = Stations(open("WMO_stations"))
        >>> for key, value in sorted(stations.items()):
        ...     print("%s - %s" % (key, value))
        00000 - Buckland, Buckland Airport (PABL - N65.982°; W161.152°)
        01001 - Jan Mayen (ENJA - N70.933°; W008.667°)
        01002 - Grahuken (N79.783°; E014.467°)
        >>> stations = Stations(open("ICAO_stations"), "ICAO")
        >>> for key, value in sorted(stations.items()):
        ...     print("%s - %s" % (key, value))
        AYMD - Madang (94014 - S05.217°; E145.783°)
        AYMO - Manus Island/Momote (S02.062°; E147.424°)
        AYPY - Moresby (94035 - S09.433°; E147.217°)
        >>> stations = Stations(open("broken_WMO_stations"))
        >>> for key, value in sorted(stations.items()):
        ...     print("%s - %s" % (key, value))
        71046 - Komakuk Beach, Y. T. (CWKM - N69.617°; W140.200°)
        71899 - Langara, B. C. (CWLA - N54.250°; W133.133°)
        >>> stations = Stations(open("broken_ICAO_stations"), "ICAO")
        >>> for key, value in sorted(stations.items()):
        ...     print("%s - %s" % (key, value))
        KBRX - Bordeaux (N41.933°; W104.950°)
        KCQB - Chandler, Chandler Municipal Airport (N35.724°; W096.820°)
        KTYR - Tyler, Tyler Pounds Field (N32.359°; W095.404°)

        :type data: ``file``, ``list`` or ``str``
        :param data: NOAA station data to read
        :type index: ``str``
        :param index: The identifier type used in the file
        :rtype: ``dict``
        :return: WMO locations with `Station` objects
        :raise FileFormatError: Unknown file format

        .. _NOAA: http://weather.noaa.gov/
        .. _station location page: http://weather.noaa.gov/tg/site.shtml

        """
        self._data = data
        data = utils.prepare_read(data)

        for line in data:
            line = line.strip()
            chunk = line.split(";")
            if not len(chunk) == 14:
                if index == "ICAO":
                    # Some entries only have 12 or 13 elements, so we assume 13
                    # and 14 are None.  Of the entries I've hand checked this
                    # assumption would be correct.
                    logging.debug("Extending ICAO `%s' entry, because it is "
                                  "too short to process" % line)
                    chunk.extend(["", ""])
                elif index == "WMO" and len(chunk) == 13:
                    # A few of the WMO indexed entries are missing their RBSN
                    # fields, hand checking the entries for 71046 and 71899
                    # shows that they are correct if we just assume RBSN is
                    # false.
                    logging.debug("Extending WMO `%s' entry, because it is "
                                  "too short to process" % line)
                    chunk.append("")
                else:
                    raise utils.FileFormatError("NOAA")
            if index == "WMO":
                identifier = "".join(chunk[:2])
                alt_id = chunk[2]
            elif index == "ICAO":
                identifier = chunk[0]
                alt_id = "".join(chunk[1:3])
            else:
                raise ValueError("Unknown format `%s'" % index)
            if alt_id in ("----", "-----"):
                alt_id = None
            name = chunk[3]
            state = chunk[4] if chunk[4] else None
            country = chunk[5]
            wmo = int(chunk[6]) if chunk[6] else None
            point_data = []
            for i in chunk[7:11]:
                if not i:
                    point_data.append(None)
                    continue
                # Some entries in nsd_cccc.txt are of the format "DD-MM-
                # N", so we just take the spaces to mean 0 seconds.
                if " " in i:
                    logging.debug("Fixing unpadded location data in `%s' entry"
                                  % line)
                    i = i.replace(" ", "0")
                values = map(int, i[:-1].split("-"))
                if i[-1] in ("S", "W"):
                    values = [-i for i in values]
                point_data.append(point.utils.to_dd(*values))
            latitude, longitude, ua_latitude, ua_longitude = point_data
            altitude = int(chunk[11]) if chunk[11] else None
            ua_altitude = int(chunk[12]) if chunk[12] else None
            rbsn = False if not chunk[13] else True
            self[identifier] = Station(alt_id, name, state, country, wmo,
                                       latitude, longitude, ua_latitude,
                                       ua_longitude, altitude, ua_altitude,
                                       rbsn)

Example 103

Project: laikaboss
Source File: laikad.py
def main():
    '''Main program logic. Becomes the supervisor process.'''
    parser = OptionParser(usage="usage: %prog [options]\n"
        "Default settings in config file: laikad.conf")

    parser.add_option("-d", "--debug",
                      action="store_true", default=False,
                      dest="debug",
                      help="enable debug messages to the console.")
    parser.add_option("-s", "--scan-config",
                      action="store", type="string",
                      dest="laikaboss_config_path",
                      help="specify a path for laikaboss configuration")
    parser.add_option("-c", "--laikad-config",
                      action="store", type="string",
                      dest="laikad_config_path",
                      help="specify a path for laikad configuration")
    parser.add_option("-b", "--broker-backend",
                      action="store", type="string",
                      dest="broker_backend_address",
                      help="specify an address for the workers to connect to. "
                      "ex: tcp://*:5559")
    parser.add_option("-f", "--broker-frontend",
                      action="store", type="string",
                      dest="broker_frontend_address",
                      help="specify an address for clients to connect to. ex: "
                      "tcp://*:5558")
    parser.add_option("-w", "--worker-connect",
                      action="store", type="string",
                      dest="worker_connect_address",
                      help="specify an address for clients to connect to. ex: "
                      "tcp://localhost:5559")
    parser.add_option("-n", "--no-broker",
                      action="store_true", default=False,
                      dest="no_broker",
                      help="specify this option to disable the broker for this "
                      "instance.")
    parser.add_option("-i", "--id",
                      action="store", type="string",
                      dest="runas_uid",
                      help="specify a valid username to switch to after starting "
                      "as root.")
    parser.add_option("-p", "--processes",
                      action="store", type="int",
                      dest="num_procs",
                      help="specify the number of workers to launch with this "
                      "daemon")
    parser.add_option("-r", "--restart-after",
                      action="store", type="int",
                      dest="ttl",
                      help="restart worker after scanning this many items")
    parser.add_option("-t", "--restart-after-min",
                      action="store", type="int",
                      dest="time_ttl",
                      help="restart worker after scanning for this many "
                      "minutes.")
    parser.add_option("-a", "--async",
                      action="store_true", default=False,
                      dest="run_async",
                      help="enable async messages. "
                      "This will disable any responses back to the client.")
    parser.add_option("-g", "--grace-timeout",
                      action="store", type="int",
                      dest="gracetimeout",
                      help="when shutting down, the timeout to allow workers to"
                      " finish ongoing scans before being killed")
    (options, _) = parser.parse_args()

    # Set the configuration file path for laikad
    config_location = '/etc/laikaboss/laikad.conf'
    if options.laikad_config_path:
        config_location = options.laikad_config_path
        if not os.path.exists(options.laikad_config_path):
            print "the provided config path is not valid, exiting"
            return 1
    # Next, check to see if we're in the top level source directory (dev environment)
    elif os.path.exists(DEFAULT_CONFIGS['laikad_dev_config_path']):
        config_location = DEFAULT_CONFIGS['laikad_dev_config_path']
    # Next, check for an installed copy of the default configuration
    elif os.path.exists(DEFAULT_CONFIGS['laikad_sys_config_path']):
        config_location = DEFAULT_CONFIGS['laikad_sys_config_path']
    # Exit
    else:
        print 'A valid laikad configuration was not found in either of the following locations:\
\n%s\n%s' % (DEFAULT_CONFIGS['laikad_dev_config_path'],DEFAULT_CONFIGS['laikad_sys_config_path'])
        return 1
    
    # Read the laikad config file
    config_parser = ConfigParser()
    config_parser.read(config_location)

    # Parse through the config file and append each section to a single dict
    for section in config_parser.sections():
        CONFIGS.update(dict(config_parser.items(section)))

    # We need a default framework config at a minimum
    if options.laikaboss_config_path:
        laikaboss_config_path = options.laikaboss_config_path
        logging.debug("using alternative config path: %s" % options.laikaboss_config_path)
        if not os.path.exists(options.laikaboss_config_path):
            print "the provided config path is not valid, exiting"
            return 1
    #Next, check for a config path in the laikad config
    elif os.path.exists(get_option('configpath')):
        laikaboss_config_path = get_option('configpath')
    # Next, check to see if we're in the top level source directory (dev environment)
    elif os.path.exists(DEFAULT_CONFIGS['dev_config_path']):
        laikaboss_config_path = DEFAULT_CONFIGS['dev_config_path']
    # Next, check for an installed copy of the default configuration
    elif os.path.exists(DEFAULT_CONFIGS['sys_config_path']):
        laikaboss_config_path = DEFAULT_CONFIGS['sys_config_path']
    # Exit
    else:
        print 'A valid framework configuration was not found in either of the following locations:\
\n%s\n%s' % (DEFAULT_CONFIGS['dev_config_path'],DEFAULT_CONFIGS['sys_config_path'])
        return 1

    if options.num_procs:
        num_procs = options.num_procs
    else:
        num_procs = int(get_option('numprocs'))

    if options.ttl:
        ttl = options.ttl
    else:
        ttl = int(get_option('ttl'))

    if options.time_ttl:
        time_ttl = options.time_ttl
    else:
        time_ttl = int(get_option('time_ttl'))

    if options.broker_backend_address:
        broker_backend_address = options.broker_backend_address
    else:
        broker_backend_address = get_option('brokerbackend')

    if options.broker_frontend_address:
        broker_frontend_address = options.broker_frontend_address
    else:
        broker_frontend_address = get_option('brokerfrontend')

    if options.worker_connect_address:
        worker_connect_address = options.worker_connect_address
    else:
        worker_connect_address = get_option('workerconnect')

    if options.gracetimeout:
        gracetimeout = options.gracetimeout
    else:
        gracetimeout = int(get_option('gracetimeout'))

    if options.run_async:
        async = True
    else:
        async = strtobool(get_option('async'))
   
    logresult = strtobool(get_option('log_result'))

    # Get the UserID to run as, if it was not specified on the command line
    # we'll use the current user by default
    runas_uid = None
    runas_gid = None

    if options.runas_uid:
        from pwd import getpwnam
        runas_uid = getpwnam(options.runas_uid).pw_uid
        runas_gid = getpwnam(options.runas_uid).pw_gid

    if options.debug:
        logging.basicConfig(level=logging.DEBUG)

    # Lower privileges if a UID has been set
    try:
        if runas_uid:
            os.setgid(runas_gid)
            os.setuid(runas_uid)
    except OSError:
        print "Unable to set user ID to %i, defaulting to current user" % runas_uid

    # Add intercept for graceful shutdown
    def shutdown(signum, frame):
        '''Signal handler for shutting down supervisor gracefully'''
        logging.debug("Supervisor: shutdown handler triggered")
        global KEEP_RUNNING
        KEEP_RUNNING = False
    signal.signal(signal.SIGTERM, shutdown)
    signal.signal(signal.SIGINT, shutdown)

    # Start the broker
    broker_proc = None
    if not options.no_broker:
        if async:
            broker_proc = AsyncBroker(broker_backend_address, broker_frontend_address)
        else:
            broker_proc = SyncBroker(broker_backend_address, broker_frontend_address, gracetimeout)
        broker_proc.start()

    # Start the workers
    workers = []
    for _ in range(num_procs):
        worker_proc = Worker(laikaboss_config_path, worker_connect_address, ttl,
            time_ttl, logresult, int(get_option('workerpolltimeout')), gracetimeout)
        worker_proc.start()
        workers.append(worker_proc)

    while KEEP_RUNNING:
        # Ensure we have a broker
        if not options.no_broker and not broker_proc.is_alive():
            if async:
                broker_proc = AsyncBroker(broker_backend_address, broker_frontend_address)
            else:
                broker_proc = SyncBroker(broker_backend_address, broker_frontend_address,
                    gracetimeout)
            broker_proc.start()

        # Ensure we have living workers
        dead_workers = []
        for worker_proc in workers:
            if not worker_proc.is_alive():
                dead_workers.append(worker_proc)

        for worker_proc in dead_workers:
            workers.remove(worker_proc)
            new_proc = Worker(laikaboss_config_path, worker_connect_address, ttl, time_ttl,
                logresult, int(get_option('workerpolltimeout')), gracetimeout)
            new_proc.start()
            workers.append(new_proc)
            worker_proc.join()

        # Wait a little bit
        time.sleep(5)

    logging.debug("Supervisor: beginning graceful shutdown sequence")
    logging.info("Supervisor: giving workers %d second grace period", gracetimeout)
    time.sleep(gracetimeout)
    logging.info("Supervisor: terminating workers")
    for worker_proc in workers:
        if worker_proc.is_alive():
            os.kill(worker_proc.pid, signal.SIGKILL)
    for worker_proc in workers:
        worker_proc.join()
    if not options.no_broker:
        if broker_proc.is_alive():
            os.kill(broker_proc.pid, signal.SIGKILL)
        broker_proc.join()
    logging.debug("Supervisor: finished")

Example 104

Project: lbry
Source File: test_misc.py
    def test_double_download(self):
        sd_hash_queue = Queue()
        kill_event = Event()
        dead_event = Event()
        lbry_uploader = LbryUploader(sd_hash_queue, kill_event, dead_event, 5209343)
        uploader = Process(target=lbry_uploader.start)
        uploader.start()
        self.server_processes.append(uploader)

        logging.debug("Testing double download")

        wallet = FakeWallet()
        peer_manager = PeerManager()
        peer_finder = FakePeerFinder(5553, peer_manager, 1)
        hash_announcer = FakeAnnouncer()
        rate_limiter = DummyRateLimiter()
        sd_identifier = StreamDescriptorIdentifier()

        downloaders = []

        db_dir = "client"
        blob_dir = os.path.join(db_dir, "blobfiles")
        os.mkdir(db_dir)
        os.mkdir(blob_dir)

        self.session = Session(settings.data_rate, db_dir=db_dir, lbryid="abcd",
                               peer_finder=peer_finder, hash_announcer=hash_announcer,
                               blob_dir=blob_dir, peer_port=5553, use_upnp=False,
                               rate_limiter=rate_limiter, wallet=wallet,
                               blob_tracker_class=DummyBlobAvailabilityTracker, is_generous=settings.is_generous_host)

        self.stream_info_manager = DBEncryptedFileMetadataManager(self.session.db_dir)
        self.lbry_file_manager = EncryptedFileManager(self.session, self.stream_info_manager, sd_identifier)

        def make_downloader(metadata, prm):
            info_validator = metadata.validator
            options = metadata.options
            factories = metadata.factories
            chosen_options = [o.default_value for o in options.get_downloader_options(info_validator, prm)]
            return factories[0].make_downloader(metadata, chosen_options, prm)

        def append_downloader(downloader):
            downloaders.append(downloader)
            return downloader

        def download_file(sd_hash):
            prm = self.session.payment_rate_manager
            d = download_sd_blob(self.session, sd_hash, prm)
            d.addCallback(sd_identifier.get_metadata_for_sd_blob)
            d.addCallback(make_downloader, prm)
            d.addCallback(append_downloader)
            d.addCallback(lambda downloader: downloader.start())
            return d

        def check_md5_sum():
            f = open('test_file')
            hashsum = MD5.new()
            hashsum.update(f.read())
            self.assertEqual(hashsum.hexdigest(), "4ca2aafb4101c1e42235aad24fbb83be")

        def delete_lbry_file():
            logging.debug("deleting the file...")
            d = self.lbry_file_manager.delete_lbry_file(downloaders[0])
            d.addCallback(lambda _: self.lbry_file_manager.get_count_for_stream_hash(downloaders[0].stream_hash))
            d.addCallback(
                lambda c: self.stream_info_manager.delete_stream(downloaders[1].stream_hash) if c == 0 else True)
            return d

        def check_lbry_file():
            d = downloaders[1].status()
            d.addCallback(lambda _: downloaders[1].status())

            def check_status_report(status_report):
                self.assertEqual(status_report.num_known, status_report.num_completed)
                self.assertEqual(status_report.num_known, 3)

            d.addCallback(check_status_report)
            return d

        def start_transfer(sd_hash):
            logging.debug("Starting the transfer")

            d = self.session.setup()
            d.addCallback(lambda _: self.stream_info_manager.setup())
            d.addCallback(lambda _: add_lbry_file_to_sd_identifier(sd_identifier))
            d.addCallback(lambda _: self.lbry_file_manager.setup())
            d.addCallback(lambda _: download_file(sd_hash))
            d.addCallback(lambda _: check_md5_sum())
            d.addCallback(lambda _: download_file(sd_hash))
            d.addCallback(lambda _: delete_lbry_file())
            d.addCallback(lambda _: check_lbry_file())

            return d

        def stop(arg):
            if isinstance(arg, Failure):
                logging.debug("Client is stopping due to an error. Error: %s", arg.getTraceback())
            else:
                logging.debug("Client is stopping normally.")
            kill_event.set()
            logging.debug("Set the kill event")
            d = self.wait_for_event(dead_event, 15)

            def print_shutting_down():
                logging.info("Client is shutting down")

            d.addCallback(lambda _: print_shutting_down())
            d.addCallback(lambda _: arg)
            return d

        d = self.wait_for_hash_from_queue(sd_hash_queue)
        d.addCallback(start_transfer)
        d.addBoth(stop)
        return d

Example 105

Project: opendrift
Source File: opendrift3D.py
    def vertical_mixing(self):
        """Mix particles vertically according to eddy diffusivity and buoyancy

            Buoyancy is expressed as terminal velocity, which is the
            steady-state vertical velocity due to positive or negative
            buoyant behaviour. It is usually a function of particle density,
            diameter, and shape.

            Vertical particle displacement due to turbulent mixing is
            calculated using the "binned random walk scheme" (Thygessen and
            Aadlandsvik, 2007).
            The formulation of this scheme is copied from LADIM (IMR).
        """

        if self.config['processes']['turbulentmixing'] is False:
            logging.debug('Turbulent mixing deactivated.')
            return

        from opendrift.models import eddydiffusivity

        dz = self.config['turbulentmixing']['verticalresolution']
        dz = np.float32(dz)  # Convert to avoid error for older numpy
        dt_mix = self.config['turbulentmixing']['timestep']

        # minimum height/maximum depth for each particle
        Zmin = -1.*self.environment.sea_floor_depth_below_sea_level

        # place particle in center of bin
        surface = self.elements.z == 0
        self.elements.z[~surface] = np.round(self.elements.z[~surface]/dz)*dz

        #avoid that elements are below bottom
        bottom = np.where(self.elements.z < Zmin)
        self.elements.z[bottom] = np.round(Zmin/dz)*dz + dz/2.

        # Eventual model-specific preparations
        self.prepare_vertical_mixing()

        # get profile of eddy diffusivity
        # get vertical eddy diffusivity from environment or specific model
        if (self.config['turbulentmixing']['diffusivitymodel'] ==
                'environment'):
            if 'ocean_vertical_diffusivity' in self.environment_profiles:
                Kprofiles = self.environment_profiles[
                    'ocean_vertical_diffusivity']
                logging.debug('use diffusivity from ocean model')
            else:
                # NB: using constant diffusivity, and value from first
                # element only - this should be checked/improved!
                Kprofiles = \
                    self.environment.ocean_vertical_diffusivity[0] * \
                    np.ones((len(self.environment_profiles['z']),
                             self.num_elements_active()))
                logging.debug('use constant diffusivity')
        else:
            logging.debug('use functional expression for diffusivity')
            Kprofiles = getattr(
                eddydiffusivity,
                self.config['turbulentmixing']['diffusivitymodel'])(self)

        # get profiles of salinity and temperature
        # (to save interpolation time in the inner loop)
        if 'TSprofiles' in self.config['turbulentmixing']:
            if self.config['turbulentmixing']['TSprofiles'] is True:
                Sprofiles = self.environment_profiles['sea_water_salinity']
                Tprofiles = \
                    self.environment_profiles['sea_water_temperature']

        # prepare vertical interpolation coordinates
        z_i = range(Kprofiles.shape[0])
        z_index = interp1d(-self.environment_profiles['z'],
                           z_i, bounds_error=False)

        # internal loop for fast time step of vertical mixing model
        # binned random walk needs faster time step compared
        # to horizontal advection
        ntimes_mix = int(self.time_step.total_seconds()/dt_mix)
        logging.debug('Vertical mixing module:')
        logging.debug('turbulent diffusion with binned random walk scheme')
        logging.debug('using ' + str(ntimes_mix) + ' fast time steps of dt=' +
                      str(dt_mix) + 's')
        for i in range(0, ntimes_mix):
            #remember which particles belong to the exact surface
            surface = self.elements.z == 0

            # update terminal velocity according to environmental variables
            if 'TSprofiles' in self.config['turbulentmixing']:
                if self.config['turbulentmixing']['TSprofiles'] is True:
                    self.update_terminal_velocity(Tprofiles=Tprofiles,
                                                  Sprofiles=Sprofiles,
                                                  z_index=z_index)
                else:
                    self.update_terminal_velocity()
            else:
                # this is faster, but ignores density gradients in
                # water column for the inner loop
                self.update_terminal_velocity()

            w = self.elements.terminal_velocity

            # diffusivity K at depth z
            zi = z_index(-self.elements.z)
            upper = np.maximum(np.floor(zi).astype(np.int), 0)
            lower = np.minimum(upper+1, Kprofiles.shape[0]-1)
            weight_upper = 1 - (zi - upper)
            K1 = Kprofiles[upper, range(Kprofiles.shape[1])] * \
                weight_upper + \
                Kprofiles[lower, range(Kprofiles.shape[1])] * \
                (1-weight_upper)

            # K at depth z-dz ; gradient of K is required for correct
            # solution with random walk scheme
            zi = z_index(-(self.elements.z-dz))
            upper = np.maximum(np.floor(zi).astype(np.int), 0)
            lower = np.minimum(upper+1, Kprofiles.shape[0]-1)
            weight_upper = 1 - (zi - upper)
            K2 = Kprofiles[upper, range(Kprofiles.shape[1])] * \
                weight_upper + \
                Kprofiles[lower, range(Kprofiles.shape[1])] * \
                (1-weight_upper)

            # calculate rise/sink probability dependent on K and w
            p = dt_mix * (2.0*K1 + dz*w)/(2.0*dz*dz)  # probability to rise
            q = dt_mix * (2.0*K2 - dz*w)/(2.0*dz*dz)  # probability to sink

            # check if probabilities are reasonable or wrong; which can happen if K is very high (K>0.1)
            wrong = p+q > 1.00002
            if wrong.sum() > 0:
                logging.info('WARNING! '+str(wrong.sum())+' elements have p+q>1; you might need a smaller mixing time step')
                # fixing p and q by scaling them to assure p+q<1:
                norm = p+q
                p[wrong] = p[wrong]/norm[wrong] 
                q[wrong] = q[wrong]/norm[wrong]

            # use probabilities to mix some particles up or down
            RandKick = np.random.random(self.num_elements_active())           
            up = np.where(RandKick < p)
            down = np.where(RandKick > 1.0 - q)           
            self.elements.z[up] = self.elements.z[up] + dz # move to layer above
            self.elements.z[down] = self.elements.z[down] - dz # move to layer underneath

            # put the particles that belong to the surface slick (if present) back to the surface
            self.elements.z[surface] = 0.

            #avoid that elements are below bottom
            bottom = np.where(self.elements.z < Zmin)
            self.elements.z[bottom] = np.round(Zmin/dz)*dz + dz/2.

            # Call surface interaction:
            # reflection at surface or formation of slick and wave mixing if implemented for this class
            self.surface_interaction(dt_mix)

Example 106

  @oauth_decorator.oauth_required
  @data_handler.data_required
  def get_instances(self):
    """List instances.

    Uses app engine app identity to retrieve an access token for the app
    engine service account. No client OAuth required. External IP is used
    to determine if the instance is actually running.
    """

    gce_project = self._create_gce()
    instances = gce_appengine.GceAppEngine().run_gce_request(
        self,
        gce_project.list_instances,
        'Error listing instances: ',
        filter='name eq ^%s-.*' % self.instance_prefix())

    # A map of instanceName -> (ip, RPC)
    health_rpcs = {}

    # Convert instance info to dict and check server status.
    num_running = 0
    instance_dict = {}
    if instances:
      for instance in instances:
        instance_record = {}
        instance_dict[instance.name] = instance_record
        if instance.status:
          instance_record['status'] = instance.status
        else:
          instance_record['status'] = 'OTHER'
        ip = None
        for interface in instance.network_interfaces:
          for config in interface.get('accessConfigs', []):
            if 'natIP' in config:
              ip = config['natIP']
              instance_record['externalIp'] = ip
              break
          if ip: break

        # Ping the instance server. Grab stats from /debug/vars.
        if ip and instance.status == 'RUNNING':
          num_running += 1
          health_url = 'http://%s/debug/vars?t=%d' % (ip, int(time.time()))
          logging.debug('Health checking %s', health_url)
          rpc = urlfetch.create_rpc(deadline = HEALTH_CHECK_TIMEOUT)
          urlfetch.make_fetch_call(rpc, url=health_url)
          health_rpcs[instance.name] = rpc

    # Ping through the LBs too.  Only if we get success there do we know we are
    # really serving.
    loadbalancers = []
    lb_rpcs = {}
    if instances and len(instances) > 1:
      loadbalancers = self._get_lb_servers()
    if num_running > 0 and loadbalancers:
      for lb in loadbalancers:
        health_url = 'http://%s/health?t=%d' % (lb, int(time.time()))
        logging.debug('Health checking %s', health_url)
        rpc = urlfetch.create_rpc(deadline = HEALTH_CHECK_TIMEOUT)
        urlfetch.make_fetch_call(rpc, url=health_url)
        lb_rpcs[lb] = rpc

    # wait for RPCs to complete and update dict as necessary
    vars_aggregator = ServerVarsAggregator()

    # TODO: there is significant duplication here.  Refactor.
    for (instance_name, rpc) in health_rpcs.items():
      result = None
      instance_record = instance_dict[instance_name]
      try:
        result = rpc.get_result()
        if result and "memstats" in result.content:
          logging.debug('%s healthy!', instance_name)
          instance_record['status'] = 'SERVING'
          instance_vars = {}
          try:
            instance_vars = json.loads(result.content)
            instance_record['vars'] = instance_vars
            vars_aggregator.aggregate_vars(instance_vars)
          except ValueError as error:
            logging.error('Error decoding vars json for %s: %s', instance_name, error)
        else:
          logging.debug('%s unhealthy. Content: %s', instance_name, result.content)
      except urlfetch.Error as error:
        logging.debug('%s unhealthy: %s', instance_name, str(error))

    # Check health status through the load balancer.
    loadbalancer_healthy = bool(lb_rpcs)
    for (lb, lb_rpc) in lb_rpcs.items():
      result = None
      try:
        result = lb_rpc.get_result()
        if result and "ok" in result.content:
          logging.info('LB %s healthy: %s\n%s', lb, result.headers, result.content)
        else:
          logging.info('LB %s result not okay: %s, %s', lb, result.status_code, result.content)
          loadbalancer_healthy = False
          break
      except urlfetch.Error as error:
        logging.info('LB %s fetch error: %s', lb, str(error))
        loadbalancer_healthy = False
        break

    response_dict = {
      'instances': instance_dict,
      'vars': vars_aggregator.get_aggregate(),
      'loadbalancers': loadbalancers,
      'loadbalancer_healthy': loadbalancer_healthy,
    }
    self.response.headers['Content-Type'] = 'application/json'
    self.response.out.write(json.dumps(response_dict))

Example 107

def ListConsistencyBenchmark(service):
  """ A benchmark test to measure list-after-write consistency. It uploads
  a large number of 1-byte objects in a short amount of time, and then issues
  a list request. If the first list request returns all objects as expected,
  then the result is deemed consistent. Otherwise, it keeps issuing list requests
  until all expected objects are returned, and the test reports the time
  it takes from the end of the last write to the time the list returns
  a consistent result.

  Args:
    service: the ObjectStorageServiceBase object to use.

  Returns:
    A dictionary that contains the test results:
      'is-list-consistent': True/False
      'list-latency': if list is consistent, what is its latency
      'inconsistency-window': if list is inconsistent, how long did it take to
          reach consistency.

  Raises:
    LowAvailabilityError: when the storage service has failed a high number of
        our RW requests that exceeds a threshold (>5%), we raise this error
        instead of collecting performance numbers from this run.
  """

  # Provision the test with tons of one byte objects. Write them all at once.
  object_prefix = 'pkb_list_consistency_%f' % time.time()
  final_objects_written = []

  per_thread_objects_written = [[] for i in
                                 range(LIST_CONSISTENCY_THREAD_COUNT)]  # noqa

  threads = []

  for i in range(LIST_CONSISTENCY_THREAD_COUNT):
    my_prefix = '%s_%d' % (object_prefix, i)
    thread = Thread(target=WriteObjects,
                    args=(service, FLAGS.bucket, my_prefix,
                          LIST_CONSISTENCY_OBJECT_COUNT /
                          LIST_CONSISTENCY_THREAD_COUNT,
                          1,
                          per_thread_objects_written[i], None, None))
    thread.daemon = True
    thread.start()
    threads.append(thread)

  logging.debug('All threads started, waiting for them to end...')

  for i in range(LIST_CONSISTENCY_THREAD_COUNT):
    try:
      threads[i].join()
      final_objects_written += per_thread_objects_written[i]
    except:
      logging.exception('Caught exception waiting for the %dth thread.', i)
  logging.debug('All threads ended...')

  write_finish_time = time.time()

  final_count = len(final_objects_written)
  if final_count < LIST_CONSISTENCY_OBJECT_COUNT * (1 - FAILURE_TOLERANCE):
    raise LowAvailabilityError('Failed to provision required number of '
                               'objects, exiting.')

  logging.info('Done provisioning the objects, objects written %d. Now start '
               'doing the lists...', final_count)

  # Now list this bucket under this prefix, compare the list results with
  # objects_written. If they are not the same, keep doing it until they
  # are the same.
  result_consistent, list_count, list_latency, total_wait_time = (
      ListAndWaitForObjects(service, write_finish_time,
                            set(final_objects_written), object_prefix))

  final_result = {}
  AnalyzeListResults(final_result, result_consistent, list_count, list_latency,
                     total_wait_time, LIST_AFTER_WRITE_SCENARIO)

  logging.info('One list-after-write iteration completed. result is %s',
               final_result)
  if not result_consistent:
    # There is no point continuing testing the list-after-update consistency if
    # list-after-write is still not consistent after waiting for extended
    # period of time.
    logging.info('Not doing list-after-update tests because results are still '
                 'not consistent after max wait time for list-after-write.')
    return final_result

  logging.info('Start benchmarking list-after-update consistency.')

  # Now delete some objects and do list again, this measures list-after-update
  # consistency
  per_thread_objects_to_delete = [
      [] for i in range(LIST_CONSISTENCY_THREAD_COUNT)]

  for i in range(LIST_CONSISTENCY_THREAD_COUNT):
    for j in range(len(per_thread_objects_written[i])):
      # Delete about 30% of the objects written so far.
      if random.random() < LIST_AFTER_UPDATE_DELETION_RATIO:
        per_thread_objects_to_delete[i].append(per_thread_objects_written[i][j])

  # Now issue the delete concurrently.
  per_thread_objects_deleted = [
      [] for i in range(LIST_CONSISTENCY_THREAD_COUNT)]

  DeleteObjectsConcurrently(service,
                            per_thread_objects_to_delete,
                            per_thread_objects_deleted)

  delete_finish_time = time.time()
  final_expectation = []
  for i in range(LIST_CONSISTENCY_THREAD_COUNT):
    for k in range(len(per_thread_objects_deleted[i])):
      per_thread_objects_written[i].remove(per_thread_objects_deleted[i][k])
    final_expectation += per_thread_objects_written[i]

  result_consistent, list_count, list_latency, total_wait_time = (
      ListAndWaitForObjects(service, delete_finish_time,
                            set(final_expectation), object_prefix))

  AnalyzeListResults(final_result, result_consistent, list_count, list_latency,
                     total_wait_time, LIST_AFTER_UPDATE_SCENARIO)

  logging.info('One list-after-update iteration completed. result is %s',
               final_result)

  # Final clean up: delete the objects still remaining.
  DeleteObjectsConcurrently(service,
                            per_thread_objects_written,
                            per_thread_objects_deleted)
  return final_result

Example 108

Project: centinel
Source File: http.py
def get_request(netloc, path="/", headers=None, ssl=False,
                external=None, url=None, log_prefix=''):
    http_results = {}

    # Add User-Agent string if not present in headers
    if headers is None:
        headers = {"User-Agent": random.choice(user_agent_pool)}
    elif type(headers) is dict and "User-Agent" not in headers:
        headers["user-Agent"] = random.choice(user_agent_pool)

    first_response = _get_http_request(netloc, path, headers, ssl)
    if "failure" in first_response["response"]:  # If there was an error, just ignore redirects and return
        first_response_information = {"redirect_count": 0,
                                      "redirect_loop": False,
                                      "full_url": url,
                                      "response": first_response["response"],
                                      "request": first_response["request"]}
        http_results = first_response_information

        if external is not None and type(external) is dict:
            external[url] = http_results
        return http_results

    logging.debug("%sSending HTTP GET request for %s." % (log_prefix, url))

    response_headers_contains_location = False
    location_url = None
    # Checks HTTP Status code and location header to see if the webpage calls for a redirect
    for header, header_value in first_response["response"]["headers"].items():
        if header.lower() == "location":
            response_headers_contains_location = True
            location_url = header_value

    # check meta redirect
    meta_redirect_url = None
    is_meta_redirect = False
    if "body" in first_response["response"]:
        try:
            meta_redirect_url = meta_redirect(first_response["response"]["body"])
        except:
            logging.warning("%sError looking for redirects in: %s." % (log_prefix, url))
    elif "body.b64" in first_response["response"]:
        body_decoded = base64.b64decode(first_response["response"]["body.b64"])
        meta_redirect_url = meta_redirect(body_decoded)

    if meta_redirect_url is not None:
        is_meta_redirect = True

    is_redirecting = response_headers_contains_location or is_meta_redirect


    previous_url = ""
    previous_netloc = netloc
    if is_redirecting:
        http_results["redirects"] = {}
        first_response_information = {"full_url": url,
                                      "response": first_response["response"],
                                      "request": first_response["request"]}
        http_results["redirects"][0] = first_response_information
        redirect_http_result = None
        redirect_number = 1
        while redirect_http_result is None or is_redirecting and\
                redirect_number <= REDIRECT_LOOP_THRESHOLD:  # While there are more redirects...
            # Usually, redirects that redirect more than 5 times are infinite loops
            if response_headers_contains_location:
                redirect_url = location_url
            elif is_meta_redirect:
                redirect_url = meta_redirect_url

            # prevent looping on the same URL
            if previous_url == redirect_url:
                break
            previous_url = redirect_url

            use_ssl = redirect_url.startswith("https://")  # If redirect url starts with https, use ssl

            # Scheme, query, and fragment aren't used. urlparse is used here to
            # split the url into the host and path, which httplib requires.
            parsed_url = urlparse(redirect_url)

            netloc = parsed_url.netloc
            # if host is not specified, use the last one
            if netloc is None or netloc == "":
                netloc = previous_netloc

            previous_netloc = netloc

            redirect_http_result = _get_http_request(netloc, parsed_url.path, ssl=use_ssl)

            # If there is an error in the redirects, break the loop and stop there
            if "failure" in redirect_http_result["response"]:
                http_results["response"] = redirect_http_result["response"]  # This will count as the final response
                break

            response_headers_contains_location = False
            location_url = None
            for header, header_value in redirect_http_result["response"]["headers"].items():
                if header.lower() == "location":
                    response_headers_contains_location = True
                    location_url = header_value

            # check meta redirect
            meta_redirect_url = None
            is_meta_redirect = False
            if "body" in redirect_http_result["response"]:
                meta_redirect_url = meta_redirect(redirect_http_result["response"]["body"])
            elif "body.b64" in redirect_http_result["response"]:
                body_decoded = base64.b64decode(redirect_http_result["response"]["body.b64"])
                meta_redirect_url = meta_redirect(body_decoded)

            if meta_redirect_url is not None:
                is_meta_redirect = True

            is_redirecting = response_headers_contains_location or is_meta_redirect

            # If this is the final response, put this in the first request and response json
            if not is_redirecting or redirect_number == REDIRECT_LOOP_THRESHOLD:
                http_results["redirect_loop"] = (is_redirecting and redirect_number ==  REDIRECT_LOOP_THRESHOLD)
                http_results["redirect_count"] = redirect_number
                http_results["full_url"] = redirect_url

            redirect_information = {"full_url": redirect_url,
                                    "response": redirect_http_result["response"],
                                    "request": redirect_http_result["request"]}
            http_results["redirects"][redirect_number] = redirect_information

            redirect_number += 1

    else:
        first_response_information = {"redirect_count": 0,
                                      "redirect_loop": False,
                                      "full_url": url,
                                      "response": first_response["response"],
                                      "request": first_response["request"]}
        http_results = first_response_information

    # the external result is used when threading to store
    # the results in the list container provided.
    if external is not None and type(external) is dict:
        external[url] = http_results
    return http_results
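
get_request() above relies on a meta_redirect() helper (not shown here) to spot <meta http-equiv="refresh"> redirects in a response body. A rough, hedged stand-in for that idea is sketched below; the regex and function name are assumptions for illustration and are simpler than whatever the real centinel helper does.

import logging
import re

# Hypothetical stand-in for meta_redirect(): pull the target URL out of a
# <meta http-equiv="refresh" content="N; url=..."> tag, or return None.
META_REFRESH_RE = re.compile(
    r'<meta[^>]+http-equiv=["\']?refresh["\']?[^>]*'
    r'content=["\']?\s*\d+\s*;\s*url=([^"\'>\s]+)',
    re.IGNORECASE)


def find_meta_redirect(body):
    match = META_REFRESH_RE.search(body or "")
    if match is None:
        return None
    url = match.group(1)
    logging.debug("Found meta refresh redirect to %s", url)
    return url


print(find_meta_redirect(
    '<meta http-equiv="refresh" content="0; url=http://example.com/next">'))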

Example 109

Project: pywb
Source File: pywb_init.py
View license
def create_wb_router(passed_config=None):
    passed_config = passed_config or {}

    defaults = load_yaml_config(DEFAULT_CONFIG)

    config = DictChain(passed_config, defaults)

    routes = []

    port = config.get('port')

    collections = config.get('collections', {})

    static_routes = config.get('static_routes', {})

    root_route = None

    # collections based on file system
    if config.get('enable_auto_colls', True):
        colls_loader_cls = config.get('colls_loader_cls', DirectoryCollsLoader)
        dir_loader = colls_loader_cls(config, static_routes, collections)
        dir_loader()
        #collections.update(dir_loader())

    if config.get('enable_memento', False):
        request_class = MementoRequest
    else:
        request_class = WbRequest

    # store live and replay handlers
    handler_dict = {}

    # setup template globals
    templates_dirs = config['templates_dirs']
    jinja_env = J2TemplateView.init_shared_env(paths=templates_dirs,
                                               packages=config['template_packages'])

    jinja_env.globals.update(config.get('template_globals', {}))

    for static_name, static_path in six.iteritems(static_routes):
        routes.append(Route(static_name, StaticHandler(static_path)))

    for name, value in six.iteritems(collections):
        if isinstance(value, BaseHandler):
            handler_dict[name] = value
            new_route = Route(name, value, config=config)
            if name != '':
                routes.append(new_route)
            else:
                root_route = new_route
            continue

        route_config = init_route_config(value, config)
        route_class = route_config.get('route_class', Route)

        if route_config.get('index_paths') == '$liveweb':
            live = create_live_handler(route_config)
            handler_dict[name] = live
            new_route = route_class(name, live, config=route_config)
            if name != '':
                routes.append(new_route)
            else:
                root_route = new_route
            continue

        query_handler = init_collection(route_config)

        wb_handler = create_wb_handler(
            query_handler=query_handler,
            config=route_config,
        )

        handler_dict[name] = wb_handler

        logging.debug('Adding Collection: ' + name)

        new_route = route_class(name, wb_handler,
                                config=route_config,
                                request_class=request_class)

        if name != '':
            routes.append(new_route)
        else:
            root_route = new_route

        # cdx query handler
        cdx_api_suffix = route_config.get('enable_cdx_api', False)

        if cdx_api_suffix:
            add_cdx_api_handler(name, cdx_api_suffix, routes, query_handler,
                                route_class=route_class)

    if config.get('debug_echo_env', False):
        routes.append(Route('echo_env', DebugEchoEnvHandler()))

    if config.get('debug_echo_req', False):
        routes.append(Route('echo_req', DebugEchoHandler()))

    if root_route:
        routes.append(root_route)

    # resolve any cross handler references
    for route in routes:
        if hasattr(route.handler, 'resolve_refs'):
            route.handler.resolve_refs(handler_dict)

    # default to regular archival mode
    router = ArchivalRouter

    if config.get('enable_http_proxy', False):
        router = ProxyArchivalRouter

        view = init_view(config, 'proxy_select_html')

        if 'proxy_options' not in passed_config:
            passed_config['proxy_options'] = {}

        if view:
            passed_config['proxy_options']['proxy_select_view'] = view

        view = init_view(config, 'proxy_cert_download_html')

        if view:
            passed_config['proxy_options']['proxy_cert_download_view'] = view

    # Finally, create wb router
    return router(
        routes,
        port=port,
        abs_path=config.get('absolute_paths', True),
        home_view=init_view(config, 'home_html'),
        error_view=init_view(config, 'error_html'),
        info_view=init_view(config, 'info_json'),
        config=config
    )
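
A detail worth noting in create_wb_router() is that the collection registered under the empty name becomes the root route and is appended last, so named collections are matched first. The toy sketch below illustrates just that ordering; the Route class and build_routes() here are simplified stand-ins, not pywb's real classes.

import logging

class Route(object):
    """Simplified stand-in for pywb's Route: match by name prefix."""
    def __init__(self, name, handler):
        self.name = name
        self.handler = handler

    def matches(self, path):
        return self.name == '' or path.lstrip('/').startswith(self.name)


def build_routes(collections):
    routes, root_route = [], None
    for name, handler in collections.items():
        route = Route(name, handler)
        if name != '':
            logging.debug('Adding Collection: ' + name)
            routes.append(route)
        else:
            root_route = route
    if root_route:
        routes.append(root_route)  # the catch-all must be tried last
    return routes


routes = build_routes({'pywb': 'archive handler', '': 'home handler'})
print([r.name for r in routes])  # named routes first, '' last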

Example 110

Project: tp-qemu
Source File: ksm_overcommit.py
View license
def run(test, params, env):
    """
    Tests KSM (Kernel Shared Memory) capability by allocating and filling
    KVM guests' memory with various values. KVM marks the memory as
    MADV_MERGEABLE so all of a VM's memory can be merged. The workers in the
    guest write to a tmpfs filesystem, so allocations are not limited by the
    process's max memory, only by the VM's memory. Two test modes are
    supported - serial and parallel.

    Serial mode - uses multiple VMs, allocates memory per guest and always
                  verifies the expected amount of shared memory.
                  0) Prints out the setup and initializes guest(s)
                  1) Fills guest with the same number (S1)
                  2) Random fill on the first guest
                  3) Random fill of the remaining VMs one by one until the
                     memory is completely filled (KVM stops machines that
                     ask for additional memory until memory becomes
                     available) (S2, shouldn't finish)
                  4) Destroy all VMs but the last one
                  5) Checks the last VMs memory for corruption
    Parallel mode - uses one VM with multiple allocator workers. Executes
                   scenarios in parallel to put more stress on the KVM.
                   0) Prints out the setup and initializes guest(s)
                   1) Fills memory with the same number (S1)
                   2) Fills memory with random numbers (S2)
                   3) Verifies all pages
                   4) Fills memory with the same number (S2)
                   5) Changes the last 96B (S3)

    Scenarios:
    S1) Fill all vms with the same value (all pages should be merged into 1)
    S2) Random fill (all pages should be split)
    S3) Fill last 96B (change only last 96B of each page; some pages will be
                      merged; there was a bug with data corruption)
    Every worker has a unique random key so we are able to verify the filled
    values.

    :param test: kvm test object.
    :param params: Dictionary with test parameters.
    :param env: Dictionary with the test environment.

    :param cfg: ksm_swap - use swap?
    :param cfg: ksm_overcommit_ratio - memory overcommit (serial mode only)
    :param cfg: ksm_parallel_ratio - number of workers (parallel mode only)
    :param cfg: ksm_host_reserve - override memory reserve on host in MB
    :param cfg: ksm_guest_reserve - override memory reserve on guests in MB
    :param cfg: ksm_mode - test mode {serial, parallel}
    :param cfg: ksm_perf_ratio - performance ratio, increase it when your
                                 machine is too slow
    """
    def _start_allocator(vm, session, timeout):
        """
        Execute ksm_overcommit_guest.py on guest, wait until it's initialized.

        :param vm: VM object.
        :param session: Remote session to a VM object.
        :param timeout: Timeout that will be used to verify if
                ksm_overcommit_guest.py started properly.
        """
        logging.debug("Starting ksm_overcommit_guest.py on guest %s", vm.name)
        session.sendline("python /tmp/ksm_overcommit_guest.py")
        try:
            session.read_until_last_line_matches(["PASS:", "FAIL:"], timeout)
        except aexpect.ExpectProcessTerminatedError, details:
            e_msg = ("Command ksm_overcommit_guest.py on vm '%s' failed: %s" %
                     (vm.name, str(details)))
            raise error.TestFail(e_msg)

    def _execute_allocator(command, vm, session, timeout):
        """
        Execute a given command on ksm_overcommit_guest.py main loop,
        indicating the vm the command was executed on.

        :param command: Command that will be executed.
        :param vm: VM object.
        :param session: Remote session to VM object.
        :param timeout: Timeout used to verify expected output.

        :return: Tuple (match index, data)
        """
        logging.debug("Executing '%s' on ksm_overcommit_guest.py loop, "
                      "vm: %s, timeout: %s", command, vm.name, timeout)
        session.sendline(command)
        try:
            (match, data) = session.read_until_last_line_matches(
                ["PASS:", "FAIL:"],
                timeout)
        except aexpect.ExpectProcessTerminatedError, details:
            e_msg = ("Failed to execute command '%s' on "
                     "ksm_overcommit_guest.py, vm '%s': %s" %
                     (command, vm.name, str(details)))
            raise error.TestFail(e_msg)
        return (match, data)

    def get_ksmstat():
        """
        Return the amount of memory shared by KSM, in MB

        :return: memory in MB
        """
        fpages = open('/sys/kernel/mm/ksm/pages_sharing')
        ksm_pages = int(fpages.read())
        fpages.close()
        return ((ksm_pages * 4096) / 1e6)

    def initialize_guests():
        """
        Initialize guests (fill their memories with specified patterns).
        """
        logging.info("Phase 1: filling guest memory pages")
        for session in lsessions:
            vm = lvms[lsessions.index(session)]

            logging.debug("Turning off swap on vm %s", vm.name)
            session.cmd("swapoff -a", timeout=300)

            # Start the allocator
            _start_allocator(vm, session, 60 * perf_ratio)

        # Execute allocator on guests
        for i in range(0, vmsc):
            vm = lvms[i]

            cmd = "mem = MemFill(%d, %s, %s)" % (ksm_size, skeys[i], dkeys[i])
            _execute_allocator(cmd, vm, lsessions[i], 60 * perf_ratio)

            cmd = "mem.value_fill(%d)" % skeys[0]
            _execute_allocator(cmd, vm, lsessions[i],
                               fill_base_timeout * 2 * perf_ratio)

            # Let ksm_overcommit_guest.py do its job
            # (until shared mem reaches expected value)
            shm = 0
            j = 0
            logging.debug("Target shared meminfo for guest %s: %s", vm.name,
                          ksm_size)
            while ((new_ksm and (shm < (ksm_size * (i + 1)))) or
                    (not new_ksm and (shm < (ksm_size)))):
                if j > 64:
                    logging.debug(utils_test.get_memory_info(lvms))
                    raise error.TestError("SHM didn't merge the memory until "
                                          "the DL on guest: %s" % vm.name)
                pause = ksm_size / 200 * perf_ratio
                logging.debug("Waiting %ds before proceeding...", pause)
                time.sleep(pause)
                if (new_ksm):
                    shm = get_ksmstat()
                else:
                    shm = vm.get_shared_meminfo()
                logging.debug("Shared meminfo for guest %s after "
                              "iteration %s: %s", vm.name, j, shm)
                j += 1

        # Keep some reserve
        pause = ksm_size / 200 * perf_ratio
        logging.debug("Waiting %ds before proceeding...", pause)
        time.sleep(pause)

        logging.debug(utils_test.get_memory_info(lvms))
        logging.info("Phase 1: PASS")

    def separate_first_guest():
        """
        Separate memory of the first guest by generating special random series
        """
        logging.info("Phase 2: Split the pages on the first guest")

        cmd = "mem.static_random_fill()"
        data = _execute_allocator(cmd, lvms[0], lsessions[0],
                                  fill_base_timeout * 2 * perf_ratio)[1]

        r_msg = data.splitlines()[-1]
        logging.debug("Return message of static_random_fill: %s", r_msg)
        out = int(r_msg.split()[4])
        logging.debug("Performance: %dMB * 1000 / %dms = %dMB/s", ksm_size,
                      out, (ksm_size * 1000 / out))
        logging.debug(utils_test.get_memory_info(lvms))
        logging.debug("Phase 2: PASS")

    def split_guest():
        """
        Sequential split of pages on guests up to memory limit
        """
        logging.info("Phase 3a: Sequential split of pages on guests up to "
                     "memory limit")
        last_vm = 0
        session = None
        vm = None
        for i in range(1, vmsc):
            # Check VMs
            for j in range(0, vmsc):
                if not lvms[j].is_alive:
                    e_msg = ("VM %d died while executing static_random_fill on"
                             " VM %d in allocator loop" % (j, i))
                    raise error.TestFail(e_msg)
            vm = lvms[i]
            session = lsessions[i]
            cmd = "mem.static_random_fill()"
            logging.debug("Executing %s on ksm_overcommit_guest.py loop, "
                          "vm: %s", cmd, vm.name)
            session.sendline(cmd)

            out = ""
            try:
                logging.debug("Watching host mem while filling vm %s memory",
                              vm.name)
                while (not out.startswith("PASS") and
                       not out.startswith("FAIL")):
                    if not vm.is_alive():
                        e_msg = ("VM %d died while executing "
                                 "static_random_fill on allocator loop" % i)
                        raise error.TestFail(e_msg)
                    free_mem = int(utils_memory.read_from_meminfo("MemFree"))
                    if (ksm_swap):
                        free_mem = (free_mem +
                                    int(utils_memory.read_from_meminfo("SwapFree")))
                    logging.debug("Free memory on host: %d", free_mem)

                    # We need to keep some memory for python to run.
                    if (free_mem < 64000) or (ksm_swap and
                                              free_mem < (450000 * perf_ratio)):
                        vm.pause()
                        for j in range(0, i):
                            lvms[j].destroy(gracefully=False)
                        time.sleep(20)
                        vm.resume()
                        logging.debug("Only %s free memory, killing %d guests",
                                      free_mem, (i - 1))
                        last_vm = i
                    out = session.read_nonblocking(0.1, 1)
                    time.sleep(2)
            except OSError:
                logging.debug("Only %s host free memory, killing %d guests",
                              free_mem, (i - 1))
                logging.debug("Stopping %s", vm.name)
                vm.pause()
                for j in range(0, i):
                    logging.debug("Destroying %s", lvms[j].name)
                    lvms[j].destroy(gracefully=False)
                time.sleep(20)
                vm.resume()
                last_vm = i

            if last_vm != 0:
                break
            logging.debug("Memory filled for guest %s", vm.name)

        logging.info("Phase 3a: PASS")

        logging.info("Phase 3b: Verify memory of the max stressed VM")
        for i in range(last_vm + 1, vmsc):
            lsessions[i].close()
            if i == (vmsc - 1):
                logging.debug(utils_test.get_memory_info([lvms[i]]))
            logging.debug("Destroying guest %s", lvms[i].name)
            lvms[i].destroy(gracefully=False)

        # Verify last machine with randomly generated memory
        cmd = "mem.static_random_verify()"
        _execute_allocator(cmd, lvms[last_vm], lsessions[last_vm],
                           (mem / 200 * 50 * perf_ratio))
        logging.debug(utils_test.get_memory_info([lvms[last_vm]]))

        lsessions[last_vm].cmd_output("die()", 20)
        lvms[last_vm].destroy(gracefully=False)
        logging.info("Phase 3b: PASS")

    def split_parallel():
        """
        Parallel page splitting
        """
        logging.info("Phase 1: parallel page splitting")
        # We have to wait until the allocator is finished (it waits 5 seconds
        # to clean the socket).

        session = lsessions[0]
        vm = lvms[0]
        for i in range(1, max_alloc):
            lsessions.append(vm.wait_for_login(timeout=360))

        session.cmd("swapoff -a", timeout=300)

        for i in range(0, max_alloc):
            # Start the allocator
            _start_allocator(vm, lsessions[i], 60 * perf_ratio)

        logging.info("Phase 1: PASS")

        logging.info("Phase 2a: Simultaneous merging")
        logging.debug("Memory used by allocator on guests = %dMB",
                      (ksm_size / max_alloc))

        for i in range(0, max_alloc):
            cmd = "mem = MemFill(%d, %s, %s)" % ((ksm_size / max_alloc),
                                                 skeys[i], dkeys[i])
            _execute_allocator(cmd, vm, lsessions[i], 60 * perf_ratio)

            cmd = "mem.value_fill(%d)" % (skeys[0])
            _execute_allocator(cmd, vm, lsessions[i],
                               fill_base_timeout * perf_ratio)

        # Wait until ksm_overcommit_guest.py merges pages (3 * ksm_size / 3)
        shm = 0
        i = 0
        logging.debug("Target shared memory size: %s", ksm_size)
        while (shm < ksm_size):
            if i > 64:
                logging.debug(utils_test.get_memory_info(lvms))
                raise error.TestError("SHM didn't merge the memory until DL")
            pause = ksm_size / 200 * perf_ratio
            logging.debug("Waiting %ds before proceed...", pause)
            time.sleep(pause)
            if (new_ksm):
                shm = get_ksmstat()
            else:
                shm = vm.get_shared_meminfo()
            logging.debug("Shared meminfo after attempt %s: %s", i, shm)
            i += 1

        logging.debug(utils_test.get_memory_info([vm]))
        logging.info("Phase 2a: PASS")

        logging.info("Phase 2b: Simultaneous spliting")
        # Actual splitting
        for i in range(0, max_alloc):
            cmd = "mem.static_random_fill()"
            data = _execute_allocator(cmd, vm, lsessions[i],
                                      fill_base_timeout * perf_ratio)[1]

            data = data.splitlines()[-1]
            logging.debug(data)
            out = int(data.split()[4])
            logging.debug("Performance: %dMB * 1000 / %dms = %dMB/s",
                          (ksm_size / max_alloc), out,
                          (ksm_size * 1000 / out / max_alloc))
        logging.debug(utils_test.get_memory_info([vm]))
        logging.info("Phase 2b: PASS")

        logging.info("Phase 2c: Simultaneous verification")
        for i in range(0, max_alloc):
            cmd = "mem.static_random_verify()"
            data = _execute_allocator(cmd, vm, lsessions[i],
                                      (mem / 200 * 50 * perf_ratio))[1]
        logging.info("Phase 2c: PASS")

        logging.info("Phase 2d: Simultaneous merging")
        # Actual splitting
        for i in range(0, max_alloc):
            cmd = "mem.value_fill(%d)" % skeys[0]
            data = _execute_allocator(cmd, vm, lsessions[i],
                                      fill_base_timeout * 2 * perf_ratio)[1]
        logging.debug(utils_test.get_memory_info([vm]))
        logging.info("Phase 2d: PASS")

        logging.info("Phase 2e: Simultaneous verification")
        for i in range(0, max_alloc):
            cmd = "mem.value_check(%d)" % skeys[0]
            data = _execute_allocator(cmd, vm, lsessions[i],
                                      (mem / 200 * 50 * perf_ratio))[1]
        logging.info("Phase 2e: PASS")

        logging.info("Phase 2f: Simultaneous spliting last 96B")
        for i in range(0, max_alloc):
            cmd = "mem.static_random_fill(96)"
            data = _execute_allocator(cmd, vm, lsessions[i],
                                      fill_base_timeout * perf_ratio)[1]

            data = data.splitlines()[-1]
            out = int(data.split()[4])
            logging.debug("Performance: %dMB * 1000 / %dms = %dMB/s",
                          ksm_size / max_alloc, out,
                          (ksm_size * 1000 / out / max_alloc))

        logging.debug(utils_test.get_memory_info([vm]))
        logging.info("Phase 2f: PASS")

        logging.info("Phase 2g: Simultaneous verification last 96B")
        for i in range(0, max_alloc):
            cmd = "mem.static_random_verify(96)"
            _, data = _execute_allocator(cmd, vm, lsessions[i],
                                         (mem / 200 * 50 * perf_ratio))
        logging.debug(utils_test.get_memory_info([vm]))
        logging.info("Phase 2g: PASS")

        logging.debug("Cleaning up...")
        for i in range(0, max_alloc):
            lsessions[i].cmd_output("die()", 20)
        session.close()
        vm.destroy(gracefully=False)

    # Main test code
    logging.info("Starting phase 0: Initialization")
    if utils.run("ps -C ksmtuned", ignore_status=True).exit_status == 0:
        logging.info("Killing ksmtuned...")
        utils.run("killall ksmtuned")
    new_ksm = False
    if (os.path.exists("/sys/kernel/mm/ksm/run")):
        utils.run("echo 50 > /sys/kernel/mm/ksm/sleep_millisecs")
        utils.run("echo 5000 > /sys/kernel/mm/ksm/pages_to_scan")
        utils.run("echo 1 > /sys/kernel/mm/ksm/run")

        e_up = "/sys/kernel/mm/transparent_hugepage/enabled"
        e_rh = "/sys/kernel/mm/redhat_transparent_hugepage/enabled"
        if os.path.exists(e_up):
            utils.run("echo 'never' > %s" % e_up)
        if os.path.exists(e_rh):
            utils.run("echo 'never' > %s" % e_rh)
        new_ksm = True
    else:
        try:
            utils.run("modprobe ksm")
            utils.run("ksmctl start 5000 100")
        except error.CmdError, details:
            raise error.TestFail("Failed to load KSM: %s" % details)

    # host_reserve: mem reserve kept for the host system to run
    host_reserve = int(params.get("ksm_host_reserve", -1))
    if (host_reserve == -1):
        # default host_reserve = MemAvailable + one_minimal_guest(128MB)
        # later we add 64MB per additional guest
        host_reserve = ((utils_memory.memtotal() -
                         utils_memory.read_from_meminfo("MemFree")) /
                        1024 + 128)
        # using default reserve
        _host_reserve = True
    else:
        _host_reserve = False

    # guest_reserve: mem reserve kept to prevent the guest OS from killing processes
    guest_reserve = int(params.get("ksm_guest_reserve", -1))
    if (guest_reserve == -1):
        # default guest_reserve = minimal_system_mem(256MB)
        # later we add tmpfs overhead
        guest_reserve = 256
        # using default reserve
        _guest_reserve = True
    else:
        _guest_reserve = False

    max_vms = int(params.get("max_vms", 2))
    overcommit = float(params.get("ksm_overcommit_ratio", 2.0))
    max_alloc = int(params.get("ksm_parallel_ratio", 1))

    # vmsc: count of all used VMs
    vmsc = int(overcommit) + 1
    vmsc = max(vmsc, max_vms)

    if (params['ksm_mode'] == "serial"):
        max_alloc = vmsc
        if _host_reserve:
            # First round of additional guest reserves
            host_reserve += vmsc * 64
            _host_reserve = vmsc

    host_mem = (int(utils_memory.memtotal()) / 1024 - host_reserve)

    ksm_swap = False
    if params.get("ksm_swap") == "yes":
        ksm_swap = True

    # Performance ratio
    perf_ratio = params.get("ksm_perf_ratio")
    if perf_ratio:
        perf_ratio = float(perf_ratio)
    else:
        perf_ratio = 1

    if (params['ksm_mode'] == "parallel"):
        vmsc = 1
        overcommit = 1
        mem = host_mem
        # 32bit system adjustment
        if "64" not in params.get("vm_arch_name"):
            logging.debug("Probably i386 guest architecture, "
                          "max allocator mem = 2G")
            # Guest can have more than 2G but
            # kvm mem + 1MB (allocator itself) can't
            if (host_mem > 3100):
                mem = 3100

        if os.popen("uname -i").readline().startswith("i386"):
            logging.debug("Host is i386 architecture, max guest mem is 2G")
            # Guest system with qemu overhead (64M) can't have more than 2G
            if mem > 3100 - 64:
                mem = 3100 - 64

    else:
        # mem: Memory of the guest systems. Maximum must be less than
        # host's physical ram
        mem = int(overcommit * host_mem / vmsc)

        # 32bit system adjustment
        if not params['image_name'].endswith("64"):
            logging.debug("Probably i386 guest architecture, "
                          "max allocator mem = 2G")
            # Guest can have more than 2G but
            # kvm mem + 1MB (allocator itself) can't
            if mem - guest_reserve - 1 > 3100:
                vmsc = int(math.ceil((host_mem * overcommit) /
                                     (3100 + guest_reserve)))
                if _host_reserve:
                    host_reserve += (vmsc - _host_reserve) * 64
                    host_mem -= (vmsc - _host_reserve) * 64
                    _host_reserve = vmsc
                mem = int(math.floor(host_mem * overcommit / vmsc))

        if os.popen("uname -i").readline().startswith("i386"):
            logging.debug("Host is i386 architecture, max guest mem is 2G")
            # Guest system with qemu overhead (64M) can't have more than 2G
            if mem > 3100 - 64:
                vmsc = int(math.ceil((host_mem * overcommit) /
                                     (3100 - 64.0)))
                if _host_reserve:
                    host_reserve += (vmsc - _host_reserve) * 64
                    host_mem -= (vmsc - _host_reserve) * 64
                    _host_reserve = vmsc
                mem = int(math.floor(host_mem * overcommit / vmsc))

    # 0.055 represents OS + TMPFS additional reserve per guest ram MB
    if _guest_reserve:
        guest_reserve += math.ceil(mem * 0.055)

    swap = int(utils_memory.read_from_meminfo("SwapTotal")) / 1024

    logging.debug("Overcommit = %f", overcommit)
    logging.debug("True overcommit = %f ", (float(vmsc * mem) /
                                            float(host_mem)))
    logging.debug("Host memory = %dM", host_mem)
    logging.debug("Guest memory = %dM", mem)
    logging.debug("Using swap = %s", ksm_swap)
    logging.debug("Swap = %dM", swap)
    logging.debug("max_vms = %d", max_vms)
    logging.debug("Count of all used VMs = %d", vmsc)
    logging.debug("Performance_ratio = %f", perf_ratio)

    # Generate unique keys for random series
    skeys = []
    dkeys = []
    for i in range(0, max(vmsc, max_alloc)):
        key = random.randrange(0, 255)
        while key in skeys:
            key = random.randrange(0, 255)
        skeys.append(key)

        key = random.randrange(0, 999)
        while key in dkeys:
            key = random.randrange(0, 999)
        dkeys.append(key)

    logging.debug("skeys: %s", skeys)
    logging.debug("dkeys: %s", dkeys)

    lvms = []
    lsessions = []

    # As we don't know the number and memory amount of VMs in advance,
    # we need to specify and create them here
    vm_name = params["main_vm"]
    params['mem'] = mem
    params['vms'] = vm_name
    # Associate pidfile name
    params['pid_' + vm_name] = utils_misc.generate_tmp_file_name(vm_name,
                                                                 'pid')
    if not params.get('extra_params'):
        params['extra_params'] = ' '
    params['extra_params_' + vm_name] = params.get('extra_params')
    params['extra_params_' + vm_name] += (" -pidfile %s" %
                                          (params.get('pid_' + vm_name)))
    params['extra_params'] = params.get('extra_params_' + vm_name)

    # ksm_size: amount of memory used by allocator
    ksm_size = mem - guest_reserve
    logging.debug("Memory used by allocator on guests = %dM", ksm_size)
    fill_base_timeout = ksm_size / 10

    # Creating the first guest
    env_process.preprocess_vm(test, params, env, vm_name)
    lvms.append(env.get_vm(vm_name))
    if not lvms[0]:
        raise error.TestError("VM object not found in environment")
    if not lvms[0].is_alive():
        raise error.TestError("VM seems to be dead; Test requires a living "
                              "VM")

    logging.debug("Booting first guest %s", lvms[0].name)

    lsessions.append(lvms[0].wait_for_login(timeout=360))
    # Associate vm PID
    try:
        tmp = open(params.get('pid_' + vm_name), 'r')
        params['pid_' + vm_name] = int(tmp.readline())
    except Exception:
        raise error.TestFail("Could not get PID of %s" % (vm_name))

    # Creating other guest systems
    for i in range(1, vmsc):
        vm_name = "vm" + str(i + 1)
        params['pid_' + vm_name] = utils_misc.generate_tmp_file_name(vm_name,
                                                                     'pid')
        params['extra_params_' + vm_name] = params.get('extra_params')
        params['extra_params_' + vm_name] += (" -pidfile %s" %
                                              (params.get('pid_' + vm_name)))
        params['extra_params'] = params.get('extra_params_' + vm_name)

        # Last VM is later used to run more allocators simultaneously
        lvms.append(lvms[0].clone(vm_name, params))
        env.register_vm(vm_name, lvms[i])
        params['vms'] += " " + vm_name

        logging.debug("Booting guest %s", lvms[i].name)
        lvms[i].create()
        if not lvms[i].is_alive():
            raise error.TestError("VM %s seems to be dead; Test requires a"
                                  "living VM" % lvms[i].name)

        lsessions.append(lvms[i].wait_for_login(timeout=360))
        try:
            tmp = open(params.get('pid_' + vm_name), 'r')
            params['pid_' + vm_name] = int(tmp.readline())
        except Exception:
            raise error.TestFail("Could not get PID of %s" % (vm_name))

    # Let guests rest a little bit :-)
    pause = vmsc * 2 * perf_ratio
    logging.debug("Waiting %ds before proceed", pause)
    time.sleep(vmsc * 2 * perf_ratio)
    logging.debug(utils_test.get_memory_info(lvms))

    # Copy ksm_overcommit_guest.py into guests
    shared_dir = os.path.dirname(data_dir.get_data_dir())
    vksmd_src = os.path.join(shared_dir, "scripts", "ksm_overcommit_guest.py")
    dst_dir = "/tmp"
    for vm in lvms:
        vm.copy_files_to(vksmd_src, dst_dir)
    logging.info("Phase 0: PASS")

    if params['ksm_mode'] == "parallel":
        logging.info("Starting KSM test parallel mode")
        split_parallel()
        logging.info("KSM test parallel mode: PASS")
    elif params['ksm_mode'] == "serial":
        logging.info("Starting KSM test serial mode")
        initialize_guests()
        separate_first_guest()
        split_guest()
        logging.info("KSM test serial mode: PASS")

Example 111

Project: benchexec
Source File: model.py
View license
    def __init__(self, benchmark_file, config, start_time):
        """
        The constructor of Benchmark reads the source files, options, columns
        and the tool from the XML in the benchmark_file.
        """
        logging.debug("I'm loading the benchmark %s.", benchmark_file)

        self.config = config
        self.benchmark_file = benchmark_file
        self.base_dir = os.path.dirname(self.benchmark_file)

        # get benchmark-name
        self.name = os.path.basename(benchmark_file)[:-4] # remove ending ".xml"
        if config.name:
            self.name += "."+config.name

        self.start_time = start_time
        self.instance = time.strftime("%Y-%m-%d_%H%M", self.start_time)

        self.output_base_name = config.output_path + self.name + "." + self.instance
        self.log_folder = self.output_base_name + ".logfiles" + os.path.sep
        self.log_zip = self.output_base_name + ".logfiles.zip"
        self.result_files_folder = self.output_base_name + ".files"

        # parse XML
        try:
            rootTag = ElementTree.ElementTree().parse(benchmark_file)
        except ElementTree.ParseError as e:
            sys.exit('Benchmark file {} is invalid: {}'.format(benchmark_file, e))
        if 'benchmark' != rootTag.tag:
            sys.exit("Benchmark file {} is invalid: "
                "It's root element is not named 'benchmark'.".format(benchmark_file))

        # get tool
        tool_name = rootTag.get('tool')
        if not tool_name:
            sys.exit('A tool needs to be specified in the benchmark definition file.')
        (self.tool_module, self.tool) = load_tool_info(tool_name)
        self.tool_name = self.tool.name()
        # will be set from the outside if necessary (may not be the case in SaaS environments)
        self.tool_version = None
        self.executable = None

        logging.debug("The tool to be benchmarked is %s.", self.tool_name)

        def parse_memory_limit(value):
            try:
                value = int(value)
                logging.warning(
                    'Value "%s" for memory limit interpreted as MB for backwards compatibility, '
                    'specify a unit to make this unambiguous.',
                    value)
                return value * _BYTE_FACTOR * _BYTE_FACTOR
            except ValueError:
                return util.parse_memory_value(value)

        def handle_limit_value(name, key, cmdline_value, parse_fn):
            value = rootTag.get(key, None)
            # override limit from XML with values from command line
            if cmdline_value is not None:
                if cmdline_value.strip() == "-1": # infinity
                    value = None
                else:
                    value = cmdline_value

            if value is not None:
                try:
                    self.rlimits[key] = parse_fn(value)
                except ValueError as e:
                    sys.exit('Invalid value for {} limit: {}'.format(name.lower(), e))
                if self.rlimits[key] <= 0:
                    sys.exit('{} limit "{}" is invalid, it needs to be a positive number '
                         '(or -1 on the command line for disabling it).'.format(name, value))

        self.rlimits = {}
        keys = list(rootTag.keys())
        handle_limit_value("Time", TIMELIMIT, config.timelimit, util.parse_timespan_value)
        handle_limit_value("Hard time", HARDTIMELIMIT, config.timelimit, util.parse_timespan_value)
        handle_limit_value("Memory", MEMLIMIT, config.memorylimit, parse_memory_limit)
        handle_limit_value("Core", CORELIMIT, config.corelimit, int)

        if HARDTIMELIMIT in self.rlimits:
            hardtimelimit = self.rlimits.pop(HARDTIMELIMIT)
            if TIMELIMIT in self.rlimits:
                if hardtimelimit < self.rlimits[TIMELIMIT]:
                    logging.warning(
                        'Hard timelimit %d is smaller than timelimit %d, ignoring the former.',
                        hardtimelimit, self.rlimits[TIMELIMIT])
                elif hardtimelimit > self.rlimits[TIMELIMIT]:
                    self.rlimits[SOFTTIMELIMIT] = self.rlimits[TIMELIMIT]
                    self.rlimits[TIMELIMIT] = hardtimelimit
            else:
                self.rlimits[TIMELIMIT] = hardtimelimit

        # get number of threads, default value is 1
        self.num_of_threads = int(rootTag.get("threads")) if ("threads" in keys) else 1
        if config.num_of_threads is not None:
            self.num_of_threads = config.num_of_threads
        if self.num_of_threads < 1:
            logging.error("At least ONE thread must be given!")
            sys.exit()

        # get global options and property file
        self.options = util.get_list_from_xml(rootTag)
        self.propertyfile = util.text_or_none(util.get_single_child_from_xml(rootTag, PROPERTY_TAG))

        # get columns
        self.columns = Benchmark.load_columns(rootTag.find("columns"))

        # get global source files, they are used in all run sets
        globalSourcefilesTags = rootTag.findall("tasks") + rootTag.findall("sourcefiles")

        # get required files
        self._required_files = set()
        for required_files_tag in rootTag.findall('requiredfiles'):
            required_files = util.expand_filename_pattern(required_files_tag.text, self.base_dir)
            if not required_files:
                logging.warning('Pattern %s in requiredfiles tag did not match any file.',
                                required_files_tag.text)
            self._required_files = self._required_files.union(required_files)

        # get requirements
        self.requirements = Requirements(rootTag.findall("require"), self.rlimits, config)

        result_files_tags = rootTag.findall("resultfiles")
        if result_files_tags:
            self.result_files_patterns = [
                os.path.normpath(p.text) for p in result_files_tags if p.text]
            for pattern in self.result_files_patterns:
                if pattern.startswith(".."):
                    sys.exit("Invalid relative result-files pattern '{}'.".format(pattern))
        else:
            # default is "everything below current directory"
            self.result_files_patterns = ["."]

        # get benchmarks
        self.run_sets = []
        for (i, rundefinitionTag) in enumerate(rootTag.findall("rundefinition")):
            self.run_sets.append(RunSet(rundefinitionTag, self, i+1, globalSourcefilesTags))

        if not self.run_sets:
            for (i, rundefinitionTag) in enumerate(rootTag.findall("test")):
                self.run_sets.append(RunSet(rundefinitionTag, self, i+1, globalSourcefilesTags))
            if self.run_sets:
                logging.warning("Benchmark file %s uses deprecated <test> tags. "
                                "Please rename them to <rundefinition>.",
                                benchmark_file)
            else:
                logging.warning("Benchmark file %s specifies no runs to execute "
                                "(no <rundefinition> tags found).",
                                benchmark_file)

        if not any(runSet.should_be_executed() for runSet in self.run_sets):
            logging.warning("No <rundefinition> tag selected, nothing will be executed.")
            if config.selected_run_definitions:
                logging.warning("The selection %s does not match any run definitions of %s.",
                                config.selected_run_definitions,
                                [runSet.real_name for runSet in self.run_sets])
        elif config.selected_run_definitions:
            for selected in config.selected_run_definitions:
                if not any(util.wildcard_match(run_set.real_name, selected) for run_set in self.run_sets):
                    logging.warning(
                        'The selected run definition "%s" is not present in the input file, '
                        'skipping it.',
                        selected)
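
handle_limit_value() above merges a limit from the benchmark XML with an optional command line override, where "-1" disables the limit and non-positive values are rejected. The standalone sketch below shows the same decision logic; the function and parameter names are placeholders, not benchexec's real API.

import logging

def resolve_limit(name, xml_value, cmdline_value, parse_fn):
    """Return the effective limit, or None if the limit is disabled."""
    value = xml_value
    if cmdline_value is not None:
        # the command line wins; "-1" means "no limit"
        value = None if cmdline_value.strip() == "-1" else cmdline_value
    if value is None:
        logging.debug("%s limit disabled", name)
        return None
    parsed = parse_fn(value)
    if parsed <= 0:
        raise ValueError("{} limit '{}' must be a positive number".format(name, value))
    return parsed


print(resolve_limit("Time", "900", None, int))   # 900, taken from the XML
print(resolve_limit("Time", "900", "60", int))   # 60, command line override
print(resolve_limit("Time", "900", "-1", int))   # None, limit disabled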

Example 112

Project: sugar-toolkit-gtk3
Source File: activity.py
View license
    def __init__(self, handle, create_jobject=True):
        '''
        Initialise the Activity

        Args:

        handle (sugar3.activity.activityhandle.ActivityHandle)
            instance providing the activity id and access to the
            presence service which *may* provide sharing for this
            application
        create_jobject (boolean)
            DEPRECATED: defines whether a journal object should be created if
            we are not resuming. The parameter is ignored; an object will
            always be created in the Journal.

        Side effects:

            Sets the gdk screen DPI setting (resolution) to the
            Sugar screen resolution.

            Connects our "destroy" message to our _destroy_cb
            method.

            Creates a base Gtk.Window within this window.

            Creates an ActivityService (self._bus) servicing
            this application.

        Usage:
            If your Activity implements __init__(), it should call
            the base class __init__() before doing Activity-specific things.

        '''

        # Stuff that needs to be done early
        icons_path = os.path.join(get_bundle_path(), 'icons')
        Gtk.IconTheme.get_default().append_search_path(icons_path)

        sugar_theme = 'sugar-72'
        if 'SUGAR_SCALING' in os.environ:
            if os.environ['SUGAR_SCALING'] == '100':
                sugar_theme = 'sugar-100'

        # This code can be removed when we grow an xsettings daemon (the GTK+
        # init routines will then automatically figure out the font settings)
        settings = Gtk.Settings.get_default()
        settings.set_property('gtk-theme-name', sugar_theme)
        settings.set_property('gtk-icon-theme-name', 'sugar')
        settings.set_property('gtk-button-images', True)
        settings.set_property('gtk-font-name',
                              '%s %f' % (style.FONT_FACE, style.FONT_SIZE))

        Window.__init__(self)

        if 'SUGAR_ACTIVITY_ROOT' in os.environ:
            # If this activity runs inside Sugar, we want it to take the whole
            # screen. It would be better if the shell did this, but we haven't
            # yet found a good way to do it there. See #1263.
            self.connect('window-state-event', self.__window_state_event_cb)
            screen = Gdk.Screen.get_default()
            screen.connect('size-changed', self.__screen_size_changed_cb)
            self._adapt_window_to_screen()

        # Process titles will only show 15 characters, but they get truncated
        # anyway; if more characters are supported in the future we will get a
        # better view of the processes.
        proc_title = '%s <%s>' % (get_bundle_name(), handle.activity_id)
        util.set_proc_title(proc_title)

        self.connect('realize', self.__realize_cb)
        self.connect('delete-event', self.__delete_event_cb)

        self._active = False
        self._active_time = None
        self._spent_time = 0
        self._activity_id = handle.activity_id
        self.shared_activity = None
        self._join_id = None
        self._updating_jobject = False
        self._closing = False
        self._quit_requested = False
        self._deleting = False
        self._max_participants = None
        self._invites_queue = []
        self._jobject = None
        self._read_file_called = False

        self._session = _get_session()
        self._session.register(self)
        self._session.connect('quit-requested',
                              self.__session_quit_requested_cb)
        self._session.connect('quit', self.__session_quit_cb)

        accel_group = Gtk.AccelGroup()
        self.sugar_accel_group = accel_group
        self.add_accel_group(accel_group)

        self._bus = ActivityService(self)
        self._owns_file = False

        share_scope = SCOPE_PRIVATE

        if handle.object_id:
            self._jobject = datastore.get(handle.object_id)

            if 'share-scope' in self._jobject.metadata:
                share_scope = self._jobject.metadata['share-scope']

            if 'launch-times' in self._jobject.metadata:
                self._jobject.metadata['launch-times'] += ', %d' % \
                    int(time.time())
            else:
                self._jobject.metadata['launch-times'] = \
                    str(int(time.time()))

            if 'spent-times' in self._jobject.metadata:
                self._jobject.metadata['spent-times'] += ', 0'
            else:
                self._jobject.metadata['spent-times'] = '0'

        self.shared_activity = None
        self._join_id = None

        if handle.object_id is None:
            logging.debug('Creating a jobject.')
            self._jobject = self._initialize_journal_object()

        if handle.invited:
            wait_loop = GObject.MainLoop()
            self._client_handler = _ClientHandler(
                self.get_bundle_id(),
                partial(self.__got_channel_cb, wait_loop))
            # FIXME: The current API requires that self.shared_activity is set
            # before exiting from __init__, so we wait until we have got the
            # shared activity. http://bugs.sugarlabs.org/ticket/2168
            wait_loop.run()
        else:
            pservice = presenceservice.get_instance()
            mesh_instance = pservice.get_activity(self._activity_id,
                                                  warn_if_none=False)
            self._set_up_sharing(mesh_instance, share_scope)

        if self.shared_activity is not None:
            self._jobject.metadata['title'] = self.shared_activity.props.name
            self._jobject.metadata['icon-color'] = \
                self.shared_activity.props.color
        else:
            self._jobject.metadata.connect('updated',
                                           self.__jobject_updated_cb)
        self.set_title(self._jobject.metadata['title'])

        bundle = get_bundle_instance(get_bundle_path())
        self.set_icon_from_file(bundle.get_icon())
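
One small pattern in the constructor above is how 'launch-times' and 'spent-times' are kept as comma-separated strings in the journal object's metadata. The sketch below replays the 'launch-times' bookkeeping against a plain dict; the dict is a stand-in for the Sugar journal metadata, not the real datastore API.

import logging
import time

def record_launch(metadata):
    """Append the current Unix timestamp to the 'launch-times' field."""
    now = str(int(time.time()))
    if 'launch-times' in metadata:
        metadata['launch-times'] += ', ' + now
    else:
        metadata['launch-times'] = now
    logging.debug('launch-times is now: %s', metadata['launch-times'])


metadata = {}
record_launch(metadata)
record_launch(metadata)
print(metadata['launch-times'])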

Example 113

Project: gandi-dyndns
Source File: gandi_dyndns.py
View license
def update_ip():
  '''
  Check our external IP address and update Gandi's A-record to point to it if
  it has changed.
  '''

  # load the config file so we can get our variables
  log.debug('Loading config file...')
  config = load_config()
  if not check_config(config):
    sys.exit(2)
  log.debug('Config file loaded.')

  # create a connection to the Gandi production API
  gandi = GandiServerProxy(config['api_key'])

  # see if the record's IP differs from ours
  if 'command' in config:
    log.debug('Getting external IP using local command...')
    external_ip = get_local_ip(config['command'])
  else:
    log.debug('Getting external IP...')
    external_ip = get_external_ip()

  log.debug('External IP is: %s', external_ip)

  # make sure we actually got the external IP
  if external_ip is None:
    log.fatal('Could not get external IP.')
    sys.exit(2)

  exit_code = 0

  for domain in config['domains']:
    # get the current zone id for the configured domain
    log.debug("Getting domain info for domain '%s'...", domain)
    domain_info = gandi.domain.info(domain)
    zone_id = domain_info['zone_id']
    log.debug('Got domain info.')

    # get the list of records for the domain's current zone
    log.debug('Getting zone records for live zone version...')
    zone_records = gandi.domain.zone.record.list(zone_id, 0)
    log.debug('Got zone records.')

    updates = []
    for rec in config['domains'][domain]:
      rec = rec.strip()

      # find the configured record, or None if there's not a valid one
      log.debug("Searching for dynamic record '%s'...", rec)
      dynamic_record = None
      for record in zone_records:
        if is_valid_dynamic_record(rec, record):
          dynamic_record = record
          break

      # fail if we found no valid record to update
      if dynamic_record is None:
        log.error('No record found - there must be an A record with a matching name.')
        continue # with next record

      log.debug('  Dynamic record found.')

      # extract the current live IP
      record_ip = dynamic_record['value'].strip()
      log.debug('  Current dynamic record IP is: %s', record_ip)

      # compare the IPs, and exit if they match
      if external_ip == record_ip:
        log.debug('  External IP matches current dynamic record IP, no update necessary.')
        continue # with next record

      log.debug('  External IP differs from current dynamic record IP!')
      updates.append(rec)

    if not updates:
      log.info('External IP matches all current dynamic record IPs, no update necessary.')
      continue # with next domain

    # clone the active zone version so we can modify it
    log.info('Cloning current zone version...')
    new_version_id = gandi.domain.zone.version.new(zone_id)
    log.info('Current zone version cloned.')

    log.info('Getting cloned zone records...')
    new_zone_records = gandi.domain.zone.record.list(zone_id, new_version_id)
    log.info('Cloned zone records retrieved.')

    errors = 0
    for rec in updates:
      # find the configured record, or None if there's not a valid one
      log.debug('Locating dynamic record in cloned zone version...')
      new_dynamic_record = None
      for record in new_zone_records:
        if is_valid_dynamic_record(rec, record):
          new_dynamic_record = record
          break

      # fail if we couldn't find the dynamic record again (this shouldn't happen...)
      if new_dynamic_record is None:
        log.error('Could not find dynamic record in cloned zone version!')
        errors += 1
        continue # with next record

      log.debug('Cloned dynamic record found.')

      # update the new version's dynamic record value (i.e. its IP address)
      log.debug('Updating dynamic record with current external IP...')
      updated_records = gandi.domain.zone.record.update(zone_id, new_version_id, {
        'id': new_dynamic_record['id']
      }, {
        'name': new_dynamic_record['name'],
        'type': new_dynamic_record['type'],
        'value': external_ip
      })

      # ensure that we successfully set the new dynamic record
      if (not updated_records or
          'value' not in updated_records[0] or
          updated_records[0]['value'] != external_ip):
        log.fatal('Failed to successfully update dynamic record!')
        errors += 1
        continue # with next record

      log.info('Dynamic record updated.')

    if errors:
      log.info('Errors during processing, zone NOT UPDATED.')
      exit_code = 1
      continue # with next domain

    # set the new zone version as the active version
    log.info('Updating active zone version...')
    gandi.domain.zone.version.set(zone_id, new_version_id)

    log.info('Set zone %d as the active zone version.', new_version_id)
    log.info('Dynamic record successfully updated to %s!', external_ip)

  if exit_code != 0:
    sys.exit(exit_code)
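
The core decision in update_ip() is comparing the external IP against each configured record's current value and only cloning and updating the zone when at least one record differs. The sketch below isolates that comparison step; the record dicts stand in for Gandi zone records, and no real API calls are made.

import logging

log = logging.getLogger('gandi_dyndns')

def records_needing_update(records, external_ip):
  '''Return the records whose current value differs from external_ip.'''
  updates = []
  for record in records:
    record_ip = record['value'].strip()
    if record_ip == external_ip:
      log.debug("Record '%s' already points at %s, no update necessary.",
                record['name'], external_ip)
    else:
      log.debug("Record '%s' points at %s, needs updating to %s.",
                record['name'], record_ip, external_ip)
      updates.append(record)
  return updates


records = [{'name': '@', 'type': 'A', 'value': '198.51.100.7'},
           {'name': 'home', 'type': 'A', 'value': '203.0.113.9'}]
print([r['name'] for r in records_needing_update(records, '203.0.113.9')])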

Example 114

Project: autotest
Source File: version_1.py
View license
    def state_iterator(self, buffer):
        line = None
        new_tests = []
        job_count, boot_count = 0, 0
        min_stack_size = 0
        stack = status_lib.status_stack()
        current_kernel = kernel("", [])  # UNKNOWN
        current_status = status_lib.statuses[-1]
        current_reason = None
        started_time_stack = [None]
        subdir_stack = [None]
        running_test = None
        running_reasons = set()
        yield []   # we're ready to start running

        # create a RUNNING SERVER_JOB entry to represent the entire test
        running_job = test.parse_partial_test(self.job, "----", "SERVER_JOB",
                                              "", current_kernel,
                                              self.job.started_time)
        new_tests.append(running_job)

        while True:
            # are we finished with parsing?
            if buffer.size() == 0 and self.finished:
                if stack.size() == 0:
                    break
                # we have status lines left on the stack,
                # we need to implicitly abort them first
                logging.debug('Unexpected end of job, aborting')
                abort_subdir_stack = list(subdir_stack)
                if self.job.aborted_by:
                    reason = "Job aborted by %s" % self.job.aborted_by
                    reason += self.job.aborted_on.strftime(
                        " at %b %d %H:%M:%S")
                else:
                    reason = "Job aborted unexpectedly"

                timestamp = line.optional_fields.get('timestamp')
                for i in reversed(xrange(stack.size())):
                    if abort_subdir_stack:
                        subdir = abort_subdir_stack.pop()
                    else:
                        subdir = None
                    abort = self.make_dummy_abort(
                        i, subdir, subdir, timestamp, reason)
                    buffer.put(abort)

            # stop processing once the buffer is empty
            if buffer.size() == 0:
                yield new_tests
                new_tests = []
                continue

            # reinitialize the per-iteration state
            started_time = None
            finished_time = None

            # get the next line
            raw_line = status_lib.clean_raw_line(buffer.get())
            logging.debug('STATUS: %s', raw_line.strip())
            line = status_line.parse_line(raw_line)
            if line is None:
                logging.debug('non-status line, ignoring')
                continue

            # do an initial sanity check of the indentation
            expected_indent = stack.size()
            if line.type == "END":
                expected_indent -= 1
            if line.indent < expected_indent:
                # ABORT the current level if indentation was unexpectedly low
                self.put_back_line_and_abort(
                    buffer, raw_line, stack.size() - 1, subdir_stack[-1],
                    line.optional_fields.get("timestamp"), line.reason)
                continue
            elif line.indent > expected_indent:
                # ignore the log if the indent was unexpectedly high
                logging.debug("unexpected extra indentation, ignoring")
                continue

            # initial line processing
            if line.type == "START":
                stack.start()
                started_time = line.get_timestamp()
                if (line.testname is None and line.subdir is None and
                        not running_test):
                    # we just started a client, all tests are relative to here
                    min_stack_size = stack.size()
                    # start a "RUNNING" CLIENT_JOB entry
                    job_name = "CLIENT_JOB.%d" % job_count
                    running_client = test.parse_partial_test(self.job, None,
                                                             job_name,
                                                             "", current_kernel,
                                                             started_time)
                    logging.debug("RUNNING: %s", running_client.status)
                    logging.debug("Testname: %s", running_client.testname)
                    new_tests.append(running_client)
                elif stack.size() == min_stack_size + 1 and not running_test:
                    # we just started a new test, insert a running record
                    running_reasons = set()
                    if line.reason:
                        running_reasons.add(line.reason)
                    running_test = test.parse_partial_test(self.job,
                                                           line.subdir,
                                                           line.testname,
                                                           line.reason,
                                                           current_kernel,
                                                           started_time)
                    logging.debug("RUNNING: %s", running_test.status)
                    logging.debug("Subdir: %s", running_test.subdir)
                    logging.debug("Testname: %s", running_test.testname)
                    logging.debug("Reason: %s", running_test.reason)
                    new_tests.append(running_test)
                started_time_stack.append(started_time)
                subdir_stack.append(line.subdir)
                continue
            elif line.type == "INFO":
                fields = line.optional_fields
                # update the current kernel if one is defined in the info
                if "kernel" in fields:
                    current_kernel = line.get_kernel()
                # update the SERVER_JOB reason if one was logged for an abort
                if "job_abort_reason" in fields:
                    running_job.reason = fields["job_abort_reason"]
                    new_tests.append(running_job)
                continue
            elif line.type == "STATUS":
                # update the stacks
                if line.subdir and stack.size() > min_stack_size:
                    subdir_stack[-1] = line.subdir
                # update the status, start and finished times
                stack.update(line.status)
                if status_lib.is_worse_than_or_equal_to(line.status,
                                                        current_status):
                    if line.reason:
                        # update the status of a currently running test
                        if running_test:
                            running_reasons.add(line.reason)
                            running_reasons = tko_utils.drop_redundant_messages(
                                running_reasons)
                            sorted_reasons = sorted(running_reasons)
                            running_test.reason = ", ".join(sorted_reasons)
                            current_reason = running_test.reason
                            new_tests.append(running_test)
                            logging.debug("update RUNNING reason: %s",
                                          line.reason)
                        else:
                            current_reason = line.reason
                    current_status = stack.current_status()
                started_time = None
                finished_time = line.get_timestamp()
                # if this is a non-test entry there's nothing else to do
                if line.testname is None and line.subdir is None:
                    continue
            elif line.type == "END":
                # grab the current subdir off of the subdir stack, or, if this
                # is the end of a job, just pop it off
                if (line.testname is None and line.subdir is None and
                        not running_test):
                    min_stack_size = stack.size() - 1
                    subdir_stack.pop()
                else:
                    line.subdir = subdir_stack.pop()
                    if not subdir_stack[-1] and stack.size() > min_stack_size:
                        subdir_stack[-1] = line.subdir
                # update the status, start and finished times
                stack.update(line.status)
                current_status = stack.end()
                if stack.size() > min_stack_size:
                    stack.update(current_status)
                    current_status = stack.current_status()
                started_time = started_time_stack.pop()
                finished_time = line.get_timestamp()
                # update the current kernel
                if line.is_successful_reboot(current_status):
                    current_kernel = line.get_kernel()
                # adjust the testname if this is a reboot
                if line.testname == "reboot" and line.subdir is None:
                    line.testname = "boot.%d" % boot_count
            else:
                assert False

            # have we just finished a test?
            if stack.size() <= min_stack_size:
                # if there was no testname, just use the subdir
                if line.testname is None:
                    line.testname = line.subdir
                # if there was no testname or subdir, use 'CLIENT_JOB'
                if line.testname is None:
                    line.testname = "CLIENT_JOB.%d" % job_count
                    running_test = running_client
                    job_count += 1
                    if not status_lib.is_worse_than_or_equal_to(
                            current_status, "ABORT"):
                        # a job hasn't really failed just because some of the
                        # tests it ran have
                        current_status = "GOOD"

                if not current_reason:
                    current_reason = line.reason
                new_test = test.parse_test(self.job,
                                           line.subdir,
                                           line.testname,
                                           current_status,
                                           current_reason,
                                           current_kernel,
                                           started_time,
                                           finished_time,
                                           running_test)
                running_test = None
                current_status = status_lib.statuses[-1]
                current_reason = None
                if new_test.testname == ("boot.%d" % boot_count):
                    boot_count += 1

                logging.debug("ADD: %s", new_test.status)
                logging.debug("Subdir: %s", new_test.subdir)
                logging.debug("Testname: %s", new_test.testname)
                logging.debug(new_test.reason)

                new_tests.append(new_test)

        # the job is finished, produce the final SERVER_JOB entry and exit
        final_job = test.parse_test(self.job, "----", "SERVER_JOB",
                                    self.job.exit_status(), running_job.reason,
                                    current_kernel,
                                    self.job.started_time,
                                    self.job.finished_time,
                                    running_job)
        new_tests.append(final_job)
        yield new_tests

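Note: state_iterator() above is written as a generator that yields a batch of parsed test entries every time the status buffer drains, which is what lets the caller consume results incrementally. A stripped-down sketch of that consume-and-yield shape, detached from the autotest status classes (the buffer API and the is_finished flag are assumptions for illustration):

import logging

def batch_parser(buffer, is_finished):
    batch = []
    yield []                    # signal readiness before any input arrives
    while True:
        if buffer.size() == 0:
            if is_finished():
                break
            yield batch         # hand back whatever was parsed so far
            batch = []
            continue
        raw_line = buffer.get()
        logging.debug('STATUS: %s', raw_line.strip())
        batch.append(raw_line)
    yield batch                 # final flush once parsing has finished
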
Example 115

Project: sympy-live
Source File: shell.py
View license
    def evaluate(self, statement, session, printer=None, stream=None):
        """Evaluate the statement in sessions's globals. """
        # the Python compiler doesn't like network line endings
        source = statement.replace('\r\n', '\n').rstrip()

        try:
            # check for a SyntaxError now; this way the user will see their
            # original statement and not the transformed one
            ast.parse(source)
        except SyntaxError:
            return self.error(stream, self.syntaxerror())

        # convert int to Integer (1/2 -> Integer(1)/Integer(2))
        source = int_to_Integer(source)

        # split source code into 'exec' and 'eval' parts
        exec_source, eval_source = self.split(source)

        try:
            self.compile(eval_source, 'eval')
        except (OverflowError, SyntaxError, ValueError):
            exec_source, eval_source = source, None

        if exec_source is not None:
            exec_source += '\n'
        if eval_source is not None:
            eval_source += '\n'

        # create a dedicated module to be used as this statement's __main__
        statement_module = new.module('__main__')

        # use this request's __builtin__, since it changes on each request.
        # this is needed for import statements, among other things.
        import __builtin__
        statement_module.__builtin__ = __builtin__

        # create customized display hook
        stringify_func = printer or sstr

        def displayhook(arg):
            if arg is not None:
                __builtin__._ = None
                print stringify_func(arg)
                __builtin__._ = arg

        old_displayhook = sys.displayhook
        sys.displayhook = displayhook

        # swap in our custom module for __main__. then unpickle the session
        # globals, run the statement, and re-pickle the session globals, all
        # inside it.
        old_main = sys.modules.get('__main__')

        try:
            old_globals = {}
            sys.modules['__main__'] = statement_module
            statement_module.__name__ = '__main__'

            # re-evaluate the unpicklables
            for code in session.unpicklables:
                exec code in statement_module.__dict__
                exec code in old_globals

            # re-initialize the globals
            session_globals_dict = session.globals_dict()

            for name, val in session_globals_dict.items():
                try:
                    statement_module.__dict__[name] = val
                    old_globals[name] = val
                except:
                    session.remove_global(name)

            # re-initialize '_' special variable
            __builtin__._ = session_globals_dict.get('_')

            # run!
            offset = 0

            try:
                old_stdout = sys.stdout
                old_stderr = sys.stderr

                try:
                    if stream is not None:
                        sys.stdout = stream
                        sys.stderr = stream

                    if exec_source is not None:
                        try:
                            exec_code = self.compile(exec_source, 'exec')
                        except (OverflowError, SyntaxError, ValueError):
                            return self.error(stream, self.syntaxerror())

                        eval(exec_code, statement_module.__dict__)

                    if eval_source is not None:
                        if exec_source is not None:
                            offset = len(exec_source.split('\n'))

                        result = eval(eval_source, statement_module.__dict__)
                        sys.displayhook(result)
                finally:
                    sys.stdout = old_stdout
                    sys.stderr = old_stderr
            except DeadlineExceededError:
                logging.debug("is deadlineexceedederror in evaluate")
                raise DeadlineExceededError
            except:
                return self.error(stream, self.traceback(offset))

            # extract the new globals that this statement added
            new_globals = {}

            for name, val in statement_module.__dict__.items():
                if name not in old_globals or val != old_globals[name]:
                    new_globals[name] = val

            for name in old_globals:
                if name not in statement_module.__dict__:
                    session.remove_global(name)

            if any(isinstance(val, UNPICKLABLE_TYPES) for val in new_globals.values()):
                # this statement added an unpicklable global. store the statement and
                # the names of all of the globals it added in the unpicklables
                source = ""

                if exec_source:
                    source += exec_source
                if eval_source:
                    source += eval_source

                source += "\n"

                session.add_unpicklable(source, new_globals.keys())
                logging.debug('Storing this statement as an unpicklable.')
            else:
                # this statement didn't add any unpicklables. pickle and store the
                # new globals back into the datastore
                for name, val in new_globals.items():
                    if not name.startswith('__'):
                        try:
                            session.set_global(name, val)
                        except (TypeError, pickle.PicklingError):
                            pass

            # save '_' special variable into the datastore
            val = getattr(__builtin__, '_', None)

            try:
                session.set_global('_', val)
            except (TypeError, pickle.PicklingError):
                session.set_global('_', None)
        finally:
            sys.modules['__main__'] = old_main
            sys.displayhook = old_displayhook

            try:
                del __builtin__._
            except AttributeError:
                pass

        try:
            session.put()
        except RequestTooLargeError:
            stream.truncate(0) # clear output
            self.error(stream, ('Unable to process statement due to its excessive size.',))

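Note: evaluate() above temporarily swaps sys.displayhook so that expression results are both echoed to the user and bound to the `_` convenience name. A minimal standalone sketch of that swap (Python 2 syntax, to match the example; the logging.debug call is only there to show where it fits in):

import sys
import logging

def displayhook(value):
    if value is not None:
        logging.debug('displayhook called with %r', value)
        print value

old_displayhook = sys.displayhook
sys.displayhook = displayhook
try:
    # code compiled in 'single' mode routes expression results through sys.displayhook
    exec compile('40 + 2', '<string>', 'single')
finally:
    sys.displayhook = old_displayhook
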
Example 116

View license
@error.context_aware
def run(test, params, env):
    """
    KVM migration with destination problems.
    Contains a group of tests that exercise qemu behavior when problems
    happen on the destination side.

    Tests are described in the test class docstrings further down in the code.

    The tests need the param: nettype = bridge.

    :param test: kvm test object.
    :param params: Dictionary with test parameters.
    :param env: Dictionary with the test environment.
    """
    login_timeout = int(params.get("login_timeout", 360))
    mig_timeout = float(params.get("mig_timeout", "3600"))
    mig_protocol = params.get("migration_protocol", "tcp")

    test_rand = None
    mount_path = None
    while mount_path is None or os.path.exists(mount_path):
        test_rand = utils.generate_random_string(3)
        mount_path = ("%s/ni_mount_%s" %
                      (data_dir.get_data_dir(), test_rand))

    mig_dst = os.path.join(mount_path, "mig_dst")

    migration_exec_cmd_src = params.get("migration_exec_cmd_src",
                                        "gzip -c > %s")
    migration_exec_cmd_src = (migration_exec_cmd_src % (mig_dst))

    class MiniSubtest(object):

        def __new__(cls, *args, **kargs):
            self = super(MiniSubtest, cls).__new__(cls)
            ret = None
            exc_info = None
            if args is None:
                args = []
            try:
                try:
                    ret = self.test(*args, **kargs)
                except Exception:
                    exc_info = sys.exc_info()
            finally:
                if hasattr(self, "clean"):
                    try:
                        self.clean()
                    except Exception:
                        if exc_info is None:
                            raise
                    if exc_info:
                        raise exc_info[0], exc_info[1], exc_info[2]
            return ret

    def control_service(session, service, init_service, action, timeout=60):
        """
        Control a service on the guest.

        :param session: guest session used to run the service commands.
        :param service: name of the service (systemd unit) to control.
        :param init_service: name of the service for the old (SysV) service control.
        :param action: action to perform on the service (start|stop|restart).
        """
        status = utils_misc.get_guest_service_status(session, service,
                                                     service_former=init_service)
        if action == "start" and status == "active":
            logging.debug("%s already started, no need start it again.",
                          service)
            return
        if action == "stop" and status == "inactive":
            logging.debug("%s already stopped, no need stop it again.",
                          service)
            return
        try:
            session.cmd("systemctl --version", timeout=timeout)
            session.cmd("systemctl %s %s.service" % (action, service),
                        timeout=timeout)
        except:
            session.cmd("service %s %s" % (init_service, action),
                        timeout=timeout)

    def set_nfs_server(vm, share_cfg):
        """
        Start an NFS server on the guest.

        :param vm: Virtual machine that will act as the NFS server.
        :param share_cfg: export line written to /etc/exports on the guest.
        """
        session = vm.wait_for_login(timeout=login_timeout)
        cmd = "echo '%s' > /etc/exports" % (share_cfg)
        control_service(session, "nfs-server", "nfs", "stop")
        session.cmd(cmd)
        control_service(session, "nfs-server", "nfs", "start")
        session.cmd("iptables -F")
        session.close()

    def umount(mount_path):
        """
        Unmount the NFS mount_path.

        :param mount_path: path where the NFS dir was mounted.
        """
        utils.run("umount -f %s" % (mount_path))

    def create_file_disk(dst_path, size):
        """
        Create a file of the given size and create an ext3 filesystem on it.

        :param dst_path: Path to the file.
        :param size: Size of the file in MB.
        """
        utils.run("dd if=/dev/zero of=%s bs=1M count=%s" % (dst_path, size))
        utils.run("mkfs.ext3 -F %s" % (dst_path))

    def mount(disk_path, mount_path, options=None):
        """
        Mount a disk to a path.

        :param disk_path: Path to the disk.
        :param mount_path: Path where the disk will be mounted.
        :param options: String with options for mount.
        """
        if options is None:
            options = ""
        else:
            options = "%s" % options

        utils.run("mount %s %s %s" % (options, disk_path, mount_path))

    def find_disk_vm(vm, disk_serial):
        """
        Find the disk on the vm whose id ends with disk_serial.

        :param vm: VM where to find the disk.
        :param disk_serial: suffix of the disk id.

        :return: string disk path, or None if not found.
        """
        session = vm.wait_for_login(timeout=login_timeout)

        disk_path = os.path.join("/", "dev", "disk", "by-id")
        disks = session.cmd("ls %s" % disk_path).split("\n")
        session.close()
        disk = filter(lambda x: x.endswith(disk_serial), disks)
        if not disk:
            return None
        return os.path.join(disk_path, disk[0])

    def prepare_disk(vm, disk_path, mount_path):
        """
        Create an ext3 filesystem on the disk and mount it in the guest.

        :param vm: VM where to find the disk.
        :param disk_path: Path to the disk in the guest system.
        :param mount_path: Path where the disk will be mounted in the guest.
        """
        session = vm.wait_for_login(timeout=login_timeout)
        session.cmd("mkfs.ext3 -F %s" % (disk_path))
        session.cmd("mount %s %s" % (disk_path, mount_path))
        session.close()

    def disk_load(vm, src_path, dst_path, copy_timeout=None, dsize=None):
        """
        Start disk load. Cyclic copy from src_path to dst_path.

        :param vm: VM where to find a disk.
        :param src_path: Source of data
        :param dst_path: Path to destination
        :param copy_timeout: Timeout for copy
        :param dsize: Size of the data block which is periodically copied.
        """
        if dsize is None:
            dsize = 100
        session = vm.wait_for_login(timeout=login_timeout)
        cmd = ("nohup /bin/bash -c 'while true; do dd if=%s of=%s bs=1M "
               "count=%s; done;' 2> /dev/null &" % (src_path, dst_path, dsize))
        pid = re.search(r"\[.+\] (.+)",
                        session.cmd_output(cmd, timeout=copy_timeout))
        return pid.group(1)

    class IscsiServer_tgt(object):

        """
        Class to set up and start an iSCSI server (tgt variant).
        """

        def __init__(self):
            self.server_name = "autotest_guest_" + test_rand
            self.user = "user1"
            self.passwd = "pass"
            self.config = """
<target %s:dev01>
    backing-store %s
    incominguser %s %s
</target>
"""

        def set_iscsi_server(self, vm_ds, disk_path, disk_size):
            """
            Set up the iscsi server.

            :param vm_ds: VM where the iscsi server should be started.
            :param disk_path: path where the backing disk should be placed.
            :param disk_size: size of the new disk.
            """
            session = vm_ds.wait_for_login(timeout=login_timeout)

            session.cmd("dd if=/dev/zero of=%s bs=1M count=%s" % (disk_path,
                                                                  disk_size))
            status, output = session.cmd_status_output("setenforce 0")
            if status not in [0, 127]:
                logging.warn("Function setenforce fails.\n %s" % (output))

            config = self.config % (self.server_name, disk_path,
                                    self.user, self.passwd)
            cmd = "cat > /etc/tgt/conf.d/virt.conf << EOF" + config + "EOF"
            control_service(session, "tgtd", "tgtd", "stop")
            session.sendline(cmd)
            control_service(session, "tgtd", "tgtd", "start")
            session.cmd("iptables -F")
            session.close()

        def find_disk(self):
            disk_path = os.path.join("/", "dev", "disk", "by-path")
            disks = utils.run("ls %s" % disk_path).stdout.split("\n")
            disk = filter(lambda x: self.server_name in x, disks)
            if not disk:
                return None
            return os.path.join(disk_path, disk[0].strip())

        def connect(self, vm_ds):
            """
            Connect to iscsi server on guest.

            :param vm_ds: Guest where the iscsi server is running.

            :return: path where disk is connected.
            """
            ip_dst = vm_ds.get_address()
            utils.run("iscsiadm -m discovery -t st -p %s" % (ip_dst))

            server_ident = ('iscsiadm -m node --targetname "%s:dev01"'
                            ' --portal %s' % (self.server_name, ip_dst))
            utils.run("%s --op update --name node.session.auth.authmethod"
                      " --value CHAP" % (server_ident))
            utils.run("%s --op update --name node.session.auth.username"
                      " --value %s" % (server_ident, self.user))
            utils.run("%s --op update --name node.session.auth.password"
                      " --value %s" % (server_ident, self.passwd))
            utils.run("%s --login" % (server_ident))
            time.sleep(1.0)
            return self.find_disk()

        def disconnect(self):
            server_ident = ('iscsiadm -m node --targetname "%s:dev01"' %
                            (self.server_name))
            utils.run("%s --logout" % (server_ident))

    class IscsiServer(object):

        """
        Iscsi server implementation interface.
        """

        def __init__(self, iscsi_type, *args, **kargs):
            if iscsi_type == "tgt":
                self.ic = IscsiServer_tgt(*args, **kargs)
            else:
                raise NotImplementedError()

        def __getattr__(self, name):
            if self.ic:
                return self.ic.__getattribute__(name)
            raise AttributeError("Cannot find attribute %s in class" % name)

    class test_read_only_dest(MiniSubtest):

        """
        Migration to read-only destination by using a migration to file.

        1) Start guest with NFS server.
        2) Config NFS server share for read-only.
        3) Mount the read-only share to host.
        4) Start second guest and try to migrate to read-only dest.

        result) Migration should fail with an error message about the read-only dst.
        """

        def test(self):
            if params.get("nettype") != "bridge":
                raise error.TestNAError("Unable start test without params"
                                        " nettype=bridge.")

            vm_ds = env.get_vm("virt_test_vm2_data_server")
            vm_guest = env.get_vm("virt_test_vm1_guest")
            ro_timeout = int(params.get("read_only_timeout", "480"))
            exp_str = r".*Read-only file system.*"
            utils.run("mkdir -p %s" % (mount_path))

            vm_ds.verify_alive()
            vm_guest.create()
            vm_guest.verify_alive()

            set_nfs_server(vm_ds, "/mnt *(ro,async,no_root_squash)")

            mount_src = "%s:/mnt" % (vm_ds.get_address())
            mount(mount_src, mount_path,
                  "-o hard,timeo=14,rsize=8192,wsize=8192")
            vm_guest.migrate(mig_timeout, mig_protocol,
                             not_wait_for_migration=True,
                             migration_exec_cmd_src=migration_exec_cmd_src,
                             env=env)

            if not utils_misc.wait_for(lambda: process_output_check(
                                       vm_guest.process, exp_str),
                                       timeout=ro_timeout, first=2):
                raise error.TestFail("The Read-only file system warning not"
                                     " come in time limit.")

        def clean(self):
            if os.path.exists(mig_dst):
                os.remove(mig_dst)
            if os.path.exists(mount_path):
                umount(mount_path)
                os.rmdir(mount_path)

    class test_low_space_dest(MiniSubtest):

        """
        Migrate to a destination with low disk space.

        1) Start guest.
        2) Create a disk with low space.
        3) Try to migrate to the disk.

        result) Migration should fail with a warning about no space left on device.
        """

        def test(self):
            self.disk_path = None
            while self.disk_path is None or os.path.exists(self.disk_path):
                self.disk_path = ("%s/disk_%s" %
                                  (test.tmpdir, utils.generate_random_string(3)))

            disk_size = utils.convert_data_size(params.get("disk_size", "10M"),
                                                default_sufix='M')
            disk_size /= 1024 * 1024    # To MB.

            exp_str = r".*gzip: stdout: No space left on device.*"
            vm_guest = env.get_vm("virt_test_vm1_guest")
            utils.run("mkdir -p %s" % (mount_path))

            vm_guest.verify_alive()
            vm_guest.wait_for_login(timeout=login_timeout)

            create_file_disk(self.disk_path, disk_size)
            mount(self.disk_path, mount_path, "-o loop")

            vm_guest.migrate(mig_timeout, mig_protocol,
                             not_wait_for_migration=True,
                             migration_exec_cmd_src=migration_exec_cmd_src,
                             env=env)

            if not utils_misc.wait_for(lambda: process_output_check(
                                       vm_guest.process, exp_str),
                                       timeout=60, first=1):
                raise error.TestFail("The migration to destination with low "
                                     "storage space didn't fail as it should.")

        def clean(self):
            if os.path.exists(mount_path):
                umount(mount_path)
                os.rmdir(mount_path)
            if os.path.exists(self.disk_path):
                os.remove(self.disk_path)

    class test_extensive_io(MiniSubtest):

        """
        Abstract class for "migrate after extensive I/O" tests. It only defines
        the basic functionality and the interface for the other tests.

        1) Start ds_guest which starts the data server.
        2) Create a disk for data stress in ds_guest.
        3) Share and prepare the disk from ds_guest.
        4) Mount the disk to mount_path.
        5) Create a disk for the second guest in the mounted path.
        6) Start the second guest with the prepared disk.
        7) Start stress on the prepared disk on the second guest.
        8) Wait a few seconds.
        9) Restart the storage server.
        10) Migrate the second guest.

        result) Migration should be successful.
        """

        def test(self):
            self.copier_pid = None
            if params.get("nettype") != "bridge":
                raise error.TestNAError("Unable start test without params"
                                        " nettype=bridge.")

            self.disk_serial = params.get("drive_serial_image2_vm1",
                                          "nfs-disk-image2-vm1")
            self.disk_serial_src = params.get("drive_serial_image1_vm1",
                                              "root-image1-vm1")
            self.guest_mount_path = params.get("guest_disk_mount_path", "/mnt")
            self.copy_timeout = int(params.get("copy_timeout", "1024"))

            self.copy_block_size = params.get("copy_block_size", "100M")
            self.copy_block_size = utils.convert_data_size(
                self.copy_block_size,
                "M")
            self.disk_size = "%s" % (self.copy_block_size * 1.4)
            self.copy_block_size /= 1024 * 1024

            self.server_recover_timeout = (
                int(params.get("server_recover_timeout", "240")))

            utils.run("mkdir -p %s" % (mount_path))

            self.test_params()
            self.config()

            self.vm_guest_params = params.copy()
            self.vm_guest_params["images_base_dir_image2_vm1"] = mount_path
            self.vm_guest_params["image_name_image2_vm1"] = "ni_mount_%s/test" % (test_rand)
            self.vm_guest_params["image_size_image2_vm1"] = self.disk_size
            self.vm_guest_params = self.vm_guest_params.object_params("vm1")
            self.image2_vm_guest_params = (self.vm_guest_params.
                                           object_params("image2"))

            env_process.preprocess_image(test,
                                         self.image2_vm_guest_params,
                                         env)
            self.vm_guest.create(params=self.vm_guest_params)

            self.vm_guest.verify_alive()
            self.vm_guest.wait_for_login(timeout=login_timeout)
            self.workload()

            self.restart_server()

            self.vm_guest.migrate(mig_timeout, mig_protocol, env=env)

            try:
                self.vm_guest.verify_alive()
                self.vm_guest.wait_for_login(timeout=login_timeout)
            except aexpect.ExpectTimeoutError:
                raise error.TestFail("Migration should be successful.")

        def test_params(self):
            """
            Test-specific params. May be overridden in an inherited class.
            """
            pass

        def config(self):
            """
            Test specific config.
            """
            raise NotImplementedError()

        def workload(self):
            disk_path = find_disk_vm(self.vm_guest, self.disk_serial)
            if disk_path is None:
                raise error.TestFail("It was impossible to find disk on VM")

            prepare_disk(self.vm_guest, disk_path, self.guest_mount_path)

            disk_path_src = find_disk_vm(self.vm_guest, self.disk_serial_src)
            dst_path = os.path.join(self.guest_mount_path, "test.data")
            self.copier_pid = disk_load(self.vm_guest, disk_path_src, dst_path,
                                        self.copy_timeout, self.copy_block_size)

        def restart_server(self):
            raise NotImplementedError()

        def clean_test(self):
            """
            Test specific cleanup.
            """
            pass

        def clean(self):
            if self.copier_pid:
                try:
                    if self.vm_guest.is_alive():
                        session = self.vm_guest.wait_for_login(timeout=login_timeout)
                        session.cmd("kill -9 %s" % (self.copier_pid))
                except:
                    logging.warn("It was impossible to stop copier. Something "
                                 "probably happened with GUEST or NFS server.")

            if params.get("kill_vm") == "yes":
                if self.vm_guest.is_alive():
                    self.vm_guest.destroy()
                    utils_misc.wait_for(lambda: self.vm_guest.is_dead(), 30,
                                        2, 2, "Waiting for dying of guest.")
                qemu_img = qemu_storage.QemuImg(self.image2_vm_guest_params,
                                                mount_path,
                                                None)
                qemu_img.check_image(self.image2_vm_guest_params,
                                     mount_path)

            self.clean_test()

    class test_extensive_io_nfs(test_extensive_io):

        """
        Migrate after extensive I/O over NFS.

        1) Start ds_guest which starts the NFS server.
        2) Create a disk for data stress in ds_guest.
        3) Share the disk over NFS.
        4) Mount the disk to mount_path.
        5) Create a disk for the second guest in the mounted path.
        6) Start the second guest with the prepared disk.
        7) Start stress on the prepared disk on the second guest.
        8) Wait a few seconds.
        9) Restart the NFS server.
        10) Migrate the second guest.

        result) Migration should be successful.
        """

        def config(self):
            vm_ds = env.get_vm("virt_test_vm2_data_server")
            self.vm_guest = env.get_vm("vm1")
            self.image2_vm_guest_params = None
            self.copier_pid = None
            self.qemu_img = None

            vm_ds.verify_alive()
            self.control_session_ds = vm_ds.wait_for_login(timeout=login_timeout)

            set_nfs_server(vm_ds, "/mnt *(rw,async,no_root_squash)")

            mount_src = "%s:/mnt" % (vm_ds.get_address())
            mount(mount_src, mount_path,
                  "-o hard,timeo=14,rsize=8192,wsize=8192")

        def restart_server(self):
            time.sleep(10)  # Wait a while until the copy starts working.
            control_service(self.control_session_ds, "nfs-server",
                            "nfs", "stop")  # Stop NFS server
            time.sleep(5)
            control_service(self.control_session_ds, "nfs-server",
                            "nfs", "start")  # Start NFS server

            """
            Touch waits until all previous requests are invalidated
            (NFS grace period). Without the grace period, qemu startup takes
            too long and the timers for machine creation die.
            """
            qemu_img = qemu_storage.QemuImg(self.image2_vm_guest_params,
                                            mount_path,
                                            None)
            utils.run("touch %s" % (qemu_img.image_filename),
                      self.server_recover_timeout)

        def clean_test(self):
            if os.path.exists(mount_path):
                umount(mount_path)
                os.rmdir(mount_path)

    class test_extensive_io_iscsi(test_extensive_io):

        """
        Migrate after extensive I/O over iSCSI.

        1) Start ds_guest which starts the iscsi server.
        2) Create a disk for data stress in ds_guest.
        3) Share the disk over iscsi.
        4) Connect to the disk on the host.
        5) Prepare a partition on the disk.
        6) Mount the disk to mount_path.
        7) Create a disk for the second guest in the mounted path.
        8) Start the second guest with the prepared disk.
        9) Start stress on the prepared disk on the second guest.
        10) Wait a few seconds.
        11) Restart the iscsi server.
        12) Migrate the second guest.

        result) Migration should be successful.
        """

        def test_params(self):
            self.iscsi_variant = params.get("iscsi_variant", "tgt")
            self.ds_disk_path = os.path.join(self.guest_mount_path, "test.img")

        def config(self):
            vm_ds = env.get_vm("virt_test_vm2_data_server")
            self.vm_guest = env.get_vm("vm1")
            self.image2_vm_guest_params = None
            self.copier_pid = None
            self.qemu_img = None

            vm_ds.verify_alive()
            self.control_session_ds = vm_ds.wait_for_login(timeout=login_timeout)

            self.isci_server = IscsiServer("tgt")
            disk_path = os.path.join(self.guest_mount_path, "disk1")
            self.isci_server.set_iscsi_server(vm_ds, disk_path,
                                              (int(float(self.disk_size) * 1.1) / (1024 * 1024)))
            self.host_disk_path = self.isci_server.connect(vm_ds)

            utils.run("mkfs.ext3 -F %s" % (self.host_disk_path))
            mount(self.host_disk_path, mount_path)

        def restart_server(self):
            time.sleep(10)  # Wait a while until the copy starts working.
            control_service(self.control_session_ds, "tgtd",
                            "tgtd", "stop", 240)  # Stop Iscsi server
            time.sleep(5)
            control_service(self.control_session_ds, "tgtd",
                            "tgtd", "start", 240)  # Start Iscsi server

            """
            Wait until the iscsi server is accessible again after the restart.
            """
            qemu_img = qemu_storage.QemuImg(self.image2_vm_guest_params,
                                            mount_path,
                                            None)
            utils.run("touch %s" % (qemu_img.image_filename),
                      self.server_recover_timeout)

        def clean_test(self):
            if os.path.exists(mount_path):
                umount(mount_path)
                os.rmdir(mount_path)
            if os.path.exists(self.host_disk_path):
                self.isci_server.disconnect()

    test_type = params.get("test_type")
    if (test_type in locals()):
        tests_group = locals()[test_type]
        tests_group()
    else:
        raise error.TestFail("Test group '%s' is not defined in"
                             " migration_with_dst_problem test" % test_type)

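Note: the MiniSubtest helper near the top of this example runs a subtest the moment the class is instantiated: __new__ calls self.test(), always calls self.clean() if present, and re-raises any exception from the test body. A compressed sketch of that run-on-instantiation pattern, independent of the autotest/virt-test utilities (class names below are illustrative):

import logging

class RunOnInstantiate(object):
    def __new__(cls, *args, **kwargs):
        self = super(RunOnInstantiate, cls).__new__(cls)
        try:
            return self.test(*args, **kwargs)
        finally:
            if hasattr(self, "clean"):
                self.clean()   # cleanup runs whether the test passed or failed

class example_subtest(RunOnInstantiate):
    def test(self):
        logging.debug("running test body")
    def clean(self):
        logging.debug("cleaning up")

example_subtest()   # running the test is just instantiating the class
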
Example 117

Project: jcvi
Source File: phylo.py
View license
def build(args):
    """
    %prog build [prot.fasta] cds.fasta [options] --outdir=outdir

    This function wraps the following steps:
    1. MSA using ClustalW2 or MUSCLE (default)
    2. (optional) alignment editing using Gblocks
    3. build an NJ tree using PHYLIP from the EMBOSS package
       (seq names must be unique within their first 10 chars, a restriction of PHYLIP)
    4. build an ML tree using RAxML (default) or PHYML, via the keywords raxml or phyml
       *WARNING* may be slow with large datasets

    If an outgroup file is provided, the resulting tree will be rooted on the
    outgroup according to the order in the file, i.e. the name in row 1 will be
    tried first; if not found, row 2 will be used, etc.
    Tail-truncated names can be provided as long as they are unique among the seqs.
    If not unique, the first occurrence will be used. For example, if you have
    two moss sequences in your input, the tree will be rooted on the first moss
    sequence encountered by the program, unless they are monophyletic, in which
    case the root will be their common ancestor.

    --stree and --smap are required if --treefix is set.

    Trees can be edited again using an editor such as Dendroscope. This
    is the recommended way to get highly customized trees.

    Newick format trees will be deposited into outdir (. by default).
    """
    from jcvi.formats.fasta import translate

    p = OptionParser(build.__doc__)
    p.add_option("--longest", action="store_true",
                 help="Get longest ORF, only works if no pep file, "\
                      "e.g. ESTs [default: %default]")
    p.add_option("--nogblocks", action="store_true",
                 help="don't use Gblocks to edit alignment [default: %default]")
    p.add_option("--synonymous", action="store_true",
                 help="extract synonymous sites of the alignment [default: %default]")
    p.add_option("--fourfold", action="store_true",
                 help="extract fourfold degenerate sites of the alignment [default: %default]")
    p.add_option("--msa", default="muscle", choices=("clustalw", "muscle"),
                 help="software used to align the proteins [default: %default]")
    p.add_option("--noneighbor", action="store_true",
                 help="don't build NJ tree [default: %default]")
    p.add_option("--ml", default=None, choices=("raxml", "phyml"),
                 help="software used to build ML tree [default: %default]")
    p.add_option("--outgroup",
                 help="path to file containing outgroup orders [default: %default]")
    p.add_option("--SH", help="path to reference Newick tree [default: %default]")
    p.add_option("--shout", default="SH_out.txt", \
                 help="SH output file name [default: %default]")
    p.add_option("--treefix", action="store_true",
                 help="use TreeFix to rearrange ML tree [default: %default]")
    p.add_option("--stree", help="path to species Newick tree [default: %default]")
    p.add_option("--smap", help="path to smap file: " \
                    "gene_name_pattern<tab>species_name [default: %default]")
    p.set_outdir()

    opts, args = p.parse_args(args)
    gblocks = not opts.nogblocks
    synonymous = opts.synonymous
    fourfold = opts.fourfold
    neighbor = not opts.noneighbor
    outgroup = opts.outgroup
    outdir = opts.outdir

    if len(args) == 1:
        protein_file, dna_file = None, args[0]
    elif len(args) == 2:
        protein_file, dna_file = args
    else:
        print >>sys.stderr, "Incorrect arguments"
        sys.exit(not p.print_help())

    if opts.treefix:
        stree = opts.stree
        smap = opts.smap
        assert stree and smap, "TreeFix requires stree and smap files."
        opts.ml = "raxml"

    treedir = op.join(outdir, "tree")
    mkdir(treedir)

    if not protein_file:
        protein_file = dna_file + ".pep"
        translate_args = [dna_file, "--outfile=" + protein_file]
        if opts.longest:
            translate_args += ["--longest"]
        dna_file, protein_file = translate(translate_args)

    work_dir = op.join(outdir, "alignment")
    mkdir(work_dir)
    p_recs = list(SeqIO.parse(open(protein_file), "fasta"))
    if opts.msa == "clustalw":
        align_fasta = clustal_align_protein(p_recs, work_dir)
    elif opts.msa == "muscle":
        align_fasta = muscle_align_protein(p_recs, work_dir)

    n_recs = list(SeqIO.parse(open(dna_file), "fasta"))
    mrtrans_fasta = run_mrtrans(align_fasta, n_recs, work_dir, outfmt="fasta")

    if not mrtrans_fasta:
        logging.debug("pal2nal aborted. " \
            "Cannot reliably build tree for {0}".format(dna_file))
        return

    codon_aln_fasta = mrtrans_fasta
    if gblocks:
        gb_fasta = run_gblocks(mrtrans_fasta)
        codon_aln_fasta = gb_fasta if gb_fasta else codon_aln_fasta

    else:
        if synonymous:
            codon_aln_fasta = subalignment(mrtrans_fasta, "synonymous")

        if fourfold:
            codon_aln_fasta = subalignment(mrtrans_fasta, "fourfold")

    if not neighbor and not opts.ml:
        return codon_aln_fasta

    alignment = AlignIO.read(codon_aln_fasta, "fasta")
    if len(alignment) <= 3:
        raise ValueError("Too few seqs to build tree.")

    mkdir(op.join(treedir, "work"))
    if neighbor:
        out_file = op.join(treedir, op.basename(dna_file).rsplit(".", 1)[0] + \
                ".NJ.unrooted.dnd")
        try:
            outfile, phy_file = build_nj_phylip(alignment, \
                outfile=out_file, outgroup=outgroup, work_dir=treedir)
        except:
            print "NJ tree cannot be built for {0}".format(dna_file)

        if opts.SH:
            reftree = opts.SH
            querytree = outfile
            SH_raxml(reftree, querytree, phy_file, shout=opts.shout)

    if opts.ml:
        out_file = op.join(treedir, op.basename(dna_file).rsplit(".", 1)[0] + \
                ".ML.unrooted.dnd")

        if opts.ml == "phyml":
            try:
                outfile, phy_file = build_ml_phyml\
                    (alignment, outfile=out_file, work_dir=treedir)
            except:
                print "ML tree cannot be built for {0}".format(dna_file)

        elif opts.ml == "raxml":
            try:
                outfile, phy_file = build_ml_raxml\
                    (alignment, outfile=out_file, work_dir=treedir)
            except:
                print "ML tree cannot be built for {0}".format(dna_file)

        if outgroup:
            new_out_file = out_file.replace(".unrooted", "")
            t = smart_reroot(treefile=out_file, outgroupfile=outgroup, \
                outfile=new_out_file)
            if t == new_out_file:
                sh("rm %s" % out_file)
                outfile = new_out_file

        if opts.SH:
            reftree = opts.SH
            querytree = outfile
            SH_raxml(reftree, querytree, phy_file, shout=opts.shout)

        if opts.treefix:
            treefix_dir = op.join(treedir, "treefix")
            assert mkdir(treefix_dir, overwrite=True)

            sh("cp {0} {1}/".format(outfile, treefix_dir))
            input = op.join(treefix_dir, op.basename(outfile))
            aln_file = input.rsplit(".", 1)[0] + ".fasta"
            SeqIO.write(alignment, aln_file, "fasta")

            outfile = run_treefix(input=input, stree_file=stree, smap_file=smap, \
                        a_ext=".fasta", o_ext=".dnd", n_ext = ".treefix.dnd")

    return outfile

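Note: build() above logs and returns early whenever an intermediate step yields no usable output (for instance when pal2nal fails), rather than raising. A stripped-down sketch of that guard pattern (the step/run_step names are placeholders, not jcvi APIs):

import logging

def run_step(step, infile):
    outfile = step(infile)      # each step returns an output path, or None/"" on failure
    if not outfile:
        logging.debug("%s aborted. Cannot reliably continue for %s",
                      step.__name__, infile)
        return None
    return outfile
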
Example 118

Project: lnst
Source File: Netperf.py
View license
    def _run_client(self, cmd):
        logging.debug("running as client...")

        res_data = {}
        res_data["testname"] = self._testname

        rv = 0
        results = []
        rates = []
        for i in range(1, self._runs+1):
            if self._runs > 1:
                logging.info("Netperf starting run %d" % i)
            clients = []
            client_results = []
            for i in range(0, self._num_parallel):
                clients.append(ShellProcess(cmd))

            for client in clients:
                ret_code = None
                try:
                    ret_code = client.wait()
                    rv += ret_code
                except OSError as e:
                    if e.errno == errno.EINTR:
                        client.kill()

                output = client.read_nonblocking()
                logging.debug(output)

                if ret_code is not None and ret_code == 0:
                    client_results.append(self._parse_output(output))

            if len(client_results) > 0:
                # accumulate all the parallel results into one
                result = client_results[0]
                for res in client_results[1:]:
                    result = self._sum_results(result, res)

                results.append(result)
                rates.append(results[-1]["rate"])

        if len(results) > 1:
            res_data["results"] = results

        if len(rates) > 0:
            rate = sum(rates)/len(rates)
        else:
            rate = 0.0

        if len(rates) > 1:
            # setting deviation to 2*std_deviation because of the 68-95-99.7
            # rule; this seems comparable to the -I 99 netperf setting
            res_data["std_deviation"] = std_deviation(rates)
            rate_deviation = 2*res_data["std_deviation"]
        elif len(rates) == 1 and self._confidence is not None:
            result = results[0]
            rate_deviation = rate * (result["confidence"][1] / 100)
        else:
            rate_deviation = 0.0

        res_data["rate"] = rate
        res_data["rate_deviation"] = rate_deviation

        rate_pretty = self._pretty_rate(rate)
        rate_dev_pretty = self._pretty_rate(rate_deviation, unit=rate_pretty["unit"])

        if rv != 0 and self._runs == 1:
            res_data["msg"] = "Could not get performance throughput!"
            logging.info(res_data["msg"])
            return (False, res_data)
        elif rv != 0 and self._runs > 1:
            res_data["msg"] = "At least one of the Netperf runs failed, "\
                              "check the logs and result data for more "\
                              "information."
            logging.info(res_data["msg"])
            return (False, res_data)

        res_val = False
        res_data["msg"] = "Measured rate was %.2f +-%.2f %s" %\
                                            (rate_pretty["rate"],
                                             rate_dev_pretty["rate"],
                                             rate_pretty["unit"])
        if rate > 0.0:
            res_val = True
        else:
            res_val = False
            return (res_val, res_data)

        if self._max_deviation is not None:
            if self._max_deviation["type"] == "percent":
                percentual_deviation = (rate_deviation / rate) * 100
                if percentual_deviation > self._max_deviation["value"]:
                    res_val = False
                    res_data["msg"] = "Measured rate %.2f +-%.2f %s has bigger "\
                                      "deviation than allowed (+-%.2f %%)" %\
                                      (rate_pretty["rate"],
                                       rate_dev_pretty["rate"],
                                       rate_pretty["unit"],
                                       self._max_deviation["value"])
                    return (res_val, res_data)
            elif self._max_deviation["type"] == "absolute":
                if rate_deviation > self._max_deviation["value"]["rate"]:
                    pretty_deviation = self._pretty_rate(self._max_deviation["value"]["rate"])
                    res_val = False
                    res_data["msg"] = "Measured rate %.2f +-%.2f %s has bigger "\
                                      "deviation than allowed (+-%.2f %s)" %\
                                      (rate_pretty["rate"],
                                       rate_dev_pretty["rate"],
                                       rate_pretty["unit"],
                                       pretty_deviation["rate"],
                                       pretty_deviation["unit"])
                    return (res_val, res_data)
        if self._threshold_interval is not None:
            result_interval = (rate - rate_deviation,
                               rate + rate_deviation)

            threshold_pretty = self._pretty_rate(self._threshold["rate"])
            threshold_dev_pretty = self._pretty_rate(self._threshold_deviation["rate"],
                                                     unit = threshold_pretty["unit"])

            if self._threshold_interval[0] > result_interval[1]:
                res_val = False
                res_data["msg"] = "Measured rate %.2f +-%.2f %s is lower "\
                                  "than threshold %.2f +-%.2f %s" %\
                                  (rate_pretty["rate"],
                                   rate_dev_pretty["rate"],
                                   rate_pretty["unit"],
                                   threshold_pretty["rate"],
                                   threshold_dev_pretty["rate"],
                                   threshold_pretty["unit"])
                return (res_val, res_data)
            else:
                res_val = True
                res_data["msg"] = "Measured rate %.2f +-%.2f %s is higher "\
                                  "than threshold %.2f +-%.2f %s" %\
                                  (rate_pretty["rate"],
                                   rate_dev_pretty["rate"],
                                   rate_pretty["unit"],
                                   threshold_pretty["rate"],
                                   threshold_dev_pretty["rate"],
                                   threshold_pretty["unit"])
                return (res_val, res_data)
        return (res_val, res_data)

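Note: the deviation handling in _run_client() reports the measured rate as mean +- 2*std_deviation; by the 68-95-99.7 rule roughly 95% of normally distributed samples fall within two standard deviations, which is why the comment calls it loosely comparable to netperf's -I 99 option. A small sketch of that calculation (lnst's std_deviation helper is not shown in the excerpt, so a population standard deviation is written out here as an assumption):

import math
import logging

def std_deviation(values):
    mean = sum(values) / float(len(values))
    variance = sum((v - mean) ** 2 for v in values) / float(len(values))
    return math.sqrt(variance)

rates = [941.2, 936.8, 944.5]                 # example per-run throughputs
rate = sum(rates) / float(len(rates))
rate_deviation = 2 * std_deviation(rates)
logging.debug("Measured rate %.2f +-%.2f", rate, rate_deviation)
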
Example 119

Project: seedBank
Source File: seedbank.py
View license
def argument_parser():
    """process the arguments"""
    parse_arg = parse.ParseArguments(cfg)

    parser = argparse.ArgumentParser(description='seedBank - Debian/Ubuntu '
        'netboot installations the way it is meant to be... (c) 2009-2015 '
        'Jasper Poppe <[email protected]>', epilog='for more information '
        'visit: http://www.infrastructureanywhere.com',
        fromfile_prefix_chars='@')
    parser.add_argument('--version', action='version', version=__version__)
    subparsers = parser.add_subparsers(help='commands')

    parser_list = subparsers.add_parser('list',
        help='list resources like netboot images, seed files, Puppet '
        'manifests, configuration overrides, file overlays, pxelinux.cfg files'
        ', netboot images and ISOs', formatter_class=RawTextHelpFormatter)
    parser_list.add_argument('-a', '--all', action='store_true',
        help='list all resources')
    parser_list.add_argument('-n', '--netboots', action='store_true',
        help='list releases which are available for netboot\ninstallations'
        ', names starting with an asterisk are\nready to use\nnetboot images '
        'are managed by the "seedbank manage -n"\ncommand')
    parser_list.add_argument('-i', '--isos', action='store_true',
        help='list ISOs which are used for building (unattended)\ninstallation '
        'ISOs ISO names starting with an asterisk\nare ready to use\nISOs '
        'are managed by the "seedbank manage" command')
    parser_list.add_argument('-s', '--seeds',  action='store_true',
        help='list available seed files\nseed files are (partial) preseed ' 
        'files which are used\nfor providing answers to the installer\n'
        'default location: /etc/seedbank/seeds')
    parser_list.add_argument('-c', '--configs', action='store_true',
        help='list available configs\nconfigs are used for overriding the '
        'default configuration\nand providing default command line options '
        'for the\n"seedbank pxe" and "seedbank iso commands"\ndefault '
        'location: /etc/seedbank/configs')
    parser_list.add_argument('-o', '--overlays', action='store_true',
        help='list available overlays\nthe contents of an overlay directory '
        'will be copied over\nthe file system just before the end of an '
        'installation\ndefault location: /etc/seedbank/overlays')
    parser_list.add_argument('-p', '--puppet', action='store_true',
        help='list available Puppet manifests\nWhen you enable one or more '
        'Puppet manifests they will\nbe applied once by a stand alone Puppet '
        'instance,\ndirectly after the first boot of the machine\nwhich has '
        'been installed\ndefault location: /etc/seedbank/manifests')
    parser_list.add_argument('-P', '--pxe', action='store_true',
        help='list all "pxelinux.cfg" configs\npxelinux.cfg files are '
        'generated by the "seedbank pxe"\ncommand and are used to provide '
        'machine specific\ninformation to the installer after the machine '
        'PXE boots\nvia the network, the variables stored in those file\n'
        'comments are used by the seedBank daemon for\ngenerating the preseed '
        'file, those variables could also\nbe used in file overlay templates\n'
        'default location: /var/lib/tftpboot/pxelinux.cfg')
    parser_list.set_defaults(func=parse_arg.list)

    parser_shared = argparse.ArgumentParser(add_help=False)
    parser_shared.add_argument('-o', '--overlay',  default=None, help='file '
        'overlay which will be copied over the file system before the end of '
        'the installation')
    parser_shared.add_argument('-s', '--seed', help='override the default '
        'preseed file (the default preseed file has the name of the '
        'distribution, e.g: squeeze or precise)')
    parser_shared.add_argument('-a', '--additional', action='append',
        default=[], metavar='SEED', help='append additional seed files to the '
        'default seed file like disk recipes, repositories or other additional '
        '(custom) seeds')
    parser_shared.add_argument('fqdn', nargs='?', help='fully qualified domain '
        'name of the node to install')
    parser_shared.add_argument('-p', '--puppet', action='append',
        metavar='MANIFEST', default=[], help='choose one or more Puppet '
        'manifest(s) to apply after the installation')
    parser_shared.add_argument('-c', '--config', default=None, help='override '
        'template (pxe and seed) settings and set command line arguments,'
        'could also be used for creating machine profiles')

    parser_pxe = subparsers.add_parser('pxe', parents=[parser_shared],
        help='manage netboot installations, prepare a pxelinux.cfg '
        'file with all the settings required for a netboot installation')
    parser_pxe.add_argument('-r', '--release', help='release name (default: '
        'settings -> default_release -> pxe)')
    parser_pxe.add_argument('-m', '--macaddress',
        help='use a MAC address instead of an IP address converted to '
        'hexadecimal for the pxelinux.cfg configuration file name, the '
        'advantage of this is that no DNS lookups are needed')
    parser_pxe.add_argument('-v', '--variables', nargs=2, action='append',
        metavar=('KEY', 'VALUE'),
        default=[], help='add one or more additional pxe variables which '
        'will be stored in the generated pxelinux.cfg file and could be used '
        'by seedBank templates in the overlay directory and the disable and '
        'enable hooks')
    parser_pxe.set_defaults(func=parse_arg.pxe)

    parser_iso = subparsers.add_parser('iso', parents=[parser_shared],
        help='build an (unattended) installation ISO')
    parser_iso.add_argument('-r', '--release', help='release name (default: '
        'settings -> default_release -> iso)')
    parser_iso.add_argument('-i', '--isofile', help='file name and location '
        'of the generated ISO (default: ./<fqdn>.iso)')
    parser_iso.add_argument('-v', '--variables', nargs=2, action='append',
        metavar=('KEY', 'VALUE'),
        default=[], help='add (or overrides) one or more seed and or overlay '
        'variables which could be used by seedBank templates in the overlay '
        'directory and preseed files')
    parser_iso.set_defaults(func=parse_arg.iso)

    parser_manage = subparsers.add_parser('manage', help='download and '
        'manage netboot images, syslinux files and ISOs')
    group = parser_manage.add_mutually_exclusive_group()
    group.add_argument('-s', '--syslinux', action='store_true',
        help='download the syslinux archive and extract the pxelinux.0, '
        'menu.c32 and vesamenu.c32 files and place those in the tftpboot '
        'directory. (those files are required for doing a PXE boot)')
    group.add_argument('-n', '--netboot', action='store', metavar='RELEASE',
        help='download and prepare a netboot image, when the release has been '
        'defined in the distributions -> firmware section Debian "non free" '
        'firmware files will be integrated into the netboot image, the '
        'contents of the netboot archive will be placed in the configured '
        'tftpboot path')
    group.add_argument('-i', '--iso', action='store', metavar='RELEASE',
        help='download ISO images which are used by the "seedbank iso command"')
    group.add_argument('-r', '--remove', action='store', metavar='RELEASE',
        help='remove an iso or netboot images from the tftpboot and seedBank '
        'archives directories')
    group.add_argument('-o', '--overlay', action='store_true',
        help='update or create <overlay>.permissions for all overlays, those '
        'files contain user, group and permissions which will be set by a '
        'dynamically generated script just before the end of an installation '
        'after the overlay got applied')
    group.set_defaults(func=parse_arg.manage)

    parser_daemon = subparsers.add_parser('daemon', help='seedBank daemon')
    parser_daemon.add_argument('-s', '--start', action='store_true',
        help='start the seedBank daemon which dynamically provides resources '
        'used by the installer, it also takes care of disabling the pxelinux '
        'configuration files after a successful installation')
    parser_daemon.add_argument('-b', '--bottle', action='store_true',
        help='start the bottle')
    parser_daemon.set_defaults(func=parse_arg.daemon)

    args = parser.parse_args()
    logging.debug('given arguments: %s', args)

    if len(sys.argv) == 2:
        if sys.argv[1] == 'list':
            parser_list.print_help()
        if sys.argv[1] == 'pxe':
            parser_pxe.print_help()
        if sys.argv[1] == 'iso':
            parser_iso.print_help()
        elif sys.argv[1] == 'manage':
            parser_manage.print_help()
        elif sys.argv[1] == 'daemon':
            parser_daemon.print_help()
    else:
        args.func(args)
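
The only logging.debug call in this example, logging.debug('given arguments: %s', args), passes the parsed Namespace as a separate argument instead of pre-formatting the string, so the interpolation is skipped entirely unless the DEBUG level is enabled. A minimal sketch of that pattern outside of seedBank (the parser below is a made-up stand-in, not the real seedBank CLI):

import argparse
import logging

logging.basicConfig(level=logging.DEBUG)

parser = argparse.ArgumentParser(description='minimal example')
parser.add_argument('-r', '--release', default='stable')
args = parser.parse_args([])

# lazy %-style formatting: args is only rendered if DEBUG is enabled
logging.debug('given arguments: %s', args)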

Example 121

Project: tp-libvirt
Source File: virsh_setmem.py
View license
def run(test, params, env):
    """
    Test command: virsh setmem.

    1) Prepare vm environment.
    2) Handle params.
    3) Prepare libvirtd status.
    4) Run test command and wait for the current memory to become stable.
    5) Recover environment.
    6) Check result.
    """

    def vm_usable_mem(session):
        """
        Get total usable RAM from /proc/meminfo
        """
        cmd = "cat /proc/meminfo"
        proc_mem = session.cmd_output(cmd)
        total_usable_mem = re.search(r'MemTotal:\s+(\d+)\s+[kK]B',
                                     proc_mem).group(1)
        return int(total_usable_mem)

    def vm_unusable_mem(session):
        """
        Get the unusable RAM of the VM.
        """
        # Get total physical memory from dmidecode
        cmd = "dmidecode -t 17"
        dmi_mem = session.cmd_output(cmd)
        total_physical_mem = reduce(lambda x, y: int(x) + int(y),
                                    re.findall(r'Size:\s(\d+)\sMB', dmi_mem))
        return int(total_physical_mem) * 1024 - vm_usable_mem(session)

    def make_domref(domarg, vm_ref, domid, vm_name, domuuid):
        """
        Create domain options of command
        """
        # Specify domain as argument or parameter
        if domarg == "yes":
            dom_darg_key = "domainarg"
        else:
            dom_darg_key = "domain"

        # How to reference domain
        if vm_ref == "domid":
            dom_darg_value = domid
        elif vm_ref == "domname":
            dom_darg_value = vm_name
        elif vm_ref == "domuuid":
            dom_darg_value = domuuid
        elif vm_ref == "none":
            dom_darg_value = None
        elif vm_ref == "emptystring":
            dom_darg_value = '""'
        else:  # stick in value directly
            dom_darg_value = vm_ref

        return {dom_darg_key: dom_darg_value}

    def make_sizeref(sizearg, mem_ref, original_mem):
        """
        Create size options of command
        """
        if sizearg == "yes":
            size_darg_key = "sizearg"
        else:
            size_darg_key = "size"

        if mem_ref == "halfless":
            size_darg_value = "%d" % (original_mem / 2)
        elif mem_ref == "halfmore":
            size_darg_value = "%d" % int(original_mem * 1.5)  # no fraction
        elif mem_ref == "same":
            size_darg_value = "%d" % original_mem
        elif mem_ref == "emptystring":
            size_darg_value = '""'
        elif mem_ref == "zero":
            size_darg_value = "0"
        elif mem_ref == "toosmall":
            size_darg_value = "1024"
        elif mem_ref == "toobig":
            size_darg_value = "1099511627776"  # (KiB) One Petabyte
        elif mem_ref == "none":
            size_darg_value = None
        else:  # stick in value directly
            size_darg_value = mem_ref

        return {size_darg_key: size_darg_value}

    def cal_deviation(actual, expected):
        """
        Calculate deviation of actual result and expected result
        """
        numerator = float(actual)
        denominator = float(expected)
        if numerator > denominator:
            numerator = denominator
            denominator = float(actual)
        return 100 - (100 * (numerator / denominator))

    def is_old_libvirt():
        """
        Check if libvirt is old version
        """
        regex = r'\s+\[--size\]\s+'
        return bool(not virsh.has_command_help_match('setmem', regex))

    def print_debug_stats(original_inside_mem, original_outside_mem,
                          test_inside_mem, test_outside_mem,
                          expected_outside_mem, expected_inside_mem,
                          delta_percentage, unusable_mem):
        """
        Print debug message for test
        """
        # Calculate deviation
        inside_deviation = cal_deviation(test_inside_mem, expected_inside_mem)
        outside_deviation = cal_deviation(test_outside_mem, expected_outside_mem)
        dbgmsg = ("Unusable memory of VM   : %d KiB\n"
                  "Original inside memory  : %d KiB\n"
                  "Expected inside memory  : %d KiB\n"
                  "Actual inside memory    : %d KiB\n"
                  "Inside memory deviation : %0.2f%%\n"
                  "Original outside memory : %d KiB\n"
                  "Expected outside memory : %d KiB\n"
                  "Actual outside memory   : %d KiB\n"
                  "Outside memory deviation: %0.2f%%\n"
                  "Acceptable deviation    : %0.2f%%" % (
                      unusable_mem,
                      original_inside_mem,
                      expected_inside_mem,
                      test_inside_mem,
                      inside_deviation,
                      original_outside_mem,
                      expected_outside_mem,
                      test_outside_mem,
                      outside_deviation,
                      delta_percentage))
        for dbgline in dbgmsg.splitlines():
            logging.debug(dbgline)

    # MAIN TEST CODE ###
    # Process cartesian parameters
    vm_ref = params.get("setmem_vm_ref", "")
    mem_ref = params.get("setmem_mem_ref", "")
    flags = params.get("setmem_flags", "")
    status_error = "yes" == params.get("status_error", "no")
    old_libvirt_fail = "yes" == params.get("setmem_old_libvirt_fail", "no")
    quiesce_delay = int(params.get("setmem_quiesce_delay", "1"))
    domarg = params.get("setmem_domarg", "no")
    sizearg = params.get("setmem_sizearg", "no")
    libvirt = params.get("libvirt", "on")
    delta_percentage = float(params.get("setmem_delta_per", "10"))
    start_vm = "yes" == params.get("start_vm", "yes")
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    paused_after_start_vm = "yes" == params.get("paused_after_start_vm", "no")
    manipulate_dom_before_setmem = "yes" == params.get(
        "manipulate_dom_before_setmem", "no")
    manipulate_dom_after_setmem = "yes" == params.get(
        "manipulate_dom_after_setmem", "no")
    manipulate_action = params.get("manipulate_action", "")

    vm = env.get_vm(vm_name)
    # Back up domain XML
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    vmosxml = vmxml.os
    need_mkswap = False
    if manipulate_action in ['s3', 's4']:
        vm.destroy()
        BIOS_BIN = "/usr/share/seabios/bios.bin"
        if os.path.isfile(BIOS_BIN):
            vmosxml.loader = BIOS_BIN
            vmxml.os = vmosxml
            vmxml.sync()
        else:
            logging.error("Not find %s on host", BIOS_BIN)
        vmxml.set_pm_suspend(vm_name, "yes", "yes")
        vm.prepare_guest_agent()
        if manipulate_action == "s4":
            need_mkswap = not vm.has_swap()
        if need_mkswap:
            logging.debug("Creating swap partition")
            vm.create_swap_partition()

    memballoon_model = params.get("memballoon_model", "")
    if memballoon_model:
        vm.destroy()
        vmxml.del_device('memballoon', by_tag=True)
        memballoon_xml = vmxml.get_device_class('memballoon')()
        memballoon_xml.model = memballoon_model
        vmxml.add_device(memballoon_xml)
        logging.info(memballoon_xml)
        vmxml.sync()
        vm.start()

    remove_balloon_driver = "yes" == params.get("remove_balloon_driver", "no")
    if remove_balloon_driver:
        if not vm.is_alive():
            logging.error("Can't remove module as guest not running")
        else:
            session = vm.wait_for_login()
            cmd = "rmmod virtio_balloon"
            s_rmmod, o_rmmod = session.cmd_status_output(cmd)
            if s_rmmod != 0:
                logging.error("Fail to remove module virtio_balloon in guest:\n%s",
                              o_rmmod)
            session.close()
    # Get original data
    domid = vm.get_id()
    domuuid = vm.get_uuid()
    uri = vm.connect_uri
    if not vm.is_alive():
        vm.start()
    session = vm.wait_for_login()
    if session.cmd_status('dmidecode'):
        # The physical memory size is in the vm xml; use it when dmidecode is
        # not supported
        unusable_mem = int(vmxml.max_mem) - vm_usable_mem(session)
    else:
        unusable_mem = vm_unusable_mem(session)
    original_outside_mem = vm.get_used_mem()
    original_inside_mem = vm_usable_mem(session)
    session.close()
    # Prepare VM state
    if not start_vm:
        vm.destroy()
    else:
        if paused_after_start_vm:
            vm.pause()
    old_libvirt = is_old_libvirt()
    if old_libvirt:
        logging.info("Running test on older libvirt")
        use_kilobytes = True
    else:
        logging.info("Running test on newer libvirt")
        use_kilobytes = False

    # Argument pattern is complex, build with dargs
    dargs = {'flagstr': flags,
             'use_kilobytes': use_kilobytes,
             'uri': uri, 'ignore_status': True, "debug": True}
    dargs.update(make_domref(domarg, vm_ref, domid, vm_name, domuuid))
    dargs.update(make_sizeref(sizearg, mem_ref, original_outside_mem))

    # Prepare libvirtd status
    libvirtd = utils_libvirtd.Libvirtd()
    if libvirt == "off":
        libvirtd.stop()
    else:
        if not libvirtd.is_running():
            libvirtd.start()

    if status_error or (old_libvirt_fail & old_libvirt):
        logging.info("Error Test: Expecting an error to occur!")

    try:
        memory_change = True
        if manipulate_dom_before_setmem:
            manipulate_domain(vm_name, manipulate_action)
            if manipulate_action in ['save', 'managedsave', 's4']:
                memory_change = False

        result = virsh.setmem(**dargs)
        status = result.exit_status

        if status is 0:
            logging.info(
                "Waiting %d seconds for VM memory to settle", quiesce_delay)
            # It takes time for the kernel to settle on the new memory
            # and the number of clean pages is not predictable, so it is
            # extremely difficult to determine quiescence; sleeping one
            # second per error percent is a reasonable option.
            time.sleep(quiesce_delay)

        if manipulate_dom_before_setmem:
            manipulate_domain(vm_name, manipulate_action, True)
        if manipulate_dom_after_setmem:
            manipulate_domain(vm_name, manipulate_action)
            manipulate_domain(vm_name, manipulate_action, True)

        # Recover libvirtd status
        if libvirt == "off":
            libvirtd.start()

        # Gather stats if not running error test
        if not status_error and not old_libvirt_fail:
            if not memory_change:
                test_inside_mem = original_inside_mem
                test_outside_mem = original_outside_mem
            else:
                if vm.state() == "shut off":
                    vm.start()
                # Make sure it's never paused
                vm.resume()
                session = vm.wait_for_login()

                # Actual results
                test_inside_mem = vm_usable_mem(session)
                session.close()
                test_outside_mem = vm.get_used_mem()

            # Expected results for both inside and outside
            if remove_balloon_driver:
                expected_mem = original_outside_mem
            else:
                if not memory_change:
                    expected_mem = original_inside_mem
                elif sizearg == "yes":
                    expected_mem = int(dargs["sizearg"])
                else:
                    expected_mem = int(dargs["size"])
            if memory_change:
                # Should minus unusable memory for inside memory check
                expected_inside_mem = expected_mem - unusable_mem
                expected_outside_mem = expected_mem
            else:
                expected_inside_mem = expected_mem
                expected_outside_mem = original_outside_mem

            print_debug_stats(original_inside_mem, original_outside_mem,
                              test_inside_mem, test_outside_mem,
                              expected_outside_mem, expected_inside_mem,
                              delta_percentage, unusable_mem)

            # Don't care about memory comparison on error test
            outside_pass = cal_deviation(test_outside_mem,
                                         expected_outside_mem) <= delta_percentage
            inside_pass = cal_deviation(test_inside_mem,
                                        expected_inside_mem) <= delta_percentage
            if status is not 0 or not outside_pass or not inside_pass:
                msg = "test conditions not met: "
                if status is not 0:
                    msg += "Non-zero virsh setmem exit code. "
                if not outside_pass:
                    msg += "Outside memory deviated. "
                if not inside_pass:
                    msg += "Inside memory deviated. "
                raise error.TestFail(msg)

            return  # Normal test passed
        elif not status_error and old_libvirt_fail:
            if status is 0:
                if old_libvirt:
                    raise error.TestFail("Error test did not result in an error")
            else:
                if not old_libvirt:
                    raise error.TestFail("Newer libvirt failed when it should not")
        else:  # Verify an error test resulted in error
            if status is 0:
                raise error.TestFail("Error test did not result in an error")
    finally:
        if need_mkswap:
            vm.cleanup_swap()
        vm.destroy()
        backup_xml.sync()
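
print_debug_stats() above builds one multi-line report and then emits it one line at a time with logging.debug, so every line of the report carries its own log prefix and timestamp. A small sketch of the same idea, with placeholder numbers standing in for the real measurements:

import logging

logging.basicConfig(level=logging.DEBUG,
                    format='%(asctime)s %(levelname)s| %(message)s')

dbgmsg = ("Expected inside memory : %d KiB\n"
          "Actual inside memory   : %d KiB\n"
          "Inside memory deviation: %0.2f%%" % (1048576, 1042300, 0.54))

# one log record per line instead of a single multi-line record
for dbgline in dbgmsg.splitlines():
    logging.debug(dbgline)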

Example 122

Project: tp-qemu
Source File: pci_hotplug_check.py
View license
@error.context_aware
def run(test, params, env):
    """
    Test hotplug of PCI devices and check the status in guest.
    1 Boot up a guest
    2 Hotplug virtio disk to the guest. Record the id and partition name of
      the disk in a list.
    3 Randomly choose a disk from the list. Unplug the disk and check the
      partition status.
    4 Hotplug the disk back to the guest with the same monitor cmdline and
      the same id recorded in step 2.
    5 Check the partition status in the guest and confirm the disk with a dd cmd.
    6 Repeat steps 3 to 5 N times.

    :param test:   KVM test object.
    :param params: Dictionary with the test parameters.
    :param env:    Dictionary with test environment.
    """

    def prepare_image_params(params):
        pci_num = int(params['pci_num'])
        for i in xrange(pci_num):
            image_name = '%s_%s' % ('stg', i)
            params['images'] = ' '.join([params['images'], image_name])
            image_image_name = '%s_%s' % ('image_name', image_name)
            params[image_image_name] = '%s_%s' % ('storage', i)
            image_image_format = '%s_%s' % ('image_format', image_name)
            params[image_image_format] = params.get('image_format_extra', 'qcow2')
            image_image_size = '%s_%s' % ('image_size', image_name)
            params[image_image_size] = params.get('image_size_extra', '128K')
        return params

    def find_new_device(check_cmd, device_string, chk_timeout=5.0):
        end_time = time.time() + chk_timeout
        idx = ("wmic" in check_cmd and [0] or [-1])[0]
        while time.time() < end_time:
            new_line = session.cmd_output(check_cmd)
            for line in re.split("\n+", new_line.strip()):
                dev_name = re.split("\s+", line.strip())[idx]
                if dev_name not in device_string:
                    return dev_name
            time.sleep(0.1)
        return None

    def find_del_device(check_cmd, device_string, chk_timeout=5.0):
        end_time = time.time() + chk_timeout
        idx = ("wmic" in check_cmd and [0] or [-1])[0]
        while time.time() < end_time:
            new_line = session.cmd_output(check_cmd)
            for line in re.split("\n+", device_string.strip()):
                dev_name = re.split("\s+", line.strip())[idx]
                if dev_name not in new_line:
                    return dev_name
            time.sleep(0.1)
        return None

    # Select an image file
    def find_image(pci_num):
        image_params = params.object_params("%s" % img_list[pci_num + 1])
        o = storage.get_image_filename(image_params, data_dir.get_data_dir())
        return o

    def pci_add_block(pci_num, queues, pci_id):
        image_filename = find_image(pci_num)
        pci_add_cmd = ("pci_add pci_addr=auto storage file=%s,if=%s" %
                       (image_filename, pci_model))
        return pci_add(pci_add_cmd)

    def pci_add(pci_add_cmd):
        guest_devices = session.cmd_output(chk_cmd)
        error.context("Adding pci device with command 'pci_add'")
        add_output = vm.monitor.send_args_cmd(pci_add_cmd, convert=False)
        guest_device = find_new_device(chk_cmd, guest_devices)
        pci_info.append(['', '', add_output, pci_model, guest_device])
        if "OK domain" not in add_output:
            raise error.TestFail("Add PCI device failed. "
                                 "Monitor command is: %s, Output: %r" %
                                 (pci_add_cmd, add_output))
        return vm.monitor.info("pci")

    def is_supported_device(dev):
        # Probe qemu to verify what is the supported syntax for PCI hotplug
        cmd_output = vm.monitor.human_monitor_cmd("?")
        if len(re.findall("\ndevice_add", cmd_output)) > 0:
            cmd_type = "device_add"
        elif len(re.findall("\npci_add", cmd_output)) > 0:
            cmd_type = "pci_add"
        else:
            raise error.TestError("Unknown version of qemu")

        # Probe qemu for a list of supported devices
        probe_output = vm.monitor.human_monitor_cmd("%s ?" % cmd_type)
        devices_supported = [j.strip('"') for j in
                             re.findall('\"[a-z|0-9|\-|\_|\,|\.]*\"',
                                        probe_output, re.MULTILINE)]
        logging.debug("QEMU reported the following supported devices for "
                      "PCI hotplug: %s", devices_supported)
        return (dev in devices_supported)

    def verify_supported_device(dev):
        if not is_supported_device(dev):
            raise error.TestError("%s doesn't support device: %s" %
                                  (cmd_type, dev))

    def device_add_block(pci_num, queues=1, pci_id=None):
        if pci_id is not None:
            device_id = pci_type + "-" + pci_id
        else:
            device_id = pci_type + "-" + utils_misc.generate_random_id()
            pci_info.append([device_id, device_id])

        image_format = params.get("image_format_%s" % img_list[pci_num + 1])
        if not image_format:
            image_format = params.get("image_format", "qcow2")
        image_filename = find_image(pci_num)

        pci_model = params.get("pci_model")
        controller_model = None
        if pci_model == "virtio":
            pci_model = "virtio-blk-pci"

        if pci_model == "scsi":
            pci_model = "scsi-disk"
            if arch.ARCH in ('ppc64', 'ppc64le'):
                controller_model = "spapr-vscsi"
            else:
                controller_model = "lsi53c895a"
            verify_supported_device(controller_model)
            controller_id = "controller-" + device_id
            controller_add_cmd = ("device_add %s,id=%s" %
                                  (controller_model, controller_id))
            error.context("Adding SCSI controller.")
            vm.monitor.send_args_cmd(controller_add_cmd)

        verify_supported_device(pci_model)
        if drive_cmd_type == "drive_add":
            driver_add_cmd = ("%s auto file=%s,if=none,format=%s,id=%s" %
                              (drive_cmd_type, image_filename, image_format,
                               pci_info[pci_num][0]))
        elif drive_cmd_type == "__com.redhat_drive_add":
            driver_add_cmd = ("%s file=%s,format=%s,id=%s" %
                              (drive_cmd_type, image_filename, image_format,
                               pci_info[pci_num][0]))
        # add driver.
        error.context("Adding driver.")
        vm.monitor.send_args_cmd(driver_add_cmd, convert=False)

        pci_add_cmd = ("device_add id=%s,driver=%s,drive=%s" %
                       (pci_info[pci_num][1],
                        pci_model,
                        pci_info[pci_num][0])
                       )
        return device_add(pci_num, pci_add_cmd, pci_id=pci_id)

    def device_add(pci_num, pci_add_cmd, pci_id=None):
        error.context("Adding pci device with command 'device_add'")
        guest_devices = session.cmd_output(chk_cmd)
        if vm.monitor.protocol == 'qmp':
            add_output = vm.monitor.send_args_cmd(pci_add_cmd)
        else:
            add_output = vm.monitor.send_args_cmd(pci_add_cmd, convert=False)
        guest_device = find_new_device(chk_cmd, guest_devices)
        if pci_id is None:
            pci_info[pci_num].append(add_output)
            pci_info[pci_num].append(pci_model)
            pci_info[pci_num].append(guest_device)

        after_add = vm.monitor.info("pci")
        if pci_info[pci_num][1] not in str(after_add):
            logging.error("Could not find matched id in monitor:"
                          " %s" % pci_info[pci_num][1])
            raise error.TestFail("Add device failed. Monitor command is: %s"
                                 ". Output: %r" % (pci_add_cmd, add_output))
        return after_add

    # Hot add a pci device
    def add_device(pci_num, queues=1, pci_id=None):
        info_pci_ref = vm.monitor.info("pci")
        reference = session.cmd_output(reference_cmd)

        try:
            # get function for adding device.
            add_fuction = local_functions["%s_%s" % (cmd_type, pci_type)]
        except Exception:
            raise error.TestError("No function for adding " +
                                  "'%s' dev " % pci_type +
                                  "with '%s'" % cmd_type)
        after_add = None
        if add_fuction:
            # Do add pci device.
            after_add = add_fuction(pci_num, queues, pci_id)

        try:
            # Define a helper function to compare the output
            def _new_shown():
                o = session.cmd_output(reference_cmd)
                return o != reference

            # Define a helper function to catch PCI device string
            def _find_pci():
                output = session.cmd_output(params.get("find_pci_cmd"))
                output = map(string.strip, output.splitlines())
                ref = map(string.strip, reference.splitlines())
                output = [_ for _ in output if _ not in ref]
                output = "\n".join(output)
                if re.search(params.get("match_string"), output, re.I):
                    return True
                return False

            error.context("Start checking new added device")
            # Compare the output of 'info pci'
            if after_add == info_pci_ref:
                raise error.TestFail("No new PCI device shown after "
                                     "executing monitor command: 'info pci'")

            secs = int(params.get("wait_secs_for_hook_up", 3))
            if not utils_misc.wait_for(_new_shown, test_timeout, secs, 3):
                raise error.TestFail("No new device shown in output of" +
                                     "command executed inside the " +
                                     "guest: %s" % reference_cmd)

            if not utils_misc.wait_for(_find_pci, test_timeout, 3, 3):
                raise error.TestFail("PCI %s %s " % (pci_model, pci_type) +
                                     "device not found in guest. Command " +
                                     "was: %s" % params.get("find_pci_cmd"))

            # Test the newly added device
            try:
                session.cmd(params.get("pci_test_cmd") % (pci_num + 1))
            except aexpect.ShellError, e:
                raise error.TestFail("Check for %s device failed" % pci_type +
                                     "after PCI hotplug." +
                                     "Output: %r" % e.output)

        except Exception:
            pci_del(pci_num, ignore_failure=True)
            raise

    # Hot delete a pci device
    def pci_del(pci_num, ignore_failure=False):
        def _device_removed():
            after_del = vm.monitor.info("pci")
            return after_del != before_del

        before_del = vm.monitor.info("pci")
        if cmd_type == "pci_add":
            slot_id = int(pci_info[pci_num][2].split(",")[2].split()[1])
            cmd = "pci_del pci_addr=%s" % hex(slot_id)
            vm.monitor.send_args_cmd(cmd, convert=False)
        elif cmd_type == "device_add":
            cmd = "device_del id=%s" % pci_info[pci_num][1]
            vm.monitor.send_args_cmd(cmd)

        if (not utils_misc.wait_for(_device_removed, test_timeout, 0, 1) and
                not ignore_failure):
            raise error.TestFail("Failed to hot remove PCI device: %s. "
                                 "Monitor command: %s" %
                                 (pci_info[pci_num][3], cmd))

    params = prepare_image_params(params)
    env_process.process_images(env_process.preprocess_image, test, params)
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    timeout = int(params.get("login_timeout", 360))
    session = vm.wait_for_login(timeout=timeout)

    test_timeout = int(params.get("hotplug_timeout", 360))
    reference_cmd = params["reference_cmd"]
    # Test if it is nic or block
    pci_type = params["pci_type"]
    pci_model = params["pci_model"]

    # Modprobe the module if specified in config file
    module = params.get("modprobe_module")
    if module:
        session.cmd("modprobe %s" % module)

    # check monitor type
    qemu_binary = utils_misc.get_qemu_binary(params)
    # Probe qemu to verify what is the supported syntax for PCI hotplug
    if vm.monitor.protocol == 'qmp':
        cmd_output = vm.monitor.info("commands")
    else:
        cmd_output = vm.monitor.human_monitor_cmd("help", debug=False)

    cmd_type = utils_misc.find_substring(str(cmd_output), "device_add",
                                         "pci_add")
    if not cmd_type:
        raise error.TestError("Could find a suitable method for hotplugging"
                              " device in this version of qemu")

    # Determine syntax of drive hotplug
    # __com.redhat_drive_add == qemu-kvm-0.12 on RHEL 6
    # drive_add == qemu-kvm-0.13 onwards
    drive_cmd_type = utils_misc.find_substring(str(cmd_output),
                                               "__com.redhat_drive_add",
                                               "drive_add")
    if not drive_cmd_type:
        raise error.TestError("Unknown version of qemu")

    local_functions = locals()

    pci_num_range = int(params.get("pci_num"))
    rp_times = int(params.get("repeat_times"))
    img_list = params.get("images").split()
    chk_cmd = params.get("guest_check_cmd")
    mark_cmd = params.get("mark_cmd")
    offset = params.get("offset")
    confirm_cmd = params.get("confirm_cmd")

    pci_info = []
    # Add block device into guest
    for pci_num in xrange(pci_num_range):
        error.context("Prepare the %d removable pci device" % pci_num,
                      logging.info)
        add_device(pci_num)
        if pci_info[pci_num][4] is not None:
            partition = pci_info[pci_num][4]
            cmd = mark_cmd % (partition, partition, offset)
            session.cmd(cmd)
        else:
            raise error.TestError("Device not init in guest")

    for j in range(rp_times):
        # pci_info is a list of list.
        # each element 'i' has 4 members:
        # pci_info[i][0] == device drive id, only used for device_add
        # pci_info[i][1] == device id, only used for device_add
        # pci_info[i][2] == output of device add command
        # pci_info[i][3] == device module name.
        # pci_info[i][4] == partition id in guest
        pci_num = random.randint(0, len(pci_info) - 1)
        error.context("start unplug pci device, repeat %d" % j, logging.info)
        guest_devices = session.cmd_output(chk_cmd)
        pci_del(pci_num)
        device_del = find_del_device(chk_cmd, guest_devices)
        if device_del != pci_info[pci_num][4]:
            raise error.TestFail("Device is not deleted in guest.")
        error.context("Start plug pci device, repeat %d" % j, logging.info)
        guest_devices = session.cmd_output(chk_cmd)
        add_device(pci_num, pci_id=pci_info[pci_num][0])
        device_del = find_new_device(chk_cmd, guest_devices)
        if device_del != pci_info[pci_num][4]:
            raise error.TestFail("Device partition changed from %s to %s" %
                                 (pci_info[pci_num][4], device_del))
        cmd = confirm_cmd % (pci_info[pci_num][4], offset)
        confirm_info = session.cmd_output(cmd)
        if device_del not in confirm_info:
            raise error.TestFail("Can not find partition tag in Guest: %s" %
                                 confirm_info)
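
find_new_device() and find_del_device() above both work by polling the guest's device listing and diffing it against a snapshot taken before the hotplug, until something changes or a timeout expires. A standalone sketch of that polling loop (list_devices is a hypothetical callable standing in for session.cmd_output(chk_cmd)):

import logging
import time

logging.basicConfig(level=logging.DEBUG)

def find_new_device(list_devices, known_devices, chk_timeout=5.0):
    """Poll list_devices() until a name not present in known_devices shows up."""
    end_time = time.time() + chk_timeout
    while time.time() < end_time:
        for dev_name in list_devices().split():
            if dev_name not in known_devices:
                logging.debug("new device detected: %s", dev_name)
                return dev_name
        time.sleep(0.1)
    logging.debug("no new device within %.1f seconds", chk_timeout)
    return None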

Example 123

Project: pycaching
Source File: cache.py
View license
    def load(self):
        """Load all possible cache details.

        Uses the full cache details page, so all possible properties are filled in, but the
        loading is a bit slow.

        If you want to load basic details about a PM only cache, the :class:`.PMOnlyException` is
        still thrown, but available details are filled in. If you know that the cache you are
        loading is PM only, please consider using :meth:`load_quick` as it will load the same
        details, but quicker.

        .. note::
           This method is called automatically when you access a property which isn't yet filled in
           (so-called "lazy loading"). You don't have to call it explicitly.

        :raise .PMOnlyException: If cache is PM only and current user is basic member.
        :raise .LoadError: If cache loading fails (probably because of not existing cache).
        """
        try:
            # pick url based on what info we have right now
            if hasattr(self, "url"):
                root = self.geocaching._request(self.url)
            elif hasattr(self, "_wp"):
                root = self.geocaching._request("seek/cache_details.aspx", params={"wp": self._wp})
            else:
                raise errors.LoadError("Cache lacks info for loading")
        except errors.Error as e:
            # probably 404 during cache loading - cache not exists
            raise errors.LoadError("Error in loading cache") from e

        # check for PM only caches if using free account
        self.pm_only = root.find("section", "pmo-banner") is not None

        cache_details = root.find(id="ctl00_divContentMain") if self.pm_only else root.find(id="cacheDetails")

        # details also available for basic members for PM only caches -----------------------------

        if self.pm_only:
            self.wp = cache_details.find("li", "li__gccode").text.strip()

            self.name = cache_details.find("h1").text.strip()

            author = cache_details.find(id="ctl00_ContentBody_uxCacheBy").text
            self.author = author[len("A cache by "):]

            # parse cache detail list into a python list
            details = cache_details.find("ul", "ul__hide-details").text.split("\n")

            self.difficulty = float(details[2])

            self.terrain = float(details[5])

            self.size = Size.from_string(details[8])

            self.favorites = int(details[11])
        else:
            # parse from <title> - get first word
            try:
                self.wp = root.title.string.split(" ")[0]
            except:
                raise errors.LoadError
            self.name = cache_details.find("h2").text

            self.author = cache_details("a")[1].text

            size = root.find("div", "CacheSize")

            D_and_T_img = root.find("div", "CacheStarLabels").find_all("img")

            size = size.find("img").get("src")  # size img src
            size = size.split("/")[-1].rsplit(".", 1)[0]  # filename w/o extension
            self.size = Size.from_filename(size)

            self.difficulty, self.terrain = [float(img.get("alt").split()[0]) for img in D_and_T_img]

        type = cache_details.find("img").get("src")  # type img src
        type = type.split("/")[-1].rsplit(".", 1)[0]  # filename w/o extension
        self.type = Type.from_filename(type)

        if self.pm_only:
            raise errors.PMOnlyException()

        # details not available for basic members for PM only caches ------------------------------
        pm_only_warning = root.find("p", "Warning NoBottomSpacing")
        self.pm_only = pm_only_warning and ("Premium Member Only" in pm_only_warning.text) or False

        attributes_widget, inventory_widget, *_ = root.find_all("div", "CacheDetailNavigationWidget")

        hidden = cache_details.find("div", "minorCacheDetails").find_all("div")[1].text
        self.hidden = parse_date(hidden.split(":")[-1])

        self.location = Point.from_string(root.find(id="uxLatLon").text)

        self.state = root.find("ul", "OldWarning") is None

        found = root.find("div", "FoundStatus")
        self.found = bool(found and ("Found It!" in found.text or "Attended" in found.text))

        attributes_raw = attributes_widget.find_all("img")
        attributes_raw = [_.get("src").split("/")[-1].rsplit("-", 1) for _ in attributes_raw]

        self.attributes = {attribute_name: appendix.startswith("yes") for attribute_name, appendix
                           in attributes_raw if not appendix.startswith("blank")}

        user_content = root.find_all("div", "UserSuppliedContent")
        self.summary = user_content[0].text
        self.description = str(user_content[1])

        self.hint = rot13(root.find(id="div_hint").text.strip())

        favorites = root.find("span", "favorite-value")
        self.favorites = 0 if favorites is None else int(favorites.text)

        self._log_page_url = root.find(id="ctl00_ContentBody_GeoNav_logButton")["href"]

        js_content = "\n".join(map(lambda i: i.text, root.find_all("script")))
        self._logbook_token = re.findall("userToken\\s*=\\s*'([^']+)'", js_content)[0]
        # find original location if any
        if "oldLatLng\":" in js_content:
            old_lat_long = js_content.split("oldLatLng\":")[1].split(']')[0].split('[')[1]
            self.original_location = Point(old_lat_long)
        else:
            self.original_location = None

        # if there are some trackables
        if len(inventory_widget.find_all("a")) >= 3:
            trackable_page_url = inventory_widget.find(id="ctl00_ContentBody_uxTravelBugList_uxViewAllTrackableItems")
            self._trackable_page_url = trackable_page_url.get("href")[3:]  # has "../" on start
        else:
            self._trackable_page_url = None

        # Additional Waypoints
        self.waypoints = Waypoint.from_html(root)

        logging.debug("Cache loaded: {}".format(self))

Example 124

Project: Nuitka
Source File: Standalone.py
View license
def _detectImports(command, user_provided, technical):
    # This is pretty complicated stuff, with variants to deal with.
    # pylint: disable=R0912,R0914,R0915

    # Print statements for stuff to show, the modules loaded.
    if python_version >= 300:
        command += '\nimport sys\nprint("\\n".join(sorted("import " + module.__name__ + " # sourcefile " + ' \
                   'module.__file__ for module in sys.modules.values() if hasattr(module, "__file__") and ' \
                   'module.__file__ != "<frozen>")), file = sys.stderr)'  # do not read it

    reduced_path = [
        path_element
        for path_element in
        sys.path
        if not Utils.areSamePaths(
            path_element,
            '.'
        )
        if not Utils.areSamePaths(
            path_element,
            Utils.dirname(sys.modules["__main__"].__file__)
        )
    ]

    # Make sure the right import path (the one Nuitka binary is running with)
    # is used.
    command = ("import sys; sys.path = %s;" % repr(reduced_path)) + command

    import tempfile
    tmp_file, tmp_filename = tempfile.mkstemp()

    try:
        if python_version >= 300:
            command = command.encode("ascii")
        os.write(tmp_file, command)
        os.close(tmp_file)

        process = subprocess.Popen(
            args   = [sys.executable, "-s", "-S", "-v", tmp_filename],
            stdout = subprocess.PIPE,
            stderr = subprocess.PIPE,
        )
        _stdout, stderr = process.communicate()
    finally:
        os.unlink(tmp_filename)

    # Don't let errors here go unnoticed.
    if process.returncode != 0:
        warning("There is a problem with detecting imports, CPython said:")
        for line in stderr.split(b"\n"):
            Tracing.printLine(line)
        sys.exit("Error, please report the issue with above output.")

    result = []

    debug("Detecting imports:")

    for line in stderr.replace(b"\r", b"").split(b"\n"):
        if line.startswith(b"import "):
            # print(line)

            parts = line.split(b" # ", 2)

            module_name = parts[0].split(b" ", 2)[1]
            origin = parts[1].split()[0]

            if python_version >= 300:
                module_name = module_name.decode("utf-8")

            if origin == b"precompiled":
                # This is a ".pyc" file that was imported, even before we have a
                # chance to do anything, we need to preserve it.
                filename = parts[1][len(b"precompiled from "):]
                if python_version >= 300:
                    filename = filename.decode("utf-8")

                # Do not leave standard library when freezing.
                if not isStandardLibraryPath(filename):
                    continue

                _detectedPrecompiledFile(
                    filename      = filename,
                    module_name   = module_name,
                    result        = result,
                    user_provided = user_provided,
                    technical     = technical
                )
            elif origin == b"sourcefile":
                filename = parts[1][len(b"sourcefile "):]
                if python_version >= 300:
                    filename = filename.decode("utf-8")

                # Do not leave standard library when freezing.
                if not isStandardLibraryPath(filename):
                    continue

                if filename.endswith(".py"):
                    _detectedSourceFile(
                        filename      = filename,
                        module_name   = module_name,
                        result        = result,
                        user_provided = user_provided,
                        technical     = technical
                    )
                elif not filename.endswith("<frozen>"):
                    # Python3 started lying in "__name__": the "_decimal"
                    # module calls itself "decimal", which is wrong and also
                    # clashes with "decimal" proper.
                    if python_version >= 300:
                        if module_name == "decimal":
                            module_name = "_decimal"

                    _detectedShlibFile(
                        filename    = filename,
                        module_name = module_name
                    )
            elif origin == b"dynamically":
                # Shared library loaded early; happens on RPM based systems
                # and/or self compiled Python installations.
                filename = parts[1][len(b"dynamically loaded from "):]
                if python_version >= 300:
                    filename = filename.decode("utf-8")

                # Do not leave standard library when freezing.
                if not isStandardLibraryPath(filename):
                    continue

                _detectedShlibFile(
                    filename    = filename,
                    module_name = module_name
                )

    return result
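
The snippet above drives a throwaway CPython process with "-v" and parses the import trace it writes to stderr. Below is a minimal, standalone sketch of the same idea wired to the standard logging.debug call; the trace_imports helper and the child statement are illustrative and not part of the project above, and the exact "-v" line format varies across CPython versions.

import logging
import subprocess
import sys

logging.basicConfig(level=logging.DEBUG)

def trace_imports(statement="import json"):
    # Run a throwaway interpreter with -v so every import is reported on
    # stderr; only lines that begin with "import " are of interest here.
    process = subprocess.Popen(
        [sys.executable, "-v", "-c", statement],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    _stdout, stderr = process.communicate()

    modules = []
    for line in stderr.replace(b"\r", b"").split(b"\n"):
        if line.startswith(b"import "):
            # The first token after "import " is the module name (possibly
            # quoted, depending on the CPython version).
            name = line.split()[1].decode("utf-8").strip("'")
            modules.append(name)
            logging.debug("Detected import: %s", name)
    return modules

if __name__ == "__main__":
    trace_imports()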

Example 125

Project: tp-qemu
Source File: qmp_basic_rhel6.py
View license
def run(test, params, env):
    """
    QMP Specification test-suite: this checks if the *basic* protocol conforms
    to its specification, which is file QMP/qmp-spec.txt in QEMU's source tree.

    IMPORTANT NOTES:

        o Most tests depend heavily on QMP's error information (eg. classes),
          this might have bad implications as the error interface is going to
          change in QMP

        o Command testing is *not* covered in this suite. Each command has its
          own specification and should be tested separately

        o We use the same terminology as used by the QMP specification,
          especially with regard to JSON types (eg. a Python dict is called
          a json-object)

        o This is divided into sub test-suites; please check the bottom of this
          file to check the order in which they are run

    TODO:

        o Finding which test failed is not as easy as it should be

        o Are all those check_*() functions really needed? Wouldn't a
          specialized class (eg. a Response class) do better?
    """
    def fail_no_key(qmp_dict, key):
        if not isinstance(qmp_dict, dict):
            raise error.TestFail("qmp_dict is not a dict (it's '%s')" %
                                 type(qmp_dict))
        if key not in qmp_dict:
            raise error.TestFail("'%s' key doesn't exist in dict ('%s')" %
                                 (key, str(qmp_dict)))

    def check_dict_key(qmp_dict, key, keytype):
        """
        Performs the following checks on a QMP dict key:

        1. qmp_dict is a dict
        2. key exists in qmp_dict
        3. key is of type keytype

        If any of these checks fails, error.TestFail is raised.
        """
        fail_no_key(qmp_dict, key)
        if not isinstance(qmp_dict[key], keytype):
            raise error.TestFail("'%s' key is not of type '%s', it's '%s'" %
                                 (key, keytype, type(qmp_dict[key])))

    def check_key_is_dict(qmp_dict, key):
        check_dict_key(qmp_dict, key, dict)

    def check_key_is_list(qmp_dict, key):
        check_dict_key(qmp_dict, key, list)

    def check_key_is_str(qmp_dict, key):
        check_dict_key(qmp_dict, key, unicode)

    def check_str_key(qmp_dict, keyname, value=None):
        check_dict_key(qmp_dict, keyname, unicode)
        if value and value != qmp_dict[keyname]:
            raise error.TestFail("'%s' key value '%s' should be '%s'" %
                                 (keyname, str(qmp_dict[keyname]), str(value)))

    def check_key_is_int(qmp_dict, key):
        fail_no_key(qmp_dict, key)
        try:
            int(qmp_dict[key])
        except Exception:
            raise error.TestFail("'%s' key is not of type int, it's '%s'" %
                                 (key, type(qmp_dict[key])))

    def check_bool_key(qmp_dict, keyname, value=None):
        check_dict_key(qmp_dict, keyname, bool)
        if value and value != qmp_dict[keyname]:
            raise error.TestFail("'%s' key value '%s' should be '%s'" %
                                 (keyname, str(qmp_dict[keyname]), str(value)))

    def check_success_resp(resp, empty=False):
        """
        Check QMP OK response.

        :param resp: QMP response
        :param empty: if True, response should not contain data to return
        """
        check_key_is_dict(resp, "return")
        if empty and len(resp["return"]) > 0:
            raise error.TestFail("success response is not empty ('%s')" %
                                 str(resp))

    def check_error_resp(resp, classname=None, datadict=None):
        """
        Check QMP error response.

        :param resp: QMP response
        :param classname: Expected error class name
        :param datadict: Expected error data dictionary
        """
        logging.debug("resp %s", str(resp))
        check_key_is_dict(resp, "error")
        check_key_is_str(resp["error"], "class")
        if classname and resp["error"]["class"] != classname:
            raise error.TestFail("got error class '%s' expected '%s'" %
                                 (resp["error"]["class"], classname))
        check_key_is_dict(resp["error"], "data")
        if datadict and resp["error"]["data"] != datadict:
            raise error.TestFail("got data dict '%s' expected '%s'" %
                                 (resp["error"]["data"], datadict))

    def test_version(version):
        """
        Check the QMP greeting message version key which, according to QMP's
        documentation, should be:

        { "qemu": { "major": json-int, "minor": json-int, "micro": json-int },
          "package": json-string }
        """
        check_key_is_dict(version, "qemu")
        check_key_is_str(version, "package")

    def test_greeting(greeting):
        check_key_is_dict(greeting, "QMP")
        check_key_is_dict(greeting["QMP"], "version")
        check_key_is_list(greeting["QMP"], "capabilities")

    def greeting_suite(monitor):
        """
        Check the greeting message format, as described in the QMP
        specification section '2.2 Server Greeting'.

        { "QMP": { "version": json-object, "capabilities": json-array } }
        """
        greeting = monitor.get_greeting()
        test_greeting(greeting)
        test_version(greeting["QMP"]["version"])

    def json_parsing_errors_suite(monitor):
        """
        Check that QMP's parser is able to recover from parsing errors; please
        check the JSON spec for more info on the JSON syntax (RFC 4627).
        """
        # We're quite simple right now and the focus is on parsing errors that
        # have already bitten us in the past.
        #
        # TODO: The following test-cases are missing:
        #
        #   - JSON numbers, strings and arrays
        #   - More invalid characters or malformed structures
        #   - Valid, but not obvious syntax, like zillion of spaces or
        #     strings with unicode chars (different suite maybe?)
        bad_json = []

        # A JSON value MUST be an object, array, number, string, true, false,
        # or null
        #
        # NOTE: QMP seems to ignore a number of chars, like: | and ?
        bad_json.append(":")
        bad_json.append(",")

        # Malformed json-objects
        #
        # NOTE: sending only "}" seems to break QMP
        # NOTE: Duplicate keys are accepted (should they be?)
        bad_json.append("{ \"execute\" }")
        bad_json.append("{ \"execute\": \"query-version\", }")
        bad_json.append("{ 1: \"query-version\" }")
        bad_json.append("{ true: \"query-version\" }")
        bad_json.append("{ []: \"query-version\" }")
        bad_json.append("{ {}: \"query-version\" }")

        for cmd in bad_json:
            resp = monitor.cmd_raw(cmd)
            check_error_resp(resp, "JSONParsing")

    def test_id_key(monitor):
        """
        Check that QMP's "id" key is correctly handled.
        """
        # The "id" key must be echoed back in error responses
        id_key = "virt-test"
        resp = monitor.cmd_qmp("eject", {"foobar": True}, q_id=id_key)
        check_error_resp(resp)
        check_str_key(resp, "id", id_key)

        # The "id" key must be echoed back in success responses
        resp = monitor.cmd_qmp("query-status", q_id=id_key)
        check_success_resp(resp)
        check_str_key(resp, "id", id_key)

        # The "id" key can be any json-value
        for id_key in (True, 1234, "string again!", [1, [], {}, True, "foo"],
                       {"key": {}}):
            resp = monitor.cmd_qmp("query-status", q_id=id_key)
            check_success_resp(resp)
            if resp["id"] != id_key:
                raise error.TestFail("expected id '%s' but got '%s'" %
                                     (str(id_key), str(resp["id"])))

    def test_invalid_arg_key(monitor):
        """
        Currently, the only supported keys in the input object are: "execute",
        "arguments" and "id". Although expansion is supported, invalid key
        names must be detected.
        """
        resp = monitor.cmd_obj({"execute": "eject", "foobar": True})
        expected_error = "QMPExtraInputObjectMember"
        data_dict = {"member": "foobar"}
        check_error_resp(resp, expected_error, data_dict)

    def test_bad_arguments_key_type(monitor):
        """
        The "arguments" key must be a json-object.

        We use the eject command to perform the tests, but that's a random
        choice, any command that accepts arguments will do, as the command
        doesn't get called.
        """
        for item in (True, [], 1, "foo"):
            resp = monitor.cmd_obj({"execute": "eject", "arguments": item})
            check_error_resp(resp, "QMPBadInputObjectMember",
                             {"member": "arguments", "expected": "object"})

    def test_bad_execute_key_type(monitor):
        """
        The "execute" key must be a json-string.
        """
        for item in (False, 1, {}, []):
            resp = monitor.cmd_obj({"execute": item})
            check_error_resp(resp, "QMPBadInputObjectMember",
                             {"member": "execute", "expected": "string"})

    def test_no_execute_key(monitor):
        """
        The "execute" key must exist; we also test for some stupid parsing
        errors.
        """
        for cmd in ({}, {"execut": "qmp_capabilities"},
                    {"executee": "qmp_capabilities"}, {"foo": "bar"}):
            resp = monitor.cmd_obj(cmd)
            check_error_resp(resp)  # XXX: check class and data dict?

    def test_bad_input_obj_type(monitor):
        """
        The input object must be... a json-object.
        """
        for cmd in ("foo", [], True, 1):
            resp = monitor.cmd_obj(cmd)
            check_error_resp(resp, "QMPBadInputObject", {"expected": "object"})

    def test_good_input_obj(monitor):
        """
        Basic success tests for issuing QMP commands.
        """
        # NOTE: We don't use the cmd_qmp() method here because the command
        # object is in a 'random' order
        resp = monitor.cmd_obj({"execute": "query-version"})
        check_success_resp(resp)

        resp = monitor.cmd_obj({"arguments": {}, "execute": "query-version"})
        check_success_resp(resp)

        id_key = "1234foo"
        resp = monitor.cmd_obj({"id": id_key, "execute": "query-version",
                                "arguments": {}})
        check_success_resp(resp)
        check_str_key(resp, "id", id_key)

        # TODO: would be good to test simple argument usage, but we don't have
        # a read-only command that accepts arguments.

    def input_object_suite(monitor):
        """
        Check the input object format, as described in the QMP specification
        section '2.3 Issuing Commands'.

        { "execute": json-string, "arguments": json-object, "id": json-value }
        """
        test_good_input_obj(monitor)
        test_bad_input_obj_type(monitor)
        test_no_execute_key(monitor)
        test_bad_execute_key_type(monitor)
        test_bad_arguments_key_type(monitor)
        test_id_key(monitor)
        test_invalid_arg_key(monitor)

    def argument_checker_suite(monitor):
        """
        Check that QMP's argument checker is detecting all possible errors.

        We use a number of different commands to perform the checks, but the
        command used doesn't matter much as QMP performs argument checking
        _before_ calling the command.
        """
        # qmp in RHEL6 is different from 0.13.*:
        # 1. the 'stop' command just returns {} even if it is given arguments.
        # 2. there is no 'screendump' command.
        # 3. arguments aren't checked in the 'device' command.
        # so skip these tests in RHEL6.

        # test optional argument: 'force' is omitted, but it's optional, so
        # the handler has to be called. Test this happens by checking an
        # error that is generated by the handler itself.
        resp = monitor.cmd_qmp("eject", {"device": "foobar"})
        check_error_resp(resp, "DeviceNotFound")

        # val argument must be a json-int
        for arg in ({}, [], True, "foo"):
            resp = monitor.cmd_qmp("memsave", {"val": arg, "filename": "foo",
                                               "size": 10})
            check_error_resp(resp, "InvalidParameterType",
                             {"name": "val", "expected": "int"})

        # value argument must be a json-number
        for arg in ({}, [], True, "foo"):
            resp = monitor.cmd_qmp("migrate_set_speed", {"value": arg})
            check_error_resp(resp, "InvalidParameterType",
                             {"name": "value", "expected": "number"})

        # qdev-type commands have their own argument checker, all QMP does
        # is to skip its checking and pass arguments through. Check this
        # works by providing invalid options to device_add and expecting
        # an error message from qdev
        resp = monitor.cmd_qmp("device_add", {"driver": "e1000", "foo": "bar"})
        check_error_resp(resp, "PropertyNotFound",
                               {"device": "e1000", "property": "foo"})

    def unknown_commands_suite(monitor):
        """
        Check that QMP handles unknown commands correctly.
        """
        # We also call an HMP-only command, to be sure it will fail as expected
        for cmd in ("bar", "query-", "query-foo", "help"):
            resp = monitor.cmd_qmp(cmd)
            check_error_resp(resp, "CommandNotFound", {"name": cmd})

    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()

    # Look for the first qmp monitor available, otherwise, fail the test
    qmp_monitor = vm.get_monitors_by_type("qmp")
    if qmp_monitor:
        qmp_monitor = qmp_monitor[0]
    else:
        raise error.TestError('Could not find a QMP monitor, aborting test')

    # Run all suites
    greeting_suite(qmp_monitor)
    input_object_suite(qmp_monitor)
    argument_checker_suite(qmp_monitor)
    unknown_commands_suite(qmp_monitor)
    json_parsing_errors_suite(qmp_monitor)

    # check if QMP is still alive
    if not qmp_monitor.is_responsive():
        raise error.TestFail('QMP monitor is not responsive after testing')
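
The suite above leans on small check_*() helpers, and check_error_resp() logs the raw response with logging.debug before validating its shape. Here is a standalone sketch of that pattern without the virt-test error module; check_error_resp() here raises ValueError and the sample response is made up for illustration.

import logging

logging.basicConfig(level=logging.DEBUG)

def check_error_resp(resp, classname=None, datadict=None):
    """Validate a QMP-style error response, logging it first."""
    # Log the raw response before any validation so failures are debuggable.
    logging.debug("resp %s", resp)
    if not isinstance(resp, dict) or not isinstance(resp.get("error"), dict):
        raise ValueError("response has no 'error' object: %r" % (resp,))
    err = resp["error"]
    if classname and err.get("class") != classname:
        raise ValueError("got error class %r, expected %r"
                         % (err.get("class"), classname))
    if datadict and err.get("data") != datadict:
        raise ValueError("got data dict %r, expected %r"
                         % (err.get("data"), datadict))

# A malformed command would be reported as a JSON parsing error.
check_error_resp({"error": {"class": "JSONParsing", "data": {}}},
                 classname="JSONParsing")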

Example 126

Project: tp-qemu
Source File: sr_iov_hotplug.py
View license
@error.context_aware
def run(test, params, env):
    """
    Test hotplug of sr-iov devices.

    (Elements between [] are configurable test parameters)
    1) Set up sr-iov test environment in host.
    2) Start VM.
    3) Disable the primary link(s) of guest.
    4) PCI add one or multiple sr-iov devices, with (or without) repetition
    5) Compare output of monitor command 'info pci'.
    6) Compare output of guest command [reference_cmd].
    7) Verify whether pci_model is shown in [pci_find_cmd].
    8) Check whether the newly added PCI device works fine.
    9) Delete the device, verify whether could remove the sr-iov device.
    10) Re-enable the primary link(s) of the guest.

    :param test:   QEMU test object.
    :param params: Dictionary with the test parameters.
    :param env:    Dictionary with test environment.
    """

    def get_active_network_device(session, nic_filter):
        devnames = []
        cmd = "ifconfig -a"
        status, output = session.cmd_status_output(cmd)
        if status:
            msg = "Guest command '%s' failed with output: %s." % (cmd, output)
            raise error.TestError(msg)
        devnames = re.findall(nic_filter, output)
        return devnames

    def pci_add_iov(pci_num):
        pci_add_cmd = ("pci_add pci_addr=auto host host=%s,if=%s" %
                       (pa_pci_ids[pci_num], pci_model))
        if params.get("hotplug_params"):
            assign_param = params.get("hotplug_params").split()
            for param in assign_param:
                value = params.get(param)
                if value:
                    pci_add_cmd += ",%s=%s" % (param, value)
        return pci_add(pci_add_cmd)

    def pci_add(pci_add_cmd):
        error.context("Adding pci device with command 'pci_add'")
        add_output = vm.monitor.send_args_cmd(pci_add_cmd, convert=False)
        pci_info.append(['', add_output])
        if "OK domain" not in add_output:
            raise error.TestFail("Add PCI device failed. "
                                 "Monitor command is: %s, Output: %r" %
                                 (pci_add_cmd, add_output))
        return vm.monitor.info("pci")

    def check_support_device(dev):
        if vm.monitor.protocol == 'qmp':
            devices_supported = vm.monitor.human_monitor_cmd("%s ?" % cmd_type)
        else:
            devices_supported = vm.monitor.send_args_cmd("%s ?" % cmd_type)
        # Check if the device is supported in qemu
        is_support = utils_misc.find_substring(devices_supported, dev)
        if not is_support:
            raise error.TestError("%s doesn't support device: %s" %
                                  (cmd_type, dev))

    def device_add_iov(pci_num):
        device_id = "%s" % pci_model + "-" + utils_misc.generate_random_id()
        pci_info.append([device_id])
        driver = params.get("device_driver", "pci-assign")
        check_support_device(driver)
        pci_add_cmd = ("device_add id=%s,driver=%s,host=%s" %
                       (pci_info[pci_num][0], driver, pa_pci_ids[pci_num]))
        if params.get("hotplug_params"):
            assign_param = params.get("hotplug_params").split()
            for param in assign_param:
                value = params.get(param)
                if value:
                    pci_add_cmd += ",%s=%s" % (param, value)
        return device_add(pci_num, pci_add_cmd)

    def device_add(pci_num, pci_add_cmd):
        error.context("Adding pci device with command 'device_add'")
        if vm.monitor.protocol == 'qmp':
            add_output = vm.monitor.send_args_cmd(pci_add_cmd)
        else:
            add_output = vm.monitor.send_args_cmd(pci_add_cmd, convert=False)
        pci_info[pci_num].append(add_output)
        after_add = vm.monitor.info("pci")
        if pci_info[pci_num][0] not in str(after_add):
            logging.debug("info pci output after device_add: %s" % after_add)
            raise error.TestFail("Add device failed. Monitor command is: %s"
                                 ". Output: %r" % (pci_add_cmd, add_output))
        return after_add

    # Hot add a pci device
    def add_device(pci_num):
        reference_cmd = params["reference_cmd"]
        find_pci_cmd = params["find_pci_cmd"]
        info_pci_ref = vm.monitor.info("pci")
        reference = session.cmd_output(reference_cmd)
        active_nics = get_active_network_device(session, nic_filter)
        try:
            # get the function for adding the device.
            add_function = local_functions["%s_iov" % cmd_type]
        except Exception:
            raise error.TestError(
                "No function for adding sr-iov dev with '%s'" %
                cmd_type)
        after_add = None
        if add_function:
            # Do add pci device.
            after_add = add_function(pci_num)

        try:
            # Define a helper function to compare the output
            def _new_shown():
                output = session.cmd_output(reference_cmd)
                return output != reference

            # Define a helper function to make sure new nic could get ip.
            def _check_ip():
                post_nics = get_active_network_device(session, nic_filter)
                return (len(active_nics) <= len(post_nics) and
                        active_nics != post_nics)

            # Define a helper function to catch PCI device string
            def _find_pci():
                output = session.cmd_output(find_pci_cmd)
                if re.search(match_string, output, re.IGNORECASE):
                    return True
                else:
                    return False

            error.context("Start checking new added device")
            # Compare the output of 'info pci'
            if after_add == info_pci_ref:
                raise error.TestFail("No new PCI device shown after executing "
                                     "monitor command: 'info pci'")

            secs = int(params["wait_secs_for_hook_up"])
            if not utils_misc.wait_for(_new_shown, test_timeout, secs, 3):
                raise error.TestFail("No new device shown in output of command "
                                     "executed inside the guest: %s" %
                                     reference_cmd)

            if not utils_misc.wait_for(_find_pci, test_timeout, 3, 3):
                raise error.TestFail("Newly added device not found in guest. "
                                     "Command was: %s" % find_pci_cmd)

            # Test the newly added device
            if not utils_misc.wait_for(_check_ip, 120, 3, 3):
                ifconfig = session.cmd_output("ifconfig -a")
                raise error.TestFail("New hotplugged device could not get ip "
                                     "after 120s in guest. guest ifconfig "
                                     "output: \n%s" % ifconfig)
            try:
                session.cmd(params["pci_test_cmd"] % (pci_num + 1))
            except aexpect.ShellError, e:
                raise error.TestFail("Check device failed after PCI "
                                     "hotplug. Output: %r" % e.output)

        except Exception:
            pci_del(pci_num, ignore_failure=True)
            raise

    # Hot delete a pci device
    def pci_del(pci_num, ignore_failure=False):
        def _device_removed():
            after_del = vm.monitor.info("pci")
            return after_del != before_del

        before_del = vm.monitor.info("pci")
        if cmd_type == "pci_add":
            slot_id = "0" + pci_info[pci_num][1].split(",")[2].split()[1]
            cmd = "pci_del pci_addr=%s" % slot_id
            vm.monitor.send_args_cmd(cmd, convert=False)
        elif cmd_type == "device_add":
            cmd = "device_del id=%s" % pci_info[pci_num][0]
            vm.monitor.send_args_cmd(cmd)

        if (not utils_misc.wait_for(_device_removed, test_timeout, 0, 1) and
                not ignore_failure):
            raise error.TestFail("Failed to hot remove PCI device: %s. "
                                 "Monitor command: %s" %
                                 (pci_model, cmd))

    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    timeout = int(params.get("login_timeout", 360))
    session = vm.wait_for_serial_login(timeout=timeout)

    test_timeout = int(params.get("test_timeout", 360))
    # Test if it is nic or block
    pci_num_range = int(params.get("pci_num", 1))
    rp_times = int(params.get("repeat_times", 1))
    pci_model = params.get("pci_model", "pci-assign")
    # Need to update match_string if you use a card other than 82576
    match_string = params.get("match_string", "82576")
    generate_mac = params.get("generate_mac", "yes")
    nic_filter = params["nic_interface_filter"]
    devices = []
    device_type = params.get("hotplug_device_type", "vf")
    for i in xrange(pci_num_range):
        device = {}
        device["type"] = device_type
        if generate_mac == "yes":
            device['mac'] = utils_net.generate_mac_address_simple()
        if params.get("device_name"):
            device["name"] = params.get("device_name")
        devices.append(device)
    device_driver = params.get("device_driver", "pci-assign")
    if vm.pci_assignable is None:
        vm.pci_assignable = test_setup.PciAssignable(
            driver=params.get("driver"),
            driver_option=params.get("driver_option"),
            host_set_flag=params.get("host_setup_flag"),
            kvm_params=params.get("kvm_default"),
            vf_filter_re=params.get("vf_filter_re"),
            pf_filter_re=params.get("pf_filter_re"),
            device_driver=device_driver)

    pa_pci_ids = vm.pci_assignable.request_devs(devices)
    # Modprobe the module if specified in config file
    module = params.get("modprobe_module")
    if module:
        error.context("modprobe the module %s" % module, logging.info)
        session.cmd("modprobe %s" % module)

    # Probe qemu to verify what is the supported syntax for PCI hotplug
    if vm.monitor.protocol == 'qmp':
        cmd_o = vm.monitor.info("commands")
    else:
        cmd_o = vm.monitor.send_args_cmd("help")

    cmd_type = utils_misc.find_substring(str(cmd_o), "device_add", "pci_add")
    if not cmd_o:
        raise error.TestError("Unknown version of qemu")

    local_functions = locals()

    if params.get("enable_set_link", "yes") == "yes":
        error.context("Disable the primary link(s) of guest", logging.info)
        for nic in vm.virtnet:
            vm.set_link(nic.device_id, up=False)

    try:
        for j in range(rp_times):
            # pci_info is a list of lists.
            # each element 'i' has two members:
            # pci_info[i][0] == device id, only used for device_add
            # pci_info[i][1] == output of the device add command
            pci_info = []
            for pci_num in xrange(pci_num_range):
                msg = "Start hot-adding %sth pci device," % (pci_num + 1)
                msg += " repeat %d" % (j + 1)
                error.context(msg, logging.info)
                add_device(pci_num)
            sub_type = params.get("sub_type_after_plug")
            if sub_type:
                error.context("Running sub test '%s' after hotplug" % sub_type,
                              logging.info)
                utils_test.run_virt_sub_test(test, params, env, sub_type)
                if "guest_suspend" == sub_type:
                    # Hotplugged devices have been released after guest suspend,
                    # so the unplug step is not needed.
                    break
            for pci_num in xrange(pci_num_range):
                msg = "start hot-deleting %sth pci device," % (pci_num + 1)
                msg += " repeat %d" % (j + 1)
                error.context(msg, logging.info)
                pci_del(-(pci_num + 1))
    finally:
        if params.get("enable_set_link", "yes") == "yes":
            error.context("Re-enabling the primary link(s) of guest",
                          logging.info)
            for nic in vm.virtnet:
                vm.set_link(nic.device_id, up=True)
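
add_device() above repeatedly polls the guest through helpers such as _new_shown(), _find_pci() and _check_ip() via utils_misc.wait_for(). Below is a generic, standalone sketch of that polling pattern with logging.debug on every attempt; wait_for here is a local stand-in, not the virt-test utility of the same name.

import logging
import time

logging.basicConfig(level=logging.DEBUG)

def wait_for(func, timeout, first=0.0, step=1.0):
    # Poll func() until it returns a truthy value or timeout expires,
    # logging every attempt for post-mortem debugging.
    time.sleep(first)
    end_time = time.time() + timeout
    while time.time() < end_time:
        result = func()
        logging.debug("wait_for: %s returned %r", func.__name__, result)
        if result:
            return result
        time.sleep(step)
    return None

# Example: wait (briefly) for a condition that never becomes true.
wait_for(lambda: False, timeout=2, step=0.5)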

Example 127

Project: inspectors-general
Source File: agriculture.py
View license
def report_from(result, page_url, year_range, report_type, agency_slug="agriculture"):
  published_on = None

  try:
    # Try to find the link with text first. Sometimes there are hidden links
    # (no text) that we want to ignore.
    link = result.find_all("a", text=True)[0]
  except IndexError:
    # If none of the links have text, try the first one with an image
    for temp in result.find_all("a"):
      if temp.img:
        link = temp
        break
    # Fallback: pick the first link
    else:
      link = result.find_all("a")[0]
  report_url = urljoin(page_url, link.get('href').strip())

  if result.name == 'li':
    title = link.text.strip()
  elif result.name == 'tr':
    # Remove the date and parenthetical metadata from the result, and save
    # the date for later. What's left will be the title.
    published_on_element = result.strong.extract()
    if result.em:
      while result.em:
        result.em.extract()
      title = result.text.strip()
    else:
      title = result.text
      title = title[:title.find('(')].strip()

    published_on_text = published_on_element.text.strip().rstrip(":")
    for date_format in DATE_FORMATS:
      try:
        published_on = datetime.datetime.strptime(published_on_text, date_format)
      except ValueError:
        pass

  # Normalize titles
  title = title.rstrip(",")
  if title.endswith("(PDF)"):
    title = title[:-5]
  if title.endswith("(PDF), (Report No: 30601-01-HY, Size: 847,872 bytes)"):
    title = title[:-52]
  title = title.rstrip(" ")
  title = title.replace("..", ".")
  title = title.replace("  ", " ")
  title = title.replace("REcovery", "Recovery")
  title = title.replace("Directy ", "Direct ")
  if title == title.upper():
    title = title.title()

  # These entries on the IG page have the wrong URLs associated with them. The
  # correct URLs were guessed or retrieved from an earlier version of the page,
  # via the Internet Archive Wayback Machine.
  if report_url == "http://www.usda.gov/oig/webdocs/IGtestimony110302.pdf" and \
      title == "Statement Of Phyllis K. Fong Inspector General: Before The " \
      "House Appropriations Subcommittee On Agriculture, Rural Development, " \
      "Food And Drug Administration And Related Agencies":
    report_url = "http://www.usda.gov/oig/webdocs/Testimonybudgt-2004.pdf"
  elif report_url == "http://www.usda.gov/oig/webdocs/Ebt.PDF" and \
      title == "Statement Of Roger C. Viadero: Before The U.S. House Of " \
      "Representatives Committee On Agriculture Subcommittee On Department " \
      "Operations, Oversight, Nutrition, And Forestry on the Urban Resources " \
      "Partnership Program":
    report_url = "http://www.usda.gov/oig/webdocs/URP-Testimony.PDF"
  elif report_url == "http://www.usda.gov/oig/webdocs/foodaidasst.PDF" and \
      title == "Testimony Of Roger C. Viadero: Before The United States " \
      "Senate Committee On Agriculture, Nutrition, And Forestry On The " \
      "Department's Processing Of Civil Rights Complaints":
    report_url = "http://www.usda.gov/oig/webdocs/IGstestimony.PDF"
  elif report_url == "http://www.usda.gov/oig/webdocs/34601-10-TE.pdf" and \
      title == "Rural Housing Service Single Family Housing Program - Maine":
    report_url = "http://www.usda.gov/oig/webdocs/04004-05-Hy.pdf"
  elif report_url == "http://www.usda.gov/oig/webdocs/04004-05-Hy.pdf" and \
      title == "Rural Development\u2019s Processing of Loan Guarantees to " \
      "Member of the Western Sugar Cooperative":
    report_url = "http://www.usda.gov/oig/webdocs/34601-03-Ch.pdf"
  elif report_url == "http://www.usda.gov/oig/webdocs/60801-%7E1.pdf" and \
      title == "Evaluation of the Office of Civil Rights\u2019 Efforts to " \
      "Reduce the Backlog of Program Complaints":
    report_url = "http://www.usda.gov/oig/webdocs/60801-1-HQ.pdf"
  elif report_url == "http://www.usda.gwebdocs/34703-0001-31.pdf":
    report_url = "http://www.usda.gov/oig/webdocs/34703-0001-31.pdf"

  # This report is listed twice on the same page with slightly different titles
  if title == "Animal and Plant Health Inspection Service Transition and " \
      "Coordination of Border Inspection Activities Between USDA and DHS":
    return

  report_filename = report_url.split("/")[-1]
  report_id = os.path.splitext(report_filename)[0]

  # Differentiate between two letters on the same report
  if report_url == "http://www.usda.gov/oig/webdocs/34099-12-TE.pdf":
    report_id = "34099-12-Te_1"
  elif report_url == "http://www.usda.gov/oig/webdocs/34099-12-Te.pdf":
    report_id = "34099-12-Te_2"

  if title == "American Recovery and Reinvestment Act - Emergency Watershed " \
      "Protection Program Floodplain Easements" and report_id == "10703-1-KC":
    return

  # These are just summary versions of other reports. Skip for now.
  if '508 Compliant Version' in title:
    return

  if report_id in REPORT_PUBLISHED_MAPPING:
    published_on = REPORT_PUBLISHED_MAPPING[report_id]
  if not published_on:
    try:
      # This is for the investigation reports
      published_on = datetime.datetime.strptime(result.text.strip(), '%B %Y (PDF)')
      title = "Investigation Bulletins {}".format(result.text.strip())
    except ValueError:
      pass
  if not published_on:
    published_on_text = result.text.split()[0].strip()
    for date_format in DATE_FORMATS:
      try:
        published_on = datetime.datetime.strptime(published_on_text, date_format)
      except ValueError:
        pass

  if published_on and published_on.year not in year_range:
    logging.debug("[%s] Skipping, not in requested range." % report_url)
    return

  if report_id in LOWER_PDF_REPORT_IDS:
    report_url = ".".join([report_url.rsplit(".", 1)[0], 'pdf'])

  report = {
    'inspector': 'agriculture',
    'inspector_url': 'http://www.usda.gov/oig/',
    'agency': agency_slug.lower(),
    'agency_name': AGENCY_NAMES.get(agency_slug, 'Department of Agriculture'),
    'report_id': report_id,
    'url': report_url,
    'title': title,
    'type': report_type,
    'published_on': datetime.datetime.strftime(published_on, "%Y-%m-%d"),
  }
  return report
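
report_from() above tries several DATE_FORMATS in turn and emits a logging.debug message when a report falls outside the requested year range. Here is a minimal sketch of that parse-then-filter pattern; the DATE_FORMATS list and the sample values are illustrative, not taken from the project above.

import datetime
import logging

logging.basicConfig(level=logging.DEBUG)

DATE_FORMATS = ["%m/%d/%Y", "%B %d, %Y", "%Y-%m-%d"]

def parse_date(text):
    # Try each known format; the first one that parses wins.
    for date_format in DATE_FORMATS:
        try:
            return datetime.datetime.strptime(text, date_format)
        except ValueError:
            pass
    return None

def keep_report(url, date_text, year_range):
    published_on = parse_date(date_text)
    if published_on is None or published_on.year not in year_range:
        logging.debug("[%s] Skipping, not in requested range." % url)
        return False
    return True

keep_report("http://example.gov/report.pdf", "03/15/2009", range(2010, 2016))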

Example 128

Project: tp-libvirt
Source File: virsh_iface.py
View license
def run(test, params, env):
    """
    Test virsh interface related commands.

    (1) If using a given existing interface for testing (eg. lo or ethX):
        1.1 Dumpxml for the interface(with --inactive option)
        1.2 Destroy the interface
        1.3 Undefine the interface
    (2) Define an interface from XML file
    (3) List interfaces with '--inactive' option
    (4) Start the interface
    (5) List interfaces with no option
    (6) Dumpxml for the interface
    (7) Get interface MAC address by interface name
    (8) Get interface name by interface MAC address
    (9) Delete the interface if not using an existing interface for testing
        9.1 Destroy the interface
        9.2 Undefine the interface

    Caveat: this test may affect the host network, so the loopback (lo)
    device is used by default. You can specify the interface you want, but be
    careful.
    """

    iface_name = params.get("iface_name", "ENTER.BRIDGE.NAME")
    iface_xml = params.get("iface_xml")
    iface_type = params.get("iface_type", "ethernet")
    iface_pro = params.get("iface_pro", "")
    iface_eth = params.get("iface_eth", "")
    iface_tag = params.get("iface_tag", "0")
    if iface_type == "vlan":
        iface_name = iface_eth + "." + iface_tag
    iface_eth_using = "yes" == params.get("iface_eth_using", "no")
    ping_ip = params.get("ping_ip", "localhost")
    use_exist_iface = "yes" == params.get("use_exist_iface", "no")
    status_error = "yes" == params.get("status_error", "no")
    net_restart = "yes" == params.get("iface_net_restart", "no")
    list_dumpxml_acl = "yes" == params.get("list_dumpxml_acl", "no")
    if ping_ip.count("ENTER"):
        raise error.TestNAError("Please input a valid ip address")
    if iface_name.count("ENTER"):
        raise error.TestNAError("Please input an existing bridge/ethernet name")

    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user', "EXAMPLE")
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = 'testacl'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current"
                                    " libvirt version.")

    virsh_dargs = {'debug': True}
    list_dumpxml_dargs = {'debug': True}
    if params.get('setup_libvirt_polkit') == 'yes':
        if not list_dumpxml_acl:
            virsh_dargs['uri'] = uri
            virsh_dargs['unprivileged_user'] = unprivileged_user
        else:
            list_dumpxml_dargs['uri'] = uri
            list_dumpxml_dargs['unprivileged_user'] = unprivileged_user
            list_dumpxml_dargs['ignore_status'] = False

    # acl api negative testing params
    write_save_status_error = "yes" == params.get("write_save_status_error",
                                                  "no")
    start_status_error = "yes" == params.get("start_status_error", "no")
    stop_status_error = "yes" == params.get("stop_status_error", "no")
    delete_status_error = "yes" == params.get("delete_status_error", "no")

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    if vm:
        xml_bak = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    iface_script = NETWORK_SCRIPT + iface_name
    iface_script_bk = os.path.join(test.tmpdir, "iface-%s.bk" % iface_name)
    net_bridge = utils_net.Bridge()
    if use_exist_iface:
        if iface_type == "bridge":
            if iface_name not in net_bridge.list_br():
                raise error.TestError("Bridge '%s' does not exist" % iface_name)
            ifaces = net_bridge.get_structure()[iface_name]
            if len(ifaces) < 1:
                # In this situation, dhcp may not be able to get an ip address.
                # Unless a static address is used, we'd better skip such a case.
                raise error.TestNAError("Bridge '%s' has no interface"
                                        " bridged, perhaps cannot get"
                                        " ipaddress" % iface_name)
    net_iface = utils_net.Interface(name=iface_name)
    iface_is_up = True
    list_option = "--all"
    if use_exist_iface:
        if not libvirt.check_iface(iface_name, "exists", "--all"):
            raise error.TestError("Interface '%s' does not exist" % iface_name)
        iface_xml = os.path.join(test.tmpdir, "iface.xml.tmp")
        iface_is_up = net_iface.is_up()
    else:
        # Note: if not using an interface which already exists, iface_name must
        # be equal to the value specified in the XML file
        if libvirt.check_iface(iface_name, "exists", "--all"):
            raise error.TestError("Interface '%s' already exists" % iface_name)
        if not iface_xml:
            raise error.TestError("XML file is needed.")
        iface_xml = os.path.join(test.tmpdir, iface_xml)
        create_xml_file(iface_xml, params)

    # Stop NetworkManager as it may conflict with virsh iface commands
    try:
        NM = utils_path.find_command("NetworkManager")
    except utils_path.CmdNotFoundError:
        logging.debug("No NetworkManager service.")
        NM = None
    NM_is_running = False
    if NM is not None:
        NM_service = service.Factory.create_service("NetworkManager")
        NM_is_running = NM_service.status()
        if NM_is_running:
            NM_service.stop()

    # run test cases
    try:
        if use_exist_iface:
            # back up the interface script
            utils.run("cp %s %s" % (iface_script, iface_script_bk))
            # step 1.1
            # dumpxml for interface
            if list_dumpxml_acl:
                virsh.iface_list(**list_dumpxml_dargs)
            xml = virsh.iface_dumpxml(iface_name, "--inactive",
                                      to_file=iface_xml,
                                      **list_dumpxml_dargs)
            # Step 1.2
            # Destroy interface
            if iface_is_up:
                result = virsh.iface_destroy(iface_name, **virsh_dargs)
                if (params.get('setup_libvirt_polkit') == 'yes' and
                        stop_status_error):
                    # acl_test negative test
                    libvirt.check_exit_status(result, stop_status_error)
                    virsh.iface_destroy(iface_name, debug=True)
                else:
                    libvirt.check_exit_status(result, status_error)

            # Step 1.3
            # Undefine interface
            result = virsh.iface_undefine(iface_name, **virsh_dargs)
            if (params.get('setup_libvirt_polkit') == 'yes' and
                    delete_status_error):
                # acl_test negative test
                libvirt.check_exit_status(result, delete_status_error)
                virsh.iface_undefine(iface_name, debug=True)
            else:
                libvirt.check_exit_status(result, status_error)
            if not status_error:
                if libvirt.check_iface(iface_name, "exists", list_option):
                    raise error.TestFail("%s is still present." % iface_name)

        # Step 2
        # Define interface
        result = virsh.iface_define(iface_xml, **virsh_dargs)
        if (params.get('setup_libvirt_polkit') == 'yes' and
                write_save_status_error):
            # acl_test negative test
            libvirt.check_exit_status(result, write_save_status_error)
            virsh.iface_define(iface_xml, debug=True)
        elif iface_type == "bond" and not ping_ip:
            libvirt.check_exit_status(result, True)
            return
        else:
            libvirt.check_exit_status(result, status_error)

        if net_restart:
            network = service.Factory.create_service("network")
            network.restart()

        # After network restart, the (ethernet) interface will be started
        if (not net_restart and iface_type in ("bridge", "ethernet")) or\
           (not use_exist_iface and iface_type in ("vlan", "bond")):
            # Step 3
            # List inactive interfaces
            list_option = "--inactive"
            if not status_error:
                if not libvirt.check_iface(iface_name, "exists", list_option):
                    raise error.TestFail("Fail to find %s." % iface_name)

            # Step 4
            # Start interface
            result = virsh.iface_start(iface_name, **virsh_dargs)
            if (params.get('setup_libvirt_polkit') == 'yes' and
                    start_status_error):
                # acl_test negative test
                libvirt.check_exit_status(result, start_status_error)
                virsh.iface_start(iface_name, debug=True)
            elif (not net_restart and not use_exist_iface and
                    (iface_type == "ethernet" and iface_pro in ["", "dhcp"] or
                        iface_type == "bridge" and iface_pro == "dhcp")):
                libvirt.check_exit_status(result, True)
            else:
                libvirt.check_exit_status(result, status_error)
            if not status_error:
                iface_ip = net_iface.get_ip()
                ping_ip = ping_ip if not iface_ip else iface_ip
                if ping_ip:
                    if not libvirt.check_iface(iface_name, "ping", ping_ip):
                        raise error.TestFail("Ping %s fail." % ping_ip)

        # Step 5
        # List active interfaces
        if use_exist_iface or\
           (iface_pro != "dhcp" and iface_type == "bridge") or\
           (iface_eth_using and iface_type == "vlan"):
            list_option = ""
            if not status_error:
                if not libvirt.check_iface(iface_name, "exists", list_option):
                    raise error.TestFail("Fail to find %s in active "
                                         "interface list" % iface_name)
            if vm:
                if vm.is_alive():
                    vm.destroy()
                iface_index = 0
                iface_mac_list = vm_xml.VMXML.get_iface_dev(vm_name)
                # Before test, detach all interfaces in guest
                for mac in iface_mac_list:
                    iface_info = vm_xml.VMXML.get_iface_by_mac(vm_name, mac)
                    type = iface_info.get('type')
                    virsh.detach_interface(vm_name,
                                           "--type %s --mac %s"
                                           " --config" % (type, mac))
                    # After detaching an interface, vm.virtnet also needs
                    # updating; the easy way is to free these mac addresses
                    # before starting the VM
                    vm.free_mac_address(iface_index)
                    iface_index += 1
                virsh.attach_interface(vm_name,
                                       "--type %s --source %s"
                                       " --config" % (iface_type, iface_name))
                vm.start()
                try:
                    # Test if guest can be login
                    vm.wait_for_login()
                except remote.LoginError:
                    raise error.TestFail("Cannot login guest with %s" %
                                         iface_name)

        # Step 6
        # Dumpxml for interface
        if list_dumpxml_acl:
            virsh.iface_list(**list_dumpxml_dargs)
        xml = virsh.iface_dumpxml(iface_name, "", to_file="",
                                  **list_dumpxml_dargs)
        logging.debug("Interface '%s' XML:\n%s", iface_name, xml)

        # Step 7
        # Get interface MAC address by name
        result = virsh.iface_mac(iface_name, debug=True)
        libvirt.check_exit_status(result, status_error)
        if not status_error and result.stdout.strip():
            if not libvirt.check_iface(iface_name, "mac",
                                       result.stdout.strip()):
                raise error.TestFail("Mac address check fail")

        # Step 8
        # Get interface name by MAC address
        # A bridge's mac is equal to the bridged interface's mac
        if iface_type not in ("bridge", "vlan") and result.stdout.strip():
            iface_mac = net_iface.get_mac()
            result = virsh.iface_name(iface_mac, debug=True)
            libvirt.check_exit_status(result, status_error)

        # Step 9
        if not use_exist_iface:
            # Step 9.0
            # check if interface's state is active before destroy
            if libvirt.check_iface(iface_name, "state", "--all"):
                # Step 9.1
                # Destroy interface
                result = virsh.iface_destroy(iface_name, **virsh_dargs)
                if (params.get('setup_libvirt_polkit') == 'yes' and
                        stop_status_error):
                    # acl_test negative test
                    libvirt.check_exit_status(result, stop_status_error)
                    virsh.iface_destroy(iface_name, debug=True)
                elif (not net_restart and iface_type == "ethernet" and
                        iface_pro in ["", "dhcp"] or iface_type == "bridge" and
                        iface_pro == "dhcp"):
                    libvirt.check_exit_status(result, True)
                else:
                    libvirt.check_exit_status(result, status_error)

            # Step 9.2
            # Undefine interface
            result = virsh.iface_undefine(iface_name, **virsh_dargs)
            if (params.get('setup_libvirt_polkit') == 'yes' and
                    delete_status_error):
                # acl_test negative test
                libvirt.check_exit_status(result, delete_status_error)
                virsh.iface_undefine(iface_name, debug=True)
            else:
                libvirt.check_exit_status(result, status_error)
            list_option = "--all"
            if not status_error:
                if libvirt.check_iface(iface_name, "exists", list_option):
                    raise error.TestFail("%s is still present." % iface_name)
    finally:
        if os.path.exists(iface_xml):
            os.remove(iface_xml)
        if os.path.exists(iface_script):
            os.remove(iface_script)

        if use_exist_iface:
            if not os.path.exists(iface_script):
                utils.run("mv %s %s" % (iface_script_bk, iface_script))
            if iface_is_up and\
               not libvirt.check_iface(iface_name, "exists", ""):
                # Need to reload the script
                utils.run("ifup %s" % iface_name)
            elif not iface_is_up and libvirt.check_iface(iface_name,
                                                         "exists", ""):
                net_iface.down()
            if vm:
                xml_bak.sync()
        else:
            if libvirt.check_iface(iface_name, "exists", "--all"):
                # Remove the interface
                try:
                    utils_net.bring_down_ifname(iface_name)
                except utils_net.TAPBringDownError:
                    pass
            if iface_type == "bridge":
                if iface_name in net_bridge.list_br():
                    try:
                        net_bridge.del_bridge(iface_name)
                    except IOError:
                        pass
        if NM_is_running:
            NM_service.start()
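
The test above looks up the NetworkManager binary, stops the service during the run, and restores it in the finally block; a missing binary is only reported through logging.debug. Below is a standalone sketch of that detect-then-restore shape using Python 3's shutil.which instead of the virt-test path and service helpers; stop_service() and start_service() are hypothetical placeholders, not a real service-control API.

import logging
import shutil

logging.basicConfig(level=logging.DEBUG)

def stop_service(name):
    # Placeholder: a real test would call its service-control layer here.
    logging.debug("Would stop service: %s", name)

def start_service(name):
    # Placeholder: restore whatever was stopped above.
    logging.debug("Would restart service: %s", name)

def run_with_networkmanager_stopped(test_body):
    nm_path = shutil.which("NetworkManager")
    if nm_path is None:
        logging.debug("No NetworkManager service.")
    was_stopped = False
    try:
        if nm_path is not None:
            stop_service("NetworkManager")
            was_stopped = True
        test_body()
    finally:
        # Always restore the original state, even if the test body failed.
        if was_stopped:
            start_service("NetworkManager")

run_with_networkmanager_stopped(lambda: logging.debug("running test body"))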

Example 129

Project: virt-test
Source File: qemu_virtio_port.py
View license
    def run_debug(self):
        """
        viz run_normal.
        Additionally it stores the last n verified characters and, in
        case of failure, quickly receives enough data to verify the failure or
        allowed loss and then analyzes this data. It provides more info about
        the situation.
        Unlike the normal run this one supports both loss and duplication.
        It's not friendly to data corruption.
        """
        logging.debug("ThRecvCheck %s: run", self.getName())
        attempt = 10
        max_loss = 0
        sum_loss = 0
        verif_buf = deque(maxlen=max(self.blocklen, self.sendlen))
        while not self.exitevent.isSet():
            ret = select.select([self.port.sock], [], [], 1.0)
            if ret[0] and (not self.exitevent.isSet()):
                buf = self.port.sock.recv(self.blocklen)
                if buf:
                    # Compare the received data with the control data
                    for idx_char in xrange(len(buf)):
                        _char = self.buff.popleft()
                        if buf[idx_char] == _char:
                            self.idx += 1
                            verif_buf.append(_char)
                        else:
                            # Detect the duplicated/lost characters.
                            logging.debug("ThRecvCheck %s: fail to receive "
                                          "%dth character.", self.getName(),
                                          self.idx)
                            buf = buf[idx_char:]
                            for i in xrange(100):
                                if len(self.buff) < self.sendidx:
                                    time.sleep(0.01)
                                else:
                                    break
                            sendidx = min(self.sendidx, len(self.buff))
                            if sendidx < self.sendidx:
                                logging.debug("ThRecvCheck %s: sendidx was "
                                              "lowered as there is not enough "
                                              "data after 1s. Using sendidx="
                                              "%s.", self.getName(), sendidx)
                            for _ in xrange(sendidx / self.blocklen):
                                if self.exitevent.isSet():
                                    break
                                buf += self.port.sock.recv(self.blocklen)
                            queue = _char
                            for _ in xrange(sendidx):
                                queue += self.buff[_]
                            offset_a = None
                            offset_b = None
                            for i in xrange(sendidx):
                                length = min(len(buf[i:]), len(queue))
                                if buf[i:] == queue[:length]:
                                    offset_a = i
                                    break
                            for i in xrange(sendidx):
                                length = min(len(queue[i:]), len(buf))
                                if queue[i:][:length] == buf[:length]:
                                    offset_b = i
                                    break

                            if (offset_b and offset_b < offset_a) or offset_a:
                                # Data duplication
                                self.sendidx -= offset_a
                                max_loss = max(max_loss, offset_a)
                                sum_loss += offset_a
                                logging.debug("ThRecvCheck %s: DUP %s (out of "
                                              "%s)", self.getName(), offset_a,
                                              sendidx)
                                buf = buf[offset_a + 1:]
                                for _ in xrange(len(buf)):
                                    self.buff.popleft()
                                verif_buf.extend(buf)
                                self.idx += len(buf)
                            elif offset_b:  # Data loss
                                max_loss = max(max_loss, offset_b)
                                sum_loss += offset_b
                                logging.debug("ThRecvCheck %s: LOST %s (out of"
                                              " %s)", self.getName(), offset_b,
                                              sendidx)
                                # Pop-out the lost characters from verif_queue
                                # (first one is already out)
                                self.sendidx -= offset_b
                                for i in xrange(offset_b - 1):
                                    self.buff.popleft()
                                for _ in xrange(len(buf)):
                                    self.buff.popleft()
                                self.idx += len(buf)
                                verif_buf.extend(buf)
                            else:   # Too big data loss or duplication
                                verif = ""
                                for _ in xrange(-min(sendidx, len(verif_buf)),
                                                0):
                                    verif += verif_buf[_]
                                logging.error("ThRecvCheck %s: mismatched data"
                                              ":\nverified: ..%s\nreceived:   "
                                              "%s\nsent:       %s",
                                              self.getName(), repr(verif),
                                              repr(buf), repr(queue))
                                raise error.TestFail("Recv and sendqueue "
                                                     "don't match with any offset.")
                            # buf was changed, break from this loop
                            attempt = 10
                            break
                    attempt = 10
                else:   # ! buf
                    # Broken socket
                    if attempt > 0:
                        attempt -= 1
                        if self.migrate_event is None:
                            self.exitevent.set()
                            raise error.TestFail("ThRecvCheck %s: Broken pipe."
                                                 " If this is expected behavior set migrate"
                                                 "_event to support reconnection." %
                                                 self.getName())
                        logging.debug("ThRecvCheck %s: Broken pipe"
                                      ", reconnecting. ", self.getName())
                        self.reload_loss_idx()
                        # Wait until main thread sets the new self.port
                        while not (self.exitevent.isSet() or
                                   self.migrate_event.wait(1)):
                            pass
                        if self.exitevent.isSet():
                            break
                        logging.debug("ThRecvCheck %s: Broken pipe resumed, "
                                      "reconnecting...", self.getName())

                        self.port.sock = False
                        self.port.open()
        if self.sendidx >= 0:
            self.minsendidx = min(self.minsendidx, self.sendidx)
        if (self.sendlen - self.minsendidx):
            logging.debug("ThRecvCheck %s: Data loss occurred during socket "
                          "reconnection. Maximal loss was %d per one "
                          "migration.", self.getName(),
                          (self.sendlen - self.minsendidx))
        if sum_loss > 0:
            logging.debug("ThRecvCheck %s: Data offset detected, cumulative "
                          "err: %d, max err: %d(%d)", self.getName(), sum_loss,
                          max_loss, float(max_loss) / self.blocklen)
        logging.debug("ThRecvCheck %s: exit(%d)", self.getName(),
                      self.idx)
        self.ret_code = 0
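
run_debug() above keeps the last verified characters in a bounded deque so that, on a mismatch, it can report how far the streams drifted with logging.debug. The following tiny standalone sketch shows that bounded-history idea; the expected/received byte streams are simulated and the helper is not part of the project above.

import logging
from collections import deque

logging.basicConfig(level=logging.DEBUG)

def verify_stream(expected, received, history=16):
    # Keep only the last few verified bytes; enough context to report
    # a mismatch without holding the whole stream in memory.
    verif_buf = deque(maxlen=history)
    for idx, (want, got) in enumerate(zip(expected, received)):
        if want == got:
            verif_buf.append(got)
            continue
        logging.debug("mismatch at index %d: expected %r, got %r; "
                      "last verified: %r", idx, want, got, bytes(verif_buf))
        return idx
    return None

# Simulated streams: the received copy drops one byte in the middle.
verify_stream(b"abcdefghij", b"abcdeghij")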

Example 130

Project: virt-manager
Source File: ovf.py
View license
def _import_file(doc, ctx, conn, input_file):
    ignore = doc
    def xpath_str(path):
        ret = ctx.xpathEval(path)
        result = None
        if ret is not None:
            if type(ret) == list:
                if len(ret) >= 1:
                    result = ret[0].content
            else:
                result = ret
        return result

    def bool_val(val):
        if str(val).lower() == "false":
            return False
        elif str(val).lower() == "true":
            return True

        return False

    def xpath_nodechildren(path):
        # Return the children of the first node found by the xpath
        nodes = ctx.xpathEval(path)
        if not nodes:
            return []
        return node_list(nodes[0])

    def _lookup_disk_path(path):
        fmt = "vmdk"
        ref = None

        def _path_has_prefix(prefix):
            if path.startswith(prefix):
                return path[len(prefix):]
            if path.startswith("ovf:" + prefix):
                return path[len("ovf:" + prefix):]
            return False

        if _path_has_prefix("/disk/"):
            disk_ref = _path_has_prefix("/disk/")
            xpath = (_make_section_xpath(envbase, "DiskSection") +
                "/ovf:Disk[@ovf:diskId='%s']" % disk_ref)

            if not ctx.xpathEval(xpath):
                raise ValueError(_("Unknown disk reference id '%s' "
                                   "for path %s.") % (path, disk_ref))

            ref = xpath_str(xpath + "/@ovf:fileRef")

        elif _path_has_prefix("/file/"):
            ref = _path_has_prefix("/file/")

        else:
            raise ValueError(_("Unknown storage path type %s.") % path)

        xpath = (envbase + "/ovf:References/ovf:File[@ovf:id='%s']" % ref)

        if not ctx.xpathEval(xpath):
            raise ValueError(_("Unknown reference id '%s' "
                "for path %s.") % (ref, path))

        return xpath_str(xpath + "/@ovf:href"), fmt

    is_ovirt_format = False
    envbase = "/ovf:Envelope[1]"
    vsbase = envbase + "/ovf:VirtualSystem"
    if not ctx.xpathEval(vsbase):
        vsbase = envbase + "/ovf:Content[@xsi:type='ovf:VirtualSystem_Type']"
        is_ovirt_format = True

    def _make_section_xpath(base, section_name):
        if is_ovirt_format:
            return (base +
                    "/ovf:Section[@xsi:type='ovf:%s_Type']" % section_name)
        return base + "/ovf:%s" % section_name

    osbase = _make_section_xpath(vsbase, "OperatingSystemSection")
    vhstub = _make_section_xpath(vsbase, "VirtualHardwareSection")

    if not ctx.xpathEval(vsbase):
        raise RuntimeError("Did not find any VirtualSystem section")
    if not ctx.xpathEval(vhstub):
        raise RuntimeError("Did not find any VirtualHardwareSection")
    vhbase = vhstub + "/ovf:Item[rasd:ResourceType='%s']"

    # General info
    name = xpath_str(vsbase + "/ovf:Name")
    desc = xpath_str(vsbase + "/ovf:AnnotationSection/ovf:Annotation")
    if not desc:
        desc = xpath_str(vsbase + "/ovf:Description")
    vcpus = xpath_str((vhbase % DEVICE_CPU) + "/rasd:VirtualQuantity")
    sockets = xpath_str((vhbase % DEVICE_CPU) + "/rasd:num_of_sockets")
    cores = xpath_str((vhbase % DEVICE_CPU) + "/rasd:num_of_cores")
    mem = xpath_str((vhbase % DEVICE_MEMORY) + "/rasd:VirtualQuantity")
    alloc_mem = xpath_str((vhbase % DEVICE_MEMORY) +
        "/rasd:AllocationUnits")

    os_id = xpath_str(osbase + "/@id")
    os_version = xpath_str(osbase + "/@version")
    # This is the VMWare OS name
    os_vmware = xpath_str(osbase + "/@osType")

    logging.debug("OS parsed as: id=%s version=%s vmware=%s",
        os_id, os_version, os_vmware)

    # Sections that we handle
    # NetworkSection is ignored, since I don't have an example of
    # a valid section in the wild.
    parsed_sections = ["References", "DiskSection", "NetworkSection",
        "VirtualSystem"]

    # Check for unhandled 'required' sections
    for env_node in xpath_nodechildren(envbase):
        if env_node.name in parsed_sections:
            continue
        elif env_node.isText():
            continue

        logging.debug("Unhandled XML section '%s'",
                      env_node.name)

        if not bool_val(env_node.prop("required")):
            continue
        raise StandardError(_("OVF section '%s' is listed as "
                              "required, but parser doesn't know "
                              "how to handle it.") %
                              env_node.name)

    disk_buses = {}
    for node in ctx.xpathEval(vhbase % DEVICE_IDE_BUS):
        instance_id = _get_child_content(node, "InstanceID")
        disk_buses[instance_id] = "ide"
    for node in ctx.xpathEval(vhbase % DEVICE_SCSI_BUS):
        instance_id = _get_child_content(node, "InstanceID")
        disk_buses[instance_id] = "scsi"

    ifaces = []
    for node in ctx.xpathEval(vhbase % DEVICE_ETHERNET):
        iface = virtinst.VirtualNetworkInterface(conn)
        # XXX: Just ignore 'source' info and choose the default
        net_model = _get_child_content(node, "ResourceSubType")
        if net_model and not net_model.isdigit():
            iface.model = net_model.lower()
        iface.set_default_source()
        ifaces.append(iface)

    disks = []
    for node in ctx.xpathEval(vhbase % DEVICE_DISK):
        bus_id = _get_child_content(node, "Parent")
        path = _get_child_content(node, "HostResource")

        bus = disk_buses.get(bus_id, "ide")
        fmt = "raw"

        if path:
            path, fmt = _lookup_disk_path(path)

        disk = virtinst.VirtualDisk(conn)
        disk.path = path
        disk.driver_type = fmt
        disk.bus = bus
        disk.device = "disk"
        disks.append(disk)


    # XXX: Convert these OS values to something useful
    ignore = os_version
    ignore = os_id
    ignore = os_vmware

    guest = conn.caps.lookup_virtinst_guest()
    guest.installer = virtinst.ImportInstaller(conn)

    if not name:
        name = os.path.basename(input_file)

    guest.name = name.replace(" ", "_")
    guest.description = desc or None
    if vcpus:
        guest.vcpus = int(vcpus)
    elif sockets or cores:
        if sockets:
            guest.cpu.sockets = int(sockets)
        if cores:
            guest.cpu.cores = int(cores)
        guest.cpu.vcpus_from_topology()

    if mem:
        guest.memory = _convert_alloc_val(alloc_mem, mem) * 1024

    for dev in ifaces + disks:
        guest.add_device(dev)

    return guest
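
_import_file() above logs the parsed OS fields and any unhandled OVF sections at DEBUG level, passing the values as arguments rather than pre-formatting the string. A minimal sketch of that pattern; the values and section names below are hypothetical:

import logging

logging.basicConfig(level=logging.DEBUG)

# Hypothetical values standing in for the xpath lookups performed above.
os_id, os_version, os_vmware = "80", "6", "rhel6_64Guest"
logging.debug("OS parsed as: id=%s version=%s vmware=%s",
              os_id, os_version, os_vmware)

for section in ("ProductSection", "EulaSection"):   # hypothetical names
    logging.debug("Unhandled XML section '%s'", section)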

Example 131

Project: f90wrap
Source File: latex.py
View license
    def visit_Interface(self, node):

        if node.doc:
            if node.doc[0].strip() == 'OMIT':
                return

            if node.doc[0].strip() == 'OMIT SHORT':
                if self.short_doc:
                    return
                else:
                    node.doc = node.doc[1:]



        #        self.print_line( r"""\rule{\textwidth}{0.5pt}""")

        if len(node.procedures) == 0:
            return

        if any([isinstance(proc, ft.Prototype) for proc in node.procedures]):
            logging.debug('Skipping interface %s as some procedures were not found' % node.name)
            return

        n_sub = sum([isinstance(proc, ft.Subroutine) for proc in node.procedures])
        n_func = sum([isinstance(proc, ft.Function) for proc in node.procedures])

        if n_sub == len(node.procedures):
            is_sub = True
        elif n_func == len(node.procedures):
            is_sub = False
        else:
            raise ValueError('mixture of subroutines and functions in interface %s' % node.name)

        self.print_line(r'\index{general}{' + node.name + ' interface}')

        self.print_line(self.sections[self.depth] + r'{Interface \texttt{' + node.name + '}}')

        #        self.print_line(self.sections[depth+1]+r"""{Usage}""")

        printed_args = []
        #        self.print_line(r'\begin{boxedminipage}{\textwidth}')
        for sub in node.procedures:

            if sub.arguments != []:
                argl = '('
                for a in range(len(sub.arguments)):
                    arg = sub.arguments[a]
                    if isinstance(arg, ft.Declaration) and 'optional' in arg.attributes:
                        if argl[-2:] == '],':
                            argl = argl[:-2] + ',' + arg.name.rstrip() + '],'
                        elif argl.rstrip()[-4:] == '], &':
                            argl = argl.rstrip()[:-4] + ', &\n                        ' + arg.name.rstrip() + '],'
                        elif argl[-1] == ',':
                            argl = argl[:-1] + '[,' + arg.name.rstrip() + '],'
                        else:
                            argl = argl + '[' + arg.name.rstrip() + '],'
                    else:
                        argl = argl + arg.name.rstrip() + ','
                    if (a + 1) % 4 == 0.0 and a + 1 != len(sub.arguments):
                        argl = argl + ' &\n                        '
                argl = argl[:-1] + ')'
            else:
                argl = ''

            if not is_sub and sub.ret_val.name != sub.name:
                hash_value = argl
            else:
                hash_value = argl

            if hash_value in printed_args:
                continue

            printed_args.append(hash_value)

            if not is_sub:
                ret_name = sub.ret_val.name
                if ret_name.lower() == node.name.lower() or ret_name.lower() == sub.name.lower():
                    ret_name = ret_name[0].lower() + str(node.procedures.index(sub) + 1)
                self.print_line('>    ' + ret_name + ' = ' + node.name + argl)
            else:
                self.print_line('>    call ' + node.name + argl)
                #        self.print_line(r'\end{boxedminipage}'+'\n\n')


        for a in node.doc:
            self.print_line(a)

        for sub in node.procedures:
            for a in sub.doc:
                self.print_line(a)
            self.print_line('\n\n')

        got_args = (is_sub and sum([len(x.arguments) for x in node.procedures]) != 0) or not is_sub

        func_args = []
        if got_args:
            self.print_line(r'\begin{description}')


            arg_dict = {}
            i = 0  # counter for appearance order of args
            for sub in node.procedures:
                for a in sub.arguments:
                    if isinstance(a, ft.Subroutine) or isinstance(a, Function):
                        func_args.append(a)
                        continue
                    i = i + 1
                    if arg_dict.has_key(a.name):
                        if a.type.lower() + str(sorted(map(string.lower, a.attributes))) in \
                           [x[0].type.lower() + str(sorted(map(string.lower, x[0].attributes))) for x in arg_dict[a.name]]:
                            pass  # already got this name/type/attribute combo
                        else:
                            arg_dict[a.name].append((a, i))

                    else:
                        arg_dict[a.name] = [(a, i)]

            # Combine multiple types with the same name
            for name in arg_dict:
                types = [x[0].type for x in arg_dict[name]]
                types = uniq(types, string.lower)
                attr_lists = [x[0].attributes for x in arg_dict[name]]
                attributes = []

                contains_dimension = [ len([x for x in y if x.find('dimension') != -1]) != 0 for y in attr_lists ]

                for t in attr_lists:
                    attributes.extend(t)
                attributes = uniq(attributes, string.lower)

                dims = [x for x in attributes if x.find('dimension') != -1]
                attributes = [x for x in attributes if x.find('dimension') == -1]

                # If some attribute lists contain 'dimension' and some don't, then
                # there are scalars in there as well.
                if True in contains_dimension and False in contains_dimension:
                    dims.insert(0, 'scalar')


                if (len(dims) != 0):
                    attributes.append(' \emph{or} '.join(dims))

                a = arg_dict[name][0][0]
                a.type = types  # r' \emph{or} '.join(types)
                a.attributes = attributes
                arg_dict[name] = (a, arg_dict[name][0][1])


            # Combine names with the same type, attributes and doc string
            rev_dict = {}
            for type, name in zip([ str([y.lower for y in x[0].type]) + \
                                     str([y.lower for y in x[0].attributes]) + str(x[0].doc) \
                                     for x in arg_dict.values() ], arg_dict.keys()):
                if rev_dict.has_key(type):
                    rev_dict[type].append(arg_dict[name])
                else:
                    rev_dict[type] = [arg_dict[name]]

            for k in rev_dict:
                names = [x[0].name for x in rev_dict[k]]
                a = rev_dict[k][0][0]
                names.sort(key=lambda x: arg_dict[x][1])

                # Split into pieces of max length 30 chars
                alist = []
                while names:
                    n = 0
                    length = 0
                    while (length < 30 and n < len(names)):
                        length = length + len(names[n])
                        n = n + 1
                    ns = names[:n]
                    del names[:n]
                    b = copy.copy(a)
                    b.name = ', '.join(ns)
                    alist.append(b)

                rev_dict[k] = (alist, min([x[1] for x in rev_dict[k]]))

            # Sort by original appearance order of first name
            keys = rev_dict.keys()
            keys.sort(key=lambda x: rev_dict[x][1])

            for k in keys:
                for a in rev_dict[k][0]:
                    self.visit(a)

            for f in func_args:
                self.compact = True
                self.visit(f)
                self.compact = False


        if not is_sub:
            #            self.print_line(self.sections[depth+1]+"{Return value --- ",)

            ret_types = [a.ret_val.type + str(a.ret_val.attributes) for a in node.procedures]

            if len(filter(lambda x: x != node.procedures[0].ret_val.type + str(node.procedures[0].ret_val.attributes), \
                          ret_types)) == 0:

                self.print_line(r"\item[Return value --- ",)
                self.is_ret_val = True
                self.visit(node.procedures[0].ret_val)
                self.is_ret_val = False
                self.print_line("]")
                for a in node.procedures[0].ret_val_doc:
                    self.print_line(a)
            else:
                self.print_line(r"\item[Return values:]\mbox{} \par\noindent")
                self.print_line(r'\begin{description}')
                for f in node.procedures:
                    shortname = f.ret_val.name[0].lower() + str(node.procedures.index(f) + 1)
                    self.print_line(r"\item[\texttt{" + shortname + "} --- ")
                    self.is_ret_val = True
                    self.visit(f.ret_val)
                    self.is_ret_val = False
                    self.print_line(']')
                    for a in f.ret_val_doc:
                        self.print_line(a)
                self.print_line(r'\end{description}')



        if got_args:
            self.print_line(r"\end{description}")

Example 132

Project: Verum
Source File: titan.py
View license
    def query(self, topic, max_depth=4, config=None, pivot_on=list(), dont_pivot_on=list(['enrichment', 'classification']), direction='successors'):
        """

            :param topic: a graph to return the context of. At least one node ID in topic \
             must be in full graph g to return any context.
            :param max_depth: The maximum distance from the topic to search
            :param config: The titanDB configuration to use if not using the one configured with the plugin
            :param pivot_on: A list of attribute types to pivot on.
            :param dont_pivot_on: A list of attribute types to not pivot on.
            :param direction: The direction to traverse the graph
            :return: subgraph in networkx format

            NOTE: If an attribute is in both pivot_on and dont_pivot_on it will not be pivoted on
        """
        if config is None:
            config = self.titandb_config

        # Connect to TitanDB Database
        titan_graph = TITAN_Graph(config)

        # Convert the topic nodes into titanDB eids
        current_nodes = set()
        eid_uri_map = {}
        # Validate the node URI
        for node in topic.nodes():
            titan_node = titan_graph.vertices.index.get_unique("uri", topic.node[node]["uri"])
            if titan_node:
                current_nodes.add(titan_node.eid)
                eid_uri_map[titan_node.eid] = node
        topic_nodes = frozenset(current_nodes)
        subgraph_nodes = current_nodes
        #sg = copy.deepcopy(topic)
        sg = nx.MultiDiGraph()
        sg.add_nodes_from(topic.nodes(data=True))
        sg.add_edges_from(topic.edges(data=True))
        distances = {node: 0 for node in topic.nodes()}
    #    Below 1 line is probably not necessary
    #    pivot_edges = list()
    #    print "Initial current Nodes: {0}".format(current_nodes)  # DEBUG
        for i in range(1, max_depth + 1):
            new_nodes = set()
            new_out_edges = set()
            new_in_edges = set()
            for eid in current_nodes:
    #            properties = og.node[node]
                node = titan_graph.vertices.get(eid)
                # If all directions, get all neighbors
                if direction == 'all' or eid in topic_nodes:
                    try:
                        new_nodes = new_nodes.union({n.eid for n in titan_graph.gremlin.query("g.v({0}).both".format(eid))})
                    except:
                        pass
                    try:
                        new_out_edges = new_out_edges.union({n.eid for n in titan_graph.gremlin.query(
                                                        "g.v({0}).outE".format(eid))})
                    except:
                        pass
                    try:
                        new_in_edges = new_in_edges.union({n.eid for n in titan_graph.gremlin.query(
                                                        "g.v({0}).inE".format(eid))})
                    except:
                        pass
                # If there is a list of things to NOT pivot on, pivot on everything else
                elif dont_pivot_on and 'attribute' in node and node.map()['attribute'] not in dont_pivot_on:
                    try:
                        new_nodes = new_nodes.union({n.eid for n in titan_graph.gremlin.query("g.v({0}).both".format(eid))})
                    except:
                        pass
                    try:
                        new_out_edges = new_out_edges.union({n.eid for n in titan_graph.gremlin.query(
                                                        "g.v({0}).outE".format(eid))})
                    except:
                        pass
                    try:
                        new_in_edges = new_in_edges.union({n.eid for n in titan_graph.gremlin.query(
                                                        "g.v({0}).inE".format(eid))})
                    except:
                        pass
                # Otherwise, only get all neighbors if the node is to be pivoted on.
                elif 'attribute' in node and \
                      node['attribute'] in pivot_on and \
                      node['attribute'] not in dont_pivot_on:
                    try:
                        new_nodes = new_nodes.union({n.eid for n in titan_graph.gremlin.query("g.v({0}).both".format(eid))})
                    except:
                        pass
                    try:
                        new_out_edges = new_out_edges.union({n.eid for n in titan_graph.gremlin.query(
                                                        "g.v({0}).outE".format(eid))})
                    except:
                        pass
                    try:
                        new_in_edges = new_in_edges.union({n.eid for n in titan_graph.gremlin.query(
                                                        "g.v({0}).inE".format(eid))})
                    except:
                        pass
                # If not all neighbors and not in pivot, if we are traversing up, get predecessors
                elif direction == 'predecessors':
                    # add edges to make predecessors successors for later probability calculation
                    try:
                        new_nodes = new_nodes.union({n.eid for n in titan_graph.gremlin.query("g.v({0}).out".format(eid))})
                    except:
                        pass
                    # add the reverse edges. The opposites of these edges will get placed in the subgraph
                    try:
                        new_in_edges = new_in_edges.union({n.eid for n in titan_graph.gremlin.query(
                                                        "g.v({0}).inE".format(eid))})
                    except:
                        pass
                # Otherwise assume we are traversing down and get all successors
                else:  # default to successors
                    try:
                        new_nodes = new_nodes.union({n.eid for n in titan_graph.gremlin.query("g.v({0}).both".format(eid))})
                    except:
                        pass
                    try:
                        new_out_edges = new_out_edges.union({n.eid for n in titan_graph.gremlin.query(
                                                        "g.v({0}).outE".format(eid))})
                    except:
                        pass

            # Remove nodes from new_nodes that are already in the subgraph so we don't overwrite their topic distance
            current_nodes = new_nodes - subgraph_nodes
            # combine the new nodes into the subgraph nodes set
            subgraph_nodes = subgraph_nodes.union(current_nodes)

            # Copy nodes, out-edges, in-edges, and reverse in-edges into subgraph
            # Add nodes
            for neighbor_eid in new_nodes:
                attr = titan_graph.vertices.get(neighbor_eid).map()
                sg.add_node(attr['uri'], attr)
                eid_uri_map[neighbor_eid] = attr['uri']
            # Add predecessor edges
            for out_eid in new_out_edges:
                out_edge = titan_graph.edges.get(out_eid)
                attr = out_edge.map()
                sg.add_edge(eid_uri_map[out_edge._outV], eid_uri_map[out_edge._inV], out_eid, attr)
            # Add successor edges & reverse pivot edges
            for in_eid in new_in_edges:
                in_edge = titan_graph.edges.get(in_eid)
                attr = in_edge.map()
                attr['origin'] = "subgraph_creation_pivot"
                sg.add_edge(eid_uri_map[in_edge._inV], eid_uri_map[in_edge._outV], in_eid, attr)

            # Set the distance from the topic on the nodes in the graph
            for eid in current_nodes:
                if eid_uri_map[eid] not in distances:
                    distances[eid_uri_map[eid]] = i
    #        logging.debug("Current nodes: {0}".format(current_nodes))  # DEBUG

        # add the distances to the subgraph
        nx.set_node_attributes(sg, "topic_distance", distances)

        logging.debug(nx.info(sg))  # DEBUG
        # Return the subgraph
        return sg
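
query() above finishes by handing a whole graph summary, nx.info(sg), to logging.debug; nx.info() comes from the older networkx releases this plugin targets (it was removed in networkx 3.0). A minimal sketch of logging a comparable summary with version-stable calls; the node names are hypothetical:

import logging
import networkx as nx

logging.basicConfig(level=logging.DEBUG)

sg = nx.MultiDiGraph()
sg.add_edge("malware1", "ip1")      # hypothetical context nodes
sg.add_edge("ip1", "domain1")

logging.debug("subgraph: %d nodes, %d edges",
              sg.number_of_nodes(), sg.number_of_edges())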

Example 133

Project: hiyapyco
Source File: __init__.py
View license
    def __init__(self, *args, **kwargs):
        """
        args: YAMLfile(s)
        kwargs:
          * method: one of hiyapyco.METHOD_SIMPLE | hiyapyco.METHOD_MERGE
          * interpolate: boolean (default: False)
          * castinterpolated: boolean (default: False) try to cast values after interpolating
          * usedefaultyamlloader: boolean (default: False)
          * loglevel: one of the valid levels from the logging module
          * failonmissingfiles: boolean (default: True)
          * loglevelmissingfiles

        Returns a representation of the merged and (if requested) interpolated config.
        Will mostly be an OrderedDict (dict if usedefaultyamlloader), but can be of any other type, depending on the yaml files.
        """
        self._data = None
        self._files = []

        self.method = None
        if 'method' in kwargs:
            logging.debug('parse kwarg method: %s ...' % kwargs['method'])
            if kwargs['method'] not in METHODS.values():
                raise HiYaPyCoInvocationException(
                        'undefined method used, must be one of: %s' %
                        ' '.join(METHODS.keys())
                    )
            self.method = kwargs['method']
            del kwargs['method']
        if self.method == None:
            self.method = METHOD_SIMPLE

        self.interpolate = False
        self.castinterpolated = False
        if 'interpolate' in kwargs:
            if not isinstance(kwargs['interpolate'], bool):
                raise HiYaPyCoInvocationException(
                        'value of "interpolate" must be boolean (got: "%s" as %s)' %
                        (kwargs['interpolate'], type(kwargs['interpolate']),)
                        )
            self.interpolate = kwargs['interpolate']
            del kwargs['interpolate']
            if 'castinterpolated' in kwargs:
                if not isinstance(kwargs['castinterpolated'], bool):
                    raise HiYaPyCoInvocationException(
                            'value of "castinterpolated" must be boolean (got: "%s" as %s)' %
                            (kwargs['castinterpolated'], type(kwargs['castinterpolated']),)
                        )
                self.castinterpolated = kwargs['castinterpolated']
                del kwargs['castinterpolated']

        if 'usedefaultyamlloader' in kwargs:
            if not isinstance(kwargs['usedefaultyamlloader'], bool):
                raise HiYaPyCoInvocationException(
                        'value of "usedefaultyamlloader" must be boolean (got: "%s" as %s)' %
                        (kwargs['usedefaultyamlloader'], type(kwargs['usedefaultyamlloader']),)
                        )
            global _usedefaultyamlloader
            _usedefaultyamlloader = kwargs['usedefaultyamlloader']
            del kwargs['usedefaultyamlloader']

        self.failonmissingfiles = True
        if 'failonmissingfiles' in kwargs:
            if not isinstance(kwargs['failonmissingfiles'], bool):
                raise HiYaPyCoInvocationException(
                        'value of "failonmissingfiles" must be boolean (got: "%s" as %s)' %
                        (kwargs['failonmissingfiles'], type(kwargs['failonmissingfiles']),)
                        )
            self.failonmissingfiles = bool(kwargs['failonmissingfiles'])
            del kwargs['failonmissingfiles']

        self.loglevelonmissingfiles = logging.ERROR
        if not self.failonmissingfiles:
            self.loglevelonmissingfiles = logging.WARN
        if 'loglevelmissingfiles' in kwargs:
            logging.getLogger('testlevellogger').setLevel(kwargs['loglevelmissingfiles'])
            del kwargs['loglevelmissingfiles']

        if 'loglevel' in kwargs:
            logger.setLevel(kwargs['loglevel'])
            del kwargs['loglevel']

        if kwargs:
            raise HiYaPyCoInvocationException('undefined keywords: %s' % ' '.join(kwargs.keys()))

        if not args:
            raise HiYaPyCoInvocationException('no yaml files defined')

        for arg in args:
            self._updatefiles(arg)

        for yamlfile in self._files[:]:
            logger.debug('yamlfile: %s ...' % yamlfile)
            if '\n' in yamlfile:
                logger.debug('loading yaml doc from str ...')
                f = yamlfile
            else:
                fn = yamlfile
                if not os.path.isabs(yamlfile):
                    fn = os.path.join(os.getcwd(), yamlfile)
                    logger.debug('path extended for yamlfile: %s' % fn)
                try:
                    f = open(fn, 'r')
                    logger.debug('open4reading: file %s' % f)
                except IOError as e:
                    logger.log(self.loglevelonmissingfiles, e)
                    if not fn == yamlfile:
                        logger.log(self.loglevelonmissingfiles,
                                'file not found: %s (%s)' % (yamlfile, fn,))
                    else:
                        logger.log(self.loglevelonmissingfiles,
                                'file not found: %s' % yamlfile)
                    if self.failonmissingfiles:
                        raise HiYaPyCoInvocationException(
                                'yaml file not found: \'%s\'' % yamlfile
                            )
                    self._files.remove(yamlfile)
                    continue
            if _usedefaultyamlloader:
                ydata = yaml.safe_load(f)
            else:
                ydata = odyldo.safe_load(f)
            if logger.isEnabledFor(logging.DEBUG):
                logger.debug('yaml data: %s' % ydata)
            if self._data is None:
                self._data = ydata
            else:
                if self.method == METHOD_SIMPLE:
                    self._data = self._simplemerge(self._data, ydata)
                else:
                    self._data = self._deepmerge(self._data, ydata)
                logger.debug('merged data: %s' % self._data)

        if self.interpolate:
            self._data = self._interpolate(self._data)
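
Besides the root-level logging.debug call for the method keyword, this constructor uses a module logger and guards the dump of the loaded YAML with logger.isEnabledFor(logging.DEBUG), so the potentially large repr is only built when needed. A minimal sketch of that guard; the data below is hypothetical:

import logging

logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG)

ydata = {"first": 1, "second": {"nested": True}}   # hypothetical parsed YAML

if logger.isEnabledFor(logging.DEBUG):
    logger.debug('yaml data: %s' % ydata)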

Example 134

Project: tp-libvirt
Source File: virsh_pool.py
View license
def run(test, params, env):
    """
    Test the virsh pool commands

    (1) Define a given type pool
    (2) List pool with '--inactive --type' options
    (3) Dumpxml for the pool
    (4) Undefine the pool
    (5) Define pool by using the XML file in step (3)
    (6) Build the pool (except 'disk' type pools)
        For 'fs' type pools, cover --overwrite and --no-overwrite options
    (7) Start the pool
    (8) List pool with '--persistent --type' options
    (9) Mark pool autostart
    (10) List pool with '--autostart --type' options
    (11) Restart libvirtd and list pool with '--autostart --persistent' options
    (12) Destroy the pool
    (13) Unmark pool autostart
    (14) Repeat step (11)
    (15) Start the pool
    (16) Get pool info
    (17) Get pool uuid by name
    (18) Get pool name by uuid
    (19) Refresh the pool
         For 'dir' type pool, touch a file under target path and refresh again
         to make the new file show in vol-list.
    (20) Check pool 'Capacity', 'Allocation' and 'Available'
         Create an over-size vol in pool (expect fail), then check these values
    (21) Undefine the pool, and this should fail as pool is still active
    (22) Destroy the pool
    (23) Delete pool for 'dir' type pool. After the command, the pool object
         will still exist but target path will be deleted
    (24) Undefine the pool
    """

    # Initialize the variables
    pool_name = params.get("pool_name", "temp_pool_1")
    pool_type = params.get("pool_type", "dir")
    pool_target = params.get("pool_target", "")
    source_format = params.get("source_format", "")
    source_name = params.get("pool_source_name", "gluster-vol1")
    source_path = params.get("pool_source_path", "/")
    # The file for dumped pool xml
    pool_xml = os.path.join(test.tmpdir, "pool.xml.tmp")
    if os.path.dirname(pool_target) is "":
        pool_target = os.path.join(test.tmpdir, pool_target)
    vol_name = params.get("vol_name", "temp_vol_1")
    # Use pool name as VG name
    status_error = "yes" == params.get("status_error", "no")
    vol_path = os.path.join(pool_target, vol_name)
    ip_protocal = params.get('ip_protocal', 'ipv4')

    if not libvirt_version.version_compare(1, 0, 0):
        if pool_type == "gluster":
            raise error.TestNAError("Gluster pool is not supported in current"
                                    " libvirt version.")

    def check_pool_list(pool_name, option="--all", expect_error=False):
        """
        Check pool by running pool-list command with given option.

        :param pool_name: Name of the pool
        :param option: option for pool-list command
        :param expect_error: Boolean value, expect command success or fail
        """
        found = False
        # Get the list stored in a variable
        result = virsh.pool_list(option, ignore_status=True)
        utlv.check_exit_status(result, False)
        output = re.findall(r"(\S+)\ +(\S+)\ +(\S+)[\ +\n]",
                            str(result.stdout))
        for item in output:
            if pool_name in item[0]:
                found = True
                break
        if found:
            logging.debug("Find pool '%s' in pool list.", pool_name)
        else:
            logging.debug("Not find pool %s in pool list.", pool_name)
        if expect_error and found:
            raise error.TestFail("Unexpected pool '%s' exists." % pool_name)
        if not expect_error and not found:
            raise error.TestFail("Expected pool '%s' doesn't exist." % pool_name)

    def check_vol_list(vol_name, pool_name):
        """
        Check volume from the list

        :param vol_name: Name of the volume
        :param pool_name: Name of the pool
        """
        found = False
        # Get the volume list stored in a variable
        result = virsh.vol_list(pool_name, ignore_status=True)
        utlv.check_exit_status(result)

        output = re.findall(r"(\S+)\ +(\S+)[\ +\n]", str(result.stdout))
        for item in output:
            if vol_name in item[0]:
                found = True
                break
        if found:
            logging.debug(
                "Find volume '%s' in pool '%s'.", vol_name, pool_name)
        else:
            raise error.TestFail(
                "Not find volume '%s' in pool '%s'." %
                (vol_name, pool_name))

    def is_in_range(actual, expected, error_percent):
        deviation = 100 - (100 * (float(actual) / float(expected)))
        logging.debug("Deviation: %0.2f%%", float(deviation))
        return float(deviation) <= float(error_percent)

    def check_pool_info(pool_info, check_point, value):
        """
        Check the pool name, uuid, etc.

        :param pool_info: A dict include pool's information
        :param check_point: Key of pool info dict, available value: Name, UUID, State
                    Persistent, Autostart, Capacity, Allocation, Available
        :param value: Expect value of pool_info[key]
        """
        if pool_info is None:
            raise error.TestFail("Pool info dictionary is needed.")
        val_tup = ('Capacity', 'Allocation', 'Available')
        if check_point in val_tup and float(value.split()[0]):
            # As from bytes to GiB, could cause deviation, and it should not
            # exceed 1 percent.
            if is_in_range(float(pool_info[check_point].split()[0]),
                           float(value.split()[0]), 1):
                logging.debug("Pool '%s' is '%s'.", check_point, value)
            else:
                raise error.TestFail("Pool '%s' isn't '%s'." %
                                     (check_point, value))
        else:
            if pool_info[check_point] == value:
                logging.debug("Pool '%s' is '%s'.", check_point, value)
            else:
                raise error.TestFail("Pool '%s' isn't '%s'." %
                                     (check_point, value))

    # Stop multipathd to avoid pool start failures (for fs-like pools, the newly
    # added disk may be in use by device-mapper, so starting the pool will
    # report a "disk already mounted" error).
    multipathd = service.Factory.create_service("multipathd")
    multipathd_status = multipathd.status()
    if multipathd_status:
        multipathd.stop()

    # Run Testcase
    pvt = utlv.PoolVolumeTest(test, params)
    emulated_image = "emulated-image"
    kwargs = {'image_size': '1G', 'pre_disk_vol': ['1M'],
              'source_name': source_name, 'source_path': source_path,
              'source_format': source_format, 'persistent': True,
              'ip_protocal': ip_protocal}
    try:
        _pool = libvirt_storage.StoragePool()
        # Step (1)
        # Pool define
        pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image,
                     **kwargs)

        # Step (2)
        # Pool list
        option = "--inactive --type %s" % pool_type
        check_pool_list(pool_name, option)

        # Step (3)
        # Pool dumpxml
        xml = virsh.pool_dumpxml(pool_name, to_file=pool_xml)
        logging.debug("Pool '%s' XML:\n%s", pool_name, xml)

        # Step (4)
        # Undefine pool
        result = virsh.pool_undefine(pool_name, ignore_status=True)
        utlv.check_exit_status(result)
        check_pool_list(pool_name, "--all", True)

        # Step (5)
        # Define pool from XML file
        result = virsh.pool_define(pool_xml)
        utlv.check_exit_status(result, status_error)

        # Step (6)
        # Build pool, this step may fail for 'disk' and 'logical' type pools
        if pool_type not in ["disk", "logical"]:
            option = ""
        # Options --overwrite and --no-overwrite can only be used to
        # build a filesystem pool, but it will fail for now
            # if pool_type == "fs":
            #    option = '--overwrite'
            result = virsh.pool_build(pool_name, option, ignore_status=True)
            utlv.check_exit_status(result)

        # Step (7)
        # Pool start
        result = virsh.pool_start(pool_name, ignore_status=True)
        utlv.check_exit_status(result)

        # Step (8)
        # Pool list
        option = "--persistent --type %s" % pool_type
        check_pool_list(pool_name, option)

        # Step (9)
        # Pool autostart
        result = virsh.pool_autostart(pool_name, ignore_status=True)
        utlv.check_exit_status(result)

        # Step (10)
        # Pool list
        option = "--autostart --type %s" % pool_type
        check_pool_list(pool_name, option)

        # Step (11)
        # Restart libvirtd and check the autostart pool
        utils_libvirtd.libvirtd_restart()
        option = "--autostart --persistent"
        check_pool_list(pool_name, option)

        # Step (12)
        # Pool destroy
        if virsh.pool_destroy(pool_name):
            logging.debug("Pool %s destroyed.", pool_name)
        else:
            raise error.TestFail("Destroy pool % failed." % pool_name)

        # Step (13)
        # Pool autostart disable
        result = virsh.pool_autostart(pool_name, "--disable",
                                      ignore_status=True)
        utlv.check_exit_status(result)

        # Step (14)
        # Repeat step (11)
        utils_libvirtd.libvirtd_restart()
        option = "--autostart"
        check_pool_list(pool_name, option, True)

        # Step (15)
        # Pool start
        # When libvirtd starts up, it'll check to see if any of the storage
        # pools have been activated externally. If so, then it'll mark the
        # pool as active. This is independent of autostart.
        # So a directory based storage pool is thus pretty much always active,
        # and so is the SCSI pool.
        if pool_type not in ["dir", 'scsi']:
            result = virsh.pool_start(pool_name, ignore_status=True)
            utlv.check_exit_status(result)

        # Step (16)
        # Pool info
        pool_info = _pool.pool_info(pool_name)
        logging.debug("Pool '%s' info:\n%s", pool_name, pool_info)

        # Step (17)
        # Pool UUID
        result = virsh.pool_uuid(pool_info["Name"], ignore_status=True)
        utlv.check_exit_status(result)
        check_pool_info(pool_info, "UUID", result.stdout.strip())

        # Step (18)
        # Pool Name
        result = virsh.pool_name(pool_info["UUID"], ignore_status=True)
        utlv.check_exit_status(result)
        check_pool_info(pool_info, "Name", result.stdout.strip())

        # Step (19)
        # Pool refresh for 'dir' type pool
        if pool_type == "dir":
            os.mknod(vol_path)
            result = virsh.pool_refresh(pool_name)
            utlv.check_exit_status(result)
            check_vol_list(vol_name, pool_name)

        # Step (20)
        # Create an over-size vol in pool (expect fail), then check pool:
        # 'Capacity', 'Allocation' and 'Available'
        # For NFS type pools, there's a bug (BZ#1077068) about allocating volumes,
        # and glusterfs pools don't support creating volumes, so skip them here
        if pool_type != "netfs":
            vol_capacity = "10000G"
            vol_allocation = "10000G"
            result = virsh.vol_create_as("oversize_vol", pool_name,
                                         vol_capacity, vol_allocation, "raw")
            utlv.check_exit_status(result, True)
            new_info = _pool.pool_info(pool_name)
            check_pool_info(pool_info, "Capacity", new_info['Capacity'])
            check_pool_info(pool_info, "Allocation", new_info['Allocation'])
            check_pool_info(pool_info, "Available", new_info['Available'])

        # Step (21)
        # Undefine pool, this should fail as the pool is active
        result = virsh.pool_undefine(pool_name, ignore_status=True)
        utlv.check_exit_status(result, expect_error=True)
        check_pool_list(pool_name, "", False)

        # Step (22)
        # Pool destroy
        if virsh.pool_destroy(pool_name):
            logging.debug("Pool %s destroyed.", pool_name)
        else:
            raise error.TestFail("Destroy pool % failed." % pool_name)

        # Step (23)
        # Pool delete for 'dir' type pool
        if pool_type == "dir":
            for f in os.listdir(pool_target):
                os.remove(os.path.join(pool_target, f))
            result = virsh.pool_delete(pool_name, ignore_status=True)
            utlv.check_exit_status(result)
            option = "--inactive --type %s" % pool_type
            check_pool_list(pool_name, option)
            if os.path.exists(pool_target):
                raise error.TestFail("The target path '%s' still exist." %
                                     pool_target)
            result = virsh.pool_start(pool_name, ignore_status=True)
            utlv.check_exit_status(result, True)

        # Step (24)
        # Pool undefine
        result = virsh.pool_undefine(pool_name, ignore_status=True)
        utlv.check_exit_status(result)
        check_pool_list(pool_name, "--all", True)
    finally:
        # Clean up
        try:
            pvt.cleanup_pool(pool_name, pool_type, pool_target,
                             emulated_image, **kwargs)
        except error.TestFail, detail:
            logging.error(str(detail))
        if multipathd_status:
            multipathd.start()
        if os.path.exists(pool_xml):
            os.remove(pool_xml)
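
The helpers in this test report intermediate results at DEBUG level, for example is_in_range() logging the computed deviation with a precision format spec before returning. A minimal, standalone sketch of that helper; the sample numbers below are made up:

import logging

logging.basicConfig(level=logging.DEBUG)

def is_in_range(actual, expected, error_percent):
    # Compute the percentage deviation, log it with a printf-style precision
    # spec, then return the pass/fail boolean, as in the test above.
    deviation = 100 - (100 * (float(actual) / float(expected)))
    logging.debug("Deviation: %0.2f%%", deviation)
    return deviation <= float(error_percent)

print(is_in_range(10.35, 10.40, 1))   # logs "Deviation: 0.48%" and prints True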

Example 135

Project: pytrainer
Source File: osm.py
View license
    def createHtml_osm(self, polyline, startinfo, finishinfo, laps, attrlist, linetype):
        '''
        Generate OSM map html file using MapLayers
        '''
        logging.debug(">>")

        # try using local cached versions of JS files for faster rendering
        self.cacheUrls();
        
        content = '''<html>
        <head>
            <!-- bring in the OpenLayers javascript library
                 (here we bring it from the remote site, but you could
                 easily serve up this javascript yourself) -->
            <script src="''' + self.URLS['OpenLayers.js'] + '''"></script>
            <!-- bring in the OpenStreetMap OpenLayers layers.
                 Using this hosted file will make sure we are kept up
                 to date with any necessary changes -->
            <script src="''' + self.URLS['OpenStreetMap.js'] + '''"></script>

            <script type="text/javascript">
                //complex object of type OpenLayers.Map
                var map;

                //icons data object
                var icons = {
                    iconSize : new OpenLayers.Size(30,30)'''

        # If have laps data insert markers here
        try:
            lapsContent=''
            for lap in laps[:500]:  # OpenLayers with firefox is limited to 500 markers -> TODO: Transfer to a constant somewhere ?
                lapNumber = int(lap['lap_number'])+1
                elapsedTime = float(lap['elapsed_time'])
                elapsedTimeHours = int(elapsedTime/3600)
                elapsedTimeMins = int((elapsedTime - (elapsedTimeHours * 3600)) / 60)
                elapsedTimeSecs = elapsedTime - (elapsedTimeHours * 3600) - (elapsedTimeMins * 60)
                if elapsedTimeHours > 0:
                    strElapsedTime = "%0.0dh:%0.2dm:%0.2fs" % (elapsedTimeHours, elapsedTimeMins, elapsedTimeSecs)
                elif elapsedTimeMins > 0:
                    strElapsedTime = "%0.0dm:%0.2fs" % (elapsedTimeMins, elapsedTimeSecs)
                else:
                    strElapsedTime = "%0.0fs" % (elapsedTimeSecs)
                #process lat and lon for this lap
                lapLat = float(lap['end_lat'])
                lapLon = float(lap['end_lon'])
                #build laps content string
                lapsContent+=',\n'
                lapsContent+='\t\t\t\t\tlap%d: { url : "/waypoint.png", coordinates : [%f,%f], popupInfo: "%s" }' % \
                        (lapNumber, lapLon, lapLat, \
                        "<div class='info_content'>End of lap:%d<br>Elapsed time:%s<br>Distance:%0.2f km<br>Calories:%s</div>" % \
                            (lapNumber, strElapsedTime, float(lap['distance'])/1000, lap['calories'])
                        )
            content+=lapsContent
        except Exception as e:
            # If something breaks here just skip laps data
            logging.error('Error formatting laps data: ' + str(e))
        # Insert start/finish track markers
        content+=''',\n        start : { url : "/start.png", coordinates : %s, popupInfo : "%s" },
                    finish : { url : "/finish.png", coordinates : %s, popupInfo : "%s" },
                    url : "file://%s/glade"''' \
                    % (polyline[0], startinfo, polyline[-1], finishinfo, os.path.abspath(self.data_path))

        content+='''};\n
                function init() {

                // fool openlayers scripts so it will download images and themes from the web instead of local folder if cached
                OpenLayers.ImgPath="''' + self.staticURLS['OpenLayers'] + '''img/";
                OpenLayers.scriptLocation="''' + self.staticURLS['OpenLayers'] + '''";
                OpenLayers._getScriptLocation=function() { return "''' + self.staticURLS['OpenLayers'] + '''";};

                // for transforming WGS 1984 to Spherical Mercator Projection
                pWGS = new OpenLayers.Projection("EPSG:4326");
                pMP = new OpenLayers.Projection("EPSG:900913");

                map = new OpenLayers.Map ("map", {
                    controls:[
                        new OpenLayers.Control.Navigation(),
                        new OpenLayers.Control.PanZoomBar(),
                        new OpenLayers.Control.LayerSwitcher(),
                        new OpenLayers.Control.Attribution()],
                    maxExtent: new OpenLayers.Bounds(-20037508.34,-20037508.34,20037508.34,20037508.34),
                    maxResolution: 156543.0399,
                    numZoomLevels: 19,
                    units: 'm',
                    projection: pMP,
                    displayProjection: pWGS
                } );

                // Track painting style
                var trackStyle = {
                    strokeColor: "#33DDDD",
                    strokeWidth: 3,
                    strokeDashstyle: "solid",
                    strokeOpacity: 0.6,
                    pointRadius: 6,
                };

                //Build track object
                var track =
                    {
                    "type":"Feature",
                    "id":"OpenLayers.Feature.Vector_259",
                    "properties":{},
                    "geometry":
                    {
                        "type":"LineString",
                        "coordinates":
                            ['''
        #Insert track points here
        content+=",".join(polyline);
        content+=''']
                    },
                    "crs":
                        {
                        "type":"OGC",
                        "properties":
                            {
                            "urn":"urn:ogc:def:crs:OGC:1.3:CRS84"
                            }
                        }
                    }

                //Add open street maps layers
                layerMapnik = new OpenLayers.Layer.OSM.Mapnik("Mapnik");
                map.addLayer(layerMapnik);

                //Create vector layer to add the data on to
                var vector_layer = new OpenLayers.Layer.Vector();
                vector_layer.setName('Track');

                var geojson_format = new OpenLayers.Format.GeoJSON();
                var feature = geojson_format.read(track,"Feature");

                // transform from WGS 1984 to Spherical Mercator Projection
                feature.geometry.transform(pWGS, pMP);

                feature.geometry.calculateBounds();
                var vector=new OpenLayers.Feature.Vector();
                vector.geometry = feature.geometry;
                vector.style=trackStyle;

                vector_layer.addFeatures(vector);
                map.addLayer(vector_layer);

                // Insert start/finish markers
                layerMarkers = new OpenLayers.Layer.Markers("Markers");
                var offset = new OpenLayers.Pixel(-(icons.iconSize.w/2), -icons.iconSize.h);
                for (var i in icons) {
                    if (icons[i].coordinates) {
                        icons[i].icon = new OpenLayers.Icon(icons.url + icons[i].url,icons.iconSize,offset);
                        icons[i].lonLat = new OpenLayers.LonLat(icons[i].coordinates[0],icons[i].coordinates[1]);
                        icons[i].lonLat.transform(pWGS,pMP);
                        icons[i].marker = new OpenLayers.Marker(icons[i].lonLat,icons[i].icon);
                        icons[i].popup = new OpenLayers.Popup.FramedCloud("Info",
                                            icons[i].lonLat,
                                                          null,
                                                icons[i].popupInfo,
                                            icons[i].icon,
                                                true,
                                            null
                                            );
                        icons[i].onClick = function(e) { map.addPopup(this.popup); this.popup.show(); }
                        icons[i].marker.events.register("mousedown", icons[i], function(e) { this.onClick(e)} )
                        layerMarkers.addMarker(icons[i].marker);
                    }
                }
                map.addLayer(layerMarkers);

                //zoom and center to the track layouts
                map.zoomToExtent(feature.geometry.getBounds());

            }
        </script>

        </head>
        <!-- body.onload is called once the page is loaded (call the 'init' function) -->
        <body onload="init();">
            <!-- define a DIV into which the map will appear. Make it take up the whole window -->
            <div style="width:100%; height:100%" id="map"></div>
        </body>
        </html>
        '''
        file = fileUtils(self.htmlfile,content)
        file.run()
        logging.debug("<<")

Example 136

Project: asyncmongo
Source File: cursor.py
View license
    def find(self, spec=None, fields=None, skip=0, limit=0,
                 timeout=True, snapshot=False, tailable=False, sort=None,
                 max_scan=None, slave_okay=False,
                 _must_use_master=False, _is_command=False, hint=None, debug=False,
                 comment=None, callback=None):
        """Query the database.
        
        The `spec` argument is a prototype document that all results
        must match. For example:
        
        >>> db.test.find({"hello": "world"}, callback=...)
        
        only matches documents that have a key "hello" with value
        "world".  Matches can have other keys *in addition* to
        "hello". The `fields` argument is used to specify a subset of
        fields that should be included in the result documents. By
        limiting results to a certain subset of fields you can cut
        down on network traffic and decoding time.
        
        Raises :class:`TypeError` if any of the arguments are of
        improper type.
        
        :Parameters:
          - `spec` (optional): a SON object specifying elements which
            must be present for a document to be included in the
            result set
          - `fields` (optional): a list of field names that should be
            returned in the result set ("_id" will always be
            included), or a dict specifying the fields to return
          - `skip` (optional): the number of documents to omit (from
            the start of the result set) when returning the results
          - `limit` (optional): the maximum number of results to
            return
          - `timeout` (optional): if True, any returned cursor will be
            subject to the normal timeout behavior of the mongod
            process. Otherwise, the returned cursor will never timeout
            at the server. Care should be taken to ensure that cursors
            with timeout turned off are properly closed.
          - `snapshot` (optional): if True, snapshot mode will be used
            for this query. Snapshot mode assures no duplicates are
            returned, or objects missed, which were present at both
            the start and end of the query's execution. For details,
            see the `snapshot documentation
            <http://dochub.mongodb.org/core/snapshot>`_.
          - `tailable` (optional): the result of this find call will
            be a tailable cursor - tailable cursors aren't closed when
            the last data is retrieved but are kept open and the
            cursor's location marks the final document's position. If
            more data is received, iteration of the cursor will
            continue from the last document received. For details, see
            the `tailable cursor documentation
            <http://www.mongodb.org/display/DOCS/Tailable+Cursors>`_.
          - `sort` (optional): a list of (key, direction) pairs
            specifying the sort order for this query. See
            :meth:`~pymongo.cursor.Cursor.sort` for details.
          - `max_scan` (optional): limit the number of documents
            examined when performing the query
          - `slave_okay` (optional): is it okay to connect directly
            to and perform queries on a slave instance
        
        .. mongodoc:: find
        """
        
        if spec is None:
            spec = {}
        
        if limit is None:
            limit = 0

        if not isinstance(spec, dict):
            raise TypeError("spec must be an instance of dict")
        if not isinstance(skip, int):
            raise TypeError("skip must be an instance of int")
        if not isinstance(limit, int):
            raise TypeError("limit must be an instance of int or None")
        if not isinstance(timeout, bool):
            raise TypeError("timeout must be an instance of bool")
        if not isinstance(snapshot, bool):
            raise TypeError("snapshot must be an instance of bool")
        if not isinstance(tailable, bool):
            raise TypeError("tailable must be an instance of bool")
        if not callable(callback):
            raise TypeError("callback must be callable")
        
        if fields is not None:
            if not fields:
                fields = {"_id": 1}
            if not isinstance(fields, dict):
                fields = helpers._fields_list_to_dict(fields)
        
        self.__spec = spec
        self.__fields = fields
        self.__skip = skip
        self.__limit = limit
        self.__batch_size = 0
        
        self.__timeout = timeout
        self.__tailable = tailable
        self.__snapshot = snapshot
        self.__ordering = sort and helpers._index_document(sort) or None
        self.__max_scan = max_scan
        self.__slave_okay = slave_okay
        self.__explain = False
        self.__hint = hint
        self.__comment = comment
        self.__debug = debug
        # self.__as_class = as_class
        self.__tz_aware = False #collection.database.connection.tz_aware
        self.__must_use_master = _must_use_master
        self.__is_command = _is_command
        
        connection = self.__pool.connection()
        try:
            if self.__debug:
                logging.debug('QUERY_SPEC: %r' % self.__query_spec())

            connection.send_message(
                message.query(self.__query_options(),
                              self.full_collection_name,
                              self.__skip, 
                              self.__limit,
                              self.__query_spec(),
                              self.__fields), 
                callback=functools.partial(self._handle_response, orig_callback=callback))
        except Exception, e:
            logging.debug('Error sending query %s' % e)
            connection.close()
            raise
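
The example above only logs the query spec when the cursor was created with debug=True and DEBUG-level logging is enabled. A minimal sketch of that pattern using just the standard logging module (the spec dict shown is hypothetical):

import logging

logging.basicConfig(level=logging.DEBUG)

debug = True
query_spec = {"status": "active", "user": "alice"}  # hypothetical spec

if debug:
    # %r logs the exact repr of the spec, mirroring the QUERY_SPEC line above
    logging.debug('QUERY_SPEC: %r', query_spec)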

Example 137

Project: lnst
Source File: Netperf.py
View license
    def _run_client(self, cmd):
        logging.debug("running as client...")

        res_data = {}
        res_data["testname"] = self._testname

        rv = 0
        results = []
        rates = []
        for i in range(1, self._runs+1):
            if self._runs > 1:
                logging.info("Netperf starting run %d" % i)
            clients = []
            client_results = []
            for _ in range(0, self._num_parallel):
                clients.append(ShellProcess(cmd))

            for client in clients:
                ret_code = None
                try:
                    ret_code = client.wait()
                    rv += ret_code
                except OSError as e:
                    if e.errno == errno.EINTR:
                        client.kill()

                output = client.read_nonblocking()
                logging.debug(output)

                if ret_code is not None and ret_code == 0:
                    client_results.append(self._parse_output(output))

            if len(client_results) > 0:
                #accumulate all the parallel results into one
                result = client_results[0]
                for res in client_results[1:]:
                    result = self._sum_results(result, res)

                results.append(result)
                rates.append(results[-1]["rate"])

        if len(results) > 1:
            res_data["results"] = results

        if len(rates) > 0:
            rate = sum(rates)/len(rates)
        else:
            rate = 0.0

        if len(rates) > 1:
            # setting deviation to 2 x std_deviation because of the 68-95-99.7
            # rule; this seems comparable to the -I 99 netperf setting
            res_data["std_deviation"] = std_deviation(rates)
            rate_deviation = 2*res_data["std_deviation"]
        elif len(rates) == 1 and self._confidence is not None:
            result = results[0]
            rate_deviation = rate * (result["confidence"][1] / 100)
        else:
            rate_deviation = 0.0

        res_data["rate"] = rate
        res_data["rate_deviation"] = rate_deviation

        rate_pretty = self._pretty_rate(rate)
        rate_dev_pretty = self._pretty_rate(rate_deviation, unit=rate_pretty["unit"])

        if rv != 0 and self._runs == 1:
            res_data["msg"] = "Could not get performance throughput!"
            logging.info(res_data["msg"])
            return (False, res_data)
        elif rv != 0 and self._runs > 1:
            res_data["msg"] = "At least one of the Netperf runs failed, "\
                              "check the logs and result data for more "\
                              "information."
            logging.info(res_data["msg"])
            return (False, res_data)

        res_val = False
        res_data["msg"] = "Measured rate was %.2f +-%.2f %s" %\
                                            (rate_pretty["rate"],
                                             rate_dev_pretty["rate"],
                                             rate_pretty["unit"])
        if rate > 0.0:
            res_val = True
        else:
            res_val = False
            return (res_val, res_data)

        if self._max_deviation is not None:
            if self._max_deviation["type"] == "percent":
                percentual_deviation = (rate_deviation / rate) * 100
                if percentual_deviation > self._max_deviation["value"]:
                    res_val = False
                    res_data["msg"] = "Measured rate %.2f +-%.2f %s has bigger "\
                                      "deviation than allowed (+-%.2f %%)" %\
                                      (rate_pretty["rate"],
                                       rate_dev_pretty["rate"],
                                       rate_pretty["unit"],
                                       self._max_deviation["value"])
                    return (res_val, res_data)
            elif self._max_deviation["type"] == "absolute":
                if rate_deviation > self._max_deviation["value"]["rate"]:
                    pretty_deviation = self._pretty_rate(self._max_deviation["value"]["rate"])
                    res_val = False
                    res_data["msg"] = "Measured rate %.2f +-%.2f %s has bigger "\
                                      "deviation than allowed (+-%.2f %s)" %\
                                      (rate_pretty["rate"],
                                       rate_dev_pretty["rate"],
                                       rate_pretty["unit"],
                                       pretty_deviation["rate"],
                                       pretty_deviation["unit"])
                    return (res_val, res_data)
        if self._threshold_interval is not None:
            result_interval = (rate - rate_deviation,
                               rate + rate_deviation)

            threshold_pretty = self._pretty_rate(self._threshold["rate"])
            threshold_dev_pretty = self._pretty_rate(self._threshold_deviation["rate"],
                                                     unit = threshold_pretty["unit"])

            if self._threshold_interval[0] > result_interval[1]:
                res_val = False
                res_data["msg"] = "Measured rate %.2f +-%.2f %s is lower "\
                                  "than threshold %.2f +-%.2f %s" %\
                                  (rate_pretty["rate"],
                                   rate_dev_pretty["rate"],
                                   rate_pretty["unit"],
                                   threshold_pretty["rate"],
                                   threshold_dev_pretty["rate"],
                                   threshold_pretty["unit"])
                return (res_val, res_data)
            else:
                res_val = True
                res_data["msg"] = "Measured rate %.2f +-%.2f %s is higher "\
                                  "than threshold %.2f +-%.2f %s" %\
                                  (rate_pretty["rate"],
                                   rate_dev_pretty["rate"],
                                   rate_pretty["unit"],
                                   threshold_pretty["rate"],
                                   threshold_dev_pretty["rate"],
                                   threshold_pretty["unit"])
                return (res_val, res_data)
        return (res_val, res_data)
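
The rate deviation above comes from a project-specific std_deviation() helper. A rough standard-library equivalent, with hypothetical rate samples, might look like this:

import statistics

rates = [941.2, 936.8, 944.5]  # hypothetical Mbit/s samples from three runs

rate = sum(rates) / len(rates)
# two standard deviations cover roughly 95% of samples (68-95-99.7 rule),
# which the example treats as comparable to netperf's -I 99 setting
rate_deviation = 2 * statistics.stdev(rates)

print("Measured rate was %.2f +-%.2f Mbit/s" % (rate, rate_deviation))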

Example 138

Project: bsdpy
Source File: bsdpserver.py
View license
def getSysIdEntitlement(nbisources, clientsysid, clientmacaddr, bsdpmsgtype):
    """
        The getSysIdEntitlement function takes a list of previously compiled NBI
        sources and a clientsysid parameter to determine which of the entries in
        nbisources the clientsysid is entitled to.

        The function:
        - Initializes the 'hasdupes' variable as False.
        - Checks for an enabledmacaddrs value:
            - If an empty list, no filtering is performed
            - It will otherwise contain one or more MAC addresses, and thisnbi
              will be skipped if the client's MAC address is not in this list.
            - Apple's NetInstall service also may create a "DisabledMACAddresses"
              blacklist, but this never seems to be used.
        - Checks for duplicate clientsysid entries in enabled/disabledsysids:
            - If found, there is a configuration issue with
              NBImageInfo.plist and thisnbi is skipped; a warning
              is thrown for the admin to act on. The hasdupes variable will be
              set to True.
        - Checks if hasdupes is False:
            - If so, continue with the tests below, otherwise move on to the next NBI.
        - Checks for empty disabledsysids and enabledsysids lists:
            - If both lists are zero length thisnbi is added to nbientitlements.
        - Checks for a missing clientsysid entry in enabledsysids OR a matching
          clientsysid entry in disabledsysids:
            - If either is True, thisnbi is skipped.
        - Checks for matching clientsysid entry in enabledsysids AND a missing
          clientsysid entry in disabledsysids:
            - If both are True thisnbi is added to nbientitlements.
    """

    # Globals are used to give other functions access to these later
    global defaultnbi
    global imagenameslist
    global hasdefault

    logging.debug('Determining image list for system ID ' + clientsysid)

    # Initialize lists for nbientitlements and imagenameslist, both will
    #   contain a series of dicts
    nbientitlements = []
    imagenameslist = []

    try:
        # Iterate over the NBI list
        for thisnbi in nbisources:

            # First a sanity check for duplicate system ID entries
            hasdupes = False

            if clientsysid in thisnbi['disabledsysids'] and \
               clientsysid in thisnbi['enabledsysids']:

                # Duplicate entries are bad mkay, so skip this NBI and warn
                logging.debug('!!! Image "' + thisnbi['description'] +
                        '" has duplicate system ID entries '
                        'for model "' + clientsysid + '" - skipping !!!')
                hasdupes = True

            # Check whether both disabledsysids and enabledsysids are empty and
            #   if so add the NBI to the list, there are no restrictions.
            if not hasdupes:
                # If the NBI had a non-empty EnabledMACAddresses array present,
                # skip this image if this client's MAC is not in the list.
                if thisnbi['enabledmacaddrs'] and \
                    clientmacaddr not in thisnbi['enabledmacaddrs']:
                    logging.debug('MAC address ' + clientmacaddr + ' is not '
                                  'in the enabled MAC list - skipping "' +
                                  thisnbi['description'] + '"')
                    continue

                if len(thisnbi['disabledsysids']) == 0 and \
                   len(thisnbi['enabledsysids']) == 0:
                    logging.debug('Image "' + thisnbi['description'] +
                            '" has no restrictions, adding to list')
                    nbientitlements.append(thisnbi)

                # Check for an entry in disabledsysids, this means we skip
                elif clientsysid in thisnbi['disabledsysids']:
                    logging.debug('System ID "' + clientsysid + '" is disabled'
                                    ' - skipping "' + thisnbi['description'] + '"')

                # Check for an entry in enabledsysids
                elif clientsysid not in thisnbi['enabledsysids'] or \
                     (clientsysid in thisnbi['enabledsysids'] and
                     clientsysid not in thisnbi['disabledsysids']):
                    logging.debug('Found enabled system ID ' + clientsysid +
                          ' - adding "' + thisnbi['description'] + '" to list')
                    nbientitlements.append(thisnbi)

    except:
        logging.debug("Unexpected error filtering image entitlements: %s" %
                        sys.exc_info()[1])
        raise

    try:
        # Now we iterate through the entitled NBIs in search of a default
        #   image, as determined by its "IsDefault" key
        for image in nbientitlements:

            # Check for an isdefault entry in the current NBI
            if image['isdefault'] is True:
                logging.debug('Found default image ID ' + str(image['id']))

                # By default defaultnbi is 0, so change it to the matched NBI's
                #   id. If more than one is found (shouldn't) we use the highest
                #   id found. This behavior may be changed if it proves to be
                #   problematic, such as breaking out of the for loop instead.
                if defaultnbi < image['id']:
                    defaultnbi = image['id']
                    hasdefault = True
                    # logging.debug('Setting default image ID ' + str(defaultnbi))
                    # logging.debug('hasdefault is: ' + str(hasdefault))

            # This is to match cases where there is no default image found,
            #   a possibility. In that case we use the highest found id as the
            #   default. This too could be changed at a later time.
            elif not hasdefault:
                if defaultnbi < image['id']:
                    defaultnbi = image['id']
                    # logging.debug('Changing default image ID ' + str(defaultnbi))

            # Next we construct our imagenameslist which is a list of ints that
            #   encodes the image id, total name length and its name for use
            #   by the packet encoder

            # The imageid should be a zero-padded 4 byte string represented as
            #   ints
            imageid = '%04X' % image['id']

            # Our skip interval within the list; each image ID requires a
            #   "[129,0]" header, which we don't want to count toward the length
            n = 2

            # Construct the list by iterating over the imageid, converting to a
            #   16 bit string as we go, for proper packet encoding
            imageid = [int(imageid[i:i+n], 16) \
                for i in range(0, len(imageid), n)]
            imagenameslist += [129,0] + imageid + [image['length']] + \
                              strlist(image['name']).list()
    except:
        logging.debug("Unexpected error setting default image: %s" %
                        sys.exc_info()[1])
        raise

    # print 'Entitlements: ' + str(len(nbientitlements)) + '\n' + str(nbientitlements) + '\n'
    # print imagenameslist

    # All done, pass the finalized list of NBIs the given clientsysid back
    return nbientitlements
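
The entitlement rules described in the docstring reduce to a small predicate. A simplified sketch following those rules (the helper name and sample system IDs are hypothetical; MAC filtering is omitted):

import logging

logging.basicConfig(level=logging.DEBUG)

def is_entitled(sysid, enabledsysids, disabledsysids):
    # duplicate entries are a configuration error, so skip the image
    if sysid in enabledsysids and sysid in disabledsysids:
        logging.debug('duplicate system ID entries for %s - skipping', sysid)
        return False
    # no restrictions at all: every client is entitled
    if not enabledsysids and not disabledsysids:
        return True
    # otherwise require an enabled entry and no disabled entry
    return sysid in enabledsysids and sysid not in disabledsysids

print(is_entitled('MacBookPro11,1', [], []))                  # True
print(is_entitled('MacBookPro11,1', [], ['MacBookPro11,1']))  # False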

Example 139

Project: pytrainer
Source File: waypointeditor.py
View license
	def createHtml(self,default_waypoint=None):
		logging.debug(">>")
		tmpdir = self.pytrainer_main.profile.tmpdir
		filename = tmpdir+"/waypointeditor.html"
	
		points = self.waypoint.getAllWaypoints()
		londef = 0
		latdef = 0
		content = """

<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
    "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml"  xmlns:v="urn:schemas-microsoft-com:vml">
  <head>
    <meta http-equiv="content-type" content="text/html; charset=utf-8"/>
    <title>edit waypoints</title>

    <script id="googleapiimport" src="http://maps.google.com/maps/api/js?sensor=false"
            type="text/javascript"></script>
    <script type="text/javascript">
"""
		i = 0
		arrayjs = ""
		if default_waypoint is None and points: 
			default_waypoint = points[0][0]
		for point in points:
			if point[0] == default_waypoint:
				londef = point[2]
				latdef = point[1]
			content += "lon = '%f';\n"%point[2]
			content += "lat = '%f';\n"%point[1]
			content += "name = '%s';\n"%point[6]
			content += "description = '%s';\n"%point[4]
			content += "sym = '%s';\n"%point[7]
			content += "id = '%d';\n"%point[0]
			content += """waypoint%d = Array (lon,lat,name,description,sym,id);\n"""%i
			if i>0:
				arrayjs+=","
			arrayjs +="waypoint%d"%i
			i = i+1
		content += """waypointList = Array (%s);\n""" %arrayjs
		content += """ 
	is_addmode = 0;
    //<![CDATA[

	function addWaypoint(lon,lat) {
		document.title = "call:addWaypoint(" + lon + "," + lat + ")";
  		}  	
	
	function updateWaypoint(lon,lat,id) {
		document.title = "call:updateWaypoint(" + lon + "," + lat + "," + id + ")"; 
  		}  	

	function createMarker(waypoint) {
		var lon = waypoint[0];
		var lat = waypoint[1];
		var id = waypoint[5];
		var sym = waypoint[4];
		
		var point = new GLatLng(lat,lon);
		var text = "<b>"+waypoint[2]+"</b><br/>"+waypoint[3];

		var icon = new GIcon();
		if (sym=="Summit") {
			icon.image = \""""+os.path.abspath(self.data_path)+"""/glade/summit.png\";
			}
		else {
			icon.image = \""""+os.path.abspath(self.data_path)+"""/glade/waypoint.png\";
			}
		icon.iconSize = new GSize(32, 32);
		icon.iconAnchor = new GPoint(16, 16);
		icon.infoWindowAnchor = new GPoint(5, 1);
		
		var markerD = new GMarker(point, {icon:icon, draggable: true}); 
		map.addOverlay(markerD);

		markerD.enableDragging();

		GEvent.addListener(markerD, "mouseup", function(){
			position = markerD.getPoint();
			updateWaypoint(position.lng(),position.lat(),id);
		});
  		return markerD;
		}

	function load() {
		if (GBrowserIsCompatible()) {
			//Dibujamos el mapa
			map = new GMap2(document.getElementById("map"));
        		map.addControl(new GLargeMapControl());
        		map.addControl(new GMapTypeControl());
			map.addControl(new GScaleControl());
	"""
		if londef != 0:
        		content +="""
				lon = %s;
				lat = %s;
				""" %(londef,latdef)
		else:
			 content += """
				lon = 0;
				lat = 0;
				"""
		content +="""
			map.setCenter(new GLatLng(lat, lon), 11);

			//Dibujamos el minimapa
			ovMap=new GOverviewMapControl();
			map.addControl(ovMap);
			mini=ovMap.getOverviewMap();

			//Dibujamos los waypoints
			for (i=0; i<waypointList.length; i++){
  				createMarker(waypointList[i]);
				map.enableDragging();
				}

			//Preparamos los eventos para anadir nuevos waypoints
			GEvent.addListener(map, "click", function(marker, point) {
    				if (is_addmode==1){
					map.enableDragging();
					//map.addOverlay(new GMarker(point));
					var lon = point.lng();
					var lat = point.lat();
				
					var waypoint_id = addWaypoint(lon,lat);
					var waypoint = Array (lon,lat,"","","",waypoint_id);
  					createMarker(waypoint);
					is_addmode = 0;
					}
				});
      			}
    		}	

	function addmode(){
		is_addmode = 1;
		map.disableDragging();
		}

    //]]>
    </script>
<style>
.form {
	position: absolute;
	top: 200px;
	left: 300px;
	background: #ffffff;
	}
</style>

  </head>
  <body onload="load()" onunload="GUnload()" style="cursor:crosshair" border=0>
    		<div id="map" style="width: 100%; height: 460px; top: 0px; left: 0px"></div>
    		<div id="addButton" style="position: absolute; top: 32px;left: 86px;">
			<input type="button" value="New Waypoint" onclick="javascript:addmode();">
		</div>


  </body>
</html>
"""
		file = fileUtils(filename,content)
		file.run()
		logging.debug("<<")

Example 140

Project: bsdpy
Source File: bsdpserver.py
View license
def ack(packet, defaultnbi, msgtype):
    """
        The ack function constructs either a BSDP[LIST] or BSDP[SELECT] ACK
        DhcpPacket(), determined by the given msgtype, 'list' or 'select'.
        It calls the previously defined getSysIdEntitlement() and parseOptions()
        functions for either msgtype.
    """

    bsdpack = DhcpPacket()

    try:
        # Get the requesting client's clientsysid and MAC address from the
        # BSDP options
        clientsysid = \
        str(strlist(packet.GetOption('vendor_class_identifier'))).split('/')[2]

        clientmacaddr = chaddr_to_mac(packet.GetOption('chaddr'))

        # Decode and parse the BSDP options from vendor_encapsulated_options
        bsdpoptions = \
            parseOptions(packet.GetOption('vendor_encapsulated_options'))

        # Figure out the NBIs this clientsysid is entitled to
        enablednbis = getSysIdEntitlement(nbiimages, clientsysid, clientmacaddr, msgtype)

        # The Startup Disk preference panel in OS X uses a randomized reply port
        #   instead of the standard port 68. We check for the existence of that
        #   option in the bsdpoptions dict and if found set replyport to it.
        if 'reply_port' in bsdpoptions:
            replyport = int(str(format(bsdpoptions['reply_port'][0], 'x') +
                        format(bsdpoptions['reply_port'][1], 'x')), 16)
        else:
            replyport = 68

        # Get the client's IP address, a standard DHCP option
        clientip = ipv4(packet.GetOption('ciaddr'))
        if str(clientip) == '0.0.0.0':
            clientip = ipv4(packet.GetOption('request_ip_address'))
            logging.debug("Did not get a valid clientip, using request_ip_address %s instead" % (str(clientip),))
    except:
        logging.debug("Unexpected error: ack() common %s" %
                        sys.exc_info()[1])
        raise

    #print 'Configuring common BSDP packet options'

    # We construct the rest of our common BSDP reply parameters according to
    #   Apple's spec. The only noteworthy parameter here is sname, a zero-padded
    #   64 byte string list containing the BSDP server's hostname.
    bsdpack.SetOption("op", [2])
    bsdpack.SetOption("htype", packet.GetOption('htype'))
    bsdpack.SetOption("hlen", packet.GetOption('hlen'))
    bsdpack.SetOption("xid", packet.GetOption('xid'))
    bsdpack.SetOption("ciaddr", packet.GetOption('ciaddr'))
    bsdpack.SetOption("siaddr", serverip)
    bsdpack.SetOption("yiaddr", [0,0,0,0])
    bsdpack.SetOption("sname", strlist(serverhostname.ljust(64,'\x00')).list())
    bsdpack.SetOption("chaddr", packet.GetOption('chaddr'))
    bsdpack.SetOption("dhcp_message_type", [5])
    bsdpack.SetOption("server_identifier", serverip)
    bsdpack.SetOption("vendor_class_identifier", strlist('AAPLBSDPC').list())

    # Process BSDP[LIST] requests
    if msgtype == 'list':
        #print 'Creating LIST packet'
        try:
            nameslength = 0
            n = 2

            # First calculate the total length of the names of all combined
            #   NBIs, a required parameter that is part of the BSDP
            #   vendor_encapsulated_options.
            for i in enablednbis:
                nameslength += i['length']

            # Next calculate the total length of all enabled NBIs
            totallength = len(enablednbis) * 5 + nameslength

            # The bsdpimagelist var is inserted into vendor_encapsulated_options
            #   and consists of the option code (9), total length of options,
            #   the IDs and names of all NBIs and the 4 byte string list that
            #   contains the default NBI ID. Promise, all of this is part of
            #   the BSDP spec, go look it up.
            bsdpimagelist = [9,totallength]
            bsdpimagelist += imagenameslist
            defaultnbi = '%04X' % defaultnbi

            # Encode the default NBI option (7) its standard length (4) and the
            #   16 bit string list representation of defaultnbi
            defaultnbi = [7,4,129,0] + \
            [int(defaultnbi[i:i+n], 16) for i in range(0, len(defaultnbi), n)]

            if int(defaultnbi[-1:][0]) == 0:
                hasnulldefault = True
            else:
                hasnulldefault = False

            # To prevent sending a default image ID of 0 (zero) to the client
            #   after the initial INFORM[LIST] request we test for 0 and if
            #   so, skip inserting the defaultnbi BSDP option. Since it is
            #   optional anyway we won't confuse the client.
            compiledlistpacket = strlist([1,1,1,4,2,128,128]).list()
            if not hasnulldefault:
                compiledlistpacket += strlist(defaultnbi).list()
            compiledlistpacket += strlist(bsdpimagelist).list()

            # And finally, once we have all the image list encoding taken care
            #   of, we plug them into the vendor_encapsulated_options DHCP
            #   option after the option header:
            #   - [1,1,1] = BSDP message type (1), length (1), value (1 = list)
            #   - [4,2,128,128] = Server priority message type 4, length 2,
            #       value 0x8080 (32896)
            #   - defaultnbi (option 7) - Optional, not sent if '0'
            #   - List of all available Image IDs (option 9)

            bsdpack.SetOption("vendor_encapsulated_options", compiledlistpacket)

            # Some debugging to stdout
            logging.debug('-=========================================-')
            logging.debug("Return ACK[LIST] to " +
                    str(clientip) +
                    ' on ' +
                    str(replyport))
            if hasnulldefault is False: logging.debug("Default boot image ID: " +
                                              str(defaultnbi[2:]))
        except:
            logging.debug("Unexpected error ack() list: %s" %
                            sys.exc_info()[1])
            raise

    # Process BSDP[SELECT] requests
    elif msgtype == 'select':
        #print 'Creating SELECT packet'
        # Get the value of selected_boot_image as sent by the client and convert
        #   the value for later use.
        try:
            imageid = int('%02X' % bsdpoptions['selected_boot_image'][2] +
                            '%02X' % bsdpoptions['selected_boot_image'][3], 16)
        except:
            logging.debug("Unexpected error ack() select: imageid %s" %
                            sys.exc_info()[1])
            raise

        # Initialize variables for the booter file (kernel) and the dmg path
        booterfile = ''
        rootpath = ''
        selectedimage = ''
        if nbiurl.hostname[0].isalpha():
            basedmgpath = getBaseDmgPath(nbiurl)

        # Iterate over enablednbis and retrieve the kernel and boot DMG for each
        try:
            for nbidict in enablednbis:
                if nbidict['id'] == imageid:
                    booterfile = nbidict['booter']
                    rootpath = basedmgpath + nbidict['dmg']
                    # logging.debug('-->> Using boot image URI: ' + str(rootpath))
                    selectedimage = bsdpoptions['selected_boot_image']
                    # logging.debug('ACK[SELECT] image ID: ' + str(selectedimage))
        except:
            logging.debug("Unexpected error ack() selectedimage: %s" %
                            sys.exc_info()[1])
            raise

        # Generate the rest of the BSDP[SELECT] ACK packet by encoding the
        #   name of the kernel (file), the TFTP path and the vendor encapsulated
        #   options:
        #   - [1,1,2] = BSDP message type (1), length (1), value (2 = select)
        #   - [8,4] = BSDP selected_image (8), length (4), encoded image ID
        try:
            bsdpack.SetOption("file",
                strlist(booterfile.ljust(128,'\x00')).list())
            bsdpack.SetOption("root_path", strlist(rootpath).list())
            bsdpack.SetOption("vendor_encapsulated_options",
                strlist([1,1,2,8,4] + selectedimage).list())
        except:
            logging.debug("Unexpected error ack() select SetOption: %s" %
                            sys.exc_info()[1])
            raise

        try:
            # Some debugging to stdout
            logging.debug('-=========================================-')
            logging.debug("Return ACK[SELECT] to " +
                          str(clientip) +
                          ' on ' +
                          str(replyport))
            logging.debug("--> TFTP path: %s\n-->Boot image URI: %s"
                          % (str(strlist(bsdpack.GetOption("file"))), str(rootpath)))
        except:
            logging.debug("Unexpected error ack() select print debug: %s" %
                            sys.exc_info()[1])
            raise

    # Return the finished packet, client IP and reply port back to the caller
    return bsdpack, clientip, replyport
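
The reply_port handling above rebuilds a 16-bit port from two option bytes by concatenating their hex strings. A shift-based sketch of the same big-endian decoding, with a hypothetical option value (the hex-string form additionally assumes each byte formats to two digits):

# hypothetical two-byte reply_port option taken from the BSDP request
reply_port_option = [0x87, 0x5A]

# big-endian: (high byte << 8) | low byte
replyport = (reply_port_option[0] << 8) | reply_port_option[1]
print(replyport)  # 34650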

Example 141

Project: pycounter
Source File: sushi.py
View license
def _raw_to_full(raw_report):
    """Convert a raw report to CounterReport.

    :param raw_report: raw XML report
    :return: a :class:`pycounter.report.CounterReport`
    """
    try:
        root = etree.fromstring(raw_report)
    except etree.XMLSyntaxError:
        logger.error("XML syntax error: %s", raw_report)
        raise pycounter.exceptions.SushiException(
            message="XML syntax error",
            raw=raw_report)
    o_root = objectify.fromstring(raw_report)
    rep = None
    try:
        rep = o_root.Body[_ns('sushicounter', "ReportResponse")]
        c_report = rep.Report[_ns('counter', 'Report')]
    except AttributeError:
        try:
            c_report = rep.Report[_ns('counter', 'Reports')].Report
        except AttributeError:
            logger.error("report not found in XML: %s", raw_report)
            raise pycounter.exceptions.SushiException(
                message="report not found in XML",
                raw=raw_report, xml=o_root)
    logger.debug("COUNTER report: %s", etree.tostring(c_report))
    start_date = datetime.datetime.strptime(
        root.find('.//%s' % _ns('sushi', 'Begin')).text,
        "%Y-%m-%d").date()

    end_date = datetime.datetime.strptime(
        root.find('.//%s' % _ns('sushi', 'End')).text,
        "%Y-%m-%d").date()

    report_data = {'period': (start_date, end_date)}

    rep_def = root.find('.//%s' % _ns('sushi', 'ReportDefinition'))
    report_data['report_version'] = int(rep_def.get('Release'))

    report_data['report_type'] = rep_def.get('Name')

    customer = root.find('.//%s' % _ns('counter', 'Customer'))
    try:
        report_data['customer'] = (customer.find('.//%s' %
                                                 _ns('counter', 'Name')).text)
    except AttributeError:
        report_data['customer'] = ""

    inst_id = customer.find('.//%s' % _ns('counter', 'ID')).text
    report_data['institutional_identifier'] = inst_id

    rep_root = root.find('.//%s' % _ns('counter', 'Report'))
    created_string = rep_root.get('Created')
    if created_string is not None:
        report_data['date_run'] = arrow.get(created_string)
    else:
        report_data['date_run'] = datetime.datetime.now()

    report = pycounter.report.CounterReport(**report_data)

    report.metric = pycounter.constants.METRICS.get(report_data['report_type'])

    for item in c_report.Customer.ReportItems:
        try:
            publisher_name = item.ItemPublisher.text
        except AttributeError:
            publisher_name = ""
        title = item.ItemName.text
        platform = item.ItemPlatform.text

        eissn = issn = isbn = ""

        try:
            for identifier in item.ItemIdentifier:
                if identifier.Type == "Print_ISSN":
                    issn = identifier.Value.text
                    if issn is None:
                        issn = ""
                elif identifier.Type == "Online_ISSN":
                    eissn = identifier.Value.text
                    if eissn is None:
                        eissn = ""
                elif identifier.Type == "Online_ISBN":
                    logging.debug("FOUND ISBN")
                    isbn = identifier.Value.text
                    if isbn is None:
                        isbn = ""

        except AttributeError:
            pass

        month_data = []
        html_usage = 0
        pdf_usage = 0

        metrics_for_db = collections.defaultdict(list)

        for perform_item in item.ItemPerformance:
            item_date = convert_date_run(perform_item.Period.Begin.text)
            logger.debug("perform_item date: %r", item_date)
            usage = None
            for inst in perform_item.Instance:
                if inst.MetricType == "ft_total":
                    usage = str(inst.Count)
                elif inst.MetricType == "ft_pdf":
                    pdf_usage += int(inst.Count)
                elif inst.MetricType == "ft_html":
                    html_usage += int(inst.Count)
                elif report.report_type.startswith('DB'):
                    metrics_for_db[inst.MetricType].append((item_date,
                                                            int(inst.Count)))
            if usage is not None:
                month_data.append((item_date, int(usage)))

        if report.report_type:
            if report.report_type.startswith('JR'):
                report.pubs.append(pycounter.report.CounterJournal(
                    title=title,
                    platform=platform,
                    publisher=publisher_name,
                    period=report.period,
                    metric=report.metric,
                    issn=issn,
                    eissn=eissn,
                    month_data=month_data,
                    html_total=html_usage,
                    pdf_total=pdf_usage
                ))
            elif report.report_type.startswith('BR'):
                report.pubs.append(
                    pycounter.report.CounterBook(
                        title=title,
                        platform=platform,
                        publisher=publisher_name,
                        period=report.period,
                        metric=report.metric,
                        issn=issn,
                        isbn=isbn,
                        month_data=month_data,
                    ))
            elif report.report_type.startswith('DB'):
                for metric_code, month_data in six.iteritems(metrics_for_db):
                    metric = pycounter.constants.DB_METRIC_MAP[metric_code]
                    report.pubs.append(
                        pycounter.report.CounterDatabase(
                            title=title,
                            platform=platform,
                            publisher=publisher_name,
                            period=report.period,
                            metric=metric,
                            month_data=month_data
                        ))

    return report
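
This example mixes a module-level logger (logger.debug(...)) with the root logger (logging.debug("FOUND ISBN")). A short sketch of the difference, using only the standard library:

import logging

logging.basicConfig(level=logging.DEBUG,
                    format='%(name)s %(levelname)s %(message)s')
logger = logging.getLogger(__name__)

logger.debug("perform_item date: %r", "2015-01-01")  # tagged with this module's name
logging.debug("FOUND ISBN")                          # tagged with "root"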

Example 142

Project: pytrainer
Source File: gpx.py
View license
    def _getValues(self):
        '''
        Migrated to eTree XML processing 26 Nov 2009 - jblance
        '''
        logging.debug(">>")
        tree  = self.tree
        # Calories data comes within laps. Maybe more than one, adding them together - dgranda 20100114
        # Distance data comes within laps where present as well - dgranda 20110204
        laps = tree.findall(lapTag)
        if laps is not None and laps != "":
            totalDistance = 0
            totalDuration = 0
            for lap in laps:
                lapCalories = lap.findtext(calorieTag)                
                self.calories += int(lapCalories)
                lapDistance = lap.findtext(distanceTag)
                totalDistance += float(lapDistance)
                lapDuration_tmp = lap.findtext(elapsedTimeTag)
                # When retrieving data from TCX file -> seconds (float)
                # When retrieving data from GPX+ file -> hh:mm:ss
                # EAFP -> http://docs.python.org/glossary.html
                try:
                    lapDuration = float(lapDuration_tmp)
                except ValueError:
                    hour,minu,sec = lapDuration_tmp.split(":")
                    lapDuration = float(sec) + int(minu)*60 + int(hour)*3600
                totalDuration += lapDuration 
                logging.info("Lap distance: %s m | Duration: %s s | Calories: %s kcal" % (lapDistance, lapDuration, lapCalories))
            self.total_dist = float(totalDistance/1000.0) # Returning km
            self.total_time = int(totalDuration) # Returning seconds
            logging.info("Laps - Distance: %.02f km | Duration: %d s | Calories: %s kcal" % (self.total_dist, self.total_time, self.calories))
        else:
            laps = []

        retorno = []
        his_vel = []
        last_lat = None
        last_lon = None
        last_time = None
        total_dist = 0
        dist_elapsed = 0 # distance since the last time found
        total_hr = 0
        tmp_alt = 0
        len_validhrpoints = 0
        trkpoints = tree.findall(trackPointTag)
        if trkpoints is None or len(trkpoints) == 0:
            logging.debug( "No trkpoints found in file")
            return retorno
        logging.debug("%d trkpoints in file" % len(trkpoints))

        date_ = tree.find(timeTag).text
        if date_ is None:
            logging.info("time tag is blank")
            self.date = None
        else:
            mk_time = self.getDateTime(date_)[1] #Local Date
            self.date = mk_time.strftime("%Y-%m-%d")
            self.start_time = mk_time.strftime("%H:%M:%S")
        waiting_points = []
        logging.debug("date: %s | start_time: %s | mk_time: %s" % (self.date, self.start_time, mk_time))

        for i, trkpoint in enumerate(trkpoints):
            #Get data from trkpoint
            try:
                lat = float(trkpoint.get("lat"))
                lon = float(trkpoint.get("lon"))
            except Exception as e:
                logging.debug(str(e))
                lat = lon = None
            if lat is None or lat == "" or lat == 0 or lon is None or lon == "" or lon == 0:
                logging.debug("lat or lon is blank or zero")
                continue
            #get the heart rate value from the gpx extended format file
            hrResult = trkpoint.find(hrTag)
            if hrResult is not None:
                hr = int(hrResult.text)
                len_validhrpoints += 1
                total_hr += hr          #TODO fix
                if hr>self.maxhr:
                    self.maxhr = hr
            else:
                hr = None
            #get the cadence (if present)
            cadResult = trkpoint.find(cadTag)
            if cadResult is not None:
                cadence = int(cadResult.text)
            else:
                cadence = None

            #get the time
            timeResult = trkpoint.find(timeTag)
            if timeResult is not None:
                date_ = timeResult.text
                mk_time = self.getDateTime(date_)[0]
                time_ = time.mktime(mk_time.timetuple()) #Convert date to seconds
                if i == 0:
                    time_elapsed = 0
                else:
                    time_elapsed = time_ - self.trkpoints[i-1]['time'] if self.trkpoints[i-1]['time'] is not None else 0
                    if time_elapsed > 10:
                        logging.debug("%d seconds from last trkpt, someone took a break!" % time_elapsed)
                        # Calculating average lapse between trackpoints to add it
                        average_lapse = round(self.total_time_trkpts/i)
                        logging.debug("Adding %d seconds (activity average) as lapse from last point" % average_lapse)
                        self.total_time_trkpts += average_lapse
                    else:
                        self.total_time_trkpts += time_elapsed
            else:
                time_ = None
                time_elapsed = None

            #get the elevation
            eleResult = trkpoint.find(elevationTag)
            rel_alt = 0
            if eleResult is not None:
                try:
                    ele = float(eleResult.text)
                    #Calculate elevation change
                    if i != 0:
                        rel_alt = ele - self.trkpoints[i-1]['ele'] if self.trkpoints[i-1]['ele'] is not None else 0
                except Exception as e:
                    logging.debug(str(e))
                    ele = None
            else:
                ele = None
                
            #Get corrected elevation if it exists
            correctedEleResult = trkpoint.find(pyt_eleTag)
            if correctedEleResult is not None:
                try:
                    corEle = float(correctedEleResult.text)
                    #Calculate elevation change
                except Exception as e:
                    logging.debug(str(e))
                    corEle = None
            else:
                corEle = None

            #Calculate climb or decent amount
            #Allow for some 'jitter' in height here
            JITTER_VALUE = 0  #Elevation changes less than this value are not counted in +-
            if abs(rel_alt) < JITTER_VALUE:
                rel_alt = 0
            if rel_alt > 0:
                self.upositive += rel_alt
            elif rel_alt < 0:
                self.unegative -= rel_alt

            #Calculate distance between two points
            if i == 0: #First point
                dist = None
            else:
                dist = self._distance_between_points(lat1=self.trkpoints[i-1]['lat'], lon1=self.trkpoints[i-1]['lon'], lat2=lat, lon2=lon)

            #Accumulate distances
            if dist is not None:
                dist_elapsed += dist #TODO fix
                self.total_dist_trkpts += dist

            #Calculate speed...
            vel = self._calculate_speed(dist, time_elapsed, smoothing_factor=3)
            if vel>self.maxvel:
                self.maxvel=vel

            #The waiting point stuff....
            #This 'fills in' the data for situations where some times are missing from the GPX file
            if time_ is not None:
                if len(waiting_points) > 0:
                    for ((w_total_dist, w_dist, w_alt, w_total_time, w_lat, w_lon, w_hr, w_cadence, w_corEle)) in waiting_points:
                        w_time = (w_dist/dist_elapsed) * time_elapsed
                        w_vel = w_dist/((w_time)/3600.0)
                        w_total_time += w_time
                        logging.info("Time added: %f" % w_time)
                        retorno.append((w_total_dist, w_alt, w_total_time, w_vel, w_lat, w_lon, w_hr, w_cadence, w_corEle))
                    waiting_points = []
                    dist_elapsed = 0
                else:
                    retorno.append((self.total_dist_trkpts,ele, self.total_time,vel,lat,lon,hr,cadence,corEle))
                    dist_elapsed = 0
            else: # time_ is None
                waiting_points.append((self.total_dist_trkpts, dist_elapsed, ele, self.total_time, lat, lon, hr, cadence, corEle))

            #Add to dict of values to trkpoint list
            self.trkpoints.append({ 'id': i,
                                    'lat':lat,
                                    'lon':lon,
                                    'hr':hr,
                                    'cadence':cadence,
                                    'time':time_,
                                    'time_since_previous': time_elapsed,
                                    'time_elapsed': self.total_time_trkpts,
                                    'ele':ele,
                                    'ele_change': rel_alt,
                                    'distance_from_previous': dist,
                                    'elapsed_distance': self.total_dist_trkpts,
                                    'velocity':vel,
                                    'correctedElevation':corEle,

                                })

        #end of for trkpoint in trkpoints loop

        #Calculate averages etc
        self.hr_average = 0
        if len_validhrpoints > 0:
            self.hr_average = total_hr/len_validhrpoints
        # In case there is no other way to calculate distance, we rely on trackpoints (number of trackpoints is configurable!)
        if self.total_dist is None or self.total_dist == 0:
            self.total_dist = self.total_dist_trkpts
        else:
            dist_diff = 1000*(self.total_dist_trkpts - self.total_dist)
            logging.debug("Distance difference between laps and trkpts calculation: %f m" % dist_diff)
        if self.total_time is None or self.total_time == 0:
            self.total_time = self.total_time_trkpts
        else:
            time_diff = self.total_time_trkpts - self.total_time
            logging.debug("Duration difference between laps and trkpts calculation: %d s" % time_diff)
        logging.info("Values - Distance: %.02f km | Duration: %d s | Calories: %s kcal" % (self.total_dist, self.total_time, self.calories))
        logging.debug("<<")
        return retorno
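
The EAFP lap-duration handling above accepts either a plain seconds value (TCX) or an hh:mm:ss string (GPX+). Isolated as a standalone helper under the same assumptions:

def parse_duration(raw):
    """Return the lap duration in seconds from '3600.5' or '01:00:30'."""
    try:
        return float(raw)
    except ValueError:
        hour, minu, sec = raw.split(":")
        return float(sec) + int(minu) * 60 + int(hour) * 3600

print(parse_duration("3600.5"))    # 3600.5
print(parse_duration("01:00:30"))  # 3630.0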

Example 143

Project: tp-libvirt
Source File: guest_numa.py
View license
def run(test, params, env):
    """
    Test guest numa setting
    """
    vcpu_num = int(params.get("vcpu_num", 2))
    max_mem = int(params.get("max_mem", 1048576))
    max_mem_unit = params.get("max_mem_unit", 'KiB')
    vcpu_placement = params.get("vcpu_placement", 'static')
    bug_url = params.get("bug_url", "")
    status_error = "yes" == params.get("status_error", "no")
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    backup_xml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
    mode_dict = {'strict': 'bind', 'preferred': 'prefer',
                 'interleave': 'interleave'}

    # Prepare numatune memory parameter dict and list
    mem_tuple = ('memory_mode', 'memory_placement', 'memory_nodeset')
    numa_memory = {}
    for mem_param in mem_tuple:
        value = params.get(mem_param)
        if value:
            numa_memory[mem_param.split('_')[1]] = value

    memnode_tuple = ('memnode_cellid', 'memnode_mode', 'memnode_nodeset')
    numa_memnode = handle_param(memnode_tuple, params)

    if numa_memnode:
        if not libvirt_version.version_compare(1, 2, 7):
            raise error.TestNAError("Setting hugepages more specifically per "
                                    "numa node not supported on current "
                                    "version")

    # Prepare cpu numa cell parameter
    topology = {}
    topo_tuple = ('sockets', 'cores', 'threads')
    for key in topo_tuple:
        if params.get(key):
            topology[key] = params.get(key)

    cell_tuple = ('cell_id', 'cell_cpus', 'cell_memory')
    numa_cell = handle_param(cell_tuple, params)

    # Prepare qemu cmdline check parameter
    cmdline_tuple = ("qemu_cmdline",)
    cmdline_list = handle_param(cmdline_tuple, params)

    # Prepare hugepages parameter
    backup_list = []
    page_tuple = ('vmpage_size', 'vmpage_unit', 'vmpage_nodeset')
    page_list = handle_param(page_tuple, params)
    nr_pagesize_total = params.get("nr_pagesize_total")
    deallocate = False
    default_nr_hugepages_path = "/sys/kernel/mm/hugepages/hugepages-2048kB/"
    default_nr_hugepages_path += "nr_hugepages"

    if page_list:
        if not libvirt_version.version_compare(1, 2, 5):
            raise error.TestNAError("Setting hugepages more specifically per "
                                    "numa node not supported on current "
                                    "version")

    hp_cl = test_setup.HugePageConfig(params)
    default_hp_size = hp_cl.get_hugepage_size()
    supported_hp_size = hp_cl.get_multi_supported_hugepage_size()
    mount_path = []
    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    qemu_conf_restore = False

    def _update_qemu_conf():
        """
        Mount hugepage path, update qemu conf then restart libvirtd
        """
        size_dict = {'2048': '2M', '1048576': '1G', '16384': '16M'}
        for page in page_list:
            if page['size'] not in supported_hp_size:
                raise error.TestError("Hugepage size [%s] isn't supported, "
                                      "please verify kernel cmdline configuration."
                                      % page['size'])
            m_path = "/dev/hugepages%s" % size_dict[page['size']]
            hp_cl.hugepage_size = int(page['size'])
            hp_cl.hugepage_path = m_path
            hp_cl.mount_hugepage_fs()
            mount_path.append(m_path)
        if mount_path:
            qemu_conf.hugetlbfs_mount = mount_path
            libvirtd.restart()

    try:
        # Get host numa node list
        host_numa_node = utils_misc.NumaInfo()
        node_list = host_numa_node.online_nodes
        logging.debug("host node list is %s", node_list)
        used_node = []
        if numa_memory.get('nodeset'):
            used_node += utlv.cpus_parser(numa_memory['nodeset'])
        if numa_memnode:
            for i in numa_memnode:
                used_node += utlv.cpus_parser(i['nodeset'])
        if page_list:
            host_page_tuple = ("hugepage_size", "page_num", "page_nodenum")
            h_list = handle_param(host_page_tuple, params)
            h_nodenum = [h_list[p_size]['nodenum']
                         for p_size in range(len(h_list))]
            for i in h_nodenum:
                used_node += utlv.cpus_parser(i)
        if used_node and not status_error:
            logging.debug("set node list is %s", used_node)
            used_node = list(set(used_node))
            for i in used_node:
                if i not in node_list:
                    raise error.TestNAError("%s in nodeset out of range" % i)
                mem_size = host_numa_node.read_from_node_meminfo(i, 'MemTotal')
                logging.debug("the memory total in the node %s is %s", i, mem_size)
                if not int(mem_size):
                    raise error.TestNAError("node %s memory is empty" % i)

        # set hugepage with qemu.conf and mount path
        if default_hp_size == 2048:
            hp_cl.setup()
            deallocate = True
        else:
            _update_qemu_conf()
            qemu_conf_restore = True

        # set hugepage with total number or per-node number
        if nr_pagesize_total:
            # Only set the total number of 2M hugepages, as runtime updates
            # of the total 1G size are not supported yet.
            deallocate = True
            hp_cl.kernel_hp_file = default_nr_hugepages_path
            hp_cl.target_hugepages = int(nr_pagesize_total)
            hp_cl.set_hugepages()
        if page_list:
            hp_size = [h_list[p_size]['size'] for p_size in range(len(h_list))]
            multi_hp_size = hp_cl.get_multi_supported_hugepage_size()
            for size in hp_size:
                if size not in multi_hp_size:
                    raise error.TestNAError("The hugepage size %s not "
                                            "supported or not configured under"
                                            " current running kernel." % size)
            # backup node page setting and set new value
            for i in h_list:
                node_val = hp_cl.get_node_num_huge_pages(i['nodenum'],
                                                         i['size'])
                # set hugepage per node if current value not satisfied
                # kernel 1G hugepage runtime number update is supported now
                if int(i['num']) > node_val:
                    node_dict = i.copy()
                    node_dict['num'] = node_val
                    backup_list.append(node_dict)
                    hp_cl.set_node_num_huge_pages(i['num'],
                                                  i['nodenum'],
                                                  i['size'])

        vmxml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
        vmxml.vcpu = vcpu_num
        vmxml.max_mem = max_mem
        vmxml.max_mem_unit = max_mem_unit
        vmxml.current_mem = max_mem
        vmxml.current_mem_unit = max_mem_unit

        # numatune setting
        if numa_memnode:
            vmxml.numa_memory = numa_memory
            vmxml.numa_memnode = numa_memnode
            del vmxml.numa_memory
        if numa_memory:
            vmxml.numa_memory = numa_memory

        # vcpu placement setting
        vmxml.placement = vcpu_placement

        # guest numa cpu setting
        vmcpuxml = libvirt_xml.vm_xml.VMCPUXML()
        vmcpuxml.xml = "<cpu><numa/></cpu>"
        if topology:
            vmcpuxml.topology = topology
        logging.debug(vmcpuxml.numa_cell)
        vmcpuxml.numa_cell = numa_cell
        logging.debug(vmcpuxml.numa_cell)
        vmxml.cpu = vmcpuxml

        # hugepages setting
        if page_list:
            membacking = libvirt_xml.vm_xml.VMMemBackingXML()
            hugepages = libvirt_xml.vm_xml.VMHugepagesXML()
            pagexml_list = []
            for i in range(len(page_list)):
                pagexml = hugepages.PageXML()
                pagexml.update(page_list[i])
                pagexml_list.append(pagexml)
            hugepages.pages = pagexml_list
            membacking.hugepages = hugepages
            vmxml.mb = membacking

        logging.debug("vm xml is %s", vmxml)
        vmxml.sync()

        try:
            vm.start()
            session = vm.wait_for_login()
            vmxml_new = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
            logging.debug("vm xml after start is %s", vmxml_new)

        except virt_vm.VMStartError, e:
            # Starting VM failed.
            if status_error:
                return
            else:
                raise error.TestFail("Test failed in positive case.\n error:"
                                     " %s\n%s" % (e, bug_url))

        vm_pid = vm.get_pid()
        # numa hugepage check
        if page_list:
            numa_maps = open("/proc/%s/numa_maps" % vm_pid)
            numa_map_info = numa_maps.read()
            numa_maps.close()
            hugepage_info = re.findall(".*file=\S*hugepages.*", numa_map_info)
            if not hugepage_info:
                raise error.TestFail("Can't find hugepages usage info in vm "
                                     "numa maps")
            else:
                logging.debug("The hugepage info in numa_maps is %s" %
                              hugepage_info)
                map_dict = {}
                usage_dict = {}
                node_pattern = r"\s(\S+):(\S+)\s.*ram-node(\d+).*\s"
                node_pattern += "N(\d+)=(\d+)"
                for map_info in hugepage_info:
                    for (mem_mode, mem_num, cell_num, host_node_num,
                         vm_page_num) in re.findall(node_pattern, map_info):
                        usage_dict[mem_mode] = utlv.cpus_parser(mem_num)
                        usage_dict[host_node_num] = vm_page_num
                        map_dict[cell_num] = usage_dict.copy()
                logging.debug("huagepage info in vm numa maps is %s",
                              map_dict)
                memnode_dict = {}
                usage_dict = {}
                if numa_memnode:
                    for i in numa_memnode:
                        node = utlv.cpus_parser(i['nodeset'])
                        mode = mode_dict[i['mode']]
                        usage_dict[mode] = node
                        memnode_dict[i['cellid']] = usage_dict.copy()
                    logging.debug("memnode setting dict is %s", memnode_dict)
                    for k in memnode_dict.keys():
                        for mk in memnode_dict[k].keys():
                            if memnode_dict[k][mk] != map_dict[k][mk]:
                                raise error.TestFail("vm pid numa map dict %s"
                                                     " not expected" %
                                                     map_dict)

        # qemu command line check
        f_cmdline = open("/proc/%s/cmdline" % vm_pid)
        q_cmdline_list = f_cmdline.read().split("\x00")
        f_cmdline.close()
        logging.debug("vm qemu cmdline list is %s" % q_cmdline_list)
        for cmd in cmdline_list:
            logging.debug("checking '%s' in qemu cmdline", cmd['cmdline'])
            p_found = False
            for q_cmd in q_cmdline_list:
                if re.search(cmd['cmdline'], q_cmd):
                    p_found = True
                    break
                else:
                    continue
            if not p_found:
                raise error.TestFail("%s not found in vm qemu cmdline" %
                                     cmd['cmdline'])

        # vm inside check
        vm_cpu_info = utils_misc.get_cpu_info(session)
        logging.debug("lscpu output dict in vm is %s", vm_cpu_info)
        session.close()
        node_num = int(vm_cpu_info["NUMA node(s)"])
        if node_num != len(numa_cell):
            raise error.TestFail("node number %s in vm is not expected" %
                                 node_num)
        for i in range(len(numa_cell)):
            cpu_str = vm_cpu_info["NUMA node%s CPU(s)" % i]
            vm_cpu_list = utlv.cpus_parser(cpu_str)
            cpu_list = utlv.cpus_parser(numa_cell[i]["cpus"])
            if vm_cpu_list != cpu_list:
                raise error.TestFail("vm node %s cpu list %s not expected" %
                                     (i, vm_cpu_list))
        if topology:
            vm_topo_tuple = ("Socket(s)", "Core(s) per socket",
                             "Thread(s) per core")
            for i in range(len(topo_tuple)):
                topo_info = vm_cpu_info[vm_topo_tuple[i]]
                if topo_info != topology[topo_tuple[i]]:
                    raise error.TestFail("%s in vm topology not expected." %
                                         topo_tuple[i])
    finally:
        if vm.is_alive():
            vm.destroy(gracefully=False)
        backup_xml.sync()
        if page_list:
            for i in backup_list:
                hp_cl.set_node_num_huge_pages(i['num'],
                                              i['nodenum'], i['size'])
        if deallocate:
            hp_cl.deallocate = deallocate
            hp_cl.cleanup()
        if qemu_conf_restore:
            qemu_conf.restore()
            libvirtd.restart()
            for mt_path in mount_path:
                try:
                    process.run("umount %s" % mt_path, shell=True)
                except process.CmdError:
                    logging.warning("umount %s failed" % mt_path)

Example 144

Project: avocado-vt
Source File: bootstrap.py
View license
def create_subtests_cfg(t_type):
    specific_test_list = []
    specific_file_list = []
    specific_subdirs = asset.get_test_provider_subdirs(t_type)
    provider_names_specific = asset.get_test_provider_names(t_type)
    config_filter = get_config_filter()

    provider_info_specific = []
    for specific_provider in provider_names_specific:
        provider_info_specific.append(
            asset.get_test_provider_info(specific_provider))

    for subdir in specific_subdirs:
        specific_test_list += data_dir.SubdirGlobList(subdir,
                                                      '*.py',
                                                      test_filter)
        specific_file_list += data_dir.SubdirGlobList(subdir,
                                                      '*.cfg',
                                                      config_filter)

    shared_test_list = []
    shared_file_list = []
    shared_subdirs = asset.get_test_provider_subdirs('generic')
    provider_names_shared = asset.get_test_provider_names('generic')

    provider_info_shared = []
    for shared_provider in provider_names_shared:
        provider_info_shared.append(
            asset.get_test_provider_info(shared_provider))

    if t_type != 'lvsb':
        for subdir in shared_subdirs:
            shared_test_list += data_dir.SubdirGlobList(subdir,
                                                        '*.py',
                                                        test_filter)
            shared_file_list += data_dir.SubdirGlobList(subdir,
                                                        '*.cfg',
                                                        config_filter)

    all_specific_test_list = []
    for test in specific_test_list:
        for p in provider_info_specific:
            provider_base_path = p['backends'][t_type]['path']
            if provider_base_path in test:
                provider_name = p['name']
                break

        basename = os.path.basename(test)
        if basename != "__init__.py":
            all_specific_test_list.append("%s.%s" %
                                          (provider_name,
                                           basename.split(".")[0]))
    all_shared_test_list = []
    for test in shared_test_list:
        for p in provider_info_shared:
            provider_base_path = p['backends']['generic']['path']
            if provider_base_path in test:
                provider_name = p['name']
                break

        basename = os.path.basename(test)
        if basename != "__init__.py":
            all_shared_test_list.append("%s.%s" %
                                        (provider_name,
                                         basename.split(".")[0]))

    all_specific_test_list.sort()
    all_shared_test_list.sort()

    first_subtest_file = []
    last_subtest_file = []
    non_dropin_tests = []
    tmp = []

    for shared_file in shared_file_list:
        provider_name = None
        for p in provider_info_shared:
            provider_base_path = p['backends']['generic']['path']
            if provider_base_path in shared_file:
                provider_name = p['name']
                break

        shared_file_obj = open(shared_file, 'r')
        for line in shared_file_obj.readlines():
            line = line.strip()
            if line.startswith("type"):
                cartesian_parser = cartesian_config.Parser()
                cartesian_parser.parse_string(line)
                td = cartesian_parser.get_dicts().next()
                values = td['type'].split(" ")
                for value in values:
                    if value not in non_dropin_tests:
                        non_dropin_tests.append("%s.%s" %
                                                (provider_name, value))

        shared_file_name = os.path.basename(shared_file)
        shared_file_name = shared_file_name.split(".")[0]
        if shared_file_name in first_subtest[t_type]:
            if [provider_name, shared_file] not in first_subtest_file:
                first_subtest_file.append([provider_name, shared_file])
        elif shared_file_name in last_subtest[t_type]:
            if [provider_name, shared_file] not in last_subtest_file:
                last_subtest_file.append([provider_name, shared_file])
        else:
            if [provider_name, shared_file] not in tmp:
                tmp.append([provider_name, shared_file])
    shared_file_list = tmp

    tmp = []
    for shared_file in specific_file_list:
        provider_name = None
        for p in provider_info_specific:
            provider_base_path = p['backends'][t_type]['path']
            if provider_base_path in shared_file:
                provider_name = p['name']
                break

        shared_file_obj = open(shared_file, 'r')
        for line in shared_file_obj.readlines():
            line = line.strip()
            if line.startswith("type"):
                cartesian_parser = cartesian_config.Parser()
                cartesian_parser.parse_string(line)
                td = cartesian_parser.get_dicts().next()
                values = td['type'].split(" ")
                for value in values:
                    if value not in non_dropin_tests:
                        non_dropin_tests.append("%s.%s" %
                                                (provider_name, value))

        shared_file_name = os.path.basename(shared_file)
        shared_file_name = shared_file_name.split(".")[0]
        if shared_file_name in first_subtest[t_type]:
            if [provider_name, shared_file] not in first_subtest_file:
                first_subtest_file.append([provider_name, shared_file])
        elif shared_file_name in last_subtest[t_type]:
            if [provider_name, shared_file] not in last_subtest_file:
                last_subtest_file.append([provider_name, shared_file])
        else:
            if [provider_name, shared_file] not in tmp:
                tmp.append([provider_name, shared_file])
    specific_file_list = tmp

    subtests_cfg = os.path.join(data_dir.get_backend_dir(t_type), 'cfg',
                                'subtests.cfg')
    subtests_file = open(subtests_cfg, 'w')
    subtests_file.write(
        "# Do not edit, auto generated file from subtests config\n")

    subtests_file.write("variants subtest:\n")
    write_subtests_files(first_subtest_file, subtests_file)
    write_subtests_files(specific_file_list, subtests_file, t_type)
    write_subtests_files(shared_file_list, subtests_file)
    write_subtests_files(last_subtest_file, subtests_file)

    subtests_file.close()
    logging.debug("Config file %s auto generated from subtest samples",
                  subtests_cfg)
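
create_subtests_cfg() ends by writing an auto-generated subtests.cfg and recording its path with a single logging.debug call. The sketch below (not part of avocado-vt) reproduces only that generate-then-log step; the fragment file names are illustrative assumptions.

import logging

logging.basicConfig(level=logging.DEBUG)

def write_generated_cfg(out_path, fragment_paths):
    """Concatenate config fragments into one auto-generated file and log it."""
    with open(out_path, "w") as subtests_file:
        subtests_file.write(
            "# Do not edit, auto generated file from subtests config\n")
        subtests_file.write("variants subtest:\n")
        for path in fragment_paths:
            with open(path) as fragment:
                subtests_file.write(fragment.read())
    # Arguments are passed separately (lazy formatting), as in the original.
    logging.debug("Config file %s auto generated from subtest samples",
                  out_path)

if __name__ == "__main__":
    # Create two throwaway fragments so the sketch runs end to end.
    for name, body in [("a.cfg", "    - a:\n        type = a\n"),
                       ("b.cfg", "    - b:\n        type = b\n")]:
        with open(name, "w") as f:
            f.write(body)
    write_generated_cfg("subtests.cfg", ["a.cfg", "b.cfg"])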

Example 145

Project: couchbase-cli
Source File: pump_dcp.py
View license
    def provide_dcp_batch_actual(self):
        batch = pump.Batch(self)

        batch_max_size = self.opts.extra['batch_max_size']
        batch_max_bytes = self.opts.extra['batch_max_bytes']
        delta_ack_size = batch_max_bytes * 10 / 4 #ack every 25% of buffer size
        last_processed = 0
        total_bytes_read = 0

        vbid = 0
        cmd = 0
        start_seqno = 0
        end_seqno = 0
        vb_uuid = 0
        hi_seqno = 0
        ss_start_seqno = 0
        ss_end_seqno = 0
        try:
            while (not self.dcp_done and
                   batch.size() < batch_max_size and
                   batch.bytes < batch_max_bytes):

                if self.response.empty():
                    if len(self.stream_list) > 0:
                        logging.debug("no response while there %s active streams" % len(self.stream_list))
                        time.sleep(.25)
                    else:
                        self.dcp_done = True
                    continue
                unprocessed_size = total_bytes_read - last_processed
                if unprocessed_size > delta_ack_size:
                    rv = self.ack_buffer_size(unprocessed_size)
                    if rv:
                        logging.error(rv)
                    else:
                        last_processed = total_bytes_read

                cmd, errcode, opaque, cas, keylen, extlen, data, datalen, dtype, bytes_read = \
                    self.response.get()
                total_bytes_read += bytes_read
                rv = 0
                metalen = flags = flg = exp = 0
                key = val = ext = ''
                need_ack = False
                seqno = 0
                if cmd == couchbaseConstants.CMD_DCP_REQUEST_STREAM:
                    if errcode == couchbaseConstants.ERR_SUCCESS:
                        pair_index = (self.source_bucket['name'], self.source_node['hostname'])
                        start = 0
                        step = DCPStreamSource.HIGH_SEQNO_BYTE + DCPStreamSource.UUID_BYTE
                        while start+step <= datalen:
                            uuid, seqno = struct.unpack(
                                            couchbaseConstants.DCP_VB_UUID_SEQNO_PKT_FMT, \
                                            data[start:start + step])
                            if pair_index not in self.cur['failoverlog']:
                                self.cur['failoverlog'][pair_index] = {}
                            if opaque not in self.cur['failoverlog'][pair_index] or \
                               not self.cur['failoverlog'][pair_index][opaque]:
                                self.cur['failoverlog'][pair_index][opaque] = [(uuid, seqno)]
                            else:
                                self.cur['failoverlog'][pair_index][opaque].append((uuid, seqno))
                            start = start + step
                    elif errcode == couchbaseConstants.ERR_KEY_ENOENT:
                        logging.warn("producer doesn't know about the vbucket uuid, rollback to 0")
                        vbid, flags, start_seqno, end_seqno, vb_uuid, ss_start_seqno, ss_end_seqno = \
                            self.stream_list[opaque]
                        del self.stream_list[opaque]
                    elif errcode == couchbaseConstants.ERR_KEY_EEXISTS:
                       logging.warn("a stream exists on the connection for vbucket:%s" % opaque)
                    elif errcode ==  couchbaseConstants.ERR_NOT_MY_VBUCKET:
                        logging.warn("Vbucket is not active anymore, skip it:%s" % vbid)
                        del self.stream_list[opaque]
                    elif errcode == couchbaseConstants.ERR_ERANGE:
                        logging.warn("Start or end sequence numbers specified incorrectly,(%s, %s)" % \
                                     (start_seqno, end_seqno))
                        del self.stream_list[opaque]
                    elif errcode == couchbaseConstants.ERR_ROLLBACK:
                        vbid, flags, start_seqno, end_seqno, vb_uuid, ss_start_seqno, ss_stop_seqno = \
                            self.stream_list[opaque]
                        start_seqno, = struct.unpack(couchbaseConstants.DCP_VB_SEQNO_PKT_FMT, data)
                        # find the latest uuid, hi_seqno that fits start_seqno
                        if self.cur['failoverlog']:
                            pair_index = (self.source_bucket['name'], self.source_node['hostname'])
                            if self.cur['failoverlog'][pair_index].get("vbid"):
                                for uuid, seqno in self.cur['failoverlog'][pair_index][vbid]:
                                    if start_seqno >= seqno:
                                        vb_uuid = uuid
                                        break
                        ss_start_seqno = start_seqno
                        ss_end_seqno = start_seqno
                        self.request_dcp_stream(vbid, flags, start_seqno, end_seqno, vb_uuid, ss_start_seqno, ss_end_seqno)

                        del self.stream_list[opaque]
                        self.stream_list[opaque] = \
                            (vbid, flags, start_seqno, end_seqno, vb_uuid, ss_start_seqno, ss_end_seqno)
                    else:
                        logging.error("unprocessed errcode:%s" % errcode)
                        del self.stream_list[opaque]
                elif cmd == couchbaseConstants.CMD_DCP_MUTATION:
                    vbucket_id = errcode
                    seqno, rev_seqno, flg, exp, locktime, metalen, nru = \
                        struct.unpack(couchbaseConstants.DCP_MUTATION_PKT_FMT, data[0:extlen])
                    key_start = extlen
                    val_start = key_start + keylen
                    val_len = datalen - keylen - metalen - extlen
                    meta_start = val_start + val_len
                    key = data[extlen:val_start]
                    val = data[val_start:meta_start]
                    conf_res = 0
                    if meta_start < datalen:
                        # handle extra conflict resolution fields
                        extra_meta = data[meta_start:]
                        extra_index = 0
                        version = extra_meta[extra_index]
                        extra_index += 1
                        while extra_index < metalen:
                            id, extlen = struct.unpack(couchbaseConstants.DCP_EXTRA_META_PKG_FMT, extra_meta[extra_index:extra_index+3])
                            extra_index += 3
                            if id == couchbaseConstants.DCP_EXTRA_META_CONFLICT_RESOLUTION:
                                if extlen == 1:
                                    conf_res, = struct.unpack(">B",extra_meta[extra_index:extra_index+1])
                                elif extlen == 2:
                                    conf_res, = struct.unpack(">H",extra_meta[extra_index:extra_index+2])
                                elif extlen == 4:
                                    conf_res, = struct.unpack(">I", extra_meta[extra_index:extra_index+4])
                                elif extlen == 8:
                                    conf_res, = struct.unpack(">Q", extra_meta[extra_index:extra_index+8])
                                else:
                                    logging.error("unsupported extra meta data format:%d" % extlen)
                                    conf_res = 0
                            extra_index += extlen

                    if not self.skip(key, vbucket_id):
                        msg = (cmd, vbucket_id, key, flg, exp, cas, rev_seqno, val, seqno, dtype, \
                               metalen, conf_res)
                        batch.append(msg, len(val))
                        self.num_msg += 1
                elif cmd == couchbaseConstants.CMD_DCP_DELETE or \
                     cmd == couchbaseConstants.CMD_DCP_EXPIRATION:
                    vbucket_id = errcode
                    seqno, rev_seqno, metalen = \
                        struct.unpack(couchbaseConstants.DCP_DELETE_PKT_FMT, data[0:extlen])
                    key_start = extlen
                    val_start = key_start + keylen
                    key = data[extlen:val_start]
                    if not self.skip(key, vbucket_id):
                        msg = (cmd, vbucket_id, key, flg, exp, cas, rev_seqno, val, seqno, dtype, \
                               metalen, 0)
                        batch.append(msg, len(val))
                        self.num_msg += 1
                    if cmd == couchbaseConstants.CMD_DCP_DELETE:
                        batch.adjust_size += 1
                elif cmd == couchbaseConstants.CMD_DCP_FLUSH:
                    logging.warn("stopping: saw CMD_DCP_FLUSH")
                    self.dcp_done = True
                    break
                elif cmd == couchbaseConstants.CMD_DCP_END_STREAM:
                    del self.stream_list[opaque]
                    if not len(self.stream_list):
                        self.dcp_done = True
                elif cmd == couchbaseConstants.CMD_DCP_SNAPSHOT_MARKER:
                    ss_start_seqno, ss_end_seqno, _ = \
                        struct.unpack(couchbaseConstants.DCP_SNAPSHOT_PKT_FMT, data[0:extlen])
                    pair_index = (self.source_bucket['name'], self.source_node['hostname'])
                    if not self.cur['snapshot']:
                        self.cur['snapshot'] = {}
                    if pair_index not in self.cur['snapshot']:
                        self.cur['snapshot'][pair_index] = {}
                    self.cur['snapshot'][pair_index][opaque] = (ss_start_seqno, ss_end_seqno)
                elif cmd == couchbaseConstants.CMD_DCP_NOOP:
                    need_ack = True
                elif cmd == couchbaseConstants.CMD_DCP_BUFFER_ACK:
                    if errcode != couchbaseConstants.ERR_SUCCESS:
                        logging.warning("buffer ack response errcode:%s" % errcode)
                    continue
                else:
                    logging.warn("warning: unexpected DCP message: %s" % cmd)
                    return "unexpected DCP message: %s" % cmd, batch

                if need_ack:
                    self.ack_last = True
                    try:
                        self.dcp_conn._sendMsg(cmd, '', '', opaque, vbucketId=0,
                                          fmt=couchbaseConstants.RES_PKT_FMT,
                                          magic=couchbaseConstants.RES_MAGIC_BYTE)
                    except socket.error:
                        return ("error: socket.error on send();"
                                " perhaps the source server: %s was rebalancing"
                                " or had connectivity/server problems" %
                                (self.source_node['hostname'])), batch
                    except EOFError:
                        self.dcp_done = True
                        return ("error: EOFError on socket send();"
                                " perhaps the source server: %s was rebalancing"
                                " or had connectivity/server problems" %
                                (self.source_node['hostname'])), batch

                    # Close the batch when there's an ACK handshake, so
                    # the server can concurrently send us the next batch.
                    # If we are slow, our slow ACK's will naturally slow
                    # down the server.
                    self.ack_buffer_size(total_bytes_read - last_processed)
                    return 0, batch

                self.ack_last = False
                self.cmd_last = cmd

        except EOFError:
            if batch.size() <= 0 and self.ack_last:
                # A closed conn after an ACK means clean end of TAP dump.
                self.dcp_done = True

        if batch.size() <= 0:
            return 0, None
        self.ack_buffer_size(total_bytes_read - last_processed)
        return 0, batch
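
Most of the debug output in provide_dcp_batch_actual() comes from the polling loop: when the response queue is empty but streams are still open, the reader logs the number of active streams and sleeps. A reduced, self-contained sketch of that loop follows (independent of couchbase-cli, written for Python 3); the queue contents and sleep interval are assumptions, and the message uses lazy formatting rather than the "%" operator.

import logging
import time
from queue import Empty, Queue

logging.basicConfig(level=logging.DEBUG)

def drain_responses(response_queue, stream_list, batch_max_size=10):
    """Drain up to `batch_max_size` responses, logging idle polls."""
    batch = []
    while len(batch) < batch_max_size:
        if response_queue.empty():
            if stream_list:
                logging.debug("no response while there are %s active streams",
                              len(stream_list))
                time.sleep(0.25)
                continue
            break  # no pending responses and no open streams: we are done
        try:
            batch.append(response_queue.get_nowait())
        except Empty:
            break
    return batch

if __name__ == "__main__":
    q = Queue()
    for seqno in range(3):
        q.put(("mutation", seqno))
    print(drain_responses(q, stream_list={}))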

Example 146

Project: avocado-vt
Source File: qemu_virtio_port.py
View license
    def run_debug(self):
        """
        See run_normal.
        Additionally it stores the last n verified characters and, in
        case of failure, it quickly receives enough data to verify the failure
        or allowed loss and then analyzes this data. It provides more info
        about the situation.
        Unlike the normal run this one supports both loss and duplication.
        It's not friendly to data corruption.
        """
        logging.debug("ThRecvCheck %s: run", self.getName())
        attempt = 10
        max_loss = 0
        sum_loss = 0
        verif_buf = deque(maxlen=max(self.blocklen, self.sendlen))
        while not self.exitevent.isSet():
            ret = select.select([self.port.sock], [], [], 1.0)
            if ret[0] and (not self.exitevent.isSet()):
                buf = self.port.sock.recv(self.blocklen)
                if buf:
                    # Compare the received data with the control data
                    for idx_char in xrange(len(buf)):
                        _char = self.buff.popleft()
                        if buf[idx_char] == _char:
                            self.idx += 1
                            verif_buf.append(_char)
                        else:
                            # Detect the duplicated/lost characters.
                            logging.debug("ThRecvCheck %s: fail to receive "
                                          "%dth character.", self.getName(),
                                          self.idx)
                            buf = buf[idx_char:]
                            for i in xrange(100):
                                if len(self.buff) < self.sendidx:
                                    time.sleep(0.01)
                                else:
                                    break
                            sendidx = min(self.sendidx, len(self.buff))
                            if sendidx < self.sendidx:
                                logging.debug("ThRecvCheck %s: sendidx was "
                                              "lowered as there is not enough "
                                              "data after 1s. Using sendidx="
                                              "%s.", self.getName(), sendidx)
                            for _ in xrange(sendidx / self.blocklen):
                                if self.exitevent.isSet():
                                    break
                                buf += self.port.sock.recv(self.blocklen)
                            queue = _char
                            for _ in xrange(sendidx):
                                queue += self.buff[_]
                            offset_a = None
                            offset_b = None
                            for i in xrange(sendidx):
                                length = min(len(buf[i:]), len(queue))
                                if buf[i:] == queue[:length]:
                                    offset_a = i
                                    break
                            for i in xrange(sendidx):
                                length = min(len(queue[i:]), len(buf))
                                if queue[i:][:length] == buf[:length]:
                                    offset_b = i
                                    break

                            if (offset_b and offset_b < offset_a) or offset_a:
                                # Data duplication
                                self.sendidx -= offset_a
                                max_loss = max(max_loss, offset_a)
                                sum_loss += offset_a
                                logging.debug("ThRecvCheck %s: DUP %s (out of "
                                              "%s)", self.getName(), offset_a,
                                              sendidx)
                                buf = buf[offset_a + 1:]
                                for _ in xrange(len(buf)):
                                    self.buff.popleft()
                                verif_buf.extend(buf)
                                self.idx += len(buf)
                            elif offset_b:  # Data loss
                                max_loss = max(max_loss, offset_b)
                                sum_loss += offset_b
                                logging.debug("ThRecvCheck %s: LOST %s (out of"
                                              " %s)", self.getName(), offset_b,
                                              sendidx)
                                # Pop-out the lost characters from verif_queue
                                # (first one is already out)
                                self.sendidx -= offset_b
                                for i in xrange(offset_b - 1):
                                    self.buff.popleft()
                                for _ in xrange(len(buf)):
                                    self.buff.popleft()
                                self.idx += len(buf)
                                verif_buf.extend(buf)
                            else:   # Too big data loss or duplication
                                verif = ""
                                for _ in xrange(-min(sendidx, len(verif_buf)),
                                                0):
                                    verif += verif_buf[_]
                                logging.error("ThRecvCheck %s: mismatched data"
                                              ":\nverified: ..%s\nreceived:   "
                                              "%s\nsent:       %s",
                                              self.getName(), repr(verif),
                                              repr(buf), repr(queue))
                                raise exceptions.TestFail("Recv and sendqueue "
                                                          "don't match with any offset.")
                            # buf was changed, break from this loop
                            attempt = 10
                            break
                    attempt = 10
                else:   # ! buf
                    # Broken socket
                    if attempt > 0:
                        attempt -= 1
                        if self.migrate_event is None:
                            self.exitevent.set()
                            raise exceptions.TestFail("ThRecvCheck %s: Broken pipe."
                                                      " If this is expected behavior set migrate"
                                                      "_event to support reconnection." %
                                                      self.getName())
                        logging.debug("ThRecvCheck %s: Broken pipe "
                                      ", reconnecting. ", self.getName())
                        self.reload_loss_idx()
                        # Wait until main thread sets the new self.port
                        while not (self.exitevent.isSet() or
                                   self.migrate_event.wait(1)):
                            pass
                        if self.exitevent.isSet():
                            break
                        logging.debug("ThRecvCheck %s: Broken pipe resumed, "
                                      "reconnecting...", self.getName())

                        self.port.sock = False
                        self.port.open()
        if self.sendidx >= 0:
            self.minsendidx = min(self.minsendidx, self.sendidx)
        if (self.sendlen - self.minsendidx):
            logging.debug("ThRecvCheck %s: Data loss occurred during socket"
                          "reconnection. Maximal loss was %d per one "
                          "migration.", self.getName(),
                          (self.sendlen - self.minsendidx))
        if sum_loss > 0:
            logging.debug("ThRecvCheck %s: Data offset detected, cumulative "
                          "err: %d, max err: %d(%d)", self.getName(), sum_loss,
                          max_loss, float(max_loss) / self.blocklen)
        logging.debug("ThRecvCheck %s: exit(%d)", self.getName(),
                      self.idx)
        self.ret_code = 0
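
run_debug() prefixes every message with the thread's name via self.getName(), which keeps interleaved debug output from several checker threads readable. A tiny sketch of that convention is shown below (not avocado-vt code; the workload is a placeholder).

import logging
import threading
import time

logging.basicConfig(level=logging.DEBUG)

class RecvCheckSketch(threading.Thread):
    """Illustrative receiver-check thread that logs under its own name."""

    def __init__(self, blocks):
        super().__init__()
        self.blocks = blocks
        self.idx = 0

    def run(self):
        logging.debug("ThRecvCheck %s: run", self.getName())
        for block in self.blocks:
            self.idx += len(block)  # stand-in for verifying received data
            time.sleep(0.01)
        logging.debug("ThRecvCheck %s: exit(%d)", self.getName(), self.idx)

if __name__ == "__main__":
    checker = RecvCheckSketch([b"abc", b"defg"])
    checker.start()
    checker.join()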

Example 147

Project: pyqso
Source File: adif.py
View license
    def is_valid(self, field_name, data, data_type):
        """ Validate the data in a field with respect to the ADIF specification.

        :arg str field_name: The name of the ADIF field.
        :arg str data: The data of the ADIF field to validate.
        :arg str data_type: The type of data to be validated. See http://www.adif.org/304/ADIF_304.htm#Data_Types for the full list with descriptions.
        :returns: True or False to indicate whether the data is valid or not.
        :rtype: bool
        """

        logging.debug("Validating the following data in field '%s': %s" % (field_name, data))

        # Allow an empty string, in case the user doesn't want
        # to fill in this field.
        if(data == ""):
            return True

        if(data_type == "N"):
            # Allow a decimal point before and/or after any numbers,
            # but don't allow a decimal point on its own.
            m = re.match(r"-?(([0-9]+\.?[0-9]*)|([0-9]*\.?[0-9]+))", data)
            if(m is None):
                # Did not match anything.
                return False
            else:
                # Make sure we match the whole string,
                # otherwise there may be an invalid character after the match.
                return (m.group(0) == data)

        elif(data_type == "B"):
            # Boolean
            m = re.match(r"(Y|N)", data)
            if(m is None):
                return False
            else:
                return (m.group(0) == data)

        elif(data_type == "D"):
            # Date
            pattern = re.compile(r"([0-9]{4})")
            m_year = pattern.match(data, 0)
            if((m_year is None) or (int(m_year.group(0)) < 1930)):
                # Did not match anything.
                return False
            else:
                pattern = re.compile(r"([0-9]{2})")
                m_month = pattern.match(data, 4)
                if((m_month is None) or int(m_month.group(0)) > 12 or int(m_month.group(0)) < 1):
                    # Did not match anything.
                    return False
                else:
                    pattern = re.compile(r"([0-9]{2})")
                    m_day = pattern.match(data, 6)
                    days_in_month = calendar.monthrange(int(m_year.group(0)), int(m_month.group(0)))
                    if((m_day is None) or int(m_day.group(0)) > days_in_month[1] or int(m_day.group(0)) < 1):
                        # Did not match anything.
                        return False
                    else:
                        # Make sure we match the whole string,
                        # otherwise there may be an invalid character after the match.
                        return (len(data) == 8)

        elif(data_type == "T"):
            # Time
            pattern = re.compile(r"([0-9]{2})")
            m_hour = pattern.match(data, 0)
            if((m_hour is None) or (int(m_hour.group(0)) < 0) or (int(m_hour.group(0)) > 23)):
                # Did not match anything.
                return False
            else:
                pattern = re.compile(r"([0-9]{2})")
                m_minutes = pattern.match(data, 2)
                if((m_minutes is None) or int(m_minutes.group(0)) < 0 or int(m_minutes.group(0)) > 59):
                    # Did not match anything.
                    return False
                else:
                    if(len(data) == 4):
                        # HHMM format
                        return True
                    pattern = re.compile(r"([0-9]{2})")
                    m_seconds = pattern.match(data, 4)
                    if((m_seconds is None) or int(m_seconds.group(0)) < 0 or int(m_seconds.group(0)) > 59):
                        # Did not match anything.
                        return False
                    else:
                        # Make sure we match the whole string,
                        # otherwise there may be an invalid character after the match.
                        return (len(data) == 6)  # HHMMSS format

        # FIXME: Need to make sure that the "S" and "M" data types accept ASCII-only characters
        # in the range 32-126 inclusive.
        elif(data_type == "S"):
            # String
            m = re.match(r"(.+)", data)
            if(m is None):
                return False
            else:
                return (m.group(0) == data)

        elif(data_type == "I"):
            # IntlString
            m = re.match(r"(.+)", data, re.UNICODE)
            if(m is None):
                return False
            else:
                return (m.group(0) == data)

        elif(data_type == "G"):
            # IntlMultilineString
            m = re.match(r"(.+(\r\n)*.*)", data, re.UNICODE)
            if(m is None):
                return False
            else:
                return (m.group(0) == data)

        elif(data_type == "M"):
            # MultilineString
            # m = re.match(r"(.+(\r\n)*.*)", data)
            # if(m is None):
            #   return False
            # else:
            #   return (m.group(0) == data)
            return True

        elif(data_type == "L"):
            # Location
            pattern = re.compile(r"([EWNS]{1})", re.IGNORECASE)
            m_directional = pattern.match(data, 0)
            if(m_directional is None):
                # Did not match anything.
                return False
            else:
                pattern = re.compile(r"([0-9]{3})")
                m_degrees = pattern.match(data, 1)
                if((m_degrees is None) or int(m_degrees.group(0)) < 0 or int(m_degrees.group(0)) > 180):
                    # Did not match anything.
                    return False
                else:
                    pattern = re.compile(r"([0-9]{2}\.[0-9]{3})")
                    m_minutes = pattern.match(data, 4)
                    if((m_minutes is None) or float(m_minutes.group(0)) < 0 or float(m_minutes.group(0)) > 59.999):
                        # Did not match anything.
                        return False
                    else:
                        # Make sure we match the whole string,
                        # otherwise there may be an invalid character after the match.
                        return (len(data) == 10)

        elif(data_type == "E" or data_type == "A"):
            # Enumeration, AwardList.
            if(field_name == "MODE"):
                return (data in list(MODES.keys()))
            elif(field_name == "BAND"):
                return (data in BANDS)
            else:
                return True

        else:
            return True
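
is_valid() follows one pattern throughout: log the field being validated at debug level, match with a regular expression, then require the match to cover the whole string. The condensed sketch below (not pyqso itself) shows only the "N" (number) branch; the field names in the demo are examples.

import logging
import re

logging.basicConfig(level=logging.DEBUG)

def is_valid_number(field_name, data):
    """Return True if `data` is a signed decimal number (ADIF 'N' type)."""
    logging.debug("Validating the following data in field '%s': %s",
                  field_name, data)
    if data == "":
        return True  # an empty field is allowed
    m = re.match(r"-?(([0-9]+\.?[0-9]*)|([0-9]*\.?[0-9]+))", data)
    if m is None:
        return False
    # Require the match to span the whole string so trailing garbage
    # such as "14.074x" is rejected.
    return m.group(0) == data

if __name__ == "__main__":
    print(is_valid_number("FREQ", "14.074"))   # True
    print(is_valid_number("FREQ", "14.074x"))  # False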

Example 148

Project: tp-libvirt
Source File: virsh_volume.py
View license
def run(test, params, env):
    """
    1. Create a pool
    2. Create n number of volumes(vol-create-as)
    3. Check the volume details from the following commands
       vol-info
       vol-key
       vol-list
       vol-name
       vol-path
       vol-pool
       qemu-img info
    4. Delete the volume and check in vol-list
    5. Repeat the steps for number of volumes given
    6. Delete the pool and target
    TODO: Handle negative testcases
    """

    def delete_volume(expected_vol):
        """
        Deletes Volume
        """
        pool_name = expected_vol['pool_name']
        vol_name = expected_vol['name']
        pv = libvirt_storage.PoolVolume(pool_name)
        if not pv.delete_volume(vol_name):
            raise error.TestFail("Delete volume failed." % vol_name)
        else:
            logging.debug("Volume: %s successfully deleted on pool: %s",
                          vol_name, pool_name)

    def get_vol_list(pool_name, vol_name):
        """
        Parse the volume list
        """
        output = virsh.vol_list(pool_name, "--details")
        rg = re.compile(
            r'^(\S+)\s+(\S+)\s+(\S+)\s+(\d+.\d+\s\S+)\s+(\d+.\d+.*)')
        vol = {}
        vols = []
        volume_detail = None
        for line in output.stdout.splitlines():
            match = re.search(rg, line.lstrip())
            if match is not None:
                vol['name'] = match.group(1)
                vol['path'] = match.group(2)
                vol['type'] = match.group(3)
                vol['capacity'] = match.group(4)
                vol['allocation'] = match.group(5)
                vols.append(vol)
                vol = {}
        for volume in vols:
            if volume['name'] == vol_name:
                volume_detail = volume
        return volume_detail

    def norm_capacity(capacity):
        """
        Normalize the capacity values to bytes
        """
        # Normalize all values to bytes
        norm_capacity = {}
        des = {'B': 'B', 'bytes': 'B', 'b': 'B', 'kib': 'K',
               'KiB': 'K', 'K': 'K', 'k': 'K', 'KB': 'K',
               'mib': 'M', 'MiB': 'M', 'M': 'M', 'm': 'M',
               'MB': 'M', 'gib': 'G', 'GiB': 'G', 'G': 'G',
               'g': 'G', 'GB': 'G', 'Gb': 'G', 'tib': 'T',
               'TiB': 'T', 'TB': 'T', 'T': 'T', 't': 'T'
               }
        val = {'B': 1,
               'K': 1024,
               'M': 1048576,
               'G': 1073741824,
               'T': 1099511627776
               }

        reg_list = re.compile(r'(\S+)\s(\S+)')
        match_list = re.search(reg_list, capacity['list'])
        if match_list is not None:
            mem_value = float(match_list.group(1))
            norm = val[des[match_list.group(2)]]
            norm_capacity['list'] = int(mem_value * norm)
        else:
            raise error.TestFail("Error in parsing capacity value in"
                                 " virsh vol-list")

        match_info = re.search(reg_list, capacity['info'])
        if match_info is not None:
            mem_value = float(match_info.group(1))
            norm = val[des[match_info.group(2)]]
            norm_capacity['info'] = int(mem_value * norm)
        else:
            raise error.TestFail("Error in parsing capacity value "
                                 "in virsh vol-info")

        norm_capacity['qemu_img'] = capacity['qemu_img']
        norm_capacity['xml'] = int(capacity['xml'])

        return norm_capacity

    def check_vol(expected, avail=True):
        """
        Checks the expected volume details with actual volume details from
        vol-dumpxml
        vol-list
        vol-info
        vol-key
        vol-path
        qemu-img info
        """
        error_count = 0

        pv = libvirt_storage.PoolVolume(expected['pool_name'])
        vol_exists = pv.volume_exists(expected['name'])
        if vol_exists:
            if not avail:
                error_count += 1
                logging.error("Expect volume %s not exists but find it",
                              expected['name'])
                return error_count
        else:
            if avail:
                error_count += 1
                logging.error("Expect volume %s exists but not find it",
                              expected['name'])
                return error_count
            else:
                logging.info("Volume %s checked successfully for deletion",
                             expected['name'])
                return error_count

        actual_list = get_vol_list(expected['pool_name'], expected['name'])
        actual_info = pv.volume_info(expected['name'])
        # Get values from vol-dumpxml
        volume_xml = vol_xml.VolXML.new_from_vol_dumpxml(expected['name'],
                                                         expected['pool_name'])

        # Check against virsh vol-key
        vol_key = virsh.vol_key(expected['name'], expected['pool_name'])
        if vol_key.stdout.strip() != volume_xml.key:
            logging.error("Volume key is mismatch \n%s"
                          "Key from xml: %s\nKey from command: %s",
                          expected['name'], volume_xml.key, vol_key)
            error_count += 1
        else:
            logging.debug("virsh vol-key for volume: %s successfully"
                          " checked against vol-dumpxml", expected['name'])

        # Check against virsh vol-name
        get_vol_name = virsh.vol_name(expected['path'])
        if get_vol_name.stdout.strip() != expected['name']:
            logging.error("Volume name mismatch\n"
                          "Expected name: %s\nOutput of vol-name: %s",
                          expected['name'], get_vol_name)

        # Check against virsh vol-path
        vol_path = virsh.vol_path(expected['name'], expected['pool_name'])
        if expected['path'] != vol_path.stdout.strip():
            logging.error("Volume path mismatch for volume: %s\n"
                          "Expected path: %s\nOutput of vol-path: %s\n",
                          expected['name'],
                          expected['path'], vol_path)
            error_count += 1
        else:
            logging.debug("virsh vol-path for volume: %s successfully checked"
                          " against created volume path", expected['name'])

        # Check path against virsh vol-list
        if expected['path'] != actual_list['path']:
            logging.error("Volume path mismatch for volume:%s\n"
                          "Expected Path: %s\nPath from virsh vol-list: %s",
                          expected['name'], expected['path'],
                          actual_list['path'])
            error_count += 1
        else:
            logging.debug("Path of volume: %s from virsh vol-list "
                          "successfully checked against created "
                          "volume path", expected['name'])

        # Check path against virsh vol-dumpxml
        if expected['path'] != volume_xml.path:
            logging.error("Volume path mismatch for volume: %s\n"
                          "Expected Path: %s\nPath from virsh vol-dumpxml: %s",
                          expected['name'], expected['path'], volume_xml.path)
            error_count += 1

        else:
            logging.debug("Path of volume: %s from virsh vol-dumpxml "
                          "successfully checked against created volume path",
                          expected['name'])

        # Check type against virsh vol-list
        if expected['type'] != actual_list['type']:
            logging.error("Volume type mismatch for volume: %s\n"
                          "Expected Type: %s\n Type from vol-list: %s",
                          expected['name'], expected['type'],
                          actual_list['type'])
            error_count += 1
        else:
            logging.debug("Type of volume: %s from virsh vol-list "
                          "successfully checked against the created "
                          "volume type", expected['name'])

        # Check type against virsh vol-info
        if expected['type'] != actual_info['Type']:
            logging.error("Volume type mismatch for volume: %s\n"
                          "Expected Type: %s\n Type from vol-info: %s",
                          expected['name'], expected['type'],
                          actual_info['Type'])
            error_count += 1
        else:
            logging.debug("Type of volume: %s from virsh vol-info successfully"
                          " checked against the created volume type",
                          expected['name'])

        # Check name against virsh vol-info
        if expected['name'] != actual_info['Name']:
            logging.error("Volume name mismatch for volume: %s\n"
                          "Expected name: %s\n Name from vol-info: %s",
                          expected['name'],
                          expected['name'], actual_info['Name'])
            error_count += 1
        else:
            logging.debug("Name of volume: %s from virsh vol-info successfully"
                          " checked against the created volume name",
                          expected['name'])

        # Check format from against qemu-img info
        img_info = utils_misc.get_image_info(expected['path'])
        if expected['format']:
            if expected['format'] != img_info['format']:
                logging.error("Volume format mismatch for volume: %s\n"
                              "Expected format: %s\n"
                              "Format from qemu-img info: %s",
                              expected['name'], expected['format'],
                              img_info['format'])
                error_count += 1
            else:
                logging.debug("Format of volume: %s from qemu-img info "
                              "checked successfully against the created "
                              "volume format", expected['name'])

        # Check format against vol-dumpxml
        if expected['format']:
            if expected['format'] != volume_xml.format:
                logging.error("Volume format mismatch for volume: %s\n"
                              "Expected format: %s\n"
                              "Format from vol-dumpxml: %s",
                              expected['name'], expected['format'],
                              volume_xml.format)
                error_count += 1
            else:
                logging.debug("Format of volume: %s from virsh vol-dumpxml "
                              "checked successfully against the created"
                              " volume format", expected['name'])

        logging.info(expected['encrypt_format'])
        # Check encrypt against vol-dumpxml
        if expected['encrypt_format']:
            # As the 'default' format will change to a specific value (qcow),
            # just output it here
            logging.debug("Encryption format of volume '%s' is: %s",
                          expected['name'], volume_xml.encryption.format)
            # And also output encryption secret uuid
            secret_uuid = volume_xml.encryption.secret['uuid']
            logging.debug("Encryption secret of volume '%s' is: %s",
                          expected['name'], secret_uuid)
            if expected['encrypt_secret']:
                if expected['encrypt_secret'] != secret_uuid:
                    logging.error("Encryption secret mismatch for volume: %s\n"
                                  "Expected secret uuid: %s\n"
                                  "Secret uuid from vol-dumpxml: %s",
                                  expected['name'], expected['encrypt_secret'],
                                  secret_uuid)
                    error_count += 1
                else:
                    # If no encryption secret value was set, a secret value is
                    # automatically generated at volume creation time
                    logging.debug("Volume encryption secret is %s", secret_uuid)

        # Check pool name against vol-pool
        vol_pool = virsh.vol_pool(expected['path'])
        if expected['pool_name'] != vol_pool.stdout.strip():
            logging.error("Pool name mismatch for volume: %s against"
                          "virsh vol-pool", expected['name'])
            error_count += 1
        else:
            logging.debug("Pool name of volume: %s checked successfully"
                          " against the virsh vol-pool", expected['name'])

        norm_cap = {}
        capacity = {}
        capacity['list'] = actual_list['capacity']
        capacity['info'] = actual_info['Capacity']
        capacity['xml'] = volume_xml.capacity
        capacity['qemu_img'] = img_info['vsize']
        norm_cap = norm_capacity(capacity)
        delta_size = int(params.get('delta_size', "1024"))
        if abs(expected['capacity'] - norm_cap['list']) > delta_size:
            logging.error("Capacity mismatch for volume: %s against virsh"
                          " vol-list\nExpected: %s\nActual: %s",
                          expected['name'], expected['capacity'],
                          norm_cap['list'])
            error_count += 1
        else:
            logging.debug("Capacity value checked successfully against"
                          " virsh vol-list for volume %s", expected['name'])

        if abs(expected['capacity'] - norm_cap['info']) > delta_size:
            logging.error("Capacity mismatch for volume: %s against virsh"
                          " vol-info\nExpected: %s\nActual: %s",
                          expected['name'], expected['capacity'],
                          norm_cap['info'])
            error_count += 1
        else:
            logging.debug("Capacity value checked successfully against"
                          " virsh vol-info for volume %s", expected['name'])

        if abs(expected['capacity'] - norm_cap['xml']) > delta_size:
            logging.error("Capacity mismatch for volume: %s against virsh"
                          " vol-dumpxml\nExpected: %s\nActual: %s",
                          expected['name'], expected['capacity'],
                          norm_cap['xml'])
            error_count += 1
        else:
            logging.debug("Capacity value checked successfully against"
                          " virsh vol-dumpxml for volume: %s",
                          expected['name'])

        if abs(expected['capacity'] - norm_cap['qemu_img']) > delta_size:
            logging.error("Capacity mismatch for volume: %s against "
                          "qemu-img info\nExpected: %s\nActual: %s",
                          expected['name'], expected['capacity'],
                          norm_cap['qemu_img'])
            error_count += 1
        else:
            logging.debug("Capacity value checked successfully against"
                          " qemu-img info for volume: %s",
                          expected['name'])
        return error_count

    def get_all_secrets():
        """
        Return all exist libvirt secrets uuid in a list
        """
        secret_list = []
        secrets = virsh.secret_list().stdout.strip()
        for secret in secrets.splitlines()[2:]:
            secret_list.append(secret.strip().split()[0])
        return secret_list

    # Initialize the variables
    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    pool_target = params.get("pool_target")
    if os.path.dirname(pool_target) is "":
        pool_target = os.path.join(test.tmpdir, pool_target)
    vol_name = params.get("volume_name")
    vol_number = int(params.get("number_of_volumes", "2"))
    capacity = params.get("volume_size", "1048576")
    allocation = params.get("volume_allocation", "1048576")
    vol_format = params.get("volume_format")
    source_name = params.get("gluster_source_name", "gluster-vol1")
    source_path = params.get("gluster_source_path", "/")
    encrypt_format = params.get("vol_encrypt_format")
    encrypt_secret = params.get("encrypt_secret")
    emulated_image = params.get("emulated_image")
    emulated_image_size = params.get("emulated_image_size")
    if not libvirt_version.version_compare(1, 0, 0):
        if pool_type == "gluster":
            raise error.TestNAError("Gluster pool is not supported in current"
                                    " libvirt version.")

    try:
        str_capa = utils_misc.normalize_data_size(capacity, "B")
        int_capa = int(str(str_capa).split('.')[0])
    except ValueError:
        raise error.TestError("Translate size %s to 'B' failed" % capacity)
    try:
        str_capa = utils_misc.normalize_data_size(allocation, "B")
        int_allo = int(str(str_capa).split('.')[0])
    except ValueError:
        raise error.TestError("Translate size %s to 'B' failed" % allocation)

    # Stop multipathd to avoid pool start failures (for fs-like pools, the
    # newly added disk may be in use by device-mapper, so starting the pool
    # would report a "disk already mounted" error).
    multipathd = service.Factory.create_service("multipathd")
    multipathd_status = multipathd.status()
    if multipathd_status:
        multipathd.stop()

    # Get exists libvirt secrets before test
    ori_secrets = get_all_secrets()
    expected_vol = {}
    vol_type = 'file'
    if pool_type in ['disk', 'logical']:
        vol_type = 'block'
    if pool_type == 'gluster':
        vol_type = 'network'
    logging.debug("Debug:\npool_name:%s\npool_type:%s\npool_target:%s\n"
                  "vol_name:%s\nvol_number:%s\ncapacity:%s\nallocation:%s\n"
                  "vol_format:%s", pool_name, pool_type, pool_target,
                  vol_name, vol_number, capacity, allocation, vol_format)

    libv_pvt = utlv.PoolVolumeTest(test, params)
    # Run Testcase
    total_err_count = 0
    try:
        # Create a new pool
        libv_pvt.pre_pool(pool_name=pool_name,
                          pool_type=pool_type,
                          pool_target=pool_target,
                          emulated_image=emulated_image,
                          image_size=emulated_image_size,
                          source_name=source_name,
                          source_path=source_path)
        for i in range(vol_number):
            volume_name = "%s_%d" % (vol_name, i)
            expected_vol['pool_name'] = pool_name
            expected_vol['pool_type'] = pool_type
            expected_vol['pool_target'] = pool_target
            expected_vol['capacity'] = int_capa
            expected_vol['allocation'] = int_allo
            expected_vol['format'] = vol_format
            expected_vol['name'] = volume_name
            expected_vol['type'] = vol_type
            expected_vol['encrypt_format'] = encrypt_format
            expected_vol['encrypt_secret'] = encrypt_secret
            # Creates volume
            if pool_type != "gluster":
                expected_vol['path'] = pool_target + '/' + volume_name
                new_volxml = vol_xml.VolXML()
                new_volxml.name = volume_name
                new_volxml.capacity = int_capa
                new_volxml.allocation = int_allo
                if vol_format:
                    new_volxml.format = vol_format
                encrypt_dict = {}
                if encrypt_format:
                    encrypt_dict.update({"format": encrypt_format})
                if encrypt_secret:
                    encrypt_dict.update({"secret": {'uuid': encrypt_secret}})
                if encrypt_dict:
                    new_volxml.encryption = new_volxml.new_encryption(**encrypt_dict)
                logging.debug("Volume XML for creation:\n%s", str(new_volxml))
                virsh.vol_create(pool_name, new_volxml.xml, debug=True)
            else:
                ip_addr = utlv.get_host_ipv4_addr()
                expected_vol['path'] = "gluster://%s/%s/%s" % (ip_addr,
                                                               source_name,
                                                               volume_name)
                utils.run("qemu-img create -f %s %s %s" % (vol_format,
                                                           expected_vol['path'],
                                                           capacity))
            virsh.pool_refresh(pool_name)
            # Check volumes
            total_err_count += check_vol(expected_vol)
            # Delete volume and check for results
            delete_volume(expected_vol)
            total_err_count += check_vol(expected_vol, False)
        if total_err_count > 0:
            raise error.TestFail("Get %s errors when checking volume" % total_err_count)
    finally:
        # Clean up
        for sec in get_all_secrets():
            if sec not in ori_secrets:
                virsh.secret_undefine(sec)
        try:
            libv_pvt.cleanup_pool(pool_name, pool_type, pool_target,
                                  emulated_image, source_name=source_name)
        except error.TestFail, detail:
            logging.error(str(detail))
        if multipathd_status:
            multipathd.start()
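
check_vol() repeats one idiom many times: compare an expected value against what a virsh command reported, log a mismatch at error level and bump a counter, or log success at debug level. The stripped-down helper below (not tp-libvirt code; the field names and values are illustrative) captures that structure.

import logging

logging.basicConfig(level=logging.DEBUG)

def check_fields(expected, actual, fields):
    """Compare `fields` between two dicts, logging each result.

    Mismatches are logged at error level and counted; matches are logged
    at debug level, mirroring the structure of check_vol().
    """
    error_count = 0
    for field in fields:
        if expected.get(field) != actual.get(field):
            logging.error("Volume %s mismatch\nExpected: %s\nActual: %s",
                          field, expected.get(field), actual.get(field))
            error_count += 1
        else:
            logging.debug("Volume %s checked successfully: %s",
                          field, expected.get(field))
    return error_count

if __name__ == "__main__":
    exp = {"name": "vol_0", "type": "file", "capacity": 1048576}
    act = {"name": "vol_0", "type": "file", "capacity": 1048576}
    print(check_fields(exp, act, ["name", "type", "capacity"]))  # prints 0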

Example 149

Project: allianceauth
Source File: authenticator.py
View license
def do_main_program():
    #
    # --- Authenticator implementation
    #    All of this has to go in here so we can correctly daemonize the tool
    #    without losing the file descriptors opened by the Ice module
    slicedir = Ice.getSliceDir()
    if not slicedir:
        slicedir = ["-I/usr/share/Ice/slice", "-I/usr/share/slice"]
    else:
        slicedir = ['-I' + slicedir]
    Ice.loadSlice('', slicedir + [cfg.ice.slice])
    import Murmur

    class allianceauthauthenticatorApp(Ice.Application):
        def run(self, args):
            self.shutdownOnInterrupt()

            if not self.initializeIceConnection():
                return 1

            if cfg.ice.watchdog > 0:
                self.failedWatch = True
                self.checkConnection()

            # Serve till we are stopped
            self.communicator().waitForShutdown()
            self.watchdog.cancel()

            if self.interrupted():
                warning('Caught interrupt, shutting down')

            threadDB.disconnect()
            return 0

        def initializeIceConnection(self):
            """
            Establishes the two-way Ice connection and adds the authenticator to the
            configured servers
            """
            ice = self.communicator()

            if cfg.ice.secret:
                debug('Using shared ice secret')
                ice.getImplicitContext().put("secret", cfg.ice.secret)
            elif not cfg.glacier.enabled:
                warning('Consider using an ice secret to improve security')

            if cfg.glacier.enabled:
                # info('Connecting to Glacier2 server (%s:%d)', glacier_host, glacier_port)
                error('Glacier support not implemented yet')
                # TODO: Implement this

            info('Connecting to Ice server (%s:%d)', cfg.ice.host, cfg.ice.port)
            base = ice.stringToProxy('Meta:tcp -h %s -p %d' % (cfg.ice.host, cfg.ice.port))
            self.meta = Murmur.MetaPrx.uncheckedCast(base)

            adapter = ice.createObjectAdapterWithEndpoints('Callback.Client', 'tcp -h %s' % cfg.ice.host)
            adapter.activate()

            metacbprx = adapter.addWithUUID(metaCallback(self))
            self.metacb = Murmur.MetaCallbackPrx.uncheckedCast(metacbprx)

            authprx = adapter.addWithUUID(allianceauthauthenticator())
            self.auth = Murmur.ServerUpdatingAuthenticatorPrx.uncheckedCast(authprx)

            return self.attachCallbacks()

        def attachCallbacks(self, quiet=False):
            """
            Attaches all callbacks for meta and authenticators
            """

            # Ice.ConnectionRefusedException
            # debug('Attaching callbacks')
            try:
                if not quiet: info('Attaching meta callback')

                self.meta.addCallback(self.metacb)

                for server in self.meta.getBootedServers():
                    if not cfg.murmur.servers or server.id() in cfg.murmur.servers:
                        if not quiet: info('Setting authenticator for virtual server %d', server.id())
                        server.setAuthenticator(self.auth)

            except (Murmur.InvalidSecretException, Ice.UnknownUserException, Ice.ConnectionRefusedException) as e:
                if isinstance(e, Ice.ConnectionRefusedException):
                    error('Server refused connection')
                elif isinstance(e, Murmur.InvalidSecretException) or \
                                isinstance(e, Ice.UnknownUserException) and (
                                    e.unknown == 'Murmur::InvalidSecretException'):
                    error('Invalid ice secret')
                else:
                    # We do not actually want to handle this one, re-raise it
                    raise e

                self.connected = False
                return False

            self.connected = True
            return True

        def checkConnection(self):
            """
            Tries to reapply all callbacks to make sure the authenticator
            survives server restarts and disconnects.
            """
            # debug('Watchdog run')

            try:
                if not self.attachCallbacks(quiet=not self.failedWatch):
                    self.failedWatch = True
                else:
                    self.failedWatch = False
            except Ice.Exception as e:
                error('Failed connection check, will retry in next watchdog run (%ds)', cfg.ice.watchdog)
                debug(str(e))
                self.failedWatch = True

            # Renew the timer
            self.watchdog = Timer(cfg.ice.watchdog, self.checkConnection)
            self.watchdog.start()

    def checkSecret(func):
        """
        Decorator that checks whether the server transmitted the right secret
        if a secret is supposed to be used.
        """
        if not cfg.ice.secret:
            return func

        def newfunc(*args, **kws):
            if 'current' in kws:
                current = kws["current"]
            else:
                current = args[-1]

            if not current or 'secret' not in current.ctx or current.ctx['secret'] != cfg.ice.secret:
                error('Server transmitted invalid secret. Possible injection attempt.')
                raise Murmur.InvalidSecretException()

            return func(*args, **kws)

        return newfunc

    def fortifyIceFu(retval=None, exceptions=(Ice.Exception,)):
        """
        Decorator that catches exceptions, logs them and returns a safe retval
        value. This helps prevent the authenticator from getting stuck in
        critical code paths. Only exceptions that are instances of classes
        given in the exceptions list are not caught.

        The default is to catch all non-Ice exceptions.
        """

        def newdec(func):
            def newfunc(*args, **kws):
                try:
                    return func(*args, **kws)
                except Exception as e:
                    catch = True
                    for ex in exceptions:
                        if isinstance(e, ex):
                            catch = False
                            break

                    if catch:
                        critical('Unexpected exception caught')
                        exception(e)
                        return retval
                    raise

            return newfunc

        return newdec

    class metaCallback(Murmur.MetaCallback):
        def __init__(self, app):
            Murmur.MetaCallback.__init__(self)
            self.app = app

        @fortifyIceFu()
        @checkSecret
        def started(self, server, current=None):
            """
            This function is called when a virtual server is started
            and makes sure an authenticator gets attached if needed.
            """
            if not cfg.murmur.servers or server.id() in cfg.murmur.servers:
                info('Setting authenticator for virtual server %d', server.id())
                try:
                    server.setAuthenticator(app.auth)
                # Apparently this server was restarted without us noticing
                except (Murmur.InvalidSecretException, Ice.UnknownUserException) as e:
                    if hasattr(e, "unknown") and e.unknown != "Murmur::InvalidSecretException":
                        # Special handling for Murmur 1.2.2 servers with invalid slice files
                        raise e

                    error('Invalid ice secret')
                    return
            else:
                debug('Virtual server %d got started', server.id())

        @fortifyIceFu()
        @checkSecret
        def stopped(self, server, current=None):
            """
            This function is called when a virtual server is stopped
            """
            if self.app.connected:
                # Only try to output the server id if we think we are still connected to prevent
                # flooding of our thread pool
                try:
                    if not cfg.murmur.servers or server.id() in cfg.murmur.servers:
                        info('Authenticated virtual server %d got stopped', server.id())
                    else:
                        debug('Virtual server %d got stopped', server.id())
                    return
                except Ice.ConnectionRefusedException:
                    self.app.connected = False

            debug('Server shutdown stopped a virtual server')

    if cfg.user.reject_on_error:  # Python 2.4 compat
        authenticateFortifyResult = (-1, None, None)
    else:
        authenticateFortifyResult = (-2, None, None)

    class allianceauthauthenticator(Murmur.ServerUpdatingAuthenticator):
        texture_cache = {}

        def __init__(self):
            Murmur.ServerUpdatingAuthenticator.__init__(self)

        @fortifyIceFu(authenticateFortifyResult)
        @checkSecret
        def authenticate(self, name, pw, certlist, certhash, strong, current=None):
            """
            This function is called to authenticate a user
            """

            # Search for the user in the database
            FALL_THROUGH = -2
            AUTH_REFUSED = -1

            if name == 'SuperUser':
                debug('Forced fall through for SuperUser')
                return (FALL_THROUGH, None, None)

            try:
                sql = 'SELECT id, pwhash, groups FROM %sservices_mumbleuser WHERE username = %%s' % cfg.database.prefix
                cur = threadDB.execute(sql, [name])
            except threadDbException:
                return (FALL_THROUGH, None, None)

            res = cur.fetchone()
            cur.close()
            if not res:
                info('Fall through for unknown user "%s"', name)
                return (FALL_THROUGH, None, None)

            uid, upwhash, ugroups = res

            if ugroups:
                groups = ugroups.split(',')
            else:
                groups = []

            if allianceauth_check_hash(pw, upwhash):
                info('User authenticated: "%s" (%d)', name, uid + cfg.user.id_offset)
                debug('Group memberships: %s', str(groups))
                return (uid + cfg.user.id_offset, entity_decode(name), groups)

            info('Failed authentication attempt for user: "%s" (%d)', name, uid + cfg.user.id_offset)
            return (AUTH_REFUSED, None, None)

        @fortifyIceFu((False, None))
        @checkSecret
        def getInfo(self, id, current=None):
            """
            Gets called to fetch user specific information
            """

            # We do not expose any additional information so always fall through
            debug('getInfo for %d -> denied', id)
            return (False, None)

        @fortifyIceFu(-2)
        @checkSecret
        def nameToId(self, name, current=None):
            """
            Gets called to get the id for a given username
            """

            FALL_THROUGH = -2
            if name == 'SuperUser':
                debug('nameToId SuperUser -> forced fall through')
                return FALL_THROUGH

            try:
                sql = 'SELECT id FROM %sservices_mumbleuser WHERE username = %%s' % cfg.database.prefix
                cur = threadDB.execute(sql, [name])
            except threadDbException:
                return FALL_THROUGH

            res = cur.fetchone()
            cur.close()
            if not res:
                debug('nameToId %s -> ?', name)
                return FALL_THROUGH

            debug('nameToId %s -> %d', name, (res[0] + cfg.user.id_offset))
            return res[0] + cfg.user.id_offset

        @fortifyIceFu("")
        @checkSecret
        def idToName(self, id, current=None):
            """
            Gets called to get the username for a given id
            """

            FALL_THROUGH = ""
            # Make sure the ID is in our range and transform it to the actual smf user id
            if id < cfg.user.id_offset:
                return FALL_THROUGH
            bbid = id - cfg.user.id_offset

            # Fetch the user from the database
            try:
                sql = 'SELECT username FROM %sservices_mumbleuser WHERE id = %%s' % cfg.database.prefix
                cur = threadDB.execute(sql, [bbid])
            except threadDbException:
                return FALL_THROUGH

            res = cur.fetchone()
            cur.close()
            if res:
                if res[0] == 'SuperUser':
                    debug('idToName %d -> "SuperUser" caught', id)
                    return FALL_THROUGH

                debug('idToName %d -> "%s"', id, res[0])
                return res[0]

            debug('idToName %d -> ?', id)
            return FALL_THROUGH

        @fortifyIceFu("")
        @checkSecret
        def idToTexture(self, id, current=None):
            """
            Gets called to get the corresponding texture for a user
            """

            FALL_THROUGH = ""

            debug('idToTexture "%s" -> fall through', id)
            return FALL_THROUGH

        @fortifyIceFu(-2)
        @checkSecret
        def registerUser(self, name, current=None):
            """
            Gets called when the server is asked to register a user.
            """

            FALL_THROUGH = -2
            debug('registerUser "%s" -> fall through', name)
            return FALL_THROUGH

        @fortifyIceFu(-1)
        @checkSecret
        def unregisterUser(self, id, current=None):
            """
            Gets called when the server is asked to unregister a user.
            """

            FALL_THROUGH = -1
            # Return -1 to fall through to internal server database, we will not modify the smf database
            # but we can make murmur delete all additional information it got this way.
            debug('unregisterUser %d -> fall through', id)
            return FALL_THROUGH

        @fortifyIceFu({})
        @checkSecret
        def getRegisteredUsers(self, filter, current=None):
            """
            Returns a dict mapping user ids to usernames from the AllianceAuth
            database for all usernames that contain filter as a substring.
            """

            if not filter:
                filter = '%'

            try:
                sql = 'SELECT id, username FROM %sservices_mumbleuser WHERE username LIKE %%s' % cfg.database.prefix
                cur = threadDB.execute(sql, [filter])
            except threadDbException:
                return {}

            res = cur.fetchall()
            cur.close()
            if not res:
                debug('getRegisteredUsers -> empty list for filter "%s"', filter)
                return {}
            debug('getRegisteredUsers -> %d results for filter "%s"', len(res), filter)
            return dict([(a + cfg.user.id_offset, b) for a, b in res])

        @fortifyIceFu(-1)
        @checkSecret
        def setInfo(self, id, info, current=None):
            """
            Gets called when the server is supposed to save additional information
            about a user to his database
            """

            FALL_THROUGH = -1
            # Return -1 to fall through to the internal server handler. We must not modify
            # the smf database so the additional information is stored in murmur's database
            debug('setInfo %d -> fall through', id)
            return FALL_THROUGH

        @fortifyIceFu(-1)
        @checkSecret
        def setTexture(self, id, texture, current=None):
            """
            Gets called when the server is asked to update the user texture of a user
            """

            FALL_THROUGH = -1

            debug('setTexture %d -> fall through', id)
            return FALL_THROUGH

    class CustomLogger(Ice.Logger):
        """
        Logger implementation to pipe Ice log messages into
        our own log
        """

        def __init__(self):
            Ice.Logger.__init__(self)
            self._log = getLogger('Ice')

        def _print(self, message):
            self._log.info(message)

        def trace(self, category, message):
            self._log.debug('Trace %s: %s', category, message)

        def warning(self, message):
            self._log.warning(message)

        def error(self, message):
            self._log.error(message)

    #
    # --- Start of authenticator
    #
    info('Starting AllianceAuth mumble authenticator')
    initdata = Ice.InitializationData()
    initdata.properties = Ice.createProperties([], initdata.properties)
    for prop, val in cfg.iceraw:
        initdata.properties.setProperty(prop, val)

    initdata.properties.setProperty('Ice.ImplicitContext', 'Shared')
    initdata.properties.setProperty('Ice.Default.EncodingVersion', '1.0')
    initdata.logger = CustomLogger()

    app = allianceauthauthenticatorApp()
    state = app.main(sys.argv[:1], initData=initdata)
    info('Shutdown complete')
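
The authenticator above calls bare debug()/info()/warning()/error()/critical()/exception() helpers rather than logging.debug directly; in this style of script they are typically short aliases bound to a named logger before the daemon starts serving. A hedged sketch of that wiring (the logger name, format and sample values are assumptions, not taken from the allianceauth source):

import logging
from logging import getLogger

logging.basicConfig(level=logging.DEBUG,
                    format="%(asctime)s %(levelname)s %(message)s")
log = getLogger("authenticator")

# Short aliases so call sites read like the example above.
debug = log.debug
info = log.info
warning = log.warning
error = log.error
critical = log.critical
exception = log.exception

info('Connecting to Ice server (%s:%d)', '127.0.0.1', 6502)
debug('Using shared ice secret')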

Example 150

Project: pyqso
Source File: preferences_dialog.py
View license
    def __init__(self):
        logging.debug("Setting up the Records page of the preferences dialog...")

        Gtk.VBox.__init__(self, spacing=2)

        # Remember that the have_config conditional in the PyQSO class may be out-of-date the next time the user opens up the preferences dialog
        # because a configuration file may have been created after launching the application. Let's check to see if one exists again...
        config = configparser.ConfigParser()
        have_config = (config.read(PREFERENCES_FILE) != [])

        self.sources = {}

        # Autocomplete frame
        frame = Gtk.Frame()
        frame.set_label("Autocomplete")
        vbox = Gtk.VBox()
        self.sources["AUTOCOMPLETE_BAND"] = Gtk.CheckButton("Autocomplete the Band field")
        (section, option) = ("records", "autocomplete_band")
        if(have_config and config.has_option(section, option)):
            self.sources["AUTOCOMPLETE_BAND"].set_active(config.get(section, option) == "True")
        else:
            self.sources["AUTOCOMPLETE_BAND"].set_active(True)
        vbox.pack_start(self.sources["AUTOCOMPLETE_BAND"], False, False, 2)

        self.sources["USE_UTC"] = Gtk.CheckButton("Use UTC when autocompleting the Date and Time")
        (section, option) = ("records", "use_utc")
        if(have_config and config.has_option(section, option)):
            self.sources["USE_UTC"].set_active(config.get(section, option) == "True")
        else:
            self.sources["USE_UTC"].set_active(True)
        vbox.pack_start(self.sources["USE_UTC"], False, False, 2)

        frame.add(vbox)
        self.pack_start(frame, False, False, 2)

        # Default values frame
        frame = Gtk.Frame()
        frame.set_label("Default values")
        vbox = Gtk.VBox()

        # Mode
        hbox_temp = Gtk.HBox()
        label = Gtk.Label("Mode: ")
        label.set_width_chars(17)
        label.set_alignment(0, 0.5)
        hbox_temp.pack_start(label, False, False, 2)

        self.sources["DEFAULT_MODE"] = Gtk.ComboBoxText()
        for mode in sorted(MODES.keys()):
            self.sources["DEFAULT_MODE"].append_text(mode)
        (section, option) = ("records", "default_mode")
        if(have_config and config.has_option(section, option)):
            mode = config.get(section, option)
        else:
            mode = ""
        self.sources["DEFAULT_MODE"].set_active(sorted(MODES.keys()).index(mode))
        self.sources["DEFAULT_MODE"].connect("changed", self._on_mode_changed)
        hbox_temp.pack_start(self.sources["DEFAULT_MODE"], False, False, 2)
        vbox.pack_start(hbox_temp, False, False, 2)

        # Submode
        hbox_temp = Gtk.HBox()
        label = Gtk.Label("Submode: ")
        label.set_width_chars(17)
        label.set_alignment(0, 0.5)
        hbox_temp.pack_start(label, False, False, 2)

        self.sources["DEFAULT_SUBMODE"] = Gtk.ComboBoxText()
        for submode in MODES[mode]:
            self.sources["DEFAULT_SUBMODE"].append_text(submode)
        (section, option) = ("records", "default_submode")
        if(have_config and config.has_option(section, option)):
            submode = config.get(section, option)
        else:
            submode = ""
        self.sources["DEFAULT_SUBMODE"].set_active(MODES[mode].index(submode))
        hbox_temp.pack_start(self.sources["DEFAULT_SUBMODE"], False, False, 2)
        vbox.pack_start(hbox_temp, False, False, 2)

        # Power
        hbox_temp = Gtk.HBox()
        label = Gtk.Label("TX Power (W): ")
        label.set_width_chars(17)
        label.set_alignment(0, 0.5)
        hbox_temp.pack_start(label, False, False, 2)

        self.sources["DEFAULT_POWER"] = Gtk.Entry()
        (section, option) = ("records", "default_power")
        if(have_config and config.has_option(section, option)):
            self.sources["DEFAULT_POWER"].set_text(config.get(section, option))
        else:
            self.sources["DEFAULT_POWER"].set_text("")
        hbox_temp.pack_start(self.sources["DEFAULT_POWER"], False, False, 2)
        vbox.pack_start(hbox_temp, False, False, 2)

        frame.add(vbox)
        self.pack_start(frame, False, False, 2)

        # Callsign lookup frame
        frame = Gtk.Frame()
        frame.set_label("Callsign lookup")
        vbox = Gtk.VBox()

        # Callsign database
        hbox_temp = Gtk.HBox()
        label = Gtk.Label("Database: ")
        label.set_width_chars(17)
        label.set_alignment(0, 0.5)
        hbox_temp.pack_start(label, False, False, 2)

        self.sources["CALLSIGN_DATABASE"] = Gtk.ComboBoxText()
        callsign_database = ["", "qrz.com", "hamqth.com"]
        for database in callsign_database:
            self.sources["CALLSIGN_DATABASE"].append_text(database)
        (section, option) = ("records", "callsign_database")
        if(have_config and config.has_option(section, option)):
            self.sources["CALLSIGN_DATABASE"].set_active(callsign_database.index(config.get(section, option)))
        else:
            self.sources["CALLSIGN_DATABASE"].set_active(callsign_database.index(""))
        hbox_temp.pack_start(self.sources["CALLSIGN_DATABASE"], False, False, 2)
        vbox.pack_start(hbox_temp, False, False, 2)

        # Login details
        subframe = Gtk.Frame()
        subframe.set_label("Login details")
        inner_vbox = Gtk.VBox()

        hbox = Gtk.HBox()
        label = Gtk.Label("Username: ")
        label.set_width_chars(9)
        label.set_alignment(0, 0.5)
        hbox.pack_start(label, False, False, 2)
        self.sources["CALLSIGN_DATABASE_USERNAME"] = Gtk.Entry()
        (section, option) = ("records", "callsign_database_username")
        if(have_config and config.has_option(section, option)):
            self.sources["CALLSIGN_DATABASE_USERNAME"].set_text(config.get(section, option))
        hbox.pack_start(self.sources["CALLSIGN_DATABASE_USERNAME"], False, False, 2)
        inner_vbox.pack_start(hbox, False, False, 2)

        hbox = Gtk.HBox()
        label = Gtk.Label("Password: ")
        label.set_width_chars(9)
        label.set_alignment(0, 0.5)
        hbox.pack_start(label, False, False, 2)
        self.sources["CALLSIGN_DATABASE_PASSWORD"] = Gtk.Entry()
        self.sources["CALLSIGN_DATABASE_PASSWORD"].set_visibility(False)  # Mask the password with the "*" character.
        (section, option) = ("records", "callsign_database_password")
        if(have_config and config.has_option(section, option)):
            password = base64.b64decode(config.get(section, option)).decode("utf-8")
            self.sources["CALLSIGN_DATABASE_PASSWORD"].set_text(password)
        hbox.pack_start(self.sources["CALLSIGN_DATABASE_PASSWORD"], False, False, 2)
        inner_vbox.pack_start(hbox, False, False, 2)

        label = Gtk.Label("Warning: Login details are currently stored as\nBase64-encoded plain text in the configuration file.")
        inner_vbox.pack_start(label, False, False, 2)

        subframe.add(inner_vbox)
        vbox.pack_start(subframe, False, False, 2)

        self.sources["IGNORE_PREFIX_SUFFIX"] = Gtk.CheckButton("Ignore callsign prefixes and/or suffixes")
        (section, option) = ("records", "ignore_prefix_suffix")
        if(have_config and config.has_option(section, option)):
            self.sources["IGNORE_PREFIX_SUFFIX"].set_active(config.get(section, option) == "True")
        else:
            self.sources["IGNORE_PREFIX_SUFFIX"].set_active(True)
        vbox.pack_start(self.sources["IGNORE_PREFIX_SUFFIX"], False, False, 2)

        frame.add(vbox)
        self.pack_start(frame, False, False, 2)

        logging.debug("Records page of the preferences dialog ready!")
        return
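
Example 150 brackets the page construction with a logging.debug call at entry ("Setting up the Records page...") and another at exit ("...ready!"), which makes slow or failing setup steps easy to locate in a debug log. A minimal sketch of the same bracketing pattern, independent of GTK (the function name and option keys are illustrative):

import logging

logging.basicConfig(level=logging.DEBUG)

def build_records_page(config):
    logging.debug("Setting up the Records page of the preferences dialog...")
    page = {}
    # ... construct widgets / read options from config here ...
    page["AUTOCOMPLETE_BAND"] = config.get("autocomplete_band", True)
    logging.debug("Records page of the preferences dialog ready!")
    return page

build_records_page({"autocomplete_band": False})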