logging.debug

Here are examples of the Python API logging.debug taken from open source projects. By voting up, you can indicate which examples are most useful and appropriate.
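
Before diving into the examples, here is a minimal sketch of the call itself (the configuration, URL, and message below are illustrative, not taken from the projects): logging.debug() writes a DEBUG-level record to the root logger, which stays silent unless the logging level has been lowered to DEBUG.

import logging

# Debug records are suppressed by default (the root logger level is WARNING);
# lower the level so logging.debug() output actually appears.
logging.basicConfig(level=logging.DEBUG,
                    format="%(asctime)s %(levelname)s %(message)s")

url = "http://example.com/kernel.rpm"  # illustrative value
# %-style lazy formatting: the message is only built if DEBUG is enabled.
logging.debug("Downloading package from %s", url)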

200 Examples

Example 1

Project: tp-qemu
Source File: rh_kernel_update.py
@error.context_aware
def run(test, params, env):
    """
    Install/upgrade a special kernel package via the brew tool or a link. The
    'kernel_install' case can do this too, but this case has additional
    steps. In the future, we will merge this into the kernel_install case.

    1) Boot the vm
    2) Get latest kernel package link from brew
    3) Verify the version of guest kernel
    4) Compare guest kernel version and brew latest kernel version
    5) Backup boot cfg file
    6) Install guest kernel firmware (Optional)
    7) Install guest kernel
    8) Install guest kernel debuginfo (Optional)
    9) Backup boot cfg file after installing new kernel
    10) Installing virtio driver (Optional)
    11) Backup initrd file
    12) Update initrd file
    13) Make the new installed kernel as default
    14) Backup boot cfg file after setting new kernel as default
    15) Update the guest kernel cmdline (Optional)
    16) Reboot guest after updating kernel
    17) Verifying the virtio drivers (Optional)

    @param test: QEMU test object
    @param params: Dictionary with the test parameters
    @param env: Dictionary with test environment.
    """
    def get_brew_url(mnt_path, download_root):
        # get the url from the brew mnt path
        url = download_root + mnt_path[11:]
        logging.debug("Brew URL is %s" % url)
        return url

    def install_rpm(session, url, upgrade=False, nodeps=False, timeout=600):
        # install a package from brew
        cmd = "rpm -ivhf %s" % url
        if upgrade:
            # Upgrades or installs kernel to a newer version, then remove
            # old version.
            cmd = "rpm -Uvhf %s" % url
        if nodeps:
            cmd += " --nodeps"
        s, o = session.cmd_status_output(cmd, timeout=timeout)
        if s != 0 and ("already" not in o):
            raise error.TestFail("Fail to install %s:%s" % (url, o))

        return True
        # FIXME: need to add the check for newer version

    def copy_and_install_rpm(session, url, upgrade=False):
        rpm_name = os.path.basename(url)
        if url.startswith("http"):
            download_cmd = "wget %s" % url
            utils.system_output(download_cmd)
            rpm_src = rpm_name
        else:
            rpm_src = utils_misc.get_path(test.bindir, url)
        vm.copy_files_to(rpm_src, "/tmp/%s" % rpm_name)
        install_rpm(session, "/tmp/%s" % rpm_name, upgrade)

    def get_kernel_rpm_link():
        method = params.get("method", "link")
        if method not in ["link", "brew"]:
            raise error.TestError("Unknown installation method %s" % method)

        if method == "link":
            return (params.get("kernel_version"),
                    params.get("kernel_rpm"),
                    params.get("firmware_rpm"))

        error.context("Get latest kernel package link from brew", logging.info)
        # fetch the newest packages from brew
        # FIXME: really brain dead method to fetch the kernel version
        #        kernel_version = re... + hint from configuration file
        #        is there any smart way to fetch the `uname -r` from
        #        brew build?
        rh_kernel_hint = "[\d+][^\s]+"
        kernel_re = params.get("kernel_re")
        tag = params.get("brew_tag")

        latest_pkg_cmd = "brew latest-pkg %s kernel" % tag
        o = utils.system_output(latest_pkg_cmd, timeout=360)
        build = re.findall("kernel[^\s]+", o)[0]
        logging.debug("Latest package on brew for tag %s is %s" %
                      (tag, build))

        buildinfo = utils.system_output("brew buildinfo %s" % build,
                                        timeout=360)

        # install kernel-firmware
        firmware_url = None
        if "firmware" in buildinfo:
            logging.info("Found kernel-firmware")
            fw_pattern = ".*firmware.*"
            try:
                fw_brew_link = re.findall(fw_pattern, buildinfo)[0]
            except IndexError:
                raise error.TestError("Could not get kernel-firmware package"
                                      " brew link matching pattern '%s'" % fw_pattern)
            firmware_url = get_brew_url(fw_brew_link, download_root)

        knl_pattern = kernel_re % rh_kernel_hint
        try:
            knl_brew_link = re.findall(knl_pattern, buildinfo, re.I)[0]
        except IndexError:
            raise error.TestError("Could not get kernel package brew link"
                                  " matching pattern '%s'" % knl_pattern)
        kernel_url = get_brew_url(knl_brew_link, download_root)

        debug_re = kernel_re % ("(%s)" % rh_kernel_hint)
        try:
            kernel_version = re.findall(debug_re, kernel_url, re.I)[0]
        except IndexError:
            raise error.TestError("Could not get kernel version matching"
                                  " pattern '%s'" % debug_re)
        kernel_version += "." + params.get("kernel_suffix", "")

        return kernel_version, kernel_url, firmware_url

    def get_kernel_debuginfo_rpm_link():
        knl_dbginfo_re = params.get("knl_dbginfo_re")
        tag = params.get("brew_tag")

        latest_pkg_cmd = "brew latest-pkg %s kernel" % tag
        o = utils.system_output(latest_pkg_cmd, timeout=360)
        build = re.findall("kernel[^\s]+", o)[0]
        logging.debug("Latest package on brew for tag %s is %s" %
                      (tag, build))

        buildinfo = utils.system_output("brew buildinfo %s" % build,
                                        timeout=360)

        try:
            knl_dbginfo_links = re.findall(knl_dbginfo_re,
                                           buildinfo, re.I)
        except IndexError:
            raise error.TestError("Could not get kernel-debuginfo package "
                                  "brew link matching pattern '%s'" %
                                  knl_dbginfo_re)

        knl_dbginfo_urls = []
        for l in knl_dbginfo_links:
            link = get_brew_url(l, download_root)
            knl_dbginfo_urls.append(link)

        return knl_dbginfo_urls

    def get_guest_kernel_version():
        error.context("Verify the version of guest kernel", logging.info)
        s, o = session.cmd_status_output("uname -r")
        return o.strip()

    def is_kernel_debuginfo_installed():
        get_kernel_debuginfo_cmd = "rpm -qa | grep %s" % knl_dbginfo_version
        s, o = session.cmd_status_output(get_kernel_debuginfo_cmd)
        if s != 0:
            return False

        if knl_dbginfo_version not in o:
            logging.debug("%s has not been installed." % knl_dbginfo_version)
            return False

        logging.debug("%s has already been installed." % knl_dbginfo_version)

        return True

    def is_virtio_driver_installed():
        s, o = session.cmd_status_output("lsmod | grep virtio")
        if s != 0:
            return False

        for driver in virtio_drivers:
            if driver not in o:
                logging.debug("%s has not been installed." % driver)
                return False
            logging.debug("%s has already been installed." % driver)

        return True

    def compare_kernel_version(kernel_version, guest_version):
        error.context("Compare guest kernel version and brew's", logging.info)
        # return True: when kernel_version <= guest_version
        if guest_version == kernel_version:
            logging.info("The kernel version is matched %s" % guest_version)
            return True

        kernel_s = re.split('[.-]', kernel_version)
        guest_s = re.split('[.-]', guest_version)
        kernel_v = [int(i) for i in kernel_s if i.isdigit()]
        guest_v = [int(i) for i in guest_s if i.isdigit()]
        for i in range(min(len(kernel_v), len(guest_v))):
            if kernel_v[i] < guest_v[i]:
                logging.debug("The kernel version: '%s' is old than"
                              " guest version %s" % (kernel_version, guest_version))
                return True
            elif kernel_v[i] > guest_v[i]:
                return False

        if len(kernel_v) < len(guest_v):
            logging.debug("The kernel_version: %s is old than guest_version"
                          " %s" % (kernel_version, guest_version))
            return True

        return False

    def get_guest_pkgs(session, pkg, qformat=""):
        """
        Query packages in the guest that require 'pkg'.

        :param session: session object to guest.
        :param pkg: package name without version and arch info.
        :param qformat: display format (e.g., %{NAME}, %{VERSION}).

        :return: list of packages.
        :rtype: list
        """
        cmd = "rpm -q --whatrequires %s" % pkg
        if qformat:
            cmd += " --queryformat='%s\n'" % qformat
        pkgs = session.cmd_output(cmd).splitlines()
        pkgs.append(pkg)
        return pkgs

    def get_latest_pkgs_url(pkg, arch):
        """
        Get url of latest packages in brewweb.

        :param pkg: package name without version info.
        :param brew_tag: required in cfg file.
        :param vm_arch_name: required in cfg file.
        :param latest_pkg_cmd: required in cfg file.

        :return: URLs for pkg in brewweb.
        :rtype: list
        """
        tag = params.get("brew_tag")
        latest_pkg_cmd = params.get("latest_pkg_cmd", "brew latest-pkg")
        latest_pkg_cmd = "%s %s %s" % (latest_pkg_cmd, tag, pkg)
        latest_pkg_cmd = "%s --arch=%s --paths" % (latest_pkg_cmd, arch)
        mnt_paths = utils.system_output(latest_pkg_cmd).splitlines()
        return [get_brew_url(_, download_root)
                for _ in mnt_paths if _.endswith(".rpm")]

    def upgrade_guest_pkgs(session, pkg, arch, debuginfo=False,
                           nodeps=True, timeout=600):
        """
        Upgrade the given packages in the guest OS.

        :param session: session object.
        :param pkg: package name without version info.
        :param debuginfo: bool type, if True, install the debuginfo package too.
        :param nodeps: bool type, if True, ignore deps when installing the rpm.
        :param timeout: float type, timeout value when installing the rpm.
        """
        error.context("Upgrade package '%s' in guest" % pkg, logging.info)
        pkgs = get_guest_pkgs(session, pkg, "%{NAME}")
        latest_pkgs_url = get_latest_pkgs_url(pkg, arch)
        for url in latest_pkgs_url:
            if "debuginfo" in url and not debuginfo:
                continue
            upgrade = bool(filter(lambda x: x in url, pkgs))
            logging.info("Install packages from: %s" % url)
            install_rpm(session, url, upgrade, nodeps, timeout)

    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()

    download_root = params["download_root_url"]
    login_timeout = int(params.get("login_timeout", 360))
    session = vm.wait_for_login(timeout=login_timeout)

    install_virtio = params.get("install_virtio", "yes")
    install_knl_debuginfo = params.get("install_knl_debuginfo")
    verify_virtio = params.get("verify_virtio", "yes")
    args_removed = params.get("args_removed", "").split()
    args_added = params.get("args_added", "").split()
    restore_initrd_cmd = ""
    virtio_drivers = params.get("virtio_drivers_list", "").split()
    kernel_version, kernel_rpm, firmware_rpm = get_kernel_rpm_link()
    knl_dbginfo_rpm = get_kernel_debuginfo_rpm_link()
    knl_dbginfo_version = "kernel-debuginfo-%s" % kernel_version

    logging.info("Kernel version:  %s" % kernel_version)
    logging.info("Kernel rpm    :  %s" % kernel_rpm)
    logging.info("Firmware rpm  :  %s" % firmware_rpm)

    boot_cfg_path = params.get("boot_cfg_path", "/boot/grub/grub.conf")
    bootcfg_backup_cmd = "\cp -af  {0} {0}-bk".format(boot_cfg_path)
    bootcfg_restore_cmd = "\cp -af {0}-bk {0}".format(boot_cfg_path)
    count = 0

    try:
        error.context("Backup '%s'" % boot_cfg_path)
        s, o = session.cmd_status_output(bootcfg_backup_cmd)
        if s != 0:
            raise error.TestError("Failed to backup '%s', guest output: '%s'"
                                  % (boot_cfg_path, o))
        count = 1

        # decide whether a new kernel needs to be installed
        ifupdatekernel = True
        guest_version = get_guest_kernel_version()
        if compare_kernel_version(kernel_version, guest_version):
            ifupdatekernel = False
            # set kernel_version to current version for later step to use
            kernel_version = guest_version

            if is_kernel_debuginfo_installed():
                install_knl_debuginfo = "no"

            if is_virtio_driver_installed():
                install_virtio = "no"
        else:
            logging.info("The guest kerenl is %s but expected is %s" %
                         (guest_version, kernel_version))

            rpm_install_func = install_rpm
            if params.get("install_rpm_from_local") == "yes":
                rpm_install_func = copy_and_install_rpm

            kernel_deps_pkgs = params.get("kernel_deps_pkgs", "dracut").split()
            if kernel_deps_pkgs:
                for pkg in kernel_deps_pkgs:
                    arch = params.get("arch_%s" % pkg,
                                      params.get("vm_arch_name"))
                    upgrade_guest_pkgs(session, pkg, arch)

            if firmware_rpm:
                error.context("Install guest kernel firmware", logging.info)
                rpm_install_func(session, firmware_rpm, upgrade=True)
            error.context("Install guest kernel", logging.info)
            status = rpm_install_func(session, kernel_rpm)
            if status:
                count = 2

            error.context("Backup '%s' after installing new kernel"
                          % boot_cfg_path, logging.info)
            s, o = session.cmd_status_output(bootcfg_backup_cmd)
            if s != 0:
                msg = ("Fail to backup '%s' after updating kernel,"
                       " guest output: '%s'" % (boot_cfg_path, o))
                logging.error(msg)
                raise error.TestError(msg)

        kernel_path = "/boot/vmlinuz-%s" % kernel_version

        if install_knl_debuginfo == "yes":
            error.context("Installing kernel-debuginfo packages", logging.info)

            links = ""

            for r in knl_dbginfo_rpm:
                links += " %s" % r

            install_rpm(session, links)

        if install_virtio == "yes":
            error.context("Installing virtio driver", logging.info)

            initrd_prob_cmd = "grubby --info=%s" % kernel_path
            s, o = session.cmd_status_output(initrd_prob_cmd)
            if s != 0:
                msg = ("Could not get guest kernel information,"
                       " guest output: '%s'" % o)
                logging.error(msg)
                raise error.TestError(msg)

            try:
                initrd_path = re.findall("initrd=(.*)", o)[0]
            except IndexError:
                raise error.TestError("Could not get initrd path from guest,"
                                      " guest output: '%s'" % o)

            driver_list = ["--with=%s " % drv for drv in virtio_drivers]
            mkinitrd_cmd = "mkinitrd -f %s " % initrd_path
            mkinitrd_cmd += "".join(driver_list)
            mkinitrd_cmd += " %s" % kernel_version
            cp_initrd_cmd = "\cp -af  %s %s-bk" % (initrd_path, initrd_path)
            restore_initrd_cmd = "\cp -af  %s-bk %s" % (initrd_path,
                                                        initrd_path)

            error.context("Backup initrd file")
            s, o = session.cmd_status_output(cp_initrd_cmd, timeout=200)
            if s != 0:
                logging.error("Failed to backup guest initrd,"
                              " guest output: '%s'", o)

            error.context("Update initrd file", logging.info)
            s, o = session.cmd_status_output(mkinitrd_cmd, timeout=360)
            if s != 0:
                msg = "Failed to install virtio driver, guest output '%s'" % o
                logging.error(msg)
                raise error.TestFail(msg)

            count = 3

        # make sure the newly installed kernel is set as the default
        if ifupdatekernel:
            error.context("Make the new installed kernel as default",
                          logging.info)
            make_def_cmd = "grubby --set-default=%s " % kernel_path
            s, o = session.cmd_status_output(make_def_cmd)
            if s != 0:
                msg = ("Fail to set %s as default kernel,"
                       " guest output: '%s'" % (kernel_path, o))
                logging.error(msg)
                raise error.TestError(msg)

            count = 4
            error.context(
                "Backup '%s' after setting new kernel as default"
                % boot_cfg_path)
            s, o = session.cmd_status_output(bootcfg_backup_cmd)
            if s != 0:
                msg = ("Fail to backup '%s', guest output: '%s'"
                       % (boot_cfg_path, o))
                logging.error(msg)
                raise error.TestError(msg)

        # remove or add the required arguments

        error.context("Update the guest kernel cmdline", logging.info)
        remove_args_list = ["--remove-args=%s " % arg for arg in args_removed]
        update_kernel_cmd = "grubby --update-kernel=%s " % kernel_path
        update_kernel_cmd += "".join(remove_args_list)
        update_kernel_cmd += '--args="%s"' % " ".join(args_added)
        s, o = session.cmd_status_output(update_kernel_cmd)
        if s != 0:
            msg = "Fail to modify the kernel cmdline, guest output: '%s'" % o
            logging.error(msg)
            raise error.TestError(msg)

        count = 5

        # upgrade listed packages to latest version.
        for pkg in params.get("upgrade_pkgs", "").split():
            _ = params.object_params(pkg)
            arch = _.get("vm_arch_name", "x86_64")
            nodeps = _.get("ignore_deps") == "yes"
            install_debuginfo = _.get("install_debuginfo") == "yes"
            timeout = int(_.get("install_pkg_timeout", "600"))
            ver_before = session.cmd_output("rpm -q %s" % pkg)
            upgrade_guest_pkgs(
                session,
                pkg, arch,
                install_debuginfo,
                nodeps,
                timeout)
            ver_after = session.cmd_output("rpm -q %s" % pkg)
            if "not installed" in ver_before:
                mesg = "Install '%s' in guest" % ver_after
            else:
                mesg = "Upgrade '%s' from '%s'  to '%s'" % (pkg, ver_before, ver_after)
            logging.info(mesg)

        # reboot guest
        error.context("Reboot guest after updating kernel", logging.info)
        time.sleep(int(params.get("sleep_before_reset", 10)))
        session = vm.reboot(session, 'shell', timeout=login_timeout)
        # check if the guest can bootup normally after kernel update
        guest_version = get_guest_kernel_version()
        if guest_version != kernel_version:
            raise error.TestFail("Fail to verify the guest kernel, \n"
                                 "Expceted version %s \n"
                                 "In fact version %s \n" %
                                 (kernel_version, guest_version))

        if verify_virtio == "yes":
            error.context("Verifying the virtio drivers", logging.info)
            if not is_virtio_driver_installed():
                raise error.TestFail("Fail to verify the installation of"
                                     " virtio drivers")
    except Exception:
        if count in [4, 3, 1]:
            # restore boot cfg
            s, o = session.cmd_status_output(bootcfg_restore_cmd, timeout=100)
            if s != 0:
                logging.error("Failed to execute cmd '%s' in guest,"
                              " guest output: '%s'", bootcfg_restore_cmd, o)
        elif count == 2 and restore_initrd_cmd:
            # restore initrd file
            s, o = session.cmd_status_output(restore_initrd_cmd, timeout=200)
            if s != 0:
                logging.error("Failed to execute cmd '%s' in guest,"
                              " guest output: '%s'", restore_initrd_cmd, o)

        raise
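
Many of the calls in the example above pre-format the message with the % operator (for instance, logging.debug("Brew URL is %s" % url)). A hedged sketch of the equivalent lazy form follows, with illustrative values that are not taken from the test: the arguments are handed to logging.debug() itself, so interpolation is skipped whenever DEBUG output is disabled.

import logging

url = "http://download.example.com/kernel-3.10.0-123.el7.x86_64.rpm"  # illustrative
tag = "rhel-7.0-candidate"                                            # illustrative
build = "kernel-3.10.0-123.el7"                                       # illustrative

# Formatting is deferred to the logging module and only performed
# when a handler actually emits the record.
logging.debug("Brew URL is %s", url)
logging.debug("Latest package on brew for tag %s is %s", tag, build)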

Example 2

Project: tp-libvirt
Source File: virsh_snapshot_disk.py
def run(test, params, env):
    """
    Test virsh snapshot command when disk in all kinds of type.

    (1). Init the variables from params.
    (2). Create an image with the specified format.
    (3). Attach the disk to the vm.
    (4). Create the snapshot.
    (5). Revert the snapshot.
    (6). Clean up.
    """
    # Init variables.
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(vm_name)
    vm_state = params.get("vm_state", "running")
    image_format = params.get("snapshot_image_format", "qcow2")
    snapshot_del_test = "yes" == params.get("snapshot_del_test", "no")
    status_error = ("yes" == params.get("status_error", "no"))
    snapshot_from_xml = ("yes" == params.get("snapshot_from_xml", "no"))
    snapshot_current = ("yes" == params.get("snapshot_current", "no"))
    snapshot_revert_paused = ("yes" == params.get("snapshot_revert_paused",
                                                  "no"))
    replace_vm_disk = "yes" == params.get("replace_vm_disk", "no")
    disk_source_protocol = params.get("disk_source_protocol")
    vol_name = params.get("vol_name")
    tmp_dir = data_dir.get_tmp_dir()
    pool_name = params.get("pool_name", "gluster-pool")
    brick_path = os.path.join(tmp_dir, pool_name)
    multi_gluster_disks = "yes" == params.get("multi_gluster_disks", "no")

    # Pool variables.
    snapshot_with_pool = "yes" == params.get("snapshot_with_pool", "no")
    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    pool_target = params.get("pool_target")
    emulated_image = params.get("emulated_image", "emulated-image")
    vol_format = params.get("vol_format")
    lazy_refcounts = "yes" == params.get("lazy_refcounts")
    options = params.get("snapshot_options", "")
    export_options = params.get("export_options", "rw,no_root_squash,fsid=0")

    # Set the volume xml attribute dictionary; extract all params starting with
    # 'vol_', which are for setting the volume xml, except 'lazy_refcounts'.
    vol_arg = {}
    for key in params.keys():
        if key.startswith('vol_'):
            if key[4:] in ['capacity', 'allocation', 'owner', 'group']:
                vol_arg[key[4:]] = int(params[key])
            else:
                vol_arg[key[4:]] = params[key]
    vol_arg['lazy_refcounts'] = lazy_refcounts

    supported_pool_list = ["dir", "fs", "netfs", "logical", "iscsi",
                           "disk", "gluster"]
    if snapshot_with_pool:
        if pool_type not in supported_pool_list:
            raise error.TestNAError("%s not in support list %s" %
                                    (pool_target, supported_pool_list))

    # Do xml backup for final recovery
    vmxml_backup = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    # Some variable for xmlfile of snapshot.
    snapshot_memory = params.get("snapshot_memory", "internal")
    snapshot_disk = params.get("snapshot_disk", "internal")
    no_memory_snap = "yes" == params.get("no_memory_snap", "no")

    # Skip 'qed' cases for libvirt version greater than 1.1.0
    if libvirt_version.version_compare(1, 1, 0):
        if vol_format == "qed" or image_format == "qed":
            raise error.TestNAError("QED support changed, check bug: "
                                    "https://bugzilla.redhat.com/show_bug.cgi"
                                    "?id=731570")

    if not libvirt_version.version_compare(1, 2, 7):
        # As bug 1017289 was closed as WONTFIX, the support only
        # exists on 1.2.7 and higher
        if disk_source_protocol == 'gluster':
            raise error.TestNAError("Snapshot on glusterfs not support in "
                                    "current version. Check more info with "
                                    "https://bugzilla.redhat.com/buglist.cgi?"
                                    "bug_id=1017289,1032370")

    # Init snapshot_name
    snapshot_name = None
    snapshot_external_disk = []
    snapshot_xml_path = None
    del_status = None
    image = None
    pvt = None
    # Path where snapshot config files for this vm are stored
    snap_cfg_path = "/var/lib/libvirt/qemu/snapshot/%s/" % vm_name
    try:
        if replace_vm_disk:
            utlv.set_vm_disk(vm, params, tmp_dir)
            if multi_gluster_disks:
                new_params = params.copy()
                new_params["pool_name"] = "gluster-pool2"
                new_params["vol_name"] = "gluster-vol2"
                new_params["disk_target"] = "vdf"
                new_params["image_convert"] = 'no'
                utlv.set_vm_disk(vm, new_params, tmp_dir)

        if snapshot_with_pool:
            # Create the dst pool for creating the volume image to attach
            pvt = utlv.PoolVolumeTest(test, params)
            pvt.pre_pool(pool_name, pool_type, pool_target,
                         emulated_image, image_size="1G",
                         pre_disk_vol=["20M"],
                         source_name=vol_name,
                         export_options=export_options)

            if pool_type in ["iscsi", "disk"]:
                # iscsi and disk pools do not support creating volumes via libvirt;
                # a logical pool can use libvirt to create a volume, but the volume
                # format is not supported and defaults to 'raw'.
                pv = libvirt_storage.PoolVolume(pool_name)
                vols = pv.list_volumes().keys()
                if vols:
                    vol_name = vols[0]
                else:
                    raise error.TestNAError("No volume in pool: %s" % pool_name)
            else:
                # Set volume xml file
                volxml = libvirt_xml.VolXML()
                newvol = volxml.new_vol(**vol_arg)
                vol_xml = newvol['xml']

                # Run virsh_vol_create to create vol
                logging.debug("create volume from xml: %s" % newvol.xmltreefile)
                cmd_result = virsh.vol_create(pool_name, vol_xml,
                                              ignore_status=True,
                                              debug=True)
                if cmd_result.exit_status:
                    raise error.TestNAError("Failed to create attach volume.")

            cmd_result = virsh.vol_path(vol_name, pool_name, debug=True)
            if cmd_result.exit_status:
                raise error.TestNAError("Failed to get volume path from pool.")
            img_path = cmd_result.stdout.strip()

            if pool_type in ["logical", "iscsi", "disk"]:
                # Use qemu-img to format logical, iscsi and disk block device
                if vol_format != "raw":
                    cmd = "qemu-img create -f %s %s 10M" % (vol_format,
                                                            img_path)
                    cmd_result = utils.run(cmd, ignore_status=True)
                    if cmd_result.exit_status:
                        raise error.TestNAError("Failed to format volume, %s" %
                                                cmd_result.stdout.strip())
            extra = "--persistent --subdriver %s" % vol_format
        else:
            # Create a image.
            params['image_name'] = "snapshot_test"
            params['image_format'] = image_format
            params['image_size'] = "1M"
            image = qemu_storage.QemuImg(params, tmp_dir, "snapshot_test")
            img_path, _ = image.create(params)
            extra = "--persistent --subdriver %s" % image_format

        if not multi_gluster_disks:
            # Do the attach action.
            out = utils.run("qemu-img info %s" % img_path)
            logging.debug("The img info is:\n%s" % out.stdout.strip())
            result = virsh.attach_disk(vm_name, source=img_path, target="vdf",
                                       extra=extra, debug=True)
            if result.exit_status:
                raise error.TestNAError("Failed to attach disk %s to VM."
                                        "Detail: %s." % (img_path, result.stderr))

        # Create snapshot.
        if snapshot_from_xml:
            snap_xml = libvirt_xml.SnapshotXML()
            snapshot_name = "snapshot_test"
            snap_xml.snap_name = snapshot_name
            snap_xml.description = "Snapshot Test"
            if not no_memory_snap:
                if "--disk-only" not in options:
                    if snapshot_memory == "external":
                        memory_external = os.path.join(tmp_dir,
                                                       "snapshot_memory")
                        snap_xml.mem_snap_type = snapshot_memory
                        snap_xml.mem_file = memory_external
                        snapshot_external_disk.append(memory_external)
                    else:
                        snap_xml.mem_snap_type = snapshot_memory

            # Add all disks into xml file.
            vmxml = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            disks = vmxml.devices.by_device_tag('disk')
            new_disks = []
            for src_disk_xml in disks:
                disk_xml = snap_xml.SnapDiskXML()
                disk_xml.xmltreefile = src_disk_xml.xmltreefile
                del disk_xml.device
                del disk_xml.address
                disk_xml.snapshot = snapshot_disk
                disk_xml.disk_name = disk_xml.target['dev']

                # Only qcow2 works as external snapshot file format, update it
                # here
                driver_attr = disk_xml.driver
                driver_attr.update({'type': 'qcow2'})
                disk_xml.driver = driver_attr

                if snapshot_disk == 'external':
                    new_attrs = disk_xml.source.attrs
                    if disk_xml.source.attrs.has_key('file'):
                        new_file = "%s.snap" % disk_xml.source.attrs['file']
                        snapshot_external_disk.append(new_file)
                        new_attrs.update({'file': new_file})
                        hosts = None
                    elif disk_xml.source.attrs.has_key('name'):
                        new_name = "%s.snap" % disk_xml.source.attrs['name']
                        new_attrs.update({'name': new_name})
                        hosts = disk_xml.source.hosts
                    elif (disk_xml.source.attrs.has_key('dev') and
                          disk_xml.type_name == 'block'):
                        # Use a local file as the external snapshot target for block type.
                        # As a block device is treated as raw format by default,
                        # it is not fit as an external disk snapshot target. A
                        # workaround is to use qemu-img again on the target.
                        disk_xml.type_name = 'file'
                        del new_attrs['dev']
                        new_file = "%s/blk_src_file.snap" % tmp_dir
                        snapshot_external_disk.append(new_file)
                        new_attrs.update({'file': new_file})
                        hosts = None

                    new_src_dict = {"attrs": new_attrs}
                    if hosts:
                        new_src_dict.update({"hosts": hosts})
                    disk_xml.source = disk_xml.new_disk_source(**new_src_dict)
                else:
                    del disk_xml.source

                new_disks.append(disk_xml)

            snap_xml.set_disks(new_disks)
            snapshot_xml_path = snap_xml.xml
            logging.debug("The snapshot xml is: %s" % snap_xml.xmltreefile)

            options += " --xmlfile %s " % snapshot_xml_path

            if vm_state == "shut off":
                vm.destroy(gracefully=False)

            snapshot_result = virsh.snapshot_create(
                vm_name, options, debug=True)
            out_err = snapshot_result.stderr.strip()
            if snapshot_result.exit_status:
                if status_error:
                    return
                else:
                    if re.search("live disk snapshot not supported with this "
                                 "QEMU binary", out_err):
                        raise error.TestNAError(out_err)

                    if libvirt_version.version_compare(1, 2, 5):
                        # As of commit d2e668e in 1.2.5, an internal active snapshot
                        # without memory state is rejected. Handle it as SKIP
                        # for now. This could be supported in the future; see bug:
                        # https://bugzilla.redhat.com/show_bug.cgi?id=1103063
                        if re.search("internal snapshot of a running VM" +
                                     " must include the memory state",
                                     out_err):
                            raise error.TestNAError("Check Bug #1083345, %s" %
                                                    out_err)

                    raise error.TestFail("Failed to create snapshot. Error:%s."
                                         % out_err)
        else:
            snapshot_result = virsh.snapshot_create(vm_name, options,
                                                    debug=True)
            if snapshot_result.exit_status:
                if status_error:
                    return
                else:
                    raise error.TestFail("Failed to create snapshot. Error:%s."
                                         % snapshot_result.stderr.strip())
            snapshot_name = re.search(
                "\d+", snapshot_result.stdout.strip()).group(0)

            if snapshot_current:
                snap_xml = libvirt_xml.SnapshotXML()
                new_snap = snap_xml.new_from_snapshot_dumpxml(vm_name,
                                                              snapshot_name)
                # update an element
                new_snap.creation_time = snapshot_name
                snapshot_xml_path = new_snap.xml
                options += "--redefine %s --current" % snapshot_xml_path
                snapshot_result = virsh.snapshot_create(vm_name,
                                                        options, debug=True)
                if snapshot_result.exit_status:
                    raise error.TestFail("Failed to create snapshot --current."
                                         "Error:%s." %
                                         snapshot_result.stderr.strip())

        if status_error:
            if not snapshot_del_test:
                raise error.TestFail("Success to create snapshot in negative"
                                     " case\nDetail: %s" % snapshot_result)

        # Touch a file in VM.
        if vm.is_dead():
            vm.start()
        session = vm.wait_for_login()

        # Init a unique name for tmp_file.
        tmp_file = tempfile.NamedTemporaryFile(prefix=("snapshot_test_"),
                                               dir="/tmp")
        tmp_file_path = tmp_file.name
        tmp_file.close()

        echo_cmd = "echo SNAPSHOT_DISK_TEST >> %s" % tmp_file_path
        status, output = session.cmd_status_output(echo_cmd)
        logging.debug("The echo output in domain is: '%s'", output)
        if status:
            raise error.TestFail("'%s' run failed with '%s'" %
                                 (tmp_file_path, output))
        status, output = session.cmd_status_output("cat %s" % tmp_file_path)
        logging.debug("File created with content: '%s'", output)

        session.close()

        # As only internal snapshot revert works now, let's only do the revert
        # with internal snapshots, and move all the skipped external cases back to pass.
        # After external is also supported, just move the following code back.
        if snapshot_disk == 'internal':
            # Destroy vm for snapshot revert.
            if not libvirt_version.version_compare(1, 2, 3):
                virsh.destroy(vm_name)
            # Revert snapshot.
            revert_options = ""
            if snapshot_revert_paused:
                revert_options += " --paused"
            revert_result = virsh.snapshot_revert(vm_name, snapshot_name,
                                                  revert_options,
                                                  debug=True)
            if revert_result.exit_status:
                # Attempts to revert external snapshots will FAIL with an error
                # "revert to external disk snapshot not supported yet" or "revert
                # to external snapshot not supported yet" since d410e6f. Thus,
                # let's check for that and handle as a SKIP for now. Check bug:
                # https://bugzilla.redhat.com/show_bug.cgi?id=1071264
                if re.search("revert to external \w* ?snapshot not supported yet",
                             revert_result.stderr):
                    raise error.TestNAError(revert_result.stderr.strip())
                else:
                    raise error.TestFail("Revert snapshot failed. %s" %
                                         revert_result.stderr.strip())

            if vm.is_dead():
                raise error.TestFail("Revert snapshot failed.")

            if snapshot_revert_paused:
                if vm.is_paused():
                    vm.resume()
                else:
                    raise error.TestFail("Revert command successed, but VM is not "
                                         "paused after reverting with --paused"
                                         "  option.")
            # login vm.
            session = vm.wait_for_login()
            # Check the result of revert.
            status, output = session.cmd_status_output("cat %s" % tmp_file_path)
            logging.debug("After revert cat file output='%s'", output)
            if not status:
                raise error.TestFail("Tmp file exists, revert failed.")

            # Close the session.
            session.close()

        # Test deleting a snapshot without "--metadata"; deleting an external disk
        # snapshot will fail for now.
        # Only do this when snapshot creation succeeds, which is filtered in the cfg file.
        if snapshot_del_test:
            if snapshot_name:
                del_result = virsh.snapshot_delete(vm_name, snapshot_name,
                                                   debug=True,
                                                   ignore_status=True)
                del_status = del_result.exit_status
                snap_xml_path = snap_cfg_path + "%s.xml" % snapshot_name
                if del_status:
                    if not status_error:
                        raise error.TestFail("Failed to delete snapshot.")
                    else:
                        if not os.path.exists(snap_xml_path):
                            raise error.TestFail("Snapshot xml file %s missing"
                                                 % snap_xml_path)
                else:
                    if status_error:
                        err_msg = "Snapshot delete succeed but expect fail."
                        raise error.TestFail(err_msg)
                    else:
                        if os.path.exists(snap_xml_path):
                            raise error.TestFail("Snapshot xml file %s still"
                                                 % snap_xml_path + " exist")

    finally:
        if vm.is_alive():
            vm.destroy(gracefully=False)
        virsh.detach_disk(vm_name, target="vdf", extra="--persistent")
        if image:
            image.remove()
        if del_status and snapshot_name:
            virsh.snapshot_delete(vm_name, snapshot_name, "--metadata")
        for disk in snapshot_external_disk:
            if os.path.exists(disk):
                os.remove(disk)
        vmxml_backup.sync("--snapshots-metadata")

        libvirtd = utils_libvirtd.Libvirtd()
        if disk_source_protocol == 'gluster':
            utlv.setup_or_cleanup_gluster(False, vol_name, brick_path)
            if multi_gluster_disks:
                brick_path = os.path.join(tmp_dir, "gluster-pool2")
                utlv.setup_or_cleanup_gluster(False, "gluster-vol2", brick_path)
            libvirtd.restart()

        if snapshot_xml_path:
            if os.path.exists(snapshot_xml_path):
                os.unlink(snapshot_xml_path)
        if pvt:
            try:
                pvt.cleanup_pool(pool_name, pool_type, pool_target,
                                 emulated_image, source_name=vol_name)
            except error.TestFail, detail:
                libvirtd.restart()
                logging.error(str(detail))
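
The cleanup path above logs the failure with logging.error(str(detail)). When the traceback is also of interest, the standard library's logging.exception() logs at ERROR level and appends the active exception's traceback automatically; a small sketch, with a hypothetical failing call standing in for the pool cleanup:

import logging

def cleanup_pool():
    # Hypothetical stand-in for the pool cleanup performed above.
    raise RuntimeError("pool is still in use")

try:
    cleanup_pool()
except RuntimeError:
    # Only valid inside an exception handler; the traceback of the
    # current exception is attached to the ERROR-level record.
    logging.exception("Pool cleanup failed")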

Example 3

Project: benchexec
Source File: containerexecutor.py
    def _setup_container_filesystem(self, temp_dir):
        """Setup the filesystem layout in the container.
         As first step, we create a copy of all existing mountpoints in mount_base, recursively,
        and as "private" mounts (i.e., changes to existing mountpoints afterwards won't propagate
        to our copy).
        Then we iterate over all mountpoints and change them
        according to the mode the user has specified (hidden, read-only, overlay, or full-access).
        This has do be done for each mountpoint because overlays are not recursive.
        Then we chroot into the new mount hierarchy.

        The new filesystem layout still has a view of the host's /proc.
        We do not mount a fresh /proc here because the grandchild still needs old the /proc.

        We do simply iterate over all existing mount points and set them to read-only/overlay them,
        because it is easier create a new hierarchy and chroot into it.
        First, we still have access to the original mountpoints while doing so,
        and second, we avoid race conditions if someone else changes the existing mountpoints.

        @param temp_dir: The base directory under which all our directories should be created.
        """
        # All strings here are bytes to avoid issues if existing mountpoints are invalid UTF-8.
        temp_dir = temp_dir.encode()
        mount_base = os.path.join(temp_dir, b"mount") # base dir for container mounts
        temp_base = os.path.join(temp_dir, b"temp") # directory with files created by tool
        os.mkdir(mount_base)
        os.mkdir(temp_base)

        def _is_below(path, target_path):
            # compare with trailing slashes for cases like /foo and /foobar
            path = os.path.join(path, b"")
            target_path = os.path.join(target_path, b"")
            return path.startswith(target_path)

        def find_mode_for_dir(path, fstype):
            if (path == b"/proc"):
                # /proc is necessary for the grandchild to read PID, will be replaced later.
                return DIR_READ_ONLY
            if _is_below(path, b"/proc"):
                # Irrelevant.
                return None

            parent_mode = None
            result_mode = None
            for special_dir, mode in self._dir_modes.items():
                if _is_below(path, special_dir):
                    if path != special_dir:
                        parent_mode = mode
                    result_mode = mode
            assert result_mode is not None

            if result_mode == DIR_OVERLAY and (
                    _is_below(path, b"/dev") or
                    _is_below(path, b"/sys") or
                    fstype == b"autofs" or
                    fstype == b"cgroup"):
                # Import /dev, /sys, cgroup, and autofs from host into the container,
                # overlay does not work for them.
                return DIR_READ_ONLY

            if result_mode == DIR_HIDDEN and parent_mode == DIR_HIDDEN:
                # No need to recursively recreate mountpoints in hidden dirs.
                return None
            return result_mode

        # Overlayfs needs its own additional temporary directory ("work" directory).
        # temp_base will be the "upper" layer, the host FS the "lower" layer,
        # and mount_base the mount target.
        work_base = os.path.join(temp_dir, b"overlayfs")
        os.mkdir(work_base)

        if self._container_system_config:
            container.setup_container_system_config(temp_base)

        # Create a copy of host's mountpoints.
        container.make_bind_mount(b"/", mount_base, recursive=True, private=True)

        # Ensure each special dir is a mountpoint such that the next loop covers it.
        for special_dir in self._dir_modes.keys():
            mount_path = mount_base + special_dir
            temp_path = temp_base + special_dir
            try:
                container.make_bind_mount(mount_path, mount_path)
            except OSError as e:
                logging.debug("Failed to make %s a bind mount: %s", mount_path, e)
            if not os.path.exists(temp_path):
                os.makedirs(temp_path)

        # Set desired access mode for each mountpoint.
        for unused_source, full_mountpoint, fstype, options in list(container.get_mount_points()):
            if not _is_below(full_mountpoint, mount_base):
                continue
            mountpoint = full_mountpoint[len(mount_base):] or b"/"

            mount_path = mount_base + mountpoint
            temp_path = temp_base + mountpoint
            work_path = work_base + mountpoint

            mode = find_mode_for_dir(mountpoint, fstype)
            if mode == DIR_OVERLAY:
                if not os.path.exists(temp_path):
                    os.makedirs(temp_path)
                if not os.path.exists(work_path):
                    os.makedirs(work_path)
                try:
                    # Previous mount in this place not needed if replaced with overlay dir.
                    libc.umount(mount_path)
                except OSError as e:
                    logging.debug(e)
                try:
                    container.make_overlay_mount(mount_path, mountpoint, temp_path, work_path)
                except OSError as e:
                    raise OSError(e.errno,
                        "Creating overlay mount for '{}' failed: {}. "
                        "Please use other directory modes."
                            .format(mountpoint.decode(), os.strerror(e.errno)))

            elif mode == DIR_HIDDEN:
                if not os.path.exists(temp_path):
                    os.makedirs(temp_path)
                try:
                    # Previous mount in this place not needed if replaced with hidden dir.
                    libc.umount(mount_path)
                except OSError as e:
                    logging.debug(e)
                container.make_bind_mount(temp_path, mount_path)

            elif mode == DIR_READ_ONLY:
                try:
                    container.remount_with_additional_flags(mount_path, options, libc.MS_RDONLY)
                except OSError as e:
                    if e.errno == errno.EACCES:
                        logging.warning(
                            "Cannot mount '%s', directory may be missing from container.",
                            mountpoint.decode())
                    else:
                        # If this mountpoint is below an overlay/hidden dir re-create mountpoint.
                        # Linux does not support making read-only bind mounts in one step:
                        # https://lwn.net/Articles/281157/ http://man7.org/linux/man-pages/man8/mount.8.html
                        container.make_bind_mount(
                            mountpoint, mount_path, recursive=True, private=True)
                        container.remount_with_additional_flags(mount_path, options, libc.MS_RDONLY)

            elif mode == DIR_FULL_ACCESS:
                try:
                    # Ensure directory is still a mountpoint by attempting to remount.
                    container.remount_with_additional_flags(mount_path, options, 0)
                except OSError as e:
                    if e.errno == errno.EACCES:
                        logging.warning(
                            "Cannot mount '%s', directory may be missing from container.",
                            mountpoint.decode())
                    else:
                        # If this mountpoint is below an overlay/hidden dir re-create mountpoint.
                        container.make_bind_mount(
                            mountpoint, mount_path, recursive=True, private=True)

            elif mode is None:
                pass

            else:
                assert False

        # If necessary (i.e., if /tmp is not already hidden),
        # hide the directory where we store our files from processes in the container
        # by mounting an empty directory over it.
        if os.path.exists(mount_base + temp_dir):
            os.makedirs(temp_base + temp_dir)
            container.make_bind_mount(temp_base + temp_dir, mount_base + temp_dir)

        os.chroot(mount_base)
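
This example calls the module-level logging.debug()/logging.warning() helpers, which go through the root logger. A common alternative in larger codebases is a per-module logger, so records carry the module name and can be filtered independently; a minimal sketch under that assumption (the function and messages are illustrative, not part of benchexec):

import logging
import os

# One logger per module; its name appears in the output and lets callers
# adjust verbosity for this module without touching the root logger.
logger = logging.getLogger(__name__)

def prepare_mountpoint(path):
    try:
        os.makedirs(path)  # illustrative operation that may fail
    except OSError as e:
        # Same lazy-formatting style as in the example above.
        logger.debug("Failed to prepare %s: %s", path, e)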

Example 4

Project: tp-libvirt
Source File: virsh_managedsave.py
def run(test, params, env):
    """
    Test command: virsh managedsave.

    This command can save and destroy a
    running domain, so it can be restarted
    from the same state at a later time.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    managed_save_file = "/var/lib/libvirt/qemu/save/%s.save" % vm_name

    # define function
    def vm_recover_check(option, libvirtd, check_shutdown=False):
        """
        Check if the vm can be recovered correctly.

        :param guest_name : Checked vm's name.
        :param option : managedsave command option.
        """
        # At this point the vm should be shut down
        if vm.is_alive():
            raise error.TestFail("Guest should be inactive")
        # Check vm managed save state.
        ret = virsh.dom_list("--managed-save --inactive")
        vm_state1 = re.findall(r".*%s.*" % vm_name,
                               ret.stdout.strip())[0].split()[2]
        ret = virsh.dom_list("--managed-save --all")
        vm_state2 = re.findall(r".*%s.*" % vm_name,
                               ret.stdout.strip())[0].split()[2]
        if vm_state1 != "saved" or vm_state2 != "saved":
            raise error.TestFail("Guest state should be saved")

        virsh.start(vm_name)
        # This time vm should be in the list
        if vm.is_dead():
            raise error.TestFail("Guest should be active")
        # Restart libvirtd and check vm status again.
        libvirtd.restart()
        if vm.is_dead():
            raise error.TestFail("Guest should be active after"
                                 " restarting libvirtd")
        # Check managed save file:
        if os.path.exists(managed_save_file):
            raise error.TestFail("Managed save image exist "
                                 "after starting the domain")
        if option:
            if option.count("running"):
                if vm.is_dead() or vm.is_paused():
                    raise error.TestFail("Guest state should be"
                                         " running after started"
                                         " because of '--running' option")
            elif option.count("paused"):
                if not vm.is_paused():
                    raise error.TestFail("Guest state should be"
                                         " paused after started"
                                         " because of '--paused' option")
        else:
            if params.get("paused_after_start_vm") == "yes":
                if not vm.is_paused():
                    raise error.TestFail("Guest state should be"
                                         " paused after started"
                                         " because of initia guest state")
        if check_shutdown:
            # Resume the domain.
            if vm.is_paused():
                vm.resume()
            vm.wait_for_login()
            # Shut down and start the domain;
            # it should be in running state and allow login.
            vm.shutdown()
            vm.wait_for_shutdown()
            vm.start()
            vm.wait_for_login()

    def vm_undefine_check(vm_name):
        """
        Check if the vm can be undefined with the --managed-save option
        """
        #backup xml file
        xml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        if not os.path.exists(managed_save_file):
            raise error.TestFail("Can't find managed save image")
        #undefine domain with no options.
        if not virsh.undefine(vm_name, options=None,
                              ignore_status=True).exit_status:
            raise error.TestFail("Guest shouldn't be undefined"
                                 "while domain managed save image exists")
        #undefine domain with managed-save option.
        if virsh.undefine(vm_name, options="--managed-save",
                          ignore_status=True).exit_status:
            raise error.TestFail("Guest can't be undefine with "
                                 "managed-save option")

        if os.path.exists(managed_save_file):
            raise error.TestFail("Managed save image exists"
                                 " after undefining vm")
        #restore and start the vm.
        xml_backup.define()
        vm.start()

    def check_flags_parallel(virsh_cmd, bash_cmd, flags):
        """
        Run the commands parallel and check the output.
        """
        cmd = ("%s & %s" % (virsh_cmd, bash_cmd))
        ret = utils.run(cmd, ignore_status=True)
        output = ret.stdout.strip()
        logging.debug("check flags output: %s" % output)
        lines = re.findall(r"flags:.+%s" % flags, output, re.M)
        logging.debug("Find lines: %s" % lines)
        if not lines:
            raise error.TestFail("Checking flags %s failed" % flags)

        return ret

    def check_multi_guests(guests, start_delay, libvirt_guests):
        """
        Check start_delay option for multiple guests.
        """
        # Destroy vm first
        if vm.is_alive():
            vm.destroy(gracefully=False)
        # Clone given number of guests
        timeout = params.get("clone_timeout", 360)
        for i in range(int(guests)):
            dst_vm = "%s_%s" % (vm_name, i)
            utils_libguestfs.virt_clone_cmd(vm_name, dst_vm,
                                            True, timeout=timeout)
            virsh.start(dst_vm)

        # Wait 10 seconds for vm to start
        time.sleep(10)
        is_systemd = utils.run("cat /proc/1/comm").stdout.count("systemd")
        if is_systemd:
            libvirt_guests.restart()
            pattern = r'(.+ \d\d:\d\d:\d\d).+: Resuming guest.+done'
        else:
            ret = utils.run("service libvirt-guests restart | \
            awk '{ print strftime(\"%b %y %H:%M:%S\"), $0; fflush(); }'")
            pattern = r'(.+ \d\d:\d\d:\d\d)+ Resuming guest.+done'

        # The libvirt-guests status command reads messages from the systemd
        # journal; in case the messages are not ready in time,
        # add a wait here.
        def wait_func():
            return libvirt_guests.raw_status().stdout.count("Resuming guest")

        utils_misc.wait_for(wait_func, 5)
        if is_systemd:
            ret = libvirt_guests.raw_status()
        logging.info("status output: %s", ret.stdout)
        resume_time = re.findall(pattern, ret.stdout, re.M)
        if not resume_time:
            raise error.TestFail("Can't see messages of resuming guest")

        # Convert time string to int
        resume_seconds = [time.mktime(time.strptime(
            tm, "%b %y %H:%M:%S")) for tm in resume_time]
        logging.info("Resume time in seconds: %s", resume_seconds)
        # Check if start_delay take effect
        for i in range(len(resume_seconds)-1):
            if resume_seconds[i+1] - resume_seconds[i] < int(start_delay):
                raise error.TestFail("Checking start_delay failed")

    def wait_for_state(vm_state):
        """
        Wait for vm state is ready.
        """
        utils_misc.wait_for(lambda: vm.state() == vm_state, 10)

    def check_guest_flags(bash_cmd, flags):
        """
        Check bypass_cache option for single guest.
        """
        # Drop caches.
        drop_caches()
        # form the proper parallel command based on whether systemd is used
        is_systemd = utils.run("cat /proc/1/comm").stdout.count("systemd")
        if is_systemd:
            virsh_cmd_stop = "systemctl stop libvirt-guests"
            virsh_cmd_start = "systemctl start libvirt-guests"
        else:
            virsh_cmd_stop = "service libvirt-guests stop"
            virsh_cmd_start = "service libvirt-guests start"

        ret = check_flags_parallel(virsh_cmd_stop, bash_cmd %
                                   (managed_save_file, managed_save_file,
                                    "1", flags), flags)
        if is_systemd:
            ret = libvirt_guests.raw_status()
        logging.info("status output: %s", ret.stdout)
        if all(["Suspending %s" % vm_name not in ret.stdout,
                "stopped, with saved guests" not in ret.stdout]):
            raise error.TestFail("Can't see messages of suspending vm")
        # status command should return 3.
        if not is_systemd:
            ret = libvirt_guests.raw_status()
        if ret.exit_status != 3:
            raise error.TestFail("The exit code %s for libvirt-guests"
                                 " status is not correct" % ret)

        # Wait for VM in shut off state
        wait_for_state("shut off")
        check_flags_parallel(virsh_cmd_start, bash_cmd %
                             (managed_save_file, managed_save_file,
                              "0", flags), flags)
        # Wait for VM in running state
        wait_for_state("running")

    def vm_msave_remove_check(vm_name):
        """
        Check managed save remove command.
        """
        if not os.path.exists(managed_save_file):
            raise error.TestFail("Can't find managed save image")
        virsh.managedsave_remove(vm_name)
        if os.path.exists(managed_save_file):
            raise error.TestFail("Managed save image still exists")
        virsh.start(vm_name)
        # The domain state should be running
        if vm.state() != "running":
            raise error.TestFail("Guest state should be"
                                 " running after started")

    def vm_managedsave_loop(vm_name, loop_range, libvirtd):
        """
        Run a loop of managedsave command and check its result.
        """
        if vm.is_dead():
            virsh.start(vm_name)
        for i in range(int(loop_range)):
            logging.debug("Test loop: %s" % i)
            virsh.managedsave(vm_name)
            virsh.start(vm_name)
        # Check libvirtd status.
        if not libvirtd.is_running():
            raise error.TestFail("libvirtd is stopped after cmd")
        # Check vm status.
        if vm.state() != "running":
            raise error.TestFail("Guest isn't in running state")

    def build_vm_xml(vm_name, **dargs):
        """
        Build the new domain xml and define it.
        """
        try:
            # stop vm before doing any change to xml
            if vm.is_alive():
                vm.destroy(gracefully=False)
            vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            if dargs.get("cpu_mode"):
                if "cpu" in vmxml:
                    del vmxml.cpu
                cpuxml = vm_xml.VMCPUXML()
                cpuxml.mode = params.get("cpu_mode", "host-model")
                cpuxml.match = params.get("cpu_match", "exact")
                cpuxml.fallback = params.get("cpu_fallback", "forbid")
                cpu_topology = {}
                cpu_topology_sockets = params.get("cpu_topology_sockets")
                if cpu_topology_sockets:
                    cpu_topology["sockets"] = cpu_topology_sockets
                cpu_topology_cores = params.get("cpu_topology_cores")
                if cpu_topology_cores:
                    cpu_topology["cores"] = cpu_topology_cores
                cpu_topology_threads = params.get("cpu_topology_threads")
                if cpu_topology_threads:
                    cpu_topology["threads"] = cpu_topology_threads
                if cpu_topology:
                    cpuxml.topology = cpu_topology
                vmxml.cpu = cpuxml
                vmxml.vcpu = int(params.get("vcpu_nums"))
            if dargs.get("sec_driver"):
                seclabel_dict = {"type": "dynamic", "model": "selinux",
                                 "relabel": "yes"}
                vmxml.set_seclabel([seclabel_dict])

            vmxml.sync()
            vm.start()
        except Exception, e:
            logging.error(str(e))
            raise error.TestNAError("Build domain xml failed")

    status_error = ("yes" == params.get("status_error", "no"))
    vm_ref = params.get("managedsave_vm_ref", "name")
    libvirtd_state = params.get("libvirtd", "on")
    extra_param = params.get("managedsave_extra_param", "")
    progress = ("yes" == params.get("managedsave_progress", "no"))
    cpu_mode = "yes" == params.get("managedsave_cpumode", "no")
    test_undefine = "yes" == params.get("managedsave_undefine", "no")
    test_bypass_cache = "yes" == params.get("test_bypass_cache", "no")
    autostart_bypass_cache = params.get("autostart_bypass_cache", "")
    multi_guests = params.get("multi_guests", "")
    test_libvirt_guests = params.get("test_libvirt_guests", "")
    check_flags = "yes" == params.get("check_flags", "no")
    security_driver = params.get("security_driver", "")
    remove_after_cmd = "yes" == params.get("remove_after_cmd", "no")
    option = params.get("managedsave_option", "")
    check_shutdown = "yes" == params.get("shutdown_after_cmd", "no")
    pre_vm_state = params.get("pre_vm_state", "")
    move_saved_file = "yes" == params.get("move_saved_file", "no")
    test_loop_cmd = "yes" == params.get("test_loop_cmd", "no")
    if option:
        if not virsh.has_command_help_match('managedsave', option):
            # Older libvirt does not have this option
            raise error.TestNAError("Older libvirt does not"
                                    " handle arguments consistently")

    # Backup xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    # Get the libvirtd service
    libvirtd = utils_libvirtd.Libvirtd()
    # Get config files.
    qemu_config = utils_config.LibvirtQemuConfig()
    libvirt_guests_config = utils_config.LibvirtGuestsConfig()
    # Get libvirt-guests service
    libvirt_guests = Factory.create_service("libvirt-guests")

    try:
        # Destroy vm first for setting configuration file
        if vm.state() == "running":
            vm.destroy(gracefully=False)
        # Prepare test environment.
        if libvirtd_state == "off":
            libvirtd.stop()
        if autostart_bypass_cache:
            ret = virsh.autostart(vm_name, "", ignore_status=True)
            libvirt.check_exit_status(ret)
            qemu_config.auto_start_bypass_cache = autostart_bypass_cache
            libvirtd.restart()
        if security_driver:
            qemu_config.security_driver = [security_driver]
        if test_libvirt_guests:
            if multi_guests:
                start_delay = params.get("start_delay", "20")
                libvirt_guests_config.START_DELAY = start_delay
            if check_flags:
                libvirt_guests_config.BYPASS_CACHE = "1"
            # The config file format should be "x=y" instead of "x = y"
            utils.run("sed -i -e 's/ = /=/g' "
                      "/etc/sysconfig/libvirt-guests")
            libvirt_guests.restart()

        # Change domain xml.
        if cpu_mode:
            build_vm_xml(vm_name, cpu_mode=True)
        if security_driver:
            build_vm_xml(vm_name, sec_driver=True)

        # Turn VM into certain state.
        if pre_vm_state == "transient":
            logging.info("Creating %s..." % vm_name)
            vmxml_for_test = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            if vm.is_alive():
                vm.destroy(gracefully=False)
            # Wait for VM to be in shut off state
            utils_misc.wait_for(lambda: vm.state() == "shut off", 10)
            vm.undefine()
            if virsh.create(vmxml_for_test.xml, ignore_status=True).exit_status:
                vmxml_backup.define()
                raise error.TestNAError("Cann't create the domain")

        # Wait for vm in stable state
        if params.get("start_vm") == "yes":
            if vm.state() == "shut off":
                vm.start()
                vm.wait_for_login()

        # run test case
        domid = vm.get_id()
        domuuid = vm.get_uuid()
        if vm_ref == "id":
            vm_ref = domid
        elif vm_ref == "uuid":
            vm_ref = domuuid
        elif vm_ref == "hex_id":
            vm_ref = hex(int(domid))
        elif vm_ref.count("invalid"):
            vm_ref = params.get(vm_ref)
        elif vm_ref == "name":
            vm_ref = vm_name

        # Ignore exception with "ignore_status=True"
        if progress:
            option += " --verbose"
        option += extra_param

        # For the bypass_cache test, run a shell loop that polls the fd flags
        # of the managed save file (via /proc/<pid>/fdinfo of the libvirt
        # iohelper process) while the managedsave command is executing.
        bash_cmd = ("let i=1; while((i++<400)); do if [ -e %s ]; then (cat /proc"
                    "/$(lsof -w %s|awk '/libvirt_i/{print $2}')/fdinfo/*%s* |"
                    "grep 'flags:.*%s') && break; else sleep 0.05; fi; done;")
        # Flags to check bypass cache take effect
        flags = "014"
        if test_bypass_cache:
            # Drop caches.
            drop_caches()
            virsh_cmd = "virsh managedsave %s %s" % (option, vm_name)
            check_flags_parallel(virsh_cmd, bash_cmd %
                                 (managed_save_file, managed_save_file,
                                  "1", flags), flags)
            # Wait for VM in shut off state
            wait_for_state("shut off")
            virsh_cmd = "virsh start %s %s" % (option, vm_name)
            check_flags_parallel(virsh_cmd, bash_cmd %
                                 (managed_save_file, managed_save_file,
                                  "0", flags), flags)
            # Wait for VM in running state
            wait_for_state("running")
        elif test_libvirt_guests:
            logging.debug("libvirt-guests status: %s", libvirt_guests.status())
            if multi_guests:
                check_multi_guests(multi_guests,
                                   start_delay, libvirt_guests)

            if check_flags:
                check_guest_flags(bash_cmd, flags)

        else:
            # Ensure VM is running
            utils_misc.wait_for(lambda: vm.state() == "running", 10)
            ret = virsh.managedsave(vm_ref, options=option, ignore_status=True)
            status = ret.exit_status
            # The progress information is output in the error message
            error_msg = ret.stderr.strip()
            if move_saved_file:
                cmd = "echo > %s" % managed_save_file
                utils.run(cmd)

            # recover libvirtd service start
            if libvirtd_state == "off":
                libvirtd.start()

            if status_error:
                if not status:
                    raise error.TestFail("Run successfully with wrong command!")
            else:
                if status:
                    raise error.TestFail("Run failed with right command")
                if progress:
                    if not error_msg.count("Managedsave:"):
                        raise error.TestFail("Got invalid progress output")
                if remove_after_cmd:
                    vm_msave_remove_check(vm_name)
                elif test_undefine:
                    vm_undefine_check(vm_name)
                elif autostart_bypass_cache:
                    libvirtd.stop()
                    virsh_cmd = ("(service libvirtd start)")
                    check_flags_parallel(virsh_cmd, bash_cmd %
                                         (managed_save_file, managed_save_file,
                                          "0", flags), flags)
                elif test_loop_cmd:
                    loop_range = params.get("loop_range", "20")
                    vm_managedsave_loop(vm_name, loop_range, libvirtd)
                else:
                    vm_recover_check(option, libvirtd, check_shutdown)
    finally:
        # Restore test environment.

        # Ensure libvirtd is started
        if not libvirtd.is_running():
            libvirtd.start()
        if vm.is_paused():
            virsh.resume(vm_name)
        elif vm.is_dead():
            vm.start()
        # Wait for VM in running state
        wait_for_state("running")
        if autostart_bypass_cache:
            virsh.autostart(vm_name, "--disable",
                            ignore_status=True)
        if vm.is_alive():
            vm.destroy(gracefully=False)
        # Wait for VM to be in shut off state
        utils_misc.wait_for(lambda: vm.state() == "shut off", 10)
        virsh.managedsave_remove(vm_name)
        vmxml_backup.sync()
        if multi_guests:
            for i in range(int(multi_guests)):
                virsh.remove_domain("%s_%s" % (vm_name, i),
                                    "--remove-all-storage")
        qemu_config.restore()
        libvirt_guests_config.restore()
        libvirtd.restart()
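
The bypass-cache checks above pair a virsh command with a background shell
watcher and then grep the combined output for the expected fd flags. Below is
a minimal, self-contained sketch of that pattern, assuming plain subprocess
and the standard logging module instead of the Avocado utils.run helper; the
commands in the usage comment are placeholders, not the test's real ones.

import logging
import re
import subprocess

logging.basicConfig(level=logging.DEBUG)

def check_flags_parallel(virsh_cmd, watcher_cmd, flags):
    """Run both commands in one shell so they overlap, then check the flags."""
    cmd = "%s & %s" % (virsh_cmd, watcher_cmd)
    proc = subprocess.Popen(cmd, shell=True,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT)
    output = proc.communicate()[0].decode()
    logging.debug("check flags output: %s", output)
    lines = re.findall(r"flags:.+%s" % flags, output, re.M)
    logging.debug("Find lines: %s", lines)
    return bool(lines)

# Placeholder usage (hypothetical domain name and watcher command):
# check_flags_parallel("virsh managedsave demo-vm",
#                      "cat /proc/self/fdinfo/0", "02")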

Example 5

Project: tp-libvirt
Source File: virsh_blockcopy.py
View license
def run(test, params, env):
    """
    Test command: virsh blockcopy.

    This command can copy a disk backing image chain to dest.
    1. Positive testing
        1.1 Copy a disk to a new image file.
        1.2 Reuse existing destination copy.
        1.3 Valid blockcopy timeout and bandwidth test.
    2. Negative testing
        2.1 Copy a disk to a non-exist directory.
        2.2 Copy a disk with invalid options.
        2.3 Do block copy for a persistent domain.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    target = params.get("target_disk", "")
    replace_vm_disk = "yes" == params.get("replace_vm_disk", "no")
    disk_source_protocol = params.get("disk_source_protocol")
    disk_type = params.get("disk_type")
    pool_name = params.get("pool_name")
    image_size = params.get("image_size")
    emu_image = params.get("emulated_image")
    copy_to_nfs = "yes" == params.get("copy_to_nfs", "no")
    mnt_path_name = params.get("mnt_path_name")
    options = params.get("blockcopy_options", "")
    bandwidth = params.get("blockcopy_bandwidth", "")
    bandwidth_byte = "yes" == params.get("bandwidth_byte", "no")
    reuse_external = "yes" == params.get("reuse_external", "no")
    persistent_vm = params.get("persistent_vm", "no")
    status_error = "yes" == params.get("status_error", "no")
    active_error = "yes" == params.get("active_error", "no")
    active_snap = "yes" == params.get("active_snap", "no")
    active_save = "yes" == params.get("active_save", "no")
    check_state_lock = "yes" == params.get("check_state_lock", "no")
    with_shallow = "yes" == params.get("with_shallow", "no")
    with_blockdev = "yes" == params.get("with_blockdev", "no")
    setup_libvirt_polkit = "yes" == params.get('setup_libvirt_polkit')
    bug_url = params.get("bug_url", "")
    timeout = int(params.get("timeout", 1200))
    rerun_flag = 0
    blkdev_n = None
    back_n = 'blockdev-backing-iscsi'
    snapshot_external_disks = []
    # Skip/Fail early
    if with_blockdev and not libvirt_version.version_compare(1, 2, 13):
        raise exceptions.TestSkipError("--blockdev option not supported in "
                                       "current version")
    if not target:
        raise exceptions.TestSkipError("Require target disk to copy")
    if setup_libvirt_polkit and not libvirt_version.version_compare(1, 1, 1):
        raise exceptions.TestSkipError("API acl test not supported in current"
                                       " libvirt version")
    if copy_to_nfs and not libvirt_version.version_compare(1, 1, 1):
        raise exceptions.TestSkipError("Bug will not fix: %s" % bug_url)
    if bandwidth_byte and not libvirt_version.version_compare(1, 3, 3):
        raise exceptions.TestSkipError("--bytes option not supported in "
                                       "current version")

    # Check the source disk
    if vm_xml.VMXML.check_disk_exist(vm_name, target):
        logging.debug("Find %s in domain %s", target, vm_name)
    else:
        raise exceptions.TestFail("Can't find %s in domain %s" % (target,
                                                                  vm_name))

    original_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    tmp_dir = data_dir.get_tmp_dir()

    # Prepare dest path params
    dest_path = params.get("dest_path", "")
    dest_format = params.get("dest_format", "")
    # Ugh... this piece of chicanery brought to you by the QemuImg which
    # will "add" the 'dest_format' extension during the check_format code.
    # So if we create the file with the extension and then remove it when
    # doing the check_format later, then we avoid erroneous failures.
    dest_extension = ""
    if dest_format != "":
        dest_extension = ".%s" % dest_format

    # Prepare for --reuse-external option
    if reuse_external:
        options += "--reuse-external --wait"
        # Set rerun_flag=1 to do blockcopy twice; the file created the first
        # time can be reused the second time if no dest_path is given.
        # This makes sure the image size equals the original disk size.
        if dest_path == "/path/non-exist":
            if os.path.exists(dest_path) and not os.path.isdir(dest_path):
                os.remove(dest_path)
        else:
            rerun_flag = 1

    # Prepare other options
    if dest_format == "raw":
        options += " --raw"
    if with_blockdev:
        options += " --blockdev"
    if len(bandwidth):
        options += " --bandwidth %s" % bandwidth
    if bandwidth_byte:
        options += " --bytes"
    if with_shallow:
        options += " --shallow"

    # Prepare acl options
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = 'testacl'

    extra_dict = {'uri': uri, 'unprivileged_user': unprivileged_user,
                  'debug': True, 'ignore_status': True, 'timeout': timeout}

    libvirtd_utl = utils_libvirtd.Libvirtd()
    libvirtd_conf = utils_config.LibvirtdConfig()
    libvirtd_conf["log_filters"] = '"3:json 1:libvirt 1:qemu"'
    libvirtd_log_path = os.path.join(test.tmpdir, "libvirtd.log")
    libvirtd_conf["log_outputs"] = '"1:file:%s"' % libvirtd_log_path
    logging.debug("the libvirtd config file content is:\n %s" %
                  libvirtd_conf)
    libvirtd_utl.restart()

    def check_format(dest_path, dest_extension, expect):
        """
        Check the image format

        :param dest_path: Path of the copy to create
        :param expect: Expect image format
        """
        # And now because the QemuImg will add the extension for us
        # we have to remove it here.
        path_noext = dest_path.strip(dest_extension)
        params['image_name'] = path_noext
        params['image_format'] = expect
        image = qemu_storage.QemuImg(params, "/", path_noext)
        if image.get_format() == expect:
            logging.debug("%s format is %s", dest_path, expect)
        else:
            raise exceptions.TestFail("%s format is not %s" % (dest_path,
                                                               expect))

    def _blockjob_and_libvirtd_chk(cmd_result):
        """
        Raise TestFail when blockcopy fails with a block-job-complete error or
        hangs on the state change lock.
        This verifies a specific bug, so status_error is ignored here.
        """
        bug_url_ = "https://bugzilla.redhat.com/show_bug.cgi?id=1197592"
        err_msg = "internal error: unable to execute QEMU command"
        err_msg += " 'block-job-complete'"
        if err_msg in cmd_result.stderr:
            raise exceptions.TestFail("Hit on bug: %s" % bug_url_)

        err_pattern = "Timed out during operation: cannot acquire"
        err_pattern += " state change lock"
        ret = chk_libvirtd_log(libvirtd_log_path, err_pattern, "error")
        if ret:
            raise exceptions.TestFail("Hit on bug: %s" % bug_url_)

    def _make_snapshot():
        """
        Make external disk snapshot
        """
        snap_xml = snapshot_xml.SnapshotXML()
        snapshot_name = "blockcopy_snap"
        snap_xml.snap_name = snapshot_name
        snap_xml.description = "blockcopy snapshot"

        # Add all disks into xml file.
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        disks = vmxml.devices.by_device_tag('disk')
        new_disks = []
        src_disk_xml = disks[0]
        disk_xml = snap_xml.SnapDiskXML()
        disk_xml.xmltreefile = src_disk_xml.xmltreefile
        del disk_xml.device
        del disk_xml.address
        disk_xml.snapshot = "external"
        disk_xml.disk_name = disk_xml.target['dev']

        # Only qcow2 works as external snapshot file format, update it
        # here
        driver_attr = disk_xml.driver
        driver_attr.update({'type': 'qcow2'})
        disk_xml.driver = driver_attr

        new_attrs = disk_xml.source.attrs
        if disk_xml.source.attrs.has_key('file'):
            new_file = os.path.join(tmp_dir, "blockcopy_shallow.snap")
            snapshot_external_disks.append(new_file)
            new_attrs.update({'file': new_file})
            hosts = None
        elif (disk_xml.source.attrs.has_key('dev') or
              disk_xml.source.attrs.has_key('name') or
              disk_xml.source.attrs.has_key('pool')):
            if (disk_xml.type_name == 'block' or
                    disk_source_protocol == 'iscsi'):
                disk_xml.type_name = 'block'
                if new_attrs.has_key('name'):
                    del new_attrs['name']
                    del new_attrs['protocol']
                elif new_attrs.has_key('pool'):
                    del new_attrs['pool']
                    del new_attrs['volume']
                    del new_attrs['mode']
                back_path = utl.setup_or_cleanup_iscsi(is_setup=True,
                                                       is_login=True,
                                                       image_size="1G",
                                                       emulated_image=back_n)
                emulated_iscsi.append(back_n)
                cmd = "qemu-img create -f qcow2 %s 1G" % back_path
                process.run(cmd, shell=True)
                new_attrs.update({'dev': back_path})
                hosts = None

        new_src_dict = {"attrs": new_attrs}
        if hosts:
            new_src_dict.update({"hosts": hosts})
        disk_xml.source = disk_xml.new_disk_source(**new_src_dict)

        new_disks.append(disk_xml)

        snap_xml.set_disks(new_disks)
        snapshot_xml_path = snap_xml.xml
        logging.debug("The snapshot xml is: %s" % snap_xml.xmltreefile)

        options = "--disk-only --xmlfile %s " % snapshot_xml_path

        snapshot_result = virsh.snapshot_create(
            vm_name, options, debug=True)

        if snapshot_result.exit_status != 0:
            raise exceptions.TestFail(snapshot_result.stderr)

    snap_path = ''
    save_path = ''
    emulated_iscsi = []
    nfs_cleanup = False
    try:
        # Prepare dest_path
        tmp_file = time.strftime("%Y-%m-%d-%H.%M.%S.img")
        tmp_file += dest_extension
        if not dest_path:
            if with_blockdev:
                blkdev_n = 'blockdev-iscsi'
                dest_path = utl.setup_or_cleanup_iscsi(is_setup=True,
                                                       is_login=True,
                                                       image_size=image_size,
                                                       emulated_image=blkdev_n)
                emulated_iscsi.append(blkdev_n)
                # Make sure the new disk shows up
                utils_misc.wait_for(lambda: os.path.exists(dest_path), 5)
            else:
                if copy_to_nfs:
                    tmp_dir = "%s/%s" % (tmp_dir, mnt_path_name)
                dest_path = os.path.join(tmp_dir, tmp_file)

        # Domain disk replacement with the desired type
        if replace_vm_disk:
            # Calling 'set_vm_disk' is a bad idea as it leaves lots of cleanup
            # jobs after the test, such as pool, volume, nfs, iscsi and so on
            # TODO: remove this function in the future
            if disk_source_protocol == 'iscsi':
                emulated_iscsi.append(emu_image)
            if disk_source_protocol == 'netfs':
                nfs_cleanup = True
            utl.set_vm_disk(vm, params, tmp_dir, test)
            new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

        if with_shallow:
            _make_snapshot()

        # Prepare transient/persistent vm
        if persistent_vm == "no" and vm.is_persistent():
            vm.undefine()
        elif persistent_vm == "yes" and not vm.is_persistent():
            new_xml.define()

        # Run blockcopy command to create destination file
        if rerun_flag == 1:
            options1 = "--wait %s --finish --verbose" % dest_format
            if with_blockdev:
                options1 += " --blockdev"
            if with_shallow:
                options1 += " --shallow"
            cmd_result = virsh.blockcopy(vm_name, target,
                                         dest_path, options1,
                                         **extra_dict)
            status = cmd_result.exit_status
            if status != 0:
                raise exceptions.TestFail("Run blockcopy command fail: %s" %
                                          cmd_result.stdout + cmd_result.stderr)
            elif not os.path.exists(dest_path):
                raise exceptions.TestFail("Cannot find the created copy")

        # Run the real testing command
        cmd_result = virsh.blockcopy(vm_name, target, dest_path,
                                     options, **extra_dict)

        # check BZ#1197592
        _blockjob_and_libvirtd_chk(cmd_result)
        status = cmd_result.exit_status

        if not libvirtd_utl.is_running():
            raise exceptions.TestFail("Libvirtd service is dead")

        if not status_error:
            if status == 0:
                ret = utils_misc.wait_for(
                    lambda: check_xml(vm_name, target, dest_path, options), 5)
                if not ret:
                    raise exceptions.TestFail("Domain xml not expected after"
                                              " blockcopy")
                if options.count("--bandwidth"):
                    if options.count('--bytes'):
                        bandwidth += 'B'
                    else:
                        bandwidth += 'M'
                    if not utl.check_blockjob(vm_name, target, "bandwidth",
                                              bandwidth):
                        raise exceptions.TestFail("Check bandwidth failed")
                val = options.count("--pivot") + options.count("--finish")
                # Don't wait for the job to finish when using the --bytes option
                val += options.count('--bytes')
                if val == 0:
                    try:
                        finish_job(vm_name, target, timeout)
                    except JobTimeout, excpt:
                        raise exceptions.TestFail("Run command failed: %s" %
                                                  excpt)
                if options.count("--raw") and not with_blockdev:
                    check_format(dest_path, dest_extension, dest_format)
                if active_snap:
                    snap_path = "%s/%s.snap" % (tmp_dir, vm_name)
                    snap_opt = "--disk-only --atomic --no-metadata "
                    snap_opt += "vda,snapshot=external,file=%s" % snap_path
                    ret = virsh.snapshot_create_as(vm_name, snap_opt,
                                                   ignore_status=True,
                                                   debug=True)
                    utl.check_exit_status(ret, active_error)
                if active_save:
                    save_path = "%s/%s.save" % (tmp_dir, vm_name)
                    ret = virsh.save(vm_name, save_path,
                                     ignore_status=True,
                                     debug=True)
                    utl.check_exit_status(ret, active_error)
                if check_state_lock:
                    # Run 'blockjob --pivot' in a subprocess as it will hang
                    # for a while, then run 'blockjob --info' again to check
                    # the job state
                    command = "virsh blockjob %s %s --pivot" % (vm_name,
                                                                target)
                    session = aexpect.ShellSession(command)
                    ret = virsh.blockjob(vm_name, target, "--info")
                    err_info = "cannot acquire state change lock"
                    if err_info in ret.stderr:
                        raise exceptions.TestFail("Hit on bug: %s" % bug_url)
                    utl.check_exit_status(ret, status_error)
                    session.close()
            else:
                raise exceptions.TestFail(cmd_result.stdout + cmd_result.stderr)
        else:
            if status:
                logging.debug("Expect error: %s", cmd_result.stderr)
            else:
                # Commit id '4c297728' changed how virsh exits when it
                # unexpectedly fails due to a timeout, from a failure (1)
                # to a success (0), so we need to look for a different
                # marker to indicate the copy aborted. Since "Now in
                # mirroring phase" may appear in stdout and fail the
                # check, also check the libvirtd log to confirm.
                if options.count("--timeout") and options.count("--wait"):
                    log_pattern = "Copy aborted"
                    if (re.search(log_pattern, cmd_result.stdout) or
                            chk_libvirtd_log(libvirtd_log_path,
                                             log_pattern, "debug")):
                        logging.debug("Found success a timed out block copy")
                else:
                    raise exceptions.TestFail("Expect fail, but run "
                                              "successfully: %s" % bug_url)
    finally:
        # Recovering the VM may fail unexpectedly, so use try/except to
        # proceed with the following cleanup steps
        try:
            # Abort any existing blockjob to avoid possible lock errors
            virsh.blockjob(vm_name, target, '--abort', ignore_status=True)
            vm.destroy(gracefully=False)
            # It may take a long time to shutdown the VM which has
            # blockjob running
            utils_misc.wait_for(
                lambda: virsh.domstate(vm_name,
                                       ignore_status=True).exit_status, 180)
            if virsh.domain_exists(vm_name):
                if active_snap or with_shallow:
                    option = "--snapshots-metadata"
                else:
                    option = None
                original_xml.sync(option)
            else:
                original_xml.define()
        except Exception, e:
            logging.error(e)
        for disk in snapshot_external_disks:
            if os.path.exists(disk):
                os.remove(disk)
        # Clean up libvirt pool, which may be created by 'set_vm_disk'
        if disk_type == 'volume':
            virsh.pool_destroy(pool_name, ignore_status=True, debug=True)
        # Restore libvirtd conf and restart libvirtd
        libvirtd_conf.restore()
        libvirtd_utl.restart()
        if libvirtd_log_path and os.path.exists(libvirtd_log_path):
            os.unlink(libvirtd_log_path)
        # Clean up NFS
        try:
            if nfs_cleanup:
                utl.setup_or_cleanup_nfs(is_setup=False)
        except Exception, e:
            logging.error(e)
        # Clean up iSCSI
        try:
            for iscsi_n in list(set(emulated_iscsi)):
                utl.setup_or_cleanup_iscsi(is_setup=False, emulated_image=iscsi_n)
                # iscsid will be restarted, so give it a break before next loop
                time.sleep(5)
        except Exception, e:
            logging.error(e)
        if os.path.exists(dest_path):
            os.remove(dest_path)
        if os.path.exists(snap_path):
            os.remove(snap_path)
        if os.path.exists(save_path):
            os.remove(save_path)
        # Restart virtlogd service to release VM log file lock
        try:
            path.find_command('virtlogd')
            process.run('systemctl reset-failed virtlogd')
            process.run('systemctl restart virtlogd')
        except path.CmdNotFoundError:
            pass
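
check_format() above goes through the avocado-vt QemuImg wrapper. A roughly
equivalent standalone sketch, assuming the qemu-img binary is available on
PATH and simply parsing the "file format:" line of its info output:

import logging
import subprocess

logging.basicConfig(level=logging.DEBUG)

def check_image_format(path, expect):
    """Return True if 'qemu-img info' reports the expected format for path."""
    out = subprocess.check_output(["qemu-img", "info", path]).decode()
    logging.debug("qemu-img info output:\n%s", out)
    for line in out.splitlines():
        if line.startswith("file format:"):
            fmt = line.split(":", 1)[1].strip()
            logging.debug("%s format is %s (expected %s)", path, fmt, expect)
            return fmt == expect
    return False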

Example 6

Project: tp-libvirt
Source File: libvirt_mem.py
View license
def run(test, params, env):
    """
    Test memory devices.

    1. Prepare test environment, destroy or suspend a VM.
    2. Prepare the memory device xml.
    3. Edit the domain xml and start the domain.
    4. Perform test operation.
    5. Recover test environment.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}
    # Global variable to store max/current memory,
    # it may change after attach/detach
    new_max_mem = None
    new_cur_mem = None

    def get_vm_memtotal(session):
        """
        Get guest total memory
        """
        proc_meminfo = session.cmd_output("cat /proc/meminfo")
        # verify format and units are expected
        return int(re.search(r'MemTotal:\s+(\d+)\s+[kK]B',
                             proc_meminfo).group(1))

    def consume_vm_mem(size=1000, timeout=360):
        """
        To consume guest memory, default size is 1000M
        """
        session = vm.wait_for_login()
        # Mount tmpfs on /mnt and write to a file on it;
        # this is the memory-consuming operation
        sh_cmd = ("swapoff -a; mount -t tmpfs -o size={0}M tmpfs "
                  "/mnt; dd if=/dev/urandom of=/mnt/test bs=1M"
                  " count={0}".format(size))
        session.cmd(sh_cmd, timeout=timeout)
        session.close()

    def check_qemu_cmd():
        """
        Check qemu command line options.
        """
        cmd = ("ps -ef | grep %s | grep -v grep " % vm_name)
        if max_mem_rt:
            cmd += (" | grep 'slots=%s,maxmem=%sk'"
                    % (max_mem_slots, max_mem_rt))
        if tg_size:
            size = int(tg_size) * 1024
            cmd += (" | grep 'memory-backend-ram,id=memdimm0,size=%s"
                    % size)
            if pg_size:
                cmd += ",host-nodes=%s" % node_mask
                if numa_memnode:
                    for node in numa_memnode:
                        if ('nodeset' in node and
                                node['nodeset'] in node_mask):
                            cmd += ",policy=%s" % node['mode']
                cmd += ".*pc-dimm,node=%s" % tg_node
            if mem_addr:
                cmd += (".*slot=%s,addr=%s" %
                        (mem_addr['slot'], int(mem_addr['base'], 16)))
            cmd += "'"
        # Run the command
        utils.run(cmd)

    def check_guest_meminfo(old_mem):
        """
        Check meminfo on guest.
        """
        assert old_mem is not None
        session = vm.wait_for_login()
        # Hot-plugged memory should be brought online by udev rules
        udev_file = "/lib/udev/rules.d/80-hotplug-cpu-mem.rules"
        udev_rules = ('SUBSYSTEM=="memory", ACTION=="add", TEST=="state",'
                      ' ATTR{state}=="offline", ATTR{state}="online"')
        cmd = ("grep memory %s || echo '%s' >> %s"
               % (udev_file, udev_rules, udev_file))
        session.cmd(cmd)
        # Wait a while for new memory to be detected.
        utils_misc.wait_for(
            lambda: get_vm_memtotal(session) != int(old_mem), 5)
        new_mem = get_vm_memtotal(session)
        session.close()
        logging.debug("Memtotal on guest: %s", new_mem)
        if new_mem != int(old_mem) + int(tg_size):
            raise error.TestFail("Total memory on guest couldn't"
                                 " changed after attach memory "
                                 "device")

    def check_dom_xml(at_mem=False, dt_mem=False):
        """
        Check domain xml options.
        """
        # Global variable to store max/current memory
        global new_max_mem
        global new_cur_mem
        if attach_option.count("config"):
            dom_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        else:
            dom_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        try:
            xml_max_mem_rt = int(dom_xml.max_mem_rt)
            xml_max_mem = int(dom_xml.max_mem)
            xml_cur_mem = int(dom_xml.current_mem)
            assert int(max_mem_rt) == xml_max_mem_rt

            # Check attached/detached memory
            if at_mem:
                assert int(max_mem) + int(tg_size) == xml_max_mem
                # Bug 1220702, skip the check for current memory
                assert int(cur_mem) + int(tg_size) == xml_cur_mem
                new_max_mem = xml_max_mem
                new_cur_mem = xml_cur_mem
                mem_dev = dom_xml.get_devices("memory")
                if len(mem_dev) != 1:
                    raise error.TestFail("Found wrong number of"
                                         " memory device")
                assert int(tg_size) == int(mem_dev[0].target.size)
                assert int(tg_node) == int(mem_dev[0].target.node)
            elif dt_mem:
                assert int(new_max_mem) - int(tg_size) == xml_max_mem
                # Bug 1220702, skip the check for current memory
                assert int(new_cur_mem) - int(tg_size) == xml_cur_mem
        except AssertionError:
            utils.log_last_traceback()
            raise error.TestFail("Found unmatched memory setting"
                                 " from domain xml")

    def check_save_restore():
        """
        Test save and restore operation
        """
        save_file = os.path.join(test.tmpdir,
                                 "%s.save" % vm_name)
        ret = virsh.save(vm_name, save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)
        ret = virsh.restore(save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)
        if os.path.exists(save_file):
            os.remove(save_file)
        # Login to check vm status
        vm.wait_for_login().close()

    def create_mem_xml():
        """
        Create memory device xml.
        """
        mem_xml = memory.Memory()
        mem_model = params.get("mem_model", "dimm")
        mem_xml.mem_model = mem_model
        if tg_size:
            tg_xml = memory.Memory.Target()
            tg_xml.size = int(tg_size)
            tg_xml.size_unit = tg_sizeunit
            tg_xml.node = int(tg_node)
            mem_xml.target = tg_xml
        if pg_size:
            src_xml = memory.Memory.Source()
            src_xml.pagesize = int(pg_size)
            src_xml.pagesize_unit = pg_unit
            src_xml.nodemask = node_mask
            mem_xml.source = src_xml
        if mem_addr:
            mem_xml.address = mem_xml.new_mem_address(
                **{"attrs": mem_addr})

        logging.debug("Memory device xml: %s", mem_xml)
        return mem_xml.copy()

    def add_device(dev_xml, at_error=False):
        """
        Add memory device by attachment or modify domain xml.
        """
        if attach_device:
            ret = virsh.attach_device(vm_name, dev_xml.xml,
                                      flagstr=attach_option)
            libvirt.check_exit_status(ret, at_error)
        else:
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
            if numa_cells:
                del vmxml.max_mem
                del vmxml.current_mem
            vmxml.add_device(dev_xml)
            vmxml.sync()

    def modify_domain_xml():
        """
        Modify domain xml and define it.
        """
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
        mem_unit = params.get("mem_unit", "KiB")
        vcpu = params.get("vcpu", "4")
        if max_mem_rt:
            vmxml.max_mem_rt = int(max_mem_rt)
            vmxml.max_mem_rt_slots = max_mem_slots
            vmxml.max_mem_rt_unit = mem_unit
        if vcpu:
            vmxml.vcpu = int(vcpu)
            vcpu_placement = params.get("vcpu_placement", "static")
            vmxml.placement = vcpu_placement
        if numa_memnode:
            vmxml.numa_memory = {}
            vmxml.numa_memnode = numa_memnode
        else:
            try:
                del vmxml.numa_memory
                del vmxml.numa_memnode
            except:
                # Not exists
                pass

        if numa_cells:
            cells = [ast.literal_eval(x) for x in numa_cells]
            cpu_xml = vm_xml.VMCPUXML()
            cpu_xml.xml = "<cpu><numa/></cpu>"
            cpu_mode = params.get("cpu_mode")
            model_fallback = params.get("model_fallback")
            if cpu_mode:
                cpu_xml.mode = cpu_mode
            if model_fallback:
                cpu_xml.fallback = model_fallback
            cpu_xml.numa_cell = cells
            vmxml.cpu = cpu_xml
            # Delete the memory and currentMemory tags,
            # libvirt will fill them in automatically
            del vmxml.max_mem
            del vmxml.current_mem

        # hugepages setting
        if huge_pages:
            membacking = vm_xml.VMMemBackingXML()
            hugepages = vm_xml.VMHugepagesXML()
            pagexml_list = []
            for i in range(len(huge_pages)):
                pagexml = hugepages.PageXML()
                pagexml.update(huge_pages[i])
                pagexml_list.append(pagexml)
            hugepages.pages = pagexml_list
            membacking.hugepages = hugepages
            vmxml.mb = membacking

        logging.debug("vm xml: %s", vmxml)
        vmxml.sync()

    pre_vm_state = params.get("pre_vm_state", "running")
    attach_device = "yes" == params.get("attach_device", "no")
    detach_device = "yes" == params.get("detach_device", "no")
    attach_error = "yes" == params.get("attach_error", "no")
    start_error = "yes" == params.get("start_error", "no")
    detach_error = "yes" == params.get("detach_error", "no")
    maxmem_error = "yes" == params.get("maxmem_error", "no")
    attach_option = params.get("attach_option", "")
    test_qemu_cmd = "yes" == params.get("test_qemu_cmd", "no")
    test_managedsave = "yes" == params.get("test_managedsave", "no")
    test_save_restore = "yes" == params.get("test_save_restore", "no")
    test_mem_binding = "yes" == params.get("test_mem_binding", "no")
    restart_libvirtd = "yes" == params.get("restart_libvirtd", "no")
    add_mem_device = "yes" == params.get("add_mem_device", "no")
    test_dom_xml = "yes" == params.get("test_dom_xml", "no")
    max_mem = params.get("max_mem")
    max_mem_rt = params.get("max_mem_rt")
    max_mem_slots = params.get("max_mem_slots", "16")
    cur_mem = params.get("current_mem")
    numa_cells = params.get("numa_cells", "").split()
    set_max_mem = params.get("set_max_mem")

    # params for attached device
    tg_size = params.get("tg_size")
    tg_sizeunit = params.get("tg_sizeunit", 'KiB')
    tg_node = params.get("tg_node", 0)
    pg_size = params.get("page_size")
    pg_unit = params.get("page_unit", "KiB")
    node_mask = params.get("node_mask", "0")
    mem_addr = ast.literal_eval(params.get("memory_addr", "{}"))
    huge_pages = [ast.literal_eval(x)
                  for x in params.get("huge_pages", "").split()]
    numa_memnode = [ast.literal_eval(x)
                    for x in params.get("numa_memnode", "").split()]

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    try:
        # Drop caches first for host has enough memory
        drop_caches()
        # Destroy domain first
        if vm.is_alive():
            vm.destroy(gracefully=False)
        modify_domain_xml()

        # Start the domain anyway if attaching a memory device
        old_mem_total = None
        if attach_device:
            vm.start()
            session = vm.wait_for_login()
            old_mem_total = get_vm_memtotal(session)
            logging.debug("Memtotal on guest: %s", old_mem_total)
            session.close()
        dev_xml = None

        # To attach the memory device.
        if add_mem_device:
            at_times = int(params.get("attach_times", 1))
            dev_xml = create_mem_xml()
            for x in xrange(at_times):
                # If any error is expected, the command error status should
                # be checked on the last iteration
                if x == at_times - 1:
                    add_device(dev_xml, attach_error)
                else:
                    add_device(dev_xml)

        # Check domain xml after attach device.
        if test_dom_xml:
            check_dom_xml(at_mem=attach_device)

        # Set domain state
        if pre_vm_state == "transient":
            logging.info("Creating %s...", vm_name)
            vmxml_for_test = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            if vm.is_alive():
                vm.destroy(gracefully=False)
            vm.undefine()
            if virsh.create(vmxml_for_test.xml,
                            **virsh_dargs).exit_status:
                vmxml_backup.define()
                raise error.TestFail("Cann't create the domain")
        elif vm.is_dead():
            try:
                vm.start()
                vm.wait_for_login().close()
            except virt_vm.VMStartError:
                if start_error:
                    pass
                else:
                    raise error.TestFail("VM Failed to start"
                                         " for some reason!")

        # Set memory operation
        if set_max_mem:
            max_mem_option = params.get("max_mem_option", "")
            ret = virsh.setmaxmem(vm_name, set_max_mem,
                                  flagstr=max_mem_option)
            libvirt.check_exit_status(ret, maxmem_error)

        # Check domain xml after start the domain.
        if test_dom_xml:
            check_dom_xml(at_mem=attach_device)

        # Check qemu command line
        if test_qemu_cmd:
            check_qemu_cmd()

        # Check guest meminfo after attachment
        if (attach_device and not attach_option.count("config") and
                not any([attach_error, start_error])):
            check_guest_meminfo(old_mem_total)

        # Consume memory on the guest
        # to verify memory changes via numastat
        if test_mem_binding:
            pid = vm.get_pid()
            old_numastat = read_from_numastat(pid, "Total")
            logging.debug("Numastat: %s", old_numastat)
            consume_vm_mem()
            new_numastat = read_from_numastat(pid, "Total")
            logging.debug("Numastat: %s", new_numastat)
            # Only check total memory which is the last element
            if float(new_numastat[-1]) - float(old_numastat[-1]) < 0:
                raise error.TestFail("Numa memory can't be consumed"
                                     " on guest")

        # Run managedsave command to check domain xml.
        if test_managedsave:
            ret = virsh.managedsave(vm_name, **virsh_dargs)
            libvirt.check_exit_status(ret)
            vm.start()
            vm.wait_for_login().close()
            if test_dom_xml:
                check_dom_xml(at_mem=attach_device)

        # Run save and restore command to check domain xml
        if test_save_restore:
            check_save_restore()
            if test_dom_xml:
                check_dom_xml(at_mem=attach_device)

        # Check domain xml after restarting libvirtd
        if restart_libvirtd:
            libvirtd = utils_libvirtd.Libvirtd()
            libvirtd.restart()
            if test_dom_xml:
                check_dom_xml(at_mem=attach_device)

        # Detach the memory device
        if detach_device:
            if not dev_xml:
                dev_xml = create_mem_xml()
            ret = virsh.detach_device(vm_name, dev_xml.xml,
                                      flagstr=attach_option)
            libvirt.check_exit_status(ret, detach_error)
            if test_dom_xml:
                check_dom_xml(dt_mem=detach_device)

    finally:
        # Delete snapshots.
        snapshot_lists = virsh.snapshot_list(vm_name)
        if len(snapshot_lists) > 0:
            libvirt.clean_up_snapshots(vm_name, snapshot_lists)
            for snap in snapshot_lists:
                virsh.snapshot_delete(vm_name, snap, "--metadata")

        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        logging.info("Restoring vm...")
        vmxml_backup.sync()
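
get_vm_memtotal() above runs "cat /proc/meminfo" over a guest session; the
parsing itself works on any meminfo text. A minimal sketch, assuming a Linux
host so the local /proc/meminfo can stand in for the guest's:

import logging
import re

logging.basicConfig(level=logging.DEBUG)

def parse_memtotal(meminfo_text):
    """Return MemTotal in kB parsed from /proc/meminfo content."""
    match = re.search(r'MemTotal:\s+(\d+)\s+[kK]B', meminfo_text)
    if not match:
        raise ValueError("MemTotal not found in meminfo")
    total_kb = int(match.group(1))
    logging.debug("MemTotal: %s kB", total_kb)
    return total_kb

if __name__ == "__main__":
    with open("/proc/meminfo") as handle:
        parse_memtotal(handle.read())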

Example 7

Project: tp-libvirt
Source File: libvirt_rng.py
View license
def run(test, params, env):
    """
    Test rng device options.

    1.Prepare test environment, destroy or suspend a VM.
    2.Edit xml and start the domain.
    3.Perform test operation.
    4.Recover test environment.
    5.Confirm the test result.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    def modify_rng_xml(dparams, sync=True):
        """
        Modify rng device xml options
        """
        rng_model = dparams.get("rng_model", "virtio")
        rng_rate = dparams.get("rng_rate")
        backend_model = dparams.get("backend_model", "random")
        backend_type = dparams.get("backend_type")
        backend_dev = dparams.get("backend_dev", "")
        backend_source_list = dparams.get("backend_source",
                                          "").split()
        backend_protocol = dparams.get("backend_protocol")
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        rng_xml = rng.Rng()
        rng_xml.rng_model = rng_model
        if rng_rate:
            rng_xml.rate = ast.literal_eval(rng_rate)
        backend = rng.Rng.Backend()
        backend.backend_model = backend_model
        if backend_type:
            backend.backend_type = backend_type
        if backend_dev:
            backend.backend_dev = backend_dev
        if backend_source_list:
            source_list = [ast.literal_eval(source) for source in
                           backend_source_list]
            backend.source = source_list
        if backend_protocol:
            backend.backend_protocol = backend_protocol
        rng_xml.backend = backend

        logging.debug("Rng xml: %s", rng_xml)
        if sync:
            vmxml.add_device(rng_xml)
            vmxml.xmltreefile.write()
            vmxml.sync()
        else:
            status = libvirt.exec_virsh_edit(
                vm_name, [(r":/<devices>/s/$/%s" %
                           re.findall(r"<rng.*<\/rng>",
                                      str(rng_xml), re.M
                                      )[0].replace("/", "\/"))])
            if not status:
                raise error.TestFail("Failed to edit vm xml")

    def check_qemu_cmd(dparams):
        """
        Verify qemu-kvm command line.
        """
        rng_model = dparams.get("rng_model", "virtio")
        rng_rate = dparams.get("rng_rate")
        backend_type = dparams.get("backend_type")
        backend_source_list = dparams.get("backend_source",
                                          "").split()
        cmd = ("ps -ef | grep %s | grep -v grep" % vm_name)
        chardev = src_host = src_port = None
        if backend_type == "tcp":
            chardev = "socket"
        elif backend_type == "udp":
            chardev = "udp"
        for bc_source in backend_source_list:
            source = ast.literal_eval(bc_source)
            if "mode" in source and source['mode'] == "connect":
                src_host = source['host']
                src_port = source['service']

        if chardev and src_host and src_port:
            cmd += (" | grep 'chardev %s,.*host=%s,port=%s'"
                    % (chardev, src_host, src_port))
        if rng_model == "virtio":
            cmd += (" | grep 'device virtio-rng-pci'")
        if rng_rate:
            rate = ast.literal_eval(rng_rate)
            cmd += (" | grep 'max-bytes=%s,period=%s'"
                    % (rate['bytes'], rate['period']))
        if utils.run(cmd, ignore_status=True).exit_status:
            raise error.TestFail("Cann't see rng option"
                                 " in command line")

    def check_host():
        """
        Check random device on host
        """
        backend_dev = params.get("backend_dev")
        if backend_dev:
            cmd = "lsof %s" % backend_dev
            ret = utils.run(cmd, ignore_status=True)
            if ret.exit_status or not ret.stdout.count("qemu"):
                raise error.TestFail("Failed to check random device"
                                     " on host, command output: %s",
                                     ret.stdout)

    def check_snapshot(bgjob=None):
        """
        Do snapshot operation and check the results
        """
        snapshot_name1 = "snap.s1"
        snapshot_name2 = "snap.s2"
        if not snapshot_vm_running:
            vm.destroy(gracefully=False)
        ret = virsh.snapshot_create_as(vm_name, snapshot_name1)
        libvirt.check_exit_status(ret)
        snap_lists = virsh.snapshot_list(vm_name)
        if snapshot_name not in snap_lists:
            raise error.TestFail("Snapshot %s doesn't exist"
                                 % snapshot_name)

        if snapshot_vm_running:
            options = "--force"
        else:
            options = ""
        ret = virsh.snapshot_revert(
            vm_name, ("%s %s" % (snapshot_name, options)))
        libvirt.check_exit_status(ret)
        ret = virsh.dumpxml(vm_name)
        if ret.stdout.count("<rng model="):
            raise error.TestFail("Found rng device in xml")

        if snapshot_with_rng:
            if vm.is_alive():
                vm.destroy(gracefully=False)
            if bgjob:
                bgjob.kill_func()
            modify_rng_xml(params, False)

        # Start the domain before disk-only snapshot
        if vm.is_dead():
            # Add random server
            if params.get("backend_type") == "tcp":
                cmd = "cat /dev/random | nc -4 -l localhost 1024"
                bgjob = utils.AsyncJob(cmd)
            vm.start()
            vm.wait_for_login().close()
        err_msgs = ("live disk snapshot not supported"
                    " with this QEMU binary")
        ret = virsh.snapshot_create_as(vm_name,
                                       "%s --disk-only"
                                       % snapshot_name2)
        if ret.exit_status:
            if ret.stderr.count(err_msgs):
                raise error.TestNAError(err_msgs)
            else:
                raise error.TestFail("Failed to create external snapshot")
        snap_lists = virsh.snapshot_list(vm_name)
        if snapshot_name2 not in snap_lists:
            raise error.TestFail("Failed to check snapshot list")

        ret = virsh.domblklist(vm_name)
        if not ret.stdout.count(snapshot_name2):
            raise error.TestFail("Failed to find snapshot disk")

    def check_guest(session):
        """
        Check random device on guest
        """
        rng_files = (
            "/sys/devices/virtual/misc/hw_random/rng_available",
            "/sys/devices/virtual/misc/hw_random/rng_current")
        rng_avail = session.cmd_output("cat %s" % rng_files[0]).strip()
        rng_currt = session.cmd_output("cat %s" % rng_files[1]).strip()
        logging.debug("rng avail:%s, current:%s", rng_avail, rng_currt)
        if not rng_currt.count("virtio") or rng_currt not in rng_avail:
            raise error.TestFail("Failed to check rng file on guest")

        # Read the random device
        cmd = ("dd if=/dev/hwrng of=rng.test count=100"
               " && rm -f rng.test")
        ret, output = session.cmd_status_output(cmd, timeout=120)
        if ret:
            raise error.TestFail("Failed to read the random device")
        rng_rate = params.get("rng_rate")
        if rng_rate:
            rate_bytes, rate_period = ast.literal_eval(rng_rate).values()
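            # The configured period is in milliseconds, so convert the limit
            # to bytes per second before comparing with the measured rate.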
            rate_conf = float(rate_bytes) / (float(rate_period)/1000)
            ret = re.search(r"(\d+) bytes.*copied, (\d+.\d+) s",
                            output, re.M)
            if not ret:
                raise error.TestFail("Can't find rate from output")
            rate_real = float(ret.group(1)) / float(ret.group(2))
            logging.debug("Find rate: %s, config rate: %s",
                          rate_real, rate_conf)
            if rate_real > rate_conf * 1.2:
                raise error.TestFail("The rate of reading exceed"
                                     " the limitation of configuration")
        if device_num > 1:
            rng_dev = rng_avail.split()
            if len(rng_dev) != device_num:
                raise error.TestNAError("Multiple virtio-rng devices are not"
                                        " supported on this guest kernel. "
                                        "Bug: https://bugzilla.redhat.com/"
                                        "show_bug.cgi?id=915335")
            session.cmd("echo -n %s > %s" % (rng_dev[1], rng_files[1]))
            # Read the random device
            if session.cmd_status(cmd, timeout=120):
                raise error.TestFail("Failed to read the random device")

    start_error = "yes" == params.get("start_error", "no")

    test_host = "yes" == params.get("test_host", "no")
    test_guest = "yes" == params.get("test_guest", "no")
    test_qemu_cmd = "yes" == params.get("test_qemu_cmd", "no")
    test_snapshot = "yes" == params.get("test_snapshot", "no")
    snapshot_vm_running = "yes" == params.get("snapshot_vm_running",
                                              "no")
    snapshot_with_rng = "yes" == params.get("snapshot_with_rng", "no")
    snapshot_name = params.get("snapshot_name")
    device_num = int(params.get("device_num", 1))

    if device_num > 1 and not libvirt_version.version_compare(1, 2, 7):
        raise error.TestNAError("Multiple virtio-rng devices not "
                                "supported on this libvirt version")
    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Try to install rng-tools on the host; it can speed up the random rate.
    # If installation fails, ignore the error and continue the test.
    if utils_misc.yum_install(["rng-tools"], timeout=300):
        rngd_conf = "/etc/sysconfig/rngd"
        rngd_srv = "/usr/lib/systemd/system/rngd.service"
        if os.path.exists(rngd_conf):
            # For rhel6 host, add extraoptions
            with open(rngd_conf, 'w') as f_rng:
                f_rng.write('EXTRAOPTIONS="--rng-device /dev/urandom"')
        elif os.path.exists(rngd_srv):
            # For rhel7 host, modify start options
            rngd_srv_conf = "/etc/systemd/system/rngd.service"
            if not os.path.exists(rngd_srv_conf):
                shutil.copy(rngd_srv, rngd_srv_conf)
            utils.run("sed -i -e 's#^ExecStart=.*#ExecStart=/sbin/rngd"
                      " -f -r /dev/urandom -o /dev/random#' %s"
                      % rngd_srv_conf)
            utils.run('systemctl daemon-reload')
        utils.run("service rngd start")

    # Build the xml and run test.
    try:
        bgjob = None
        # Take snapshot if needed
        if snapshot_name:
            if snapshot_vm_running:
                vm.start()
                vm.wait_for_login().close()
            ret = virsh.snapshot_create_as(vm_name, snapshot_name)
            libvirt.check_exit_status(ret)

        # Destroy VM first
        if vm.is_alive():
            vm.destroy(gracefully=False)

        # Build vm xml.
        dparams = {}
        if device_num > 1:
            for i in xrange(device_num):
                dparams[i] = {"rng_model": params.get(
                    "rng_model_%s" % i, "virtio")}
                dparams[i].update({"backend_model": params.get(
                    "backend_model_%s" % i, "random")})
                bk_type = params.get("backend_type_%s" % i)
                if bk_type:
                    dparams[i].update({"backend_type": bk_type})
                bk_dev = params.get("backend_dev_%s" % i)
                if bk_dev:
                    dparams[i].update({"backend_dev": bk_dev})
                bk_src = params.get("backend_source_%s" % i)
                if bk_src:
                    dparams[i].update({"backend_source": bk_src})
                bk_pro = params.get("backend_protocol_%s" % i)
                if bk_pro:
                    dparams[i].update({"backend_protocol": bk_pro})
                modify_rng_xml(dparams[i], False)
        else:
            modify_rng_xml(params, not test_snapshot)

        try:
            # Add random server
            if params.get("backend_type") == "tcp":
                cmd = "cat /dev/random | nc -4 -l localhost 1024"
                bgjob = utils.AsyncJob(cmd)

            # Start the VM.
            vm.start()
            if start_error:
                raise error.TestFail("VM started unexpectedly")

            if test_qemu_cmd:
                if device_num > 1:
                    for i in xrange(device_num):
                        check_qemu_cmd(dparams[i])
                else:
                    check_qemu_cmd(params)
            if test_host:
                check_host()
            session = vm.wait_for_login()
            if test_guest:
                check_guest(session)
            session.close()

            if test_snapshot:
                check_snapshot(bgjob)
        except virt_vm.VMStartError as details:
            logging.info(str(details))
            if not start_error:
                raise error.TestFail('VM failed to start, '
                                     'please refer to https://bugzilla.'
                                     'redhat.com/show_bug.cgi?id=1220252:'
                                     '\n%s' % details)

    finally:
        # Delete snapshots.
        snapshot_lists = virsh.snapshot_list(vm_name)
        if len(snapshot_lists) > 0:
            libvirt.clean_up_snapshots(vm_name, snapshot_lists)
            for snapshot in snapshot_lists:
                virsh.snapshot_delete(vm_name, snapshot, "--metadata")

        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        logging.info("Restoring vm...")
        vmxml_backup.sync()
        if bgjob:
            bgjob.kill_func()
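
The rate check in check_guest() above divides the byte count reported by `dd` by the elapsed time and compares it with the configured `rate_bytes`/`rate_period` limit. Below is a minimal standalone sketch of that calculation; the `rng_rate` value and the `dd` output are made up, while the regex, the ms-to-s conversion and the 1.2 tolerance factor mirror the test itself.

import ast
import logging
import re

logging.basicConfig(level=logging.DEBUG)

# Hypothetical stand-ins for params.get("rng_rate") and the dd command output.
rng_rate = "{'bytes': 5000, 'period': 1000}"    # allowed bytes per period (ms)
dd_output = "51200 bytes (51 kB) copied, 10.3 s, 5.0 kB/s"

# Mirrors the test's assumption that the dict literal keeps (bytes, period) order.
rate_bytes, rate_period = ast.literal_eval(rng_rate).values()
rate_conf = float(rate_bytes) / (float(rate_period) / 1000)    # bytes per second

match = re.search(r"(\d+) bytes.*copied, (\d+\.\d+) s", dd_output, re.M)
if not match:
    raise ValueError("Can't find rate from output")
rate_real = float(match.group(1)) / float(match.group(2))
logging.debug("Find rate: %s, config rate: %s", rate_real, rate_conf)
if rate_real > rate_conf * 1.2:
    logging.debug("Read rate %s exceeds the configured limit %s",
                  rate_real, rate_conf)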

Example 8

Project: jcvi
Source File: chromosome.py
View license
def main():
    """
    %prog bedfile id_mappings

    Takes a bedfile that contains the coordinates of features to plot on the
    chromosomes, and an `id_mappings` file that maps the ids to classes. Each
    class is assigned a unique color. The `id_mappings` file is optional (if
    omitted, the chromosome features are not painted, except the centromere).
    """
    p = OptionParser(main.__doc__)
    p.add_option("--title", default="Medicago truncatula v3.5",
            help="title of the image [default: `%default`]")
    p.add_option("--gauge", default=False, action="store_true",
            help="draw a gauge with size label [default: %default]")
    p.add_option("--imagemap", default=False, action="store_true",
            help="generate an HTML image map associated with the image [default: %default]")
    p.add_option("--winsize", default=50000, type="int",
            help="if drawing an imagemap, specify the window size (bases) of each map element "
                 "[default: %default bp]")
    p.add_option("--empty", help="Write legend for unpainted region")
    opts, args, iopts = p.set_image_options(figsize="6x6", dpi=300)

    if len(args) not in (1, 2):
        sys.exit(p.print_help())

    bedfile = args[0]
    mappingfile = None
    if len(args) == 2:
        mappingfile = args[1]

    winsize = opts.winsize
    imagemap = opts.imagemap
    w, h = iopts.w, iopts.h
    dpi = iopts.dpi

    prefix = bedfile.rsplit(".", 1)[0]
    figname = prefix + "." + opts.format
    if imagemap:
        imgmapfile = prefix + '.map'
        mapfh = open(imgmapfile, "w")
        print >> mapfh, '<map id="' + prefix + '">'

    if mappingfile:
        mappings = DictFile(mappingfile, delimiter="\t")
        classes = sorted(set(mappings.values()))
        logging.debug("A total of {0} classes found: {1}".format(len(classes),
            ','.join(classes)))
    else:
        mappings = {}
        classes = []
        logging.debug("No classes registered (no id_mappings given).")

    mycolors = "rgbymc"
    class_colors = dict(zip(classes, mycolors))

    bed = Bed(bedfile)
    chr_lens = {}
    centromeres = {}
    for b, blines in groupby(bed, key=(lambda x: x.seqid)):
        blines = list(blines)
        maxlen = max(x.end for x in blines)
        chr_lens[b] = maxlen

    for b in bed:
        accn = b.accn
        if accn == "centromere":
            centromeres[b.seqid] = b.start
        if accn in mappings:
            b.accn = mappings[accn]
        else:
            b.accn = '-'

    chr_number = len(chr_lens)
    if centromeres:
        assert chr_number == len(centromeres)

    fig = plt.figure(1, (w, h))
    root = fig.add_axes([0, 0, 1, 1])

    r = .7  # width and height of the whole chromosome set
    xstart, ystart = .15, .85
    xinterval = r / chr_number
    xwidth = xinterval * .5  # chromosome width
    max_chr_len = max(chr_lens.values())
    ratio = r / max_chr_len  # canvas / base

    # first the chromosomes
    for a, (chr, clen) in enumerate(sorted(chr_lens.items())):
        xx = xstart + a * xinterval + .5 * xwidth
        root.text(xx, ystart + .01, chr, ha="center")
        if centromeres:
            yy = ystart - centromeres[chr] * ratio
            ChromosomeWithCentromere(root, xx, ystart, yy,
                    ystart - clen * ratio, width=xwidth)
        else:
            Chromosome(root, xx, ystart, ystart - clen * ratio, width=xwidth)

    chr_idxs = dict((a, i) for i, a in enumerate(sorted(chr_lens.keys())))

    alpha = .75
    # color the regions
    for chr in sorted(chr_lens.keys()):
        segment_size, excess = 0, 0
        bac_list = []
        for b in bed.sub_bed(chr):
            clen = chr_lens[chr]
            idx = chr_idxs[chr]
            klass = b.accn
            start = b.start
            end = b.end
            xx = xstart + idx * xinterval
            yystart = ystart - end * ratio
            yyend = ystart - start * ratio
            root.add_patch(Rectangle((xx, yystart), xwidth, yyend - yystart,
                fc=class_colors.get(klass, "w"), lw=0, alpha=alpha))

            if imagemap:
                """
                `segment` : size of current BAC being investigated + `excess`
                `excess`  : left-over bases from the previous BAC, as a result of
                            iterating over `winsize` regions of `segment`
                """
                if excess == 0:
                    segment_start = start
                segment = (end - start + 1) + excess
                while True:
                    if segment < winsize:
                        bac_list.append(b.accn)
                        excess = segment
                        break
                    segment_end = segment_start + winsize - 1
                    tlx, tly, brx, bry = xx, (1 - ystart) + segment_start * ratio, \
                                  xx + xwidth, (1 - ystart) + segment_end * ratio
                    print >> mapfh, '\t' + write_ImageMapLine(tlx, tly, brx, bry, \
                            w, h, dpi, chr+":"+",".join(bac_list), segment_start, segment_end)

                    segment_start += winsize
                    segment -= winsize
                    bac_list = []

        if imagemap and excess > 0:
            bac_list.append(b.accn)
            segment_end = end
            tlx, tly, brx, bry = xx, (1 - ystart) + segment_start * ratio, \
                          xx + xwidth, (1 - ystart) + segment_end * ratio
            print >> mapfh, '\t' + write_ImageMapLine(tlx, tly, brx, bry, \
                    w, h, dpi, chr+":"+",".join(bac_list), segment_start, segment_end)

    if imagemap:
        print >> mapfh, '</map>'
        mapfh.close()
        logging.debug("Image map written to `{0}`".format(mapfh.name))

    if opts.gauge:
        xstart, ystart = .9, .85
        Gauge(root, xstart, ystart - r, ystart, max_chr_len)

    # class legends, four in a row
    xstart = .1
    xinterval = .2
    xwidth = .04
    yy = .08
    for klass, cc in sorted(class_colors.items()):
        if klass == '-':
            continue
        root.add_patch(Rectangle((xstart, yy), xwidth, xwidth, fc=cc, lw=0,
            alpha=alpha))
        root.text(xstart + xwidth + .01, yy, klass, fontsize=10)
        xstart += xinterval

    empty = opts.empty
    if empty:
        root.add_patch(Rectangle((xstart, yy), xwidth, xwidth, fill=False, lw=1))
        root.text(xstart + xwidth + .01, yy, empty, fontsize=10)

    root.text(.5, .95, opts.title, fontstyle="italic", ha="center", va="center")

    root.set_xlim(0, 1)
    root.set_ylim(0, 1)
    root.set_axis_off()

    savefig(figname, dpi=dpi, iopts=iopts)
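
The mapping step above relies on jcvi's `DictFile` to read the two-column `id_mappings` file before the classes are counted and logged. Here is a rough standard-library sketch of that load and the same debug message; the file name, its contents and the `load_mappings` helper are illustrative, not part of jcvi.

import logging

logging.basicConfig(level=logging.DEBUG)

def load_mappings(path, delimiter="\t"):
    """Read a two-column id<TAB>class file into a dict (stand-in for DictFile)."""
    mappings = {}
    with open(path) as fh:
        for line in fh:
            line = line.rstrip("\n")
            if not line:
                continue
            key, value = line.split(delimiter)[:2]
            mappings[key] = value
    return mappings

if __name__ == "__main__":
    mappings = load_mappings("id_mappings.tsv")   # hypothetical input file
    classes = sorted(set(mappings.values()))
    logging.debug("A total of {0} classes found: {1}".format(
        len(classes), ",".join(classes)))
    class_colors = dict(zip(classes, "rgbymc"))   # one single-letter color per class
    logging.debug("Color assignment: %s", class_colors)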

Example 9

Project: RMG-Py
Source File: statmech.py
View license
def projectRotors(conformer, F, rotors, linear, TS):
    """
    For a given `conformer` with associated force constant matrix `F`, a list
    of rotor information `rotors`, the linearity of the molecule `linear`, and
    a flag `TS` marking whether the species is a transition state, project out
    the nonvibrational modes from the force constant matrix and use this to
    determine the vibrational frequencies. The list of vibrational frequencies
    is returned in cm^-1.
    """
    
    Nrotors = len(rotors)
    Natoms = len(conformer.mass.value)
    Nvib = 3 * Natoms - (5 if linear else 6) - Nrotors - (1 if (TS) else 0)
    mass = conformer.mass.value_si
    coordinates = conformer.coordinates.getValue() 


    # Put origin in center of mass
    xm=0.0
    ym=0.0
    zm=0.0
    totmass=0.0
    for i in range(Natoms):
        xm+=mass[i]*coordinates[i,0]
        ym+=mass[i]*coordinates[i,1]
        zm+=mass[i]*coordinates[i,2]
        totmass+=mass[i]
          
    xm/=totmass
    ym/=totmass
    zm/=totmass

    for i in range(Natoms):
        coordinates[i,0]-=xm
        coordinates[i,1]-=ym 
        coordinates[i,2]-=zm
    # Make a vector with the square root of the mass (in amu) for each atom
    amass=numpy.sqrt(mass/constants.amu)

    # Rotation matrix
    I=conformer.getMomentOfInertiaTensor()
    PMoI, Ixyz = numpy.linalg.eigh(I)
 
    external=6
    if linear:
        external=5
    
    D = numpy.zeros((Natoms*3,external), numpy.float64)

    P = numpy.zeros((Natoms,3), numpy.float64)

    # Transform the coordinates to the principal axes
    P = numpy.dot(coordinates,Ixyz)

    for i in range(Natoms):
        # Projection vectors for translation
        D[3*i+0,0] = amass[i]
        D[3*i+1,1] = amass[i]
        D[3*i+2,2] = amass[i]

    # Construction of the projection vectors for external rotation
    for i in range(Natoms):
        D[3*i,3] = (P[i,1]*Ixyz[0,2]-P[i,2]*Ixyz[0,1])*amass[i]
        D[3*i+1,3] = (P[i,1]*Ixyz[1,2]-P[i,2]*Ixyz[1,1])*amass[i]
        D[3*i+2,3] = (P[i,1]*Ixyz[2,2]-P[i,2]*Ixyz[2,1])*amass[i]
        D[3*i,4] = (P[i,2]*Ixyz[0,0]-P[i,0]*Ixyz[0,2])*amass[i]
        D[3*i+1,4] = (P[i,2]*Ixyz[1,0]-P[i,0]*Ixyz[1,2])*amass[i]
        D[3*i+2,4] = (P[i,2]*Ixyz[2,0]-P[i,0]*Ixyz[2,2])*amass[i]
        if not linear:
            D[3*i,5] = (P[i,0]*Ixyz[0,1]-P[i,1]*Ixyz[0,0])*amass[i]
            D[3*i+1,5] = (P[i,0]*Ixyz[1,1]-P[i,1]*Ixyz[1,0])*amass[i]
            D[3*i+2,5] = (P[i,0]*Ixyz[2,1]-P[i,1]*Ixyz[2,0])*amass[i]

    # Make sure projection matrix is orthonormal
    import scipy.linalg

    I = numpy.identity(Natoms*3, numpy.float64)

    P = numpy.zeros((Natoms*3,3*Natoms+external), numpy.float64)

    P[:,0:external] = D[:,0:external]
    P[:,external:external+3*Natoms] = I[:,0:3*Natoms]
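    # Modified Gram-Schmidt: normalize each column of P in turn and subtract
    # its projection from all later columns, so the external-mode vectors and
    # the Cartesian unit vectors form an orthonormal set.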

    for i in range(3*Natoms+external):
        norm=0.0
        for j in range(3*Natoms):
            norm+=P[j,i]*P[j,i]
        for j in range(3*Natoms):
            if (norm>1E-15):
                P[j,i]/=numpy.sqrt(norm)
            else:
                P[j,i]=0.0
        for j in range(i+1,3*Natoms+external):
            proj=0.0
            for k in range(3*Natoms):
                proj+=P[k,i]*P[k,j]
            for k in range(3*Natoms):
                P[k,j]-=proj*P[k,i]

    # Compact P, dropping the columns that became 0.0 during orthonormalization
    i=0
    while i < 3*Natoms:
        norm=0.0
        for j in range(3*Natoms):
            norm+=P[j,i]*P[j,i]
        if (norm<0.5):
            P[:,i:3*Natoms+external-1] = P[:,i+1:3*Natoms+external]
        else:
            i+=1

    # T is the transformation matrix from Cartesian to internal coordinates
    T = numpy.zeros((Natoms*3,3*Natoms-external), numpy.float64)

    T[:,0:3*Natoms-external] = P[:,external:3*Natoms]

    # Generate mass-weighted force constant matrix
    # This converts the axes to mass-weighted Cartesian axes
    # Units of Fm are J/m^2*kg = 1/s^2
    Fm = F.copy()
    for i in range(Natoms):
        for j in range(Natoms):
            for u in range(3):
                for v in range(3):
                    Fm[3*i+u,3*j+v] /= math.sqrt(mass[i] * mass[j])

    Fint = numpy.dot(T.T, numpy.dot(Fm,T))

    # Get eigenvalues of internal force constant matrix, V = 3N-6 * 3N-6
    eig, V = numpy.linalg.eigh(Fint)

    logging.debug('Frequencies from internal Hessian')  
    for i in range(3*Natoms-external):
        logging.debug(numpy.sqrt(eig[i])/(2 * math.pi * constants.c * 100))

    # Now we can start thinking about projecting out the internal rotations
    Dint=numpy.zeros((3*Natoms,Nrotors), numpy.float64)

    counter=0
    for i, rotor in enumerate(rotors):
        scanLog, pivots, top, symmetry, fit = rotor
        # Determine pivot atom
        if pivots[0] in top:
            pivot1 = pivots[0]
            pivot2 = pivots[1]
        elif pivots[1] in top:
            pivot1 = pivots[1]
            pivot2 = pivots[0]
        else: raise Exception('Could not determine pivot atom.')
        # Projection vectors for internal rotation
        e12 = coordinates[pivot1-1,:] - coordinates[pivot2-1,:]
        for j in range(Natoms):
            atom=j+1
            if atom in top:
                e31 = coordinates[atom-1,:] - coordinates[pivot1-1,:]
                Dint[3*(atom-1):3*(atom-1)+3,counter] = numpy.cross(e31, e12)*amass[atom-1]
            else:
                e31 = coordinates[atom-1,:] - coordinates[pivot2-1,:]
                Dint[3*(atom-1):3*(atom-1)+3,counter] = numpy.cross(e31, -e12)*amass[atom-1]
        counter+=1

    # Normal modes in mass weighted cartesian coordinates
    Vmw = numpy.dot(T,V)
    eigM = numpy.zeros((3*Natoms-external,3*Natoms-external), numpy.float64)

    for i in range(3*Natoms-external):
        eigM[i,i]=eig[i]
 
    Fm=numpy.dot(Vmw,numpy.dot(eigM,Vmw.T))

    # Internal rotations are not normal modes => project them on the normal modes and orthogonalize
    # Dintproj =  (3N-6) x (3N) x (3N) x (Nrotors)
    Dintproj=numpy.dot(Vmw.T,Dint)    

    # Reconstruct Dint
    for i in range(Nrotors):
        for j in range (3*Natoms):
            Dint[j,i]=0
            for k in range(3*Natoms-external):
                Dint[j,i]+=Dintproj[k,i]*Vmw[j,k]

    # Ortho normalize
    for i in range(Nrotors):
        norm=0.0
        for j in range(3*Natoms):
            norm+=Dint[j,i]*Dint[j,i]
        for j in range(3*Natoms):
            Dint[j,i]/=numpy.sqrt(norm)
        for j in range(i+1,Nrotors):
            proj=0.0
            for k in range (3*Natoms):
                proj+=Dint[k,i]*Dint[k,j]
            for k in range(3*Natoms):
                Dint[k,j]-=proj*Dint[k,i]

    Dintproj=numpy.dot(Vmw.T,Dint)
    Proj = numpy.dot(Dint, Dint.T)
    I = numpy.identity(Natoms*3, numpy.float64)
    Proj = I - Proj 
    Fm=numpy.dot(Proj, numpy.dot(Fm,Proj))
    # Get eigenvalues of mass-weighted force constant matrix
    eig, V = numpy.linalg.eigh(Fm)
    eig.sort()

    # Convert eigenvalues to vibrational frequencies in cm^-1
    # Only keep the modes that don't correspond to translation, rotation, or internal rotation

    logging.debug('Frequencies from projected Hessian')
    for i in range(3*Natoms):
        logging.debug(numpy.sqrt(eig[i])/(2 * math.pi * constants.c * 100))
        
    return numpy.sqrt(eig[-Nvib:]) / (2 * math.pi * constants.c * 100)
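
Both debug loops above convert Hessian eigenvalues (in 1/s^2) to wavenumbers as sqrt(eig) / (2*pi*c), with c the speed of light in cm/s. A small self-contained sketch of just that conversion and its debug output, using made-up eigenvalues:

import logging
import math

import numpy

logging.basicConfig(level=logging.DEBUG)

C_CM_PER_S = 2.99792458e10    # speed of light in cm/s (constants.c * 100 above)

def eigenvalues_to_wavenumbers(eig):
    """Convert eigenvalues of a mass-weighted Hessian (1/s^2) to cm^-1."""
    eig = numpy.asarray(eig, dtype=float)
    # Clip tiny negative eigenvalues (numerical noise) before the square root;
    # the original code takes the square root directly.
    return numpy.sqrt(numpy.clip(eig, 0.0, None)) / (2 * math.pi * C_CM_PER_S)

# Hypothetical eigenvalues; real values come from numpy.linalg.eigh(Fm).
sample_eig = [1.0e27, 4.0e27, 9.0e27]
logging.debug('Frequencies from projected Hessian')
for nu in eigenvalues_to_wavenumbers(sample_eig):
    logging.debug(nu)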

Example 10

Project: pb-tracker
Source File: handler.py
View license
    def get_pblist( self, username, page_num, show_all ):
        key = self.get_pblist_memkey( username )
        cached_pblists = memcache.get( key )
        if cached_pblists is None:
            cached_pblists = dict( )
        res = cached_pblists.get( page_num )
        if res is None or ( show_all and not res['show_all'] ):
            pblist = [ ]
            c = None
            if res is None:
                res = dict( page_num=page_num, has_next=False,
                            show_all=show_all )
                # Not in memcache, so construct the pblist and store in
                # memcache.
                # pblist is a list of dictionaries with 3 indices, 'game', 
                # 'game_code' and 'infolist'.  The infolist is another list of 
                # dictionaries containing all the info for each pb of the game.
                c = memcache.get( self.get_pblist_cursor_memkey(
                    username, page_num ) )
            else:
                # Need to update this page to a show_all
                res['show_all'] = show_all
                res['has_next'] = False
                res['page_num'] = page_num
                if page_num == 1:
                    # Can try to start from normal page 2
                    pblist = res['pblist']
                    c = memcache.get( self.get_pblist_cursor_memkey(
                        username, page_num + 1 ) )
                        
            try:
                q = db.Query( runs.Runs,
                              projection=['game', 'category', 'seconds',
                                          'date', 'video', 'version'] )
                q.ancestor( runs.key() )
                q.filter( 'username =', username )
                q.order( 'game' )
                q.order( 'category' )
                q.order( 'seconds' )
                if c:
                    try:
                        q.with_cursor( start_cursor=c )
                    except BadRequestError:
                        res['page_num'] = 1
                        pblist = [ ]
                else:
                    res['page_num'] = 1
                    pblist = [ ]
                cur_game = None
                pb = None
                if len( pblist ) > 0:
                    cur_game = pblist[ -1 ]['game']
                    pb = pblist[ -1 ]
                cur_category = None
                info = None
                cursor_to_save = c
                last_cursor = None
                runs_queried = 0
                limit = self.PB_PAGE_LIMIT
                if show_all:
                    limit = self.PB_PAGE_SHOW_ALL_LIMIT
                for run in q.run( limit=limit ):
                    if run.game != cur_game:
                        # New game
                        pb = dict( game = run.game,
                                   game_code = util.get_code( run.game ),
                                   num_runs = 0,
                                   infolist = [ ] )
                        pblist.append( pb )
                        cur_game = run.game
                        cur_category = None

                    if run.category != cur_category:
                        # New category
                        info = dict( username = username,
                                     username_code = util.get_code( username ),
                                     category = run.category,
                                     category_code = util.get_code(
                                         run.category ),
                                     pb_seconds = run.seconds,
                                     pb_time = util.seconds_to_timestr(
                                         run.seconds ),
                                     pb_date = run.date,
                                     num_runs = 1,
                                     avg_seconds = run.seconds,
                                     avg_time = util.seconds_to_timestr(
                                         run.seconds, dec_places=0 ),
                                     video = run.video,
                                     version = run.version )
                        pb['infolist'].append( info )
                        cur_category = run.category
                        if last_cursor is not None:
                            cursor_to_save = last_cursor
                    else:
                        # Repeat game, category
                        info['num_runs'] += 1
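                        # Incremental running mean:
                        # new_avg = old_avg + (x - old_avg) / n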
                        info['avg_seconds'] += ( 1.0 / info['num_runs'] ) * (
                            run.seconds - info['avg_seconds'] )
                        info['avg_time'] = util.seconds_to_timestr(
                            info['avg_seconds'], dec_places=0 )
                    pb['num_runs'] += 1
                    runs_queried += 1
                    last_cursor = q.cursor( )
                
                if runs_queried < limit:
                    # Made it to the end
                    cursor_to_save = q.cursor( )
                    if res['page_num'] == 1:
                        res['show_all'] = True
                else:
                    res['has_next'] = True

                    # Last category found is possibly incomplete, so remove
                    del pblist[ -1 ]['infolist'][ -1 ]
                    if len( pblist[ -1 ]['infolist'] ) <= 0:
                        del pblist[ -1 ]
                        if len( pblist ) <= 0:
                            # Too many runs for this game, category
                            pb = dict(
                                game = cur_game,
                                game_code = util.get_code( cur_game ),
                                num_runs = 0,
                                infolist = [ dict( 
                                    username=username,
                                    username_code = util.get_code(
                                        username ),
                                    category=( 'TOO MANY RUNS FOR '
                                               + 'CATEGORY: '
                                               + cur_category
                                               + ' (max is '
                                               + str( self.PB_PAGE_LIMIT - 1 )
                                               + ', please delete some runs)' ),
                                    category_code=util.get_code(
                                        cur_category ),
                                    pb_seconds=0,
                                    pb_time=util.seconds_to_timestr( 0 ),
                                    pb_date=None,
                                    num_runs=0,
                                    avg_seconds=0,
                                    avg_time=util.seconds_to_timestr( 0 ),
                                    video=None ) ] )
                            pblist.append( pb )
                    if res['show_all']:
                        # Over limit even with show all
                        res['has_next'] = False
                        pb = dict( game='TOO MANY RUNS TO SHOW ALL',
                                   game_code='???',
                                   num_runs=0,
                                   infolist=[ dict(
                                       username=username,
                                       username_code = util.get_code(
                                           username ),
                                       category=( 'MAX IS ' + str(
                                           self.PB_PAGE_SHOW_ALL_LIMIT )
                                                  + ', PLEASE DELETE '
                                                  + 'SOME RUNS' ),
                                       category_code='???',
                                       pb_seconds=0,
                                       pb_time=util.seconds_to_timestr( 0 ),
                                       pb_date=None,
                                       num_runs=0,
                                       avg_seconds=0,
                                       avg_time=util.seconds_to_timestr( 0 ),
                                       video=None ) ] )
                        res['pblist'] = [ pb ]
                        return res

                cursor_key = self.get_pblist_cursor_memkey(
                    username, res['page_num'] + 1 )
                if memcache.set( cursor_key, cursor_to_save ):
                    logging.debug( 'Set ' + cursor_key + " in memcache" )
                else:
                    logging.warning( 'Failed to set ' + cursor_key
                                     + ' in memcache' )
            except apiproxy_errors.OverQuotaError, msg:
                logging.error( msg )
                return self.OVER_QUOTA_ERROR

            # Sort the categories for a game by num_runs
            for pb in pblist:
                pb['infolist'].sort( key=itemgetter('num_runs'), reverse=True )

            res['pblist'] = pblist
            cached_pblists[ res['page_num'] ] = res
            if memcache.set( key, cached_pblists ):
                logging.debug( "Set " + key + " in memcache" )
            else:
                logging.warning( "Failed to set " + key + " in memcache" )
        else:
            logging.debug( "Got " + key + " from memcache" )
        return res
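
Every cache interaction in get_pblist() logs a debug line for a successful memcache set or get and a warning when the set fails. A minimal sketch of that hit/miss logging follows, with a plain dict standing in for App Engine's memcache; the helper names and key format are made up.

import logging

logging.basicConfig(level=logging.DEBUG)

_fake_memcache = {}    # stand-in for google.appengine.api.memcache

def cache_set(key, value):
    _fake_memcache[key] = value
    # The real memcache.set() can fail; the dict stand-in cannot, so the
    # warning branch here is only illustrative.
    if key in _fake_memcache:
        logging.debug("Set " + key + " in memcache")
        return True
    logging.warning("Failed to set " + key + " in memcache")
    return False

def cache_get(key):
    value = _fake_memcache.get(key)
    if value is not None:
        logging.debug("Got " + key + " from memcache")
    return value

pblist_key = "pblist:someuser"    # hypothetical key format
cache_set(pblist_key, {1: {"pblist": [], "page_num": 1}})
cache_get(pblist_key)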

Example 11

Project: tp-libvirt
Source File: virsh_domtime.py
View license
@error.context_aware
def run(test, params, env):
    """
    This test virsh domtime command and its options.

    1) Start a guest with/without guest agent configured;
    2) Record guest times;
    3) Do some operation to stop VM;
    4) Run virsh domtime command with different options;
    5) Check the command result;
    6) Check the guest times against expectation;
    7) Cleanup test environment.
    """
    epoch = datetime.datetime(1970, 1, 1, 0, 0, 0)
    # Max time that can be set successfully with domtime in newer qemu-ga
    time_max_1 = 3155731199
    # Max time that can be set successfully with domtime in older qemu-ga
    time_max_2 = 3155759999
    # Max time accepted by domtime but failing to set the RTC in older qemu-ga
    time_max_3 = 9223372035

    def init_time(session):
        """
        Initialize guest RTC time to epoch + 1234567890 and system time
        one day later.

        :param session: Session from which to access guest
        """
        res = virsh.domtime(vm_name, time=1234567890)
        if res.exit_status:
            logging.debug("Failed to init time to 1234567890:\n%s", res)
        status, output = session.cmd_status_output('date -s "1 day"')
        if status:
            raise error.TestError("Failed to set guest time:\n%s" % output)

    def get_host_utc_time():
        """
        Get host UTC time from date command.
        """
        res = utils.run("date -u")
        # Strip timezone info from output
        # e.g. 'Sun Feb 15 07:31:40 CST 2009' -> 'Sun Feb 15 07:31:40 2009'
        time_str = re.sub(r'\S+ (?=\S+$)', '', res.stdout.strip())
        return datetime.datetime.strptime(time_str,
                                          r"%a %b %d %H:%M:%S %Y")

    def run_cmd(session, cmd):
        """
        Run a command in a session and record duration of call.
        """
        start = time.time()
        output = session.cmd_output(cmd)
        duration = time.time() - start
        logging.info('Result of command "%s". Duration: %s. Output:%s',
                     cmd, duration, output.strip())
        return output, duration

    def get_guest_times(session):
        """
        Retrieve different guest time as a dict for checking.
        Keys:
            local_hw: Guest RTC time in local timezone
            local_sys: Guest system time in local timezone
            utc_sys: Guest system time in UTC
            domtime: Guest system time in UTC got from virsh domtime command

        :param session: Session from which to access guest
        """
        times = {}
        get_begin = time.time()
        # Guest RTC local timezone time
        output, _ = run_cmd(session, 'hwclock')
        time_str, _ = re.search(r"(.+)  (\S+ seconds)", output).groups()

        try:
            # output format 1: Tue 01 Mar 2016 01:53:46 PM CST
            # Remove timezone info from output
            new_str = re.sub(r'\S+$', '', time_str)
            times['local_hw'] = datetime.datetime.strptime(
                new_str, r"%a %d %b %Y %I:%M:%S %p")
        except ValueError:
            # There are two possible output format for `hwclock`
            # output format 2: Sat Feb 14 07:31:33 2009
            times['local_hw'] = datetime.datetime.strptime(
                time_str, r"%a %b %d %H:%M:%S %Y")
        delta = time.time() - get_begin
        times['local_hw'] -= datetime.timedelta(seconds=delta)

        # Guest system local timezone time
        output, _ = run_cmd(session, 'date')
        # Strip timezone info from output
        # e.g. 'Sun Feb 15 07:31:40 CST 2009' -> 'Sun Feb 15 07:31:40 2009'
        time_str = re.sub(r'\S+ (?=\S+$)', '', output.strip())
        times['local_sys'] = datetime.datetime.strptime(
            time_str, r"%a %b %d %H:%M:%S %Y")
        delta = time.time() - get_begin
        times['local_sys'] -= datetime.timedelta(seconds=delta)

        # Guest system UTC timezone time
        output, _ = run_cmd(session, 'date -u')
        # Strip timezone info from output
        # e.g. 'Sun Feb 15 07:31:40 CST 2009' -> 'Sun Feb 15 07:31:40 2009'
        time_str = re.sub(r'\S+ (?=\S+$)', '', output.strip())
        times['utc_sys'] = datetime.datetime.strptime(
            time_str, r"%a %b %d %H:%M:%S %Y")
        delta = time.time() - get_begin
        times['utc_sys'] -= datetime.timedelta(seconds=delta)

        # Guest UTC time from virsh domtime
        res = virsh.domtime(vm_name, pretty=True, ignore_status=True)
        if not res.exit_status:
            logging.info('Result of "domtime". Duration: %s. Output:%s',
                         res.duration, res.stdout.strip())
            _, time_str = res.stdout.split(" ", 1)
            times['domtime'] = datetime.datetime.strptime(
                time_str.strip(), r"%Y-%m-%d %H:%M:%S")
            delta = time.time() - get_begin
            times['domtime'] -= datetime.timedelta(seconds=delta)
        else:
            logging.debug("Unable to get domain time:\n%s", res)
            times['domtime'] = None

        return times, time.time() - get_begin

    def check_get_success(expected_times):
        """
        Check virsh command get result against expected times

        :param expected_times: Expected time for checking
        """
        _, time_str = res.stdout.split(" ", 1)
        if pretty:
            # Time: 2015-01-13 06:29:18
            domtime = datetime.datetime.strptime(time_str.strip(),
                                                 r"%Y-%m-%d %H:%M:%S")
        else:
            # Time: 1421130740
            domtime = epoch + datetime.timedelta(seconds=int(time_str))
        time_shift = time.time() - start
        logging.debug("Time shift is %s", time_shift)
        result_diff = (domtime - expected_times['domtime']).total_seconds()
        if abs(result_diff) > 2.0:
            raise error.TestFail("Expect get time %s, but got %s, time "
                                 "diff: %s" % (org_times['domtime'],
                                               domtime, result_diff))

    def check_guest_times(expected_times, cur_times):
        """
        Check guest times after test against expected times

        :param expected_times: Expected time for checking
        :param cur_times: Current guest times to check against expectations
        """
        time_shift = time.time() - start
        logging.debug("Time shift is %s", time_shift)

        error_msgs = []
        for key in cur_times:
            if cur_times[key] is not None:
                cur = cur_times[key]
                expect = expected_times[key]

                diff = (cur - expect).total_seconds()
                msg = "For %s, expect get time %s, got %s, time diff: %s" % (
                    key, expect, cur, diff)
                logging.debug(msg)
                if abs(diff) > 2.0:
                    error_msgs.append(msg)
        if error_msgs:
            raise error.TestFail('\n'.join(error_msgs))

    def check_time(result, org_times, cur_times):
        """
        Check whether domain time has been changed accordingly.

        :param result: virsh domtime CmdResult instance
        :param org_times: Original guest times
        :param cur_times: Current guest times after the test operations
        """
        action = "get"
        if now or sync or (set_time is not None):
            action = "set"

        tz_diff = org_times['local_sys'] - org_times['utc_sys']
        logging.debug("Timezone diff on guest is %d hours.",
                      (tz_diff.total_seconds() / 3600))

        # Hardware time will never stop
        logging.info('Add %ss to expected guest time', interval)
        if action == 'get':
            expected_times = org_times
        elif action == 'set':
            if result.exit_status:
                # Time not change if domtime fails
                expected_times = org_times
            else:
                # Time change accordingly if succeed.
                if now:
                    utc_time = org_host_time
                    local_time = utc_time + tz_diff
                elif sync:
                    local_time = org_times["local_hw"]
                    utc_time = local_time - tz_diff
                elif set_time is not None:
                    utc_time = epoch + datetime.timedelta(
                        seconds=(int(set_time) - guest_duration))
                    local_time = utc_time + tz_diff
                expected_times = {}
                expected_times['local_hw'] = local_time
                expected_times['local_sys'] = local_time
                expected_times["utc_sys"] = utc_time
                expected_times["domtime"] = utc_time

        # Add interval between two checks of guest time
        for key in expected_times:
            if expected_times[key] is not None:
                expected_times[key] += interval

        # Hardware time will never stop
        # Software time will stop if suspended or managed-saved
        if suspend or managedsave:
            logging.info('Remove %ss from expected guest software time',
                         stop_time)
            expected_times["domtime"] -= stop_time
            expected_times["local_sys"] -= stop_time
            expected_times["utc_sys"] -= stop_time

        # Check guest time if domtime succeeded
        check_guest_times(expected_times, cur_times)

        # Check if output of domtime is correct
        if action == 'get' and not result.exit_status:
            check_get_success(expected_times)

    def prepare_fail_patts():
        """
        Predict fail pattern from test parameters.
        """
        fail_patts = []
        if not channel:
            fail_patts.append(r"QEMU guest agent is not configured")
        if not agent:
            # For older version
            fail_patts.append(r"Guest agent not available for now")
            # For newer version
            fail_patts.append(r"Guest agent is not responding")
        if int(now) + int(sync) + int(bool(set_time)) > 1:
            fail_patts.append(r"Options \S+ and \S+ are mutually exclusive")
        if shutdown:
            fail_patts.append(r"domain is not running")

        if set_time is not None:
            if int(set_time) < 0:
                fail_patts.append(r"Invalid argument")
            elif time_max_1 < int(set_time) <= time_max_2:
                fail_patts.append(r"Invalid time")
            elif time_max_2 < int(set_time) <= time_max_3:
                fail_patts.append(r"Invalid time")
            elif time_max_3 < int(set_time):
                fail_patts.append(r"too big for guest agent")
        return fail_patts

    def stop_vm():
        """
        Suspend, managedsave, pmsuspend or shutdown a VM for a period of time
        """
        stop_start = time.time()
        if suspend:
            vm.pause()
            time.sleep(10)
            vm.resume()
        elif managedsave:
            vm.managedsave()
            time.sleep(10)
            vm.start()
            vm.wait_for_login()
        elif pmsuspend:
            vm.pmsuspend()
            time.sleep(10)
            vm.pmwakeup()
            vm.wait_for_login()
        elif shutdown:
            vm.destroy()

        # Check real guest stop time
        stop_seconds = time.time() - stop_start
        stop_time = datetime.timedelta(seconds=stop_seconds)
        logging.debug("Guest stopped: %s", stop_time)
        return stop_time

    # Check availability of virsh command domtime
    if not virsh.has_help_command('domtime'):
        raise error.TestNAError("This version of libvirt does not support "
                                "the domtime test")

    channel = (params.get("prepare_channel", "yes") == 'yes')
    agent = (params.get("start_agent", "yes") == 'yes')
    pretty = (params.get("domtime_pretty", "no") == 'yes')
    now = (params.get("domtime_now", "no") == 'yes')
    sync = (params.get("domtime_sync", "no") == 'yes')
    set_time = params.get("domtime_time", None)

    shutdown = (params.get("shutdown_vm", "no") == 'yes')
    suspend = (params.get("suspend_vm", "no") == 'yes')
    managedsave = (params.get("managedsave_vm", "no") == 'yes')
    pmsuspend = (params.get("pmsuspend_vm", "no") == 'yes')

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    # Backup domain XML
    xml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    try:
        if pmsuspend:
            vm_xml.VMXML.set_pm_suspend(vm_name)
        # Add or remove qemu-agent from guest before test
        vm.prepare_guest_agent(channel=channel, start=agent)
        session = vm.wait_for_login()
        try:
            if channel and agent:
                init_time(session)

            # Expected fail message patterns
            fail_patts = prepare_fail_patts()

            # Message patterns test should skip when met
            skip_patts = [
                r'The command \S+ has not been found',
            ]

            # Record start time
            start = time.time()

            # Record host time before testing
            org_host_time = get_host_utc_time()
            # Get original guest times
            org_times, guest_duration = get_guest_times(session)

            # Run some operations to stop guest system
            stop_time = stop_vm()

            # Run command with specified options.
            res = virsh.domtime(vm_name, now=now, pretty=pretty, sync=sync,
                                time=set_time)
            libvirt.check_result(res, fail_patts, skip_patts)

            # Check interval between two check of guest time
            interval = datetime.timedelta(
                seconds=(time.time() - start))
            logging.debug("Interval between guest checking: %s", interval)

            if not shutdown:
                # Get current guest times
                cur_times, _ = get_guest_times(session)

                check_time(res, org_times, cur_times)
        finally:
            # Sync guest time with host
            if channel and agent and not shutdown:
                res = virsh.domtime(vm_name, now=True)
                if res.exit_status:
                    session.close()
                    raise error.TestError("Failed to recover guest time:\n%s"
                                          % res)
            session.close()
    finally:
        # Restore VM XML
        xml_backup.sync()
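
get_host_utc_time() and get_guest_times() above both strip the timezone token from `date` output with re.sub(r'\S+ (?=\S+$)', '', ...) before parsing, then subtract the seconds spent collecting the value. A standalone sketch of that parsing, reusing the sample string from the comments (everything else is illustrative):

import datetime
import logging
import re
import time

logging.basicConfig(level=logging.DEBUG)

def parse_date_output(output):
    """Strip the timezone token from `date` output and parse the rest."""
    # e.g. 'Sun Feb 15 07:31:40 CST 2009' -> 'Sun Feb 15 07:31:40 2009'
    time_str = re.sub(r'\S+ (?=\S+$)', '', output.strip())
    return datetime.datetime.strptime(time_str, r"%a %b %d %H:%M:%S %Y")

get_begin = time.time()
guest_output = "Sun Feb 15 07:31:40 CST 2009"    # sample from the comments above
guest_time = parse_date_output(guest_output)
# Compensate for the time spent collecting the value, as the test does.
guest_time -= datetime.timedelta(seconds=time.time() - get_begin)
logging.debug("Parsed guest time: %s", guest_time)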

Example 12

Project: tp-qemu
Source File: netperf.py
View license
@error.context_aware
def launch_client(sessions, server, server_ctl, host, clients, l, nf_args,
                  port, params, server_cyg):
    """ Launch netperf clients """

    netperf_version = params.get("netperf_version", "2.6.0")
    client_path = "/tmp/netperf-%s/src/netperf" % netperf_version
    server_path = "/tmp/netperf-%s/src/netserver" % netperf_version
    get_status_flag = params.get("get_status_in_guest", "no") == "yes"
    global _netserver_started
    # Start netserver
    if _netserver_started:
        logging.debug("Netserver already started.")
    else:
        error.context("Start Netserver on guest", logging.info)
        if params.get("os_type") == "windows":
            timeout = float(params.get("timeout", "240"))
            cdrom_drv = utils_misc.get_winutils_vol(server_ctl)
            if params.get("use_cygwin") == "yes":
                netserv_start_cmd = params.get("netserv_start_cmd")
                netperf_src = params.get("netperf_src") % cdrom_drv
                cygwin_root = params.get("cygwin_root")
                netserver_path = params.get("netserver_path")
                netperf_install_cmd = params.get("netperf_install_cmd")
                start_session = server_cyg
                logging.info("Start netserver with cygwin, cmd is: %s" %
                             netserv_start_cmd)
                if "netserver" not in server_ctl.cmd_output("tasklist"):
                    netperf_pack = "netperf-%s" % params.get("netperf_version")
                    s_check_cmd = "dir %s" % netserver_path
                    p_check_cmd = "dir %s" % cygwin_root
                    if not ("netserver.exe" in server_ctl.cmd(s_check_cmd) and
                            netperf_pack in server_ctl.cmd(p_check_cmd)):
                        error.context("Install netserver in Windows guest cygwin",
                                      logging.info)
                        cmd = "xcopy %s %s /S /I /Y" % (netperf_src, cygwin_root)
                        server_ctl.cmd(cmd)
                        server_cyg.cmd_output(netperf_install_cmd, timeout=timeout)
                        if "netserver.exe" not in server_ctl.cmd(s_check_cmd):
                            err_msg = "Install netserver cygwin failed"
                            raise error.TestNAError(err_msg)
                        logging.info("Install netserver in cygwin successfully")
            else:
                start_session = server_ctl
                netserv_start_cmd = params.get("netserv_start_cmd") % cdrom_drv
                logging.info("Start netserver without cygwin, cmd is: %s" %
                             netserv_start_cmd)

            error.context("Start netserver on windows guest", logging.info)
            start_netserver_win(start_session, netserv_start_cmd)

        else:
            logging.info("Netserver start cmd is '%s'" % server_path)
            ssh_cmd(server_ctl, "pidof netserver || %s" % server_path)
            ncpu = ssh_cmd(server_ctl, "cat /proc/cpuinfo |grep processor |wc -l")
            ncpu = re.findall(r"\d+", ncpu)[-1]

        logging.info("Netserver start successfully")

    def count_interrupt(name):
        """
        Get a list of interrupt numbers for each queue

        @param name: the name of interrupt, such as "virtio0-input"
        """
        sum = 0
        intr = []
        stat = ssh_cmd(server_ctl, "cat /proc/interrupts |grep %s" % name)
        for i in stat.strip().split("\n"):
            for cpu in range(int(ncpu)):
                sum += int(i.split()[cpu + 1])
            intr.append(sum)
            sum = 0
        return intr

    def get_state():
        for i in ssh_cmd(server_ctl, "ifconfig").split("\n\n"):
            if server in i:
                ifname = re.findall(r"(\w+\d+)[:\s]", i)[0]

        path = "find /sys/devices|grep net/%s/statistics" % ifname
        cmd = "%s/rx_packets|xargs cat;%s/tx_packets|xargs cat;" \
            "%s/rx_bytes|xargs cat;%s/tx_bytes|xargs cat" % (path,
                                                             path, path, path)
        output = ssh_cmd(server_ctl, cmd).split()[-4:]

        nrx = int(output[0])
        ntx = int(output[1])
        nrxb = int(output[2])
        ntxb = int(output[3])

        nre = int(ssh_cmd(server_ctl, "grep Tcp /proc/net/snmp|tail -1"
                          ).split()[12])
        state_list = ['rx_pkts', nrx, 'tx_pkts', ntx, 'rx_byts', nrxb,
                      'tx_byts', ntxb, 're_pkts', nre]
        try:
            nrx_intr = count_interrupt("virtio.-input")
            ntx_intr = count_interrupt("virtio.-output")
            sum = 0
            for i in range(len(nrx_intr)):
                state_list.append('rx_intr_%s' % i)
                state_list.append(nrx_intr[i])
                sum += nrx_intr[i]
            state_list.append('rx_intr_sum')
            state_list.append(sum)

            sum = 0
            for i in range(len(ntx_intr)):
                state_list.append('tx_intr_%s' % i)
                state_list.append(ntx_intr[i])
                sum += ntx_intr[i]
            state_list.append('tx_intr_sum')
            state_list.append(sum)

        except IndexError:
            ninit = count_interrupt("virtio.")
            state_list.append('intr')
            state_list.append(ninit)

        if arch.ARCH in ('ppc64', 'ppc64le'):
            exits = int(ssh_cmd(host, "cat /sys/kernel/debug/kvm/exits"))
            state_list.append('exits')
            state_list.append(exits)
        else:
            io_exits = int(ssh_cmd(host, "cat /sys/kernel/debug/kvm/io_exits"))
            irq_injs = int(
                ssh_cmd(host, "cat /sys/kernel/debug/kvm/irq_injections"))
            state_list.append('io_exits')
            state_list.append(io_exits)
            state_list.append('irq_injs')
            state_list.append(irq_injs)

        return state_list

    def netperf_thread(i, numa_enable, client_s, timeout):
        cmd = ""
        fname = "/tmp/netperf.%s.nf" % pid
        if numa_enable:
            output = ssh_cmd(client_s, "numactl --hardware")
            n = re.findall(r"node (\d+) cpus:", output)[-1]
            cmd += "numactl --cpunodebind=%s --membind=%s " % (n, n)
        cmd += "/tmp/netperf_agent.py %d %s -D 1 -H %s -l %s %s" % (i,
                                                                    client_path, server, int(l) * 1.5, nf_args)
        cmd += " >> %s" % fname
        logging.info("Start netperf thread by cmd '%s'" % cmd)
        ssh_cmd(client_s, cmd)

    def all_clients_up():
        try:
            content = ssh_cmd(clients[-1], "cat %s" % fname)
        except:
            content = ""
            return False
        if int(sessions) == len(re.findall("MIGRATE", content)):
            return True
        return False

    def parse_demo_result(fname, sessions):
        """
        Process the demo result, remove the noise from the head,
        and compute the final throughput.

        :param fname: result file name
        :param sessions: sessions' number
        """
        fd = open(fname)
        lines = fd.readlines()
        fd.close()

        for i in range(1, len(lines) + 1):
            if "AF_INET" in lines[-i]:
                break
        nresult = i - 1
        if nresult < int(sessions):
            raise error.TestError("We couldn't expect this parallism,"
                                  "expect %s get %s" % (sessions, nresult))

        niteration = nresult / sessions
        result = 0.0
        for this in lines[-sessions * niteration:]:
            if "Interim" in this:
                result += float(re.findall(r"Interim result: *(\S+)", this)[0])
        result = result / niteration
        logging.debug("niteration: %s" % niteration)
        return result

    error.context("Start netperf client threads", logging.info)
    pid = str(os.getpid())
    fname = "/tmp/netperf.%s.nf" % pid
    ssh_cmd(clients[-1], "rm -f %s" % fname)
    numa_enable = params.get("netperf_with_numa", "yes") == "yes"
    timeout_netperf_start = int(l) * 0.5
    client_thread = threading.Thread(target=netperf_thread,
                                     kwargs={"i": int(sessions),
                                             "numa_enable": numa_enable,
                                             "client_s": clients[0],
                                             "timeout": timeout_netperf_start})
    client_thread.start()

    ret = {}
    ret['pid'] = pid

    if utils_misc.wait_for(all_clients_up, timeout_netperf_start, 0.0, 0.2,
                           "Wait until all netperf clients start to work"):
        logging.debug("All netperf clients start to work.")
    else:
        raise error.TestNAError("Error, not all netperf clients at work")

    # real & effective test starts
    if get_status_flag:
        start_state = get_state()
    ret['mpstat'] = ssh_cmd(host, "mpstat 1 %d |tail -n 1" % (l - 1))
    finished_result = ssh_cmd(clients[-1], "cat %s" % fname)

    # stop netperf clients
    kill_cmd = "killall netperf"
    if params.get("os_type") == "windows":
        kill_cmd = "taskkill /F /IM netperf*"
    ssh_cmd(clients[-1], kill_cmd, ignore_status=True)

    # real & effective test ends
    if get_status_flag:
        end_state = get_state()
        if len(start_state) != len(end_state):
            msg = "Initial state not match end state:\n"
            msg += "  start state: %s\n" % start_state
            msg += "  end state: %s\n" % end_state
            logging.warn(msg)
        else:
            for i in range(len(end_state) / 2):
                ret[end_state[i * 2]] = (end_state[i * 2 + 1] -
                                         start_state[i * 2 + 1])

    client_thread.join()

    error.context("Testing Results Treatment and Report", logging.info)
    f = open(fname, "w")
    f.write(finished_result)
    f.close()
    ret['thu'] = parse_demo_result(fname, int(sessions))
    return ret
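
parse_demo_result() above averages the per-second "Interim result" samples emitted by netperf's demo mode over the last complete iterations. A trimmed, self-contained sketch of that averaging follows; the demo output is made up, and only the header scan and the regex mirror the original.

import logging
import re

logging.basicConfig(level=logging.DEBUG)

def parse_demo_result(lines, sessions):
    """Average the 'Interim result' samples over complete iterations."""
    # Count the result lines that follow the last header containing "AF_INET".
    for i in range(1, len(lines) + 1):
        if "AF_INET" in lines[-i]:
            break
    nresult = i - 1
    if nresult < sessions:
        raise ValueError("expected at least %s results, got %s"
                         % (sessions, nresult))
    niteration = nresult // sessions
    total = 0.0
    for line in lines[-sessions * niteration:]:
        if "Interim" in line:
            total += float(re.findall(r"Interim result: *(\S+)", line)[0])
    logging.debug("niteration: %s" % niteration)
    return total / niteration

# Hypothetical demo-mode output: 2 parallel sessions, 2 samples each.
sample = [
    "MIGRATED TCP STREAM TEST ... AF_INET",
    "Interim result:  940.10 10^6bits/s over 1.0 seconds",
    "Interim result:  941.30 10^6bits/s over 1.0 seconds",
    "Interim result:  939.80 10^6bits/s over 1.0 seconds",
    "Interim result:  942.00 10^6bits/s over 1.0 seconds",
]
logging.debug("throughput: %s", parse_demo_result(sample, 2))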

Example 13

Project: 8-bits
Source File: posts.py
View license
def apply_posts(shard=None,
                insertion_post_id=None,
                lease_seconds=10,
                max_tasks=20):
    """Applies a set of pending posts to a shard.

    If shard is None then this function will apply mods for whatever is the
    first shard it can find in the pull task queue.

    insertion_post_id is the post_id that first caused this apply task to be
    enqueued. This task will retry until it applies the insertion_post_id
    itself or it can confirm that the insertion_post_id has already been
    applied. insertion_post_id may be empty if the apply task is not associated
    with a particular post (such as cronjobs/cleanup tasks).
    """
    # Do not use caching for NDB in this task queue worker.
    ctx = ndb.get_context()
    ctx.set_cache_policy(lambda x: False)
    ctx.set_memcache_policy(lambda x: False)

    # Fetch the new Posts to put in sequence.
    queue = taskqueue.Queue(config.pending_queue)

    # When no shard is specified, process the first tag we find.
    task_list = []
    if not shard:
        task_list.extend(queue.lease_tasks(lease_seconds, 1))
        if not task_list:
            logging.debug('apply_posts with no specific shard found no tasks')
            return
        params = task_list[0].extract_params()
        shard = params['shard']
        logging.debug('apply_posts with no specific shard found shard=%r',
                      shard)

    # Clear the dirty bit on this shard to start the time horizon.
    dirty_bit(shard, clear=True)

    # Find tasks pending for the current shard.
    task_list.extend(
        queue.lease_tasks_by_tag(lease_seconds, max_tasks, tag=str(shard)))

    receipt_key_list = []
    new_topic = None
    for task in task_list:
        params = task.extract_params()

        # Extract the new topic shard associated with this task, if any. The
        # last one wins. If all of the found posts have already been applied,
        # then topic assignment will be ignored.
        new_topic = params.get('new_topic') or new_topic

        post_id_list = params.get('post_ids')
        if post_id_list is None:
            # This may happen on replica shards if it turns out there are no
            # unapplied post IDs but an apply task still ran.
            post_id_list = []
        elif not isinstance(post_id_list, list):
            post_id_list = [post_id_list]

        for post_id in post_id_list:
            receipt_key = ndb.Key(
                models.Post._get_kind(), post_id,
                models.Receipt._get_kind(), shard)
            receipt_key_list.append(receipt_key)

    receipt_list = ndb.get_multi(receipt_key_list)

    # Some tasks may be in the pull queue that were already put in sequence.
    # So ignore these and only apply the new ones.
    unapplied_receipts = [
        models.Receipt(key=k)
        for k, r in zip(receipt_key_list, receipt_list)
        if r is None]
    unapplied_post_ids = [r.post_id for r in unapplied_receipts]

    # Double check if we think there should be work to apply but we didn't find
    # any. This will force the apply task to retry immediately if the post task
    # was not found. This can happen when the pull queue's consistency is
    # behind.
    if not unapplied_receipts and insertion_post_id:
        receipt_key = ndb.Key(
            models.Post._get_kind(), insertion_post_id,
            models.Receipt._get_kind(), shard)
        receipt = receipt_key.get()
        if receipt:
            logging.warning(
                'No post application to do for shard=%r, but post_id=%r '
                'already applied; doing nothing in this task',
                shard, insertion_post_id)
            new_topic = None
            # Do not 'return' here. We need to increment the shard sequence or
            # else tasks will not run for this shard in the future because of
            # de-duping.
        else:
            raise base.Error('No post application to do for shard=%r, but '
                             'post_id=%r has not been applied; will retry' %
                             (shard, insertion_post_id))

    now = datetime.datetime.now()

    def txn():
        shard_record = models.Shard.get_by_id(shard)
        # TODO(bslatkin): Just drop this task entirely if the shard cannot
        # be found. Could happen for old shards that were cleaned up.
        assert shard_record

        # One of the tasks in this batch has a topic assignment. Apply it here.
        if new_topic:
            logging.debug('Changing topic from %r to %r',
                          shard_record.current_topic, new_topic)
            shard_record.current_topic = new_topic
            shard_record.topic_change_time = now

        new_sequence_numbers = list(xrange(
            shard_record.sequence_number,
            shard_record.sequence_number + len(unapplied_receipts)))
        shard_record.sequence_number += max(1, len(unapplied_receipts))

        # Write post references that point at the newly sequenced posts.
        to_put = [shard_record]
        for receipt, sequence in zip(unapplied_receipts, new_sequence_numbers):
            to_put.append(models.PostReference(
                id=sequence,
                parent=shard_record.key,
                post_id=receipt.post_id))
            # Update the receipt entity here; it will be written outside this
            # transaction, since these receipts may span multiple entity
            # groups.
            receipt.sequence = sequence

        # Enqueue replica posts transactionally, to make sure everything
        # definitely will get copied over to the replica shard.
        if shard_record.current_topic:
            enqueue_post_task(shard_record.current_topic, unapplied_post_ids)

        ndb.put_multi(to_put)

        return shard_record, new_sequence_numbers

    # Have this only attempt a transaction a single time. If the transaction
    # fails the task queue will retry this task within 4 seconds. Because
    # apply tasks are always named by the current Shard.sequence_number we
    # can be reasonably sure that no other apply task for this shard will be
    # running concurrently when this fails.
    shard_record, new_sequence_numbers = ndb.transaction(txn, retries=1)
    replica_shard = shard_record.current_topic

    logging.debug('Applied %d posts for shard=%r, sequence_numbers=%r',
                  len(unapplied_receipts), shard, new_sequence_numbers)

    futures = []

    # Save receipts for all the posts.
    futures.extend(ndb.put_multi_async(unapplied_receipts))

    # Notify all logged in users of the new posts.
    futures.append(notify_posts(
        shard, unapplied_post_ids, sequence_numbers=new_sequence_numbers))

    # Replicate posts to a topic shard.
    if replica_shard:
        logging.debug('Replicating source shard=%r to replica shard=%r',
                      shard, replica_shard)
        futures.append(enqueue_apply_task(replica_shard))

    # Success! Delete the tasks from this queue.
    queue.delete_tasks(task_list)

    # Always run one more apply task to clean up any posts that came in
    # while this transaction was processing.
    if dirty_bit(shard, check=True):
        futures.append(enqueue_apply_task(shard))

    # Wait on all pending futures in case they raise errors.
    ndb.Future.wait_all(futures)

    # For root shards, add shard cleanup task to check for user presence and
    # cause notification of user logouts if the channel API did not detect the
    # user closing the connection.
    if not shard_record.root_shard:
        presence.enqueue_cleanup_task(shard)
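
The %r placeholders above rely on logging's lazy formatting: the arguments are only interpolated into the message if the record is actually emitted. A minimal sketch of that call style, with illustrative values:

import logging

logging.basicConfig(level=logging.DEBUG)

shard = "shard-1"
sequence_numbers = [7, 8, 9]

# %d and %r are applied by the logging module itself, and only when DEBUG
# records are enabled, so disabled debug calls stay cheap.
logging.debug('Applied %d posts for shard=%r, sequence_numbers=%r',
              len(sequence_numbers), shard, sequence_numbers)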

Example 14

Project: videonotes
Source File: main.py
View license
    def post(self):
        """
        Called when HTTP POST requests are received by the web application.

        The POST body is JSON which is deserialized and used as values to create a
        new file in Drive. The authorization access token for this action is
        retrieved from the data store.
        """

        # Create a Drive service
        service = self.CreateDrive()
        if service is None:
            return

        # Load the data that has been posted as JSON
        logging.debug('Get JSON data')
        data = self.RequestJSON()
        logging.debug('JSON data retrieved %s', json.dumps(data))

        content = FileUtils.get_content_from_data(data)

        max_try = 5
        for n in range(0, max_try):
            try:
                if 'templateId' in data:
                    body = {'title': 'Your notes'}
                    resource = service.files().copy(fileId=data['templateId'], body=body).execute()
                else:
                    # Create a new file data structure.
                    resource = {
                        'title': data['title'],
                        'description': data['description'],
                        'mimeType': data['mimeType'],
                    }

                    if 'parent' in data and data['parent']:
                        logging.debug('Creating from a parent folder %s', data['parent'])
                        default_folder_id = data['parent']
                    else:
                        if 'defaultFolderId' in self.session and self.session['defaultFolderId']:
                            default_folder_id = self.session['defaultFolderId']
                        else:
                            default_folder_list = service.files().list(q='title="VideoNot.es"').execute()
                            if default_folder_list and 'items' in default_folder_list and len(default_folder_list['items']):
                                default_folder_id = default_folder_list['items'][0]['id']
                                self.session['defaultFolderId'] = default_folder_id
                            else:
                                folder_ressource = {
                                    'title': 'VideoNot.es',
                                    'mimeType': 'application/vnd.google-apps.folder'
                                }
                                default_folder = service.files().insert(body=folder_ressource).execute()
                                default_folder_id = default_folder['id']
                                self.session['defaultFolderId'] = default_folder_id
                    resource['parents'] = [{'id':default_folder_id}]

                    # Make an insert request to create a new file. A MediaInMemoryUpload
                    # instance is used to upload the file body.
                    logging.debug('Calling Drive API with content %s', str(content))
                    resource = service.files().insert(
                        body=resource,
                        media_body=MediaInMemoryUpload(
                            content,
                            data['mimeType'],
                            resumable=True)
                    ).execute()

                    if BaseHandler.is_production():
                        # clement_permission = {
                        #     'value': '[email protected]',
                        #     'type': 'user',
                        #     'role': 'reader'
                        # }

                        anyone_permission = {
                            'type': 'anyone',
                            'role': 'reader',
                            'withLink': True
                        }

                        # try:
                        #     logging.info('Add Clement as a reader')
                        #     service.permissions().insert(fileId=resource['id'], body=clement_permission).execute()
                        # except HttpError:
                        #     logging.info('Error when adding Clement as a reader')

                        try:
                            logging.info('Add anyone as a reader')
                            service.permissions().insert(fileId=resource['id'], body=anyone_permission).execute()
                        except HttpError:
                            logging.info('Error when adding anyone as a reader')

                # Respond with the new file id as JSON.
                logging.debug('New ID created %s', resource['id'])
                return self.RespondJSON({'id': resource['id']})
            except AccessTokenRefreshError:
                # In cases where the access token has expired and cannot be refreshed
                # (e.g. manual token revoking) redirect the user to the authorization page
                # to authorize.
                logging.info('AccessTokenRefreshError')
                return self.abort(401)
            except HttpError, http_error:
                logging.getLogger("error").exception("Try #%d: Exception occurred when creating file", n)
                # HTTP status code 403 indicates that the app is not authorized to save the file (third-party app disabled, user without access, etc.)
                # Don't need to try several times
                if http_error.resp.status == 403:
                    return self.abort(403)
                else:
                    time.sleep((2 ** n) + (random.randint(0, 1000) / 1000))
            except HTTPException:
                logging.getLogger("error").exception("Try #%d: Exception occurred when creating file", n)
                time.sleep((2 ** n) + (random.randint(0, 1000) / 1000))

        logging.getLogger("error").exception("Exception occurred when creating file after %d tries", max_try)
        return self.abort(500)
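
The retry loop above logs each failed attempt through logging.getLogger("error").exception(...), which records the message at ERROR level together with the active traceback, and then backs off exponentially. A minimal sketch of that shape, assuming a made-up flaky_call() stand-in for the Drive request:

import logging
import random
import time

logging.basicConfig(level=logging.DEBUG)
error_log = logging.getLogger("error")

def flaky_call():
    # Hypothetical stand-in for the Drive API insert; fails most of the time.
    if random.random() < 0.8:
        raise RuntimeError("transient failure")
    return {"id": "abc123"}

max_try = 5
for n in range(max_try):
    try:
        resource = flaky_call()
        logging.debug('New ID created %s', resource['id'])
        break
    except RuntimeError:
        # .exception() logs at ERROR level and appends the current traceback.
        error_log.exception("Try #%d: exception occurred when creating file", n)
        time.sleep((2 ** n) / 100.0)  # scaled-down backoff for the sketch
else:
    error_log.error("Giving up after %d tries", max_try)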

Example 15

Project: exocortex-halo
Source File: server.py
View license
    def do_PUT(self):
        content = ""
        content_length = 0
        arguments = {}
        response = ""
        sentences = []
        all_keys_found = True

        # Variables that hold data from the database accesses.
        row = ""
        name = ""
        bots_api_key = ""
        learn = ""

        if self.path == "/learn":
            logger.info("A client has contacted the /learn API rail.")

            # For debugging purposes, dump the headers the server gets from
            # the client.
            logging.debug("List of headers in the HTTP request:")
            for key in self.headers:
                logging.debug("    " + key + " - " + self.headers[key])

            # Read any content sent from the client.  If there is no
            # "Content-Length" header, something screwy is happening, in which
            # case we fire an error.
            content = self._read_content()
            if not content:
                logger.debug("Client sent zero-lenth content.")
                return

            # Ensure that the client sent JSON and not something else.
            if not self._ensure_json():
                return

            # Try to deserialize the JSON sent from the client.  If we can't,
            # pitch a fit.
            arguments = self._deserialize_content(content)
            if not arguments:
                return

            # Normalize the keys in the JSON to lowercase.
            arguments = self._normalize_keys(arguments)

            # Ensure that all of the required keys are in the JSON document.
            if not self._ensure_all_keys(arguments):
                return

            # See if the bot is in the database.
            row = self._bot_in_database(arguments['botname'],
                arguments['apikey'])
            if not row:
                self._send_http_response(404, '{"response": "Bot not found.", "id": 404}')
                return

            # Take apart the response.  This is a little messy but necessary
            # to get at the very end of the tuple.
            (name, bots_api_key, learn) = row[0]

            # If the bot does not have permission to teach the Markov brain,
            # send back an error.  Again, this is a little messy but it's
            # easier to get to a Bool to work with than it is playing with
            # identifying single letters.  And I'm sick while I'm writing this.
            learn = learn.lower()
            if learn == 'n':
                learn = False
            if not learn :
                logger.info("Bot does not have permission to update the Markov brain.")
                self._send_http_response(401, '{"response": "Bot does not have permission to update the brain.", "id": 401}')
                return

            # Run the text through the Markov brain to update it and return a
            # success message.
            sentence_ends = re.compile('[.!?]')
            sentences = sentence_ends.split(arguments['stimulus'])

            # Get rid of the spurious entry at the end of the array...
            sentences.pop()
            logger.debug("List of sentences to learn from: " + str(sentences))

            # Run the sentences through the markov brain.
            if not len(sentences):
                logger.info("No sentences to update the Markov brain.")
                self._send_http_response(400, '{"response": "failed", "id": 400}')
                return
            for i in sentences:
                response = brain.learn(i)
            logger.info("Bot has updated the Markov brain.")

            temp = {}
            temp['response'] = response
            temp['id'] = 200
            logger.debug("Sending back to client: " + str(temp))

            self._send_http_response(200, json.dumps(temp))
            return

        if self.path == "/register":
            logger.info("A client has contacted the /register API rail.  This makes things somewhat interesting.")

            # For debugging purposes, dump the headers the server gets from
            # the client.
            logging.debug("List of headers in the HTTP request:")
            for key in self.headers:
                logging.debug("    " + key + " - " + self.headers[key])

            # Read any content sent from the client.  If there is no
            # "Content-Length" header, something screwy is happening, in which
            # case we fire an error.
            content = self._read_content()
            if not content:
                logger.debug("Client sent zero-lenth content.")
                return

            # Ensure that the client sent JSON and not something else.
            if not self._ensure_json():
                return

            # Try to deserialize the JSON sent from the client.  If we can't,
            # pitch a fit.
            arguments = self._deserialize_content(content)
            if not arguments:
                return

            # Ensure that the management API key was sent in an HTTP header.
            # If it wasn't, abort.
            if "x-api-key" not in self.headers.keys():
                logger.info("User tried to /register a bot but didn't include the management API key.")
                self._send_http_response(401, '{"result": null, "error": "Management API key not included.", "id": 401}')
                return

            # Check the included management API key against the one in the
            # server's config file.
            if self.headers['x-api-key'] != apikey:
                logger.info("User tried to /register a bot with an incorrect management API key.")
                self._send_http_response(401, '{"result": null, "error": "Incorrect management API key.", "id": 401}')
                return

            # Normalize the keys in the JSON to lowercase.
            arguments = self._normalize_keys(arguments)

            # Ensure that all of the required keys are in the JSON document.
            if not self._ensure_all_keys(arguments):
                return

            # There are additional JSON keys that have to be present for this
            # API rail.  This can probably be split out into a separate helper
            # method later.
            if "respond" not in arguments.keys():
                all_keys_found = False
            if "learn" not in arguments.keys():
                all_keys_found = False
            if not all_keys_found:
                logger.debug('{"result": null, "error": "All required keys were not found in the JSON document.  Look at the online help.", "id": 400}')
                self._send_http_response(400, '{"result": null, "error": "All required keys were not found in the JSON document.  Look at the online help.", "id": 400}')
                return

            # Ensure that the values of the respond and learn keys are either
            # Y or N.  Start by normalizing the values before testing them.
            valid_responses = ['Y', 'N']
            arguments['respond'] = arguments['respond'].upper()
            if arguments['respond'] not in valid_responses:
                self._send_http_response(400, '{"result": null, "error": "The only valid values for respond are Y or N.", "id": 400}')
                return

            arguments['learn'] = arguments['learn'].upper()
            if arguments['learn'] not in valid_responses:
                self._send_http_response(400, '{"result": null, "error": "The only valid values for learn are Y or N.", "id": 400}')
                return

            # See if the bot is in the database already.  Send back an error
            # 409 (Conflict) if it is.
            row = self._bot_in_database(arguments['botname'],
                arguments['apikey'])
            if row:
                logger.info("Bot already in database.")
                self._send_http_response(409, '{"response": "Bot already in database.", "id": 409}')
                return

            # Add the bot to the database.
            if self._add_bot_to_database(arguments['botname'],
                arguments['apikey'], arguments['respond'], arguments['learn']):
                self._send_http_response(200, '{"response": "success", "id": 200}')
            else:
                self._send_http_response(400, '{"response": "failure", "id": 400}')
            return

        if self.path == "/deregister":
            logger.info("A client has contacted the /deregister API rail.  This makes things somewhat interesting.")

            # For debugging purposes, dump the headers the server gets from
            # the client.
            logging.debug("List of headers in the HTTP request:")
            for key in self.headers:
                logging.debug("    " + key + " - " + self.headers[key])

            # Read any content sent from the client.  If there is no
            # "Content-Length" header, something screwy is happening, in which
            # case we fire an error.
            content = self._read_content()
            if not content:
                logger.debug("Client sent zero-lenth content.")
                return

            # Ensure that the client sent JSON and not something else.
            if not self._ensure_json():
                return

            # Try to deserialize the JSON sent from the client.  If we can't,
            # pitch a fit.
            arguments = self._deserialize_content(content)
            if not arguments:
                return

            # Ensure that the management API key was sent in an HTTP header.
            # If it wasn't, abort.
            if "x-api-key" not in self.headers.keys():
                logger.info("User tried to /deregister a bot but didn't include the management API key.")
                self._send_http_response(401, '{"result": null, "error": "No management API key.", "id": 401}')
                return

            # Check the included management API key against the one in the
            # server's config file.
            if self.headers['x-api-key'] != apikey:
                logger.info("User tried to /deregister a bot with an incorrect management API key.")
                self._send_http_response(401, '{"result": null, "error": "Incorrect management API key.", "id": 401}')
                return

            # Normalize the keys in the JSON to lowercase.
            arguments = self._normalize_keys(arguments)

            # Ensure that all of the required keys are in the JSON document.
            if not self._ensure_all_keys(arguments):
                logger.debug('{"result": null, "error": "All required keys were not found in the JSON document.  Look at the online help.", "id": 400}')
                self._send_http_response(400, '{"result": null, "error": "All required keys were not found in the JSON document.  Look at the online help.", "id": 400}')
                return

            # See if the bot is not in the database.  Send back an error 404
            # (Not Found) if it's not.
            row = self._bot_in_database(arguments['botname'],
                arguments['apikey'])
            if not row:
                logger.info("Bot does not exist in database.")
                self._send_http_response(404, '{"response": "failure", "id": 404}')
                return

            # Delete the bot from the database.
            if self._delete_bot_from_database(arguments['botname'],
                arguments['apikey']):
                self._send_http_response(200, '{"response": "success", "id": 200}')
            else:
                self._send_http_response(404, '{"response": "failure", "id": 404}')
            return

        # If we've fallen through to here, bounce.
        return
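
The handler above mixes module-level logging.debug(...) calls, which go to the root logger, with a named logger object. A small sketch of why the distinction matters: a logger created with logging.getLogger(__name__) stamps its own name on every record, which makes per-component filtering possible later. The messages reuse wording from the example purely for illustration.

import logging

logging.basicConfig(
    format="%(levelname)s %(name)s: %(message)s", level=logging.DEBUG)

# Calls on the logging module go to the root logger...
logging.debug("List of headers in the HTTP request:")

# ...while a named logger tags each record with its own name.
logger = logging.getLogger(__name__)
logger.debug("Client sent zero-length content.")
logger.info("A client has contacted the /learn API rail.")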

Example 16

Project: jcvi
Source File: grabseeds.py
View license
def seeds(args):
    """
    %prog seeds [pngfile|jpgfile]

    Extract seed metrics from [pngfile|jpgfile]. Use --rows and --cols to crop image.
    """
    p = OptionParser(seeds.__doc__)
    p.set_outfile()
    opts, args, iopts = add_seeds_options(p, args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    pngfile, = args
    pf = opts.prefix or op.basename(pngfile).rsplit(".", 1)[0]
    sigma, kernel = opts.sigma, opts.kernel
    rows, cols = opts.rows, opts.cols
    labelrows, labelcols = opts.labelrows, opts.labelcols
    ff = opts.filter
    calib = opts.calibrate
    outdir = opts.outdir
    if outdir != '.':
        mkdir(outdir)
    if calib:
        calib = json.load(must_open(calib))
        pixel_cm_ratio, tr = calib["PixelCMratio"], calib["RGBtransform"]
        tr = np.array(tr)

    resizefile, mainfile, labelfile, exif = \
                      convert_image(pngfile, pf, outdir=outdir,
                                    rotate=opts.rotate,
                                    rows=rows, cols=cols,
                                    labelrows=labelrows, labelcols=labelcols)

    oimg = load_image(resizefile)
    img = load_image(mainfile)

    fig, (ax1, ax2, ax3, ax4) = plt.subplots(ncols=4, nrows=1,
                                             figsize=(iopts.w, iopts.h))

    # Edge detection
    img_gray = rgb2gray(img)
    logging.debug("Running {0} edge detection ...".format(ff))
    if ff == "canny":
        edges = canny(img_gray, sigma=opts.sigma)
    elif ff == "roberts":
        edges = roberts(img_gray)
    elif ff == "sobel":
        edges = sobel(img_gray)
    edges = clear_border(edges, buffer_size=opts.border)
    selem = disk(kernel)
    closed = closing(edges, selem) if kernel else edges
    filled = binary_fill_holes(closed)

    # Watershed algorithm
    if opts.watershed:
        distance = distance_transform_edt(filled)
        local_maxi = peak_local_max(distance, threshold_rel=.05, indices=False)
        coordinates = peak_local_max(distance, threshold_rel=.05)
        markers, nmarkers = label(local_maxi, return_num=True)
        logging.debug("Identified {0} watershed markers".format(nmarkers))
        labels = watershed(closed, markers, mask=filled)
    else:
        labels = label(filled)

    # Object size filtering
    w, h = img_gray.shape
    canvas_size = w * h
    min_size = int(round(canvas_size * opts.minsize / 100))
    max_size = int(round(canvas_size * opts.maxsize / 100))
    logging.debug("Find objects with pixels between {0} ({1}%) and {2} ({3}%)"\
                    .format(min_size, opts.minsize, max_size, opts.maxsize))

    # Plotting
    ax1.set_title('Original picture')
    ax1.imshow(oimg)

    params = "{0}, $\sigma$={1}, $k$={2}".format(ff, sigma, kernel)
    if opts.watershed:
        params += ", watershed"
    ax2.set_title('Edge detection\n({0})'.format(params))
    closed = gray2rgb(closed)
    ax2_img = labels
    if opts.edges:
        ax2_img = closed
    elif opts.watershed:
        ax2.plot(coordinates[:, 1], coordinates[:, 0], 'g.')
    ax2.imshow(ax2_img, cmap=iopts.cmap)

    ax3.set_title('Object detection')
    ax3.imshow(img)

    filename = op.basename(pngfile)
    if labelfile:
        accession = extract_label(labelfile)
    else:
        accession = pf

    # Calculate region properties
    rp = regionprops(labels)
    rp = [x for x in rp if min_size <= x.area <= max_size]
    nb_labels = len(rp)
    logging.debug("A total of {0} objects identified.".format(nb_labels))
    objects = []
    for i, props in enumerate(rp):
        i += 1
        if i > opts.count:
            break

        y0, x0 = props.centroid
        orientation = props.orientation
        major, minor = props.major_axis_length, props.minor_axis_length
        major_dx = cos(orientation) * major / 2
        major_dy = sin(orientation) * major / 2
        minor_dx = sin(orientation) * minor / 2
        minor_dy = cos(orientation) * minor / 2
        ax2.plot((x0 - major_dx, x0 + major_dx),
                 (y0 + major_dy, y0 - major_dy), 'r-')
        ax2.plot((x0 - minor_dx, x0 + minor_dx),
                 (y0 - minor_dy, y0 + minor_dy), 'r-')

        npixels = int(props.area)
        # Sample the center of the blob for color
        d = min(int(round(minor / 2 * .35)) + 1, 50)
        square = img[(y0 - d):(y0 + d), (x0 - d):(x0 + d)]
        pixels = []
        for row in square:
            pixels.extend(row)
        logging.debug("Seed #{0}: {1} pixels ({2} sampled) - {3:.2f}%".\
                        format(i, npixels, len(pixels), 100. * npixels / canvas_size))

        rgb = pixel_stats(pixels)
        objects.append(Seed(filename, accession, i, rgb, props, exif))
        minr, minc, maxr, maxc = props.bbox
        rect = Rectangle((minc, minr), maxc - minc, maxr - minr,
                                  fill=False, ec='w', lw=1)
        ax3.add_patch(rect)
        mc, mr = (minc + maxc) / 2, (minr + maxr) / 2
        ax3.text(mc, mr, "{0}".format(i), color='w',
                    ha="center", va="center", size=6)

    for ax in (ax2, ax3):
        ax.set_xlim(0, h)
        ax.set_ylim(w, 0)

    # Output identified seed stats
    ax4.text(.1, .92, "File: {0}".format(latex(filename)), color='g')
    ax4.text(.1, .86, "Label: {0}".format(latex(accession)), color='m')
    yy = .8
    fw = must_open(opts.outfile, "w")
    if not opts.noheader:
        print >> fw, Seed.header(calibrate=calib)
    for o in objects:
        if calib:
            o.calibrate(pixel_cm_ratio, tr)
        print >> fw, o
        i = o.seedno
        if i > 7:
            continue
        ax4.text(.01, yy, str(i), va="center", bbox=dict(fc='none', ec='k'))
        ax4.text(.1, yy, o.pixeltag, va="center")
        yy -= .04
        ax4.add_patch(Rectangle((.1, yy - .025), .12, .05, lw=0,
                      fc=rgb_to_hex(o.rgb)))
        ax4.text(.27, yy, o.hashtag, va="center")
        yy -= .06
    ax4.text(.1 , yy, "(A total of {0} objects displayed)".format(nb_labels),
             color="darkslategrey")
    normalize_axes(ax4)

    for ax in (ax1, ax2, ax3):
        xticklabels = [int(x) for x in ax.get_xticks()]
        yticklabels = [int(x) for x in ax.get_yticks()]
        ax.set_xticklabels(xticklabels, family='Helvetica', size=8)
        ax.set_yticklabels(yticklabels, family='Helvetica', size=8)

    image_name = op.join(outdir, pf + "." + iopts.format)
    savefig(image_name, dpi=iopts.dpi, iopts=iopts)
    return objects
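
The debug calls above are only visible if the root logger is configured to emit DEBUG records; by default it only shows WARNING and above. A minimal sketch of that configuration, with stand-in values for the filter name and marker count:

import logging

# Without this, every logging.debug(...) call above would be silent,
# because the root logger defaults to the WARNING level.
logging.basicConfig(
    level=logging.DEBUG,
    format="%(asctime)s %(levelname)s %(message)s")

ff, nmarkers = "canny", 42
logging.debug("Running {0} edge detection ...".format(ff))
logging.debug("Identified {0} watershed markers".format(nmarkers))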

Example 17

Project: jcvi
Source File: dotplot.py
View license
def dotplot(anchorfile, qbed, sbed, fig, root, ax, vmin=0, vmax=1,
        is_self=False, synteny=False, cmap_text=None, cmap="copper",
        genomenames=None, sample_number=10000, minfont=5, palette=None,
        chrlw=.01, title=None, sepcolor="gainsboro"):

    fp = open(anchorfile)

    qorder = qbed.order
    sorder = sbed.order

    data = []
    if cmap_text:
        logging.debug("Capping values within [{0:.1f}, {1:.1f}]"\
                        .format(vmin, vmax))

    block_id = 0
    for row in fp:
        atoms = row.split()
        block_color = None
        if row[0] == "#":
            block_id += 1
            if palette:
                block_color = palette.get(block_id, "k")
            continue

        # first two columns are query and subject, and an optional third column
        if len(atoms) < 2:
            continue

        query, subject = atoms[:2]
        value = atoms[-1]

        if cmap_text:
            try:
                value = float(value)
            except ValueError:
                value = vmax

            if value < vmin:
                continue
            if value > vmax:
                continue
        else:
            value = 0

        if query not in qorder:
            continue
        if subject not in sorder:
            continue

        qi, q = qorder[query]
        si, s = sorder[subject]

        nv = value if block_color is None else block_color
        data.append((qi, si, nv))
        if is_self:  # Mirror image
            data.append((si, qi, nv))

    npairs = len(data)
    # Only show random subset
    if npairs > sample_number:
        logging.debug("Showing a random subset of {0} data points (total {1}) " \
                      "for clarity.".format(sample_number, npairs))
        data = sample(data, sample_number)

    # the data are plotted in this order; the lowest values are plotted
    # last for aesthetics
    #if not palette:
    #    data.sort(key=lambda x: -x[2])

    x, y, c = zip(*data)

    if palette:
        ax.scatter(x, y, c=c, edgecolors="none", s=2, lw=0)
    else:
        ax.scatter(x, y, c=c, edgecolors="none", s=2, lw=0, cmap=cmap,
                vmin=vmin, vmax=vmax)

    if synteny:
        clusters = batch_scan(data, qbed, sbed)
        draw_box(clusters, ax)

    if cmap_text:
        draw_cmap(root, cmap_text, vmin, vmax, cmap=cmap)

    xsize, ysize = len(qbed), len(sbed)
    logging.debug("xsize=%d ysize=%d" % (xsize, ysize))
    xlim = (0, xsize)
    ylim = (ysize, 0)  # invert the y-axis

    # Tag to mark whether to plot chr name (skip small ones)
    xchr_labels, ychr_labels = [], []
    th = TextHandler(fig)

    # plot the chromosome breaks
    for (seqid, beg, end) in qbed.get_breaks():
        xsize_ratio = abs(end - beg) * .8 / xsize
        fontsize = th.select_fontsize(xsize_ratio)
        seqid = "".join(seqid_parse(seqid)[:2])

        xchr_labels.append((seqid, (beg + end) / 2, fontsize))
        ax.plot([beg, beg], ylim, "-", lw=chrlw, color=sepcolor)

    for (seqid, beg, end) in sbed.get_breaks():
        ysize_ratio = abs(end - beg) * .8 / ysize
        fontsize = th.select_fontsize(ysize_ratio)
        seqid = "".join(seqid_parse(seqid)[:2])

        ychr_labels.append((seqid, (beg + end) / 2, fontsize))
        ax.plot(xlim, [beg, beg], "-", lw=chrlw, color=sepcolor)

    # plot the chromosome labels
    for label, pos, fontsize in xchr_labels:
        pos = .1 + pos * .8 / xsize
        if fontsize >= minfont:
            root.text(pos, .91, latex(label), size=fontsize,
                ha="center", va="bottom", rotation=45, color="grey")

    # remember y labels are inverted
    for label, pos, fontsize in ychr_labels:
        pos = .9 - pos * .8 / ysize
        if fontsize >= minfont:
            root.text(.91, pos, latex(label), size=fontsize,
                va="center", color="grey")

    # create a diagonal to separate mirror image for self comparison
    if is_self:
        ax.plot(xlim, (0, ysize), 'm-', alpha=.5, lw=2)

    ax.set_xlim(xlim)
    ax.set_ylim(ylim)

    # add genome names
    if genomenames:
        gx, gy = genomenames.split("_")
    else:
        to_ax_label = lambda fname: op.basename(fname).split(".")[0]
        gx, gy = [to_ax_label(x.filename) for x in (qbed, sbed)]
    ax.set_xlabel(markup(gx), size=16)
    ax.set_ylabel(markup(gy), size=16)

    # beautify the numeric axis
    for tick in ax.get_xticklines() + ax.get_yticklines():
        tick.set_visible(False)

    set_human_axis(ax)

    plt.setp(ax.get_xticklabels() + ax.get_yticklabels(),
            color='gray', size=10)

    if palette:  # bottom-left has the palette, if available
        colors = palette.colors
        xstart, ystart = .1, .05
        for category, c in sorted(colors.items()):
            root.add_patch(Rectangle((xstart, ystart), .03, .02, lw=0, fc=c))
            root.text(xstart + .04, ystart, category, color=c)
            xstart += .1

    if not title:
        title = "Inter-genomic comparison: {0} vs {1}".format(gx, gy)
        if is_self:
            title = "Intra-genomic comparison within {0}".format(gx)
            npairs /= 2
        title += " ({0} gene pairs)".format(thousands(npairs))
    root.set_title(markup(title), x=.5, y=.96, color="k")
    logging.debug(title)

    root.set_xlim(0, 1)
    root.set_ylim(0, 1)
    root.set_axis_off()
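
When a debug message is expensive to build (for example a summary over all plotted points), it can be guarded with isEnabledFor() so the cost is only paid when DEBUG output is actually enabled. A small sketch with invented data:

import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("dotplot")

data = [(i, i, 0) for i in range(100000)]

if logger.isEnabledFor(logging.DEBUG):
    # The join below walks part of the data set, so only pay for it
    # when DEBUG records will actually be emitted.
    preview = ", ".join(str(point) for point in data[:5])
    logger.debug("first points: %s (total %d)", preview, len(data))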

Example 18

Project: pytrainer
Source File: windowimportdata.py
View license
    def on_buttonCSVImport_clicked(self, widget):
        logging.debug('>>')
        #Determine values
        dateCol = self.cbCSVDate.get_active()
        distanceCol = self.cbCSVDistance.get_active()
        durationCol = self.cbCSVDuration.get_active()
        titleCol = self.cbCSVTitle.get_active()
        sportCol = self.cbCSVSport.get_active()
        avgspeedCol = self.cbCSVAvgSpeed.get_active()
        maxspeedCol = self.cbCSVMaxSpeed.get_active()
        calCol = self.cbCSVCal.get_active()
        accCol = self.cbCSVAccent.get_active()
        desCol = self.cbCSVDescent.get_active()
        hrCol = self.cbCSVHR.get_active()
        maxHRCol = self.cbCSVMaxHR.get_active()
        paceCol = self.cbCSVPace.get_active()
        maxPaceCol = self.cbCSVMaxPace.get_active()
        commentsCol = self.cbCSVComments.get_active()

        if dateCol == 0:
            #Error need to have at least a date
            self.updateStatusbar(self.statusbarCSVImport, _("ERROR: Must define at least a date column"))
            return

        #Import...
        #Get selected file
        if not os.path.isfile(self.CSVfilename):
            return
        #Read as delimited file
        csvfile = open(self.CSVfilename, 'rb')
        reader = csv.reader(csvfile, delimiter=self.delimiter)
        #Process File

        for i, row in enumerate(reader):
            if self.has_header and i==0:
                #Ignore first row
                continue
            if not row:
                continue
            data = {}
            #Determine dates
            _date = Date().getDateTime(row[dateCol-1])
            #year, month, day = date.split("-")
            date = _date[1].strftime("%Y-%m-%d")
            zuluDateTime = _date[0].strftime("%Y-%m-%dT%H:%M:%SZ")
            localDateTime = str(_date[1])
            data['date'] = date
            data['date_time_utc'] = zuluDateTime
            data['date_time_local'] = localDateTime
            if distanceCol:
                try:
                    data['distance'] = locale.atof(row[distanceCol-1])
                except:
                    data['distance'] = 0
            else:
                data['distance'] = 0
            if durationCol:
                #calculate duration in sec...
                try:
                    _duration = row[durationCol-1]
                except:
                    _duration = 0
                if _duration.count(':') == 2:
                    #Have 00:00:00 duration
                    h, m, s = _duration.split(':')
                    try:
                        durationSec = int(h)*3600 + int(m)*60 + int(s)
                    except:
                        logging.debug("Error calculating duration for '%s'" % _duration)
                        durationSec = None
                else:
                    try:
                        durationSec = locale.atoi(_duration)
                    except:
                        #Unknown duration
                        logging.debug("Could not determine duration for '%s'" % _duration)
                        durationSec = None
                if durationSec is not None:
                    data['duration'] = durationSec
                    data['time'] = str(durationSec)
            if titleCol:
                try:
                    data['title'] = row[titleCol-1]
                except:
                    pass
            if self.checkbCSVForceSport.get_active():
                sport_id = self.pytrainer_main.record.getSportId(self.comboCSVForceSport.get_active_text(),add=True)
                data['sport'] = sport_id
            elif sportCol:
                #retrieving sport id (adding sport if it doesn't exist yet)
                sport_id = self.pytrainer_main.record.getSportId(row[sportCol-1],add=True)
                data['sport'] = sport_id
            else:
                self.comboCSVForceSport.set_active(0)
                sport_id = self.pytrainer_main.record.getSportId(self.comboCSVForceSport.get_active_text(),add=True)
                data['sport'] = sport_id

            if avgspeedCol:
                #
                try:
                    data['average'] = locale.atof(row[avgspeedCol-1])
                except:
                    pass
            if maxspeedCol:
                try:
                    data['maxspeed'] = locale.atof(row[maxspeedCol-1])
                except:
                    pass
            if calCol:
                try:
                    data['calories'] = locale.atoi(row[calCol-1])
                except:
                    pass
            if accCol:
                try:
                    data['upositive'] = locale.atof(row[accCol-1])
                except:
                    pass
            if desCol:
                try:
                    data['unegative'] = locale.atof(row[desCol-1])
                except:
                    pass
            if hrCol:
                try:
                    data['beats'] = locale.atof(row[hrCol-1])
                except:
                    pass
            if maxHRCol:
                try:
                    data['maxbeats'] = locale.atof(row[maxHRCol-1])
                except:
                    pass
            if paceCol:
                try:
                    data['pace'] = locale.atof(row[paceCol-1])
                except:
                    pass
            if maxPaceCol:
                try:
                    data['maxpace'] = locale.atof(row[maxPaceCol-1])
                except:
                    pass
            if commentsCol:
                try:
                    data['comments'] = row[commentsCol-1]
                except:
                    pass

            #Insert into DB
            logging.debug("Data", data)
            self.pytrainer_main.ddbb.insert_dict('records', data)
        #Display message....
        self.updateStatusbar(self.statusbarCSVImport, _("Import completed. %d rows processed") % i)
        #Disable import button
        self.buttonCSVImport.set_sensitive(0)
        logging.debug('<<')

Example 19

Project: ictf-framework
Source File: exploit.py
View license
    def execute(self, ip, port, flag_id):
        from multiprocessing import Process, Queue
        from collections import defaultdict
        from twisted.internet.protocol import Protocol, ClientFactory
        from twisted.internet import reactor
        from twisted.protocols.basic import LineReceiver
        from sys import stdout
        import sys
        import re
        import logging

        NUM_FILES_PER_AGENT = 15

        class File(object):
            def __init__(self, name, size, value):
                self.name = name
                self.size = size
                self.value = value
            def __repr__(self):
                return "Name:{0}, Size:{1}, Value:{2}".format(\
                        self.name, self.size, self.value)

        class Solver(object):
            def __init__(self, host, port, room_id):
                self.agents = []
                self.host = host
                self.port = port
                self.room_id = room_id
                self.flag = None
                reactor.connectTCP(self.host, port, AgentFactory(self))
                reactor.connectTCP(self.host, port, AgentFactory(self))
                reactor.connectTCP(self.host, port, AgentFactory(self))

            def add_agent(self, agent):
                self.agents.append(agent)

            def solve_individual(self):
                # Begin communication
                self.agents[0].msg_snowden("hi")
                reactor.callLater(.1, self.tell_agents_to_send)

            def tell_agents_to_send(self):
                for agent in self.agents:
                    agent.send_files()

            def check_if_ready(self):
                received_files = map(lambda agent: agent.received_all_files(), self.agents)
                if all(received_files):
                    self.solve_individual()


        def memoize(f):
            """ Memoization decorator for functions taking one or more arguments. """
            class memodict(dict):
                def __init__(self, f):
                    self.f = f
                def __call__(self, *args):
                    return self[args]
                def __missing__(self, key):
                    ret = self[key] = self.f(*key)
                    return ret
            return memodict(f)

        class Agent(LineReceiver):
            delimiter = "\n"

            def __init__(self, solver):
                self.solver = solver
                self.solver.add_agent(self)
                self.identity = None
                self.files = []
                self.bandwidth = 0
                self.committed = False

            def received_all_files(self):
                return len(self.files) == NUM_FILES_PER_AGENT

            def determine_identity(self, data):
                find_self = re.search('(\w+) has joined', data)
                if find_self:
                    self.identity = find_self.group(1)

            # list -- | Remaining Bandwidth: 16079 KB
            def determine_bandwidth(self, data):
                find_bandwidth = re.search('(\d+)', data)
                if find_bandwidth:
                    self.bandwidth = int(find_bandwidth.group(1))
                    logging.debug("{0} has bandwidth {1}".format(self.identity,
                            self.bandwidth))

            def msg_snowden(self,msg):
                cmd = "/msg E.Snowden {0}".format(msg)
                if msg == "DONE" and not self.committed:
                    logging.debug("{0} types {1}".format(self.identity, cmd))
                    self.sendLine(cmd)
                    self.committed = True
                elif msg != "DONE":
                    self.sendLine("/msg E.Snowden {0}".format(msg))

            def send_files(self):
                value, files_to_send = self.knapsack(self.files, self.bandwidth)
                for myfile in files_to_send:
                    self.send_file(myfile.name, "E.Snowden")
                    self.files.remove(myfile)
                self.bandwidth -= sum(map(lambda sent_file: sent_file.size,
                                    files_to_send))
                logging.debug("Bandwidth for {0} is now {1}".format(self.identity,
                        self.bandwidth))
                logging.debug("Remaining Files for {0} is {1}".format(self.identity,
                        len(self.files)))

                if len(self.files) >= 3:
                    for agent, my_file in zip(self.solver.agents, self.files[:3]):
                        self.send_file(my_file.name, agent.identity)
                    self.msg_snowden("DONE")

            def send_file (self, file_name, agent):
                cmd = "/send {0} {1}".format(agent, file_name)
                logging.debug("{0} types {1}".format(agent, cmd))
                self.sendLine(cmd)

            def show_up_for_work(self):
                self.sendLine("1")

            def enter_room(self):
                self.sendLine(self.solver.room_id)

            def begin_mission(self):
                self.sendLine("/list")

            def received_file(self, line):
                valid_file = re.search('(\S+)\s+(\d+)KB\s+(\d*)', line)
                if valid_file:
                    new_file = File(valid_file.group(1), int(valid_file.group(2)),
                                    int(valid_file.group(3)))
                    self.files.append(new_file)
                    self.solver.check_if_ready()

            # send -- | *Received File: MasteringTheInternet.ppt from Agent2 *
            def receive_file_from_friend(self, line):
                valid_file = re.search('Received File:\s+(\S+)\((\d+)\)', line)
                if valid_file:
                    new_file_name = valid_file.group(1)
                    file_size = int(valid_file.group(2))
                    if file_size < self.bandwidth:
                        self.send_file(new_file_name, "E.Snowden")
                        self.msg_snowden("DONE")

            def lineReceived(self, data):
                if "Enter the number" in data:
                    self.show_up_for_work()
                elif "Enter your room id" in data:
                    self.enter_room()
                elif self.identity is None and "has joined" in data:
                    self.determine_identity(data)
                elif "Everyone has arrived" in data:
                    self.begin_mission()
                elif "Remaining Bandwidth" not in data and "list -- |" in data:
                    self.received_file(data[12:])
                elif "Remaining Bandwidth" in data:
                    self.determine_bandwidth(data)
                elif "Received File" in data:
                    self.receive_file_from_friend(data)
                # think your boss will want this: test
                elif "boss will want" in data:
                    logging.debug(data)
                    flag = re.search('(\S+)$', data)
                    if flag:
                        self.solver.flag = flag.group(1)
                    reactor.stop()

            def knapsack(self, files, maxweight):

                # Return the value of the most valuable subsequence of the first i
                # elements in items whose weights sum to no more than j.
                @memoize
                def bestvalue(i, j):
                    if i == 0: return 0
                    value = files[i - 1].value
                    weight = files[i - 1].size
                    if weight > j:
                        return bestvalue(i - 1, j)
                    else:
                        return max(bestvalue(i - 1, j),
                                bestvalue(i - 1, j - weight) + value)

                j = maxweight
                result = []
                for i in xrange(len(files), 0, -1):
                    if bestvalue(i, j) != bestvalue(i - 1, j):
                        result.append(files[i - 1])
                        j -= files[i - 1].size
                result.reverse()
                return bestvalue(len(files), maxweight), result

        class AgentFactory(ClientFactory):
            def __init__(self, solver):
                self.solver = solver

            def buildProtocol(self, addr):
                return Agent(self.solver)

        def run(host, port, room_id, queue):
            logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.CRITICAL)
            # Connect three agents
            solver = Solver(host, port, room_id)
            reactor.run()
            #return solver.flag
            queue.put(solver.flag)

                
        flag = ''
        error = 0
        error_msg = ''

        try:
            queue = Queue()
            p = Process(target=run, args=(ip, port, flag_id, queue))
            p.start()
            p.join()
            flag = queue.get()
        except Exception as e:
            error = -1 #down
            error_msg = str(e)

        self.flag = flag
        self.error = error
        self.error_msg = error_msg
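
The run() helper above silences the agents' many debug lines by configuring the root logger at CRITICAL before starting the reactor. A short sketch of that configuration, plus a note on re-enabling debug output later; the messages are illustrative only:

import logging

# Match the run() helper: terse format, high threshold, so the per-agent
# logging.debug(...) calls stay quiet during normal runs.
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.CRITICAL)

logging.debug("Agent2 has bandwidth 16079")                   # suppressed
logging.critical("exploit failed: %s", "connection refused")  # still shown

# A second basicConfig() call is a no-op once handlers exist; adjust the
# root logger's level directly to turn debug output back on.
logging.getLogger().setLevel(logging.DEBUG)
logging.debug("now visible after setLevel")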

Example 20

Project: tp-libvirt
Source File: virsh_blockcommit.py
View license
def run(test, params, env):
    """
    Test command: virsh blockcommit <domain> <path>

    1) Prepare test environment.
    2) Commit changes from a snapshot down to its backing image.
    3) Recover test environment.
    4) Check result.
    """

    def make_disk_snapshot(postfix_n):
        # Add all disks into commandline.
        disks = vm.get_disk_devices()

        # Make three external snapshots for disks only
        for count in range(1, 4):
            options = "%s_%s %s%s-desc " % (postfix_n, count,
                                            postfix_n, count)
            options += "--disk-only --atomic --no-metadata"
            if needs_agent:
                options += " --quiesce"

            for disk in disks:
                disk_detail = disks[disk]
                basename = os.path.basename(disk_detail['source'])

                # Remove the original suffix if any, appending
                # ".postfix_n[0-9]"
                diskname = basename.split(".")[0]
                snap_name = "%s.%s%s" % (diskname, postfix_n, count)
                disk_external = os.path.join(tmp_dir, snap_name)

                snapshot_external_disks.append(disk_external)
                options += " %s,snapshot=external,file=%s" % (disk,
                                                              disk_external)

            cmd_result = virsh.snapshot_create_as(vm_name, options,
                                                  ignore_status=True,
                                                  debug=True)
            status = cmd_result.exit_status
            if status != 0:
                raise error.TestFail("Failed to make snapshots for disks!")

            # Create a file flag in VM after each snapshot
            flag_file = tempfile.NamedTemporaryFile(prefix=("snapshot_test_"),
                                                    dir="/tmp")
            file_path = flag_file.name
            flag_file.close()

            status, output = session.cmd_status_output("touch %s" % file_path)
            if status:
                raise error.TestFail("Touch file in vm failed. %s" % output)
            snapshot_flag_files.append(file_path)

    # MAIN TEST CODE ###
    # Process cartesian parameters
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vm_state = params.get("vm_state", "running")
    needs_agent = "yes" == params.get("needs_agent", "yes")
    replace_vm_disk = "yes" == params.get("replace_vm_disk", "no")
    top_inactive = ("yes" == params.get("top_inactive"))
    with_timeout = ("yes" == params.get("with_timeout_option", "no"))
    status_error = ("yes" == params.get("status_error", "no"))
    base_option = params.get("base_option", "none")
    middle_base = "yes" == params.get("middle_base", "no")
    pivot_opt = "yes" == params.get("pivot_opt", "no")
    snap_in_mirror = "yes" == params.get("snap_in_mirror", "no")
    snap_in_mirror_err = "yes" == params.get("snap_in_mirror_err", "no")
    with_active_commit = "yes" == params.get("with_active_commit", "no")
    multiple_chain = "yes" == params.get("multiple_chain", "no")
    virsh_dargs = {'debug': True}

    # Process domain disk device parameters
    disk_type = params.get("disk_type")
    disk_src_protocol = params.get("disk_source_protocol")
    restart_tgtd = params.get("restart_tgtd", 'no')
    vol_name = params.get("vol_name")
    tmp_dir = data_dir.get_tmp_dir()
    pool_name = params.get("pool_name", "gluster-pool")
    brick_path = os.path.join(tmp_dir, pool_name)

    if not top_inactive:
        if not libvirt_version.version_compare(1, 2, 4):
            raise error.TestNAError("live active block commit is not supported"
                                    " in current libvirt version.")

    # A backup of original vm
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Abort the test if there are snapshots already
    existing_snaps = virsh.snapshot_list(vm_name)
    if len(existing_snaps) != 0:
        raise error.TestFail("There are snapshots created for %s already" %
                             vm_name)

    snapshot_external_disks = []
    cmd_session = None
    try:
        if disk_src_protocol == 'iscsi' and disk_type == 'network':
            if not libvirt_version.version_compare(1, 0, 4):
                raise error.TestNAError("'iscsi' disk doesn't support in"
                                        " current libvirt version.")

        # Set vm xml and guest agent
        if replace_vm_disk:
            if disk_src_protocol == "rbd" and disk_type == "network":
                src_host = params.get("disk_source_host", "EXAMPLE_HOSTS")
                mon_host = params.get("mon_host", "EXAMPLE_MON_HOST")
                if src_host.count("EXAMPLE") or mon_host.count("EXAMPLE"):
                    raise error.TestNAError("Please provide rbd host first.")
            libvirt.set_vm_disk(vm, params, tmp_dir)

        if needs_agent:
            vm.prepare_guest_agent()

        # The first disk is supposed to include OS
        # We will perform blockcommit operation for it.
        first_disk = vm.get_first_disk_devices()
        blk_source = first_disk['source']
        blk_target = first_disk['target']
        snapshot_flag_files = []

        # get a vm session before snapshot
        session = vm.wait_for_login()
        # do snapshot
        postfix_n = 'snap'
        make_disk_snapshot(postfix_n)

        basename = os.path.basename(blk_source)
        diskname = basename.split(".")[0]
        snap_src_lst = [blk_source]
        if multiple_chain:
            snap_name = "%s.%s1" % (diskname, postfix_n)
            snap_top = os.path.join(tmp_dir, snap_name)
            top_index = snapshot_external_disks.index(snap_top) + 1
            omit_list = snapshot_external_disks[top_index:]
            vm.destroy(gracefully=False)
            vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            disk_xml = vmxml.get_devices(device_type="disk")[0]
            vmxml.del_device(disk_xml)
            disk_dict = {'attrs': {'file': snap_top}}
            disk_xml.source = disk_xml.new_disk_source(**disk_dict)
            vmxml.add_device(disk_xml)
            vmxml.sync()
            vm.start()
            session = vm.wait_for_login()
            postfix_n = 'new_snap'
            make_disk_snapshot(postfix_n)
            snap_src_lst = [blk_source]
            snap_src_lst += snapshot_external_disks
            logging.debug("omit list is %s", omit_list)
            for i in omit_list:
                snap_src_lst.remove(i)
        else:
            # snapshot src file list
            snap_src_lst += snapshot_external_disks
        backing_chain = ''
        for i in reversed(range(4)):
            if i == 0:
                backing_chain += "%s" % snap_src_lst[i]
            else:
                backing_chain += "%s -> " % snap_src_lst[i]

        logging.debug("The backing chain is: %s" % backing_chain)

        # check snapshot disk xml backingStore is expected
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        disks = vmxml.devices.by_device_tag('disk')
        disk_xml = None
        for disk in disks:
            if disk.target['dev'] != blk_target:
                continue
            else:
                disk_xml = disk.xmltreefile
                logging.debug("the target disk xml after snapshot is %s",
                              disk_xml)
                break

        if not disk_xml:
            raise error.TestFail("Can't find disk xml with target %s" %
                                 blk_target)
        elif libvirt_version.version_compare(1, 2, 4):
            # backingStore element introduced in 1.2.4
            chain_lst = snap_src_lst[::-1]
            ret = check_chain_xml(disk_xml, chain_lst)
            if not ret:
                raise error.TestFail("Domain image backing chain check failed")

        # set blockcommit_options
        top_image = None
        blockcommit_options = "--wait --verbose"

        if with_timeout:
            blockcommit_options += " --timeout 1"

        if base_option == "shallow":
            blockcommit_options += " --shallow"
        elif base_option == "base":
            if middle_base:
                snap_name = "%s.%s1" % (diskname, postfix_n)
                blk_source = os.path.join(tmp_dir, snap_name)
            blockcommit_options += " --base %s" % blk_source

        if top_inactive:
            snap_name = "%s.%s2" % (diskname, postfix_n)
            top_image = os.path.join(tmp_dir, snap_name)
            blockcommit_options += " --top %s" % top_image
        else:
            blockcommit_options += " --active"
            if pivot_opt:
                blockcommit_options += " --pivot"

        if vm_state == "shut off":
            vm.destroy(gracefully=True)

        if with_active_commit:
            # an inactive commit following an active commit will fail (bug 1135339)
            cmd = "virsh blockcommit %s %s --active --pivot" % (vm_name,
                                                                blk_target)
            cmd_session = aexpect.ShellSession(cmd)

        # Run test case
        # Active commit is not supported on rbd-based disks (bug 1200726)
        result = virsh.blockcommit(vm_name, blk_target,
                                   blockcommit_options, **virsh_dargs)

        # Check status_error
        libvirt.check_exit_status(result, status_error)
        if result.exit_status and status_error:
            return

        while True:
            vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)

            disks = vmxml.devices.by_device_tag('disk')
            for disk in disks:
                if disk.target['dev'] != blk_target:
                    continue
                else:
                    disk_xml = disk.xmltreefile
                    break

            if not top_inactive:
                disk_mirror = disk_xml.find('mirror')
                if '--pivot' not in blockcommit_options:
                    if disk_mirror is not None:
                        job_type = disk_mirror.get('job')
                        job_ready = disk_mirror.get('ready')
                        src_element = disk_mirror.find('source')
                        disk_src_file = None
                        for elem in ('file', 'name', 'dev'):
                            elem_val = src_element.get(elem)
                            if elem_val:
                                disk_src_file = elem_val
                                break
                        err_msg = "blockcommit base source "
                        err_msg += "%s not expected" % disk_src_file
                        if '--shallow' in blockcommit_options:
                            if not multiple_chain:
                                if disk_src_file != snap_src_lst[2]:
                                    raise error.TestFail(err_msg)
                            else:
                                if disk_src_file != snap_src_lst[3]:
                                    raise error.TestFail(err_msg)
                        else:
                            if disk_src_file != blk_source:
                                raise error.TestFail(err_msg)
                        if libvirt_version.version_compare(1, 2, 7):
                            # The job attribute mentions which API started the
                            # operation since 1.2.7.
                            if job_type != 'active-commit':
                                raise error.TestFail("blockcommit job type '%s'"
                                                     " not expected" % job_type)
                            if job_ready != 'yes':
                                # The attribute ready, if present, tracks
                                # progress of the job: yes if the disk is known
                                # to be ready to pivot, or, since 1.2.7, abort
                                # or pivot if the job is in the process of
                                # completing.
                                continue
                            else:
                                logging.debug("after active block commit job "
                                              "ready for pivot, the target disk"
                                              " xml is %s", disk_xml)
                                break
                        else:
                            break
                    else:
                        break
                else:
                    if disk_mirror is None:
                        logging.debug(disk_xml)
                        if "--shallow" in blockcommit_options:
                            chain_lst = snap_src_lst[::-1]
                            chain_lst.pop(0)
                            ret = check_chain_xml(disk_xml, chain_lst)
                            if not ret:
                                raise error.TestFail("Domain image backing "
                                                     "chain check failed")
                        elif "--base" in blockcommit_options:
                            chain_lst = snap_src_lst[::-1]
                            base_index = chain_lst.index(blk_source)
                            chain_lst = chain_lst[base_index:]
                            ret = check_chain_xml(disk_xml, chain_lst)
                            if not ret:
                                raise error.TestFail("Domain image backing "
                                                     "chain check failed")
                        break
                    else:
                        # keep waiting for the pivot to finish after the commit is synced
                        continue
            else:
                logging.debug("after inactive commit the disk xml is: %s"
                              % disk_xml)
                if libvirt_version.version_compare(1, 2, 4):
                    if "--shallow" in blockcommit_options:
                        chain_lst = snap_src_lst[::-1]
                        chain_lst.remove(top_image)
                        ret = check_chain_xml(disk_xml, chain_lst)
                        if not ret:
                            raise error.TestFail("Domain image backing chain "
                                                 "check failed")
                    elif "--base" in blockcommit_options:
                        chain_lst = snap_src_lst[::-1]
                        top_index = chain_lst.index(top_image)
                        base_index = chain_lst.index(blk_source)
                        val_tmp = []
                        for i in range(top_index, base_index):
                            val_tmp.append(chain_lst[i])
                        for i in val_tmp:
                            chain_lst.remove(i)
                        ret = check_chain_xml(disk_xml, chain_lst)
                        if not ret:
                            raise error.TestFail("Domain image backing chain "
                                                 "check failed")
                    break
                else:
                    break

        # Check flag files
        if not vm_state == "shut off" and not multiple_chain:
            for flag in snapshot_flag_files:
                status, output = session.cmd_status_output("cat %s" % flag)
                if status:
                    raise error.TestFail("blockcommit failed: %s" % output)

        if not pivot_opt and snap_in_mirror:
            # do snapshot during mirror phase
            snap_path = "%s/%s.snap" % (tmp_dir, vm_name)
            snap_opt = "--disk-only --atomic --no-metadata "
            snap_opt += "vda,snapshot=external,file=%s" % snap_path
            snapshot_external_disks.append(snap_path)
            cmd_result = virsh.snapshot_create_as(vm_name, snap_opt,
                                                  ignore_status=True,
                                                  debug=True)
            libvirt.check_exit_status(cmd_result, snap_in_mirror_err)
    finally:
        if vm.is_alive():
            vm.destroy(gracefully=False)
        # Recover xml of vm.
        vmxml_backup.sync("--snapshots-metadata")
        if cmd_session:
            cmd_session.close()
        for disk in snapshot_external_disks:
            if os.path.exists(disk):
                os.remove(disk)

        if disk_src_protocol == 'iscsi':
            libvirt.setup_or_cleanup_iscsi(is_setup=False,
                                           restart_tgtd=restart_tgtd)
        elif disk_src_protocol == 'gluster':
            libvirt.setup_or_cleanup_gluster(False, vol_name, brick_path)
            libvirtd = utils_libvirtd.Libvirtd()
            libvirtd.restart()
        elif disk_src_protocol == 'netfs':
            restore_selinux = params.get('selinux_status_bak')
            libvirt.setup_or_cleanup_nfs(is_setup=False,
                                         restore_selinux=restore_selinux)
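
The backing chain logged above is assembled with a hard-coded reversed(range(4)), which only works when snap_src_lst holds exactly four images. Purely as a hedged sketch (not part of the original test), the same "newest -> ... -> base" string can be built for any chain length and passed to logging.debug lazily:

import logging

def format_backing_chain(snap_src_lst):
    # Newest overlay first, base image last, joined with " -> ".
    return " -> ".join(reversed(snap_src_lst))

snap_src_lst = ["base.img", "base.snap1", "base.snap2", "base.snap3"]
logging.debug("The backing chain is: %s", format_backing_chain(snap_src_lst))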

Example 21

Project: theanolm
Source File: network.py
View license
    def __init__(self, vocabulary, architecture, mode=None, profile=False):
        """Initializes the neural network parameters for all layers, and
        creates Theano shared variables from them.

        :type vocabulary: Vocabulary
        :param vocabulary: mapping between word IDs and word classes

        :type architecture: Architecture
        :param architecture: an object that describes the network architecture

        :type mode: Network.Mode
        :param mode: selects mini-batch or single time step processing

        :type profile: bool
        :param profile: if set to True, creates a Theano profile object
        """

        self.vocabulary = vocabulary
        self.architecture = architecture
        self.mode = self.Mode() if mode is None else mode

        M1 = 2147483647
        M2 = 2147462579
        random_seed = [
            numpy.random.randint(0, M1),
            numpy.random.randint(0, M1),
            numpy.random.randint(1, M1),
            numpy.random.randint(0, M2),
            numpy.random.randint(0, M2),
            numpy.random.randint(1, M2)]
        self.random = RandomStreams(random_seed)

        # Word and class inputs will be available to NetworkInput layers.
        self.input_word_ids = tensor.matrix('network/input_word_ids', dtype='int64')
        self.input_class_ids = tensor.matrix('network/input_class_ids', dtype='int64')
        if self.mode.minibatch:
            self.input_word_ids.tag.test_value = test_value(
                size=(100, 16),
                max_value=vocabulary.num_words())
            self.input_class_ids.tag.test_value = test_value(
                size=(100, 16),
                max_value=vocabulary.num_classes())
        else:
            self.input_word_ids.tag.test_value = test_value(
                size=(1, 16),
                max_value=vocabulary.num_words())
            self.input_class_ids.tag.test_value = test_value(
                size=(1, 16),
                max_value=vocabulary.num_classes())

        # Recurrent layers will create these lists, used to initialize state
        # variables of appropriate sizes, for doing forward passes one step at a
        # time.
        self.recurrent_state_input = []
        self.recurrent_state_size = []

        # Create the layers.
        logging.debug("Creating layers.")
        self.layers = OrderedDict()
        for input_options in architecture.inputs:
            input = NetworkInput(input_options, self)
            self.layers[input.name] = input
        for layer_description in architecture.layers:
            layer_options = self._layer_options_from_description(
                layer_description)
            if layer_options['name'] == architecture.output_layer:
                layer_options['size'] = vocabulary.num_classes()
            layer = create_layer(layer_options, self, profile=profile)
            self.layers[layer.name] = layer
        self.output_layer = self.layers[architecture.output_layer]

        # This list will be filled by the recurrent layers to contain the
        # recurrent state outputs, for doing forward passes one step at a time.
        self.recurrent_state_output = [None] * len(self.recurrent_state_size)

        # This input variable can be used to specify the classes whose
        # probabilities will be computed, instead of the whole distribution.
        self.target_class_ids = tensor.matrix('network/target_class_ids',
                                              dtype='int64')
        if self.mode.minibatch:
            self.target_class_ids.tag.test_value = test_value(
                size=(100, 16),
                max_value=vocabulary.num_classes())
        else:
            self.target_class_ids.tag.test_value = test_value(
                size=(1, 16),
                max_value=vocabulary.num_classes())

        # This input variable is used only for detecting <unk> target words.
        self.target_word_ids = tensor.matrix('network/target_word_ids',
                                             dtype='int64')
        if self.mode.minibatch:
            self.target_word_ids.tag.test_value = test_value(
                size=(100, 16),
                max_value=vocabulary.num_words())
        else:
            self.target_word_ids.tag.test_value = test_value(
                size=(1, 16),
                max_value=vocabulary.num_words())

        # Create initial parameter values.
        logging.debug("Initializing parameters.")
        self.param_init_values = OrderedDict()
        num_params = 0
        for layer in self.layers.values():
            for name, value in layer.param_init_values.items():
                logging.debug("- %s size=%d", name, value.size)
                num_params += value.size
            self.param_init_values.update(layer.param_init_values)
        logging.debug("Total number of parameters: %d", num_params)

        # Create Theano shared variables.
        self.params = {name: theano.shared(value, name)
                       for name, value in self.param_init_values.items()}
        for layer in self.layers.values():
            layer.set_params(self.params)

        # mask is used to mask out the rest of the input matrix, when a sequence
        # is shorter than the maximum sequence length. The mask is kept as int8
        # data type, which is how Theano stores booleans.
        if self.mode.minibatch:
            self.mask = tensor.matrix('network/mask', dtype='int8')
            self.mask.tag.test_value = test_value(
                size=(100, 16),
                max_value=True)
        else:
            self.mask = tensor.ones(self.input_word_ids.shape, dtype='int8')

        # Dropout layer needs to know whether we are training or evaluating.
        self.is_training = tensor.scalar('network/is_training', dtype='int8')
        self.is_training.tag.test_value = 1

        # Softmax layer needs to know how many noise words to sample for noise-
        # contrastive estimation.
        self.num_noise_samples = tensor.scalar('network/num_noise_samples',
                                               dtype='int64')
        self.num_noise_samples.tag.test_value = 100

        for layer in self.layers.values():
            layer.create_structure()
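
The parameter bookkeeping above logs one line per parameter tensor and then the total. A minimal standalone sketch of that logging pattern (the dictionary below is an assumption standing in for layer.param_init_values) uses %-style arguments so the message is only formatted when DEBUG logging is enabled:

import logging
import numpy

param_init_values = {
    "layer1/W": numpy.zeros((100, 100)),
    "layer1/b": numpy.zeros(100),
}
num_params = 0
for name, value in param_init_values.items():
    logging.debug("- %s size=%d", name, value.size)
    num_params += value.size
logging.debug("Total number of parameters: %d", num_params)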

Example 22

Project: RoBO
Source File: dngo.py
View license
    def train(self, X, Y, **kwargs):
        """
        Trains the model on the provided data.

        Parameters
        ----------
        X: np.ndarray (N, D)
            Input datapoints. The dimensionality of X is (N, D),
            with N as the number of points and D is the number of features.
        Y: np.ndarray (N, T)
            The corresponding target values.
            The dimensionality of Y is (N, T), where N has to
            match the number of points of X and T is the number of objectives
        """
        # Normalize inputs
        self.X = X
        self.X_mean = np.mean(X)
        self.X_std = np.std(X)
        self.norm_X = (X - self.X_mean) / self.X_std

        if self.X.shape[0] <= self.batch_size:
            batch_size = self.X.shape[0]
        else:
            batch_size = self.batch_size

        # Normalize outputs
        self.Y_mean = np.mean(Y)
        self.Y_std = np.std(Y)
        self.Y = (Y - self.Y_mean) / self.Y_std
        #self.Y = Y
        start_time = time.time()

        # Create the neural network
        features = X.shape[1]

        self.learning_rate = theano.shared(np.array(self.init_learning_rate,
                                                dtype=theano.config.floatX))
        self.network = self._build_net(self.input_var, features)



        prediction = lasagne.layers.get_output(self.network)

        # Define loss function for training
        loss = T.mean(T.square(prediction - self.target_var)) / 0.001

        # Add l2 regularization for the weights
        l2_penalty = self.l2 * lasagne.regularization.regularize_network_params(
            self.network, lasagne.regularization.l2)
        loss += l2_penalty
        loss = loss.mean()

        params = lasagne.layers.get_all_params(self.network, trainable=True)


        updates = lasagne.updates.adam(loss, params,
                                        learning_rate=self.learning_rate)


        logging.debug("... compiling theano functions")
        self.train_fn = theano.function([self.input_var, self.target_var], loss,
                                        updates=updates,
                                        allow_input_downcast=True)

        # Start training
        lc = np.zeros([self.num_epochs])
        for epoch in range(self.num_epochs):

            epoch_start_time = time.time()

            # Full pass over the training data:
            train_err = 0
            train_batches = 0

            for batch in self.iterate_minibatches(self.norm_X, self.Y,
                                            batch_size, shuffle=True):
                inputs, targets = batch
                train_err += self.train_fn(inputs, targets)
                train_batches += 1

            lc[epoch] = train_err / train_batches
            logging.debug("Epoch {} of {}".format(epoch + 1, self.num_epochs))
            curtime = time.time()
            epoch_time = curtime - epoch_start_time
            total_time = curtime - start_time
            logging.debug("Epoch time {:.3f}s, "
                 "total time {:.3f}s".format(epoch_time, total_time))
            logging.debug("Training loss:\t\t{:.5g}".format(train_err / train_batches))

            #Adapt the learning rate
            if epoch % self.adapt_epoch == 0:
                self.learning_rate.set_value(
                            np.float32(self.init_learning_rate * 0.1))

        # Design matrix
        layers = lasagne.layers.get_all_layers(self.network)
        self.Theta = lasagne.layers.get_output(layers[:-1], self.norm_X)[-1].eval()

        if self.do_optimize:
            if self.do_mcmc:
                self.sampler = emcee.EnsembleSampler(self.n_hypers,
                                                 2,
                                                 self.marginal_log_likelihood)

                # Do a burn-in in the first iteration
                if not self.burned:
                    # Initialize the walkers by sampling from the prior
                    self.p0 = self.prior.sample_from_prior(self.n_hypers)
                    # Run MCMC sampling
                    self.p0, _, _ = self.sampler.run_mcmc(self.p0,
                                                          self.burnin_steps)

                    self.burned = True

                # Start sampling
                pos, _, _ = self.sampler.run_mcmc(self.p0,
                                                  self.chain_length)

                # Save the current position, it will be the startpoint in
                # the next iteration
                self.p0 = pos

                # Take the last samples from each walker
                self.hypers = np.exp(self.sampler.chain[:, -1])
            else:
                # Optimize hyperparameters of the Bayesian linear regression
                res = optimize.fmin(self.nll, np.random.rand(2))
                self.hypers = [[np.exp(res[0]), np.exp(res[1])]]
        else:

            self.hypers = [[self.alpha, self.beta]]

        logging.info("Hypers: %s" % self.hypers)
        self.models = []
        for sample in self.hypers:

            # Instantiate a model for each hyperparameter configuration
            model = BayesianLinearRegression(alpha=sample[0],
                                             beta=sample[1],
                                             basis_func=None)
            model.train(self.Theta, self.Y, do_optimize=False)

            self.models.append(model)
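
The training loop above reports per-epoch progress and timing through logging.debug. A hedged, self-contained sketch of just that timing pattern (the loop body is a placeholder for the real minibatch pass):

import logging
import time

num_epochs = 3
start_time = time.time()
for epoch in range(num_epochs):
    epoch_start_time = time.time()
    # ... one full pass over the training data would happen here ...
    curtime = time.time()
    logging.debug("Epoch %d of %d", epoch + 1, num_epochs)
    logging.debug("Epoch time %.3fs, total time %.3fs",
                  curtime - epoch_start_time, curtime - start_time)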

Example 23

View license
def main():
    """
    Read input zip file, minhash the documents in it and put them in buckets
    The zip file should have been created with data_prep/prepare_blobstore_zips
    """
    try:
        filename = os.path.abspath(sys.argv[1])
    except IndexError:
        print 'filename not provided'
        exit(1)
    try:
        zip_reader = zipfile.ZipFile(filename)
    except IOError:
        print 'unable to read file {file}'.format(file = filename)
        exit(1)
    except zipfile.BadZipfile:
        print 'file {file} is not a zip file'.format(file = filename)
        exit(1)

    infolist = zip_reader.infolist()
    mtxname = filename.replace('.zip', '.matrix.csv')
    dummydoc = Document.create()            # force the creation of the table
    dataset = DatasetPB.create('bash', filename)    # force the creation of the table and filling it with a row
    # logging.debug('%s %s', dataset.ds_key, dataset.filename)
    dataset = DatasetPB.find(dataset.ds_key)
    start = time.time()
    all_stats = defaultdict(float)
    new_docs_count = 0
    docs_cache = Cache(max_size = 15)
    buckets = set()
        
    for info in infolist:
        with zip_reader.open(info) as file_reader:
            logging.debug('Reading file %s', info.filename)
            stats = {}
            for line in file_reader.readlines():
                found_pattern = text_file_pattern.search(line)
                doc_id = found_pattern.group(1)
                html = found_pattern.group(2)
                udata=html.decode("utf-8")
                html=udata.encode("ascii","ignore")
                html = html.replace('\\n',' ').replace('\\t',' ').replace("'", "''")
                doc = dataset.create_doc(doc_id, html, stats)
                buckets |= set(doc['buckets'])
                docs_cache.set(doc_id, (html, doc['buckets'] if doc['buckets'] else [], doc['minhashes']))
                if not stats['found']:
                    new_docs_count += 1
                    for stat in stats:
                        if stat != 'found':
                            all_stats[stat] += stats[stat]
                stats = {}
            end = time.time()
            if new_docs_count:
                logging.info('File %s %d seconds, stats: %s over %d docs', info.filename, int(0.5+end-start), all_stats, new_docs_count)
            start = end 
    if new_docs_count:
        for stat in all_stats:
            if stat != 'found':
                all_stats[stat] /= new_docs_count
        logging.info('Average stats: %s over %d docs', all_stats, new_docs_count)
    
    buckets = list(buckets)#[:200]
    with open(mtxname, 'wb') as mtx_handler:
        fileout = csv.writer(mtx_handler, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
        fileout.writerow([' '] + sorted(list(buckets)))
        row_count = 0
        for info in infolist:
            with zip_reader.open(info) as file_reader:
                logging.debug('Reading file %s', info.filename)
                stats = {}
                for line in file_reader.readlines():
                    found_pattern = text_file_pattern.search(line)
                    doc_id = found_pattern.group(1)
                    doc = dataset.create_doc(doc_id, '', stats)
                    doc_buckets = doc['buckets']
                    x_marks = [('n' if _ in doc_buckets else '') for _ in buckets]
                    fileout.writerow(["'"+doc_id]+x_marks)
                    row_count += 1
                    if row_count >= 200:
                        break
            if row_count >= 200:
                break

    outname = filename.replace('.zip', '.dists.csv')
    doc_ids = docs_cache.keys()
    with open(outname, 'wb') as out_handler:
        fileout = csv.writer(out_handler, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
        fileout.writerow(['doc_i', 'doc_j', 'com_bkts', 'jac_dist'])
        for idx in xrange(len(doc_ids)):
            (ihtml, ibkts, imhs) = docs_cache.get(doc_ids[idx])
            for jdx in xrange(idx+1, len(doc_ids)):
                (jhtml, jbkts, jmhs) = docs_cache.get(doc_ids[jdx])
                com_bkts = len(set(ibkts) & set(jbkts))
                jac_dist = 1.0 - reduce(lambda x, y: x+y, map(lambda a,b: a == b, imhs,jmhs)) / float(len(imhs)) 
                # if jac_dist <= 0.1:
                #     lev_pick = 50
                # else:
                #     lev_pick = 100
                # if 0 == int(str(uuid.uuid4()).replace('-',''), 16) % lev_pick:
                #     lev_dist = '%8d' % levenshtein(ihtml, jhtml)
                # else:
                #     lev_dist = '...xx...'
                lev_dist = ''
                logging.debug(' %s | %s, %3d %6.3f %s %s', doc_ids[idx], doc_ids[jdx], 
                              com_bkts, jac_dist, lev_dist, sorted(list(set(ibkts) & set(jbkts))))
                csv_line = [doc_ids[idx], doc_ids[jdx], com_bkts, jac_dist, lev_dist]
                csv_line.extend(sorted(list(set(ibkts) & set(jbkts))))
                fileout.writerow(csv_line)
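
The pairwise loop above estimates the Jaccard distance between two documents from their minhash signatures by counting the positions where the signatures agree, then logs the result with logging.debug. A minimal sketch of that estimate, assuming the two signatures are equal-length lists of hash values (the sample values are made up):

import logging

def minhash_jaccard_distance(sig_a, sig_b):
    # Fraction of signature positions where the two documents disagree.
    matches = sum(1 for a, b in zip(sig_a, sig_b) if a == b)
    return 1.0 - float(matches) / len(sig_a)

sig_i = [3, 7, 7, 1, 9]
sig_j = [3, 2, 7, 1, 5]
logging.debug("jac_dist %6.3f", minhash_jaccard_distance(sig_i, sig_j))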

Example 24

Project: tp-qemu
Source File: ethtool.py
View license
@error.context_aware
def run(test, params, env):
    """
    Test offload functions of ethernet device using ethtool

    1) Log into a guest.
    2) Saving ethtool configuration.
    3) Enable sub function of NIC.
    4) Execute callback function.
    5) Disable sub function of NIC.
    6) Run callback function again.
    7) Run file transfer test.
       7.1) Creating file in source host.
       7.2) Listening network traffic with tcpdump command.
       7.3) Transfer file.
       7.4) Comparing md5sum of the files on guest and host.
    8) Repeat step 3 - 7.
    9) Restore original configuration.

    :param test: QEMU test object.
    :param params: Dictionary with the test parameters.
    :param env: Dictionary with test environment.

    @todo: Not all guests have ethtool installed, so
        find a way to get it installed using yum/apt-get/
        whatever
    """
    def ethtool_get(session):
        feature_pattern = {
            'tx': 'tx.*checksumming',
            'rx': 'rx.*checksumming',
            'sg': 'scatter.*gather',
            'tso': 'tcp.*segmentation.*offload',
            'gso': 'generic.*segmentation.*offload',
            'gro': 'generic.*receive.*offload',
            'lro': 'large.*receive.*offload',
        }

        o = session.cmd("ethtool -k %s" % ethname)
        status = {}
        for f in feature_pattern.keys():
            try:
                status[f] = re.findall(
                    "%s: (.*)" % feature_pattern.get(f), o)[0]
            except IndexError:
                status[f] = None
                logging.debug("(%s) failed to get status '%s'", ethname, f)

        logging.debug("(%s) offload status: '%s'", ethname, str(status))
        return status

    def ethtool_set(session, status):
        """
        Set ethernet device offload status

        :param status: New status will be changed to
        """
        txt = "Set offload status for device "
        txt += "'%s': %s" % (ethname, str(status))
        error.context(txt, logging.info)

        cmd = "ethtool -K %s " % ethname
        cmd += " ".join([o + ' ' + s for o, s in status.items()])
        err_msg = "Failed to set offload status for device '%s'" % ethname
        try:
            session.cmd_output_safe(cmd)
        except aexpect.ShellCmdError, e:
            logging.error("%s, detail: %s", err_msg, e)
            return False

        curr_status = dict((k, v) for k, v in ethtool_get(session).items()
                           if k in status.keys())
        if curr_status != status:
            logging.error("%s, got: '%s', expect: '%s'", err_msg,
                          str(curr_status), str(status))
            return False

        return True

    def ethtool_save_params(session):
        error.context("Saving ethtool configuration", logging.info)
        return ethtool_get(session)

    def ethtool_restore_params(session, status):
        error.context("Restoring ethtool configuration", logging.info)
        ethtool_set(session, status)

    def compare_md5sum(name):
        txt = "Comparing md5sum of the files on guest and host"
        error.context(txt, logging.info)
        host_result = utils.hash_file(name, method="md5")
        try:
            o = session.cmd_output("md5sum %s" % name)
            guest_result = re.findall("\w+", o)[0]
        except IndexError:
            logging.error("Could not get file md5sum in guest")
            return False
        logging.debug("md5sum: guest(%s), host(%s)", guest_result, host_result)
        return guest_result == host_result

    def transfer_file(src):
        """
        Transfer file by scp, use tcpdump to capture packets, then check the
        return string.

        :param src: Source host of transfer file
        :return: Tuple (status, error msg/tcpdump result)
        """
        sess = vm.wait_for_login(timeout=login_timeout)
        session.cmd_output("rm -rf %s" % filename)
        dd_cmd = ("dd if=/dev/urandom of=%s bs=1M count=%s" %
                  (filename, params.get("filesize")))
        failure = (False, "Failed to create file using dd, cmd: %s" % dd_cmd)
        txt = "Creating file in source host, cmd: %s" % dd_cmd
        error.context(txt, logging.info)
        ethname = utils_net.get_linux_ifname(session,
                                             vm.get_mac_address(0))
        tcpdump_cmd = "tcpdump -lep -i %s -s 0 tcp -vv port ssh" % ethname
        if src == "guest":
            tcpdump_cmd += " and src %s" % guest_ip
            copy_files_func = vm.copy_files_from
            try:
                sess.cmd_output(dd_cmd, timeout=360)
            except aexpect.ShellCmdError, e:
                return failure
        else:
            tcpdump_cmd += " and dst %s" % guest_ip
            copy_files_func = vm.copy_files_to
            try:
                utils.system(dd_cmd)
            except error.CmdError, e:
                return failure

        # only capture the new tcp port after offload setup
        original_tcp_ports = re.findall("tcp.*:(\d+).*%s" % guest_ip,
                                        utils.system_output("/bin/netstat -nap"))

        for i in original_tcp_ports:
            tcpdump_cmd += " and not port %s" % i

        txt = "Listening traffic using command: %s" % tcpdump_cmd
        error.context(txt, logging.info)
        sess.sendline(tcpdump_cmd)
        if not utils_misc.wait_for(
                lambda: session.cmd_status("pgrep tcpdump") == 0, 30):
            return (False, "Tcpdump process wasn't launched")

        txt = "Transferring file %s from %s" % (filename, src)
        error.context(txt, logging.info)
        try:
            copy_files_func(filename, filename)
        except remote.SCPError, e:
            return (False, "File transfer failed (%s)" % e)

        session.cmd("killall tcpdump")
        try:
            tcpdump_string = sess.read_up_to_prompt(timeout=60)
        except aexpect.ExpectError:
            return (False, "Failed to read tcpdump's output")

        if not compare_md5sum(filename):
            return (False, "Failure, md5sum mismatch")
        return (True, tcpdump_string)

    def tx_callback(status="on"):
        s, o = transfer_file("guest")
        if not s:
            logging.error(o)
            return False
        return True

    def rx_callback(status="on"):
        s, o = transfer_file("host")
        if not s:
            logging.error(o)
            return False
        return True

    def so_callback(status="on"):
        s, o = transfer_file("guest")
        if not s:
            logging.error(o)
            return False
        error.context("Check if contained large frame", logging.info)
        # MTU: default IPv4 MTU is 1500 Bytes, ethernet header is 14 Bytes
        return (status == "on") ^ (len([i for i in re.findall(
                                   "length (\d*):", o) if int(i) > mtu]) == 0)

    def ro_callback(status="on"):
        s, o = transfer_file("host")
        if not s:
            logging.error(o)
            return False
        return True

    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    error.context("Log into a guest.", logging.info)
    login_timeout = int(params.get("login_timeout", 360))
    session = vm.wait_for_login(timeout=login_timeout)

    # Let's just error the test if we identify that there's no ethtool
    # installed
    error.context("Check whether ethtool installed in guest.")
    session.cmd("ethtool -h")
    mtu = 1514
    pretest_status = {}
    filename = "/tmp/ethtool.dd"
    guest_ip = vm.get_address()
    error.context("Try to get ethernet device name in guest.")
    ethname = utils_net.get_linux_ifname(session, vm.get_mac_address(0))

    supported_features = params.get("supported_features")
    if supported_features:
        supported_features = supported_features.split()
    else:
        raise error.TestError("No supported features set on the parameters")

    test_matrix = {
        # type:(callback,    (dependence), (exclude)
        "tx": (tx_callback, (), ()),
        "rx": (rx_callback, (), ()),
        "sg": (tx_callback, ("tx",), ()),
        "tso": (so_callback, ("tx", "sg",), ("gso",)),
        "gso": (so_callback, (), ("tso",)),
        "gro": (ro_callback, ("rx",), ("lro",)),
        "lro": (rx_callback, (), ("gro",)),
    }
    pretest_status = ethtool_save_params(session)
    failed_tests = []
    try:
        for f_type in supported_features:
            callback = test_matrix[f_type][0]

            offload_stat = {f_type: "on"}
            offload_stat.update(dict.fromkeys(test_matrix[f_type][1], "on"))
            offload_stat.update(dict.fromkeys(test_matrix[f_type][2], "off"))
            if not ethtool_set(session, offload_stat):
                e_msg = "Failed to set offload status"
                logging.error(e_msg)
                failed_tests.append(e_msg)

            txt = "Run callback function %s" % callback.func_name
            error.context(txt, logging.info)

            # Some older kernel versions split packets by GSO
            # before tcpdump can capture the big packet, which
            # corrupts our results. Disable check when GSO is
            # enabled.
            if not callback(status="on") and f_type != "gso":
                e_msg = "Callback failed after enabling %s" % f_type
                logging.error(e_msg)
                failed_tests.append(e_msg)

            if not ethtool_set(session, {f_type: "off"}):
                e_msg = "Failed to disable %s" % f_type
                logging.error(e_msg)
                failed_tests.append(e_msg)
            txt = "Run callback function %s" % callback.func_name
            error.context(txt, logging.info)
            if not callback(status="off"):
                e_msg = "Callback failed after disabling %s" % f_type
                logging.error(e_msg)
                failed_tests.append(e_msg)

        if failed_tests:
            raise error.TestFail("Failed tests: %s" % failed_tests)

    finally:
        try:
            if session:
                session.close()
        except Exception, detail:
            logging.error("Fail to close session: '%s'", detail)

        try:
            session = vm.wait_for_serial_login(timeout=login_timeout)
            ethtool_restore_params(session, pretest_status)
        except Exception, detail:
            logging.warn("Could not restore parameter of"
                         " eth card: '%s'", detail)

Example 25

Project: tp-libvirt
Source File: virsh_blockpull.py
View license
def run(test, params, env):
    """
    Test command: virsh blockpull <domain> <path>

    1) Prepare test environment.
    2) Populate a disk from its backing image.
    3) Recover test environment.
    4) Check result.
    """

    def make_disk_snapshot():
        # Make four external snapshots for disks only
        for count in range(1, 5):
            snap_xml = snapshot_xml.SnapshotXML()
            snapshot_name = "snapshot_test%s" % count
            snap_xml.snap_name = snapshot_name
            snap_xml.description = "Snapshot Test %s" % count

            # Add all disks into xml file.
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            disks = vmxml.devices.by_device_tag('disk')
            new_disks = []
            for src_disk_xml in disks:
                disk_xml = snap_xml.SnapDiskXML()
                disk_xml.xmltreefile = src_disk_xml.xmltreefile

                # Skip cdrom
                if disk_xml.device == "cdrom":
                    continue
                del disk_xml.device
                del disk_xml.address
                disk_xml.snapshot = "external"
                disk_xml.disk_name = disk_xml.target['dev']

                # Only qcow2 works as external snapshot file format, update it
                # here
                driver_attr = disk_xml.driver
                driver_attr.update({'type': 'qcow2'})
                disk_xml.driver = driver_attr

                new_attrs = disk_xml.source.attrs
                if disk_xml.source.attrs.has_key('file'):
                    file_name = disk_xml.source.attrs['file']
                    new_file = "%s.snap%s" % (file_name.split('.')[0],
                                              count)
                    snapshot_external_disks.append(new_file)
                    new_attrs.update({'file': new_file})
                    hosts = None
                elif (disk_xml.source.attrs.has_key('name') and
                      disk_src_protocol == 'gluster'):
                    src_name = disk_xml.source.attrs['name']
                    new_name = "%s.snap%s" % (src_name.split('.')[0],
                                              count)
                    new_attrs.update({'name': new_name})
                    snapshot_external_disks.append(new_name)
                    hosts = disk_xml.source.hosts
                elif (disk_xml.source.attrs.has_key('dev') or
                      disk_xml.source.attrs.has_key('name')):
                    if (disk_xml.type_name == 'block' or
                            disk_src_protocol in ['iscsi', 'rbd']):
                        # Use local file as external snapshot target for block
                        # and iscsi network type.
                        # As a block device is treated as raw format by
                        # default, it is not fit for an external disk
                        # snapshot target. A workaround is to use qemu-img
                        # again with the target.
                        # And external active snapshots are not supported on
                        # 'network' disks using 'iscsi' protocol
                        disk_xml.type_name = 'file'
                        if new_attrs.has_key('dev'):
                            del new_attrs['dev']
                        elif new_attrs.has_key('name'):
                            del new_attrs['name']
                            del new_attrs['protocol']
                        new_file = "%s/blk_src_file.snap%s" % (tmp_dir, count)
                        snapshot_external_disks.append(new_file)
                        new_attrs.update({'file': new_file})
                        hosts = None

                new_src_dict = {"attrs": new_attrs}
                if hosts:
                    new_src_dict.update({"hosts": hosts})
                disk_xml.source = disk_xml.new_disk_source(**new_src_dict)

                new_disks.append(disk_xml)

            snap_xml.set_disks(new_disks)
            snapshot_xml_path = snap_xml.xml
            logging.debug("The snapshot xml is: %s" % snap_xml.xmltreefile)

            options = "--disk-only --xmlfile %s " % snapshot_xml_path

            snapshot_result = virsh.snapshot_create(
                vm_name, options, debug=True)

            if snapshot_result.exit_status != 0:
                raise error.TestFail(snapshot_result.stderr)

            # Create a file flag in VM after each snapshot
            flag_file = tempfile.NamedTemporaryFile(prefix=("snapshot_test_"),
                                                    dir="/tmp")
            file_path = flag_file.name
            flag_file.close()

            status, output = session.cmd_status_output("touch %s" % file_path)
            if status:
                raise error.TestFail("Touch file in vm failed. %s" % output)
            snapshot_flag_files.append(file_path)

    # MAIN TEST CODE ###
    # Process cartesian parameters
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    needs_agent = "yes" == params.get("needs_agent", "yes")
    replace_vm_disk = "yes" == params.get("replace_vm_disk", "no")
    snap_in_mirror = "yes" == params.get("snap_in_mirror", 'no')
    snap_in_mirror_err = "yes" == params.get("snap_in_mirror_err", 'no')
    bandwidth = params.get("bandwidth", None)
    with_timeout = ("yes" == params.get("with_timeout_option", "no"))
    status_error = ("yes" == params.get("status_error", "no"))
    base_option = params.get("base_option", None)
    keep_relative = "yes" == params.get("keep_relative", 'no')
    virsh_dargs = {'debug': True}

    # Process domain disk device parameters
    disk_type = params.get("disk_type")
    disk_target = params.get("disk_target", 'vda')
    disk_src_protocol = params.get("disk_source_protocol")
    restart_tgtd = params.get("restart_tgtd", "no")
    vol_name = params.get("vol_name")
    tmp_dir = data_dir.get_tmp_dir()
    pool_name = params.get("pool_name", "gluster-pool")
    brick_path = os.path.join(tmp_dir, pool_name)

    # A backup of original vm
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    logging.debug("original xml is %s", vmxml_backup)

    # Abort the test if there are snapshots already
    existing_snaps = virsh.snapshot_list(vm_name)
    if len(existing_snaps) != 0:
        raise error.TestFail("There are snapshots created for %s already" % vm_name)

    snapshot_external_disks = []
    try:
        if disk_src_protocol == 'iscsi' and disk_type == 'network':
            if not libvirt_version.version_compare(1, 0, 4):
                raise error.TestNAError("'iscsi' disk doesn't support in"
                                        " current libvirt version.")
        if disk_src_protocol == 'gluster':
            if not libvirt_version.version_compare(1, 2, 7):
                raise error.TestNAError("Snapshot on glusterfs not"
                                        " support in current "
                                        "version. Check more info "
                                        " with https://bugzilla.re"
                                        "dhat.com/show_bug.cgi?id="
                                        "1017289")

        # Set vm xml and guest agent
        if replace_vm_disk:
            if disk_src_protocol == "rbd" and disk_type == "network":
                src_host = params.get("disk_source_host", "EXAMPLE_HOSTS")
                mon_host = params.get("mon_host", "EXAMPLE_MON_HOST")
                if src_host.count("EXAMPLE") or mon_host.count("EXAMPLE"):
                    raise error.TestNAError("Please provide ceph host first.")
            libvirt.set_vm_disk(vm, params, tmp_dir)

        if needs_agent:
            vm.prepare_guest_agent()

        # The first disk is supposed to include OS
        # We will perform blockpull operation for it.
        first_disk = vm.get_first_disk_devices()
        blk_source = first_disk['source']
        blk_target = first_disk['target']
        snapshot_flag_files = []

        # get a vm session before snapshot
        session = vm.wait_for_login()
        # do snapshot
        make_disk_snapshot()

        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        logging.debug("The domain xml after snapshot is %s" % vmxml)

        # snapshot src file list
        snap_src_lst = [blk_source]
        snap_src_lst += snapshot_external_disks

        if snap_in_mirror:
            blockpull_options = "--bandwidth 1"
        else:
            blockpull_options = "--wait --verbose"

        if with_timeout:
            blockpull_options += " --timeout 1"

        if bandwidth:
            blockpull_options += " --bandwidth %s" % bandwidth

        if base_option == "async":
            blockpull_options += " --async"

        base_image = None
        base_index = None
        if (libvirt_version.version_compare(1, 2, 4) or
                disk_src_protocol == 'gluster'):
            if base_option == "shallow":
                base_index = 1
                base_image = "%s[%s]" % (disk_target, base_index)
            elif base_option == "base":
                base_index = 2
                base_image = "%s[%s]" % (disk_target, base_index)
            elif base_option == "top":
                base_index = 0
                base_image = "%s[%s]" % (disk_target, base_index)
        else:
            if base_option == "shallow":
                base_image = snap_src_lst[3]
            elif base_option == "base":
                base_image = snap_src_lst[2]
            elif base_option == "top":
                base_image = snap_src_lst[4]

        if base_option and base_image:
            blockpull_options += " --base %s" % base_image

        if keep_relative:
            blockpull_options += " --keep-relative"

        # Run test case
        result = virsh.blockpull(vm_name, blk_target,
                                 blockpull_options, **virsh_dargs)
        status = result.exit_status

        # If the pull job aborted due to the timeout, the exit status
        # differs between RHEL6 (0) and RHEL7 (1)
        if with_timeout and 'Pull aborted' in result.stdout:
            if libvirt_version.version_compare(1, 1, 1):
                status_error = True
            else:
                status_error = False

        # Check status_error
        libvirt.check_exit_status(result, status_error)

        if not status and not with_timeout:
            if snap_in_mirror:
                snap_mirror_path = "%s/snap_mirror" % tmp_dir
                snap_options = "--diskspec vda,snapshot=external,"
                snap_options += "file=%s --disk-only" % snap_mirror_path
                snapshot_external_disks.append(snap_mirror_path)
                ret = virsh.snapshot_create_as(vm_name, snap_options,
                                               ignore_status=True,
                                               debug=True)
                libvirt.check_exit_status(ret, snap_in_mirror_err)
                return

            vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            disks = vmxml.devices.by_device_tag('disk')
            for disk in disks:
                if disk.target['dev'] != blk_target:
                    continue
                else:
                    disk_xml = disk.xmltreefile
                    break

            logging.debug("after pull the disk xml is: %s"
                          % disk_xml)
            if libvirt_version.version_compare(1, 2, 4):
                err_msg = "Domain image backing chain check failed"
                if not base_option or "async" in base_option:
                    chain_lst = snap_src_lst[-1:]
                    ret = check_chain_xml(disk_xml, chain_lst)
                    if not ret:
                        raise error.TestFail(err_msg)
                elif "base" or "shallow" in base_option:
                    chain_lst = snap_src_lst[::-1]
                    if not base_index and base_image:
                        base_index = chain_lst.index(base_image)
                    val_tmp = []
                    for i in range(1, base_index):
                        val_tmp.append(chain_lst[i])
                    for i in val_tmp:
                        chain_lst.remove(i)
                    ret = check_chain_xml(disk_xml, chain_lst)
                    if not ret:
                        raise error.TestFail(err_msg)

        # If base image is the top layer of snapshot chain,
        # virsh blockpull should fail, return directly
        if base_option == "top":
            return

        # Check flag files
        for flag in snapshot_flag_files:
            status, output = session.cmd_status_output("cat %s" % flag)
            if status:
                raise error.TestFail("blockpull failed: %s" % output)

    finally:
        if vm.is_alive():
            vm.destroy(gracefully=False)
        # Recover xml of vm.
        vmxml_backup.sync("--snapshots-metadata")

        if not disk_src_protocol or disk_src_protocol != 'gluster':
            for disk in snapshot_external_disks:
                if os.path.exists(disk):
                    os.remove(disk)

        libvirtd = utils_libvirtd.Libvirtd()

        if disk_src_protocol == 'iscsi':
            libvirt.setup_or_cleanup_iscsi(is_setup=False,
                                           restart_tgtd=restart_tgtd)
        elif disk_src_protocol == 'gluster':
            libvirt.setup_or_cleanup_gluster(False, vol_name, brick_path)
            libvirtd.restart()
        elif disk_src_protocol == 'netfs':
            restore_selinux = params.get('selinux_status_bak')
            libvirt.setup_or_cleanup_nfs(is_setup=False,
                                         restore_selinux=restore_selinux)
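
check_chain_xml, used above to validate the backing chain, is defined elsewhere in the test module and is not shown here. Purely as an illustration (the helper and the XML snippet below are hypothetical, not taken from the test), walking the nested backingStore elements of a disk XML and logging the chain could look like this:

import logging
import xml.etree.ElementTree as ET

disk_xml = ET.fromstring(
    "<disk>"
    "  <source file='/tmp/base.snap2'/>"
    "  <backingStore>"
    "    <source file='/tmp/base.snap1'/>"
    "    <backingStore>"
    "      <source file='/var/lib/libvirt/images/base.img'/>"
    "      <backingStore/>"
    "    </backingStore>"
    "  </backingStore>"
    "</disk>")

def backing_chain_files(disk_elem):
    # Collect the source file of the disk and of each nested backingStore.
    chain = []
    elem = disk_elem
    while elem is not None:
        source = elem.find("source")
        if source is None or source.get("file") is None:
            break
        chain.append(source.get("file"))
        elem = elem.find("backingStore")
    return chain

logging.debug("backing chain from XML: %s", backing_chain_files(disk_xml))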

Example 26

View license
def run(test, params, env):
    """
    Verify the vmcore written by dump-guest-memory by a big guest.

    :param test: QEMU test object.
    :param params: Dictionary with the test parameters.
    :param env: Dictionary with test environment.
    """
    def check_requirements(vm, session):
        """
        Check guest RAM size and guest architecture.

        :param vm: virtual machine.
        :param session: login shell session.
        :raise: error.TestError if the test is misconfigured.
        """
        mem_size = vm.get_memory_size()
        if (mem_size != REQ_GUEST_MEM):
            raise error.TestError("the guest must have %d MB RAM exactly "
                                  "(current: %d MB)" % (REQ_GUEST_MEM,
                                                        mem_size))
        arch = session.cmd("uname -m").rstrip()
        if (arch != REQ_GUEST_ARCH):
            raise error.TestError("this test only supports %s guests "
                                  "(current: %s)" % (REQ_GUEST_ARCH, arch))

    def install_kernel_debuginfo(vm, session, login_timeout):
        """
        In the guest, install a kernel debuginfo package that matches
        the running kernel.

        Debuginfo packages are available for the most recent kernels
        only, so this step may need a kernel upgrade and a corresponding
        VM reboot. Also, the "debuginfo-install" yum utility is not good
        enough for this, because its exit status doesn't seem to reflect
        any failure to find a matching debuginfo package. Only "yum
        install" seems to do that, and only if an individual package is
        requested.

        :param vm: virtual machine. Can be None if the caller demands a
                debuginfo package for the running kernel.
        :param session: login shell session.
        :param login_timeout: passed to vm.reboot() as timeout. Can be
                None if vm is None.
        :return: If the debuginfo package has been successfully
                installed, None is returned. If no debuginfo package
                matching the running guest kernel is available and vm
                is None, an exception is raised; otherwise, the guest
                kernel is upgraded, and a new session is returned
                for the rebooted guest. In this case the next call to
                this function should succeed, using the new session and
                with vm=None.
        :raise: error.TestError (guest uname command failed),
                ShellCmdError (unexpected guest yum command failure),
                exceptions from vm.reboot().
        """
        def install_matching_debuginfo(session):
            try:
                guest_kernel = session.cmd("uname -r").rstrip()
            except ShellCmdError, details:
                raise error.TestError("guest uname command failed: %s" %
                                      details)
            return session.cmd("yum -y install --enablerepo='*debuginfo' "
                               "kernel-debuginfo-%s" % guest_kernel,
                               timeout=LONG_TIMEOUT)

        try:
            output = install_matching_debuginfo(session)
            logging.debug("%s", output)
            new_sess = None
        except ShellCmdError, details:
            if (vm is None):
                raise
            logging.info("failed to install matching debuginfo, "
                         "upgrading kernel")
            logging.debug("shell error was: %s", details)
            output = session.cmd("yum -y upgrade kernel",
                                 timeout=LONG_TIMEOUT)
            logging.debug("%s", output)
            new_sess = vm.reboot(session, timeout=login_timeout)
        return new_sess

    def install_crash(session):
        """
        Install the "crash" utility in the guest.

        :param session: login shell session.
        :raise: exceptions from session.cmd().
        """
        output = session.cmd("yum -y install crash")
        logging.debug("%s", output)

    def check_disk_space(session):
        """
        Check free disk space in the guest before uploading,
        uncompressing and analyzing the vmcore.

        :param session: login shell session.
        :raise: exceptions from session.cmd(); error.TestError if free
                space is insufficient.
        """
        output = session.cmd("rm -f -v %s %s.gz" % (VMCORE_BASE, VMCORE_BASE))
        logging.debug("%s", output)
        output = session.cmd("yum clean all")
        logging.debug("%s", output)
        output = session.cmd("LC_ALL=C df --portability --block-size=1M .")
        logging.debug("%s", output)
        df_megs = int(string.split(output)[10])
        if (df_megs < REQ_GUEST_DF):
            raise error.TestError("insufficient free disk space: %d < %d" %
                                  (df_megs, REQ_GUEST_DF))

    def dump_and_compress(qmp_monitor, vmcore_host):
        """
        Dump the guest vmcore on the host side and compress it.

        Use the "dump-guest-memory" QMP command with paging=false. Start
        a new Python thread that compresses data from a file descriptor
        to a host file. Create a pipe and pass its writeable end to qemu
        for vmcore dumping. Pass the pipe's readable end (with full
        ownership) to the compressor thread. Track references to the
        file descriptions underlying the pipe end fds carefully.

        Compressing the vmcore on the fly, then copying it to the guest,
        then decompressing it inside the guest should be much faster
        than dumping and copying a huge plaintext vmcore, especially on
        rotational media.

        :param qmp_monitor: QMP monitor for the guest.
        :param vmcore_host: absolute pathname of gzipped destination
                file.
        :raise: all sorts of exceptions. No resources should be leaked.
        """
        def compress_from_fd(input_fd, gzfile):
            # Run in a separate thread, take ownership of input_fd.
            try:
                buf = os.read(input_fd, 4096)
                while (buf):
                    gzfile.write(buf)
                    buf = os.read(input_fd, 4096)
            finally:
                # If we've run into a problem, this causes an EPIPE in
                # the qemu process, preventing it from blocking in
                # write() forever.
                os.close(input_fd)

        def dump_vmcore(qmp_monitor, vmcore_fd):
            # Temporarily create another reference to vmcore_fd, in the
            # qemu process. We own the duplicate.
            qmp_monitor.cmd(cmd="getfd",
                            args={"fdname": "%s" % VMCORE_FD_NAME},
                            fd=vmcore_fd)
            try:
                # Includes ownership transfer on success, no need to
                # call the "closefd" command then.
                qmp_monitor.cmd(cmd="dump-guest-memory",
                                args={"paging": False,
                                      "protocol": "fd:%s" % VMCORE_FD_NAME},
                                timeout=LONG_TIMEOUT)
            except:
                qmp_monitor.cmd(cmd="closefd",
                                args={"fdname": "%s" % VMCORE_FD_NAME})
                raise

        gzfile = gzip.open(vmcore_host, "wb", 1)
        try:
            try:
                (read_by_gzip, written_by_qemu) = os.pipe()
                try:
                    compressor = threading.Thread(target=compress_from_fd,
                                                  name="compressor",
                                                  args=(read_by_gzip, gzfile))
                    compressor.start()
                    # Compressor running, ownership of readable end has
                    # been transferred.
                    read_by_gzip = -1
                    try:
                        dump_vmcore(qmp_monitor, written_by_qemu)
                    finally:
                        # Close Python's own reference to the writeable
                        # end as well, so that the compressor can
                        # experience EOF before we try to join it.
                        os.close(written_by_qemu)
                        written_by_qemu = -1
                        compressor.join()
                finally:
                    if (read_by_gzip != -1):
                        os.close(read_by_gzip)
                    if (written_by_qemu != -1):
                        os.close(written_by_qemu)
            finally:
                # Close the gzipped file first, *then* delete it if
                # there was an error.
                gzfile.close()
        except:
            os.unlink(vmcore_host)
            raise

    def verify_vmcore(vm, session, host_compr, guest_compr, guest_plain):
        """
        Verify the vmcore with the "crash" utility in the guest.

        Standard output needs to be searched for "crash:" and "WARNING:"
        strings; the test is successful iff there are no matches and
        "crash" exits successfully.

        :param vm: virtual machine.
        :param session: login shell session.
        :param host_compr: absolute pathname of gzipped vmcore on host,
                source file.
        :param guest_compr: single-component filename of gzipped vmcore
                on guest, destination file.
        :param guest_plain: single-component filename of gunzipped
                vmcore on guest that guest-side gunzip is expected to
                create.
        :raise: vm.copy_files_to() and session.cmd() exceptions;
                error.TestFail if "crash" meets trouble in the vmcore.
        """
        vm.copy_files_to(host_compr, guest_compr)
        output = session.cmd("gzip -d -v %s" % guest_compr,
                             timeout=LONG_TIMEOUT)
        logging.debug("%s", output)

        session.cmd("{ echo bt; echo quit; } > %s" % CRASH_SCRIPT)
        output = session.cmd("crash -i %s "
                             "/usr/lib/debug/lib/modules/$(uname -r)/vmlinux "
                             "%s" % (CRASH_SCRIPT, guest_plain))
        logging.debug("%s", output)
        if (string.find(output, "crash:") >= 0 or
                string.find(output, "WARNING:") >= 0):
            raise error.TestFail("vmcore corrupt")

    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()

    qmp_monitor = vm.get_monitors_by_type("qmp")
    if qmp_monitor:
        qmp_monitor = qmp_monitor[0]
    else:
        raise error.TestError('Could not find a QMP monitor, aborting test')

    login_timeout = int(params.get("login_timeout", 240))
    session = vm.wait_for_login(timeout=login_timeout)
    try:
        check_requirements(vm, session)

        new_sess = install_kernel_debuginfo(vm, session, login_timeout)
        if (new_sess is not None):
            session = new_sess
            install_kernel_debuginfo(None, session, None)

        install_crash(session)
        check_disk_space(session)

        vmcore_compr = "%s.gz" % VMCORE_BASE
        vmcore_host = os.path.join(test.tmpdir, vmcore_compr)
        dump_and_compress(qmp_monitor, vmcore_host)
        try:
            verify_vmcore(vm, session, vmcore_host, vmcore_compr, VMCORE_BASE)
        finally:
            os.unlink(vmcore_host)
    finally:
        session.close()
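
The dump_and_compress() helper above wires os.pipe(), a compressor thread and gzip together so the vmcore is compressed on the fly instead of hitting the disk as plaintext. A minimal standalone sketch of that pattern follows; compress_pipe_to_gzip, writer_func and the final lambda writer are hypothetical stand-ins for qemu's fd-based dump, not part of the test above.

import gzip
import os
import threading


def compress_pipe_to_gzip(gz_path, writer_func):
    # Create a pipe: the writer (qemu's "dump-guest-memory" in the real test)
    # gets the write end, while a background thread drains the read end into
    # a gzip file.
    read_fd, write_fd = os.pipe()
    gzfile = gzip.open(gz_path, "wb", 1)

    def compressor():
        try:
            buf = os.read(read_fd, 4096)
            while buf:
                gzfile.write(buf)
                buf = os.read(read_fd, 4096)
        finally:
            os.close(read_fd)

    thread = threading.Thread(target=compressor, name="compressor")
    thread.start()
    try:
        writer_func(write_fd)
    finally:
        # Closing the write end lets the compressor see EOF before join().
        os.close(write_fd)
        thread.join()
        gzfile.close()


# Illustrative usage: any callable that writes bytes to the given fd will do.
compress_pipe_to_gzip("/tmp/vmcore.gz", lambda fd: os.write(fd, b"fake vmcore data"))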

Example 27

Project: RMG-Py
Source File: adjlist.py
View license
def fromAdjacencyList(adjlist, group=False, saturateH=False):
    """
    Convert a string adjacency list `adjlist` into a set of :class:`Atom` and
    :class:`Bond` objects.
    """
    atoms = []
    atomdict = {}
    bonds = {}
    multiplicity = None
    
    adjlist = adjlist.strip()
    lines = adjlist.splitlines()
    if adjlist == '' or len(lines) == 0:
        raise InvalidAdjacencyListError('Empty adjacency list.')

    # Detect old-style adjacency lists by looking at the last line's syntax
    lastLine = lines[-1].strip()
    while not lastLine:  # Remove any empty lines from the end
        lines.pop()
        lastLine = lines[-1].strip()
    if re_IntermediateAdjList.match(lastLine):
        logging.debug("adjacency list:\n{1}\nline '{0}' looks like an intermediate style adjacency list".format(lastLine, adjlist))
        return fromOldAdjacencyList(adjlist, group=group, saturateH=saturateH)
    if re_OldAdjList.match(lastLine):
        logging.debug("Adjacency list:\n{1}\nline '{0}' looks like an old style adjacency list".format(lastLine, adjlist))
        if not group:
            logging.debug("Will assume implicit H atoms")
        return fromOldAdjacencyList(adjlist, group=group, saturateH=(not group))

    # Interpret the first line if it contains a label
    if len(lines[0].split()) == 1:
        label = lines.pop(0)
        if len(lines) == 0:
            raise InvalidAdjacencyListError('No atoms specified in adjacency list.')
        
    # Interpret the second line if it contains a multiplicity
    if lines[0].split()[0] == 'multiplicity':
        line = lines.pop(0)
        if group:
            match = re.match('\s*multiplicity\s+\[\s*(\d(?:,\s*\d)*)\s*\]\s*$', line)
            if not match:
                rematch = re.match('\s*multiplicity\s+x\s*$', line)
                assert rematch, "Invalid multiplicity line '{0}'. Should be a list like 'multiplicity [1,2,3]' or a wildcard 'multiplicity x'".format(line)
            else:
                # should match "multiplicity [1]" or " multiplicity   [ 1, 2, 3 ]" or " multiplicity [1,2,3]"
                # and whatever's inside the [] (excluding leading and trailing spaces) should be captured as group 1.
                # If a wildcard is desired, this line can be omitted or replaced with 'multiplicity x'
                # Multiplicities must be only one digit (i.e. less than 10)
                # The (?:,\s*\d)* matches patterns like ", 2" zero or more times, but doesn't capture them (because of the leading ?:)
                multiplicities = match.group(1).split(',')
                multiplicity = [int(i) for i in multiplicities]
        else:
            match = re.match('\s*multiplicity\s+\d+\s*$', line)
            assert match, "Invalid multiplicity line '{0}'. Should be an integer like 'multiplicity 2'".format(line)
            multiplicity = int(line.split()[1])
        if len(lines) == 0:
            raise InvalidAdjacencyListError('No atoms specified in adjacency list: \n{0}'.format(adjlist))
    
    mistake1 = re.compile('\{[^}]*\s+[^}]*\}')
    # Iterate over the remaining lines, generating Atom or GroupAtom objects
    for line in lines:

        # Sometimes people put spaces after commas, which messes up the
        # parse-by-whitespace. Examples include '[Cd, Ct]'.
        if mistake1.search(line):
            raise InvalidAdjacencyListError(
                "{1} Shouldn't have spaces inside braces:\n{0}".format(mistake1.search(line).group(), adjlist)
                )

        # Sometimes commas are used to delimit bonds in the bond list,
        # so replace them just in case
        line = line.replace('},{', '} {')
        
        data = line.split()

        # Skip if blank line
        if len(data) == 0: continue

        # First item is index for atom
        # Sometimes these have a trailing period (as if in a numbered list),
        # so remove it just in case
        aid = int(data[0].strip('.'))

        # If second item starts with '*', then atom is labeled
        label = ''; index = 1
        if data[1][0] == '*':
            label = data[1]
            index += 1

        # Next is the element or functional group element
        # A list can be specified with the {,} syntax
        atomType = data[index]
        if atomType[0] == '[':
            if not group:
                raise InvalidAdjacencyListError("Error on:\n{0}\nA molecule should not assign more than one atomtype per atom.".format(adjlist))
            atomType = atomType[1:-1].split(',')
        else:
            atomType = [atomType]
        index += 1
        
        # Next the number of unpaired electrons
        unpairedElectrons = []
        uState = data[index]
        if uState[0] == 'u':
            if uState[1] == '[':
                uState = uState[2:-1].split(',')
            else:
                uState = [uState[1]]
            for u in uState:
                if u == '0':
                    unpairedElectrons.append(0)
                elif u == '1':
                    unpairedElectrons.append(1)
                elif u == '2':
                    unpairedElectrons.append(2)
                elif u == '3':
                    unpairedElectrons.append(3)
                elif u == '4':
                    unpairedElectrons.append(4)
                elif u == 'x':
                    if not group:
                        raise InvalidAdjacencyListError("Error on:\n{0}\nA molecule should not assign a wildcard to number of unpaired electrons.".format(adjlist))
                else:
                    raise InvalidAdjacencyListError('Number of unpaired electrons not recognized on\n{0}.'.format(adjlist))
            index += 1
        else:
            raise InvalidAdjacencyListError('Number of unpaired electrons not defined on\n{0}.'.format(adjlist))
        
        # Next the number of lone electron pairs (if provided)
        lonePairs = []
        if len(data) > index:
            lpState = data[index]
            if lpState[0] == 'p':
                if lpState[1] == '[':
                    lpState = lpState[2:-1].split(',')
                else:
                    lpState = [lpState[1]]
                for l in lpState:
                    if l == '0':
                        lonePairs.append(0)
                    elif l == '1':
                        lonePairs.append(1)
                    elif l == '2':
                        lonePairs.append(2)
                    elif l == '3':
                        lonePairs.append(3)
                    elif l == '4':
                        lonePairs.append(4)
                    elif l == 'x':
                        if not group:
                            raise InvalidAdjacencyListError("Error in adjacency list:\n{0}\nA molecule should not have a wildcard assigned to number of lone pairs.".format(adjlist))
                    else:
                        raise InvalidAdjacencyListError('Error in adjacency list:\n{0}\nNumber of lone electron pairs not recognized.'.format(adjlist))
                index += 1
            else:
                if not group:
                    lonePairs.append(0)
        else:
            if not group:
                lonePairs.append(0)
            
        # Next the number of partial charges (if provided)
        partialCharges = []
        if len(data) > index:
            eState = data[index]
            if eState[0] == 'c':
                if eState[1] == '[':
                    eState = eState[2:-1].split(',')
                else:
                    eState = [eState[1:]]
                for e in eState:
                    if e == '0':
                        partialCharges.append(0)
                    elif e == '+1':
                        partialCharges.append(1)
                    elif e == '+2':
                        partialCharges.append(2)
                    elif e == '+3':
                        partialCharges.append(3)
                    elif e == '+4':
                        partialCharges.append(4)
                    elif e == '-1':
                        partialCharges.append(-1)
                    elif e == '-2':
                        partialCharges.append(-2)
                    elif e == '-3':
                        partialCharges.append(-3)
                    elif e == '-4':
                        partialCharges.append(-4)
                    elif e == 'x':
                        if not group:
                            raise InvalidAdjacencyListError("Error on adjacency list:\n{0}\nA molecule should not have a wildcard assigned to number of charges.".format(adjlist))
                    else:
                        raise InvalidAdjacencyListError('Error on adjacency list:\n{0}\nNumber of partial charges not recognized.'.format(adjlist))
                index += 1
            else:
                if not group:
                    partialCharges.append(0)
        else:
            if not group:
                partialCharges.append(0)
        

        # Next the isotope (if provided)
        isotope = -1
        if len(data) > index:
            iState = data[index]
            if iState[0] == 'i':
                isotope = int(iState[1:])
                index += 1


        # Create a new atom based on the above information
        if group:
            atom = GroupAtom(atomType, unpairedElectrons, partialCharges, label, lonePairs)
        else:
            atom = Atom(atomType[0], unpairedElectrons[0], partialCharges[0], label, lonePairs[0])
            if isotope != -1:
                atom.element = getElement(atom.number, isotope)

        # Add the atom to the list
        atoms.append(atom)
        atomdict[aid] = atom
        
        # Process list of bonds
        bonds[aid] = {}
        for datum in data[index:]:

            # Sometimes commas are used to delimit bonds in the bond list,
            # so strip them just in case
            datum = datum.strip(',')
            
            aid2, comma, order = datum[1:-1].partition(',')
            aid2 = int(aid2)
            if aid == aid2:
                raise InvalidAdjacencyListError('Error in adjacency list:\n{1}\nAttempted to create a bond between atom {0:d} and itself.'.format(aid, adjlist))
            
            if order[0] == '[':
                order = order[1:-1].split(',')
            else:
                order = [order]

            bonds[aid][aid2] = order

    # Check consistency using bonddict
    for atom1 in bonds:
        for atom2 in bonds[atom1]:
            if atom2 not in bonds:
                raise InvalidAdjacencyListError('Error in adjacency list:\n{1}\nAtom {0:d} not in bond dictionary.'.format(atom2, adjlist))
            elif atom1 not in bonds[atom2]:
                raise InvalidAdjacencyListError('Error in adjacency list:\n{2}\nFound bond between {0:d} and {1:d}, but not the reverse.'.format(atom1, atom2, adjlist))
            elif bonds[atom1][atom2] != bonds[atom2][atom1]:
                raise InvalidAdjacencyListError('Error in adjacency list:\n{4}\nFound bonds between {0:d} and {1:d}, but of different orders "{2}" and "{3}".'.format(atom1, atom2, bonds[atom1][atom2], bonds[atom2][atom1], adjlist))

    # Convert bonddict to use Atom[group] and Bond[group] objects
    atomkeys = atomdict.keys()
    atomkeys.sort()
    for aid1 in atomkeys:
        atomkeys2 = bonds[aid1].keys()
        atomkeys2.sort()
        for aid2 in atomkeys2:
            if aid1 < aid2:
                atom1 = atomdict[aid1]
                atom2 = atomdict[aid2]
                order = bonds[aid1][aid2]
                if group:
                    bond = GroupBond(atom1, atom2, order)
                elif len(order) == 1:
                    bond = Bond(atom1, atom2, order[0])
                else:
                    raise InvalidAdjacencyListError('Error in adjacency list:\n{0}\nMultiple bond orders specified for an atom in a Molecule.'.format(adjlist))
                atom1.edges[atom2] = bond
                atom2.edges[atom1] = bond
    
    if saturateH:
        # Add explicit hydrogen atoms to complete structure if desired
        if not group:
            Saturator.saturate(atoms)

    
    # Consistency checks
    if not group:
        # Molecule consistency check
        # Electron and valency consistency check for each atom
        for atom in atoms: ConsistencyChecker.check_partial_charge(atom)

        nRad = sum([atom.radicalElectrons for atom in atoms])
        absolute_spin_per_electron = 1/2.
        if multiplicity == None: multiplicity = 2* (nRad * absolute_spin_per_electron) + 1
            
        ConsistencyChecker.check_multiplicity(nRad, multiplicity)
        for atom in atoms: ConsistencyChecker.check_hund_rule(atom, multiplicity)
        return atoms, multiplicity
    else:
        # Currently no group consistency check
        return atoms, multiplicity
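
Example 27 builds its logging.debug messages eagerly with str.format(). logging.debug also accepts %-style arguments directly, deferring interpolation until the record is actually emitted; a small sketch of both styles, with an illustrative adjlist value:

import logging

logging.basicConfig(level=logging.DEBUG)

adjlist = "1 C u0 p0 c0"
last_line = adjlist.splitlines()[-1]

# Eager: the message string is built even when DEBUG logging is disabled.
logging.debug("line '{0}' looks like an adjacency list".format(last_line))

# Deferred: logging interpolates the arguments only if the record is emitted.
logging.debug("line '%s' looks like an adjacency list", last_line)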

Example 28

Project: vimhelp
Source File: update.py
View license
    @ndb.toplevel
    def _do_update(self, no_rfi):
        # Kick off retrieval of all RawFileInfo entities from the Datastore
        if no_rfi:
            all_rfi_future = ndb.tasklet(lambda: ())()
        else:
            all_rfi_future = RawFileInfo.query().fetch_async()

        # Kick off check for new vim version
        refresh_vim_version_future = self.refresh_vim_version_async()

        # Kick off retrieval of 'runtime/doc' dir listing in github
        docdir_future = vim_github_request_async(
            '/repos/vim/vim/contents/runtime/doc', self._g.docdir_etag)

        # Put all RawFileInfo entites into a map
        rfi_map = { r.key.string_id(): r for r in all_rfi_future.get_result() }

        processor_futures = set()
        processor_futures_by_name = {}

        def processor_futures_add(name, value):
            processor_futures.add(value)
            processor_futures_by_name[name] = value

        def queue_urlfetch(name, url, git_sha=None):
            rfi = rfi_map.get(name)
            etag = rfi.etag if rfi is not None else None
            logging.debug("fetching %s (etag: %s)", name, etag)
            processor_future = ProcessorHTTP.create_async(name, git_sha,
                                                          url=url, etag=etag)
            processor_futures_add(name, processor_future)

        # Kick off FAQ download

        queue_urlfetch(FAQ_NAME, FAQ_BASE_URL + FAQ_NAME)

        # Iterating over 'runtime/doc' dir listing, kick off download for all
        # modified items

        docdir = docdir_future.get_result()

        if docdir.status_code == HTTP_NOT_MOD:
            logging.info("doc dir not modified")
        elif docdir.status_code == HTTP_OK:
            self._g.docdir_etag = docdir.headers.get(HTTP_HDR_ETAG)
            self._g_changed = True
            logging.debug("got doc dir etag %s", self._g.docdir_etag)
            for item in docdir.json:
                name = item['name'].encode()
                if item['type'] == 'file' and DOC_ITEM_RE.match(name):
                    assert name not in processor_futures_by_name
                    git_sha = item['sha'].encode()
                    rfi = rfi_map.get(name)
                    if rfi is not None and rfi.git_sha == git_sha:
                        logging.debug("%s unchanged (sha=%s)", name,
                                      rfi.git_sha)
                        continue
                    elif rfi is None:
                        logging.debug("%s is new (sha=%s)", name, git_sha)
                    else:
                        logging.debug("%s changed (%s != %s)", name,
                                      rfi.git_sha, git_sha)
                    queue_urlfetch(name, item['download_url'], git_sha)

        # Check if we have a new vim version
        is_new_vim_version = refresh_vim_version_future.get_result()

        # If there is no new vim version, and if the only file we're downloading
        # is the FAQ, and if the FAQ was not modified, then there is nothing to
        # do for us, so bail out now

        if not is_new_vim_version and len(processor_futures) == 1:
            faq_uf = processor_futures_by_name[FAQ_NAME].get_result()
            if faq_uf.http_result().status_code == HTTP_NOT_MOD:
                return

        @ndb.tasklet
        def get_content_async(name):
            processor_future = processor_futures_by_name.get(name)
            # Do we already have retrieval queued?
            if processor_future is not None:
                # If so, wait for that and return the content.
                processor = yield processor_future
                content = yield processor.raw_content_async()
            else:
                # If we don't have retrieval queued, that means we must already
                # have the latest version in the Datastore, so get the content
                # from there.
                rfc = yield RawFileContent.get_by_id_async(name)
                content = rfc.data
            raise ndb.Return(content)

        # Make sure we are retrieving tags, either from HTTP or from Datastore
        tags_future = get_content_async(TAGS_NAME)

        # Make sure we are retrieving FAQ, either from HTTP or from Datastore
        faq_future = get_content_async(FAQ_NAME)

        # If we found a new vim version and we're not already downloading
        # help.txt, kick off its retrieval from the Datastore instead
        # (since we're displaying the current vim version in the rendered
        # help.txt.html)
        if is_new_vim_version and HELP_NAME not in processor_futures_by_name:
            processor_futures_add(HELP_NAME,
                                  ProcessorDB.create_async(HELP_NAME))

        # Construct the vimhelp-to-html converter, providing it the tags file,
        # and adding on the FAQ for extra tags
        h2h = VimH2H(tags_future.get_result(), version=self._g.vim_version)
        h2h.add_tags(FAQ_NAME, faq_future.get_result())

        # Wait for urlfetches and Datastore accesses to return; kick off the
        # processing as they do so

        while len(processor_futures) > 0:
            try:
                future = ndb.Future.wait_any(processor_futures)
                processor = future.get_result()
            except urlfetch.Error as e:
                logging.error(e)
                # If we could not fetch the URL, continue with the others, but
                # set 'self._g_changed' to False so we do not save the
                # 'GlobalInfo' object at the end, so that we will retry at the
                # next run
                self._g_changed = False
            else:  # no exception was raised
                processor.process_async(h2h)
                # Because this method is decorated '@ndb.toplevel', we don't
                # need to keep hold of the future returned by the above line:
                # this method automatically waits for all outstanding futures
                # before returning.
            processor_futures.remove(future)
            del processor_futures_by_name[processor.name()]
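
Example 28 logs each file's ETag and skips downloads whose git SHA is unchanged. A comparable conditional-GET sketch using the requests library rather than the App Engine urlfetch/ndb machinery; fetch_if_changed and its return convention are illustrative assumptions:

import logging
import requests


def fetch_if_changed(url, etag=None):
    # Conditional GET: send the cached ETag and log whether the resource
    # actually changed.
    headers = {"If-None-Match": etag} if etag else {}
    logging.debug("fetching %s (etag: %s)", url, etag)
    resp = requests.get(url, headers=headers)
    if resp.status_code == 304:
        logging.debug("%s not modified", url)
        return None, etag
    new_etag = resp.headers.get("ETag")
    logging.debug("%s changed (new etag: %s)", url, new_etag)
    return resp.content, new_etag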

Example 29

View license
@error.context_aware
def run(test, params, env):
    """
    KVM multi-host migration test:

    Tests multi-host migration with network problem on destination side.

    :param test: kvm test object.
    :param params: Dictionary with test parameters.
    :param env: Dictionary with the test environment.
    """
    mig_protocol = params.get("mig_protocol", "tcp")
    base_class = migration.MultihostMigration
    if mig_protocol == "fd":
        base_class = migration.MultihostMigrationFd
    if mig_protocol == "exec":
        base_class = migration.MultihostMigrationExec
    if "rdma" in mig_protocol:
        base_class = migration.MultihostMigrationRdma

    sub_type = params["sub_type"]

    def wait_for_migration(vm, timeout):
        def mig_finished():
            ret = True
            if (vm.params["display"] == "spice" and
                    vm.get_spice_var("spice_seamless_migration") == "on"):
                s = vm.monitor.info("spice")
                if isinstance(s, str):
                    ret = "migrated: true" in s
                else:
                    ret = s.get("migrated") == "true"
            o = vm.monitor.info("migrate")
            if isinstance(o, str):
                return ret and ("status: active" not in o)
            else:
                return ret and (o.get("status") != "active")

        if not utils_misc.wait_for(mig_finished, timeout, 2, 2,
                                   "Waiting for migration to complete"):
            raise virt_vm.VMMigrateTimeoutError("Timeout expired while waiting "
                                                "for migration to finish")

    class TestMultihostMigrationLongWait(base_class):

        def __init__(self, test, params, env):
            super(TestMultihostMigrationLongWait, self).__init__(
                test, params, env)
            self.install_path = params.get("cpuflags_install_path", "/tmp")
            self.vm_mem = int(params.get("mem", "512"))

            self.mig_timeout = int(params.get("mig_timeout", "550"))
            self.mig_fir_timeout = self.mig_timeout - 5

            self.srchost = self.params.get("hosts")[0]
            self.dsthost = self.params.get("hosts")[1]
            self.vms = params.get("vms").split()

        def firewall_block_port(self, port):
            utils.run("iptables -A INPUT -p tcp --dport %s"
                      " -j REJECT" % (port), ignore_status=True)

        def clean_firewall(self):
            utils.run("iptables -F", ignore_status=True)

        def migrate_vms_src(self, mig_data):
            super(TestMultihostMigrationLongWait,
                  self).migrate_vms_src(mig_data)
            self._hosts_barrier(self.hosts, mig_data.mig_id, 'mig_started',
                                self.mig_timeout)
            vm = mig_data.vms[0]
            self._hosts_barrier(self.hosts, mig_data.mig_id, 'mig_interrupted',
                                self.mig_timeout)

            session = vm.wait_for_login(timeout=self.login_timeout)
            session.cmd("killall cpuflags-test")
            if params.get("mig_cancel", "no") == "yes":
                vm.monitor.cmd("migrate_cancel")
                vm.monitor.info("migrate")
            else:
                for _ in range(self.mig_fir_timeout):
                    state = vm.monitor.info("migrate")
                    if type(state) is str:
                        if "failed" in state:
                            break
                    else:
                        if state["status"] == "failed":
                            break
                    time.sleep(1)
                else:
                    raise error.TestWarn("Firewall block migraiton timeout"
                                         " is too short: %s. For completing"
                                         " the test increase mig_timeout in"
                                         " variant dest-problem-test." %
                                         (self.mig_fir_timeout))

            self._hosts_barrier(self.hosts, mig_data.mig_id, 'mig_interfynish',
                                self.mig_timeout)

        def migrate_vms_dest(self, mig_data):
            """
            Migrate vms destination. This function is started on dest host during
            migration.

            :param mig_Data: Data for migration.
            """
            self._hosts_barrier(self.hosts, mig_data.mig_id, 'mig_started',
                                self.mig_timeout)

            time.sleep(3)
            for vm in mig_data.vms:
                self.firewall_block_port(mig_data.vm_ports[vm.name])
            self._hosts_barrier(self.hosts, mig_data.mig_id, 'mig_interrupted',
                                self.mig_timeout)
            self._hosts_barrier(self.hosts, mig_data.mig_id, 'mig_interfynish',
                                self.mig_fir_timeout + 10)
            try:
                stat = []
                for vm in mig_data.vms:
                    stat.append(vm.monitor.get_status())
            except (qemu_monitor.MonitorProtocolError,
                    qemu_monitor.QMPCmdError):
                logging.debug("Guest %s not working" % (vm))

        def check_vms_src(self, mig_data):
            """
            Check vms after migrate.

            :param mig_data: object with migration data.
            """
            for vm in mig_data.vms:
                vm.resume()
                if not utils_test.qemu.guest_active(vm):
                    raise error.TestFail("Guest not active after migration")

            logging.info("Migrated guest appears to be running")

            logging.info("Logging into guest after interrupted migration...")
            for vm in mig_data.vms:
                vm.wait_for_serial_login(timeout=self.login_timeout)
                # Sometimes the system prints a message on the serial console
                # and the IP renew command blocks the test; that is why a
                # "sleep" has to be added to the IP renew command.
                vm.wait_for_login(timeout=self.login_timeout)

        def check_vms_dst(self, mig_data):
            """
            Check vms after migrate.

            :param mig_data: object with migration data.
            """
            for vm in mig_data.vms:
                try:
                    vm.resume()
                    if utils_test.qemu.guest_active(vm):
                        raise error.TestFail("Guest can't be active after"
                                             " interrupted migration.")
                except (qemu_monitor.MonitorProtocolError,
                        qemu_monitor.MonitorLockError,
                        qemu_monitor.QMPCmdError):
                    pass

        def migration_scenario(self, worker=None):
            error.context("Migration from %s to %s over protocol %s." %
                          (self.srchost, self.dsthost, mig_protocol),
                          logging.info)

            def worker_func(mig_data):
                vm = mig_data.vms[0]
                session = vm.wait_for_login(timeout=self.login_timeout)

                cpuflags.install_cpuflags_util_on_vm(test, vm,
                                                     self.install_path,
                                                     extra_flags="-msse3 -msse2")

                cmd = ("nohup %s/cpuflags-test --stressmem %d,%d &" %
                       (os.path.join(self.install_path, "cpu_flags"),
                        self.vm_mem * 100, self.vm_mem / 2))
                logging.debug("Sending command: %s" % (cmd))
                session.sendline(cmd)
                time.sleep(3)

            if worker is None:
                worker = worker_func

            try:
                self.migrate_wait(self.vms, self.srchost, self.dsthost,
                                  start_work=worker)
            finally:
                self.clean_firewall()

    class TestMultihostMigrationShortInterrupt(TestMultihostMigrationLongWait):

        def __init__(self, test, params, env):
            super(TestMultihostMigrationShortInterrupt, self).__init__(
                test, params, env)

        def migrate_vms_src(self, mig_data):
            super(TestMultihostMigrationShortInterrupt,
                  self).migrate_vms_src(mig_data)
            self._hosts_barrier(self.hosts, mig_data.mig_id, 'mig_started',
                                self.mig_timeout)
            vm = mig_data.vms[0]
            self._hosts_barrier(self.hosts, mig_data.mig_id, 'mig_interrupted',
                                self.mig_timeout)

            session = vm.wait_for_login(timeout=self.login_timeout)
            session.cmd("killall cpuflags-test")

            wait_for_migration(vm, self.mig_timeout)

            self._hosts_barrier(self.hosts, mig_data.mig_id, 'mig_done',
                                self.mig_timeout)

        def migrate_vms_dest(self, mig_data):
            """
            Migrate vms on the destination side. This function is started
            on the destination host during migration.

            :param mig_data: Data for migration.
            """
            self._hosts_barrier(self.hosts, mig_data.mig_id, 'mig_started',
                                self.mig_timeout)

            time.sleep(3)
            for vm in mig_data.vms:
                self.firewall_block_port(mig_data.vm_ports[vm.name])
            self._hosts_barrier(self.hosts, mig_data.mig_id, 'mig_interrupted',
                                self.mig_timeout)
            self.clean_firewall()
            self._hosts_barrier(self.hosts, mig_data.mig_id, 'mig_done',
                                self.mig_fir_timeout)
            try:
                for vm in mig_data.vms:
                    vm.monitor.get_status()
            except (qemu_monitor.MonitorProtocolError,
                    qemu_monitor.QMPCmdError):
                logging.debug("Guest %s not working" % (vm))

        def check_vms_dst(self, mig_data):
            """
            Check vms after migrate.

            :param mig_data: object with migration data.
            """
            super(TestMultihostMigrationShortInterrupt, self).check_vms_dst(mig_data)

        def check_vms_src(self, mig_data):
            """
            Check vms after migrate.

            :param mig_data: object with migration data.
            """
            super(TestMultihostMigrationShortInterrupt, self).check_vms_src(mig_data)

    mig = None
    if sub_type == "long_wait":
        mig = TestMultihostMigrationLongWait(test, params, env)
    elif sub_type == "short_interrupt":
        mig = TestMultihostMigrationShortInterrupt(test, params, env)
    else:
        raise error.TestNAError("Unsupported sub_type = '%s'." % sub_type)
    mig.run()
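
Example 29 polls the monitor through utils_misc.wait_for() until migration leaves the "active" state. A simplified plain-Python stand-in for that polling helper (not the actual utils_misc implementation) looks roughly like this:

import logging
import time


def wait_for(condition, timeout, first=0.0, step=1.0, text=None):
    # Poll `condition` every `step` seconds until it returns a truthy value
    # or `timeout` seconds elapse; return the value, or None on timeout.
    if text:
        logging.debug("%s (timeout: %ss)", text, timeout)
    end_time = time.time() + timeout
    time.sleep(first)
    while time.time() < end_time:
        result = condition()
        if result:
            return result
        time.sleep(step)
    return None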

Example 30

Project: cdr-stats
Source File: views.py
View license
@permission_required('user_profile.search', login_url='/')
@check_user_detail('accountcode,voipplan')
@login_required
def cdr_view(request):
    """List of CDRs

    **Attributes**:

        * ``template`` - cdr/list.html
        * ``form`` - CdrSearchForm

    **Logic Description**:

        * get the call records as well as daily call analytics
          from postgresql according to search parameters
    """
    logging.debug('CDR View Start')
    result = 1  # default min
    switch_id = 0  # default all
    hangup_cause_id = 0  # default all
    destination, destination_type, accountcode = '', '', ''
    direction, duration, duration_type = '', '', ''
    caller_id_number, caller_id_number_type, country_id = '', '', ''
    action = 'tabs-1'
    menu = 'on'
    records_per_page = settings.PAGE_SIZE

    form = CdrSearchForm(request.POST or None)
    if form.is_valid():
        logging.debug('CDR Search View')
        # set session var value
        field_list = ['destination', 'result', 'destination_type', 'accountcode',
                      'caller_id_number', 'caller_id_number_type', 'duration',
                      'duration_type', 'hangup_cause_id', 'switch_id', 'direction',
                      'country_id', 'export_query_var']
        unset_session_var(request, field_list)
        from_date = getvar(request, 'from_date', setsession=False)
        to_date = getvar(request, 'to_date', setsession=False)
        result = getvar(request, 'result', setsession=True)
        destination = getvar(request, 'destination', setsession=True)
        destination_type = getvar(request, 'destination_type', setsession=True)
        accountcode = getvar(request, 'accountcode', setsession=True)
        caller_id_number = getvar(request, 'caller_id_number', setsession=True)
        caller_id_number_type = getvar(request, 'caller_id_number_type', setsession=True)
        duration = getvar(request, 'duration', setsession=True)
        duration_type = getvar(request, 'duration_type', setsession=True)
        direction = getvar(request, 'direction', setsession=True)
        if direction and direction != 'all' and direction != '0':
            request.session['session_direction'] = str(direction)

        switch_id = getvar(request, 'switch_id', setsession=True)
        hangup_cause_id = getvar(request, 'hangup_cause_id', setsession=True)
        records_per_page = getvar(request, 'records_per_page', setsession=True)

        country_id = form.cleaned_data.get('country_id')
        # convert list value in int
        country_id = [int(row) for row in country_id]
        if len(country_id) >= 1:
            request.session['session_country_id'] = country_id

        start_date = ceil_strdate(str(from_date), 'start', True)
        end_date = ceil_strdate(str(to_date), 'end', True)
        converted_start_date = start_date.strftime('%Y-%m-%d %H:%M')
        converted_end_date = end_date.strftime('%Y-%m-%d %H:%M')
        request.session['session_start_date'] = converted_start_date
        request.session['session_end_date'] = converted_end_date

    menu = show_menu(request)

    using_session = False
    # Display a specific page or sort
    if request.GET.get('page') or request.GET.get('sort_by'):
        using_session = True
        from_date = start_date = request.session.get('session_start_date')
        to_date = end_date = request.session.get('session_end_date')
        start_date = ceil_strdate(start_date, 'start', True)
        end_date = ceil_strdate(end_date, 'end', True)

        destination = request.session.get('session_destination')
        destination_type = request.session.get('session_destination_type')
        accountcode = request.session.get('session_accountcode')
        caller_id_number = request.session.get('session_caller_id_number')
        caller_id_number_type = request.session.get('session_caller_id_number_type')
        duration = request.session.get('session_duration')
        duration_type = request.session.get('session_duration_type')
        direction = request.session.get('session_direction')
        switch_id = request.session.get('session_switch_id')
        hangup_cause_id = request.session.get('session_hangup_cause_id')
        result = request.session.get('session_result')
        records_per_page = request.session.get('session_records_per_page')
        country_id = request.session['session_country_id']

    # Set default cause we display page for the first time
    if request.method == 'GET' and not using_session:
        tday = datetime.today()
        from_date = datetime(tday.year, tday.month, 1, 0, 0, 0, 0)
        last_day = ((datetime(tday.year, tday.month, 1, 23, 59, 59, 999999) +
                     relativedelta(months=1)) -
                    relativedelta(days=1)).strftime('%d')
        # to_date = tday.strftime('%Y-%m-' + last_day + ' 23:59')
        to_date = datetime(tday.year, tday.month, int(last_day), 23, 59, 59, 999999)
        start_date = ceil_strdate(str(from_date), 'start', True)
        end_date = ceil_strdate(str(to_date), 'end', True)

        converted_start_date = start_date.strftime('%Y-%m-%d %H:%M')
        converted_end_date = end_date.strftime('%Y-%m-%d %H:%M')
        request.session['session_start_date'] = converted_start_date
        request.session['session_end_date'] = converted_end_date
        request.session['session_result'] = 1
        field_list = [
            'destination', 'destination_type', 'accountcode',
            'caller_id_number', 'caller_id_number_type', 'duration',
            'duration_type', 'hangup_cause_id',
            'switch_id', 'direction', 'country_id']
        unset_session_var(request, field_list)
        request.session['session_records_per_page'] = records_per_page
        request.session['session_country_id'] = ''

    # Define no of records per page
    records_per_page = int(records_per_page)

    sort_col_field_list = ['id', 'caller_id_number', 'destination_number', 'starting_date']
    page_vars = get_pagination_vars(request, sort_col_field_list, default_sort_field='id')

    # Build filter for CDR.object
    kwargs = {}
    if hangup_cause_id and hangup_cause_id != '0':
        kwargs['hangup_cause_id'] = int(hangup_cause_id)

    if switch_id and switch_id != '0':
        kwargs['switch_id'] = int(switch_id)

    if direction and direction != 'all' and direction != "0":
        kwargs['direction'] = direction

    if len(country_id) >= 1 and country_id[0] != 0:
        kwargs['country_id__in'] = country_id

    if start_date:
        kwargs['starting_date__gte'] = start_date

    if end_date:
        kwargs['starting_date__lte'] = end_date

    if destination:
        operator_query = get_filter_operator_str('destination_number', destination_type)
        kwargs[operator_query] = destination

    if duration:
        operator_query = get_filter_operator_int('duration', duration_type)
        kwargs[operator_query] = duration

    if caller_id_number:
        operator_query = get_filter_operator_str('caller_id_number', caller_id_number_type)
        kwargs[operator_query] = caller_id_number

    # users are restricted to their own CDRs
    if not request.user.is_superuser:
        kwargs['user_id'] = request.user.id

    if request.user.is_superuser and accountcode:
        try:
            acc = AccountCode.objects.get(accountcode=accountcode)
            kwargs['user_id'] = acc.user.id
            # when filtering on a specific accountcode, only display that one
            kwargs['accountcode'] = accountcode
        except AccountCode.DoesNotExist:
            # cannot find a user for this accountcode
            pass

    cdrs = CDR.objects.filter(**kwargs).order_by(page_vars['sort_order'])
    page_cdr_list = cdrs[page_vars['start_page']:page_vars['end_page']]
    cdr_count = cdrs.count()

    logging.debug('Create cdr result')

    # store query_var in session without date
    export_kwargs = kwargs.copy()
    if 'starting_date__gte' in export_kwargs:
        export_kwargs['starting_date__gte'] = export_kwargs['starting_date__gte'].strftime('%Y-%m-%dT%H:%M:%S')
    if 'starting_date__lte' in export_kwargs:
        export_kwargs['starting_date__lte'] = export_kwargs['starting_date__lte'].strftime('%Y-%m-%dT%H:%M:%S')

    request.session['session_export_kwargs'] = export_kwargs

    form = CdrSearchForm(
        initial={
            'from_date': from_date,
            'to_date': to_date,
            'destination': destination,
            'destination_type': destination_type,
            'accountcode': accountcode,
            'caller_id_number': caller_id_number,
            'caller_id_number_type': caller_id_number_type,
            'duration': duration,
            'duration_type': duration_type,
            'result': result,
            'direction': direction,
            'hangup_cause_id': hangup_cause_id,
            'switch_id': switch_id,
            'country_id': country_id,
            'records_per_page': records_per_page
        }
    )

    template_data = {
        'page_cdr_list': page_cdr_list,
        'cdrs': cdrs,
        'form': form,
        'cdr_count': cdr_count,
        'cdr_daily_data': {},
        'col_name_with_order': page_vars['col_name_with_order'],
        'menu': menu,
        'start_date': start_date,
        'end_date': end_date,
        'action': action,
        'result': result,
        'CDR_COLUMN_NAME': CDR_COLUMN_NAME,
        'records_per_page': records_per_page,
        'up_icon': '<i class="glyphicon glyphicon-chevron-up"></i>',
        'down_icon': '<i class="glyphicon glyphicon-chevron-down"></i>'
    }
    logging.debug('CDR View End')
    return render_to_response('cdr/list.html', template_data, context_instance=RequestContext(request))
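
Example 30 collects its queryset filters in a plain kwargs dict before calling CDR.objects.filter(**kwargs) and logs checkpoints along the way. A trimmed-down sketch of that pattern; the helper name and its parameters are illustrative, not part of cdr-stats:

import logging


def build_cdr_filter(start_date=None, end_date=None, switch_id='0',
                     direction='', hangup_cause_id='0'):
    # Collect only the filters that were actually supplied, then log the
    # resulting dict before handing it to CDR.objects.filter(**kwargs).
    kwargs = {}
    if hangup_cause_id and hangup_cause_id != '0':
        kwargs['hangup_cause_id'] = int(hangup_cause_id)
    if switch_id and switch_id != '0':
        kwargs['switch_id'] = int(switch_id)
    if direction and direction not in ('all', '0'):
        kwargs['direction'] = direction
    if start_date:
        kwargs['starting_date__gte'] = start_date
    if end_date:
        kwargs['starting_date__lte'] = end_date
    logging.debug('CDR filter kwargs: %s', kwargs)
    return kwargs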

Example 31

Project: tp-qemu
Source File: openflow_acl_test.py
View license
@error.context_aware
def run(test, params, env):
    """
    Test Step:
        1. Boot up guest using the openvswitch bridge
        2. Set up the related service in the test environment (http, ftp, etc.) (optional)
        3. Access the service in guest
        4. Set up access control rules in ovs to disable the access
        5. Access the service in guest
        6. Set up access control rules in ovs to enable the access
        7. Access the service in guest
        8. Delete the access control rules in ovs
        9. Access the service in guest

    Params:
        :param test: QEMU test object
        :param params: Dictionary with the test parameters
        :param env: Dictionary with test environment.
    """
    def access_service(access_sys, access_targets, disabled, host_ip,
                       ref=False):
        err_msg = ""
        err_type = ""
        for asys in access_sys:
            for atgt in access_targets:
                logging.debug("Try to access target %s from %s" % (atgt, asys))

                access_params = access_sys[asys]
                atgt_disabled = access_params['disabled_%s' % atgt]
                if asys in vms_tags:
                    vm = env.get_vm(asys)
                    session = vm.wait_for_login(timeout=timeout)
                    run_func = session.cmd
                    remote_src = vm
                    ssh_src_ip = vm.get_address()
                else:
                    run_func = utils.system_output
                    remote_src = "localhost"
                    ssh_src_ip = host_ip
                if atgt in vms_tags:
                    vm = env.get_vm(atgt)
                    access_re_sub_string = vm.wait_for_get_address(0)
                else:
                    access_re_sub_string = host_ip

                access_cmd = re.sub("ACCESS_TARGET", access_re_sub_string,
                                    access_params['access_cmd'])
                ref_cmd = re.sub("ACCESS_TARGET", access_re_sub_string,
                                 access_params['ref_cmd'])

                if access_cmd in ["ssh", "telnet"]:
                    if atgt in vms_tags:
                        target_vm = env.get_vm(atgt)
                        target_ip = target_vm.get_address()
                    else:
                        target_vm = "localhost"
                        target_ip = host_ip
                    out = ""
                    out_err = ""
                    try:
                        out = remote_login(access_cmd, target_ip,
                                           remote_src, params, host_ip)
                        stat = 0
                    except remote.LoginError, err:
                        stat = 1
                        out_err = "Failed to login %s " % atgt
                        out_err += "from %s, err: %s" % (asys, err.output)
                    try:
                        out += remote_login(access_cmd, ssh_src_ip,
                                            target_vm, params, host_ip)
                    except remote.LoginError, err:
                        stat += 1
                        out_err += "Failed to login %s " % asys
                        out_err += "from %s, err: %s" % (atgt, err.output)
                    if out_err:
                        out = out_err
                else:
                    try:
                        out = run_func(access_cmd, timeout=op_timeout)
                        stat = 0
                        check_string = access_params.get("check_from_output")
                        if check_string and check_string in out:
                            stat = 1
                    except (aexpect.ShellCmdError, error.CmdError,
                            aexpect.ShellTimeoutError), err:
                        if isinstance(err, error.CmdError):
                            out = err.result_obj.stderr
                            stat = err.result_obj.exit_status
                        else:
                            out = err.output
                            if isinstance(err, aexpect.ShellTimeoutError):
                                stat = 1
                                session.close()
                                session = vm.wait_for_login(timeout=timeout)
                                run_func = session.cmd
                            else:
                                stat = err.status
                    if access_params.get("clean_cmd"):
                        try:
                            run_func(access_params['clean_cmd'])
                        except Exception:
                            pass

                if disabled and atgt_disabled and stat == 0:
                    err_msg += "Still can access %s after" % atgt
                    err_msg += " disable it from ovs. "
                    err_msg += "Command: %s. " % access_cmd
                    err_msg += "Output: %s" % out
                if disabled and atgt_disabled and stat != 0:
                    logging.debug("Can not access target as expect.")
                if not disabled and stat != 0:
                    if ref:
                        err_msg += "Can not access %s at the" % atgt
                        err_msg += " beginning. Please check your setup."
                        err_type = "ref"
                    else:
                        err_msg += "Still can not access %s" % atgt
                        err_msg += " after enable the access"
                    err_msg += "Command: %s. " % access_cmd
                    err_msg += "Output: %s" % out
                if err_msg:
                    session.close()
                    if err_type == "ref":
                        raise error.TestNAError(err_msg)
                    raise error.TestFail(err_msg)

                if not ref_cmd:
                    session.close()
                    return

                try:
                    out = run_func(ref_cmd, timeout=op_timeout)
                    stat = 0
                except (aexpect.ShellCmdError, error.CmdError,
                        aexpect.ShellTimeoutError), err:
                    if isinstance(err, error.CmdError):
                        out = err.result_obj.stderr
                        stat = err.result_obj.exit_status
                    else:
                        out = err.output
                        if isinstance(err, aexpect.ShellTimeoutError):
                            stat = 1
                        else:
                            stat = err.status

                if stat != 0:
                    if ref:
                        err_msg += "Refernce command failed at beginning."
                        err_type = "ref"
                    else:
                        err_msg += "Refernce command failed after setup"
                        err_msg += " the rules"
                    err_msg += "Command: %s. " % ref_cmd
                    err_msg += "Output: %s" % out
                if err_msg:
                    session.close()
                    if err_type == "ref":
                        raise error.TestNAError(err_msg)
                    raise error.TestFail(err_msg)
                session.close()

    def get_acl_cmd(protocol, in_port, action, extra_options):
        acl_cmd = protocol.strip()
        acl_cmd += ",in_port=%s" % in_port.strip()
        if extra_options.strip():
            acl_cmd += ",%s" % ",".join(extra_options.strip().split())
        if action.strip():
            acl_cmd += ",action=%s" % action.strip()
        return acl_cmd

    def acl_rules_check(acl_rules, acl_setup_cmd):
        acl_setup_cmd = re.sub("action=", "actions=", acl_setup_cmd)
        acl_option = re.split(",", acl_setup_cmd)
        for line in acl_rules.splitlines():
            rule = [_.lower() for _ in re.split("[ ,]", line) if _]
            item_in_rule = 0

            for acl_item in acl_option:
                if acl_item.lower() in rule:
                    item_in_rule += 1

            if item_in_rule == len(acl_option):
                return True
        return False

    def remote_login(client, host, src, params_login, host_ip):
        src_name = src
        if src != "localhost":
            src_name = src.name
        logging.info("Login %s from %s" % (host, src))
        port = params_login["target_port"]
        username = params_login["username"]
        password = params_login["password"]
        prompt = params_login["shell_prompt"]
        linesep = eval("'%s'" % params_login.get("shell_linesep", r"\n"))
        quit_cmd = params.get("quit_cmd", "exit")
        if host == host_ip:
            # Try to login from guest to host.
            prompt = "^\[.*\][\#\$]\s*$"
            linesep = "\n"
            username = params_login["host_username"]
            password = params_login["host_password"]
            quit_cmd = "exit"

        if client == "ssh":
            # We only support ssh for Linux in this test
            cmd = ("ssh -o UserKnownHostsFile=/dev/null "
                   "-o StrictHostKeyChecking=no "
                   "-o PreferredAuthentications=password -p %s %[email protected]%s" %
                   (port, username, host))
        elif client == "telnet":
            cmd = "telnet -l %s %s %s" % (username, host, port)
        else:
            raise remote.LoginBadClientError(client)

        if src == "localhost":
            logging.debug("Login with command %s" % cmd)
            session = aexpect.ShellSession(cmd, linesep=linesep, prompt=prompt)
        else:
            if params_login.get("os_type") == "windows":
                if client == "telnet":
                    cmd = "C:\\telnet.py %s %s " % (host, username)
                    cmd += "%s \"%s\" && " % (password, prompt)
                    cmd += "C:\\wait_for_quit.py"
                cmd = "%s || ping 127.0.0.1 -n 5 -w 1000 > nul" % cmd
            else:
                cmd += " || sleep 5"
            session = src.wait_for_login()
            logging.debug("Sending login command: %s" % cmd)
            session.sendline(cmd)
        try:
            out = remote.handle_prompts(session, username, password,
                                        prompt, timeout, debug=True)
        except Exception, err:
            session.close()
            raise err
        try:
            session.cmd(quit_cmd)
            session.close()
        except Exception:
            pass
        return out

    def setup_service(setup_target):
        setup_timeout = int(params.get("setup_timeout", 360))
        if setup_target == "localhost":
            setup_func = utils.system_output
            os_type = "linux"
        else:
            setup_vm = env.get_vm(setup_target)
            setup_session = setup_vm.wait_for_login(timeout=timeout)
            setup_func = setup_session.cmd
            os_type = params["os_type"]

        setup_params = params.object_params(os_type)
        setup_cmd = setup_params.get("setup_cmd", "service SERVICE restart")
        prepare_cmd = setup_params.get("prepare_cmd")
        setup_cmd = re.sub("SERVICE", setup_params.get("service", ""),
                           setup_cmd)

        error.context("Set up %s service in %s" % (setup_params.get("service"),
                                                   setup_target),
                      logging.info)
        if prepare_cmd:
            setup_func(prepare_cmd, timeout=setup_timeout)
        setup_func(setup_cmd, timeout=setup_timeout)
        if setup_target != "localhost":
            setup_session.close()

    def stop_service(setup_target):
        setup_timeout = int(params.get("setup_timeout", 360))
        if setup_target == "localhost":
            setup_func = utils.system_output
            os_type = "linux"
        else:
            setup_vm = env.get_vm(setup_target)
            setup_session = setup_vm.wait_for_login(timeout=timeout)
            setup_func = setup_session.cmd
            os_type = params["os_type"]

        setup_params = params.object_params(os_type)
        stop_cmd = setup_params.get("stop_cmd", "service SERVICE stop")
        cleanup_cmd = setup_params.get("cleanup_cmd")
        stop_cmd = re.sub("SERVICE", setup_params.get("service", ""),
                          stop_cmd)

        error.context("Stop %s service in %s" % (setup_params.get("service"),
                                                 setup_target),
                      logging.info)
        if stop_cmd:
            setup_func(stop_cmd, timeout=setup_timeout)

        if cleanup_cmd:
            setup_func(cleanup_cmd, timeout=setup_timeout)

        if setup_target != "localhost":
            setup_session.close()

    timeout = int(params.get("login_timeout", '360'))
    op_timeout = int(params.get("op_timeout", "360"))
    acl_protocol = params['acl_protocol']
    acl_extra_options = params.get("acl_extra_options", "")

    for vm in env.get_all_vms():
        session = vm.wait_for_login(timeout=timeout)
        if params.get("disable_iptables") == "yes":
            session.cmd("iptables -F")
            #session.cmd_status_output("service iptables stop")
        if params.get("copy_scripts"):
            root_dir = data_dir.get_root_dir()
            script_dir = os.path.join(root_dir, "shared", "scripts")
            tmp_dir = params.get("tmp_dir", "C:\\")
            for script in params.get("copy_scripts").split():
                script_path = os.path.join(script_dir, script)
                vm.copy_files_to(script_path, tmp_dir)
        session.close()

    vms_tags = params.objects("vms")
    br_name = params.get("netdst")
    if br_name == "private":
        br_name = params.get("priv_brname", 'autotest-prbr0')

    for setup_target in params.get("setup_targets", "").split():
        setup_service(setup_target)

    access_targets = params.get("access_targets", "localhost").split()
    deny_target = params.get("deny_target", "localhost")
    all_target = params.get("extra_target", "").split() + vms_tags
    target_port = params["target_port"]
    vm = env.get_vm(vms_tags[0])
    nic = vm.virtnet[0]
    if_name = nic.ifname
    params_nic = params.object_params("nic1")
    if params["netdst"] == "private":
        params_nic["netdst"] = params_nic.get("priv_brname", "atbr0")
    host_ip = utils_net.get_host_ip_address(params_nic)
    if deny_target in vms_tags:
        deny_vm = env.get_vm(deny_target)
        deny_vm_ip = deny_vm.wait_for_get_address(0)
    elif deny_target == "localhost":
        deny_vm_ip = host_ip
    if "NW_DST" in acl_extra_options:
        acl_extra_options = re.sub("NW_DST", deny_vm_ip, acl_extra_options)
    acl_extra_options = re.sub("TARGET_PORT", target_port, acl_extra_options)

    access_sys = {}
    for target in all_target:
        if target not in access_targets:
            if target in vms_tags:
                os_type = params["os_type"]
            else:
                os_type = "linux"
            os_params = params.object_params(os_type)
            access_param = os_params.object_params(target)
            check_from_output = access_param.get("check_from_output")

            access_sys[target] = {}
            access_sys[target]['access_cmd'] = access_param['access_cmd']
            access_sys[target]['ref_cmd'] = access_param.get('ref_cmd', "")
            access_sys[target]['clean_cmd'] = access_param.get('clean_guest',
                                                               "")
            if check_from_output:
                access_sys[target]['check_from_output'] = check_from_output
            for tgt in access_targets:
                tgt_param = access_param.object_params(tgt)
                acl_disabled = tgt_param.get("acl_disabled") == "yes"
                access_sys[target]['disabled_%s' % tgt] = acl_disabled

    error.context("Try to access target before setup the rules", logging.info)
    access_service(access_sys, access_targets, False, host_ip, ref=True)
    error.context("Disable the access in ovs", logging.info)
    br_infos = utils_net.openflow_manager(br_name, "show").stdout
    if_port = re.findall("(\d+)\(%s\)" % if_name, br_infos)
    if not if_port:
        raise error.TestNAError("Can not find %s in bridge %s" % (if_name,
                                                                  br_name))
    if_port = if_port[0]

    acl_cmd = get_acl_cmd(acl_protocol, if_port, "drop", acl_extra_options)
    utils_net.openflow_manager(br_name, "add-flow", acl_cmd)
    acl_rules = utils_net.openflow_manager(br_name, "dump-flows").stdout
    if not acl_rules_check(acl_rules, acl_cmd):
        raise error.TestFail("Can not find the rules from"
                             " ovs-ofctl: %s" % acl_rules)

    error.context("Try to acess target to exam the disable rules",
                  logging.info)
    access_service(access_sys, access_targets, True, host_ip)
    error.context("Enable the access in ovs", logging.info)
    acl_cmd = get_acl_cmd(acl_protocol, if_port, "normal", acl_extra_options)
    utils_net.openflow_manager(br_name, "mod-flows", acl_cmd)
    acl_rules = utils_net.openflow_manager(br_name, "dump-flows").stdout
    if not acl_rules_check(acl_rules, acl_cmd):
        raise error.TestFail("Can not find the rules from"
                             " ovs-ofctl: %s" % acl_rules)
    error.context("Try to acess target to exam the enable rules",
                  logging.info)
    access_service(access_sys, access_targets, False, host_ip)
    error.context("Delete the access rules in ovs", logging.info)
    acl_cmd = get_acl_cmd(acl_protocol, if_port, "", acl_extra_options)
    utils_net.openflow_manager(br_name, "del-flows", acl_cmd)
    acl_rules = utils_net.openflow_manager(br_name, "dump-flows").stdout
    if acl_rules_check(acl_rules, acl_cmd):
        raise error.TestFail("Still can find the rules from"
                             " ovs-ofctl: %s" % acl_rules)
    error.context("Try to acess target to exam after delete the rules",
                  logging.info)
    access_service(access_sys, access_targets, False, host_ip)

    for setup_target in params.get("setup_targets", "").split():
        stop_service(setup_target)
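
A side note on the flow manipulation above (a minimal sketch, assuming utils_net.openflow_manager() wraps the ovs-ofctl command line; the bridge name and flow spec below are hypothetical): the add/dump/del round trip can be reproduced directly with subprocess, logging each step at debug level.

    import logging
    import subprocess

    logging.basicConfig(level=logging.DEBUG)

    def ovs_ofctl(subcmd, bridge, flow=None):
        # Build an ovs-ofctl command line, log it, and return its output.
        cmd = ["ovs-ofctl", subcmd, bridge]
        if flow:
            cmd.append(flow)
        logging.debug("Running: %s", " ".join(cmd))
        return subprocess.check_output(cmd)

    acl_cmd = "tcp,in_port=5,actions=drop"    # hypothetical flow spec
    ovs_ofctl("add-flow", "autotest-prbr0", acl_cmd)
    logging.debug("Flow table:\n%s", ovs_ofctl("dump-flows", "autotest-prbr0"))
    ovs_ofctl("del-flows", "autotest-prbr0", "tcp,in_port=5")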

Example 32

View license
    def run(self):
        # Read CSV data and replace the variables in the Scribus file with the corresponding data. Finally, export to the specified format.
        # May raise exceptions if errors are encountered; use traceback to get full error details.
        
        # log options
        optionsTxt = self.__dataObject.toString()
        logging.debug("active options: %s%s"%(optionsTxt[:1], optionsTxt[172:]))
        
        #defaults for missing info
        if(self.__dataObject.getSingleOutput() and (self.__dataObject.getOutputFileName() is CONST.EMPTY)):
            self.__dataObject.setOutputFileName(os.path.split(os.path.splitext(self.__dataObject.getScribusSourceFile())[0])[1] +'__single')    

        #parsing
        logging.info("parsing data source file %s"%(self.__dataObject.getDataSourceFile()))
        try:
            csvData = self.getCsvData(self.__dataObject.getDataSourceFile())
        except IOError as e:
            logging.error("CSV file not found: %s"%(self.__dataObject.getDataSourceFile()))
            raise             
        if(len(csvData) < 1):
            logging.error("Data file %s is empty. At least a header line and a line of data is needed. Halting."%(self.__dataObject.getDataSourceFile()))
            return -1
        if(len(csvData) < 2):
            logging.error("Data file %s has only one line. At least a header line and a line of data is needed. Halting."%(self.__dataObject.getDataSourceFile()))
            return -1

        #range
        firstElement = 1
        if(self.__dataObject.getFirstRow() != CONST.EMPTY):
            try:
                newFirstElementValue = int(self.__dataObject.getFirstRow())
                firstElement = max(newFirstElementValue, 1) # Guard against 0 or negative numbers
            except:
                logging.warning("Could not parse value of 'first row' as an integer, using default value instead")
        lastElement = len(csvData)
        if(self.__dataObject.getLastRow() != CONST.EMPTY):
            try:
                newLastElementValue = int(self.__dataObject.getLastRow())
                lastElement = min(newLastElementValue + 1, lastElement) # Guard against numbers higher than the length of csvData
            except:
                logging.warning("Could not parse value of 'last row' as an integer, using default value instead")       
        if ( (firstElement != 1) or (lastElement != len(csvData)) ):
            csvData = csvData[0:1] + csvData[firstElement : lastElement]
            logging.debug("custom data range is: %s - %s"%(firstElement, lastElement))
        else:
            logging.debug("full data range will be used")

        #generation
        dataC = len(csvData)-1
        fillCount = len(str(dataC))
        template = [] # XML-Content/Text-Content of the Source Scribus File (List of Lines)
        outputFileNames = []
        index = 0
        # Generate the Scribus Files
        for row in csvData:
            if(index == 0): # first line is the Header-Row of the CSV-File                
                varNamesForFileName = row
                varNamesForReplacingVariables = self.handleAmpersand(row) # Header-Row contains the variable names
                logging.info("parsing scribus source file %s"%(self.__dataObject.getScribusSourceFile()))
                try:
                    tree = ET.parse(self.__dataObject.getScribusSourceFile())
                except IOError as e:
                    logging.error("Scribus file not found: %s"%(self.__dataObject.getScribusSourceFile()))
                    raise
                root = tree.getroot()                
                # overwrite attributes from their /*/ItemAttribute[Type=SGAttribute] sibling, when applicable.
                templateElt = self.overwriteAttributesFromSGAttributes(root)                 

                #save settings
                if (self.__dataObject.getSaveSettings()):                                    
                    serial=self.__dataObject.toString()
                    logging.debug("saving current Scribus Generator settings in your source file")# as: %s"%serial)
                    docElt = root.find('DOCUMENT')
                    storageElt = docElt.find('./JAVA[@NAME="'+CONST.STORAGE_NAME+'"]')
                    if (storageElt is None):
                        colorElt = docElt.find('./COLOR[1]')                     
                        scriptPos = docElt.getchildren().index(colorElt)
                        logging.debug("creating new storage element in SLA template at position %s"%scriptPos)
                        storageElt = ET.Element("JAVA", {"NAME":CONST.STORAGE_NAME})
                        docElt.insert(scriptPos, storageElt)
                    storageElt.set("SCRIPT",serial)
                    tree.write(self.__dataObject.getScribusSourceFile()) #todo check if scribus reloads (or overwrites :/ ) when doc is opened, opt use API to add a script if there's an open doc.

               
            else:
                outContent = self.substituteData(varNamesForReplacingVariables, self.handleAmpersand(row), ET.tostring(templateElt, method='xml').split('\n'), keepTabsLF=CONST.KEEP_TAB_LINEBREAK)                
                # using capturing parentheses in the re.split pattern above to make sure the closing '>' is included in the split array.
                if (self.__dataObject.getSingleOutput()):
                    if (index == 1):
                        logging.debug("generating reference content from row #1")                        
                        outputElt = ET.fromstring(outContent)
                        docElt = outputElt.find('DOCUMENT')  
                        pagescount = int(docElt.get('ANZPAGES'))
                        pageheight = float(docElt.get('PAGEHEIGHT'))
                        vgap = float(docElt.get('GapVertical'))
                        groupscount = int(docElt.get('GROUPC'))
                        objscount = len(outputElt.findall('.//PAGEOBJECT'))
                        logging.debug("current template has #%s pageobjects"%(objscount))
                        version = outputElt.get('Version')
#                        if version.startswith('1.4'):
#                            docElt.set('GROUPC', str(groupscount*dataC))
                        docElt.set('ANZPAGES', str(pagescount*dataC))                        
                        docElt.set('DOCCONTRIB',docElt.get('DOCCONTRIB')+CONST.CONTRIB_TEXT)
                    else:
                        logging.debug("merging content from row #%s"%(index))
                        tmpElt = ET.fromstring(outContent).find('DOCUMENT')
                        shiftedElts = self.shiftPagesAndObjects(tmpElt, pagescount, pageheight, vgap, index-1, groupscount, objscount, version)                        
                        docElt.extend(shiftedElts)                                                
                else: # write one of multiple sla
                    outputFileName = self.createOutputFileName(index, self.__dataObject.getOutputFileName(), varNamesForFileName, row, fillCount)                    
                    self.writeSLA(ET.fromstring(outContent), outputFileName)
                    outputFileNames.append(outputFileName)                    
            index = index + 1
        
        # clean & write single sla
        if (self.__dataObject.getSingleOutput()):            
            self.writeSLA(outputElt, self.__dataObject.getOutputFileName())
            outputFileNames.append(self.__dataObject.getOutputFileName())        

        # Export the generated Scribus Files as PDF
        if(CONST.FORMAT_PDF == self.__dataObject.getOutputFormat()):
            for outputFileName in outputFileNames:
                pdfOutputFilePath = self.createOutputFilePath(self.__dataObject.getOutputDirectory(), outputFileName, CONST.FILE_EXTENSION_PDF)
                scribusOutputFilePath = self.createOutputFilePath(self.__dataObject.getOutputDirectory(), outputFileName, CONST.FILE_EXTENSION_SCRIBUS)
                self.exportPDF(scribusOutputFilePath, pdfOutputFilePath)
                logging.info("pdf file created: %s"%(pdfOutputFilePath))
        
        # Cleanup the generated Scribus Files
        if(not (CONST.FORMAT_SLA == self.__dataObject.getOutputFormat()) and CONST.FALSE == self.__dataObject.getKeepGeneratedScribusFiles()):
            for outputFileName in outputFileNames:
                scribusOutputFilePath = self.createOutputFilePath(self.__dataObject.getOutputDirectory(), outputFileName, CONST.FILE_EXTENSION_SCRIBUS)
                self.deleteFile(scribusOutputFilePath)

        return 1;
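
The row-range handling above keeps the CSV header row and then slices out only the requested data rows. A tiny, self-contained sketch of the same slicing and debug logging (independent of Scribus Generator; the data and row numbers are made up):

    import logging

    logging.basicConfig(level=logging.DEBUG)

    csvData = [["name", "city"], ["Ada", "London"], ["Bob", "Paris"], ["Cleo", "Rome"]]
    firstElement, lastElement = 2, 4   # hypothetical "first row" / "last row" values
    csvData = csvData[0:1] + csvData[firstElement:lastElement]
    logging.debug("custom data range is: %s - %s", firstElement, lastElement)
    logging.debug("rows kept: %s", csvData)   # header plus data rows 2 and 3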

Example 33

View license
def run(test, params, env):
    """
    Test command: virsh update-device.

    Update device from an XML <file>.
    1. Prepare test environment, adding a cdrom/floppy to VM.
    2. Perform virsh update-device operation.
    3. Recover test environment.
    4. Confirm the test result.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    pre_vm_state = params.get("at_dt_device_pre_vm_state")
    virsh_dargs = {"debug": True, "ignore_status": True}

    def is_attached(vmxml_devices, disk_type, source_file, target_dev):
        """
        Check whether the attached device and disk exist.

        :param vmxml_devices: VMXMLDevices instance
        :param disk_type: disk's device type: cdrom or floppy
        :param source_file : disk's source file to check
        :param target_dev : target device name
        :return: True/False if backing file and device found
        """
        disks = vmxml_devices.by_device_tag('disk')
        for disk in disks:
            logging.debug("Check disk XML:\n%s", open(disk['xml']).read())
            if disk.device != disk_type:
                continue
            if disk.target['dev'] != target_dev:
                continue
            if disk.xmltreefile.find('source') is not None:
                if disk.source.attrs['file'] != source_file:
                    continue
            else:
                continue
            # All three conditions met
            logging.debug("Find %s in given disk XML", source_file)
            return True
        logging.debug("Not find %s in gievn disk XML", source_file)
        return False

    def check_result(disk_source, disk_type, disk_target,
                     flags, attach=True):
        """
        Check the test result of update-device command.
        """
        vm_state = pre_vm_state
        active_vmxml = VMXML.new_from_dumpxml(vm_name)
        active_attached = is_attached(active_vmxml.devices, disk_type,
                                      disk_source, disk_target)
        if vm_state != "transient":
            inactive_vmxml = VMXML.new_from_dumpxml(vm_name,
                                                    options="--inactive")
            inactive_attached = is_attached(inactive_vmxml.devices, disk_type,
                                            disk_source, disk_target)

        if flags.count("config") and not flags.count("live"):
            if vm_state != "transient":
                if attach:
                    if not inactive_attached:
                        raise exceptions.TestFail("Inactive domain XML not updated"
                                                  " when --config options used for"
                                                  " attachment")
                    if vm_state != "shutoff":
                        if active_attached:
                            raise exceptions.TestFail("Active domain XML updated "
                                                      "when --config options used"
                                                      " for attachment")
                else:
                    if inactive_attached:
                        raise exceptions.TestFail("Inactive domain XML not updated"
                                                  " when --config options used for"
                                                  " detachment")
                    if vm_state != "shutoff":
                        if not active_attached:
                            raise exceptions.TestFail("Active domain XML updated "
                                                      "when --config options used"
                                                      " for detachment")
        elif flags.count("live") and not flags.count("config"):
            if attach:
                if vm_state in ["paused", "running", "transient"]:
                    if not active_attached:
                        raise exceptions.TestFail("Active domain XML not updated"
                                                  " when --live options used for"
                                                  " attachment")
                if vm_state in ["paused", "running"]:
                    if inactive_attached:
                        raise exceptions.TestFail("Inactive domain XML updated "
                                                  "when --live options used for"
                                                  " attachment")
            else:
                if vm_state in ["paused", "running", "transient"]:
                    if active_attached:
                        raise exceptions.TestFail("Active domain XML not updated"
                                                  " when --live options used for"
                                                  " detachment")
                if vm_state in ["paused", "running"]:
                    if not inactive_attached:
                        raise exceptions.TestFail("Inactive domain XML updated "
                                                  "when --live options used for"
                                                  " detachment")
        elif flags.count("live") and flags.count("config"):
            if attach:
                if vm_state in ["paused", "running"]:
                    if not active_attached:
                        raise exceptions.TestFail("Active domain XML not updated"
                                                  " when --live --config options"
                                                  " used for attachment")
                    if not inactive_attached:
                        raise exceptions.TestFail("Inactive domain XML not updated"
                                                  " when --live --config options "
                                                  "used for attachment")
            else:
                if vm_state in ["paused", "running"]:
                    if active_attached:
                        raise exceptions.TestFail("Active domain XML not updated"
                                                  " when --live --config options"
                                                  " used for detachment")
                    if inactive_attached:
                        raise exceptions.TestFail("Inactive domain XML not updated"
                                                  " when --live --config options "
                                                  "used for detachment")
        elif flags.count("current") or flags == "":
            if attach:
                if vm_state in ["paused", "running", "transient"]:
                    if not active_attached:
                        raise exceptions.TestFail("Active domain XML not updated "
                                                  "when --current options used "
                                                  "for attachment")
                if vm_state in ["paused", "running"]:
                    if inactive_attached:
                        raise exceptions.TestFail("Inactive domain XML updated "
                                                  "when --current options used "
                                                  "for live attachment")
                if vm_state == "shutoff" and not inactive_attached:
                    raise exceptions.TestFail("Inactive domain XML not updated "
                                              "when --current options used for "
                                              "attachment")
            else:
                if vm_state in ["paused", "running", "transient"]:
                    if active_attached:
                        raise exceptions.TestFail("Active domain XML not updated"
                                                  " when --current options used "
                                                  "for detachment")
                if vm_state in ["paused", "running"]:
                    if not inactive_attached:
                        raise exceptions.TestFail("Inactive domain XML updated "
                                                  "when --current options used "
                                                  "for live detachment")
                if vm_state == "shutoff" and inactive_attached:
                    raise exceptions.TestFail("Inactive domain XML not updated"
                                              " when --current options used "
                                              "for detachment")

    def check_rhel_version(release_ver, session=None):
        """
        Login to guest and check its release version
        """
        rhel_release = {"rhel6": "Red Hat Enterprise Linux Server release 6",
                        "rhel7": "Red Hat Enterprise Linux Server release 7",
                        "fedora": "Fedora release"}
        version_file = "/etc/redhat-release"
        if not rhel_release.has_key(release_ver):
            logging.error("Can't support this version of guest: %s",
                          release_ver)
            return False

        cmd = "grep '%s' %s" % (rhel_release[release_ver], version_file)
        if session:
            s = session.cmd_status(cmd)
        else:
            s = process.run(cmd, ignore_status=True, shell=True).exit_status

        logging.debug("Check version cmd return:%s", s)
        if s == 0:
            return True
        else:
            return False

    vmxml_backup = VMXML.new_from_dumpxml(vm_name, options="--inactive")
    # Before doing anything - let's be sure we can support this test
    # Parse flag list, skip testing early if flag is not supported
    # NOTE: "".split("--") returns [''] which messes up later empty test
    at_flag = params.get("at_dt_device_at_options", "")
    dt_flag = params.get("at_dt_device_dt_options", "")
    flag_list = []
    if at_flag.count("--"):
        flag_list.extend(at_flag.split("--"))
    if dt_flag.count("--"):
        flag_list.extend(dt_flag.split("--"))
    for item in flag_list:
        option = item.strip()
        if option == "":
            continue
        if not bool(virsh.has_command_help_match("update-device", option)):
            raise exceptions.TestSkipError("virsh update-device doesn't support "
                                           "--%s" % option)

    # As per RH BZ 961443 avoid testing before behavior changes
    if 'config' in flag_list:
        # SKIP tests using --config if libvirt is 0.9.10 or earlier
        if not libvirt_version.version_compare(0, 9, 10):
            raise exceptions.TestSkipError("BZ 961443: --config behavior change "
                                           "in version 0.9.10")
    if 'persistent' in flag_list or 'live' in flag_list:
        # SKIP tests using --persistent if libvirt 1.0.5 or earlier
        if not libvirt_version.version_compare(1, 0, 5):
            raise exceptions.TestSkipError("BZ 961443: --persistent behavior "
                                           "change in version 1.0.5")

    # Get the target bus/dev
    disk_type = params.get("disk_type", "cdrom")
    target_bus = params.get("updatedevice_target_bus", "ide")
    target_dev = params.get("updatedevice_target_dev", "hdc")
    disk_mode = params.get("disk_mode", "")
    support_mode = ['readonly', 'shareable']
    if not disk_mode and disk_mode not in support_mode:
        raise exceptions.TestError("%s not in support mode %s"
                                   % (disk_mode, support_mode))

    # Prepare tmp directory and files.
    orig_iso = os.path.join(data_dir.get_tmp_dir(), "orig.iso")
    test_iso = os.path.join(data_dir.get_tmp_dir(), "test.iso")

    # Check the version first.
    host_rhel6 = check_rhel_version('rhel6')
    guest_rhel6 = False
    if not vm.is_alive():
        vm.start()
    session = vm.wait_for_login()
    if check_rhel_version('rhel6', session):
        guest_rhel6 = True
    session.close()
    vm.destroy(gracefully=False)

    try:
        # Prepare the disk first.
        create_disk(vm_name, orig_iso, disk_type, target_dev, disk_mode)
        vmxml_for_test = VMXML.new_from_dumpxml(vm_name,
                                                options="--inactive")

        # Turn VM into certain state.
        if pre_vm_state == "running":
            if at_flag == "--config" or dt_flag == "--config":
                if host_rhel6:
                    raise exceptions.TestSkipError("Config option not supported"
                                                   " on this host")
            logging.info("Starting %s..." % vm_name)
            if vm.is_dead():
                vm.start()
                vm.wait_for_login().close()
        elif pre_vm_state == "shutoff":
            if not at_flag or not dt_flag:
                if host_rhel6:
                    raise exceptions.TestSkipError("Default option not supported"
                                                   " on this host")
            logging.info("Shuting down %s..." % vm_name)
            if vm.is_alive():
                vm.destroy(gracefully=False)
        elif pre_vm_state == "paused":
            if at_flag == "--config" or dt_flag == "--config":
                if host_rhel6:
                    raise exceptions.TestSkipError("Config option not supported"
                                                   " on this host")
            logging.info("Pausing %s..." % vm_name)
            if vm.is_dead():
                vm.start()
                vm.wait_for_login().close()
            if not vm.pause():
                raise exceptions.TestSkipError("Cann't pause the domain")
        elif pre_vm_state == "transient":
            logging.info("Creating %s..." % vm_name)
            vm.undefine()
            if virsh.create(vmxml_for_test.xml, **virsh_dargs).exit_status:
                vmxml_backup.define()
                raise exceptions.TestSkipError("Cann't create the domain")
            vm.wait_for_login().close()
    except Exception, e:
        logging.error(str(e))
        if os.path.exists(orig_iso):
            os.remove(orig_iso)
        vmxml_backup.sync()
        raise exceptions.TestSkipError(str(e))

    # Get remaining parameters for configuration.
    vm_ref = params.get("updatedevice_vm_ref", "domname")
    at_status_error = "yes" == params.get("at_status_error", "no")
    dt_status_error = "yes" == params.get("dt_status_error", "no")

    dom_uuid = vm.get_uuid()
    dom_id = vm.get_id()
    # Set domain reference.
    if vm_ref == "domname":
        vm_ref = vm_name
    elif vm_ref == "domid":
        vm_ref = dom_id
    elif vm_ref == "domuuid":
        vm_ref = dom_uuid
    elif vm_ref == "hexdomid" and dom_id is not None:
        vm_ref = hex(int(dom_id))

    try:

        # Firstly detach the disk.
        update_xmlfile = os.path.join(data_dir.get_tmp_dir(),
                                      "update.xml")
        create_attach_xml(update_xmlfile, disk_type, target_bus,
                          target_dev, "", disk_mode)
        ret = virsh.update_device(vm_ref, filearg=update_xmlfile,
                                  flagstr=dt_flag, ignore_status=True,
                                  debug=True)
        if vm.is_paused():
            vm.resume()
            vm.wait_for_login().close()
        if vm.is_alive() and not guest_rhel6:
            time.sleep(5)
            # For rhel7 guest, need to update twice for it to take effect.
            ret = virsh.update_device(vm_ref, filearg=update_xmlfile,
                                      flagstr=dt_flag, ignore_status=True,
                                      debug=True)
        os.remove(update_xmlfile)
        libvirt.check_exit_status(ret, dt_status_error)
        if not ret.exit_status:
            check_result(orig_iso, disk_type, target_dev, dt_flag, False)

        # Then attach the disk.
        if pre_vm_state == "paused":
            if not vm.pause():
                raise exceptions.TestFail("Cann't pause the domain")
        create_attach_xml(update_xmlfile, disk_type, target_bus,
                          target_dev, test_iso, disk_mode)
        ret = virsh.update_device(vm_ref, filearg=update_xmlfile,
                                  flagstr=at_flag, ignore_status=True,
                                  debug=True)
        if vm.is_paused():
            vm.resume()
            vm.wait_for_login().close()
        update_twice = False
        if vm.is_alive() and not guest_rhel6:
            # For rhel7 guest, need to update twice for it to take effect.
            if (pre_vm_state in ["running", "paused"] and
                    dt_flag == "--config" and at_flag != "--config"):
                update_twice = True
            elif (pre_vm_state == "transient" and
                    dt_flag.count("config") and not at_flag.count("config")):
                update_twice = True
        if update_twice:
            time.sleep(5)
            ret = virsh.update_device(vm_ref, filearg=update_xmlfile,
                                      flagstr=at_flag, ignore_status=True,
                                      debug=True)
        libvirt.check_exit_status(ret, at_status_error)
        os.remove(update_xmlfile)
        if not ret.exit_status:
            check_result(test_iso, disk_type, target_dev, at_flag)
        # Try to start vm at last.
        if vm.is_dead():
            vm.start()
            vm.wait_for_login().close()

    finally:
        vm.destroy(gracefully=False, free_mac_addresses=False)
        vmxml_backup.sync()
        if os.path.exists(orig_iso):
            os.remove(orig_iso)
        if os.path.exists(test_iso):
            os.remove(test_iso)
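
The flag handling near the top of this test relies on the fact that "".split("--") returns [''], which is why the code checks count("--") before splitting. A quick stand-alone illustration of that parsing, with the flag strings chosen arbitrarily:

    import logging

    logging.basicConfig(level=logging.DEBUG)

    for at_flag in ("", "--live --config"):
        flag_list = []
        if at_flag.count("--"):
            flag_list.extend(at_flag.split("--"))
        options = [item.strip() for item in flag_list if item.strip()]
        logging.debug("raw flags %r -> options %s", at_flag, options)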

Example 34

Project: cdr-stats
Source File: views.py
View license
@permission_required('user_profile.dashboard', login_url='/')
@check_user_detail('accountcode,voipplan')
@login_required
def cdr_dashboard(request):
    """CDR dashboard on the last 24 hours

    **Attributes**:

        * ``template`` - cdr/dashboard.html
        * ``form`` - SwitchForm

    **Logic Description**:

        Display aggregated call information for the last 24 hours; several reports will be
        created and displayed, such as an hourly call report and hangup cause/country analytics.
    """
    logging.debug('CDR dashboard view start')
    form = SwitchForm(request.POST or None)

    if form.is_valid():
        logging.debug('CDR dashboard view with search option')
        switch_id = int(getvar(request, 'switch_id'))
    else:
        switch_id = 0

    # Get list of calls/duration for each of the last 24 hours
    (calls_hour_aggr, total_calls, total_duration, total_billsec, total_buy_cost, total_sell_cost) = custom_sql_matv_voip_cdr_aggr_last24hours(request.user, switch_id)

    # Build chart data for last 24h calls
    (xdata, ydata, ydata2, ydata3, ydata4, ydata5) = ([], [], [], [], [], [])
    for i in calls_hour_aggr:
        start_time = (time.mktime(calls_hour_aggr[i]['calltime'].timetuple()) * 1000)
        xdata.append(start_time)
        ydata.append(int(calls_hour_aggr[i]['nbcalls']))
        ydata2.append(int(calls_hour_aggr[i]['duration']/60))
        ydata3.append(int(calls_hour_aggr[i]['billsec']/60))
        ydata4.append(int(calls_hour_aggr[i]['buy_cost']))
        ydata5.append(int(calls_hour_aggr[i]['sell_cost']))

    tooltip_date = "%d %b %y %H:%M %p"
    extra_serie1 = {"tooltip": {"y_start": "", "y_end": " calls"}, "date_format": tooltip_date}
    extra_serie2 = {"tooltip": {"y_start": "", "y_end": " min"}, "date_format": tooltip_date}
    extra_serie3 = {"tooltip": {"y_start": "", "y_end": " min"}, "date_format": tooltip_date}
    extra_serie4 = {"tooltip": {"y_start": "", "y_end": ""}, "date_format": tooltip_date}
    extra_serie5 = {"tooltip": {"y_start": "", "y_end": ""}, "date_format": tooltip_date}

    kwargs1 = {}
    kwargs1['bar'] = True

    final_chartdata = {
        'x': xdata,
        'name1': 'Calls', 'y1': ydata, 'extra1': extra_serie1, 'kwargs1': kwargs1,
        'name2': 'Duration', 'y2': ydata2, 'extra2': extra_serie2,
        'name3': 'Billsec', 'y3': ydata3, 'extra3': extra_serie3,
        'name4': 'Buy cost', 'y4': ydata4, 'extra4': extra_serie4,
        'name5': 'Sell cost', 'y5': ydata5, 'extra5': extra_serie5,
    }
    final_charttype = "linePlusBarChart"

    # Get top 5 of country calls for last 24 hours
    country_data = custom_sql_aggr_top_country_last24hours(request.user, switch_id, limit=5)

    # Build pie chart data for last 24h calls per country
    (xdata, ydata) = ([], [])
    for country in country_data:
        xdata.append(get_country_name(country["country_id"]))
        ydata.append(percentage(country["nbcalls"], total_calls))

    color_list = ['#FFC36C', '#FFFF9D', '#BEEB9F', '#79BD8F', '#FFB391']
    extra_serie = {"tooltip": {"y_start": "", "y_end": " %"}, "color_list": color_list}
    country_analytic_chartdata = {'x': xdata, 'y1': ydata, 'extra1': extra_serie}
    country_analytic_charttype = "pieChart"

    country_extra = {
        'x_is_date': False,
        'x_axis_format': '',
        'tag_script_js': True,
        'jquery_on_ready': True,
    }

    # Get top 10 of hangup cause calls for last 24 hours
    hangup_cause_data = custom_sql_aggr_top_hangup_last24hours(request.user, switch_id)

    # hangup analytic pie chart data
    (xdata, ydata) = ([], [])
    for hangup_cause in hangup_cause_data:
        xdata.append(str(get_hangupcause_name(hangup_cause["hangup_cause_id"])))
        ydata.append(str(percentage(hangup_cause["nbcalls"], total_calls)))

    color_list = ['#2A343F', '#7E8282', '#EA9664', '#30998F', '#449935']
    extra_serie = {"tooltip": {"y_start": "", "y_end": " %"}, "color_list": color_list}
    hangup_analytic_chartdata = {'x': xdata, 'y1': ydata, 'extra1': extra_serie}
    hangup_analytic_charttype = "pieChart"

    hangup_extra = country_extra

    logging.debug("Result calls_hour_aggr %d" % len(calls_hour_aggr))
    logging.debug("Result hangup_cause_data %d" % len(hangup_cause_data))
    logging.debug("Result country_data %d" % len(country_data))

    # Calculate the Average Time of Call
    metric_aggr = calculate_act_acd(total_calls, total_duration)

    final_extra = {
        'x_is_date': True,
        'x_axis_format': '%H:%M',
        # 'x_axis_format': '%d %b %Y',
        'tag_script_js': True,
        'jquery_on_ready': True,
        'focus_enable': True,
    }

    logging.debug('CDR dashboard view end')
    variables = {
        'total_calls': total_calls,
        'total_duration': int_convert_to_minute(total_duration),
        'total_buy_cost': total_buy_cost,
        'total_sell_cost': total_sell_cost,
        'metric_aggr': metric_aggr,
        'country_data': country_data,
        'hangup_analytic': hangup_cause_data,
        'form': form,
        'final_chartdata': final_chartdata,
        'final_charttype': final_charttype,
        'final_chartcontainer': 'final_container',
        'final_extra': final_extra,
        'hangup_analytic_charttype': hangup_analytic_charttype,
        'hangup_analytic_chartdata': hangup_analytic_chartdata,
        'hangup_chartcontainer': 'hangup_piechart_container',
        'hangup_extra': hangup_extra,
        'country_analytic_charttype': country_analytic_charttype,
        'country_analytic_chartdata': country_analytic_chartdata,
        'country_chartcontainer': 'country_piechart_container',
        'country_extra': country_extra,
    }
    return render_to_response('cdr/dashboard.html', variables, context_instance=RequestContext(request))
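
The x-axis of the "last 24 hours" chart above is built from millisecond epoch values, since the JavaScript charting layer expects them. A minimal sketch of that conversion (the datetime is a made-up hour bucket):

    import logging
    import time
    from datetime import datetime

    logging.basicConfig(level=logging.DEBUG)

    calltime = datetime(2016, 1, 1, 13, 0)                  # hypothetical hour bucket
    start_time = time.mktime(calltime.timetuple()) * 1000   # milliseconds for the JS chart
    logging.debug("x value for %s is %d", calltime, start_time)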

Example 35

Project: tlsfuzzer
Source File: scripts_retention.py
View license
def run_rsa_cert_tests(server_cmd):
    simple_scripts = ['test-aes-gcm-nonces.py',
                      'test-atypical-padding.py',
                      'test-bleichenbacher-workaround.py',
                      'test-conversation.py',
                      'test-cve-2016-2107.py',
                      'test-dhe-rsa-key-exchange.py',
                      'test-dhe-rsa-key-exchange-signatures.py',
                      'test-dhe-rsa-key-exchange-with-bad-messages.py',
                      'test-early-application-data.py',
                      'test-ecdhe-rsa-key-exchange.py',
                      'test-ecdhe-rsa-key-exchange-with-bad-messages.py',
                      'test-empty-extensions.py',
                      # test requires renegotiation support
                      #'test-extended-master-secret-extension.py',
                      'test-fallback-scsv.py',
                      'test-fuzzed-ciphertext.py',
                      'test-fuzzed-finished.py',
                      'test-fuzzed-MAC.py',
                      'test-fuzzed-padding.py',
                      'test-hello-request-by-client.py',
                      # test requires renegotiation support
                      #'test-interleaved-application-data-and-fragmented-handshakes-in-renegotiation.py',
                      #'test-interleaved-application-data-in-renegotiation.py',
                      'test-invalid-cipher-suites.py',
                      # tlslite-ng just ignores this field
                      #'test-invalid-compression-methods.py',
                      'test-invalid-content-type.py',
                      'test-invalid-rsa-key-exchange-messages.py',
                      # not verified correctly by tlslite-ng
                      #'test-invalid-session-id.py',
                      'test-invalid-version.py',
                      'test-large-number-of-extensions.py',
                      # test requires renegotiation support
                      #'test-openssl-3712.py',
                      'test-record-layer-fragmentation.py',
                      'test-sessionID-resumption.py',
                      'test-sslv2-connection.py',
                      'test-sslv2-force-cipher-3des.py',
                      'test-sslv2-force-cipher-non3des.py',
                      'test-sslv2-force-cipher.py',
                      'test-sslv2-force-export-cipher.py',
                      'test-sslv2hello-protocol.py',
                      # SSLv3 is disabled by default
                      #'test-SSLv3-padding.py',
                      'test-TLSv1_2-rejected-without-TLSv1_2.py',
                      'test-truncating-of-client-hello.py',
                      'test-truncating-of-finished.py',
                      'test-truncating-of-kRSA-client-key-exchange.py',
                      'test-unsupported-cuve-fallback.py',
                      'test-version-numbers.py',
                      'test-zero-length-data.py']

    good = 0
    bad = 0
    srv, srv_out, srv_err = start_server(server_cmd)
    logger.info("Server process started")

    try:
        n_good, n_bad = run_clients(simple_scripts, srv)
        good += n_good
        bad += n_bad
    finally:
        try:
            logging.debug("Killing server process")
            srv.send_signal(15)  # SIGTERM
            srv.wait()
            logging.debug("Server process killed: {0}".format(srv.returncode))
        except OSError:
            logging.debug("Can't kill server process")
    srv_err.join()
    srv_out.join()

    client_certs = ['test-certificate-malformed.py',
                    'test-certificate-request.py',
                    'test-certificate-verify-malformed-sig.py',
                    'test-certificate-verify-malformed.py',
                    'test-certificate-verify.py',
                    'test-rsa-sigs-on-certificate-verify.py']

    srv, srv_out, srv_err = start_server(server_cmd, client_cert=True)
    logger.info("Server process started")

    try:
        n_good, n_bad = run_clients(client_certs, srv,
                                    ['-k', 'tests/clientX509Key.pem',
                                     '-c', 'tests/clientX509Cert.pem'])
        good += n_good
        bad += n_bad
    finally:
        try:
            logging.debug("Killing server process")
            srv.send_signal(15)  # SIGTERM
            srv.wait()
            logging.debug("Server process killed: {0}".format(srv.returncode))
        except OSError:
            logging.debug("Can't kill server process")
    srv_err.join()
    srv_out.join()

    return (good, bad)
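
The shutdown pattern in both finally blocks above is a plain SIGTERM-then-wait with the outcome logged at debug level. A self-contained sketch of the same pattern (the sleep process stands in for the TLS server):

    import logging
    import signal
    import subprocess

    logging.basicConfig(level=logging.DEBUG)

    srv = subprocess.Popen(["sleep", "60"])   # stand-in for the server process
    try:
        logging.debug("Killing server process")
        srv.send_signal(signal.SIGTERM)
        srv.wait()
        logging.debug("Server process killed: {0}".format(srv.returncode))
    except OSError:
        logging.debug("Can't kill server process")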

Example 36

Project: StaticMapService
Source File: views.py
View license
def render_static(request, height=None, width=None, format='png',
                  background='satellite', bounds=None, center=None, render_srid=3857):

# width and height
    width = int(width)
    height = int(height)
    if width > settings.MAX_IMAGE_DIMENSION or \
        height > settings.MAX_IMAGE_DIMENSION or \
        width <= 1 or height <= 1:
        logging.debug("Invalid size")
        return HttpResponseBadRequest("Invalid image size, both dimensions must be in range %i-%i" % (1, settings.MAX_IMAGE_DIMENSION))

# image format
    if format not in IMAGE_FORMATS:
        logging.error("unknown image format %s" % format)
        return HttpResponseBadRequest("Unknown image format, available formats: " + ", ".join(IMAGE_FORMATS))

    if format.startswith('png'):
        mimetype = 'image/png'
    elif format.startswith('jpeg'):
        mimetype = 'image/jpeg'

# bounds
    bounds_box = None
    if bounds:
        bounds_components = bounds.split(',')
        if len(bounds_components) != 4:
            return HttpResponseBadRequest("Invalid bounds, must be 4 , separated numbers")
        bounds_components = [float(f) for f in bounds_components]

        if not (-180 < bounds_components[0] < 180) or not (-180 < bounds_components[2] < 180):
            logging.error("x out of range %f or %f" % (bounds_components[0], bounds_components[2]))
            return HttpResponseBadRequest("x out of range %f or %f" % (bounds_components[0], bounds_components[2]))
        if not (-90 < bounds_components[1] < 90) or not (-90 < bounds_components[3] < 90):
            logging.error("y out of range %f or %f" % (bounds_components[1], bounds_components[3]))
            return HttpResponseBadRequest("y out of range %f or %f" % (bounds_components[1], bounds_components[3]))

        ll = Point(bounds_components[0], bounds_components[1], srid=4326)
        ll.transform(render_srid)

        ur = Point(bounds_components[2], bounds_components[3], srid=4326)
        ur.transform(render_srid)
        bounds_box = mapnik.Box2d(ll.x, ll.y, ur.x, ur.y)
    elif center:
        center_components = center.split(',')
        if len(center_components) != 3:
            return HttpResponseBadRequest()
        lon = float(center_components[0])
        lat = float(center_components[1])
        zoom = int(center_components[2])
        # todo calc bounds from center and zoom

# baselayer
    if background not in settings.BASE_LAYERS and background != 'none':
        return HttpResponseNotFound("Background not found")

# GeoJSON post data
    if request.method == "POST" and len(request.body):
        input_data = json.loads(request.body)
    else:
        input_data = None

    if not bounds and not center and not input_data:
        return HttpResponseBadRequest("Bounds, center, or post data is required.")

# initialize map
    m = mapnik.Map(width, height)
    m.srs = '+init=epsg:' + str(render_srid)

# add a tile source as a background
    if background != "none":
        background_file = settings.BASE_LAYERS[background]
        background_style = mapnik.Style()
        background_rule = mapnik.Rule()
        background_rule.symbols.append(mapnik.RasterSymbolizer())
        background_style.rules.append(background_rule)
        m.append_style('background style', background_style)
        tile_layer = mapnik.Layer('background')
        tile_layer.srs = '+init=epsg:' + str(render_srid)
        tile_layer.datasource = mapnik.Gdal(base=settings.BASE_LAYER_DIR, file=background_file)
        tile_layer.styles.append('background style')
        m.layers.append(tile_layer)

# add features from geojson
    if input_data and input_data['type'] == "Feature":
        features = [input_data]
    elif input_data and input_data['type'] == "FeatureCollection":
        if 'features' not in input_data:
            return HttpResponseBadRequest()
        features = input_data['features']
    else:
        features = []

    logging.debug("Adding %d features to map" % len(features))

    geometries = []
    point_features = []
    fid = 0
    for feature in features:
        if 'geometry' not in feature:
            logging.debug("feature does not have geometry")
            return HttpResponseBadRequest("Feature does not have a geometry")
        if 'type' not in feature['geometry']:
            logging.debug("geometry does not have type")
            return HttpResponseBadRequest("Geometry does not have a type")

        fid += 1
        style_name = str(fid)

        if feature['geometry']['type'] == 'Point':
            point_features.append(feature)
        elif feature['geometry']['type'] in ('LineString', 'MultiLineString'):
            if feature['geometry']['type'] == 'LineString':
                geos_feature = LineString(feature['geometry']['coordinates'])
            elif feature['geometry']['type'] == 'MultiLineString':
                rings = feature['geometry']['coordinates']
                rings = [[(c[0], c[1]) for c in r] for r in rings]
                if len(rings) == 1:
                    geos_feature = LineString(rings[0])
                else:
                    linestrings = []
                    for ring in rings:
                        try:
                            linestrings.append(LineString(ring))
                        except Exception, e:
                            logging.error("Error adding ring: %s", e)

                    geos_feature = MultiLineString(linestrings)

            geos_feature.srid = 4326
            geos_feature.transform(render_srid)
            geometries.append(geos_feature)

            style = mapnik.Style()
            line_rule = mapnik.Rule()
            style_dict = None
            if 'style' in feature:
                style_dict = feature['style']
            elif 'properties' in feature:
                style_dict = feature['properties']
            line_rule.symbols.append(line_symbolizer(style_dict))
            style.rules.append(line_rule)
            m.append_style(style_name, style)

            wkt = geos_feature.wkt
            line_layer = mapnik.Layer(style_name + ' layer')
            line_layer.datasource = mapnik.CSV(inline='wkt\n' + '"' + wkt + '"')
            line_layer.styles.append(style_name)
            line_layer.srs = '+init=epsg:' + str(render_srid)
            m.layers.append(line_layer)
        elif feature['geometry']['type'] == 'Polygon':
            geos_feature = GEOSGeometry(json.dumps(feature['geometry']))
            geos_feature.srid = 4326
            geos_feature.transform(render_srid)
            geometries.append(geos_feature)

            style = mapnik.Style()
            rule = mapnik.Rule()
            style_dict = None
            if 'style' in feature:
                style_dict = feature['style']
            elif 'properties' in feature:
                style_dict = feature['properties']
            rule.symbols.append(polygon_symbolizer(style_dict))
            rule.symbols.append(line_symbolizer(style_dict))
            style.rules.append(rule)
            m.append_style(style_name, style)

            wkt = geos_feature.wkt
            layer = mapnik.Layer(style_name + ' layer')
            layer.datasource = mapnik.CSV(inline='wkt\n' + '"' + wkt + '"')
            layer.styles.append(style_name)
            layer.srs = '+init=epsg:' + str(render_srid)
            m.layers.append(layer)
        else:
            logging.info("Not adding unknown feature type")

# point features are coalesced into a single layer for efficiency
    if len(point_features):
        logging.debug("Adding %i point features in 1 layer" % len(point_features))
        point_style = mapnik.Style()
        point_rule = mapnik.Rule()
        point_symbolizer = mapnik.PointSymbolizer()
        point_rule.symbols.append(point_symbolizer)
        point_style.rules.append(point_rule)
        m.append_style('point_style', point_style)

        csv = 'wkt\n'
        for feature in point_features:
            geos_feature = Point(feature['geometry']['coordinates'])
            geos_feature.srid = 4326
            geos_feature.transform(render_srid)
            geometries.append(geos_feature)
            csv += '"' + geos_feature.wkt + '"\n'

        point_layer = mapnik.Layer('point layer')
        point_layer.datasource = mapnik.CSV(inline=csv)
        point_layer.styles.append('point_style')
        point_layer.srs = '+init=epsg:' + str(render_srid)
        m.layers.append(point_layer)

# bounds not in url, calculate from data
    if not bounds_box:
        geometry_collection = GeometryCollection(geometries)
        minx, miny, maxx, maxy = geometry_collection.extent
        buffer_size = .2
        x_buffer_size = ((maxx - minx) * buffer_size)
        y_buffer_size = ((maxy - miny) * buffer_size)
        if x_buffer_size == 0:  # this can happen if there is only 1 point feature
            x_buffer_size = 1000
        if y_buffer_size == 0:
            y_buffer_size = 1000
        bounds_box = mapnik.Box2d(minx - x_buffer_size, miny - y_buffer_size,
                                  maxx + x_buffer_size, maxy + y_buffer_size)

    m.zoom_to_box(bounds_box)

# render image
    im = mapnik.Image(m.width, m.height)
    mapnik.render(m, im)
    data = im.tostring(str(format))

    if background in settings.BASE_LAYERS_ATTRIBUTION:
        image = Image.open(cStringIO.StringIO(data))
        if format.startswith('png'):
            image = image.convert('RGB')  # workaround for Pillow palette bug
        add_attribution(image, settings.BASE_LAYERS_ATTRIBUTION[background])
        output = cStringIO.StringIO()
        match = re.match('^(jpeg|png)(\d{1,3})$', format)
        if match:
            image_format, quality = match.groups()
            quality = int(quality)
            if image_format == 'jpeg':
                image.save(output, 'jpeg', quality=quality)
            else:
                image = image.convert('P', palette=Image.ADAPTIVE, colors=quality)
                bits = int(log(quality, 2))
                image.save(output, 'png', bits=bits)
        else:
            image.save(output, format)
        data = output.getvalue()
        output.close()

    return HttpResponse(data, content_type=mimetype)
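
When no bounds are passed in the URL, the view above pads the extent of the posted geometries by 20% of its span on each side and falls back to a fixed buffer when the extent collapses (a single point feature). A stand-alone sketch of that padding logic, with made-up coordinates:

    import logging

    logging.basicConfig(level=logging.DEBUG)

    def padded_bounds(minx, miny, maxx, maxy, buffer_size=0.2, fallback=1000):
        # Pad each axis by buffer_size of its span, or by the fallback if the span is zero.
        x_buf = (maxx - minx) * buffer_size or fallback
        y_buf = (maxy - miny) * buffer_size or fallback
        bounds = (minx - x_buf, miny - y_buf, maxx + x_buf, maxy + y_buf)
        logging.debug("padded bounds: %s", bounds)
        return bounds

    padded_bounds(100.0, 200.0, 100.0, 300.0)   # zero width -> x buffer falls back to 1000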

Example 37

Project: pytrainer
Source File: googlemaps.py
View license
    def createHtml_api3(self,polyline, minlat, minlon, maxlat, maxlon, startinfo, finishinfo, laps, linetype):
        '''
        Generate a Google maps html file using the v3 api
            documentation at http://code.google.com/apis/maps/documentation/v3
        '''
        logging.debug(">>")
        if self.waypoint is not None:
            waypoints = self.waypoint.getAllWaypoints()
            #TODO waypoints not supported in this function yet
            #TODO sort polyline encoding (not supported in v3?)
            #TODO check http://code.google.com/apis/maps/documentation/v3/overlays.html#Polylines for MVArray??
        content = '''
        <html>
        <head>
        <style type="text/css">
            div.info_content { font-family: sans-serif; font-size: 10px; }
        </style>
        <meta name="viewport" content="initial-scale=1.0, user-scalable=no" />
        <script type="text/javascript" src="http://maps.google.com/maps/api/js?sensor=false"></script>
        <script type="text/javascript">
          function initialize() {\n'''
        content += "            var startlatlng = %s ;\n" % (polyline[0][0])
        content += "            var centerlatlng = new google.maps.LatLng(%f, %f);\n" % ((minlat+maxlat)/2., (minlon+maxlon)/2.)
        content += "            var endlatlng = %s;\n" % (polyline[-1][0])
        content += "            var swlatlng = new google.maps.LatLng(%f, %f);\n" % (minlat,minlon)
        content += "            var nelatlng = new google.maps.LatLng(%f, %f);\n" % (maxlat,maxlon)
        content += "            var startcontent = \"%s\";\n" % (startinfo)
        content += "            var finishcontent = \"%s\";\n" % (finishinfo)
        content += "            var startimageloc = \"%s/glade/start.png\";\n" % (os.path.abspath(self.data_path))
        content += "            var finishimageloc = \"%s/glade/finish.png\";\n" % (os.path.abspath(self.data_path))
        content += "            var lapimageloc = \"%s/glade/waypoint.png\";\n" % (os.path.abspath(self.data_path))
        content +='''
            var myOptions = {
              zoom: 8,
              center: centerlatlng,
              scaleControl: true,
              mapTypeId: google.maps.MapTypeId.ROADMAP
            };

            var startimage = new google.maps.MarkerImage(startimageloc,\n
              // This marker is 32 pixels wide by 32 pixels tall.
              new google.maps.Size(32, 32),
              // The origin for this image is 0,0.
              new google.maps.Point(0,0),
              // The anchor for this image is the base of the flagpole
              new google.maps.Point(16, 32));\n\n
            var finishimage = new google.maps.MarkerImage(finishimageloc,\n
              // This marker is 32 pixels wide by 32 pixels tall.
              new google.maps.Size(32, 32),
              // The origin for this image is 0,0.
              new google.maps.Point(0,0),
              // The anchor for this image is the base of the flagpole
              new google.maps.Point(16, 32));\n

            var lapimage = new google.maps.MarkerImage(lapimageloc,\n
              // This marker is 32 pixels wide by 32 pixels tall.
              new google.maps.Size(32, 32),
              // The origin for this image is 0,0.
              new google.maps.Point(0,0),
              // The anchor for this image is the base of the flagpole
              new google.maps.Point(16, 32));\n

            var map = new google.maps.Map(document.getElementById("map_canvas"), myOptions);
            var startmarker = new google.maps.Marker({
              position: startlatlng,
              map: map,
              icon: startimage,
              title:"Start"});

            var finishmarker = new google.maps.Marker({
              position: endlatlng,
              icon: finishimage,
              map: map,
              title:"End"}); \n

            //Add the info windows
            var startinfo = new google.maps.InfoWindow({
                content: startcontent
            });

            var finishinfo = new google.maps.InfoWindow({
                content: finishcontent
            });

            google.maps.event.addListener(startmarker, 'click', function() {
              startinfo.open(map,startmarker);
            });

            google.maps.event.addListener(finishmarker, 'click', function() {
              finishinfo.open(map,finishmarker);
            });\n'''

        #"id_lap, record, elapsed_time, distance, start_lat, start_lon, end_lat, end_lon, calories, lap_number",
        for lap in laps:
            lapNumber = int(lap['lap_number'])+1
            elapsedTime = float(lap['elapsed_time'])
            elapsedTimeHours = int(elapsedTime/3600)
            elapsedTimeMins = int((elapsedTime - (elapsedTimeHours * 3600)) / 60)
            elapsedTimeSecs = elapsedTime - (elapsedTimeHours * 3600) - (elapsedTimeMins * 60)
            if elapsedTimeHours > 0:
                strElapsedTime = "%0.0dh:%0.2dm:%0.2fs" % (elapsedTimeHours, elapsedTimeMins, elapsedTimeSecs)
            elif elapsedTimeMins > 0:
                strElapsedTime = "%0.0dm:%0.2fs" % (elapsedTimeMins, elapsedTimeSecs)
            else:
                strElapsedTime = "%0.0fs" % (elapsedTimeSecs)
            #process lat and lon for this lap
            try:
                lapLat = float(lap['end_lat'])
                lapLon = float(lap['end_lon'])
                content += "var lap%dmarker = new google.maps.Marker({position: new google.maps.LatLng(%f, %f), icon: lapimage, map: map,  title:\"Lap%d\"}); \n " % (lapNumber, lapLat, lapLon, lapNumber)
                content += "var lap%d = new google.maps.InfoWindow({content: \"<div class='info_content'>End of lap:%s<br>Elapsed time:%s<br>Distance:%0.2f km<br>Calories:%s</div>\" });\n" % (lapNumber, lapNumber, strElapsedTime, float(lap['distance'])/1000, lap['calories'])
                content += "google.maps.event.addListener(lap%dmarker, 'click', function() { lap%d.open(map,lap%dmarker); });\n" % (lapNumber,lapNumber,lapNumber)
            except Exception as e:
                # Error processing lap lat or lon
                # don't show this lap
                logging.debug("Error processing lap " + str(lap))
                logging.debug(str(e))

        content += '''

            var boundsBox = new google.maps.LatLngBounds(swlatlng, nelatlng );\n
            map.fitBounds(boundsBox);\n'''
            
        pre = 0
        for point in polyline:
            if pre:
                content += '''var polylineCoordinates = [\n'''
                content += "                                       %s,\n" % (pre[0])
                content += "                                       %s,\n" % (point[0])
                content += '''            ];\n
                    // Add a polyline.\n
                    var polyline = new google.maps.Polyline({\n
                            path: polylineCoordinates,\n
                            strokeColor: \"%s\",\n
                            strokeOpacity: 0.9,\n
                            strokeWeight: 5,\n
                            });\n
                polyline.setMap(map);\n''' % point[2]
                
                contenttemplate = [
                    "%s",
                    "Speed: %0.1f km/h",
                    "HR: %d bpm",
                    "Cadence: %d",
                ]
                
                content += '''
                    google.maps.event.addListener(polyline, 'click', function(event) {
                        var marker = new google.maps.InfoWindow({
                          position: event.latLng, 
                          content: "%s"
                        });
                        marker.setMap(map);
                    });
                    ''' % contenttemplate[linetype] % point[1]
            pre = point
        
        content += '''
          }

        </script>
        </head>
        <body onload="initialize()">
          <div id="map_canvas" style="width:100%; height:100%"></div>
        </body>
        </html>'''
        file = fileUtils(self.htmlfile,content)
        file.run()
        logging.debug("<<")
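
pytrainer brackets many of its methods with logging.debug(">>") on entry and logging.debug("<<") on exit, as seen above. The same convention can be factored into a decorator; the sketch below is hypothetical and not part of pytrainer:

import functools
import logging

def trace(func):
    # Log ">>"/"<<" markers around a call, mirroring the entry/exit
    # convention used in the example above.
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        logging.debug(">> %s", func.__name__)
        try:
            return func(*args, **kwargs)
        finally:
            logging.debug("<< %s", func.__name__)
    return wrapper

@trace
def create_html():
    return "<html></html>"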

Example 38

Project: pootle
Source File: add_vfolders.py
View license
    def handle(self, **options):
        """Add virtual folders from file."""

        try:
            with open(options['vfolder'][0], "r") as inputfile:
                vfolders = json.load(inputfile)
        except IOError as e:
            raise CommandError(e)
        except ValueError as e:
            raise CommandError("Please check if the JSON file is malformed. "
                               "Original error:\n%s" % e)

        for vfolder_item in vfolders:
            try:
                temp = ','.join(vfolder_item['filters']['files'])
                if not temp:
                    raise ValueError
            except (KeyError, ValueError):
                raise CommandError("Virtual folder '%s' has no filtering "
                                   "rules." % vfolder_item['name'])

        self.stdout.write("Importing virtual folders...")

        added_count = 0
        updated_count = 0
        errored_count = 0

        for vfolder_item in vfolders:
            vfolder_item['name'] = vfolder_item['name'].strip().lower()

            # Put all the files for each virtual folder as a list and save it
            # as its filter rules.
            languages, projects, new_rules = self.parse_vfolder_rules(
                vfolder_item['location'].strip(),
                vfolder_item['filters']['files']
            )

            vfolder_item['filter_rules'] = new_rules

            if 'filters' in vfolder_item:
                del vfolder_item['filters']

            # Now create or update the virtual folder.
            try:
                # Retrieve the virtual folder if it exists.
                vfolder = VirtualFolder.objects.get(name=vfolder_item['name'])
            except VirtualFolder.DoesNotExist:
                # If the virtual folder doesn't exist yet then create it.
                try:
                    self.stdout.write(u'Adding new virtual folder %s...' %
                                      vfolder_item['name'])
                    vfolder_item['all_projects'] = not projects
                    vfolder_item['all_languages'] = not languages
                    vfolder = VirtualFolder(**vfolder_item)
                    vfolder.save()
                except ValidationError as e:
                    errored_count += 1
                    self.stdout.write('FAILED')
                    self.stderr.write(e)
                else:
                    if projects:
                        vfolder.projects.add(
                            *Project.objects.filter(code__in=projects)
                        )
                    if languages:
                        vfolder.languages.add(
                            *Language.objects.filter(code__in=languages)
                        )
                    self.stdout.write('DONE')
                    added_count += 1
            else:
                # Update the already existing virtual folder.
                changed = False

                if not projects:
                    vfolder.all_projects = True
                    changed = True
                    logging.debug("'All projects' for virtual folder '%s' "
                                  "will be changed.", vfolder.name)

                if not languages:
                    vfolder.all_languages = True
                    changed = True
                    logging.debug("'All languages' for virtual folder '%s' "
                                  "will be changed.", vfolder.name)

                if projects:
                    vfolder.projects.set(
                        *Project.objects.filter(code__in=projects)
                    )
                if languages:
                    vfolder.languages.set(
                        *Language.objects.filter(code__in=languages)
                    )

                if vfolder.filter_rules != vfolder_item['filter_rules']:
                    vfolder.filter_rules = vfolder_item['filter_rules']
                    changed = True
                    logging.debug("Filter rules for virtual folder '%s' will "
                                  "be changed.", vfolder.name)

                if ('priority' in vfolder_item and
                    vfolder.priority != vfolder_item['priority']):

                    vfolder.priority = vfolder_item['priority']
                    changed = True
                    logging.debug("Priority for virtual folder '%s' will be "
                                  "changed to %f.", vfolder.name,
                                  vfolder.priority)

                if ('is_public' in vfolder_item and
                    vfolder.is_public != vfolder_item['is_public']):

                    vfolder.is_public = vfolder_item['is_public']
                    changed = True
                    logging.debug("is_public status for virtual folder "
                                  "'%s' will be changed.", vfolder.name)

                if ('description' in vfolder_item and
                    vfolder.description.raw != vfolder_item['description']):

                    vfolder.description = vfolder_item['description']
                    changed = True
                    logging.debug("Description for virtual folder '%s' will "
                                  "be changed.", vfolder.name)

                if changed:
                    try:
                        self.stdout.write(u'Updating virtual folder %s...' %
                                          vfolder_item['name'])
                        vfolder.save()
                    except ValidationError as e:
                        errored_count += 1
                        self.stdout.write('FAILED')
                        self.stderr.write(e)
                    else:
                        self.stdout.write('DONE')
                        updated_count += 1

        self.stdout.write("\nErrored: %d\nAdded: %d\n"
                          "Updated: %d\nUnchanged: %d" %
                          (errored_count, added_count, updated_count,
                           len(vfolders) - errored_count - added_count -
                           updated_count))
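
The example above passes interpolation arguments to logging.debug directly (for example logging.debug("... '%s' ...", vfolder.name)) instead of pre-formatting the message. With this deferred style the string is only built if a handler actually emits the record. A minimal sketch of the difference:

import logging

logging.basicConfig(level=logging.INFO)

name = "browser"

# Deferred interpolation: the message is formatted only if DEBUG is enabled,
# so this call is cheap at INFO level.
logging.debug("Filter rules for virtual folder '%s' will be changed.", name)

# Eager interpolation: the string is always built, even though it is dropped.
logging.debug("Filter rules for virtual folder '%s' will be changed." % name)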

Example 39

View license
def run(test, params, env):
    """
    Test snapshot-create-as command
    Make sure that a clean repo can be used, because qemu-guest-agent needs to
    be installed in the guest.

    The command creates a snapshot (disk and RAM) from arguments covering the
    following points:
    * virsh snapshot-create-as --print-xml --diskspec --name --description
    * virsh snapshot-create-as --print-xml with multi --diskspec
    * virsh snapshot-create-as --print-xml --memspec
    * virsh snapshot-create-as --description
    * virsh snapshot-create-as --no-metadata
    * virsh snapshot-create-as --no-metadata --print-xml (negative test)
    * virsh snapshot-create-as --atomic --disk-only
    * virsh snapshot-create-as --quiesce --disk-only (positive and negative)
    * virsh snapshot-create-as --reuse-external
    * virsh snapshot-create-as --disk-only --diskspec
    * virsh snapshot-create-as --memspec --reuse-external --atomic(negative)
    * virsh snapshot-create-as --disk-only and --memspec (negative)
    * Create multi snapshots with snapshot-create-as
    * Create snapshot with name a--a a--a--snap1
    """

    if not virsh.has_help_command('snapshot-create-as'):
        raise error.TestNAError("This version of libvirt does not support "
                                "the snapshot-create-as test")

    vm_name = params.get("main_vm")
    status_error = params.get("status_error", "no")
    options = params.get("snap_createas_opts")
    multi_num = params.get("multi_num", "1")
    diskspec_num = params.get("diskspec_num", "1")
    bad_disk = params.get("bad_disk")
    reuse_external = "yes" == params.get("reuse_external", "no")
    start_ga = params.get("start_ga", "yes")
    domain_state = params.get("domain_state")
    memspec_opts = params.get("memspec_opts")
    config_format = "yes" == params.get("config_format", "no")
    snapshot_image_format = params.get("snapshot_image_format")
    diskspec_opts = params.get("diskspec_opts")
    create_autodestroy = 'yes' == params.get("create_autodestroy", "no")
    unix_channel = "yes" == params.get("unix_channel", "yes")
    dac_denial = "yes" == params.get("dac_denial", "no")
    check_json_no_savevm = "yes" == params.get("check_json_no_savevm", "no")
    disk_snapshot_attr = params.get('disk_snapshot_attr', 'external')
    set_snapshot_attr = "yes" == params.get("set_snapshot_attr", "no")

    # gluster related params
    replace_vm_disk = "yes" == params.get("replace_vm_disk", "no")
    disk_src_protocol = params.get("disk_source_protocol")
    restart_tgtd = params.get("restart_tgtd", "no")
    vol_name = params.get("vol_name")
    tmp_dir = data_dir.get_tmp_dir()
    pool_name = params.get("pool_name", "gluster-pool")
    brick_path = os.path.join(tmp_dir, pool_name)

    uri = params.get("virsh_uri")
    usr = params.get('unprivileged_user')
    if usr:
        if usr.count('EXAMPLE'):
            usr = 'testacl'

    if disk_src_protocol == 'iscsi':
        if not libvirt_version.version_compare(1, 0, 4):
            raise error.TestNAError("'iscsi' disk is not supported in"
                                    " the current libvirt version.")

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current"
                                    " libvirt version.")

    if not libvirt_version.version_compare(1, 2, 7):
        # As bug 1017289 was closed as WONTFIX, the support only
        # exists on 1.2.7 and higher
        if disk_src_protocol == 'gluster':
            raise error.TestNAError("Snapshot on glusterfs is not supported "
                                    "in the current version. Check more info with "
                                    "https://bugzilla.redhat.com/buglist.cgi?"
                                    "bug_id=1017289,1032370")

    opt_names = locals()
    if memspec_opts is not None:
        mem_options = compose_disk_options(test, params, memspec_opts)
        # if the memspec parameter is given without "file=" then we only need
        # to prepend the test dir to it.
        if mem_options is None:
            mem_options = os.path.join(test.tmpdir, memspec_opts)
        options += " --memspec " + mem_options

    tag_diskspec = 0
    dnum = int(diskspec_num)
    if diskspec_opts is not None:
        tag_diskspec = 1
        opt_names['diskopts_1'] = diskspec_opts

    # diskspec_opts[n] is used in cfg when more than 1 --diskspec is used
    if dnum > 1:
        tag_diskspec = 1
        for i in range(1, dnum + 1):
            opt_names["diskopts_%s" % i] = params.get("diskspec_opts%s" % i)

    if tag_diskspec == 1:
        for i in range(1, dnum + 1):
            disk_options = compose_disk_options(test, params,
                                                opt_names["diskopts_%s" % i])
            options += " --diskspec " + disk_options

    logging.debug("options are %s", options)

    vm = env.get_vm(vm_name)
    option_dict = {}
    option_dict = utils_misc.valued_option_dict(options, r' --(?!-)')
    logging.debug("option_dict is %s", option_dict)

    # A backup of original vm
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    logging.debug("original xml is %s", vmxml_backup)

    # Generate empty image for negative test
    if bad_disk is not None:
        bad_disk = os.path.join(test.tmpdir, bad_disk)
        os.open(bad_disk, os.O_RDWR | os.O_CREAT)

    # Generate external disk
    if reuse_external:
        disk_path = ''
        for i in range(dnum):
            external_disk = "external_disk%s" % i
            if params.get(external_disk):
                disk_path = os.path.join(test.tmpdir,
                                         params.get(external_disk))
                utils.run("qemu-img create -f qcow2 %s 1G" % disk_path)
        # Only chmod of the last external disk for negative case
        if dac_denial:
            utils.run("chmod 500 %s" % disk_path)

    qemu_conf = None
    libvirtd_conf = None
    libvirtd_log_path = None
    libvirtd = utils_libvirtd.Libvirtd()
    try:
        # Config "snapshot_image_format" option in qemu.conf
        if config_format:
            qemu_conf = utils_config.LibvirtQemuConfig()
            qemu_conf.snapshot_image_format = snapshot_image_format
            logging.debug("the qemu config file content is:\n %s" % qemu_conf)
            libvirtd.restart()

        if check_json_no_savevm:
            libvirtd_conf = utils_config.LibvirtdConfig()
            libvirtd_conf["log_level"] = '1'
            libvirtd_conf["log_filters"] = '"1:json 3:remote 4:event"'
            libvirtd_log_path = os.path.join(test.tmpdir, "libvirtd.log")
            libvirtd_conf["log_outputs"] = '"1:file:%s"' % libvirtd_log_path
            logging.debug("the libvirtd config file content is:\n %s" %
                          libvirtd_conf)
            libvirtd.restart()

        if replace_vm_disk:
            libvirt.set_vm_disk(vm, params, tmp_dir)

        if set_snapshot_attr:
            if vm.is_alive():
                vm.destroy(gracefully=False)
            vmxml_new = vm_xml.VMXML.new_from_dumpxml(vm_name)
            disk_xml = vmxml_backup.get_devices(device_type="disk")[0]
            vmxml_new.del_device(disk_xml)
            # set snapshot attribute in disk xml
            disk_xml.snapshot = disk_snapshot_attr
            new_disk = disk.Disk(type_name='file')
            new_disk.xmltreefile = disk_xml.xmltreefile
            vmxml_new.add_device(new_disk)
            logging.debug("The vm xml now is: %s" % vmxml_new.xmltreefile)
            vmxml_new.sync()
            vm.start()

        # Start qemu-ga on guest if have --quiesce
        if unix_channel and options.find("quiesce") >= 0:
            vm.prepare_guest_agent()
            session = vm.wait_for_login()
            if start_ga == "no":
                # The qemu-ga could be running and should be killed
                session.cmd("kill -9 `pidof qemu-ga`")
                # Check if the qemu-ga get killed
                stat_ps = session.cmd_status("ps aux |grep [q]emu-ga")
                if not stat_ps:
                    # As managed by systemd and set as autostart, qemu-ga
                    # could be restarted, so use systemctl to stop it.
                    session.cmd("systemctl stop qemu-guest-agent")
                    stat_ps = session.cmd_status("ps aux |grep [q]emu-ga")
                    if not stat_ps:
                        raise error.TestNAError("Failed to stop agent in "
                                                "guest")

            if domain_state == "paused":
                virsh.suspend(vm_name)
        else:
            # Remove channel if exist
            if vm.is_alive():
                vm.destroy(gracefully=False)
            xml_inst = vm_xml.VMXML.new_from_dumpxml(vm_name)
            xml_inst.remove_agent_channels()
            vm.start()

        # Record the previous snapshot-list
        snaps_before = virsh.snapshot_list(vm_name)

        # Attach disks before creating the snapshot if --print-xml is not
        # used and multiple disks are specified in cfg
        if dnum > 1 and "--print-xml" not in options:
            for i in range(1, dnum):
                disk_path = os.path.join(test.tmpdir, 'disk%s.qcow2' % i)
                utils.run("qemu-img create -f qcow2 %s 200M" % disk_path)
                virsh.attach_disk(vm_name, disk_path,
                                  'vd%s' % list(string.lowercase)[i],
                                  debug=True)

        # Run virsh command
        # May create several snapshots, according to configuration
        for count in range(int(multi_num)):
            if create_autodestroy:
                # Run virsh command in interactive mode
                vmxml_backup.undefine()
                vp = virsh.VirshPersistent()
                vp.create(vmxml_backup['xml'], '--autodestroy')
                cmd_result = vp.snapshot_create_as(vm_name, options,
                                                   ignore_status=True,
                                                   debug=True)
                vp.close_session()
                vmxml_backup.define()
            else:
                cmd_result = virsh.snapshot_create_as(vm_name, options,
                                                      unprivileged_user=usr,
                                                      uri=uri,
                                                      ignore_status=True,
                                                      debug=True)
                # For multiple snapshots without a specific snapshot name, the
                # snapshot name is derived from a time string with 1-second
                # resolution; sleep 1 second here to avoid creation failures
                # caused by duplicate names.
                if int(multi_num) > 1:
                    time.sleep(1.1)
            output = cmd_result.stdout.strip()
            status = cmd_result.exit_status

            # check status_error
            if status_error == "yes":
                if status == 0:
                    raise error.TestFail("Run successfully with wrong command!")
                else:
                    # Check that the memspec file has been removed after the failure
                    if (options.find("memspec") >= 0 and
                            options.find("atomic") >= 0):
                        if os.path.isfile(option_dict['memspec']):
                            os.remove(option_dict['memspec'])
                            raise error.TestFail("Run failed but file %s exist"
                                                 % option_dict['memspec'])
                        else:
                            logging.info("Run failed as expected and memspec"
                                         " file has already been removed")
                    # Check domain xml is not updated if reuse external fails
                    elif reuse_external and dac_denial:
                        output = virsh.dumpxml(vm_name).stdout.strip()
                        if "reuse_external" in output:
                            raise error.TestFail("Domain xml should not be "
                                                 "updated with snapshot image")
                    else:
                        logging.info("Run failed as expected")

            elif status_error == "no":
                if status != 0:
                    raise error.TestFail("Run failed with right command: %s"
                                         % output)
                else:
                    # Check the special options
                    snaps_list = virsh.snapshot_list(vm_name)
                    logging.debug("snaps_list is %s", snaps_list)

                    check_snapslist(vm_name, options, option_dict, output,
                                    snaps_before, snaps_list)

                    # To cover bug 872292
                    if check_json_no_savevm:
                        pattern = "The command savevm has not been found"
                        with open(libvirtd_log_path) as f:
                            for line in f:
                                if pattern in line and "error" in line:
                                    raise error.TestFail("'%s' was found: %s"
                                                         % (pattern, line))

    finally:
        if vm.is_alive():
            vm.destroy()
        # recover domain xml
        xml_recover(vmxml_backup)
        path = "/var/lib/libvirt/qemu/snapshot/" + vm_name
        if os.path.isfile(path):
            raise error.TestFail("Still can find snapshot metadata")

        if disk_src_protocol == 'gluster':
            libvirt.setup_or_cleanup_gluster(False, vol_name, brick_path)
            libvirtd.restart()

        if disk_src_protocol == 'iscsi':
            libvirt.setup_or_cleanup_iscsi(False, restart_tgtd=restart_tgtd)

        # rm bad disks
        if bad_disk is not None:
            os.remove(bad_disk)
        # remove attached disks and reused external disks
        if dnum > 1 and "--print-xml" not in options:
            for i in range(dnum):
                disk_path = os.path.join(test.tmpdir, 'disk%s.qcow2' % i)
                if os.path.exists(disk_path):
                    os.unlink(disk_path)
                if reuse_external:
                    external_disk = "external_disk%s" % i
                    disk_path = os.path.join(test.tmpdir,
                                             params.get(external_disk))
                    if os.path.exists(disk_path):
                        os.unlink(disk_path)

        # restore config
        if config_format and qemu_conf:
            qemu_conf.restore()

        if libvirtd_conf:
            libvirtd_conf.restore()

        if libvirtd_conf or (config_format and qemu_conf):
            libvirtd.restart()

        if libvirtd_log_path and os.path.exists(libvirtd_log_path):
            os.unlink(libvirtd_log_path)
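
The test above converts the assembled virsh option string into a dictionary with utils_misc.valued_option_dict(options, r' --(?!-)') and logs the result at DEBUG level. A standalone sketch of that kind of option-string parsing; the helper below is illustrative only and is not the avocado-vt implementation:

import logging
import re

logging.basicConfig(level=logging.DEBUG)

def valued_options_to_dict(options):
    # Split on " --" boundaries and map each option name to the remainder of
    # its fragment, e.g. "--memspec file=/tmp/mem" -> {'memspec': 'file=/tmp/mem'}.
    option_dict = {}
    for fragment in re.split(r' --(?!-)', options.strip()):
        if not fragment:
            continue
        name, _, value = fragment.lstrip('-').partition(' ')
        option_dict[name] = value
    return option_dict

options = "--disk-only --diskspec vda,snapshot=external --memspec /tmp/mem"
logging.debug("option_dict is %s", valued_options_to_dict(options))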

Example 40

Project: ssbench
Source File: worker.py
View license
    def ignoring_http_responses(self, statuses, fn, call_info, **extra_keys):
        if 401 not in statuses:
            statuses += (401,)
        args = dict(
            container=call_info['container'],
            name=call_info['name'],
        )
        args.update(extra_keys)

        if 'auth_kwargs' not in call_info:
            raise ValueError('Got benchmark job without "auth_kwargs" key!')

        tries = 0
        while True:
            # Make sure we've got a current storage_url/token
            if call_info['auth_kwargs'].get('token', None):
                token_key = None
                args['url'] = random.choice(
                    call_info['auth_kwargs']['storage_urls'])
                args['token'] = call_info['auth_kwargs']['token']
            else:
                token_key = self._token_key(call_info['auth_kwargs'])
                if token_key not in self.token_data:
                    self.token_data_lock.acquire()
                    collided = False
                    try:
                        if token_key not in self.token_data:
                            logging.debug('Authenticating with %r',
                                          call_info['auth_kwargs'])
                            storage_url, token = client.get_auth(
                                **call_info['auth_kwargs'])
                            override_urls = call_info['auth_kwargs'].get(
                                'storage_urls', None)
                            if override_urls:
                                logging.debug(
                                    'Will override auth storage url %s with '
                                    'one of %r', storage_url, override_urls)
                                storage_urls = override_urls
                            else:
                                storage_urls = [storage_url]
                            self.token_data[token_key] = (storage_urls, token)
                        else:
                            collided = True
                    finally:
                        self.token_data_lock.release()
                    if collided:
                        # Wait just a little bit if we just collided with
                        # another greenthread's re-auth
                        logging.debug('Collided on re-auth; sleeping 0.005')
                        gevent.sleep(0.005)
                storage_urls, args['token'] = self.token_data[token_key]
                args['url'] = random.choice(storage_urls)

            # Check for connection pool initialization (protected by a
            # semaphore)
            if args['url'] not in self.conn_pools:
                self._create_connection_pool(
                    args['url'],
                    call_info.get('connect_timeout',
                                  client.DEFAULT_CONNECT_TIMEOUT),
                    call_info.get('network_timeout',
                                  client.DEFAULT_NETWORK_TIMEOUT))

            try:
                fn_results = None
                with self.connection(args['url']) as conn:
                    fn_results = fn(http_conn=conn, **args)
                if fn_results:
                    if tries != 0:
                        logging.info('%r succeeded after %d tries',
                                     call_info, tries)
                    break
                tries += 1
                if tries > self.max_retries:
                    e = Exception('No fn_results for %r after %d retries' % (
                        fn, self.max_retries))
                    e.retries = tries - 1
                    raise e
            # XXX The name of this method does not suggest that it
            # will also retry on socket-level errors. Regardless,
            # sometimes Swift refuses connections (probably when it's
            # way overloaded and the listen socket's connection queue
            # (in the kernel) is full, so the kernel just says RST).
            #
            # UPDATE: connections should be handled by the ConnectionPool
            # (which will trap socket.error and retry after a slight delay), so
            # socket.error should NOT get raised here for connection failures.
            # So hopefully this socket.error trapping code path will not get
            # hit.
            except socket.error as error:
                tries += 1
                if tries > self.max_retries:
                    error.retries = tries - 1
                    raise error
            except client.ClientException as error:
                tries += 1
                if error.http_status in statuses and \
                        tries <= self.max_retries:
                    if error.http_status == 401 and token_key:
                        if token_key in self.token_data and \
                                self.token_data[token_key][1] == args['token']:
                            self.token_data_lock.acquire()
                            try:
                                if token_key in self.token_data and \
                                        self.token_data[token_key][1] == \
                                        args['token']:
                                    logging.debug(
                                        'Deleting token %s',
                                        self.token_data[token_key][1])
                                    del self.token_data[token_key]
                            finally:
                                self.token_data_lock.release()
                    logging.debug("Retrying an error: %r", error)
                else:
                    error.retries = tries - 1
                    raise error
        fn_results['retries'] = tries
        return fn_results
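
The worker above logs the auth arguments with the %r conversion (logging.debug('Authenticating with %r', ...)), which records repr() output so quoting and stray whitespace stay visible in the log. A small illustration, unrelated to ssbench itself:

import logging

logging.basicConfig(level=logging.DEBUG)

token = 'AUTH_tk1234 '  # note the trailing space

logging.debug('Deleting token %s', token)  # trailing space is invisible in the log
logging.debug('Deleting token %r', token)  # repr() keeps the quotes: 'AUTH_tk1234 '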

Example 41

Project: Windows-Agent
Source File: basic.py
View license
def collect():
    logging.debug('enter basic collect')
    push_interval = 60
    zh_decode = "gbk"

    time_now = int(time.time())
    payload = []
    data = {"endpoint": g.HOSTNAME, "metric": "", "timestamp": time_now,
            "step": push_interval, "value": "", "counterType": "", "tags": ""}

    cpu_status = psutil.cpu_times_percent()
    mem_status = psutil.virtual_memory()
    swap_status = psutil.swap_memory()
    disk_io_status = psutil.disk_io_counters(perdisk=True)
    net_io_status = psutil.net_io_counters(pernic=True)

    # agent alive
    data["metric"] = "agent.alive"
    data["value"] = 1
    data["counterType"] = "GAUGE"
    payload.append(copy.copy(data))

    logging.debug(cpu_status)
    data["metric"] = "cpu.user"
    data["value"] = cpu_status.user
    data["counterType"] = "GAUGE"
    payload.append(copy.copy(data))

    data["metric"] = "cpu.system"
    data["value"] = cpu_status.system
    payload.append(copy.copy(data))

    data["metric"] = "cpu.idle"
    data["value"] = cpu_status.idle
    payload.append(copy.copy(data))

    data["metric"] = "mem.memused.percent"
    data["value"] = mem_status.percent
    payload.append(copy.copy(data))

    data["metric"] = "mem.swapused.percent"
    data["value"] = swap_status.percent
    payload.append(copy.copy(data))

    disk_status = psutil.disk_partitions()
    for disk in disk_status:
        if 'cdrom' in disk.opts or disk.fstype == '':
            continue
        disk_info = psutil.disk_usage(disk.mountpoint)

        data["metric"] = "df.used.percent"
        data["value"] = disk_info.percent
        data["tags"] = "disk=" + disk.device.split(":")[0]
        payload.append(copy.copy(data))

        data["metric"] = "df.byte.total"
        data["value"] = disk_info.total
        payload.append(copy.copy(data))

        data["metric"] = "df.byte.used"
        data["value"] = disk_info.used
        payload.append(copy.copy(data))

        data["metric"] = "df.byte.free"
        data["value"] = disk_info.free
        payload.append(copy.copy(data))

    for key in disk_io_status:
        data["metric"] = "disk.io.read_count"
        data["value"] = disk_io_status[key].read_count
        data["tags"] = "device=" + key
        data["counterType"] = "COUNTER"
        payload.append(copy.copy(data))

        data["metric"] = "disk.io.write_count"
        data["value"] = disk_io_status[key].write_count
        payload.append(copy.copy(data))

        data["metric"] = "disk.io.read_bytes"
        data["value"] = disk_io_status[key].read_bytes
        payload.append(copy.copy(data))

        data["metric"] = "disk.io.write_bytes"
        data["value"] = disk_io_status[key].write_bytes
        payload.append(copy.copy(data))

        data["metric"] = "disk.io.read_time"
        data["value"] = disk_io_status[key].read_time
        payload.append(copy.copy(data))

        data["metric"] = "disk.io.write_time"
        data["value"] = disk_io_status[key].write_time
        payload.append(copy.copy(data))

    for key in net_io_status:
        if is_interface_ignore(key):
            continue

        data["metric"] = "net.if.in.mbits"
        data["value"] = net_io_status[key].bytes_recv * 8 / 100000
        data["tags"] = "interface=" + key.decode(zh_decode)
        payload.append(copy.copy(data))

        data["metric"] = "net.if.out.mbits"
        data["value"] = net_io_status[key].bytes_sent * 8 / 100000
        payload.append(copy.copy(data))

        data["metric"] = "net.if.in.packets"
        data["value"] = net_io_status[key].packets_recv
        payload.append(copy.copy(data))

        data["metric"] = "net.if.out.packets"
        data["value"] = net_io_status[key].packets_sent
        payload.append(copy.copy(data))

        data["metric"] = "net.if.in.error"
        data["value"] = net_io_status[key].errin
        payload.append(copy.copy(data))

        data["metric"] = "net.if.out.error"
        data["value"] = net_io_status[key].errout
        payload.append(copy.copy(data))

        data["metric"] = "net.if.in.drop"
        data["value"] = net_io_status[key].dropin
        payload.append(copy.copy(data))

        data["metric"] = "net.if.out.drop"
        data["value"] = net_io_status[key].dropout
        payload.append(copy.copy(data))
    logging.debug(payload)

    payload = filter(lambda x: x['metric'] not in g.IGNORE, payload)

    try:
        result = send_data_to_transfer(payload)
    except Exception as e:
        logging.error(e)
    else:
        logging.info(result)
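
collect() above hands whole psutil result objects and the full payload list to logging.debug. Formatting is deferred until a handler emits the record, but when building the message itself is expensive the call can be skipped entirely with isEnabledFor; a minimal, hypothetical sketch:

import json
import logging

logger = logging.getLogger(__name__)

def report(payload):
    # Only serialize the payload when DEBUG output will actually be emitted.
    if logger.isEnabledFor(logging.DEBUG):
        logger.debug("payload: %s", json.dumps(payload, indent=2))

report([{"metric": "cpu.idle", "value": 97.5}])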

Example 42

Project: flashbake
Source File: commit.py
View license
def commit(control_config, hot_files, quiet_mins):
    # change to the project directory, necessary to find the .flashbake file and
    # to correctly refer to the project files by relative paths
    os.chdir(hot_files.project_dir)

    git_obj = git.Git(hot_files.project_dir, control_config.git_path)

    # the wrapper object ensures git is on the path
    # get the git status for the project
    git_status = git_obj.status()

    _handle_fatal(hot_files, git_status)

    # in particular find the existing entries that need a commit
    pending_re = re.compile('\s*(renamed|copied|modified|new file):.*')

    now = datetime.datetime.today()
    quiet_period = datetime.timedelta(minutes=quiet_mins)

    to_commit = list()
    # first look in the files git already knows about
    logging.debug("Examining git status.")
    for line in git_status.splitlines():
        if pending_re.match(line):
            pending_file = _trimgit(line)

            # not in the dot-control file, skip it
            if not (hot_files.contains(pending_file)):
                continue

            logging.debug('Parsing status line %s to determine commit action' % line)

            # remove files that will be considered for commit
            hot_files.remove(pending_file)

            # check the quiet period against mtime
            last_mod = os.path.getmtime(pending_file)
            pending_mod = datetime.datetime.fromtimestamp(last_mod)
            pending_mod += quiet_period

            # add the file to the list to include in the commit
            if pending_mod < now:
                to_commit.append(pending_file)
                logging.debug('Flagging file, %s, for commit.' % pending_file)
            else:
                logging.debug('Change for file, %s, is too recent.' % pending_file)
        _capture_deleted(hot_files, line)

    logging.debug('Examining unknown or unchanged files.')

    hot_files.warnproblems()

    # figure out what the status of the remaining files is
    for control_file in hot_files.control_files:
        # this shouldn't happen since HotFiles.addfile uses glob.iglob to expand
        # the original file lines which does so based on what is in project_dir
        if not os.path.exists(control_file):
            logging.debug('%s does not exist yet.' % control_file)
            hot_files.putabsent(control_file)
            continue

        status_output = git_obj.status(control_file)

        # needed for git >= 1.7.0.4
        if status_output.find('Untracked files') > 0:
            hot_files.putneedsadd(control_file)
            continue
        if status_output.startswith('error'):
            # needed for git < 1.7.0.4
            if status_output.find('did not match') > 0:
                hot_files.putneedsadd(control_file)
                logging.debug('%s exists but is unknown by git.' % control_file)
            else:
                logging.error('Unknown error occurred!')
                logging.error(status_output)
            continue
        # use a regex to match so we can enforce whole word rather than
                # substring matches, otherwise 'foo.txt~' causes a false report of an
        # error
        control_re = re.compile('\<' + re.escape(control_file) + '\>')
        if control_re.search(status_output) == None:
            logging.debug('%s has no uncommitted changes.' % control_file)
        # if anything hits this block, we need to figure out why
        else:
            logging.error('%s is in the status message but failed other tests.' % control_file)
            logging.error('Try \'git status "%s"\' for more info.' % control_file)

    hot_files.addorphans(git_obj, control_config)

    for plugin in control_config.file_plugins:
        plugin.post_process(to_commit, hot_files, control_config)

    if len(to_commit) > 0:
        logging.info('Committing changes to known files, %s.' % to_commit)
        message_file = context.buildmessagefile(control_config)
        if not control_config.dry_run:
            # consolidate the commit to be friendly to how git normally works
            commit_output = git_obj.commit(message_file, to_commit)
            logging.debug(commit_output)
        os.remove(message_file)
        _send_commit_notice(control_config, hot_files, to_commit)
        logging.info('Commit for known files complete.')
    else:
        logging.info('No changes to known files found to commit.')

    if hot_files.needs_warning():
        _send_warning(control_config, hot_files)
    else:
        logging.info('No missing or untracked files found, not sending warning notice.')
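
The quiet-period logic above adds quiet_mins to each file's mtime and only flags the file for commit once that instant has passed. A self-contained sketch of the same check; the function name is illustrative and not flashbake's API:

import datetime
import logging
import os

logging.basicConfig(level=logging.DEBUG)

def is_quiet(path, quiet_mins):
    # A file is "quiet" once its last modification is older than quiet_mins.
    quiet_period = datetime.timedelta(minutes=quiet_mins)
    pending_mod = datetime.datetime.fromtimestamp(os.path.getmtime(path))
    return pending_mod + quiet_period < datetime.datetime.today()

logging.debug('Quiet enough to commit %s: %s', __file__, is_quiet(__file__, 5))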

Example 43

Project: BigJob
Source File: bigjob_agent.py
View license
    def __init__(self, args):
        
        self.coordination_url = args[1]
        # objects to store running jobs and processes
        self.jobs = []
        self.processes = {}
        self.freenodes = []
        self.busynodes = []
        self.restarted = {}

        # read config file
        conf_file = os.path.dirname(os.path.abspath( __file__ )) + "/../" + CONFIG_FILE
        if not os.path.exists(conf_file):
            conf_file = os.path.join(sys.prefix, CONFIG_FILE)
        logging.debug ("read configfile: " + conf_file)
        config = ConfigParser.ConfigParser()
        config.read(conf_file)
        default_dict = config.defaults()        
        self.CPR=False
        if default_dict.has_key("cpr"):
            self.CPR = default_dict["cpr"]
        self.SHELL="/bin/bash"
        if default_dict.has_key("shell"):
            self.SHELL=default_dict["shell"]
        self.MPIRUN="mpirun"
        # On TACC resources the default MPICH is 
        # linked under mpirun_rsh
        if default_dict.has_key("mpirun"):
            self.MPIRUN=default_dict["mpirun"]
        
        if default_dict.has_key("number_executor_threads"):
            THREAD_POOL_SIZE=int(default_dict["number_executor_threads"])
            
        self.OUTPUT_TAR=False
        if default_dict.has_key("create_output_tar"):
            self.OUTPUT_TAR=eval(default_dict["create_output_tar"])
            logger.debug("Create output tar: %r", self.OUTPUT_TAR)
        
        self.failed_polls = 0
        
        ##############################################################################
        # initialization of coordination and communication subsystem
        # Redis initialization
        self.base_url = args[2]
        self.cds_queue_url = None
        if len(args)==4:
            self.cds_queue_url = args[3]
        logger.debug("External queue: " + str(self.cds_queue_url))
        self.id = self.__get_bj_id(self.base_url)
        logger.debug("BigJob Agent arguments: " + str(args))
        logger.debug("Initialize C&C subsystem to pilot-url: " + self.base_url)
        logger.debug("BigJob ID: %s"%self.id)
        
        # create bj directory
        self.work_dir = os.getcwd()
        if self.work_dir.find(self.id)==-1: # working directory does not yet contain BJ id
            self.bj_dir = os.path.join(os.getcwd(), self.id)
            logger.debug("Agent working directory: %s"%self.bj_dir)
            try:
                os.makedirs(self.bj_dir)
            except:
                logger.debug("Directory already exists.")
        else:
            self.bj_dir = os.getcwd()
        
        os.chdir(self.bj_dir)
        
        if(self.coordination_url.startswith("advert://") or self.coordination_url.startswith("sqlasyncadvert://")):
            try:
                from coordination.bigjob_coordination_advert import bigjob_coordination
                logging.debug("Utilizing ADVERT Backend: " + self.coordination_url)
            except:
                logger.error("Advert Backend could not be loaded")
                exc_type, exc_value, exc_traceback = sys.exc_info()
                traceback.print_exc(file=sys.stderr)
                traceback.print_tb(exc_traceback, file=sys.stderr)
        elif (self.coordination_url.startswith("redis://")):
            try:
                from coordination.bigjob_coordination_redis import bigjob_coordination
                logger.debug("Utilizing Redis Backend: " + self.coordination_url + ".")
            except:
                logger.error("Error loading pyredis. Check configuration in bigjob_coordination_redis.py.")
        elif (self.coordination_url.startswith("tcp://")):
            try:
                from coordination.bigjob_coordination_zmq import bigjob_coordination
                logger.debug("Utilizing ZMQ Backend")
            except:
                logger.error("ZMQ Backend not found. Please install ZeroMQ (http://www.zeromq.org/intro:get-the-software) and " 
                      +"PYZMQ (http://zeromq.github.com/pyzmq/)")

        ###
        # Initiate coordination sub-system of both BJ agent and Pilot Data
        self.coordination = bigjob_coordination(server_connect_url=self.coordination_url)
        try:
            # initialize coordination subsystem of pilot data
            self.pilot_data_service = PilotDataService(coordination_url=self.coordination_url)
        except:
            logger.warn("Pilot-Data could not be initialized.")
            
        # update state of pilot job to running
        logger.debug("set state to : " +  str(bigjob.state.Running))
        self.coordination.set_pilot_state(self.base_url, str(bigjob.state.Running), False)
        self.pilot_description = self.coordination.get_pilot_description(self.base_url)
        try:
            self.pilot_description = ast.literal_eval(self.pilot_description)
        except:
            logger.warn("Unable to parse pilot description")
            self.pilot_description = None
             

        ############################################################################
        # Detect launch method 
        self.LAUNCH_METHOD="ssh"                    
        if default_dict.has_key("launch_method"):
            self.LAUNCH_METHOD=default_dict["launch_method"]
            
        self.LAUNCH_METHOD=self.__get_launch_method(self.LAUNCH_METHOD)
        
        logging.debug("Launch Method: " + self.LAUNCH_METHOD + " mpi: " + self.MPIRUN + " shell: " + self.SHELL)
        
        # init rms (SGE/PBS)
        self.init_rms()
        
        ##############################################################################
        # start background thread for polling new jobs and monitoring current jobs
        # check whether user requested a certain threadpool size
        if self.pilot_description!=None and self.pilot_description.has_key("number_executor_threads"):
            THREAD_POOL_SIZE=int(self.pilot_description["number_executor_threads"])
        logger.debug("Creating executor thread pool of size: %d"%(THREAD_POOL_SIZE))
        self.resource_lock=threading.RLock()
        self.threadpool = ThreadPool(THREAD_POOL_SIZE)
        
        self.launcher_thread=threading.Thread(target=self.dequeue_new_jobs)
        self.launcher_thread.start()
        
        self.monitoring_thread=threading.Thread(target=self.start_background_thread)
        self.monitoring_thread.start()
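
The BigJob agent above mixes bare logging.debug(...) calls, which go to the root logger, with logger.debug(...) on a named logger. The usual way to obtain a per-module logger is shown below as a generic sketch, not BigJob's own setup:

import logging

# A named, per-module logger; its records carry the module name and can be
# filtered independently of the root logger used by bare logging.debug().
logger = logging.getLogger(__name__)

logging.basicConfig(level=logging.DEBUG,
                    format="%(asctime)s %(name)s %(levelname)s %(message)s")

logging.debug("goes to the root logger")
logger.debug("goes to the '%s' logger", __name__)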

Example 44

Project: cmonkey2
Source File: motif.py
View license
    def compute_pvalues(self, iteration_result, num_motifs, force):
        """Compute motif scores.
        The result is a dictionary from cluster -> (feature_id, pvalue)
        containing a sparse gene-to-pvalue mapping for each cluster

        In order to influence the sequences
        that go into meme, the user can specify a list of sequence filter
        functions that have the signature
        (seqs, feature_ids, distance) -> seqs
        These filters are applied in the order they appear in the list.
        """
        global SEQUENCE_FILTERS, ORGANISM, MEMBERSHIP

        cluster_pvalues = {}
        min_cluster_rows_allowed = self.config_params['memb.min_cluster_rows_allowed']
        max_cluster_rows_allowed = self.config_params['memb.max_cluster_rows_allowed']
        use_multiprocessing = self.config_params[scoring.KEY_MULTIPROCESSING]

        # extract the sequences for each cluster, slow
        start_time = util.current_millis()
        SEQUENCE_FILTERS = self.__sequence_filters
        ORGANISM = self.organism
        MEMBERSHIP = self.membership

        cluster_seqs_params = [(cluster, self.seqtype) for cluster in xrange(1, self.num_clusters() + 1)]
        if use_multiprocessing:
            with util.get_mp_pool(self.config_params) as pool:
                seqs_list = pool.map(cluster_seqs, cluster_seqs_params)
        else:
            seqs_list = [cluster_seqs(p) for p in cluster_seqs_params]

        SEQUENCE_FILTERS = None
        ORGANISM = None
        MEMBERSHIP = None
        logging.debug("prepared sequences in %d ms.", util.current_millis() - start_time)

        # Make the parameters, this is fast enough
        start_time = util.current_millis()
        params = {}
        for cluster in xrange(1, self.num_clusters() + 1):
            # Pass the previous run's seed if possible
            if self.__last_motif_infos is not None:
                previous_motif_infos = self.__last_motif_infos.get(cluster, None)
            else:
                previous_motif_infos = None

            seqs, feature_ids = seqs_list[cluster - 1]
            params[cluster] = ComputeScoreParams(iteration_result['iteration'], cluster,
                                                 feature_ids,
                                                 seqs,
                                                 self.used_seqs,
                                                 self.meme_runner(),
                                                 min_cluster_rows_allowed,
                                                 max_cluster_rows_allowed,
                                                 num_motifs,
                                                 previous_motif_infos,
                                                 self.config_params['output_dir'],
                                                 self.config_params['num_iterations'],
                                                 self.config_params['debug'])

        logging.debug("prepared MEME parameters in %d ms.",
                      util.current_millis() - start_time)

        # create motif result map if necessary
        for cluster in xrange(1, self.num_clusters() + 1):
            if not cluster in iteration_result:
                iteration_result[cluster] = {}

        # Optimization:
        # if the cluster hasn't changed since last time, reuse the last results
        # we do this by filtering out the parameters of the clusters that did not
        # change
        if not force and self.__last_results is not None:
            oldlen = len(params)
            params = {cluster: params[cluster]
                      for cluster in xrange(1, self.num_clusters() + 1)
                      if params[cluster].feature_ids != self.__last_results[cluster][0]}
            newlen = len(params)
            if oldlen - newlen > 0:
                logging.debug("%d clusters did not change !!!", oldlen - newlen)

        # compute and store motif results
        self.__last_motif_infos = {}
        if self.__last_results is None:
            self.__last_results = {}

        if use_multiprocessing:
            with util.get_mp_pool(self.config_params) as pool:
                results = pool.map(compute_cluster_score, params.values())
                results = {r[0]: r[1:] for r in results}  # indexed by cluster

                for cluster in xrange(1, self.num_clusters() + 1):
                    if cluster in results:
                        pvalues, run_result = results[cluster]
                        self.__last_results[cluster] = (params[cluster].feature_ids,
                                                        pvalues, run_result)
                    else:
                        feature_ids, pvalues, run_result = self.__last_results[cluster]

                    cluster_pvalues[cluster] = pvalues
                    if run_result:
                        self.__last_motif_infos[cluster] = run_result.motif_infos
                    iteration_result[cluster]['motif-info'] = meme_json(run_result)
                    iteration_result[cluster]['pvalues'] = pvalues
        else:
            for cluster in xrange(1, self.num_clusters() + 1):
                if cluster in params:
                    _, pvalues, run_result = compute_cluster_score(params[cluster])
                    self.__last_results[cluster] = (params[cluster].feature_ids,
                                                    pvalues, run_result)
                else:
                    _, pvalues, run_result = self.__last_results[cluster]

                cluster_pvalues[cluster] = pvalues
                if run_result:
                    self.__last_motif_infos[cluster] = run_result.motif_infos
                iteration_result[cluster]['motif-info'] = meme_json(run_result)
                iteration_result[cluster]['pvalues'] = pvalues

        return cluster_pvalues
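
cmonkey2 logs elapsed wall-clock time at DEBUG level around its expensive steps (util.current_millis is its own helper). A minimal sketch of the same timing pattern using only the standard library:

import logging
import time

logging.basicConfig(level=logging.DEBUG)

start_time = time.time()
sum(i * i for i in range(100000))  # stand-in for the expensive step
logging.debug("prepared sequences in %d ms.", (time.time() - start_time) * 1000)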

Example 45

Project: pyqso
Source File: menu.py
View license
    def __init__(self, parent):
        """ Set up all menu items and connect to the various functions.

        :arg parent: The parent Gtk window.
        """

        logging.debug("New Menu instance created!")

        # First let's call the constructor of the super class (Gtk.MenuBar)
        Gtk.MenuBar.__init__(self)

        logging.debug("Setting up the menu bar...")
        agrp = Gtk.AccelGroup()
        parent.add_accel_group(agrp)

        self.items = {}

        # LOGBOOK ######
        mitem_logbook = Gtk.MenuItem("Logbook")
        self.append(mitem_logbook)
        subm_logbook = Gtk.Menu()
        mitem_logbook.set_submenu(subm_logbook)

        # Create logbook
        mitem_connect = Gtk.ImageMenuItem("Create a New Logbook...")
        icon = Gtk.Image()
        icon.set_from_stock(Gtk.STOCK_NEW, Gtk.IconSize.MENU)
        mitem_connect.set_image(icon)
        mitem_connect.connect("activate", parent.logbook.new)
        subm_logbook.append(mitem_connect)
        self.items["NEW_LOGBOOK"] = mitem_connect

        # Open logbook
        mitem_connect = Gtk.ImageMenuItem("Open an Existing Logbook...")
        icon = Gtk.Image()
        icon.set_from_stock(Gtk.STOCK_OPEN, Gtk.IconSize.MENU)
        mitem_connect.set_image(icon)
        mitem_connect.connect("activate", parent.logbook.open)
        key, mod = Gtk.accelerator_parse("<Control>O")
        mitem_connect.add_accelerator("activate", agrp, key, mod, Gtk.AccelFlags.VISIBLE)
        subm_logbook.append(mitem_connect)
        self.items["OPEN_LOGBOOK"] = mitem_connect

        # Close logbook
        mitem_disconnect = Gtk.ImageMenuItem("Close Logbook")
        mitem_disconnect.connect("activate", parent.logbook.close)
        icon = Gtk.Image()
        icon.set_from_stock(Gtk.STOCK_CLOSE, Gtk.IconSize.MENU)
        mitem_disconnect.set_image(icon)
        key, mod = Gtk.accelerator_parse("<Control>W")
        mitem_disconnect.add_accelerator("activate", agrp, key, mod, Gtk.AccelFlags.VISIBLE)
        subm_logbook.append(mitem_disconnect)
        self.items["CLOSE_LOGBOOK"] = mitem_disconnect

        subm_logbook.append(Gtk.SeparatorMenuItem())

        # New log
        mitem_new = Gtk.ImageMenuItem("New Log")
        icon = Gtk.Image()
        icon.set_from_stock(Gtk.STOCK_ADD, Gtk.IconSize.MENU)
        mitem_new.set_image(icon)
        mitem_new.connect("activate", parent.logbook.new_log)
        key, mod = Gtk.accelerator_parse("<Control>N")
        mitem_new.add_accelerator("activate", agrp, key, mod, Gtk.AccelFlags.VISIBLE)
        subm_logbook.append(mitem_new)
        self.items["NEW_LOG"] = mitem_new

        # Delete the current log
        mitem_delete = Gtk.ImageMenuItem("Delete Selected Log")
        icon = Gtk.Image()
        icon.set_from_stock(Gtk.STOCK_DELETE, Gtk.IconSize.MENU)
        mitem_delete.set_image(icon)
        mitem_delete.connect("activate", parent.logbook.delete_log)
        subm_logbook.append(mitem_delete)
        self.items["DELETE_LOG"] = mitem_delete

        # Rename the current log
        mitem_rename = Gtk.ImageMenuItem("Rename Selected Log")
        icon = Gtk.Image()
        icon.set_from_stock(Gtk.STOCK_EDIT, Gtk.IconSize.MENU)
        mitem_rename.set_image(icon)
        mitem_rename.connect("activate", parent.logbook.rename_log)
        subm_logbook.append(mitem_rename)
        self.items["RENAME_LOG"] = mitem_rename

        subm_logbook.append(Gtk.SeparatorMenuItem())

        # Import log
        mitem_import = Gtk.ImageMenuItem("Import Log")
        icon = Gtk.Image()
        icon.set_from_stock(Gtk.STOCK_GO_FORWARD, Gtk.IconSize.MENU)
        mitem_import.set_image(icon)
        mitem_import.connect("activate", parent.logbook.import_log)
        subm_logbook.append(mitem_import)
        self.items["IMPORT_LOG"] = mitem_import

        # Export the current log
        mitem_export = Gtk.ImageMenuItem("Export Log")
        icon = Gtk.Image()
        icon.set_from_stock(Gtk.STOCK_GO_BACK, Gtk.IconSize.MENU)
        mitem_export.set_image(icon)
        mitem_export.connect("activate", parent.logbook.export_log)
        subm_logbook.append(mitem_export)
        self.items["EXPORT_LOG"] = mitem_export

        subm_logbook.append(Gtk.SeparatorMenuItem())

        # Print log
        mitem_print = Gtk.ImageMenuItem("Print Log")
        icon = Gtk.Image()
        icon.set_from_stock(Gtk.STOCK_PRINT, Gtk.IconSize.MENU)
        mitem_print.set_image(icon)
        mitem_print.connect("activate", parent.logbook.print_log)
        key, mod = Gtk.accelerator_parse("<Control>P")
        mitem_print.add_accelerator("activate", agrp, key, mod, Gtk.AccelFlags.VISIBLE)
        subm_logbook.append(mitem_print)
        self.items["PRINT_LOG"] = mitem_print

        subm_logbook.append(Gtk.SeparatorMenuItem())

        # Quit
        mitem_quit = Gtk.ImageMenuItem("Quit")
        icon = Gtk.Image()
        icon.set_from_stock(Gtk.STOCK_QUIT, Gtk.IconSize.MENU)
        mitem_quit.set_image(icon)
        mitem_quit.connect("activate", Gtk.main_quit)
        key, mod = Gtk.accelerator_parse("<Control>Q")
        mitem_quit.add_accelerator("activate", agrp, key, mod, Gtk.AccelFlags.VISIBLE)
        subm_logbook.append(mitem_quit)
        self.items["QUIT"] = mitem_quit

        # RECORDS ######
        mitem_records = Gtk.MenuItem("Records")
        self.append(mitem_records)
        subm_records = Gtk.Menu()
        mitem_records.set_submenu(subm_records)

        mitem_addrecord = Gtk.ImageMenuItem("Add Record...")
        icon = Gtk.Image()
        icon.set_from_stock(Gtk.STOCK_ADD, Gtk.IconSize.MENU)
        mitem_addrecord.set_image(icon)
        mitem_addrecord.connect("activate", parent.logbook.add_record_callback)
        key, mod = Gtk.accelerator_parse("<Control>R")
        mitem_addrecord.add_accelerator("activate", agrp, key, mod, Gtk.AccelFlags.VISIBLE)
        subm_records.append(mitem_addrecord)
        self.items["ADD_RECORD"] = mitem_addrecord

        mitem_editrecord = Gtk.ImageMenuItem("Edit Selected Record...")
        icon = Gtk.Image()
        icon.set_from_stock(Gtk.STOCK_EDIT, Gtk.IconSize.MENU)
        mitem_editrecord.set_image(icon)
        mitem_editrecord.connect("activate", parent.logbook.edit_record_callback, None, None)
        key, mod = Gtk.accelerator_parse("<Control>E")
        mitem_editrecord.add_accelerator("activate", agrp, key, mod, Gtk.AccelFlags.VISIBLE)
        subm_records.append(mitem_editrecord)
        self.items["EDIT_RECORD"] = mitem_editrecord

        mitem_deleterecord = Gtk.ImageMenuItem("Delete Selected Record...")
        icon = Gtk.Image()
        icon.set_from_stock(Gtk.STOCK_DELETE, Gtk.IconSize.MENU)
        mitem_deleterecord.set_image(icon)
        mitem_deleterecord.connect("activate", parent.logbook.delete_record_callback)
        key, mod = Gtk.accelerator_parse("Delete")
        mitem_deleterecord.add_accelerator("activate", agrp, key, mod, Gtk.AccelFlags.VISIBLE)
        subm_records.append(mitem_deleterecord)
        self.items["DELETE_RECORD"] = mitem_deleterecord

        mitem_removeduplicates = Gtk.ImageMenuItem("Remove Duplicate Records")
        icon = Gtk.Image()
        icon.set_from_stock(Gtk.STOCK_FIND_AND_REPLACE, Gtk.IconSize.MENU)
        mitem_removeduplicates.set_image(icon)
        mitem_removeduplicates.connect("activate", parent.logbook.remove_duplicates_callback)
        subm_records.append(mitem_removeduplicates)
        self.items["REMOVE_DUPLICATES"] = mitem_removeduplicates

        # VIEW ######
        mitem_view = Gtk.MenuItem("View")
        self.append(mitem_view)
        subm_view = Gtk.Menu()
        mitem_view.set_submenu(subm_view)

        mitem_toolbox = Gtk.CheckMenuItem("Toolbox")
        config = configparser.ConfigParser()
        have_config = (config.read(os.path.expanduser('~/.config/pyqso/preferences.ini')) != [])
        (section, option) = ("general", "show_toolbox")
        if(have_config and config.has_option(section, option)):
            mitem_toolbox.set_active(config.get(section, option) == "True")
        else:
            mitem_toolbox.set_active(False)  # Don't show the toolbox by default
        mitem_toolbox.connect("activate", parent.toolbox.toggle_visible_callback)
        subm_view.append(mitem_toolbox)
        self.items["TOOLBOX"] = mitem_toolbox

        mitem_preferences = Gtk.ImageMenuItem("Preferences...")
        icon = Gtk.Image()
        icon.set_from_stock(Gtk.STOCK_PREFERENCES, Gtk.IconSize.MENU)
        mitem_preferences.set_image(icon)
        mitem_preferences.connect("activate", parent.show_preferences)
        subm_view.append(mitem_preferences)
        self.items["PREFERENCES"] = mitem_preferences

        # HELP ######
        mitem_help = Gtk.MenuItem("Help")
        self.append(mitem_help)
        subm_help = Gtk.Menu()
        mitem_help.set_submenu(subm_help)

        # About
        mitem_about = Gtk.ImageMenuItem("About PyQSO")
        icon = Gtk.Image()
        icon.set_from_stock(Gtk.STOCK_ABOUT, Gtk.IconSize.MENU)
        mitem_about.set_image(icon)
        mitem_about.connect("activate", parent.show_about)
        subm_help.append(mitem_about)

        self.set_logbook_item_sensitive(True)
        self.set_log_items_sensitive(False)
        self.set_record_items_sensitive(False)

        logging.debug("Menu bar ready!")

        return
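
The logging.debug calls in this example only produce output if debug-level logging has been enabled; a minimal setup for seeing them (an assumption for illustration, not part of PyQSO itself) could be:

import logging

# The root logger defaults to WARNING, so without an explicit configuration
# the logging.debug() calls above are silently dropped.
logging.basicConfig(level=logging.DEBUG,
                    format="%(asctime)s %(levelname)s %(message)s")

logging.debug("New Menu instance created!")  # now emitted to stderr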

Example 46

Project: nvpy
Source File: notes_db.py
View license
    def __init__(self, config):
        utils.SubjectMixin.__init__(self)

        self.config = config

        # create db dir if it does not exist
        if not os.path.exists(config.db_path):
            os.mkdir(config.db_path)

        self.db_path = config.db_path

        # create txt Notes dir if it does not exist
        if self.config.notes_as_txt and not os.path.exists(config.txt_path):
            os.mkdir(config.txt_path)

        now = time.time()
        # now read all .json files from disk
        fnlist = glob.glob(self.helper_key_to_fname('*'))
        txtlist = glob.glob(unicode(self.config.txt_path + '/*.txt', 'utf-8'))
        txtlist += glob.glob(unicode(self.config.txt_path + '/*.mkdn', 'utf-8'))

        # remove json files and force a full sync if we're using text files,
        # none exist yet, and json files are present
        if self.config.notes_as_txt and not txtlist and fnlist:
            logging.debug('Forcing resync: using text notes, first usage')
            for fn in fnlist:
                os.unlink(fn)
            fnlist = []

        self.notes = {}
        if self.config.notes_as_txt:
            self.titlelist = {}

        for fn in fnlist:
            try:
                n = json.load(open(fn, 'rb'))
                if self.config.notes_as_txt:
                    nt = utils.get_note_title_file(n)
                    tfn = os.path.join(self.config.txt_path, nt)
                    if os.path.isfile(tfn):
                        self.titlelist[n.get('key')] = nt
                        txtlist.remove(tfn)
                        if os.path.getmtime(tfn) > os.path.getmtime(fn):
                            logging.debug('Text note was changed: %s' % (fn,))
                            with codecs.open(tfn, mode='rb', encoding='utf-8') as f:
                                c = f.read()

                            n['content'] = c
                            n['modifydate'] = os.path.getmtime(tfn)
                    else:
                        logging.debug('Deleting note : %s' % (fn,))
                        if not self.config.simplenote_sync:
                            os.unlink(fn)
                            continue
                        else:
                            n['deleted'] = 1
                            n['modifydate'] = now

            except IOError, e:
                logging.error('NotesDB_init: Error opening %s: %s' % (fn, str(e)))
                raise ReadError('Error opening note file')

            except ValueError, e:
                logging.error('NotesDB_init: Error reading %s: %s' % (fn, str(e)))
                raise ReadError('Error reading note file')

            else:
                # we always have a localkey, also when we don't have a note['key'] yet (no sync)
                localkey = os.path.splitext(os.path.basename(fn))[0]
                self.notes[localkey] = n
                # we maintain in memory a timestamp of the last save
                # these notes have just been read, so at this moment
                # they're in sync with the disc.
                n['savedate'] = now

        if self.config.notes_as_txt:
            for fn in txtlist:
                logging.debug('New text note found : %s' % (fn),)
                tfn = os.path.join(self.config.txt_path, fn)
                try:
                    with codecs.open(tfn, mode='rb', encoding='utf-8') as f:
                        c = f.read()

                except IOError, e:
                    logging.error('NotesDB_init: Error opening %s: %s' % (fn, str(e)))
                    raise ReadError('Error opening note file')

                except ValueError, e:
                    logging.error('NotesDB_init: Error reading %s: %s' % (fn, str(e)))
                    raise ReadError('Error reading note file')

                else:
                    nk = self.create_note(c)
                    nn = os.path.splitext(os.path.basename(fn))[0]
                    if nn != utils.get_note_title(self.notes[nk]):
                        self.notes[nk]['content'] = nn + "\n\n" + c

                    os.unlink(tfn)

        # save and sync queue
        self.q_save = Queue()
        self.q_save_res = Queue()

        thread_save = Thread(target=self.worker_save)
        thread_save.setDaemon(True)
        thread_save.start()

        # initialise the simplenote instance we're going to use
        # this does not yet need network access
        if self.config.simplenote_sync:
            self.simplenote = Simplenote(config.sn_username, config.sn_password)

            # we'll use this to store which notes are currently being synced by
            # the background thread, so we don't add them anew if they're still
            # in progress. This variable is only used by the background thread.
            self.threaded_syncing_keys = {}

            # reading a variable or setting this variable is atomic
            # so sync thread will write to it, main thread will only
            # check it sometimes.
            self.waiting_for_simplenote = False

            self.q_sync = Queue()
            self.q_sync_res = Queue()

            thread_sync = Thread(target=self.worker_sync)
            thread_sync.setDaemon(True)
            thread_sync.start()
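
This example builds its messages eagerly with the % operator (e.g. 'Text note was changed: %s' % (fn,)); logging.debug also accepts the format string and its arguments separately, deferring interpolation until the record is actually emitted. A small illustration of the two spellings, with a hypothetical fn value:

import logging

fn = 'notes/example.json'

# Eager formatting, as in the example above: the string is built even when
# DEBUG logging is disabled.
logging.debug('Text note was changed: %s' % (fn,))

# Deferred formatting: the arguments are interpolated only if the record
# actually gets emitted.
logging.debug('Text note was changed: %s', fn)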

Example 47

Project: benchexec
Source File: containerexecutor.py
View license
    def _start_execution_in_container(
            self, args, stdin, stdout, stderr, env, cwd, temp_dir, cgroups,
            output_dir, result_files_patterns,
            parent_setup_fn, child_setup_fn, parent_cleanup_fn):
        """Execute the given command and measure its resource usage similarly to super()._start_execution(),
        but inside a container implemented using Linux namespaces.
        The command has no network access (only loopback),
        a fresh directory as /tmp and no write access outside of this,
        and it does not see other processes except itself.
        """
        assert self._use_namespaces

        args = self._build_cmdline(args, env=env)

        # We have three processes involved:
        # parent: the current Python process in which RunExecutor is executing
        # child: child process in new namespace (PID 1 in inner namespace),
        #        configures inner namespace, serves as dummy init,
        #        collects result of grandchild and passes it to parent
        # grandchild: child of child process (PID 2 in inner namespace), exec()s tool

        # We need the following communication steps between these processes:
        # 1a) grandchild tells parent its PID (in outer namespace).
        # 1b) grandchild tells parent that it is ready and measurement should begin.
        # 2) parent tells grandchild that measurement has begun and tool should
        #    be exec()ed.
        # 3) child tells parent about return value and resource consumption of grandchild.
        # 1a and 1b are done together by sending the PID through a pipe.
        # 2 is done by sending a null byte through a pipe.
        # 3 is done by sending a pickled object through the same pipe as #2.
        # We cannot use the same pipe for both directions, because otherwise a sender might
        # read the bytes it has sent itself.

        # Error codes from child to parent
        CHILD_OSERROR = 128
        CHILD_UNKNOWN_ERROR = 129

        from_parent, to_grandchild = os.pipe() # "downstream" pipe parent->grandchild
        from_grandchild, to_parent = os.pipe() # "upstream" pipe grandchild/child->parent

        # If the current directory is within one of the bind mounts we create,
        # we need to cd into this directory again, otherwise we would not see the bind mount,
        # but the directory behind it. Thus we always set cwd to force a change of directory.
        cwd = os.path.abspath(cwd or os.curdir)

        def grandchild():
            """Setup everything inside the process that finally exec()s the tool."""
            try:
                # We know that this process has PID 2 in the inner namespace,
                # but we actually need to know its PID in the outer namespace
                # such that parent can put us into the correct cgroups.
                # According to http://man7.org/linux/man-pages/man7/pid_namespaces.7.html,
                # there are two ways to achieve this: sending a message with the PID
                # via a socket (but Python < 3.3 lacks a convenient API for sendmsg),
                # and reading /proc/self in the outer procfs instance (that's what we do).
                my_outer_pid = container.get_my_pid_from_procfs()

                container.mount_proc()
                container.drop_capabilities()
                container.reset_signal_handling()
                child_setup_fn() # Do some other setup the caller wants.

                # Signal readiness to parent by sending our PID and wait until parent is also ready
                os.write(to_parent, str(my_outer_pid).encode())
                received = os.read(from_parent, 1)
                assert received == b'\0', received
            finally:
                # close remaining ends of pipe
                os.close(from_parent)
                os.close(to_parent)
            # here Python will exec() the tool for us

        def child():
            """Setup everything inside the container, start the tool, and wait for result."""
            try:
                logging.debug("Child: child process of RunExecutor with PID %d started",
                              container.get_my_pid_from_procfs())

                # Put all received signals on hold until we handle them later.
                container.block_all_signals()

                # We want to avoid leaking file descriptors to the executed child.
                # It is also nice if the child has only the minimal necessary file descriptors,
                # to avoid keeping other pipes and files open, e.g., those that the parent
                # uses to communicate with other containers (if containers are started in parallel).
                # Thus we do not use the close_fds feature of subprocess.Popen,
                # but do the same here manually.
                # We keep the relevant ends of our pipes, and stdin/out/err of child and grandchild.
                necessary_fds = {sys.stdin, sys.stdout, sys.stderr,
                    to_parent, from_parent, stdin, stdout, stderr} - {None}
                container.close_open_fds(keep_files=necessary_fds)

                try:
                    if not self._allow_network:
                        container.activate_network_interface("lo")
                    self._setup_container_filesystem(temp_dir)
                except EnvironmentError as e:
                    logging.critical("Failed to configure container: %s", e)
                    return CHILD_OSERROR

                try:
                    os.chdir(cwd)
                except EnvironmentError as e:
                    logging.critical(
                        "Cannot change into working directory inside container: %s", e)
                    return CHILD_OSERROR

                try:
                    grandchild_proc = subprocess.Popen(args,
                                        stdin=stdin,
                                        stdout=stdout, stderr=stderr,
                                        env=env,
                                        close_fds=False,
                                        preexec_fn=grandchild)
                except (EnvironmentError, RuntimeError) as e:
                    logging.critical("Cannot start process: %s", e)
                    return CHILD_OSERROR

                container.drop_capabilities()

                # Close other fds that were still necessary above.
                container.close_open_fds(keep_files={sys.stdout, sys.stderr, to_parent})

                # Set up signal handlers to forward signals to grandchild
                # (because we are PID 1, there is a special signal handling otherwise).
                # cf. dumb-init project: https://github.com/Yelp/dumb-init
                # Also wait for grandchild and return its result.
                if _HAS_SIGWAIT:
                    grandchild_result = container.wait_for_child_and_forward_all_signals(
                        grandchild_proc.pid, args[0])
                else:
                    container.forward_all_signals_async(grandchild_proc.pid, args[0])
                    grandchild_result = self._wait_for_process(grandchild_proc.pid, args[0])

                logging.debug("Child: process %s terminated with exit code %d.",
                              args[0], grandchild_result[0])
                os.write(to_parent, pickle.dumps(grandchild_result))
                os.close(to_parent)

                return 0
            except EnvironmentError as e:
                logging.exception("Error in child process of RunExecutor")
                return CHILD_OSERROR
            except:
                # Need to catch everything because this method always needs to return a int
                # (we are inside a C callback that requires returning int).
                logging.exception("Error in child process of RunExecutor")
                return CHILD_UNKNOWN_ERROR

        try: # parent
            try:
                child_pid = container.execute_in_namespace(child, use_network_ns=not self._allow_network)
            except OSError as e:
                raise BenchExecException(
                    "Creating namespace for container mode failed: " + os.strerror(e.errno))
            logging.debug("Parent: child process of RunExecutor with PID %d started.", child_pid)

            def check_child_exit_code():
                """Check if the child process terminated cleanly and raise an error otherwise."""
                child_exitcode, unused_child_rusage = self._wait_for_process(child_pid, args[0])
                child_exitcode = util.ProcessExitCode.from_raw(child_exitcode)
                logging.debug("Parent: child process of RunExecutor with PID %d terminated with %s.",
                              child_pid, child_exitcode)

                if child_exitcode:
                    if child_exitcode.value:
                        if child_exitcode.value == CHILD_OSERROR:
                            # This was an OSError in the child, details were already logged
                            raise BenchExecException("execution in container failed, check log for details")
                        elif child_exitcode.value == CHILD_UNKNOWN_ERROR:
                            raise BenchExecException("unexpected error in container")
                        raise OSError(child_exitcode.value, os.strerror(child_exitcode.value))
                    raise OSError(0, "Child process of RunExecutor terminated with " + str(child_exitcode))

            # Close unnecessary ends of pipes such that read() does not block forever
            # if all other processes have terminated.
            os.close(from_parent)
            os.close(to_parent)

            container.setup_user_mapping(child_pid, uid=self._uid, gid=self._gid)

            try:
                grandchild_pid = int(os.read(from_grandchild, 10)) # 10 bytes is enough for 32bit int
            except ValueError:
                # probably empty read, i.e., pipe closed, i.e., child or grandchild failed
                check_child_exit_code()
                assert False, "Child process of RunExecutor terminated cleanly but did not send expected data."

            logging.debug("Parent: executing %s in grand child with PID %d via child with PID %d.",
                          args[0], grandchild_pid, child_pid)

            # start measurements
            cgroups.add_task(grandchild_pid)
            parent_setup = parent_setup_fn()

            # Signal grandchild that setup is finished
            os.write(to_grandchild, b'\0')

            # Copy file descriptor, otherwise we could not close from_grandchild in finally block
            # and would leak a file descriptor in case of exception.
            from_grandchild_copy = os.dup(from_grandchild)
        finally:
            os.close(from_grandchild)
            os.close(to_grandchild)

        def wait_for_grandchild():
            # 1024 bytes ought to be enough for everyone^Wour pickled result
            try:
                received = os.read(from_grandchild_copy, 1024)
            except OSError as e:
                if self.PROCESS_KILLED and e.errno == errno.EINTR:
                    # Read was interrupted because of Ctrl+C, we just try again
                    received = os.read(from_grandchild_copy, 1024)
                else:
                    raise e

            parent_cleanup = parent_cleanup_fn(parent_setup)

            os.close(from_grandchild_copy)
            check_child_exit_code()

            if result_files_patterns:
                self._transfer_output_files(temp_dir, cwd, output_dir, result_files_patterns)

            exitcode, ru_child = pickle.loads(received)
            return exitcode, ru_child, parent_cleanup

        return grandchild_pid, wait_for_grandchild
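
The debug messages above identify the parent, child, and grandchild by formatting PIDs into each call; the logging module can also stamp every record with the emitting process automatically via the %(process)d attribute. A short sketch of that option (an illustration, not BenchExec's actual configuration):

import logging

# Every record carries the PID of the process that emitted it, which helps
# when several related processes log to the same stream.
logging.basicConfig(
    level=logging.DEBUG,
    format="%(asctime)s [pid %(process)d] %(levelname)s %(message)s")

logging.debug("child process of RunExecutor started")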

Example 48

Project: reviewboard
Source File: models.py
View license
    def _migrate_diff_data(self, recalculate_counts=True):
        """Migrates diff data associated with a FileDiff to RawFileDiffData.

        If the diff data is stored directly on the FileDiff, it will be
        removed and stored on a RawFileDiffData instead.

        If the diff data is stored on an associated LegacyFileDiffData,
        that will be converted into a RawFileDiffData. The LegacyFileDiffData
        will then be removed, if nothing else is using it.
        """
        needs_save = False
        diff_hash_is_new = False
        parent_diff_hash_is_new = False
        fix_refs = False
        legacy_pks = []
        needs_diff_migration = self._needs_diff_migration()
        needs_parent_diff_migration = self._needs_parent_diff_migration()

        if needs_diff_migration:
            recalculate_diff_counts = recalculate_counts
            needs_save = True

            if self.legacy_diff_hash_id:
                logging.debug('Migrating LegacyFileDiffData %s to '
                              'RawFileDiffData for diff in FileDiff %s',
                              self.legacy_diff_hash_id, self.pk)

                try:
                    legacy_data = self.legacy_diff_hash.binary
                except LegacyFileDiffData.DoesNotExist:
                    # Another process migrated this before we could.
                    # We'll need to fix the references.
                    fix_refs = True
                    recalculate_diff_counts = False
                else:
                    diff_hash_is_new = self._set_diff(legacy_data)
                    legacy_pks.append(self.legacy_diff_hash_id)
                    self.legacy_diff_hash = None
            else:
                logging.debug('Migrating FileDiff %s diff data to '
                              'RawFileDiffData',
                              self.pk)

                diff_hash_is_new = self._set_diff(self.diff64)

            if recalculate_diff_counts:
                self._recalculate_line_counts(self.diff_hash)

        if needs_parent_diff_migration:
            recalculate_parent_diff_counts = recalculate_counts
            needs_save = True

            if self.legacy_parent_diff_hash_id:
                logging.debug('Migrating LegacyFileDiffData %s to '
                              'RawFileDiffData for parent diff in FileDiff %s',
                              self.legacy_parent_diff_hash_id, self.pk)

                try:
                    legacy_parent_data = self.legacy_parent_diff_hash.binary
                except LegacyFileDiffData.DoesNotExist:
                    # Another process migrated this before we could.
                    # We'll need to fix the references.
                    fix_refs = True
                    recalculate_parent_diff_counts = False
                else:
                    parent_diff_hash_is_new = \
                        self._set_parent_diff(legacy_parent_data)
                    legacy_pks.append(self.legacy_parent_diff_hash_id)
                    self.legacy_parent_diff_hash = None
            else:
                logging.debug('Migrating FileDiff %s parent diff data to '
                              'RawFileDiffData',
                              self.pk)

                parent_diff_hash_is_new = \
                    self._set_parent_diff(self.parent_diff64)

            if recalculate_parent_diff_counts:
                self._recalculate_line_counts(self.parent_diff_hash)

        if fix_refs:
            # Another server/process/thread got to this before we could.
            # We need to pull the latest refs and make sure they're set here.
            diff_hash, parent_diff_hash = (
                FileDiff.objects.filter(pk=self.pk)
                .values_list('diff_hash_id', 'parent_diff_hash_id')[0]
            )

            if needs_diff_migration:
                if diff_hash:
                    self.diff_hash_id = diff_hash
                    self.legacy_diff_hash = None
                    self.diff64 = ''
                else:
                    logging.error('Unable to migrate diff for FileDiff %s: '
                                  'LegacyFileDiffData "%s" is missing, and '
                                  'database entry does not have a new '
                                  'diff_hash! Data may be missing.',
                                  self.pk, self.legacy_diff_hash_id)

            if needs_parent_diff_migration:
                if parent_diff_hash:
                    self.parent_diff_hash_id = parent_diff_hash
                    self.legacy_parent_diff_hash = None
                    self.parent_diff64 = ''
                else:
                    logging.error('Unable to migrate parent diff for '
                                  'FileDiff %s: LegacyFileDiffData "%s" is '
                                  'missing, and database entry does not have '
                                  'a new parent_diff_hash! Data may be '
                                  'missing.',
                                  self.pk, self.legacy_parent_diff_hash_id)

        if needs_save:
            if self.pk:
                self.save(update_fields=[
                    'diff64', 'parent_diff64', 'diff_hash', 'parent_diff_hash',
                    'legacy_diff_hash', 'legacy_parent_diff_hash',
                ])
            else:
                self.save()

        if legacy_pks:
            # Delete any LegacyFileDiffData objects no longer associated
            # with any FileDiffs.
            LegacyFileDiffData.objects \
                .filter(pk__in=legacy_pks) \
                .exclude(Q(filediffs__pk__gt=0) |
                         Q(parent_filediffs__pk__gt=0)) \
                .delete()

        return diff_hash_is_new, parent_diff_hash_is_new
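
The calls above pass their arguments to logging.debug rather than pre-formatting them, so the %-interpolation is skipped when DEBUG is off; when even assembling the arguments is expensive, the standard library also provides Logger.isEnabledFor() as an explicit guard. A small sketch, using a hypothetical describe() helper:

import logging

logger = logging.getLogger(__name__)


def describe(obj):
    # Hypothetical stand-in for an expensive serialisation step.
    return repr(obj)


# The guard skips the describe() call entirely unless DEBUG logging is enabled;
# logger.debug() itself already defers the %-formatting.
if logger.isEnabledFor(logging.DEBUG):
    logger.debug('Migrating diff data to RawFileDiffData: %s',
                 describe({'pk': 42}))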

Example 49

Project: rbtools
Source File: process.py
View license
def execute(command,
            env=None,
            split_lines=False,
            ignore_errors=False,
            extra_ignore_errors=(),
            translate_newlines=True,
            with_errors=True,
            none_on_ignored_error=False,
            return_error_code=False,
            log_output_on_error=True,
            results_unicode=True,
            return_errors=False):
    """Utility function to execute a command and return the output.

    :param command:
        The command to execute as either a string or a list of strings.
    :param env:
        The environment variables to pass to the called executable.
        These variables will be added to the current set of environment
        variables.
    :param split_lines:
        Determines if the program's output will be split into multiple lines.
    :param ignore_errors:
        If ``False``, RBTools will exit if a command returns a non-zero status.
    :param extra_ignore_errors:
        The set of return status codes that should be treated as if the program
        exited with status 0.
    :param translate_newlines:
        If ``True``, all line endings will be translated to ``\n``.
    :param with_errors:
        If ``True``, the command's standard output and standard error streams
        will be combined. This parameter is mutually exclusive with the
        ``return_errors`` parameter.
    :param none_on_ignored_error:
        If ``True``, this function will return ``None`` if either
        ``ignore_errors`` is ``True`` and the program returns a non-zero exit
        status, or the program exits with a status code in
        ``extra_ignore_errors``.
    :param return_error_code:
        Determines if the exit status of the executed command will also be
        returned.
    :param log_output_on_error:
        Determines if the output of the executed command will be logged if it
        returns a non-zero status code.
    :param results_unicode:
        Determines if the output will be interpreted as UTF-8. If ``True``,
        the process's output will be returned as a ``six.text_type``.
        Otherwise, it will return a ``six.binary_type``.
    :param return_errors:
        Determines if the standard error stream will be returned. This
        parameter is mutually exclusive with the ``with_errors`` parameter.

    :returns:
        This function returns either a single value or a 2- or 3-tuple.

        If ``return_error_code`` is True, the error code of the process will be
        returned as the first element of the tuple.

        If ``return_errors`` is True, the process' standard error stream will
        be returned as the last element of the tuple.

        If both of ``return_error_code`` and ``return_errors`` are ``False``,
        then the process' output will be returned. If either or both of them
        are ``True``, then this is the other element of the returned tuple.
    """
    def post_process_output(output):
        """Post process the given output to convert it to the desired type."""
        # If Popen is called with universal_newlines=True, the resulting data
        # returned from stdout will be a text stream (and therefore a unicode
        # object). Otherwise, it will be a byte stream. Translate the results
        # into the desired type.
        encoding = sys.getfilesystemencoding()

        if split_lines and len(output) > 0:
            if results_unicode and isinstance(output[0], six.binary_type):
                return [line.decode(encoding) for line in output]
            elif not results_unicode and isinstance(output[0], six.text_type):
                return [line.encode('utf-8') for line in output]
        elif not split_lines:
            if results_unicode and isinstance(output, six.binary_type):
                return output.decode(encoding)
            elif not results_unicode and isinstance(output, six.text_type):
                return output.encode('utf-8')

        return output

    assert not (with_errors and return_errors)

    if isinstance(command, list):
        logging.debug(b'Running: ' + subprocess.list2cmdline(command))
    else:
        logging.debug(b'Running: ' + command)

    new_env = os.environ.copy()

    if env:
        new_env.update(env)

    # TODO: This can break on systems that don't have the en_US locale
    # installed (which isn't very many). Ideally in this case, we could
    # put something in the config file, but that's not plumbed through to here.
    new_env['LC_ALL'] = 'en_US.UTF-8'
    new_env['LANGUAGE'] = 'en_US.UTF-8'

    if with_errors:
        errors_output = subprocess.STDOUT
    else:
        errors_output = subprocess.PIPE

    if sys.platform.startswith('win'):
        # Convert all environment variables to byte strings, so that subprocess
        # doesn't blow up on Windows.
        new_env = dict(
            (six.binary_type(key), six.binary_type(value))
            for key, value in six.iteritems(new_env)
        )

        p = subprocess.Popen(command,
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=errors_output,
                             shell=False,
                             universal_newlines=translate_newlines,
                             env=new_env)
    else:
        p = subprocess.Popen(command,
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=errors_output,
                             shell=False,
                             close_fds=True,
                             universal_newlines=translate_newlines,
                             env=new_env)

    errors = None

    if split_lines:
        data = p.stdout.readlines()

        if return_errors:
            errors = p.stderr.readlines()
    else:
        data = p.stdout.read()

        if return_errors:
            errors = p.stderr.read()

    rc = p.wait()

    if rc and not ignore_errors and rc not in extra_ignore_errors:
        die('Failed to execute command: %s\n%s' % (command, data))
    elif rc:
        if log_output_on_error:
            logging.debug('Command exited with rc %s: %s\n%s---'
                          % (rc, command, data))
        else:
            logging.debug('Command exited with rc %s: %s'
                          % (rc, command))

    if rc and none_on_ignored_error:
        data = None

    if data is not None:
        data = post_process_output(data)

    if return_errors:
        errors = post_process_output(errors)

    if return_error_code or return_errors:
        if return_error_code and return_errors:
            return rc, data, errors
        elif return_error_code:
            return rc, data
        else:
            return data, errors
    else:
        return data
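
The b'Running: ' + command concatenation above relies on byte strings (it raises a TypeError for a text command on Python 3); the same trace can be written with deferred formatting. A minimal sketch, assuming Python 3.7+ for subprocess.run:

import logging
import subprocess

command = ['git', 'status', '--short']

# Deferred %-formatting instead of byte-string concatenation: the message is
# only interpolated if the record is actually emitted.
logging.debug('Running: %s', subprocess.list2cmdline(command))

result = subprocess.run(command, capture_output=True, text=True)
logging.debug('Command exited with rc %s', result.returncode)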

Example 50

Project: tp-libvirt
Source File: virtual_disks_ceph.py
View license
def run(test, params, env):
    """
    Test rbd disk device.

    1.Prepare test environment,destroy or suspend a VM.
    2.Prepare disk image.
    3.Edit disks xml and start the domain.
    4.Perform test operation.
    5.Recover test environment.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}

    def config_ceph():
        """
        Write the configs to the file.
        """
        src_host = disk_src_host.split()
        src_port = disk_src_port.split()
        conf_str = "mon_host = "
        hosts = []
        for host, port in zip(src_host, src_port):
            hosts.append("%s:%s" % (host, port))
        with open(disk_src_config, 'w') as f:
            f.write(conf_str + ','.join(hosts) + '\n')

    def create_pool():
        """
        Define and start a pool.
        """
        sp = libvirt_storage.StoragePool()
        if create_by_xml:
            p_xml = pool_xml.PoolXML(pool_type=pool_type)
            p_xml.name = pool_name
            s_xml = pool_xml.SourceXML()
            s_xml.vg_name = disk_src_pool
            source_host = []
            for (host_name, host_port) in zip(
                    disk_src_host.split(), disk_src_port.split()):
                source_host.append({'name': host_name,
                                    'port': host_port})

            s_xml.hosts = source_host
            if auth_type:
                s_xml.auth_type = auth_type
            if auth_user:
                s_xml.auth_username = auth_user
            if auth_usage:
                s_xml.secret_usage = auth_usage
            p_xml.source = s_xml
            logging.debug("Pool xml: %s", p_xml)
            p_xml.xmltreefile.write()
            ret = virsh.pool_define(p_xml.xml, **virsh_dargs)
            libvirt.check_exit_status(ret)
            ret = virsh.pool_build(pool_name, **virsh_dargs)
            libvirt.check_exit_status(ret)
            ret = virsh.pool_start(pool_name, **virsh_dargs)
            libvirt.check_exit_status(ret)
        else:
            auth_opt = ""
            if client_name and client_key:
                auth_opt = ("--auth-type %s --auth-username %s --secret-usage '%s'"
                            % (auth_type, auth_user, auth_usage))
            if not sp.define_rbd_pool(pool_name, mon_host,
                                      disk_src_pool, extra=auth_opt):
                raise error.TestFail("Failed to define storage pool")
            if not sp.build_pool(pool_name):
                raise error.TestFail("Failed to build storage pool")
            if not sp.start_pool(pool_name):
                raise error.TestFail("Failed to start storage pool")

        # Check pool operation
        ret = virsh.pool_refresh(pool_name, **virsh_dargs)
        libvirt.check_exit_status(ret)
        ret = virsh.pool_uuid(pool_name, **virsh_dargs)
        libvirt.check_exit_status(ret)
        # pool-info
        pool_info = sp.pool_info(pool_name)
        if pool_info["Autostart"] != 'no':
            raise error.TestFail("Failed to check pool information")
        # pool-autostart
        if not sp.set_pool_autostart(pool_name):
            raise error.TestFail("Failed to set pool autostart")
        pool_info = sp.pool_info(pool_name)
        if pool_info["Autostart"] != 'yes':
            raise error.TestFail("Failed to check pool information")
        # pool-autostart --disable
        if not sp.set_pool_autostart(pool_name, "--disable"):
            raise error.TestFail("Failed to set pool autostart")
        # find-storage-pool-sources-as
        ret = virsh.find_storage_pool_sources_as("rbd", mon_host)
        libvirt.check_result(ret, unsupported_msg)

    def create_vol(vol_params):
        """
        Create volume.

        :param vol_params: Volume parameters dict.
        :return: True if created successfully.
        """
        pvt = libvirt.PoolVolumeTest(test, params)
        if create_by_xml:
            pvt.pre_vol_by_xml(pool_name, **vol_params)
        else:
            pvt.pre_vol(vol_name, None, '2G', None, pool_name)

    def check_vol(vol_params):
        """
        Check volume information.
        """
        pv = libvirt_storage.PoolVolume(pool_name)
        # Supported operation
        if vol_name not in pv.list_volumes():
            raise error.TestFail("Volume %s doesn't exist" % vol_name)
        ret = virsh.vol_dumpxml(vol_name, pool_name)
        libvirt.check_exit_status(ret)
        # vol-info
        if not pv.volume_info(vol_name):
            raise error.TestFail("Can't see volmue info")
        # vol-key
        ret = virsh.vol_key(vol_name, pool_name)
        libvirt.check_exit_status(ret)
        if "%s/%s" % (disk_src_pool, vol_name) not in ret.stdout:
            raise error.TestFail("Volume key isn't correct")
        # vol-path
        ret = virsh.vol_path(vol_name, pool_name)
        libvirt.check_exit_status(ret)
        if "%s/%s" % (disk_src_pool, vol_name) not in ret.stdout:
            raise error.TestFail("Volume path isn't correct")
        # vol-pool
        ret = virsh.vol_pool("%s/%s" % (disk_src_pool, vol_name))
        libvirt.check_exit_status(ret)
        if pool_name not in ret.stdout:
            raise error.TestFail("Volume pool isn't correct")
        # vol-name
        ret = virsh.vol_name("%s/%s" % (disk_src_pool, vol_name))
        libvirt.check_exit_status(ret)
        if vol_name not in ret.stdout:
            raise error.TestFail("Volume name isn't correct")
        # vol-resize
        ret = virsh.vol_resize(vol_name, "2G", pool_name)
        libvirt.check_exit_status(ret)

        # Not supported operation
        # vol-clone
        ret = virsh.vol_clone(vol_name, "atest.vol", pool_name)
        libvirt.check_result(ret, unsupported_msg)
        # vol-create-from
        volxml = vol_xml.VolXML()
        vol_params.update({"name": "atest.vol"})
        v_xml = volxml.new_vol(**vol_params)
        v_xml.xmltreefile.write()
        ret = virsh.vol_create_from(pool_name, v_xml.xml, vol_name, pool_name)
        libvirt.check_result(ret, unsupported_msg)

        # vol-wipe
        ret = virsh.vol_wipe(vol_name, pool_name)
        libvirt.check_result(ret, unsupported_msg)
        # vol-upload
        ret = virsh.vol_upload(vol_name, vm.get_first_disk_devices()['source'],
                               "--pool %s" % pool_name)
        libvirt.check_result(ret, unsupported_msg)
        # vol-download
        ret = virsh.vol_download(vol_name, "atest.vol", "--pool %s" % pool_name)
        libvirt.check_result(ret, unsupported_msg)

    def check_qemu_cmd():
        """
        Check qemu command line options.
        """
        cmd = ("ps -ef | grep %s | grep -v grep " % vm_name)
        if disk_src_name:
            cmd += " | grep file=rbd:%s:" % disk_src_name
            if auth_user and auth_key:
                cmd += ('id=%s:auth_supported=cephx' % auth_user)
        if disk_src_config:
            cmd += " | grep 'conf=%s'" % disk_src_config
        elif mon_host:
            hosts = '\:6789\;'.join(mon_host.split())
            cmd += " | grep 'mon_host=%s'" % hosts
        if driver_iothread:
            cmd += " | grep iothread=iothread%s" % driver_iothread
        # Run the command
        process.run(cmd, shell=True)

    def check_save_restore():
        """
        Test save and restore operation
        """
        save_file = os.path.join(test.tmpdir,
                                 "%s.save" % vm_name)
        ret = virsh.save(vm_name, save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)
        ret = virsh.restore(save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)
        if os.path.exists(save_file):
            os.remove(save_file)
        # Login to check vm status
        vm.wait_for_login().close()

    def check_snapshot(snap_option):
        """
        Test snapshot operation.
        """
        snap_name = "s1"
        snap_mem = os.path.join(test.tmpdir, "rbd.mem")
        snap_disk = os.path.join(test.tmpdir, "rbd.disk")
        expected_fails = []
        xml_snap_exp = ["disk name='vda' snapshot='external' type='file'"]
        xml_dom_exp = ["source file='%s'" % snap_disk,
                       "backingStore type='network' index='1'",
                       "source protocol='rbd' name='%s'" % disk_src_name]
        if snap_option.count("disk-only"):
            options = ("%s --diskspec vda,file=%s --disk-only" %
                       (snap_name, snap_disk))
        elif snap_option.count("disk-mem"):
            options = ("%s --memspec file=%s --diskspec vda,file="
                       "%s" % (snap_name, snap_mem, snap_disk))
            xml_snap_exp.append("memory snapshot='external' file='%s'"
                                % snap_mem)
        else:
            options = snap_name

        error_msg = params.get("error_msg")
        if error_msg:
            expected_fails.append(error_msg)
        ret = virsh.snapshot_create_as(vm_name, options)
        if ret.exit_status:
            libvirt.check_result(ret, expected_fails)

        # check xml file.
        if not ret.exit_status:
            snap_xml = virsh.snapshot_dumpxml(vm_name, snap_name,
                                              debug=True).stdout.strip()
            dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip()
            # Delete snapshots.
            libvirt.clean_up_snapshots(vm_name)
            if os.path.exists(snap_mem):
                os.remove(snap_mem)
            if os.path.exists(snap_disk):
                os.remove(snap_disk)

            if not all([x in snap_xml for x in xml_snap_exp]):
                raise error.TestFail("Failed to check snapshot xml")
            if not all([x in dom_xml for x in xml_dom_exp]):
                raise error.TestFail("Failed to check domain xml")

    def check_blockcopy(target):
        """
        Block copy operation test.
        """
        blk_file = os.path.join(test.tmpdir, "blk.rbd")
        if os.path.exists(blk_file):
            os.remove(blk_file)
        blk_mirror = ("mirror type='file' file='%s' "
                      "format='raw' job='copy'" % blk_file)

        # Do blockcopy
        ret = virsh.blockcopy(vm_name, target, blk_file)
        if ret.exit_status:
            error_msg = params.get("error_msg")
            if not error_msg:
                libvirt.check_exit_status(ret)
            else:
                libvirt.check_result(ret, [error_msg])
            # Passed error check, return
            return

        dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip()
        if not dom_xml.count(blk_mirror):
            raise error.TestFail("Can't see block job in domain xml")

        # Abort
        ret = virsh.blockjob(vm_name, target, "--abort")
        libvirt.check_exit_status(ret)
        dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip()
        if dom_xml.count(blk_mirror):
            raise error.TestFail("Failed to abort block job")
        if os.path.exists(blk_file):
            os.remove(blk_file)

        # Sleep for a while after abort operation.
        time.sleep(5)
        # Do blockcopy again
        ret = virsh.blockcopy(vm_name, target, blk_file)
        libvirt.check_exit_status(ret)

        # Wait for complete
        def wait_func():
            ret = virsh.blockjob(vm_name, target, "--info")
            return ret.stderr.count("Block Copy: [100 %]")
        timeout = params.get("blockjob_timeout", 600)
        utils_misc.wait_for(wait_func, int(timeout))

        # Pivot
        ret = virsh.blockjob(vm_name, target, "--pivot")
        libvirt.check_exit_status(ret)
        dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip()
        if not dom_xml.count("source file='%s'" % blk_file):
            raise error.TestFail("Failed to pivot block job")
        # Remove the disk file.
        if os.path.exists(blk_file):
            os.remove(blk_file)

    def check_in_vm(vm_obj, target, old_parts, read_only=False):
        """
        Check mount/read/write disk in VM.
        :param vm_obj: VM guest.
        :param target: Disk dev in VM.
        :param old_parts: Partition list collected before the disk was attached.
        :return: True if the check succeeds.
        """
        try:
            session = vm_obj.wait_for_login()
            new_parts = libvirt.get_parts_list(session)
            added_parts = list(set(new_parts).difference(set(old_parts)))
            logging.info("Added parts:%s", added_parts)
            if len(added_parts) != 1:
                logging.error("The number of new partitions is invalid in VM")
                return False

            added_part = None
            if target.startswith("vd"):
                if added_parts[0].startswith("vd"):
                    added_part = added_parts[0]
            elif target.startswith("hd"):
                if added_parts[0].startswith("sd"):
                    added_part = added_parts[0]

            if not added_part:
                logging.error("Cann't see added partition in VM")
                return False

            cmd = ("mount /dev/{0} /mnt && ls /mnt && (sleep 15;"
                   " touch /mnt/testfile; umount /mnt)"
                   .format(added_part))
            s, o = session.cmd_status_output(cmd, timeout=60)
            session.close()
            logging.info("Check disk operation in VM:\n, %s, %s", s, o)
            # Readonly fs, check the error messages.
            # The command may return True, read-only
            # messages can be found in the command output
            if read_only:
                if "Read-only file system" not in o:
                    return False
                else:
                    return True

            # Other errors
            if s != 0:
                return False
            return True

        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError), e:
            logging.error(str(e))
            return False

    mon_host = params.get("mon_host")
    disk_src_name = params.get("disk_source_name")
    disk_src_config = params.get("disk_source_config")
    disk_src_host = params.get("disk_source_host")
    disk_src_port = params.get("disk_source_port")
    disk_src_pool = params.get("disk_source_pool")
    disk_format = params.get("disk_format", "raw")
    driver_iothread = params.get("driver_iothread")
    pre_vm_state = params.get("pre_vm_state", "running")
    snap_name = params.get("disk_snap_name")
    attach_device = "yes" == params.get("attach_device", "no")
    attach_disk = "yes" == params.get("attach_disk", "no")
    test_save_restore = "yes" == params.get("test_save_restore", "no")
    test_snapshot = "yes" == params.get("test_snapshot", "no")
    test_blockcopy = "yes" == params.get("test_blockcopy", "no")
    test_qemu_cmd = "yes" == params.get("test_qemu_cmd", "no")
    test_vm_parts = "yes" == params.get("test_vm_parts", "no")
    additional_guest = "yes" == params.get("additional_guest", "no")
    create_snapshot = "yes" == params.get("create_snapshot", "no")
    convert_image = "yes" == params.get("convert_image", "no")
    create_volume = "yes" == params.get("create_volume", "no")
    create_by_xml = "yes" == params.get("create_by_xml", "no")
    client_key = params.get("client_key")
    client_name = params.get("client_name")
    auth_key = params.get("auth_key")
    auth_user = params.get("auth_user")
    auth_type = params.get("auth_type")
    auth_usage = params.get("secret_usage")
    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    vol_name = params.get("vol_name")
    vol_cap = params.get("vol_cap")
    vol_cap_unit = params.get("vol_cap_unit")
    start_error_msg = params.get("start_error_msg")
    attach_error_msg = params.get("attach_error_msg")
    unsupported_msg = params.get("unsupported_msg")

    # Start vm and get all partitions in vm.
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    old_parts = libvirt.get_parts_list(session)
    session.close()
    vm.destroy(gracefully=False)
    if additional_guest:
        guest_name = "%s_%s" % (vm_name, '1')
        timeout = params.get("clone_timeout", 360)
        utils_libguestfs.virt_clone_cmd(vm_name, guest_name,
                                        True, timeout=timeout,
                                        ignore_status=False)
        additional_vm = vm.clone(guest_name)
        if pre_vm_state == "running":
            virsh.start(guest_name)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    key_opt = ""
    secret_uuid = None
    key_file = os.path.join(test.tmpdir, "ceph.key")
    img_file = os.path.join(test.tmpdir,
                            "%s_test.img" % vm_name)

    try:
        # Set domain state
        libvirt.set_domain_state(vm, pre_vm_state)

        # Install ceph-common package which include rbd command
        if utils_misc.yum_install(["ceph-common"]):
            if client_name and client_key:
                with open(key_file, 'w') as f:
                    f.write("[%s]\n\tkey = %s\n" %
                            (client_name, client_key))
                key_opt = "--keyring %s" % key_file

                # Create secret xml
                sec_xml = secret_xml.SecretXML("no", "no")
                sec_xml.usage = auth_type
                sec_xml.usage_name = auth_usage
                sec_xml.xmltreefile.write()

                logging.debug("Secret xml: %s", sec_xml)
                ret = virsh.secret_define(sec_xml.xml)
                libvirt.check_exit_status(ret)

                secret_uuid = re.findall(r".+\S+(\ +\S+)\ +.+\S+",
                                         ret.stdout)[0].lstrip()
                logging.debug("Secret uuid %s", secret_uuid)
                if secret_uuid is None:
                    raise error.TestNAError("Failed to get secret uuid")

                # Set secret value
                auth_key = params.get("auth_key")
                ret = virsh.secret_set_value(secret_uuid, auth_key,
                                             **virsh_dargs)
                libvirt.check_exit_status(ret)

            # TODO - Delete the disk if it exists
            #cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm "
            #       "{2}".format(mon_host, key_opt, disk_src_name))
            #process.run(cmd, ignore_status=True, shell=True)
        else:
            raise error.TestNAError("Failed to install ceph-common")

        if disk_src_config:
            config_ceph()
        disk_path = ("rbd:%s:mon_host=%s" %
                     (disk_src_name, mon_host))
        if auth_user and auth_key:
            disk_path += (":id=%s:key=%s" %
                          (auth_user, auth_key))
        targetdev = params.get("disk_target", "vdb")
        # To be compatible with create_disk_xml function,
        # some parameters need to be updated.
        params.update({
            "type_name": params.get("disk_type", "network"),
            "target_bus": params.get("disk_target_bus"),
            "target_dev": targetdev,
            "secret_uuid": secret_uuid,
            "source_protocol": params.get("disk_source_protocol"),
            "source_name": disk_src_name,
            "source_host_name": disk_src_host,
            "source_host_port": disk_src_port})
        # Prepare disk image
        if convert_image:
            first_disk = vm.get_first_disk_devices()
            blk_source = first_disk['source']
            # Convert the image to remote storage
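            # "rbd info" succeeding means the image already exists in the
            # pool, so the "||" skips the qemu-img conversion in that case.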
            disk_cmd = ("rbd -m %s %s info %s || qemu-img convert"
                        " -O %s %s %s" % (mon_host, key_opt,
                                          disk_src_name, disk_format,
                                          blk_source, disk_path))
            process.run(disk_cmd, ignore_status=False, shell=True)

        elif create_volume:
            vol_params = {"name": vol_name, "capacity": int(vol_cap),
                          "capacity_unit": vol_cap_unit, "format": "unknow"}

            create_pool()
            create_vol(vol_params)
            check_vol(vol_params)
        else:
            # Create a local image and make a filesystem on it.
            disk_cmd = ("qemu-img create -f %s %s 10M && mkfs.ext4 -F %s" %
                        (disk_format, img_file, img_file))
            process.run(disk_cmd, ignore_status=False, shell=True)
            # Convert the image to remote storage
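            # As above, only convert when the rbd image does not exist yet.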
            disk_cmd = ("rbd -m %s %s info %s || qemu-img convert -O"
                        " %s %s %s" % (mon_host, key_opt, disk_src_name,
                                       disk_format, img_file, disk_path))
            process.run(disk_cmd, ignore_status=False, shell=True)
            # Create disk snapshot if needed.
            if create_snapshot:
                snap_cmd = ("rbd -m %s %s snap create %[email protected]%s" %
                            (mon_host, key_opt, disk_src_name, snap_name))
                process.run(snap_cmd, ignore_status=False, shell=True)
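        # Attach the disk either as a full <disk> device XML (attach-device),
        # via attach-disk, or by rewriting the domain XML (set_vm_disk).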
        if attach_device:
            if create_volume:
                params.update({"type_name": "volume"})
                # Auth options are not needed for a volume-type disk
                if "auth_user" in params:
                    params.pop("auth_user")
                if "auth_type" in params:
                    params.pop("auth_type")
                if "secret_type" in params:
                    params.pop("secret_type")
                if "secret_uuid" in params:
                    params.pop("secret_uuid")
                if "secret_usage" in params:
                    params.pop("secret_usage")
            xml_file = libvirt.create_disk_xml(params)
            opts = params.get("attach_option", "")
            ret = virsh.attach_device(vm_name, xml_file,
                                      flagstr=opts, debug=True)
            if attach_error_msg:
                libvirt.check_result(ret, attach_error_msg)
            else:
                libvirt.check_exit_status(ret)
            if additional_guest:
                ret = virsh.attach_device(guest_name, xml_file,
                                          "", debug=True)
                libvirt.check_exit_status(ret)
        elif attach_disk:
            ret = virsh.attach_disk(vm_name, disk_path,
                                    targetdev, **virsh_dargs)
            libvirt.check_exit_status(ret)
        elif not create_volume:
            libvirt.set_vm_disk(vm, params)

        if pre_vm_state == "transient":
            logging.info("Creating %s...", vm_name)
            vmxml_for_test = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            if vm.is_alive():
                vm.destroy(gracefully=False)
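            # A transient guest has no persistent definition, so undefine it
            # and start it directly from the XML with "virsh create".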
            vm.undefine()
            if virsh.create(vmxml_for_test.xml, **virsh_dargs).exit_status:
                vmxml_backup.define()
                raise error.TestFail("Cann't create the domain")
        elif vm.is_dead():
            vm.start()
        # Wait until the vm is running
        vm.wait_for_login(timeout=600).close()
        if additional_guest:
            if additional_vm.is_dead():
                additional_vm.start()
        # Check qemu command line
        if test_qemu_cmd:
            check_qemu_cmd()
        # Check partitions in vm
        if test_vm_parts:
            if not check_in_vm(vm, targetdev, old_parts,
                               read_only=create_snapshot):
                raise error.TestFail("Failed to check vm partitions")
            if additional_guest:
                if not check_in_vm(additional_vm, targetdev, old_parts):
                    raise error.TestFail("Failed to check vm partitions")
        # Save and restore operation
        if test_save_restore:
            check_save_restore()
        if test_snapshot:
            snap_option = params.get("snapshot_option", "")
            check_snapshot(snap_option)
        if test_blockcopy:
            check_blockcopy(targetdev)

        # Detach the device.
        if attach_device and not attach_error_msg:
            xml_file = libvirt.create_disk_xml(params)
            ret = virsh.detach_device(vm_name, xml_file)
            libvirt.check_exit_status(ret)
            if additional_guest:
                ret = virsh.detach_device(guest_name, xml_file)
                libvirt.check_exit_status(ret)
        elif attach_disk:
            ret = virsh.detach_disk(vm_name, targetdev)
            libvirt.check_exit_status(ret)

        # Check disk in vm after detachment.
        if (attach_device or attach_disk) and not attach_error_msg:
            session = vm.wait_for_login()
            new_parts = libvirt.get_parts_list(session)
            if len(new_parts) != len(old_parts):
                raise error.TestFail("Disk still exists in vm"
                                     " after detachment")
            session.close()

    except virt_vm.VMStartError as details:
        if not (start_error_msg and start_error_msg in str(details)):
            raise error.TestFail("VM failed to start. "
                                 "Error: %s" % str(details))
    finally:
        # Delete snapshots.
        snapshot_lists = virsh.snapshot_list(vm_name)
        if len(snapshot_lists) > 0:
            libvirt.clean_up_snapshots(vm_name, snapshot_lists)
            for snap in snapshot_lists:
                virsh.snapshot_delete(vm_name, snap, "--metadata")

        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        if additional_guest:
            virsh.remove_domain(guest_name,
                                "--remove-all-storage",
                                ignore_status=True)
        # Remove the snapshot.
        if create_snapshot:
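            # The leading "rbd info" gates the cleanup: snapshots are purged
            # and the image removed only if it still exists in the pool.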
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} snap"
                   " purge {2} && rbd -m {0} {1} rm {2}"
                   "".format(mon_host, key_opt, disk_src_name))
            process.run(cmd, ignore_status=True, shell=True)
        elif attach_device or attach_disk:
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm {2}"
                   "".format(mon_host, key_opt, disk_src_name))
            process.run(cmd, ignore_status=True, shell=True)

        # Delete tmp files.
        if os.path.exists(key_file):
            os.remove(key_file)
        if os.path.exists(img_file):
            os.remove(img_file)
        # Clean up volume and pool
        if pool_name and vol_name and \
                vol_name in str(virsh.vol_list(pool_name).stdout):
            virsh.vol_delete(vol_name, pool_name)
        if pool_name and pool_name in virsh.pool_state_dict():
            virsh.pool_destroy(pool_name, **virsh_dargs)
            virsh.pool_undefine(pool_name, **virsh_dargs)

        # Clean up secret
        if secret_uuid:
            virsh.secret_undefine(secret_uuid)

        logging.info("Restoring vm...")
        vmxml_backup.sync()