Here are examples of the Python API uuid.uuid4 taken from open-source projects. By voting up, you can indicate which examples are most useful and appropriate.
172 Examples
0
Example 151
Project: dodai-compute Source File: vmops.py
def snapshot(self, context, instance, snapshot_name):
    """
    Create snapshot from a running VM instance.

    Steps followed are:
    1. Get the name of the vmdk file which the VM points to right now.
       Can be a chain of snapshots, so we need to know the last in the
       chain.
    2. Create the snapshot. A new vmdk is created which the VM points to
       now. The earlier vmdk becomes read-only.
    3. Call CopyVirtualDisk which coalesces the disk chain to form a single
       vmdk, rather a .vmdk metadata file and a -flat.vmdk disk data file.
    4. Now upload the -flat.vmdk file to the image store.
    5. Delete the coalesced .vmdk and -flat.vmdk created.

    :param context: request context, passed through to the image upload
    :param instance: instance object whose ``name`` and ``id`` are used
        to locate the VM and label tasks
    :param snapshot_name: name under which the image is uploaded
    :raises: exception.InstanceNotFound if no VM matches instance.name
    :raises: exception.DatastoreNotFound if the VM reports no datastore
    """
    vm_ref = self._get_vm_ref_from_the_name(instance.name)
    if vm_ref is None:
        raise exception.InstanceNotFound(instance_id=instance.id)

    client_factory = self._session._get_vim().client.factory
    service_content = self._session._get_vim().get_service_content()

    def _get_vm_and_vmdk_attribs():
        # Get the vmdk file name that the VM is pointing to
        hardware_devices = self._session._call_method(vim_util,
                    "get_dynamic_property", vm_ref,
                    "VirtualMachine", "config.hardware.device")
        vmdk_file_path_before_snapshot, adapter_type = \
            vm_util.get_vmdk_file_path_and_adapter_type(client_factory,
                                                        hardware_devices)
        datastore_name = vm_util.split_datastore_path(
            vmdk_file_path_before_snapshot)[0]
        os_type = self._session._call_method(vim_util,
                    "get_dynamic_property", vm_ref,
                    "VirtualMachine", "summary.config.guestId")
        return (vmdk_file_path_before_snapshot, adapter_type,
                datastore_name, os_type)

    vmdk_file_path_before_snapshot, adapter_type, datastore_name,\
        os_type = _get_vm_and_vmdk_attribs()

    def _create_vm_snapshot():
        # Create a snapshot of the VM
        LOG.debug(_("Creating Snapshot of the VM instance %s ") %
                  instance.name)
        snapshot_task = self._session._call_method(
                    self._session._get_vim(),
                    "CreateSnapshot_Task", vm_ref,
                    name="%s-snapshot" % instance.name,
                    description="Taking Snapshot of the VM",
                    memory=True,
                    quiesce=True)
        # Block until vCenter reports the snapshot task as complete.
        self._session._wait_for_task(instance.id, snapshot_task)
        LOG.debug(_("Created Snapshot of the VM instance %s ") %
                  instance.name)

    _create_vm_snapshot()

    def _check_if_tmp_folder_exists():
        # Copy the contents of the VM that were there just before the
        # snapshot was taken
        ds_ref_ret = vim_util.get_dynamic_property(
                            self._session._get_vim(),
                            vm_ref,
                            "VirtualMachine",
                            "datastore")
        if not ds_ref_ret:
            raise exception.DatastoreNotFound()
        # A VM can span multiple datastores; the first one is used here.
        ds_ref = ds_ref_ret.ManagedObjectReference[0]
        ds_browser = vim_util.get_dynamic_property(
                            self._session._get_vim(),
                            ds_ref,
                            "Datastore",
                            "browser")
        # Check if the vmware-tmp folder exists or not. If not, create one
        tmp_folder_path = vm_util.build_datastore_path(datastore_name,
                                                       "vmware-tmp")
        if not self._path_exists(ds_browser, tmp_folder_path):
            self._mkdir(vm_util.build_datastore_path(datastore_name,
                                                     "vmware-tmp"))

    _check_if_tmp_folder_exists()

    # Generate a random vmdk file name to which the coalesced vmdk content
    # will be copied to. A random name is chosen so that we don't have
    # name clashes.
    random_name = str(uuid.uuid4())
    dest_vmdk_file_location = vm_util.build_datastore_path(datastore_name,
               "vmware-tmp/%s.vmdk" % random_name)
    dc_ref = self._get_datacenter_name_and_ref()[0]

    def _copy_vmdk_content():
        # Copy the contents of the disk ( or disks, if there were snapshots
        # done earlier) to a temporary vmdk file.
        copy_spec = vm_util.get_copy_virtual_disk_spec(client_factory,
                                                       adapter_type)
        LOG.debug(_("Copying disk data before snapshot of the VM "
                    " instance %s") % instance.name)
        copy_disk_task = self._session._call_method(
            self._session._get_vim(),
            "CopyVirtualDisk_Task",
            service_content.virtualDiskManager,
            sourceName=vmdk_file_path_before_snapshot,
            sourceDatacenter=dc_ref,
            destName=dest_vmdk_file_location,
            destDatacenter=dc_ref,
            destSpec=copy_spec,
            force=False)
        self._session._wait_for_task(instance.id, copy_disk_task)
        LOG.debug(_("Copied disk data before snapshot of the VM "
                    "instance %s") % instance.name)

    _copy_vmdk_content()

    # Reuse the authenticated session's cookies for the HTTP upload below.
    cookies = self._session._get_vim().client.options.transport.cookiejar

    def _upload_vmdk_to_image_repository():
        # Upload the contents of -flat.vmdk file which has the disk data.
        LOG.debug(_("Uploading image %s") % snapshot_name)
        vmware_images.upload_image(
            context,
            snapshot_name,
            instance,
            os_type=os_type,
            adapter_type=adapter_type,
            image_version=1,
            host=self._session._host_ip,
            data_center_name=self._get_datacenter_name_and_ref()[1],
            datastore_name=datastore_name,
            cookies=cookies,
            file_path="vmware-tmp/%s-flat.vmdk" % random_name)
        LOG.debug(_("Uploaded image %s") % snapshot_name)

    _upload_vmdk_to_image_repository()

    def _clean_temp_data():
        """
        Delete temporary vmdk files generated in image handling
        operations.
        """
        # Delete the temporary vmdk created above.
        LOG.debug(_("Deleting temporary vmdk file %s")
                  % dest_vmdk_file_location)
        remove_disk_task = self._session._call_method(
            self._session._get_vim(),
            "DeleteVirtualDisk_Task",
            service_content.virtualDiskManager,
            name=dest_vmdk_file_location,
            datacenter=dc_ref)
        self._session._wait_for_task(instance.id, remove_disk_task)
        LOG.debug(_("Deleted temporary vmdk file %s")
                  % dest_vmdk_file_location)

    _clean_temp_data()
0
Example 152
Project: cloudinit.d Source File: boot.py
def parse_commands(argv):
global g_verbose
u = """[options] <command> [<top level launch plan> | <run name>]
Boot and manage a launch plan
Run with the command 'commands' to see a list of all possible commands
"""
version = "cloudinitd " + (cloudinitd.Version)
parser = OptionParser(usage=u, version=version)
all_opts = []
opt = bootOpts("verbose", "v", "Print more output", 1, count=True)
all_opts.append(opt)
opt.add_opt(parser)
opt = bootOpts("validate", "x", "Check that boot plan is valid before launching it.", False, flag=True)
opt.add_opt(parser)
all_opts.append(opt)
opt = bootOpts("dryrun", "y", "Perform dry run on the boot plan. The IaaS service is never contacted but all other actions are performed. This option offers an addition level of plan validation of -x.", False, flag=True)
opt.add_opt(parser)
all_opts.append(opt)
opt = bootOpts("quiet", "q", "Print no output", False, flag=True)
opt.add_opt(parser)
all_opts.append(opt)
opt = bootOpts("name", "n", "Set the run name, only relevant for boot and reload (by default the system picks)", None)
opt.add_opt(parser)
all_opts.append(opt)
opt = bootOpts("database", "d", "Path to the db directory", None)
opt.add_opt(parser)
all_opts.append(opt)
opt = bootOpts("logdir", "f", "Path to the base log directory.", None)
opt.add_opt(parser)
all_opts.append(opt)
opt = bootOpts("loglevel", "l", "Controls the level of detail in the log file", "info", vals=["debug", "info", "warn", "error"])
opt.add_opt(parser)
all_opts.append(opt)
opt = bootOpts("logstack", "s", "Log stack trace information (extreme debug level)", False, flag=True)
opt.add_opt(parser)
all_opts.append(opt)
opt = bootOpts("noclean", "c", "Do not delete the database, only relevant for the terminate command", False, flag=True)
opt.add_opt(parser)
all_opts.append(opt)
opt = bootOpts("safeclean", "C", "Do not delete the database on failed terminate, only relevant for the terminate command", False, flag=True)
opt.add_opt(parser)
all_opts.append(opt)
opt = bootOpts("kill", "k", "This option only applies to the iceage command. When on it will terminate all VMs started with IaaS associated with this run to date. This should be considered an extreme measure to prevent IaaS resource leaks.", False, flag=True)
opt.add_opt(parser)
all_opts.append(opt)
opt = bootOpts("outstream", "O", SUPPRESS_HELP, None)
opt.add_opt(parser)
all_opts.append(opt)
opt = bootOpts("remotedebug", "X", SUPPRESS_HELP, False, flag=True)
opt.add_opt(parser)
all_opts.append(opt)
opt = bootOpts("output", "o", "Create an json docuement which describes the application and write it to the associated file. Relevant for boot and status", None)
opt.add_opt(parser)
all_opts.append(opt)
opt = bootOpts("globalvar", "g", "Add a variable to global variable space", None, append_list=True)
opt.add_opt(parser)
all_opts.append(opt)
opt = bootOpts("globalvarfile", "G", "Add a file to global variable space", None, append_list=True)
opt.add_opt(parser)
all_opts.append(opt)
homedir = os.path.expanduser("~/.cloudinitd")
try:
if not os.path.exists(homedir):
os.mkdir(homedir)
os.chmod(homedir, stat.S_IWUSR | stat.S_IXUSR | stat.S_IRUSR)
except Exception, ex:
print_chars(0, "Error creating cloudinit.d directort %s : %s" % (homedir, str(ex)))
(options, args) = parser.parse_args(args=argv)
_deal_with_cmd_line_globals(options)
for opt in all_opts:
opt.validate(options)
if not options.name:
options.name = str(uuid.uuid4()).split("-")[0]
if options.logdir is None:
options.logdir = os.path.expanduser("~/.cloudinitd/")
(options.logger, logfile) = cloudinitd.make_logger(options.loglevel, options.name, logdir=options.logdir)
if not options.database:
dbdir = os.path.expanduser("~/.cloudinitd")
options.database = dbdir
if options.logstack:
logger = logging.getLogger("stacktracelog")
logger.propagate = False
logger.setLevel(logging.DEBUG)
logdir = os.path.join(options.logdir, options.name)
if not os.path.exists(logdir):
try:
os.mkdir(logdir)
except OSError:
pass
stacklogfile = os.path.join(logdir, "stacktrace.log")
handler = logging.handlers.RotatingFileHandler(stacklogfile, maxBytes=100*1024*1024, backupCount=5)
logger.addHandler(handler)
fmt = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
formatter = logging.Formatter(fmt)
handler.setFormatter(formatter)
if options.quiet:
options.verbose = 0
g_verbose = options.verbose
if options.outstream:
global g_outfile
g_outfile = open(options.outstream, "w")
else:
g_outfile = None
if options.remotedebug:
try:
from pydev import pydevd
debug_cs = os.environ['CLOUDINITD_DEBUG_CS'].split(':')
debug_host = debug_cs[0]
debug_port = int(debug_cs[1])
pydevd.settrace(debug_host, port=debug_port, stdoutToServer=True, stderrToServer=True)
except ImportError, e:
print_chars(0, "Could not import remote debugging library: %s\n" % str(e), color="red", bold=True)
except KeyError:
print_chars(0, "If you want to do remote debugging please set the env CLOUDINITD_DEBUG_CS to the contact string of you expected debugger.\n", color="red", bold=True)
except:
print_chars(0, "Please verify the format of your contact string to be <hostname>:<port>.\n", color="red", bold=True)
global g_options
g_options = options
return (args, options)
0
Example 153
Project: cloudinit.d Source File: user_api.py
def __init__(self, db_dir, config_file=None, db_name=None, log_level="warn", logdir=None, level_callback=None, service_callback=None, boot=True, ready=True, terminate=False, continue_on_error=False, fail_if_db_present=False):
    """
    db_dir:     a path to a directory where databases can be stored.

    config_file: the top-level config file describing this boot plan.
                If this parameter is given then it is assumed that this
                is a new launch plan.  If it is not given, the db_name
                parameter is required and the plan is loaded from an
                existing database.

    db_name:    the name of the database.  This is not an actual path
                to a file; it is the run name given when the plan is
                launched.  The run name can be found in self.name.

    level_callback: a callback function that is invoked whenever
                a level completes or a new level is started.  The
                signature of the callback is:

                    def func_name(cloudinitd, action, current_level)

                action is a string from the set
                ["starting", "transition", "complete", "error"].

    service_callback: a callback function that is invoked whenever
                a service is started, progresses, or finishes.  The
                signature is:

                    def func_name(cloudservice, action, msg)

                action is a string from the set:
                ["starting", "transition", "complete", "error"].

    boot=True:  instructs the object to contextualize the service or not.
    ready=True: instructs the service to run the ready program or not.
    terminate=False: instructs the service to run the shutdown program
                or not.
    fail_if_db_present=False: instructs the constructor that the caller
                expects no DB to be present already.

    When this object is configured with a config_file, a new sqlite
    database is created under @db_dir and a new name is picked for it.
    The database ends up being called <db_dir>/cloudinitd-<name>.db,
    but the user has no real need to know this.

    The constructor does not actually launch a run.  It simply loads up
    the database with the information in the config file (in the case
    of a new launch) and then builds the in-memory data structures.
    """
    if not db_name and not config_file:
        raise APIUsageException("Cloud boot must have a db_name or a config file to load")
    if not os.path.exists(db_dir):
        raise APIUsageException("Path to the db directory does not exist: %s" % (db_dir))

    self._level_callback = level_callback
    self._service_callback = service_callback

    if not db_name:
        # New run: derive a short run name from a random UUID.
        db_name = str(uuid.uuid4()).split("-")[0]

    db_file = "cloudinitd-%s.db" % db_name
    # NOTE(review): joining with a leading "/" forces an absolute path —
    # presumably db_dir is expected to be absolute; confirm with callers.
    db_path = os.path.join("/", db_dir, db_file)
    self._db_path = db_path
    if config_file is None:
        # Loading an existing run: its database must already exist.
        if not os.path.exists(db_path):
            raise APIUsageException("Path to the db does not exist %s.  New dbs must be given a config file" % (db_path))
    if fail_if_db_present and os.path.exists(db_path):
        raise APIUsageException("Already exists: '%s'" % db_path)

    (self._log, logfile) = cloudinitd.make_logger(log_level, db_name, logdir=logdir)

    self._started = False
    self.run_name = db_name
    dburl = "sqlite:///%s" % (db_path)
    self._db = CloudInitDDB(dburl)
    # Restrict the database file to the owning user.
    os.chmod(db_path, stat.S_IRUSR | stat.S_IWUSR)

    if config_file:
        self._bo = self._db.load_from_conf(config_file)
    else:
        self._bo = self._db.load_from_db()

    self._levels = []
    self._boot_top = BootTopLevel(log=self._log, level_callback=self._mp_cb, service_callback=self._svc_cb, boot=boot, ready=ready, terminate=terminate, continue_on_error=continue_on_error)
    for level in self._bo.levels:
        level_list = []
        for s in level.services:
            try:
                (s_log, logfile) = cloudinitd.make_logger(log_level, self.run_name, logdir=logdir, servicename=s.name)
                svc = self._boot_top.new_service(s, self._db, log=s_log, logfile=logfile, run_name=self.run_name)
                # if boot is not set we assume it was already booted and we expand
                if not boot:
                    svc._do_attr_bag()
                level_list.append(svc)
            except Exception, svcex:
                if not continue_on_error:
                    raise
                # continue_on_error: report through the service callback
                # and the log, then move on to the next service.
                action = cloudinitd.callback_action_error
                msg = "ERROR creating SVC object %s, but continue on error set: %s" % (s.name, str(svcex))
                if self._service_callback:
                    cs = CloudService(self, None, name=s.name)
                    self._service_callback(self, cs, action, msg)
                cloudinitd.log(self._log, logging.ERROR, msg)

        self._boot_top.add_level(level_list)
        self._levels.append(level_list)
    self._exception = None
    self._last_exception = None
    self._exception_list = []
0
Example 154
Project: poni Source File: cloud_libvirt.py
@convert_libvirt_errors
def clone_vm(self, name, spec, overwrite=False):
    """Define and start a new libvirt domain described by ``spec``.

    Builds a domain XML document (kvm or lxc), attaches disks and
    network interfaces from the ``hardware`` section of the spec,
    defines the domain on the connected host, starts it, and waits for
    it to appear in the refreshed domain list.

    :param name: domain name; also seeds the deterministic MAC addresses
    :param spec: mapping with hardware/hypervisor/uuid/init/... settings
    :param overwrite: when True, delete any pre-existing VM (and allow
        overwriting cloned/created volumes) instead of raising
    :raises LVPError: on name clash, unknown hypervisor/pool, bad disk
        spec, or if the VM never shows up after defining it
    """
    def macaddr(index):
        """create a mac address based on the VM name for DHCP predictability"""
        hname = name.encode("utf-8") if not isinstance(name, bytes) else name
        mac_ext = hashlib.md5(hname).hexdigest()  # pylint: disable=E1101
        # Locally-administered 52:54:00 prefix; last octet varies per NIC.
        return "52:54:00:{0}:{1}:{2:02x}".format(mac_ext[0:2], mac_ext[2:4], int(mac_ext[4:6], 16) ^ index)

    if name in self.dominfo.vms:
        if not overwrite:
            raise LVPError("{0!r} vm already exists".format(name))
        self.dominfo.vms[name].delete()

    # Merge flat "hardware.xxx" spec keys into the hardware dict.
    hardware = dict(spec.get("hardware", {}))
    for k in spec:
        if k.startswith("hardware."):
            hardware[k[9:]] = spec[k]

    # `hardware` should contain lists of `nics` and `disks`, but
    # previously we've just had a number of entries like `disk0` ..
    # `disk7`
    if "disks" not in hardware:
        hardware["disks"] = [v for k, v in sorted(hardware.items()) if k.startswith("disk")]
    if "nics" not in hardware:
        hardware["nics"] = [v for k, v in sorted(hardware.items()) if k.startswith("nic")] or [{}]

    hypervisor = spec.get("hypervisor", self.hypervisor)
    # RAM can be given in MB ("ram_mb"/"ram") or directly in KB ("ram_kb").
    ram_mb = hardware.get("ram_mb", hardware.get("ram", 1024))
    ram_kb = hardware.get("ram_kb", 1024 * ram_mb)

    if hypervisor == "kvm":
        devs = XMLE.devices(
            XMLE.serial(XMLE.target(port="0"), XMLE.alias(name="serial0"), type="pty"),
            XMLE.console(XMLE.target(port="0"), XMLE.alias(name="serial0"), type="pty"),
            XMLE.input(XMLE.alias(name="input0"), type="tablet", bus="usb"),
            XMLE.input(type="mouse", bus="ps2"),
            XMLE.graphics(type="vnc", autoport="yes"),
            XMLE.video(
                XMLE.model(type="cirrus", vram="9216", heads="1"),
                XMLE.alias(name="video0")),
            XMLE.memballoon(XMLE.alias(name="balloon0"), model="virtio"))
        extra = [
            XMLE.cpu(mode=hardware.get("cpumode", "host-model")),
            XMLE.features(XMLE.acpi(), XMLE.apic(), XMLE.pae()),
            XMLE.os(
                XMLE.type("hvm", machine="pc", arch=hardware.get("arch", "x86_64")),
                XMLE.boot(dev="hd")),
        ]
    elif hypervisor == "lxc":
        devs = XMLE.devices(
            XMLE.emulator(self.emulator),
            XMLE.console(
                XMLE.target(type="lxc", port="0"),
                XMLE.alias(name="console0"),
                type="pty"))
        extra = [
            XMLE.resource(XMLE.partition("/machine")),
            XMLE.os(
                XMLE.type("exe", arch=hardware.get("arch", "x86_64")),
                XMLE.init(spec.get("init", "/sbin/init"))),
            XMLE.seclabel(type="none"),
        ]
    else:
        raise LVPError("unknown hypervisor type {0!r}".format(hypervisor))

    desc = XMLE.domain(
        XMLE.name(name),
        XMLE.description(spec.get("desc", _created_str())),
        XMLE.uuid(spec.get("uuid", str(uuid.uuid4()))),
        XMLE.clock(offset="utc"),
        XMLE.on_poweroff("destroy"),
        XMLE.on_reboot("restart"),
        XMLE.on_crash("restart"),
        XMLE.memory(str(ram_kb)),
        XMLE.vcpu(str(hardware.get("cpus", 1))),
        devs, *extra,
        type=hypervisor)

    # Set up disks
    for i, item in enumerate(hardware.get("disks", [])):
        # we want to name the devices/files created on the host sides with names the kvm guests
        # will see (ie vda, vdb, etc) but lxc hosts don't really see devices, instead we just
        # have target directories
        if hypervisor == "lxc":
            dev_name = str(i)
            target_dir = item.get("target")
        else:
            dev_name = "vd" + chr(ord("a") + i)
            target_dir = None

        if "clone" in item or "create" in item:
            # Disk backed by a storage-pool volume (cloned or created).
            try:
                pool = self.dominfo.pools[item["pool"]]
            except KeyError:
                raise LVPError("host {0}:{1} does not have pool named '{2}'".format(
                    self.host, self.port, item["pool"]))
            if "name" in item:
                vol_name = "{0}-{1}-{2}".format(name, item["name"], dev_name)
            else:
                vol_name = "{0}-{1}".format(name, dev_name)
            if "clone" in item:
                vol = pool.clone_volume(item["clone"], vol_name, item.get("size"), overwrite=overwrite, voltype=item.get("type"))
            if "create" in item:
                vol = pool.create_volume(vol_name, item["size"], overwrite=overwrite, voltype=item.get("type"), upload=item.get("upload"))
            disk_path = vol.path
            disk_type = vol.device
            driver_type = vol.format
        elif "dev" in item:
            # Raw host block device passed through to the guest.
            disk_path = item["dev"]
            disk_type = "block"
            driver_type = "raw"
        elif "file" in item and hypervisor == "kvm":
            disk_path = item["file"]
            disk_type = "file"
            driver_type = item.get("driver", "qcow2")
        elif "source" in item and hypervisor == "lxc":
            # lxc: a host directory mounted into the container.
            disk_path = item["source"]
        else:
            raise LVPError("Unrecognized disk specification {0!r}".format(item))

        if disk_type == "block":
            dsource = XMLE.source(dev=disk_path)
        else:
            dsource = XMLE.source(file=disk_path)
        if disk_type == "block" or hypervisor == "kvm":
            devs.append(XMLE.disk(dsource,
                                  XMLE.driver(name="qemu", type=driver_type, cache=item.get("cache", "none")),
                                  XMLE.target(dev=dev_name)))
        elif hypervisor == "lxc":
            # First disk becomes the container rootfs unless a target is set.
            if not target_dir:
                target_dir = "/" if i == 0 else "/disk{0}".format(i)
            devs.append(XMLE.filesystem(
                XMLE.source(dir=disk_path),
                XMLE.target(dir=target_dir),
                type="mount", accessmode="passthrough"))

    # Set up interfaces - any hardware.nicX entries in spec,
    default_network = spec.get("default_network", "default")
    for i, item in enumerate(hardware.get("nics", [])):
        itype = item.get("type", "network")
        inet = item.get("network", default_network)
        iface = XMLE.interface(XMLE.mac(address=item.get("mac", macaddr(i))), type=itype)
        if hypervisor == "kvm":
            iface.append(XMLE.model(type="virtio"))
        if itype == "network":
            iface.append(XMLE.source(network=inet))
        elif itype == "bridge":
            iface.append(XMLE.source(bridge=inet))
        devs.append(iface)

    # Add guest-agent channel
    if hardware.get("qemu-guest-agent"):
        channel = hardware["qemu-guest-agent"]
        if not isinstance(channel, str):
            # A truthy non-string value selects the default channel name.
            channel = "org.qemu.guest_agent.0"
        item = XMLE.channel(
            XMLE.source(mode="bind"),
            XMLE.target(type="virtio", name=channel),
            type="unix")
        devs.append(item)

    new_desc = etree.tostring(desc, encoding='unicode')
    vm = self.conn.defineXML(new_desc)
    self.libvirt_retry(vm.create)
    # Poll until the new domain shows up in the refreshed host listing.
    for retry in range(1, 10):
        self.refresh_list()
        if name in self.dominfo.vms:
            break
        time.sleep(2.0)
        self.log.info("waiting for VM %s to appear in libvirt hosts... retry #%r", name, retry)
    else:
        raise LVPError("VM {0} did not appear in time on libvirt hosts".format(name))
    return self.dominfo.vms[name]
0
Example 155
Project: openshot-qt Source File: app.py
def __init__(self, *args, mode=None):
    """Initialize the OpenShot Qt application.

    Logs system info, loads user settings, sets up translations,
    project data, the update manager, theming/fonts, and finally
    creates the main window (optionally auto-loading a project or
    media file passed on the command line).

    :param args: arguments forwarded to QApplication (argv first)
    :param mode: passed through to MainWindow — semantics defined there
    """
    QApplication.__init__(self, *args)

    # Log some basic system info
    try:
        v = openshot.GetVersion()
        log.info("openshot-qt version: %s" % info.VERSION)
        log.info("libopenshot version: %s" % v.ToString())
        log.info("platform: %s" % platform.platform())
        log.info("processor: %s" % platform.processor())
        log.info("machine: %s" % platform.machine())
        log.info("python version: %s" % platform.python_version())
        log.info("qt5 version: %s" % QT_VERSION_STR)
        log.info("pyqt5 version: %s" % PYQT_VERSION_STR)
    except:
        # Version logging is best-effort only; never block startup on it.
        pass

    # Setup appication
    self.setApplicationName('openshot')
    self.setApplicationVersion(info.SETUP['version'])

    # Init settings
    self.settings = settings.SettingStore()
    try:
        self.settings.load()
    except Exception as ex:
        # Settings are required; bail out of the whole process.
        log.error("Couldn't load user settings. Exiting.\n{}".format(ex))
        exit()

    # Init translation system
    language.init_language()

    # Tests of project data loading/saving
    self.project = project_data.ProjectDataStore()

    # Init Update Manager
    self.updates = updates.UpdateManager()

    # It is important that the project is the first listener if the key gets update
    self.updates.add_listener(self.project)

    # Load ui theme if not set by OS
    ui_util.load_theme()

    # Start libopenshot logging thread
    self.logger_libopenshot = logger_libopenshot.LoggerLibOpenShot()
    self.logger_libopenshot.start()

    # Track which dockable window received a context menu
    self.context_menu_object = None

    # Set unique install id (if blank)
    if not self.settings.get("unique_install_id"):
        self.settings.set("unique_install_id", str(uuid4()))

    # Track 1st launch metric
    import classes.metrics
    classes.metrics.track_metric_screen("initial-launch-screen")

    # Set Font for any theme
    if self.settings.get("theme") != "No Theme":
        # Load embedded font
        try:
            log.info("Setting font to %s" % os.path.join(info.IMAGES_PATH, "fonts", "Ubuntu-R.ttf"))
            font_id = QFontDatabase.addApplicationFont(os.path.join(info.IMAGES_PATH, "fonts", "Ubuntu-R.ttf"))
            font_family = QFontDatabase.applicationFontFamilies(font_id)[0]
            font = QFont(font_family)
            font.setPointSizeF(10.5)
            QApplication.setFont(font)
        except Exception as ex:
            log.error("Error setting Ubuntu-R.ttf QFont: %s" % str(ex))

    # Set Experimental Dark Theme
    if self.settings.get("theme") == "Humanity: Dark":
        # Only set if dark theme selected
        log.info("Setting custom dark theme")
        self.setStyle(QStyleFactory.create("Fusion"))
        darkPalette = self.palette()
        darkPalette.setColor(QPalette.Window, QColor(53, 53, 53))
        darkPalette.setColor(QPalette.WindowText, Qt.white)
        darkPalette.setColor(QPalette.Base, QColor(25, 25, 25))
        darkPalette.setColor(QPalette.AlternateBase, QColor(53, 53, 53))
        darkPalette.setColor(QPalette.ToolTipBase, Qt.white)
        darkPalette.setColor(QPalette.ToolTipText, Qt.white)
        darkPalette.setColor(QPalette.Text, Qt.white)
        darkPalette.setColor(QPalette.Button, QColor(53, 53, 53))
        darkPalette.setColor(QPalette.ButtonText, Qt.white)
        darkPalette.setColor(QPalette.BrightText, Qt.red)
        darkPalette.setColor(QPalette.Highlight, QColor(42, 130, 218))
        darkPalette.setColor(QPalette.HighlightedText, Qt.black)
        darkPalette.setColor(QPalette.Disabled, QPalette.Text, QColor(104, 104, 104))
        self.setPalette(darkPalette)
        self.setStyleSheet("QToolTip { color: #ffffff; background-color: #2a82da; border: 0px solid white; }")

    # Create main window
    from windows.main_window import MainWindow
    self.window = MainWindow(mode)

    log.info('Process command-line arguments: %s' % args)
    if len(args[0]) == 2:
        path = args[0][1]
        if ".osp" in path:
            # Auto load project passed as argument
            self.window.open_project(path)
        else:
            # Auto import media file
            self.window.filesTreeView.add_file(path)

    # Reset undo/redo history
    self.updates.reset()
    self.window.updateStatusChanged(False, False)
0
Example 156
Project: cinder Source File: nfs_cmode.py
def _copy_from_img_service(self, context, volume, image_service,
                           image_id):
    """Copies from the image service using copy offload.

    Resolves a cluster-local NFS location for the image, copies (or
    clones) it into a temporary file on the destination share, converts
    it to raw if necessary, clones the result into the volume, and
    populates the image cache.  Temporary files are always cleaned up
    via the ``finally`` blocks.

    :param context: request context passed to the image service
    :param volume: volume dict; 'id' and 'name' keys are used
    :param image_service: service used to look up image location/info
    :param image_id: id of the image to copy
    :raises exception.NotFound: if no image location resolves to a
        host on this cluster
    :raises exception.InvalidResults: if conversion to raw did not
        actually produce a raw file
    """
    LOG.debug("Trying copy from image service using copy offload.")
    image_loc = image_service.get_location(context, image_id)
    locations = self._construct_image_nfs_url(image_loc)
    src_ip = None
    selected_loc = None
    # this will match the first location that has a valid IP on cluster
    for location in locations:
        conn, dr = self._check_get_nfs_path_segs(location)
        if conn:
            try:
                src_ip = self._get_ip_verify_on_cluster(conn.split(':')[0])
                selected_loc = location
                break
            except exception.NotFound:
                pass
    if src_ip is None:
        raise exception.NotFound(_("Source host details not found."))
    (__, ___, img_file) = selected_loc.rpartition('/')
    src_path = os.path.join(dr, img_file)
    dst_ip = self._get_ip_verify_on_cluster(self._get_host_ip(
        volume['id']))

    # tmp file is required to deal with img formats
    tmp_img_file = six.text_type(uuid.uuid4())
    col_path = self.configuration.netapp_copyoffload_tool_path
    img_info = image_service.show(context, image_id)
    dst_share = self._get_provider_location(volume['id'])
    self._check_share_can_hold_size(dst_share, img_info['size'])
    run_as_root = self._execute_as_root

    dst_dir = self._get_mount_point_for_share(dst_share)
    dst_img_local = os.path.join(dst_dir, tmp_img_file)
    try:
        # If src and dst share not equal
        if (('%s:%s' % (src_ip, dr)) !=
                ('%s:%s' % (dst_ip, self._get_export_path(volume['id'])))):
            dst_img_serv_path = os.path.join(
                self._get_export_path(volume['id']), tmp_img_file)
            # Always run copy offload as regular user, it's sufficient
            # and rootwrap doesn't allow copy offload to run as root
            # anyways.
            self._execute(col_path, src_ip, dst_ip, src_path,
                          dst_img_serv_path, run_as_root=False,
                          check_exit_code=0)
        else:
            # Same share: a server-side file clone is enough.
            self._clone_file_dst_exists(dst_share, img_file, tmp_img_file)
        self._discover_file_till_timeout(dst_img_local, timeout=120)
        LOG.debug('Copied image %(img)s to tmp file %(tmp)s.',
                  {'img': image_id, 'tmp': tmp_img_file})
        dst_img_cache_local = os.path.join(dst_dir,
                                           'img-cache-%s' % image_id)
        if img_info['disk_format'] == 'raw':
            LOG.debug('Image is raw %s.', image_id)
            self._clone_file_dst_exists(dst_share, tmp_img_file,
                                        volume['name'], dest_exists=True)
            # Promote the tmp copy into the image cache.
            self._move_nfs_file(dst_img_local, dst_img_cache_local)
            LOG.debug('Copied raw image %(img)s to volume %(vol)s.',
                      {'img': image_id, 'vol': volume['id']})
        else:
            LOG.debug('Image will be converted to raw %s.', image_id)
            img_conv = six.text_type(uuid.uuid4())
            dst_img_conv_local = os.path.join(dst_dir, img_conv)

            # Checking against image size which is approximate check
            self._check_share_can_hold_size(dst_share, img_info['size'])
            try:
                image_utils.convert_image(dst_img_local,
                                          dst_img_conv_local, 'raw',
                                          run_as_root=run_as_root)
                data = image_utils.qemu_img_info(dst_img_conv_local,
                                                 run_as_root=run_as_root)
                if data.file_format != "raw":
                    raise exception.InvalidResults(
                        _("Converted to raw, but format is now %s.")
                        % data.file_format)
                else:
                    self._clone_file_dst_exists(dst_share, img_conv,
                                                volume['name'],
                                                dest_exists=True)
                    self._move_nfs_file(dst_img_conv_local,
                                        dst_img_cache_local)
                    LOG.debug('Copied locally converted raw image'
                              ' %(img)s to volume %(vol)s.',
                              {'img': image_id, 'vol': volume['id']})
            finally:
                # Remove the converted tmp file if it still exists.
                if os.path.exists(dst_img_conv_local):
                    self._delete_file_at_path(dst_img_conv_local)
        self._post_clone_image(volume)
    finally:
        # Remove the original tmp copy if it still exists.
        if os.path.exists(dst_img_local):
            self._delete_file_at_path(dst_img_local)
0
Example 157
Project: cloudbase-init Source File: x509.py
def create_self_signed_cert(self, subject, validity_years=10,
                            machine_keyset=True, store_name=STORE_NAME_MY):
    """Create a self-signed X509 certificate via the Windows CryptoAPI.

    Generates a key container, encodes the subject, builds the
    certificate with a small backdated start time (to tolerate clock
    skew), marks it for server authentication, and adds it to the
    requested certificate store.

    :param subject: X500 subject string (e.g. "CN=host")
    :param validity_years: nominal validity period
        (NOTE(review): this parameter is not referenced below — the
        end date comes from X509_END_DATE_INTERVAL; confirm intent)
    :param machine_keyset: use the machine key set/store rather than
        the current user's
    :param store_name: name of the certificate store to add the cert to
    :return: the thumbprint of the created certificate
    :raises cryptoapi.CryptoAPIException: if any CryptoAPI call fails
    """
    subject_encoded = None
    cert_context_p = None
    store_handle = None
    # Random container name avoids clashes with existing key containers.
    container_name = str(uuid.uuid4())
    self._generate_key(container_name, machine_keyset)
    try:
        # First call with a NULL buffer only computes the required size.
        subject_encoded_len = wintypes.DWORD()
        if not cryptoapi.CertStrToName(cryptoapi.X509_ASN_ENCODING,
                                       subject,
                                       cryptoapi.CERT_X500_NAME_STR, None,
                                       None,
                                       ctypes.byref(subject_encoded_len),
                                       None):
            raise cryptoapi.CryptoAPIException()

        size = ctypes.c_size_t(subject_encoded_len.value)
        subject_encoded = ctypes.cast(malloc(size),
                                      ctypes.POINTER(wintypes.BYTE))

        # Second call performs the actual encoding into the buffer.
        if not cryptoapi.CertStrToName(cryptoapi.X509_ASN_ENCODING,
                                       subject,
                                       cryptoapi.CERT_X500_NAME_STR, None,
                                       subject_encoded,
                                       ctypes.byref(subject_encoded_len),
                                       None):
            raise cryptoapi.CryptoAPIException()

        subject_blob = cryptoapi.CRYPTOAPI_BLOB()
        subject_blob.cbData = subject_encoded_len
        subject_blob.pbData = subject_encoded

        # Bind the certificate to the key container created above.
        key_prov_info = cryptoapi.CRYPT_KEY_PROV_INFO()
        key_prov_info.pwszContainerName = container_name
        key_prov_info.pwszProvName = None
        key_prov_info.dwProvType = cryptoapi.PROV_RSA_FULL
        key_prov_info.cProvParam = None
        key_prov_info.rgProvParam = None
        key_prov_info.dwKeySpec = cryptoapi.AT_SIGNATURE

        if machine_keyset:
            key_prov_info.dwFlags = cryptoapi.CRYPT_MACHINE_KEYSET
        else:
            key_prov_info.dwFlags = 0

        sign_alg = cryptoapi.CRYPT_ALGORITHM_IDENTIFIER()
        sign_alg.pszObjId = cryptoapi.szOID_RSA_SHA1RSA

        start_time = cryptoapi.SYSTEMTIME()
        cryptoapi.GetSystemTime(ctypes.byref(start_time))

        end_time = self._add_system_time_interval(
            start_time, X509_END_DATE_INTERVAL)

        # Needed in case of time sync issues as PowerShell remoting
        # enforces a valid time interval even for self signed certificates
        start_time = self._add_system_time_interval(
            start_time, X509_START_DATE_INTERVAL)

        cert_context_p = cryptoapi.CertCreateSelfSignCertificate(
            None, ctypes.byref(subject_blob), 0,
            ctypes.byref(key_prov_info),
            ctypes.byref(sign_alg), ctypes.byref(start_time),
            ctypes.byref(end_time), None)
        if not cert_context_p:
            raise cryptoapi.CryptoAPIException()

        # Mark the certificate as usable for server authentication.
        if not cryptoapi.CertAddEnhancedKeyUsageIdentifier(
                cert_context_p, cryptoapi.szOID_PKIX_KP_SERVER_AUTH):
            raise cryptoapi.CryptoAPIException()

        if machine_keyset:
            flags = cryptoapi.CERT_SYSTEM_STORE_LOCAL_MACHINE
        else:
            flags = cryptoapi.CERT_SYSTEM_STORE_CURRENT_USER

        store_handle = cryptoapi.CertOpenStore(
            cryptoapi.CERT_STORE_PROV_SYSTEM, 0, 0, flags,
            six.text_type(store_name))
        if not store_handle:
            raise cryptoapi.CryptoAPIException()

        if not cryptoapi.CertAddCertificateContextToStore(
                store_handle, cert_context_p,
                cryptoapi.CERT_STORE_ADD_REPLACE_EXISTING, None):
            raise cryptoapi.CryptoAPIException()

        return self._get_cert_thumprint(cert_context_p)
    finally:
        # Release native handles/memory in all paths.
        if store_handle:
            cryptoapi.CertCloseStore(store_handle, 0)
        if cert_context_p:
            cryptoapi.CertFreeCertificateContext(cert_context_p)
        if subject_encoded:
            free(subject_encoded)
0
Example 158
Project: designate Source File: ipaextractor.py
def main():
    """Extract all DNS zones and records from IPA and import them into
    Designate over its REST API.

    NOTE(review): indentation below is reconstructed — the scraped source
    lost all leading whitespace; verify against the original file.

    Flow:
      1. Read designate + IPA-backend config (with CLI-override hacks).
      2. Refuse to run if Designate itself is configured with the IPA
         backend (importing would loop back into IPA).
      3. Probe: create a throwaway sub-domain in IPA, then try to create
         the same name in Designate — a duplicate means Designate is
         actually backed by IPA, so abort.
      4. Delete every domain Designate currently holds, then copy each
         active IPA zone and all of its records into Designate.

    Raises:
        CannotUseIPABackend: Designate is (or appears to be) IPA-backed.
        NoNameServers: IPA reports no NS records at all.
        AddDomainError, AddRecordError, DeleteDomainError: REST failures.
    """
    # HACK HACK HACK - allow required config params to be passed
    # via the command line
    cfg.CONF['service:api']._group._opts['api_base_uri']['cli'] = True
    for optdict in cfg.CONF['backend:ipa']._group._opts.values():
        if 'cli' in optdict:
            optdict['cli'] = True
    # HACK HACK HACK - allow api url to be passed in the usual way
    utils.read_config('designate', sys.argv)
    if cfg.CONF['service:central'].backend_driver == 'ipa':
        raise CannotUseIPABackend(cuiberrorstr)
    if cfg.CONF.debug:
        LOG.setLevel(logging.DEBUG)
    elif cfg.CONF.verbose:
        LOG.setLevel(logging.INFO)
    else:
        LOG.setLevel(logging.WARN)
    ipabackend = impl_ipa.IPABackend(None)
    ipabackend.start()
    version = cfg.CONF['backend:ipa'].ipa_version
    designateurl = cfg.CONF['service:api'].api_base_uri + "v1"
    # get the list of domains/zones from IPA
    ipazones = getipadomains(ipabackend, version)
    # get unique list of name servers (dict used as an ordered set)
    servers = {}
    for zonerec in ipazones:
        for nsrec in zonerec['nsrecord']:
            servers[nsrec] = nsrec
    if not servers:
        raise NoNameServers("Error: no name servers found in IPA")
    # let's see if designate is using the IPA backend
    # create a fake domain in IPA
    # create a fake server in Designate
    # try to create the same fake domain in Designate
    # if we get a DuplicateZone error from Designate, then
    # raise the CannotUseIPABackend error, after deleting
    # the fake server and fake domain
    # find the first non-reverse zone
    zone = {}
    for zrec in ipazones:
        if not zrec['idnsname'][0].endswith("in-addr.arpa.") and \
                zrec['idnszoneactive'][0] == 'TRUE':
            # ipa returns every data field as a list
            # convert the list to a scalar
            for n, v in list(zrec.items()):
                if n in zoneskips:
                    continue
                if isinstance(v, list):
                    zone[n] = v[0]
                else:
                    zone[n] = v
            break
    assert(zone)
    # create a fake subdomain of this zone (uuid makes collisions
    # with real names practically impossible)
    domname = "%s.%s" % (uuid.uuid4(), zone['idnsname'])
    args = copy.copy(zone)
    del args['idnsname']
    args['version'] = version
    ipareq = {'method': 'dnszone_add',
              'params': [[domname], args]}
    iparesp = ipabackend._call_and_handle_error(ipareq)
    LOG.debug("Response: %s" % pprint.pformat(iparesp))
    if iparesp['error']:
        raise AddDomainError(pprint.pformat(iparesp))
    # set up designate connection
    designatereq = requests.Session()
    xtra_hdrs = {'Content-Type': 'application/json'}
    designatereq.headers.update(xtra_hdrs)
    # sync ipa name servers to designate
    syncipaservers2des(servers, designatereq, designateurl)
    domainurl = designateurl + "/domains"
    # next, try to add the fake domain to Designate
    email = zone['idnssoarname'].rstrip(".").replace(".", "@", 1)
    desreq = {"name": domname,
              "ttl": int(zone['idnssoarefresh'][0]),
              "email": email}
    resp = designatereq.post(domainurl, data=json.dumps(desreq))
    exc = None
    fakezoneid = None
    if resp.status_code == 200:
        # creation succeeded -> Designate is NOT IPA-backed; clean up
        LOG.info(_LI("Added domain %s"), domname)
        fakezoneid = resp.json()['id']
        delresp = designatereq.delete(domainurl + "/" + fakezoneid)
        if delresp.status_code != 200:
            LOG.error(_LE("Unable to delete %(name)s: %(response)s") %
                      {'name': domname, 'response': pprint.pformat(
                          delresp.json())})
    else:
        # duplicate/failure -> assume Designate saw the IPA-created zone
        exc = CannotUseIPABackend(cuiberrorstr)
    # cleanup fake stuff (raise exc only after the IPA side is removed)
    ipareq = {'method': 'dnszone_del',
              'params': [[domname], {'version': version}]}
    iparesp = ipabackend._call_and_handle_error(ipareq)
    LOG.debug("Response: %s" % pprint.pformat(iparesp))
    if iparesp['error']:
        LOG.error(_LE("%s") % pprint.pformat(iparesp))
    if exc:
        raise exc
    # get and delete existing domains
    resp = designatereq.get(domainurl)
    LOG.debug("Response: %s" % pprint.pformat(resp.json()))
    if resp and resp.status_code == 200 and resp.json() and \
            'domains' in resp.json():
        # domains must be deleted in child/parent order i.e. delete
        # sub-domains before parent domains - simple way to get this
        # order is to sort the domains in reverse order of name len
        dreclist = sorted(resp.json()['domains'],
                          key=lambda drec: len(drec['name']),
                          reverse=True)
        for drec in dreclist:
            delresp = designatereq.delete(domainurl + "/" + drec['id'])
            if delresp.status_code != 200:
                raise DeleteDomainError("Unable to delete %s: %s" %
                                        (drec['name'],
                                         pprint.pformat(delresp.json())))
    # key is zonename, val is designate rec id
    zonerecs = {}
    for zonerec in ipazones:
        desreq = zone2des(zonerec)
        resp = designatereq.post(domainurl, data=json.dumps(desreq))
        if resp.status_code == 200:
            LOG.info(_LI("Added domain %s"), desreq['name'])
        else:
            raise AddDomainError("Unable to add domain %s: %s" %
                                 (desreq['name'], pprint.pformat(resp.json())))
        zonerecs[desreq['name']] = resp.json()['id']
    # get the records for each zone
    for zonename, domainid in list(zonerecs.items()):
        recurl = designateurl + "/domains/" + domainid + "/records"
        iparecs = getiparecords(ipabackend, zonename, version)
        for rec in iparecs:
            # one IPA record may expand to several Designate records
            desreqs = rec2des(rec, zonename)
            for desreq in desreqs:
                resp = designatereq.post(recurl, data=json.dumps(desreq))
                if resp.status_code == 200:
                    LOG.info(_LI("Added record %(record)s "
                                 "for domain %(domain)s"),
                             {'record': desreq['name'], 'domain': zonename})
                else:
                    raise AddRecordError("Could not add record %s: %s" %
                                         (desreq['name'],
                                          pprint.pformat(resp.json())))
Votes: 0
Example 159
Project: heat — Source File: test_software_config.py
    @mock.patch.object(service_software_config.SoftwareConfigService,
                       '_push_metadata_software_deployments')
    def test_signal_software_deployment(self, pmsd):
        """Exercise signal_software_deployment through its main paths.

        NOTE(review): indentation reconstructed — the scraped source lost
        leading whitespace; verify against the original test file.

        Covered cases, in order:
          * no deployment_id  -> ValueError
          * unknown id        -> NotFound (wrapped in ExpectedException)
          * deployment not IN_PROGRESS -> signal ignored (returns None)
          * empty signal      -> COMPLETE, all default outputs None
          * signal with data  -> COMPLETE, outputs merged in
          * deploy_status_code != 0    -> FAILED with status-code reason
          * declared error_output set  -> FAILED with per-output reason
        The metadata push is mocked out (``pmsd``) so no engine RPC occurs.
        """
        self.assertRaises(ValueError,
                          self.engine.signal_software_deployment,
                          self.ctx, None, {}, None)
        deployment_id = str(uuid.uuid4())
        ex = self.assertRaises(dispatcher.ExpectedException,
                               self.engine.signal_software_deployment,
                               self.ctx, deployment_id, {}, None)
        self.assertEqual(exception.NotFound, ex.exc_info[0])
        deployment = self._create_software_deployment()
        deployment_id = deployment['id']
        # signal is ignored unless deployment is IN_PROGRESS
        self.assertIsNone(self.engine.signal_software_deployment(
            self.ctx, deployment_id, {}, None))
        # simple signal, no data
        deployment = self._create_software_deployment(action='INIT',
                                                      status='IN_PROGRESS')
        deployment_id = deployment['id']
        res = self.engine.signal_software_deployment(
            self.ctx, deployment_id, {}, None)
        self.assertEqual('deployment %s succeeded' % deployment_id, res)
        sd = software_deployment_object.SoftwareDeployment.get_by_id(
            self.ctx, deployment_id)
        self.assertEqual('COMPLETE', sd.status)
        self.assertEqual('Outputs received', sd.status_reason)
        self.assertEqual({
            'deploy_status_code': None,
            'deploy_stderr': None,
            'deploy_stdout': None
        }, sd.output_values)
        self.assertIsNotNone(sd.updated_at)
        # simple signal, some data
        config = self._create_software_config(outputs=[{'name': 'foo'}])
        deployment = self._create_software_deployment(
            config_id=config['id'], action='INIT', status='IN_PROGRESS')
        deployment_id = deployment['id']
        result = self.engine.signal_software_deployment(
            self.ctx,
            deployment_id,
            {'foo': 'bar', 'deploy_status_code': 0},
            None)
        self.assertEqual('deployment %s succeeded' % deployment_id, result)
        sd = software_deployment_object.SoftwareDeployment.get_by_id(
            self.ctx, deployment_id)
        self.assertEqual('COMPLETE', sd.status)
        self.assertEqual('Outputs received', sd.status_reason)
        self.assertEqual({
            'deploy_status_code': 0,
            'foo': 'bar',
            'deploy_stderr': None,
            'deploy_stdout': None
        }, sd.output_values)
        self.assertIsNotNone(sd.updated_at)
        # failed signal on deploy_status_code
        config = self._create_software_config(outputs=[{'name': 'foo'}])
        deployment = self._create_software_deployment(
            config_id=config['id'], action='INIT', status='IN_PROGRESS')
        deployment_id = deployment['id']
        result = self.engine.signal_software_deployment(
            self.ctx,
            deployment_id,
            {
                'foo': 'bar',
                'deploy_status_code': -1,
                'deploy_stderr': 'Its gone Pete Tong'
            },
            None)
        self.assertEqual('deployment %s failed (-1)' % deployment_id, result)
        sd = software_deployment_object.SoftwareDeployment.get_by_id(
            self.ctx, deployment_id)
        self.assertEqual('FAILED', sd.status)
        self.assert_status_reason(
            ('deploy_status_code : Deployment exited with non-zero '
             'status code: -1'),
            sd.status_reason)
        self.assertEqual({
            'deploy_status_code': -1,
            'foo': 'bar',
            'deploy_stderr': 'Its gone Pete Tong',
            'deploy_stdout': None
        }, sd.output_values)
        self.assertIsNotNone(sd.updated_at)
        # failed signal on error_output foo
        config = self._create_software_config(outputs=[
            {'name': 'foo', 'error_output': True}])
        deployment = self._create_software_deployment(
            config_id=config['id'], action='INIT', status='IN_PROGRESS')
        deployment_id = deployment['id']
        result = self.engine.signal_software_deployment(
            self.ctx,
            deployment_id,
            {
                'foo': 'bar',
                'deploy_status_code': -1,
                'deploy_stderr': 'Its gone Pete Tong'
            },
            None)
        self.assertEqual('deployment %s failed' % deployment_id, result)
        sd = software_deployment_object.SoftwareDeployment.get_by_id(
            self.ctx, deployment_id)
        self.assertEqual('FAILED', sd.status)
        self.assert_status_reason(
            ('foo : bar, deploy_status_code : Deployment exited with '
             'non-zero status code: -1'),
            sd.status_reason)
        self.assertEqual({
            'deploy_status_code': -1,
            'foo': 'bar',
            'deploy_stderr': 'Its gone Pete Tong',
            'deploy_stdout': None
        }, sd.output_values)
        self.assertIsNotNone(sd.updated_at)
Votes: 0
Example 160
Project: poppy — Source File: services.py
    def update_service(self, project_id, service_id,
                       auth_token, service_updates, force_update=False):
        """Apply a JSON-patch update to an existing CDN service.

        NOTE(review): indentation reconstructed — the scraped source lost
        leading whitespace; in particular the placement of the
        cert-details deserialization inside the non-upgrade branch is an
        assumption to be confirmed against the original file.

        Loads the current service, rejects updates for disabled or
        in-flight services, applies ``service_updates`` (a JSON patch) to
        a sanitized copy, validates it against the POST schema, rebinds
        shared/SAN SSL domains, persists the new service with provider
        status ``update_in_progress`` and hands the actual provider
        update off to the distributed task flow.

        :param project_id: tenant owning the service
        :param service_id: id of the service being updated
        :param auth_token: token forwarded to the async update task
        :param service_updates: JSON-patch list of changes
        :param force_update: allow updating even when the service is not
            in ``deployed``/``failed`` state
        :raises errors.ServiceNotFound: unknown service_id
        :raises errors.ServiceStatusDisabled: operator disabled it
        :raises errors.ServiceStatusNeitherDeployedNorFailed: bad state
        :raises LookupError: flavor in the patched service is unknown
        :raises ValueError: a requested domain is already taken
        """
        # get the current service object
        try:
            service_old = self.storage_controller.get_service(
                project_id,
                service_id
            )
        except ValueError:
            raise errors.ServiceNotFound("Service not found")
        if service_old.operator_status == u'disabled':
            raise errors.ServiceStatusDisabled(
                u'Service {0} is disabled'.format(service_id))
        if (
            service_old.status not in [u'deployed', u'failed'] and
            force_update is False
        ):
            raise errors.ServiceStatusNeitherDeployedNorFailed(
                u'Service {0} neither deployed nor failed'.format(service_id))
        # Fixing the operator_url domain for ssl
        # for schema validation
        existing_shared_domains = {}
        for domain in service_old.domains:
            if domain.protocol == 'https' and domain.certificate == 'shared':
                # shared certs are stored as "<label>.<operator suffix>";
                # strip back to the customer label for validation
                customer_domain = domain.domain.split('.')[0]
                existing_shared_domains[customer_domain] = domain.domain
                domain.domain = customer_domain
            # old domains need to bind as well
            elif domain.certificate == 'san':
                cert_for_domain = (
                    self.ssl_certificate_storage.get_certs_by_domain(
                        domain.domain,
                        project_id=project_id,
                        flavor_id=service_old.flavor_id,
                        cert_type=domain.certificate))
                if cert_for_domain == []:
                    cert_for_domain = None
                domain.cert_info = cert_for_domain
        # round-trip through JSON to get a deep plain-dict copy
        service_old_json = json.loads(json.dumps(service_old.to_dict()))
        # remove fields that cannot be part of PATCH
        del service_old_json['service_id']
        del service_old_json['status']
        del service_old_json['operator_status']
        del service_old_json['provider_details']
        for domain in service_old_json['domains']:
            if 'cert_info' in domain:
                del domain['cert_info']
        service_new_json = jsonpatch.apply_patch(
            service_old_json, service_updates)
        # add any default rules so its explicitly defined
        self._append_defaults(service_new_json, operation='update')
        # validate the updates
        schema = service_schema.ServiceSchema.get_schema("service", "POST")
        validators.is_valid_service_configuration(service_new_json, schema)
        try:
            self.flavor_controller.get(service_new_json['flavor_id'])
        # raise a lookup error if the flavor is not found
        except LookupError as e:
            raise e
        # must be valid, carry on
        service_new_json['service_id'] = service_old.service_id
        service_new = service.Service.init_from_dict(project_id,
                                                     service_new_json)
        # per-call scratch attribute name used by _pick_shared_ssl_domain
        store = str(uuid.uuid4()).replace('-', '_')
        service_new.provider_details = service_old.provider_details
        # fixing the old and new shared ssl domains in service_new
        for domain in service_new.domains:
            if domain.protocol == 'https':
                if domain.certificate == 'shared':
                    customer_domain = domain.domain.split('.')[0]
                    # if this domain is from service_old
                    if customer_domain in existing_shared_domains:
                        domain.domain = existing_shared_domains[
                            customer_domain
                        ]
                    else:
                        domain.domain = self._pick_shared_ssl_domain(
                            customer_domain,
                            service_new.service_id,
                            store)
                elif domain.certificate == 'san':
                    cert_for_domain = (
                        self.ssl_certificate_storage.get_certs_by_domain(
                            domain.domain,
                            project_id=project_id,
                            flavor_id=service_new.flavor_id,
                            cert_type=domain.certificate))
                    if cert_for_domain == []:
                        cert_for_domain = None
                    domain.cert_info = cert_for_domain
                    # retrofit the access url info into
                    # certificate_info table
                    # Note(tonytan4ever): this is for backward
                    # compatibility
                    if domain.cert_info is None and \
                            service_new.provider_details is not None:
                        # Note(tonytan4ever): right now we assume
                        # only one provider per flavor, that's
                        # why we use values()[0]
                        access_url_for_domain = (
                            list(service_new.provider_details.values())[0].
                            get_domain_access_url(domain.domain))
                        if access_url_for_domain is not None:
                            providers = (
                                self.flavor_controller.get(
                                    service_new.flavor_id).providers
                            )
                            san_cert_url = access_url_for_domain.get(
                                'provider_url')
                            https_upgrade = self._detect_upgrade_http_to_https(
                                service_old.domains, domain)
                            if https_upgrade is True:
                                new_cert_detail = None
                            else:
                                # Note(tonytan4ever): stored san_cert_url
                                # for two times, that's intentional
                                # a little extra info does not hurt
                                new_cert_detail = {
                                    providers[0].provider_id.title():
                                    json.dumps(dict(
                                        cert_domain=san_cert_url,
                                        extra_info={
                                            'status': 'deployed',
                                            'san cert': san_cert_url,
                                            'created_at': str(
                                                datetime.datetime.now())
                                        }
                                    ))
                                }
                            new_cert_obj = ssl_certificate.SSLCertificate(
                                service_new.flavor_id,
                                domain.domain,
                                'san',
                                project_id,
                                new_cert_detail
                            )
                            if https_upgrade is True:
                                # request a new ssl cert the same way
                                # ssl_cert creation is done using taskflow
                                LOG.debug('Sending request to create ssl cert')
                                self.ssl_cert_manager.create_ssl_certificate(
                                    project_id,
                                    new_cert_obj,
                                    https_upgrade=True
                                )
                            else:
                                self.ssl_certificate_storage.\
                                    create_certificate(
                                        project_id,
                                        new_cert_obj
                                    )
                                # deserialize cert_details dict
                                try:
                                    new_cert_obj.cert_details[
                                        providers[0].provider_id.title()
                                    ] = json.loads(
                                        new_cert_obj.cert_details[
                                            providers[0].provider_id.title()]
                                    )
                                except Exception:
                                    new_cert_obj.cert_details[
                                        providers[0].provider_id.title()] = {}
                                domain.cert_info = new_cert_obj
        # drop the scratch attribute created by _pick_shared_ssl_domain
        if hasattr(self, store):
            delattr(self, store)
        # check if the service domain names already exist
        # existing shared domains do not count!
        for d in service_new.domains:
            if self.storage_controller.domain_exists_elsewhere(
                    d.domain,
                    service_id) is True and \
                    d.domain not in existing_shared_domains.values():
                raise ValueError(
                    "Domain {0} has already been taken".format(d.domain))
        # set status in provider details to u'update_in_progress'
        provider_details = service_old.provider_details
        for provider in provider_details:
            provider_details[provider].status = u'update_in_progress'
        service_new.provider_details = provider_details
        self.storage_controller.update_service(
            project_id,
            service_id,
            service_new
        )
        # hand the provider-side update to the async task flow
        kwargs = {
            'project_id': project_id,
            'service_id': service_id,
            'auth_token': auth_token,
            'service_old': json.dumps(service_old.to_dict()),
            'service_obj': json.dumps(service_new.to_dict()),
            'time_seconds': self.determine_sleep_times(),
            'context_dict': context_utils.get_current().to_dict()
        }
        self.distributed_task_controller.submit_task(
            update_service.update_service, **kwargs)
        return
Votes: 0
Example 161
Project: rack — Source File: processes.py
@wsgi.response(202)
def create(self, req, body, gid, is_proxy=False):
def _validate(context, body, gid, is_proxy=False):
proxy = db.process_get_all(
context, gid, filters={"is_proxy": True})
if is_proxy:
if len(proxy) > 0:
msg = _(
"Proxy process already exists in the group %s" % gid)
raise exception.InvalidInput(reason=msg)
else:
if len(proxy) != 1:
msg = _(
"Proxy process does not exist in the group %s" % gid)
raise webob.exc.HTTPBadRequest(explanation=msg)
keyname = "proxy" if is_proxy else "process"
if not self.is_valid_body(body, keyname):
msg = _("Invalid request body")
raise exception.InvalidInput(reason=msg)
values = body[keyname]
ppid = values.get("ppid")
name = values.get("name")
keypair_id = values.get("keypair_id")
securitygroup_ids = values.get("securitygroup_ids")
glance_image_id = values.get("glance_image_id")
nova_flavor_id = values.get("nova_flavor_id")
userdata = values.get("userdata")
args = values.get("args")
self._uuid_check(gid, ppid, keypair_id)
pid = unicode(uuid.uuid4())
if not name:
prefix = "proxy-" if is_proxy else "process-"
name = prefix + pid
if ppid:
parent_process = db.process_get_by_pid(context, gid, ppid)
nova_keypair_id = None
if keypair_id:
keypair = db.keypair_get_by_keypair_id(
context, gid, keypair_id)
nova_keypair_id = keypair["nova_keypair_id"]
elif ppid:
keypair_id = parent_process.get("keypair_id")
if keypair_id:
keypair = db.keypair_get_by_keypair_id(
context, gid, keypair_id)
nova_keypair_id = keypair["nova_keypair_id"]
else:
default_keypair = db.keypair_get_all(
context, gid,
filters={"is_default": True})
if default_keypair:
keypair_id = default_keypair[0]["keypair_id"]
nova_keypair_id = default_keypair[0]["nova_keypair_id"]
if securitygroup_ids is not None and\
not isinstance(securitygroup_ids, list):
msg = _("securitygroupids must be a list")
raise exception.InvalidInput(reason=msg)
elif securitygroup_ids:
neutron_securitygroup_ids = []
for id in securitygroup_ids:
self._uuid_check(securitygroup_id=id)
securitygroup = db.securitygroup_get_by_securitygroup_id(
context, gid, id)
neutron_securitygroup_ids.append(
securitygroup["neutron_securitygroup_id"])
elif ppid:
securitygroups = parent_process.get("securitygroups")
securitygroup_ids =\
[securitygroup["securitygroup_id"]
for securitygroup in securitygroups]
neutron_securitygroup_ids =\
[securitygroup["neutron_securitygroup_id"]
for securitygroup in securitygroups]
else:
default_securitygroups = db.securitygroup_get_all(
context, gid,
filters={"is_default": True})
if default_securitygroups:
securitygroup_ids =\
[securitygroup["securitygroup_id"]
for securitygroup in default_securitygroups]
neutron_securitygroup_ids =\
[securitygroup["neutron_securitygroup_id"]
for securitygroup in default_securitygroups]
else:
msg = _(
"securitygroup_ids is required. Default \
securitygroup_ids are not registered.")
raise exception.InvalidInput(reason=msg)
if not glance_image_id and ppid:
glance_image_id = parent_process.get("glance_image_id")
if not nova_flavor_id and ppid:
nova_flavor_id = parent_process.get("nova_flavor_id")
if userdata:
try:
base64.b64decode(userdata)
except TypeError:
msg = _("userdadta must be a base64 encoded value.")
raise exception.InvalidInput(reason=msg)
networks = db.network_get_all(context, gid)
if not networks:
msg = _("Netwoks does not exist in the group %s" % gid)
raise webob.exc.HTTPBadRequest(explanation=msg)
network_ids =\
[network["network_id"] for network in networks]
neutron_network_ids =\
[network["neutron_network_id"] for network in networks]
nics = []
for id in neutron_network_ids:
nics.append({"net-id": id})
if args is None:
args = {}
elif args is not None and\
not isinstance(args, dict):
msg = _("args must be a dict.")
raise exception.InvalidInput(reason=msg)
else:
for key in args.keys():
args[key] = str(args[key])
default_args = {
"gid": gid,
"pid": pid,
}
if ppid:
default_args["ppid"] = ppid
if is_proxy:
default_args["rackapi_ip"] = cfg.CONF.my_ip
default_args["os_username"] = cfg.CONF.os_username
default_args["os_password"] = cfg.CONF.os_password
default_args["os_tenant_name"] = cfg.CONF.os_tenant_name
default_args["os_auth_url"] = cfg.CONF.os_auth_url
default_args["os_region_name"] = cfg.CONF.os_region_name
else:
proxy_instance_id = proxy[0]["nova_instance_id"]
default_args["proxy_ip"] = self.manager.get_process_address(
context, proxy_instance_id)
args.update(default_args)
valid_values = {}
valid_values["gid"] = gid
valid_values["ppid"] = ppid
valid_values["pid"] = pid
valid_values["display_name"] = name
valid_values["keypair_id"] = keypair_id
valid_values["securitygroup_ids"] = securitygroup_ids
valid_values["glance_image_id"] = glance_image_id
valid_values["nova_flavor_id"] = nova_flavor_id
valid_values["userdata"] = userdata
valid_values["args"] = json.dumps(args)
valid_values["is_proxy"] = True if is_proxy else False
valid_values["network_ids"] = network_ids
if is_proxy:
ipc_endpoint = values.get("ipc_endpoint")
shm_endpoint = values.get("shm_endpoint")
fs_endpoint = values.get("fs_endpoint")
if ipc_endpoint:
utils.check_string_length(
ipc_endpoint, 'ipc_endpoint', min_length=1,
max_length=255)
if shm_endpoint:
utils.check_string_length(
shm_endpoint, 'shm_endpoint', min_length=1,
max_length=255)
if fs_endpoint:
utils.check_string_length(
fs_endpoint, 'fs_endpoint', min_length=1,
max_length=255)
valid_values["ipc_endpoint"] = ipc_endpoint
valid_values["shm_endpoint"] = shm_endpoint
valid_values["fs_endpoint"] = fs_endpoint
boot_values = {}
boot_values["name"] = name
boot_values["key_name"] = nova_keypair_id
boot_values["security_groups"] = neutron_securitygroup_ids
boot_values["image"] = glance_image_id
boot_values["flavor"] = nova_flavor_id
boot_values["userdata"] = userdata
boot_values["meta"] = args
boot_values["nics"] = nics
return valid_values, boot_values
try:
context = req.environ['rack.context']
values, boot_values = _validate(context, body, gid, is_proxy)
nova_instance_id, status = self.manager.process_create(
context, **boot_values)
values["nova_instance_id"] = nova_instance_id
values["user_id"] = context.user_id
values["project_id"] = context.project_id
process = db.process_create(context, values,
values.pop("network_ids"),
values.pop("securitygroup_ids"))
process["status"] = status
except exception.InvalidInput as e:
raise webob.exc.HTTPBadRequest(explanation=e.format_message())
except exception.NotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
return self._view_builder.create(process)
Votes: 0
Example 162
Project: swift — Source File: test_empty_device_handoff.py
def test_main(self):
# Create container
container = 'container-%s' % uuid4()
client.put_container(self.url, self.token, container,
headers={'X-Storage-Policy':
self.policy.name})
cpart, cnodes = self.container_ring.get_nodes(self.account, container)
cnode = cnodes[0]
obj = 'object-%s' % uuid4()
opart, onodes = self.object_ring.get_nodes(
self.account, container, obj)
onode = onodes[0]
# Kill one container/obj primary server
kill_server((onode['ip'], onode['port']), self.ipport2server)
# Delete the default data directory for objects on the primary server
obj_dir = '%s/%s' % (self._get_objects_dir(onode),
get_data_dir(self.policy))
shutil.rmtree(obj_dir, True)
self.assertFalse(os.path.exists(obj_dir))
# Create container/obj (goes to two primary servers and one handoff)
client.put_object(self.url, self.token, container, obj, 'VERIFY')
odata = client.get_object(self.url, self.token, container, obj)[-1]
if odata != 'VERIFY':
raise Exception('Object GET did not return VERIFY, instead it '
'returned: %s' % repr(odata))
# Stash the on disk data from a primary for future comparison with the
# handoff - this may not equal 'VERIFY' if for example the proxy has
# crypto enabled
direct_get_data = direct_client.direct_get_object(
onodes[1], opart, self.account, container, obj, headers={
'X-Backend-Storage-Policy-Index': self.policy.idx})[-1]
# Kill other two container/obj primary servers
# to ensure GET handoff works
for node in onodes[1:]:
kill_server((node['ip'], node['port']), self.ipport2server)
# Indirectly through proxy assert we can get container/obj
odata = client.get_object(self.url, self.token, container, obj)[-1]
if odata != 'VERIFY':
raise Exception('Object GET did not return VERIFY, instead it '
'returned: %s' % repr(odata))
# Restart those other two container/obj primary servers
for node in onodes[1:]:
start_server((node['ip'], node['port']), self.ipport2server)
self.assertFalse(os.path.exists(obj_dir))
# We've indirectly verified the handoff node has the object, but
# let's directly verify it.
# Directly to handoff server assert we can get container/obj
another_onode = next(self.object_ring.get_more_nodes(opart))
odata = direct_client.direct_get_object(
another_onode, opart, self.account, container, obj,
headers={'X-Backend-Storage-Policy-Index': self.policy.idx})[-1]
self.assertEqual(direct_get_data, odata)
# Assert container listing (via proxy and directly) has container/obj
objs = [o['name'] for o in
client.get_container(self.url, self.token, container)[1]]
if obj not in objs:
raise Exception('Container listing did not know about object')
timeout = time.time() + 5
found_objs_on_cnode = []
while time.time() < timeout:
for cnode in [c for c in cnodes if cnodes not in
found_objs_on_cnode]:
objs = [o['name'] for o in
direct_client.direct_get_container(
cnode, cpart, self.account, container)[1]]
if obj in objs:
found_objs_on_cnode.append(cnode)
if len(found_objs_on_cnode) >= len(cnodes):
break
time.sleep(0.3)
if len(found_objs_on_cnode) < len(cnodes):
missing = ['%s:%s' % (cnode['ip'], cnode['port']) for cnode in
cnodes if cnode not in found_objs_on_cnode]
raise Exception('Container servers %r did not know about object' %
missing)
# Bring the first container/obj primary server back up
start_server((onode['ip'], onode['port']), self.ipport2server)
# Assert that it doesn't have container/obj yet
self.assertFalse(os.path.exists(obj_dir))
try:
direct_client.direct_get_object(
onode, opart, self.account, container, obj, headers={
'X-Backend-Storage-Policy-Index': self.policy.idx})
except ClientException as err:
self.assertEqual(err.http_status, 404)
self.assertFalse(os.path.exists(obj_dir))
else:
self.fail("Expected ClientException but didn't get it")
# Run object replication for first container/obj primary server
_, num = get_server_number(
(onode['ip'], onode.get('replication_port', onode['port'])),
self.ipport2server)
Manager(['object-replicator']).once(number=num)
# Run object replication for handoff node
_, another_num = get_server_number(
(another_onode['ip'],
another_onode.get('replication_port', another_onode['port'])),
self.ipport2server)
Manager(['object-replicator']).once(number=another_num)
# Assert the first container/obj primary server now has container/obj
odata = direct_client.direct_get_object(
onode, opart, self.account, container, obj, headers={
'X-Backend-Storage-Policy-Index': self.policy.idx})[-1]
self.assertEqual(direct_get_data, odata)
# Assert the handoff server no longer has container/obj
try:
direct_client.direct_get_object(
another_onode, opart, self.account, container, obj, headers={
'X-Backend-Storage-Policy-Index': self.policy.idx})
except ClientException as err:
self.assertEqual(err.http_status, 404)
else:
self.fail("Expected ClientException but didn't get it")
Votes: 0
Example 163
Project: swift — Source File: test_object_handoff.py
    def test_main(self):
        """End-to-end object handoff and reversion test.

        NOTE(review): indentation reconstructed — the scraped source lost
        leading whitespace; verify against the original test file.

        Kills an object primary, PUTs an object (so it lands on the
        remaining primaries plus a handoff), verifies GETs via proxy and
        directly from the handoff, plants a stale rsync temp file on the
        handoff, then runs replication and asserts the object (and not
        the temp file) lands on the revived primary while the handoff
        copy is removed.  Finally deletes the object with one primary
        down and checks the tombstone replicates the same way.
        """
        # Create container
        container = 'container-%s' % uuid4()
        client.put_container(self.url, self.token, container,
                             headers={'X-Storage-Policy':
                                      self.policy.name})
        # Kill one container/obj primary server
        cpart, cnodes = self.container_ring.get_nodes(self.account, container)
        cnode = cnodes[0]
        obj = 'object-%s' % uuid4()
        opart, onodes = self.object_ring.get_nodes(
            self.account, container, obj)
        onode = onodes[0]
        kill_server((onode['ip'], onode['port']), self.ipport2server)
        # Create container/obj (goes to two primary servers and one handoff)
        client.put_object(self.url, self.token, container, obj, 'VERIFY')
        odata = client.get_object(self.url, self.token, container, obj)[-1]
        if odata != 'VERIFY':
            raise Exception('Object GET did not return VERIFY, instead it '
                            'returned: %s' % repr(odata))
        # Stash the on disk data from a primary for future comparison with the
        # handoff - this may not equal 'VERIFY' if for example the proxy has
        # crypto enabled
        direct_get_data = direct_client.direct_get_object(
            onodes[1], opart, self.account, container, obj, headers={
                'X-Backend-Storage-Policy-Index': self.policy.idx})[-1]
        # Kill other two container/obj primary servers
        # to ensure GET handoff works
        for node in onodes[1:]:
            kill_server((node['ip'], node['port']), self.ipport2server)
        # Indirectly through proxy assert we can get container/obj
        odata = client.get_object(self.url, self.token, container, obj)[-1]
        if odata != 'VERIFY':
            raise Exception('Object GET did not return VERIFY, instead it '
                            'returned: %s' % repr(odata))
        # Restart those other two container/obj primary servers
        for node in onodes[1:]:
            start_server((node['ip'], node['port']), self.ipport2server)
        # We've indirectly verified the handoff node has the container/object,
        # but let's directly verify it.
        another_onode = next(self.object_ring.get_more_nodes(opart))
        odata = direct_client.direct_get_object(
            another_onode, opart, self.account, container, obj, headers={
                'X-Backend-Storage-Policy-Index': self.policy.idx})[-1]
        self.assertEqual(direct_get_data, odata)
        # drop a tempfile in the handoff's datadir, like it might have
        # had if there was an rsync failure while it was previously a
        # primary
        handoff_device_path = self.device_dir('object', another_onode)
        data_filename = None
        for root, dirs, files in os.walk(handoff_device_path):
            for filename in files:
                if filename.endswith('.data'):
                    data_filename = filename
                    temp_filename = '.%s.6MbL6r' % data_filename
                    temp_filepath = os.path.join(root, temp_filename)
        if not data_filename:
            self.fail('Did not find any data files on %r' %
                      handoff_device_path)
        # create the fake rsync dropping (empty file is enough)
        open(temp_filepath, 'w')
        # Assert container listing (via proxy and directly) has container/obj
        objs = [o['name'] for o in
                client.get_container(self.url, self.token, container)[1]]
        if obj not in objs:
            raise Exception('Container listing did not know about object')
        for cnode in cnodes:
            objs = [o['name'] for o in
                    direct_client.direct_get_container(
                        cnode, cpart, self.account, container)[1]]
            if obj not in objs:
                raise Exception(
                    'Container server %s:%s did not know about object' %
                    (cnode['ip'], cnode['port']))
        # Bring the first container/obj primary server back up
        start_server((onode['ip'], onode['port']), self.ipport2server)
        # Assert that it doesn't have container/obj yet
        try:
            direct_client.direct_get_object(
                onode, opart, self.account, container, obj, headers={
                    'X-Backend-Storage-Policy-Index': self.policy.idx})
        except ClientException as err:
            self.assertEqual(err.http_status, 404)
        else:
            self.fail("Expected ClientException but didn't get it")
        # Run object replication, ensuring we run the handoff node last so it
        # will remove its extra handoff partition
        for node in onodes:
            try:
                port_num = node['replication_port']
            except KeyError:
                port_num = node['port']
            # NOTE(review): "/" assumes Python 2 integer division for the
            # (port - 6000) // 10 server-number convention — confirm.
            node_id = (port_num - 6000) / 10
            Manager(['object-replicator']).once(number=node_id)
        try:
            another_port_num = another_onode['replication_port']
        except KeyError:
            another_port_num = another_onode['port']
        another_num = (another_port_num - 6000) / 10
        Manager(['object-replicator']).once(number=another_num)
        # Assert the first container/obj primary server now has container/obj
        odata = direct_client.direct_get_object(
            onode, opart, self.account, container, obj, headers={
                'X-Backend-Storage-Policy-Index': self.policy.idx})[-1]
        self.assertEqual(direct_get_data, odata)
        # and that it does *not* have a temporary rsync dropping!
        found_data_filename = False
        primary_device_path = self.device_dir('object', onode)
        for root, dirs, files in os.walk(primary_device_path):
            for filename in files:
                if filename.endswith('.6MbL6r'):
                    self.fail('Found unexpected file %s' %
                              os.path.join(root, filename))
                if filename == data_filename:
                    found_data_filename = True
        self.assertTrue(found_data_filename,
                        'Did not find data file %r on %r' % (
                            data_filename, primary_device_path))
        # Assert the handoff server no longer has container/obj
        try:
            direct_client.direct_get_object(
                another_onode, opart, self.account, container, obj, headers={
                    'X-Backend-Storage-Policy-Index': self.policy.idx})
        except ClientException as err:
            self.assertEqual(err.http_status, 404)
        else:
            self.fail("Expected ClientException but didn't get it")
        # Kill the first container/obj primary server again (we have two
        # primaries and the handoff up now)
        kill_server((onode['ip'], onode['port']), self.ipport2server)
        # Delete container/obj
        try:
            client.delete_object(self.url, self.token, container, obj)
        except client.ClientException as err:
            if self.object_ring.replica_count > 2:
                raise
            # Object DELETE returning 503 for (404, 204)
            # remove this with fix for
            # https://bugs.launchpad.net/swift/+bug/1318375
            self.assertEqual(503, err.http_status)
        # Assert we can't head container/obj
        try:
            client.head_object(self.url, self.token, container, obj)
        except client.ClientException as err:
            self.assertEqual(err.http_status, 404)
        else:
            self.fail("Expected ClientException but didn't get it")
        # Assert container/obj is not in the container listing, both indirectly
        # and directly
        objs = [o['name'] for o in
                client.get_container(self.url, self.token, container)[1]]
        if obj in objs:
            raise Exception('Container listing still knew about object')
        for cnode in cnodes:
            objs = [o['name'] for o in
                    direct_client.direct_get_container(
                        cnode, cpart, self.account, container)[1]]
            if obj in objs:
                raise Exception(
                    'Container server %s:%s still knew about object' %
                    (cnode['ip'], cnode['port']))
        # Restart the first container/obj primary server again
        start_server((onode['ip'], onode['port']), self.ipport2server)
        # Assert it still has container/obj
        direct_client.direct_get_object(
            onode, opart, self.account, container, obj, headers={
                'X-Backend-Storage-Policy-Index': self.policy.idx})
        # Run object replication, ensuring we run the handoff node last so it
        # will remove its extra handoff partition
        for node in onodes:
            try:
                port_num = node['replication_port']
            except KeyError:
                port_num = node['port']
            node_id = (port_num - 6000) / 10
            Manager(['object-replicator']).once(number=node_id)
        another_node_id = (another_port_num - 6000) / 10
        Manager(['object-replicator']).once(number=another_node_id)
        # Assert primary node no longer has container/obj
        try:
            direct_client.direct_get_object(
                another_onode, opart, self.account, container, obj, headers={
                    'X-Backend-Storage-Policy-Index': self.policy.idx})
        except ClientException as err:
            self.assertEqual(err.http_status, 404)
        else:
            self.fail("Expected ClientException but didn't get it")
Votes: 0
Example 164
Project: swift — Source File: test_replication_servers_working.py
def test_main(self):
    """Verify object replication restores a wiped storage device.

    Scenario:
      1. Create one account, container and object file.
      2. Find the node with the most data and wipe its device.
      3. Wait up to 60 seconds and check everything was replicated back.
      4. Delete the object directories but keep "hashes.pkl"; verify the
         partition dirs are NOT recreated (the cached hashes suppress sync).
      5. Delete "hashes.pkl"; verify all files are replicated again.
    """
    path_list = []
    data_dir = get_data_dir(self.policy)
    # Figure out where the devices are
    for node_id in range(1, 5):
        conf = readconf(self.configs['object-server'][node_id])
        device_path = conf['app:object-server']['devices']
        for dev in self.object_ring.devs:
            if dev['port'] == int(conf['app:object-server']['bind_port']):
                device = dev['device']
                path_list.append(os.path.join(device_path, device))
    # Put data to storage nodes
    container = 'container-%s' % uuid4()
    client.put_container(self.url, self.token, container,
                         headers={'X-Storage-Policy':
                                  self.policy.name})
    obj = 'object-%s' % uuid4()
    client.put_object(self.url, self.token, container, obj, 'VERIFY')
    # Get all data file information
    (files_list, dir_list) = collect_info(path_list)
    num = find_max_occupancy_node(dir_list)
    test_node = path_list[num]
    # .pending files are transient write-ahead entries; ignore them
    # when comparing before/after listings.
    test_node_files_list = [f for f in files_list[num]
                            if not f.endswith('.pending')]
    # tmp* dirs are scratch space; ignore them as well.
    test_node_dir_list = [d for d in dir_list[num]
                          if not d.startswith('tmp')]
    # Run all replicators
    try:
        self.replicators.start()
        # Delete some files
        for directory in os.listdir(test_node):
            shutil.rmtree(os.path.join(test_node, directory))
        self.assertFalse(os.listdir(test_node))
        # We will keep trying these tests until they pass for up to 60s
        begin = time.time()
        while True:
            (new_files_list, new_dir_list) = collect_info([test_node])
            try:
                # Check replicate files and dir
                for files in test_node_files_list:
                    self.assertIn(files, new_files_list[0])
                for dir in test_node_dir_list:
                    self.assertIn(dir, new_dir_list[0])
                break
            except Exception:
                if time.time() - begin > 60:
                    raise
                time.sleep(1)
        # Check behavior by deleting hashes.pkl file
        for directory in os.listdir(os.path.join(test_node, data_dir)):
            for input_dir in os.listdir(os.path.join(
                    test_node, data_dir, directory)):
                if os.path.isdir(os.path.join(
                        test_node, data_dir, directory, input_dir)):
                    shutil.rmtree(os.path.join(
                        test_node, data_dir, directory, input_dir))
        # We will keep trying these tests until they pass for up to 60s
        begin = time.time()
        while True:
            try:
                for directory in os.listdir(os.path.join(
                        test_node, data_dir)):
                    for input_dir in os.listdir(os.path.join(
                            test_node, data_dir, directory)):
                        # BUG FIX: the original passed a bare '/' as a
                        # join component; os.path.join discards every
                        # preceding component at an absolute path, so it
                        # checked '/<input_dir>' — a path that never
                        # exists — making the assertion vacuously true.
                        # Join the real path so we actually verify the
                        # partition dirs were not replicated back while
                        # hashes.pkl is still present.
                        self.assertFalse(os.path.isdir(
                            os.path.join(test_node, data_dir,
                                         directory, input_dir)))
                break
            except Exception:
                if time.time() - begin > 60:
                    raise
                time.sleep(1)
        for directory in os.listdir(os.path.join(test_node, data_dir)):
            os.remove(os.path.join(
                test_node, data_dir, directory, 'hashes.pkl'))
        # We will keep trying these tests until they pass for up to 60s
        begin = time.time()
        while True:
            try:
                (new_files_list, new_dir_list) = collect_info([test_node])
                # Check replicate files and dirs
                for files in test_node_files_list:
                    self.assertIn(files, new_files_list[0])
                for directory in test_node_dir_list:
                    self.assertIn(directory, new_dir_list[0])
                break
            except Exception:
                if time.time() - begin > 60:
                    raise
                time.sleep(1)
    finally:
        self.replicators.stop()
Example 165 (0 votes)
Project: tricircle — Source File: api.py
@require_context
@_retry_on_deadlock
def quota_reserve(context, resources, quotas, deltas, expire,
                  until_refresh, max_age, project_id=None):
    """Check quotas and create reservations for the requested deltas.

    :param context: request context; elevated internally for DB writes
    :param resources: mapping of resource name -> resource definition
        (not referenced directly in this implementation; kept for API
        compatibility with other quota drivers)
    :param quotas: mapping of resource name -> quota limit (a negative
        limit means unlimited)
    :param deltas: mapping of resource name -> requested usage change
    :param expire: expiration value for the created reservations
    :param until_refresh: count-down value after which a usage refresh
        is scheduled
    :param max_age: max seconds since last update before a usage row is
        considered stale and refreshed
    :param project_id: project to reserve for; defaults to the context's
        project
    :returns: list of reservation UUIDs
    :raises exceptions.OverQuota: if any non-negative delta would push
        usage + reserved over a non-negative quota limit
    """
    elevated = context.elevated()
    with context.session.begin():
        if project_id is None:
            project_id = context.project_id
        # Get the current usages
        usages = _get_quota_usages(context, context.session, project_id)
        # Handle usage refresh
        refresh = False
        work = set(deltas.keys())
        while work:
            resource = work.pop()
            # Do we need to refresh the usage?
            if resource not in usages:
                # No usage row yet for this resource: create one with
                # zero in_use / reserved counts.
                usages[resource] = _quota_usage_create(elevated,
                                                       project_id,
                                                       resource,
                                                       0, 0,
                                                       until_refresh or None,
                                                       session=context.session)
                refresh = True
            elif usages[resource].in_use < 0:
                # Negative in_use count indicates a desync, so try to
                # heal from that...
                refresh = True
            elif usages[resource].until_refresh is not None:
                usages[resource].until_refresh -= 1
                if usages[resource].until_refresh <= 0:
                    refresh = True
            elif max_age and usages[resource].updated_at is not None and (
                (usages[resource].updated_at -
                 timeutils.utcnow()).seconds >= max_age):
                refresh = True
            if refresh:
                # no actual usage refresh here
                # refresh from the bottom pod
                usages[resource].until_refresh = until_refresh or None
                # Because more than one resource may be refreshed
                # by the call to the sync routine, and we don't
                # want to double-sync, we make sure all refreshed
                # resources are dropped from the work set.
                work.discard(resource)
                # NOTE(Vek): We make the assumption that the sync
                #            routine actually refreshes the
                #            resources that it is the sync routine
                #            for.  We don't check, because this is
                #            a best-effort mechanism.
        # Check for deltas that would go negative
        unders = [r for r, delta in deltas.items()
                  if delta < 0 and delta + usages[r].in_use < 0]
        # Now, let's check the quotas
        # NOTE(Vek): We're only concerned about positive increments.
        #            If a project has gone over quota, we want them to
        #            be able to reduce their usage without any
        #            problems.
        overs = [r for r, delta in deltas.items()
                 if quotas[r] >= 0 and delta >= 0 and
                 quotas[r] < delta + usages[r].in_use + usages[r].reserved]
        # NOTE(Vek): The quota check needs to be in the transaction,
        #            but the transaction doesn't fail just because
        #            we're over quota, so the OverQuota raise is
        #            outside the transaction.  If we did the raise
        #            here, our usage updates would be discarded, but
        #            they're not invalidated by being over-quota.
        # Create the reservations
        if not overs:
            reservations = []
            for resource, delta in deltas.items():
                reservation = _reservation_create(elevated,
                                                  str(uuid.uuid4()),
                                                  usages[resource],
                                                  project_id,
                                                  resource, delta, expire,
                                                  session=context.session)
                reservations.append(reservation.uuid)
                # Also update the reserved quantity
                # NOTE(Vek): Again, we are only concerned here about
                #            positive increments.  Here, though, we're
                #            worried about the following scenario:
                #
                #            1) User initiates resize down.
                #            2) User allocates a new instance.
                #            3) Resize down fails or is reverted.
                #            4) User is now over quota.
                #
                #            To prevent this, we only update the
                #            reserved value if the delta is positive.
                if delta > 0:
                    usages[resource].reserved += delta
    # Warnings/raises happen outside the transaction (see NOTE above) so
    # the usage updates committed above are preserved.
    if unders:
        LOG.warning(_LW("Change will make usage less than 0 for the following "
                        "resources: %s"), unders)
    if overs:
        usages = {k: dict(in_use=v['in_use'], reserved=v['reserved'])
                  for k, v in usages.items()}
        raise exceptions.OverQuota(overs=sorted(overs), quotas=quotas,
                                   usages=usages)
    return reservations
Example 166 (0 votes)
Project: zaqar — Source File: test_claims.py
def test_lifecycle(self):
    """Exercise the full claim lifecycle over the websocket protocol.

    Walks through: claiming messages, listing with/without claimed
    messages and echo, deleting a message with and without its claim id,
    cross-project access, claim update/get/delete, and operations on a
    no-longer-existing claim.
    """
    # First, claim some messages
    action = consts.CLAIM_CREATE
    body = {"queue_name": "skittle",
            "ttl": 100,
            "grace": 60}
    send_mock = mock.Mock()
    self.protocol.sendMessage = send_mock
    req = test_utils.create_request(action, body, self.headers)
    self.protocol.onMessage(req, False)
    resp = json.loads(send_mock.call_args[0][0])
    self.assertEqual(201, resp['headers']['status'])
    claimed_messages = resp['body']['messages']
    claim_id = resp['body']['claim_id']
    # No more messages to claim
    body = {"queue_name": "skittle",
            "ttl": 100,
            "grace": 60}
    req = test_utils.create_request(action, body, self.headers)
    self.protocol.onMessage(req, False)
    resp = json.loads(send_mock.call_args[0][0])
    self.assertEqual(204, resp['headers']['status'])
    # Listing messages, by default, won't include claimed, will echo
    action = consts.MESSAGE_LIST
    body = {"queue_name": "skittle",
            "echo": True}
    req = test_utils.create_request(action, body, self.headers)
    self.protocol.onMessage(req, False)
    resp = json.loads(send_mock.call_args[0][0])
    self.assertEqual(200, resp['headers']['status'])
    self.assertEqual([], resp['body']['messages'])
    # Listing messages, by default, won't include claimed, won't echo
    body = {"queue_name": "skittle",
            "echo": False}
    req = test_utils.create_request(action, body, self.headers)
    self.protocol.onMessage(req, False)
    resp = json.loads(send_mock.call_args[0][0])
    self.assertEqual(200, resp['headers']['status'])
    self.assertEqual([], resp['body']['messages'])
    # List messages, include_claimed, but don't echo
    body = {"queue_name": "skittle",
            "include_claimed": True,
            "echo": False}
    req = test_utils.create_request(action, body, self.headers)
    self.protocol.onMessage(req, False)
    resp = json.loads(send_mock.call_args[0][0])
    self.assertEqual(200, resp['headers']['status'])
    self.assertEqual(resp['body']['messages'], [])
    # List messages with a different client-id and echo=false.
    # Should return some messages
    body = {"queue_name": "skittle",
            "echo": False}
    headers = {
        'Client-ID': str(uuid.uuid4()),
        'X-Project-ID': self.project_id
    }
    req = test_utils.create_request(action, body, headers)
    self.protocol.onMessage(req, False)
    resp = json.loads(send_mock.call_args[0][0])
    self.assertEqual(200, resp['headers']['status'])
    # Include claimed messages this time, and echo
    body = {"queue_name": "skittle",
            "include_claimed": True,
            "echo": True}
    req = test_utils.create_request(action, body, self.headers)
    self.protocol.onMessage(req, False)
    resp = json.loads(send_mock.call_args[0][0])
    self.assertEqual(200, resp['headers']['status'])
    self.assertEqual(len(claimed_messages), len(resp['body']['messages']))
    message_id_1 = resp['body']['messages'][0]['id']
    message_id_2 = resp['body']['messages'][1]['id']
    # Try to delete the message without submitting a claim_id
    action = consts.MESSAGE_DELETE
    body = {"queue_name": "skittle",
            "message_id": message_id_1}
    req = test_utils.create_request(action, body, self.headers)
    self.protocol.onMessage(req, False)
    resp = json.loads(send_mock.call_args[0][0])
    self.assertEqual(403, resp['headers']['status'])
    # Delete the message and its associated claim
    body = {"queue_name": "skittle",
            "message_id": message_id_1,
            "claim_id": claim_id}
    req = test_utils.create_request(action, body, self.headers)
    self.protocol.onMessage(req, False)
    resp = json.loads(send_mock.call_args[0][0])
    self.assertEqual(204, resp['headers']['status'])
    # Try to get it from the wrong project
    headers = {
        'Client-ID': str(uuid.uuid4()),
        'X-Project-ID': 'someproject'
    }
    action = consts.MESSAGE_GET
    body = {"queue_name": "skittle",
            "message_id": message_id_2}
    req = test_utils.create_request(action, body, headers)
    self.protocol.onMessage(req, False)
    resp = json.loads(send_mock.call_args[0][0])
    self.assertEqual(404, resp['headers']['status'])
    # Get the message
    action = consts.MESSAGE_GET
    body = {"queue_name": "skittle",
            "message_id": message_id_2}
    req = test_utils.create_request(action, body, self.headers)
    self.protocol.onMessage(req, False)
    resp = json.loads(send_mock.call_args[0][0])
    self.assertEqual(200, resp['headers']['status'])
    # Update the claim
    creation = timeutils.utcnow()
    action = consts.CLAIM_UPDATE
    body = {"queue_name": "skittle",
            "ttl": 60,
            "grace": 60,
            "claim_id": claim_id}
    req = test_utils.create_request(action, body, self.headers)
    self.protocol.onMessage(req, False)
    resp = json.loads(send_mock.call_args[0][0])
    self.assertEqual(204, resp['headers']['status'])
    # Get the claimed messages (again)
    action = consts.CLAIM_GET
    body = {"queue_name": "skittle",
            "claim_id": claim_id}
    req = test_utils.create_request(action, body, self.headers)
    self.protocol.onMessage(req, False)
    query = timeutils.utcnow()
    resp = json.loads(send_mock.call_args[0][0])
    self.assertEqual(200, resp['headers']['status'])
    self.assertEqual(60, resp['body']['ttl'])
    message_id_3 = resp['body']['messages'][0]['id']
    # The claim's reported age must be within the window between its
    # creation and this query.
    estimated_age = timeutils.delta_seconds(creation, query)
    self.assertTrue(estimated_age > resp['body']['age'])
    # Delete the claim
    action = consts.CLAIM_DELETE
    body = {"queue_name": "skittle",
            "claim_id": claim_id}
    req = test_utils.create_request(action, body, self.headers)
    self.protocol.onMessage(req, False)
    resp = json.loads(send_mock.call_args[0][0])
    self.assertEqual(204, resp['headers']['status'])
    # Try to delete a message with an invalid claim ID
    action = consts.MESSAGE_DELETE
    body = {"queue_name": "skittle",
            "message_id": message_id_3,
            "claim_id": claim_id}
    req = test_utils.create_request(action, body, self.headers)
    self.protocol.onMessage(req, False)
    resp = json.loads(send_mock.call_args[0][0])
    self.assertEqual(400, resp['headers']['status'])
    # Make sure it wasn't deleted!
    action = consts.MESSAGE_GET
    body = {"queue_name": "skittle",
            "message_id": message_id_2}
    req = test_utils.create_request(action, body, self.headers)
    self.protocol.onMessage(req, False)
    resp = json.loads(send_mock.call_args[0][0])
    self.assertEqual(200, resp['headers']['status'])
    # Try to get a claim that doesn't exist
    action = consts.CLAIM_GET
    body = {"queue_name": "skittle",
            "claim_id": claim_id}
    req = test_utils.create_request(action, body, self.headers)
    self.protocol.onMessage(req, False)
    resp = json.loads(send_mock.call_args[0][0])
    self.assertEqual(404, resp['headers']['status'])
    # Try to update a claim that doesn't exist
    action = consts.CLAIM_UPDATE
    body = {"queue_name": "skittle",
            "ttl": 60,
            "grace": 60,
            "claim_id": claim_id}
    req = test_utils.create_request(action, body, self.headers)
    self.protocol.onMessage(req, False)
    resp = json.loads(send_mock.call_args[0][0])
    self.assertEqual(404, resp['headers']['status'])
Example 167 (0 votes)
Project: zaqar — Source File: test_queue_lifecycle.py
def test_list(self):
    """Exercise queue listing over the websocket protocol.

    Creates queues under two projects and verifies limit handling,
    project isolation, detailed listing, and marker-based paging.
    """
    arbitrary_number = 644079696574693
    project_id = str(arbitrary_number)
    client_id = str(uuid.uuid4())
    headers = {
        'X-Project-ID': project_id,
        'Client-ID': client_id
    }
    send_mock = mock.patch.object(self.protocol, 'sendMessage')
    self.addCleanup(send_mock.stop)
    sender = send_mock.start()
    # NOTE(kgriffs): It's important that this one sort after the one
    # above. This is in order to prove that bug/1236605 is fixed, and
    # stays fixed!
    # NOTE(vkmc): In websockets as well!
    alt_project_id = str(arbitrary_number + 1)
    # List empty
    action = consts.QUEUE_LIST
    body = {}
    req = test_utils.create_request(action, body, headers)

    def validator(resp, isBinary):
        resp = json.loads(resp)
        self.assertEqual(200, resp['headers']['status'])
        self.assertEqual([], resp['body']['queues'])

    sender.side_effect = validator
    self.protocol.onMessage(req, False)
    # Payload exceeded
    body = {'limit': 21}
    req = test_utils.create_request(action, body, headers)

    def validator(resp, isBinary):
        resp = json.loads(resp)
        self.assertEqual(400, resp['headers']['status'])

    sender.side_effect = validator
    self.protocol.onMessage(req, False)
    # Create some
    def create_queue(project_id, queue_name, metadata):
        altheaders = {'Client-ID': client_id}
        if project_id is not None:
            altheaders['X-Project-ID'] = project_id
        action = consts.QUEUE_CREATE
        # NOTE(review): this mutates the enclosing `body` dict (which
        # still holds {'limit': 21} from above) rather than building a
        # fresh one — presumably harmless here; confirm if refactoring.
        body['queue_name'] = queue_name
        body['metadata'] = metadata
        req = test_utils.create_request(action, body, altheaders)

        def validator(resp, isBinary):
            resp = json.loads(resp)
            self.assertEqual(201, resp['headers']['status'])

        sender.side_effect = validator
        self.protocol.onMessage(req, False)

    create_queue(project_id, 'q1', {"node": 31})
    create_queue(project_id, 'q2', {"node": 32})
    create_queue(project_id, 'q3', {"node": 33})
    create_queue(alt_project_id, 'q3', {"alt": 1})
    # List (limit)
    body = {'limit': 2}
    req = test_utils.create_request(action, body, headers)

    def validator(resp, isBinary):
        resp = json.loads(resp)
        self.assertEqual(2, len(resp['body']['queues']))

    sender.side_effect = validator
    self.protocol.onMessage(req, False)
    # List (no metadata, get all)
    body = {'limit': 5}
    req = test_utils.create_request(action, body, headers)

    def validator(resp, isBinary):
        resp = json.loads(resp)
        self.assertEqual(200, resp['headers']['status'])
        # Ensure we didn't pick up the queue from the alt project.
        self.assertEqual(3, len(resp['body']['queues']))

    sender.side_effect = validator
    self.protocol.onMessage(req, False)
    # List with metadata
    body = {'detailed': True}
    req = test_utils.create_request(action, body, headers)

    def validator(resp, isBinary):
        resp = json.loads(resp)
        self.assertEqual(200, resp['headers']['status'])

    sender.side_effect = validator
    self.protocol.onMessage(req, False)
    action = consts.QUEUE_GET
    body = {"queue_name": "q1"}
    req = test_utils.create_request(action, body, headers)

    def validator(resp, isBinary):
        resp = json.loads(resp)
        self.assertEqual(200, resp['headers']['status'])
        self.assertEqual({"node": 31}, resp['body'])

    sender.side_effect = validator
    self.protocol.onMessage(req, False)
    # List tail
    action = consts.QUEUE_LIST
    body = {}
    req = test_utils.create_request(action, body, headers)

    def validator(resp, isBinary):
        resp = json.loads(resp)
        self.assertEqual(200, resp['headers']['status'])

    sender.side_effect = validator
    self.protocol.onMessage(req, False)
    # List manually-constructed tail
    body = {'marker': "zzz"}
    req = test_utils.create_request(action, body, headers)
    self.protocol.onMessage(req, False)
Example 168 (0 votes)
Project: zaqar — Source File: test_claims.py
def test_lifecycle(self):
    """Exercise the full claim lifecycle through the falcon WSGI API.

    Covers claiming, listing with claim/echo combinations, deleting
    with/without a claim id, cross-project access, claim patch/get/
    delete, and operations on a claim that no longer exists.
    """
    doc = '{"ttl": 100, "grace": 60}'
    # First, claim some messages
    body = self.simulate_post(self.claims_path, body=doc,
                              headers=self.headers)
    self.assertEqual(falcon.HTTP_201, self.srmock.status)
    claimed = jsonutils.loads(body[0])['messages']
    claim_href = self.srmock.headers_dict['Location']
    message_href, params = claimed[0]['href'].split('?')
    # No more messages to claim
    self.simulate_post(self.claims_path, body=doc,
                       query_string='limit=3', headers=self.headers)
    self.assertEqual(falcon.HTTP_204, self.srmock.status)
    # Listing messages, by default, won't include claimed, will echo
    body = self.simulate_get(self.messages_path,
                             headers=self.headers,
                             query_string="echo=true")
    self.assertEqual(falcon.HTTP_200, self.srmock.status)
    self._empty_message_list(body)
    # Listing messages, by default, won't include claimed, won't echo
    body = self.simulate_get(self.messages_path,
                             headers=self.headers,
                             query_string="echo=false")
    self.assertEqual(falcon.HTTP_200, self.srmock.status)
    self._empty_message_list(body)
    # List messages, include_claimed, but don't echo
    body = self.simulate_get(self.messages_path,
                             query_string='include_claimed=true'
                                          '&echo=false',
                             headers=self.headers)
    self.assertEqual(falcon.HTTP_200, self.srmock.status)
    self._empty_message_list(body)
    # List messages with a different client-id and echo=false.
    # Should return some messages
    headers = self.headers.copy()
    headers["Client-ID"] = str(uuid.uuid4())
    body = self.simulate_get(self.messages_path,
                             query_string='include_claimed=true'
                                          '&echo=false',
                             headers=headers)
    self.assertEqual(falcon.HTTP_200, self.srmock.status)
    # Include claimed messages this time, and echo
    body = self.simulate_get(self.messages_path,
                             query_string='include_claimed=true'
                                          '&echo=true',
                             headers=self.headers)
    listed = jsonutils.loads(body[0])
    self.assertEqual(falcon.HTTP_200, self.srmock.status)
    self.assertEqual(len(claimed), len(listed['messages']))
    # Freeze "now" 10s in the future so the claim has a measurable age.
    now = timeutils.utcnow() + datetime.timedelta(seconds=10)
    timeutils_utcnow = 'oslo_utils.timeutils.utcnow'
    with mock.patch(timeutils_utcnow) as mock_utcnow:
        mock_utcnow.return_value = now
        body = self.simulate_get(claim_href, headers=self.headers)
    claim = jsonutils.loads(body[0])
    self.assertEqual(falcon.HTTP_200, self.srmock.status)
    self.assertEqual(100, claim['ttl'])
    # NOTE(cpp-cabrera): verify that claim age is non-negative
    self.assertThat(claim['age'], matchers.GreaterThan(-1))
    # Try to delete the message without submitting a claim_id
    self.simulate_delete(message_href, headers=self.headers)
    self.assertEqual(falcon.HTTP_403, self.srmock.status)
    # Delete the message and its associated claim
    self.simulate_delete(message_href,
                         query_string=params, headers=self.headers)
    self.assertEqual(falcon.HTTP_204, self.srmock.status)
    # Try to get it from the wrong project
    headers = {
        'Client-ID': str(uuid.uuid4()),
        'X-Project-ID': 'bogusproject'
    }
    self.simulate_get(message_href, query_string=params, headers=headers)
    self.assertEqual(falcon.HTTP_404, self.srmock.status)
    # Get the message
    self.simulate_get(message_href, query_string=params,
                      headers=self.headers)
    self.assertEqual(falcon.HTTP_404, self.srmock.status)
    # Update the claim
    new_claim_ttl = '{"ttl": 60, "grace": 60}'
    creation = timeutils.utcnow()
    self.simulate_patch(claim_href, body=new_claim_ttl,
                        headers=self.headers)
    self.assertEqual(falcon.HTTP_204, self.srmock.status)
    # Get the claimed messages (again)
    body = self.simulate_get(claim_href, headers=self.headers)
    query = timeutils.utcnow()
    claim = jsonutils.loads(body[0])
    message_href, params = claim['messages'][0]['href'].split('?')
    self.assertEqual(60, claim['ttl'])
    # The reported age must fall inside the creation..query window.
    estimated_age = timeutils.delta_seconds(creation, query)
    self.assertTrue(estimated_age > claim['age'])
    # Delete the claim
    self.simulate_delete(claim['href'], headers=self.headers)
    self.assertEqual(falcon.HTTP_204, self.srmock.status)
    # Try to delete a message with an invalid claim ID
    self.simulate_delete(message_href,
                         query_string=params, headers=self.headers)
    self.assertEqual(falcon.HTTP_400, self.srmock.status)
    # Make sure it wasn't deleted!
    self.simulate_get(message_href, query_string=params,
                      headers=self.headers)
    self.assertEqual(falcon.HTTP_200, self.srmock.status)
    # Try to get a claim that doesn't exist
    self.simulate_get(claim['href'], headers=self.headers)
    self.assertEqual(falcon.HTTP_404, self.srmock.status)
    # Try to update a claim that doesn't exist
    self.simulate_patch(claim['href'], body=doc,
                        headers=self.headers)
    self.assertEqual(falcon.HTTP_404, self.srmock.status)
Example 169 (0 votes)
def __init__(self, guid=None, data=None, datastore_wins=False, volatile=False, _hook=None):
    """
    Loads an object with a given guid. If no guid is given, a new object
    is generated with a new guid.
    * guid: The guid indicating which object should be loaded
    * data: Optional pre-fetched data dict to initialize from (skips the
      volatile/persistent lookup)
    * datastore_wins: Optional boolean indicating save conflict resolve management.
    ** True: when saving, external modified fields will not be saved
    ** False: when saving, all changed data will be saved, regardless of external updates
    ** None: in case changed field were also changed externally, an error will be raised
    * volatile: when True the object is treated as volatile (cache-only)
    * _hook: optional dict of test hooks ('before_cache', 'during_cache')
      invoked at the corresponding points; intended for unit tests
    """
    # Initialize super class
    super(DataObject, self).__init__()
    # Initialize internal fields
    self._frozen = False           # While False, properties may still be added
    self._datastore_wins = datastore_wins
    self._guid = None              # Guid identifier of the object
    self._original = {}            # Original data copy
    self._metadata = {}            # Some metadata, mainly used for unit testing
    self._data = {}                # Internal data storage
    self._objects = {}             # Internal objects storage
    # Initialize public fields
    self.dirty = False
    self.volatile = volatile
    # Worker fields/objects
    self._classname = self.__class__.__name__.lower()
    # Rebuild _relation types: remap foreign types that were replaced by
    # hybrid descendants so relations point at the effective class.
    hybrid_structure = HybridRunner.get_hybrids()
    for relation in self._relations:
        if relation.foreign_type is not None:
            identifier = Descriptor(relation.foreign_type).descriptor['identifier']
            if identifier in hybrid_structure and identifier != hybrid_structure[identifier]['identifier']:
                relation.foreign_type = Descriptor().load(hybrid_structure[identifier]).get_object()
    # Init guid: a missing guid means this is a brand new object.
    self._new = False
    if guid is None:
        self._guid = str(uuid.uuid4())
        self._new = True
    else:
        self._guid = str(guid)
    # Build base keys
    self._key = '{0}_{1}_{2}'.format(DataObject.NAMESPACE, self._classname, self._guid)
    # Worker mutexes
    self._mutex_version = volatile_mutex('ovs_dataversion_{0}_{1}'.format(self._classname, self._guid))
    # Load data from cache or persistent backend where appropriate
    self._volatile = VolatileFactory.get_client()
    self._persistent = PersistentFactory.get_client()
    self._metadata['cache'] = None
    if self._new:
        self._data = {}
    else:
        if data is not None:
            # Caller supplied the data; deep-copy so we own our copy.
            self._data = copy.deepcopy(data)
            self._metadata['cache'] = None
        else:
            self._data = self._volatile.get(self._key)
            if self._data is None:
                # Cache miss: fall back to the persistent store.
                self._metadata['cache'] = False
                try:
                    self._data = self._persistent.get(self._key)
                except KeyNotFoundException:
                    raise ObjectNotFoundException('{0} with guid \'{1}\' could not be found'.format(
                        self.__class__.__name__, self._guid
                    ))
            else:
                self._metadata['cache'] = True
    # Set default values on new fields
    for prop in self._properties:
        if prop.name not in self._data:
            self._data[prop.name] = prop.default
        self._add_property(prop)
    # Load relations
    for relation in self._relations:
        if relation.name not in self._data:
            if relation.foreign_type is None:
                # Self-referencing relation.
                cls = self.__class__
            else:
                cls = relation.foreign_type
            self._data[relation.name] = Descriptor(cls).descriptor
        self._add_relation_property(relation)
    # Add wrapped properties
    for dynamic in self._dynamics:
        self._add_dynamic_property(dynamic)
    # Load foreign keys
    relations = RelationMapper.load_foreign_relations(self.__class__)
    if relations is not None:
        for key, info in relations.iteritems():
            self._objects[key] = {'info': info,
                                  'data': None}
            self._add_list_property(key, info['list'])
    if _hook is not None and 'before_cache' in _hook:
        _hook['before_cache']()
    if not self._new:
        # Re-cache the object, if required
        if self._metadata['cache'] is False:
            # The data wasn't loaded from the cache, so caching is required now.
            # The version mutex guards against caching stale data while a
            # concurrent save bumps '_version'.
            try:
                self._mutex_version.acquire(30)
                this_version = self._data['_version']
                if _hook is not None and 'during_cache' in _hook:
                    _hook['during_cache']()
                store_version = self._persistent.get(self._key)['_version']
                if this_version == store_version:
                    self._volatile.set(self._key, self._data)
            except KeyNotFoundException:
                raise ObjectNotFoundException('{0} with guid \'{1}\' could not be found'.format(
                    self.__class__.__name__, self._guid
                ))
            except NoLockAvailableException:
                # Could not get the lock in time; skip re-caching (best effort).
                pass
            finally:
                self._mutex_version.release()
    # Freeze property creation
    self._frozen = True
    # Optionally, initialize some fields
    if data is not None:
        for prop in self._properties:
            if prop.name in data:
                setattr(self, prop.name, data[prop.name])
    # Store original data
    self._original = copy.deepcopy(self._data)
Example 170 (0 votes)
Project: framework — Source File: utils.py
def update_argspec(*argnames): #pylint: disable=R0912
    '''Wrap a callable to use real argument names
    When generating functions at runtime, one often needs to fall back to
    ``*args`` and ``**kwargs`` usage. Using these features require
    well-docuemented code though, and renders API docuementation tools less
    useful.
    The decorator generated by this function wraps a decorated function,
    which takes ``**kwargs``, into a function which takes the given argument
    names as parameters, and passes them to the decorated function as keyword
    arguments.
    The given argnames can be strings (for normal named arguments), or tuples
    of a string and a value (for arguments with default values). Only a couple
    of default value types are supported, an exception will be thrown when an
    unsupported value type is given.
    Example usage::
        >>> @update_argspec('a', 'b', 'c')
        ... def fun(**kwargs):
        ...     return kwargs['a'] + kwargs['b'] + kwargs['c']
        >>> import inspect
        >>> tuple(inspect.getargspec(fun))
        (['a', 'b', 'c'], None, None, None)
        >>> print fun(1, 2, 3)
        6
        >>> print fun(1, c=3, b=2)
        6
        >>> print fun(1, 2)
        Traceback (most recent call last):
        ...
        TypeError: fun() takes exactly 3 arguments (2 given)
        >>> @update_argspec()
        ... def g():
        ...     print 'Hello'
        >>> tuple(inspect.getargspec(g))
        ([], None, None, None)
        >>> g()
        Hello
        >>> @update_argspec('name', ('age', None))
        ... def hello(**kwargs):
        ...     name = kwargs['name']
        ...
        ...     if kwargs['age'] is None:
        ...         return 'Hello, %s' % name
        ...     else:
        ...         age = kwargs['age']
        ...         return 'Hello, %s, who is %d years old' % (name, age)
        >>> tuple(inspect.getargspec(hello))
        (['name', 'age'], None, None, (None,))
        >>> hello('Nicolas')
        'Hello, Nicolas'
        >>> hello('Nicolas', 25)
        'Hello, Nicolas, who is 25 years old'
    :param argnames: Names of the arguments to be used
    :type argnames: iterable of :class:`str` or `(str, object)`
    :return: Decorator which wraps a given callable into one with a correct
        argspec
    :rtype: `callable`
    '''
    # NOTE(review): a trailing empty-string entry is appended to argnames —
    # presumably to force a trailing comma/consistent joins below; confirm
    # before changing.
    argnames_ = tuple(itertools.chain(argnames, ('', )))
    # Standard execution context, contains only what we actually need in the
    # function template
    context = {
        '__builtins__': None,
        'dict': __builtin__.dict,
        'zip': __builtin__.zip,
        'True': True,
        'False': False,
    }
    # Template for the function which will be compiled later on
    def _format(value):
        '''Format a value for display in a function signature'''
        if isinstance(value, unicode):
            return 'u\'%s\'' % value
        elif isinstance(value, str):
            return '\'%s\'' % value
        elif isinstance(value, bool):
            # bool check must precede int: bool is a subclass of int.
            return 'True' if value else 'False'
        elif isinstance(value, (int, long)):
            return '%d' % value
        elif value is None:
            return 'None'
        else:
            raise TypeError
    def _generate_signature(args):
        '''Format arguments for display in a function signature'''
        for arg in args:
            if isinstance(arg, str):
                yield '%s' % arg
            else:
                arg, default = arg
                yield '%s=%s' % (arg, _format(default))
    template_signature = ', '.join(_generate_signature(argnames_))
    template_args = ', '.join(name if isinstance(name, str) else name[0] \
        for name in argnames_) if argnames_ else ''
    template_argnames = ', '.join(
        '\'%s\'' % (name if isinstance(name, str) else name[0])
        for name in argnames_) if argnames_ else ''
    # '%%' escapes survive the first interpolation so the wrapper can fill
    # in name/orig_name/kwargs_name later.
    fun_def_template = '''
def %%(name)s(%(signature)s):
    %%(kwargs_name)s = dict(zip((%(argnames)s), (%(args)s)))
    return %%(orig_name)s(**%%(kwargs_name)s)
''' % {
        'signature': template_signature,
        'args': template_args,
        'argnames': template_argnames,
    }
    def wrapper(fun):
        '''
        Decorating which wraps the decorated function in a callable which uses
        named arguments
        :param fun: Callable to decorate
        :type fun: `callable`
        :see: :func:`update_argspec`
        '''
        # We need unique names for the variables used in the function template,
        # they shouldn't conflict with the arguments
        random_suffix = lambda: str(uuid.uuid4()).replace('-', '')
        orig_function_name = None
        while (not orig_function_name) or (orig_function_name in argnames_):
            orig_function_name = '_orig_%s' % random_suffix()
        kwargs_name = None
        while (not kwargs_name) or (kwargs_name in argnames_):
            kwargs_name = '_kwargs_%s' % random_suffix()
        # Fill in function template
        fun_def = fun_def_template % {
            'name': fun.__name__,
            'orig_name': orig_function_name,
            'kwargs_name': kwargs_name,
        }
        # Compile function to a code object
        code = compile(fun_def, '<update_argspec>', 'exec', 0, 1)
        # Create evaluation context
        env = context.copy()
        env[orig_function_name] = fun
        # Evaluate the code object in the evaluation context
        eval(code, env, env)
        # Retrieve the compiled/evaluated function
        fun_wrapper = env[fun.__name__]
        # Update __*__ attributes
        updated = functools.update_wrapper(fun_wrapper, fun)
        return updated
    return wrapper
Example 171 (0 votes)
Project: maproulette — Source File: manage.py
@manager.command
def create_testdata(challenges=10, tasks=100, users=10):
    """Creates test data in the database.

    Wipes all existing users, challenges, tasks, task geometries and
    actions, then repopulates the database with the requested number of
    users, challenges and tasks-per-challenge (arguments arrive as strings
    from the CLI, hence the int() conversions below).
    """
    import uuid
    import random
    from maproulette import db
    from maproulette.models import User, Challenge, Task, TaskGeometry, Action
    from shapely.geometry import Point, LineString, box
    # statuses to use (a random one is assigned to each generated task)
    statuses = ['available',
                'skipped',
                'fixed',
                'deleted',
                'alreadyfixed',
                'falsepositive']
    # challenge default strings
    challenge_help_test = "Sample challenge *help* text"
    challenge_instruction_test = "Challenge instruction text"
    task_instruction_text = "Task instruction text"
    # delete old tasks and challenges
    # (children first so foreign-key constraints are not violated)
    db.session.query(TaskGeometry).delete()
    db.session.query(Action).delete()
    db.session.query(Task).delete()
    db.session.query(Challenge).delete()
    db.session.query(User).delete()
    db.session.commit()
    # create users with sequential ids 0..users-1
    for uid in range(int(users)):
        user = User()
        user.id = uid
        user.display_name = 'Test User {uid}'.format(uid=uid)
        db.session.add(user)
    db.session.commit()
    # create ten challenges
    for i in range(1, int(challenges) + 1):
        print "Generating Test Challenge #%d" % i
        # default lon/lat extent used when the challenge has no polygon
        minx = -120
        maxx = -40
        miny = 20
        maxy = 50
        challengepoly = None
        slug = "test%d" % i
        title = "Test Challenge %d" % i
        challenge = Challenge(slug, title)
        challenge.difficulty = random.choice([1, 2, 3])
        challenge.active = True
        challenge.blurb = "This is test challenge number %d" % i
        challenge.description = "This describes challenge %d in detail" % i
        challenge.help = challenge_help_test
        challenge.instruction = challenge_instruction_test
        # have bounding boxes for all but the first two challenges.
        if i > 2:
            # random 1x1 degree box inside the default extent
            minx = random.randrange(-120, -40)
            miny = random.randrange(20, 50)
            maxx = minx + 1
            maxy = miny + 1
            challengepoly = box(minx, miny, maxx, maxy)
            print "\tChallenge has a bounding box of ", challengepoly
        challenge.polygon = challengepoly
        db.session.add(challenge)
        # add some tasks to the challenge
        print "\tGenerating %i tasks for challenge %i" % (int(tasks), i)
        # generate NUM_TASKS random tasks
        for j in range(int(tasks)):
            # generate a unique identifier
            identifier = str(uuid.uuid4())
            # create two random points not too far apart
            task_geometries = []
            p1 = Point(
                random.randrange(minx, maxx) + random.random(),
                random.randrange(miny, maxy) + random.random())
            p2 = Point(
                p1.x + (random.random() * random.choice((1, -1)) * 0.01),
                p1.y + (random.random() * random.choice((1, -1)) * 0.01))
            # create a linestring connecting the two points
            # no constructor for linestring from points?
            l1 = LineString([(p1.x, p1.y), (p2.x, p2.y)])
            # add the first point and the linestring to the task's geometries
            task_geometries.append(TaskGeometry(p1))
            # set a linestring for every other challenge
            if not j % 2:
                task_geometries.append(TaskGeometry(l1))
            # instantiate the task and register it with challenge 'test'
            # Initialize a task with its challenge slug and persistent ID
            task = Task(challenge.slug, identifier, task_geometries)
            # because we are not using the API, we need to call set_location
            # explicitly to set the task's location
            task.set_location()
            # generate random string for the instruction
            task.instruction = task_instruction_text
            # set a status, attributed to a randomly picked existing user id
            action = Action(random.choice(statuses),
                            user_id=random.choice(range(int(users))))
            task.append_action(action)
            # add the task to the session
            db.session.add(task)
        # commit the generated tasks and the challenge to the database.
        db.session.commit()
0
Example 172
def _handleRequests(self, newMail):
    """
    Scan a freshly read mail snapshot for new requests and queue them.

    Compares ``newMail`` against the previously processed snapshot
    (``self._incomingMail``) mailbox by mailbox. Every changed, non-empty
    message whose 4-byte type code is registered in ``self._messageTypes``
    is queued on the thread pool for processing; CLEAN messages are echoed
    into the outgoing mail buffer instead.

    :param newMail: raw byte string holding all host mailboxes
    :returns: True if outgoing mail was modified (a CLEAN echo was staged)
              and therefore needs to be sent, False otherwise
    """
    send = False
    # run through all messages and check if new messages have arrived
    # (since last read)
    for host in range(0, self._numHosts):
        mailboxStart = host * MAILBOX_SIZE
        # Mailbox checksum validation is done lazily — see below
        isMailboxValidated = False
        for i in range(0, MESSAGES_PER_MAILBOX):
            msgId = host * SLOTS_PER_MAILBOX + i
            msgStart = msgId * MESSAGE_SIZE
            # First byte of message is message version. Check message
            # version, if 0 then message is empty and can be skipped
            if newMail[msgStart] in ('\0', '0'):
                continue
            # Most mailboxes are probably empty so it costs less to check
            # that all messages start with 0 than to validate the mailbox,
            # therefore this is done only after we find a non-empty message
            # in the mailbox
            if not isMailboxValidated:
                if not self._validateMailbox(
                        newMail[mailboxStart:mailboxStart + MAILBOX_SIZE],
                        host):
                    # Cleaning invalid mbx in newMail
                    newMail = newMail[:mailboxStart] + EMPTYMAILBOX + \
                        newMail[mailboxStart + MAILBOX_SIZE:]
                    break
                self.log.debug("SPM_MailMonitor: Mailbox %s validated, "
                               "checking mail", host)
                isMailboxValidated = True
            newMsg = newMail[msgStart:msgStart + MESSAGE_SIZE]
            msgOffset = msgId * MESSAGE_SIZE
            if newMsg == CLEAN_MESSAGE:
                # The host cleared its slot: mirror CLEAN into our outgoing
                # mail so the matching reply slot gets cleared as well.
                # 'with' replaces the manual acquire/try/finally/release and
                # still releases the lock if the slicing raises.
                with self._outLock:
                    self._outgoingMail = \
                        self._outgoingMail[0:msgOffset] + CLEAN_MESSAGE + \
                        self._outgoingMail[msgOffset + MESSAGE_SIZE:
                                           self._outMailLen]
                send = True
                continue
            # Message isn't empty, check if its new
            isMessageNew = False
            for j in range(msgStart, msgStart + MESSAGE_SIZE):
                if newMail[j] != self._incomingMail[j]:
                    isMessageNew = True
                    break
            # If search exhausted, i.e. message hasn't changed since last
            # read, it can be skipped
            if not isMessageNew:
                continue
            # We only get here if there is a novel request
            try:
                # Bytes 1..4 carry the message type code
                msgType = newMail[msgStart + 1:msgStart + 5]
                if msgType in self._messageTypes:
                    # Use message class to process request according to
                    # message specific logic.
                    # Renamed from 'id' so the builtin id() is not shadowed.
                    taskId = str(uuid.uuid4())
                    self.log.debug("SPM_MailMonitor: processing request: "
                                   "%s" % repr(newMail[
                                       msgStart:msgStart + MESSAGE_SIZE]))
                    res = self.tp.queueTask(
                        taskId, runTask, (self._messageTypes[msgType], msgId,
                                          newMail[msgStart:
                                                  msgStart + MESSAGE_SIZE])
                    )
                    if not res:
                        # Queueing failed; handled by the blanket handler
                        # below so the scan continues with the next message.
                        raise Exception()
                else:
                    self.log.error("SPM_MailMonitor: unknown message type "
                                   "encountered: %s", msgType)
            except RuntimeError as e:
                self.log.error("SPM_MailMonitor: exception: %s caught "
                               "while handling message: %s", str(e),
                               newMail[msgStart:msgStart + MESSAGE_SIZE])
            except Exception:
                # Was a bare 'except:', which would also swallow
                # SystemExit/KeyboardInterrupt; Exception keeps the
                # best-effort logging without hiding interpreter exits.
                self.log.error("SPM_MailMonitor: exception caught while "
                               "handling message: %s",
                               newMail[msgStart:msgStart + MESSAGE_SIZE],
                               exc_info=True)
    # Remember this snapshot so the next scan only reacts to changes
    self._incomingMail = newMail
    return send