Here are examples of the Python API sys.modules, taken from open source projects. By voting up, you can indicate which examples are most useful and appropriate.
125 Examples
Example 1
Project: pygobject Source File: __init__.py
def load_overrides(introspection_module):
"""Loads overrides for an introspection module.
Either returns the same module again in case there are no overrides or a
proxy module including overrides. Doesn't cache the result.
"""
namespace = introspection_module.__name__.rsplit(".", 1)[-1]
module_key = 'gi.repository.' + namespace
# We use sys.modules so overrides can import from gi.repository
# but restore everything at the end so this doesn't have any side effects
has_old = module_key in sys.modules
old_module = sys.modules.get(module_key)
# Create a new sub type, so we can separate descriptors like
# _DeprecatedAttribute for each namespace.
proxy_type = type(namespace + "ProxyModule", (OverridesProxyModule, ), {})
proxy = proxy_type(introspection_module)
sys.modules[module_key] = proxy
# backwards compat:
# gedit uses gi.importer.modules['Gedit']._introspection_module
from ..importer import modules
assert hasattr(proxy, "_introspection_module")
modules[namespace] = proxy
try:
override_package_name = 'gi.overrides.' + namespace
# http://bugs.python.org/issue14710
try:
override_loader = get_loader(override_package_name)
except AttributeError:
override_loader = None
# Avoid checking for an ImportError, an override might
# depend on a missing module thus causing an ImportError
if override_loader is None:
return introspection_module
override_mod = importlib.import_module(override_package_name)
finally:
del modules[namespace]
del sys.modules[module_key]
if has_old:
sys.modules[module_key] = old_module
# backwards compat: for gst-python/gstmodule.c,
# which tries to access Gst.Fraction through
# Gst._overrides_module.Fraction. We assign the proxy instead as that
# contains all overridden classes like Fraction during import anyway and
# there is no need to keep the real override module alive.
proxy._overrides_module = proxy
override_all = []
if hasattr(override_mod, "__all__"):
override_all = override_mod.__all__
for var in override_all:
try:
item = getattr(override_mod, var)
except (AttributeError, TypeError):
# Gedit puts a non-string in __all__, so catch TypeError here
continue
setattr(proxy, var, item)
# Replace deprecated module level attributes with a descriptor
# which emits a warning when accessed.
for attr, replacement in _deprecated_attrs.pop(namespace, []):
try:
value = getattr(proxy, attr)
except AttributeError:
raise AssertionError(
"%s was set deprecated but wasn't added to __all__" % attr)
delattr(proxy, attr)
deprecated_attr = _DeprecatedAttribute(
namespace, attr, value, replacement)
setattr(proxy_type, attr, deprecated_attr)
return proxy
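The core trick in this example is installing a proxy into sys.modules so that any imports triggered while the overrides load resolve to it, and then restoring the previous state. A minimal sketch of that save/replace/restore pattern (generic names, not pygobject's API):

import sys

def with_temporary_module(module_key, replacement, action):
    # Save whatever is currently registered (there may be nothing).
    had_old = module_key in sys.modules
    old_module = sys.modules.get(module_key)
    sys.modules[module_key] = replacement
    try:
        return action()  # imports of module_key now resolve to `replacement`
    finally:
        # Undo everything so the swap has no lasting side effects.
        del sys.modules[module_key]
        if had_old:
            sys.modules[module_key] = old_module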
Example 2
def main(parser):
"""Doesn't parse the arguments here, just find the right subcommand to
execute."""
# Do it late so all commands are listed.
CMDhelp.usage_more = ('\n\nCommands are:\n' + '\n'.join([
' %-10s %s' % (fn[3:], Command(fn[3:]).__doc__.split('\n')[0].strip())
for fn in dir(sys.modules[__name__]) if fn.startswith('CMD')]))
parser.add_option('--host', dest='host', action='store', help='Hostname to listen on')
parser.add_option('--port', dest='port', action='store', help='Port to run on')
parser.add_option('--settings', dest='settings', action='store', default='~/.quickopend', help='Settings file to use')
parser.add_option('--test', dest='test', action='store_true', default=False, help='Adds test hooks')
parser.add_option('--trace', dest='trace', action='store_true', default=False, help='Records performance tracing information to quickopen.trace')
parser.add_option('--foreground', dest='foreground', action='store_true', default=False, help='Starts quickopend in the foreground instead of forking')
parser.add_option('--preserve-stdout', dest='preserve_stdout', action='store_true', default=False, help='Keeps the stdout of the daemon')
old_parser_args = parser.parse_args
def parse():
options, args = old_parser_args()
if options.trace:
trace_enable("./%s.trace" % "quickopen")
settings_file = os.path.expanduser(options.settings)
settings = src.settings.Settings(settings_file)
settings.register('host', str, 'localhost')
settings.register('port', int, src.default_port.get())
options.settings = settings
if not options.port:
options.port = settings.port
else:
options.port = int(options.port)
if not options.host:
options.host = settings.host
return options, args
parser.parse_args = parse
non_switch_args = [i for i in sys.argv[1:] if not i.startswith('-')]
if non_switch_args:
command = Command(non_switch_args[0])
if command:
if non_switch_args[0] == 'help':
CMDhelp.usage_more = ('\n\nCommands are:\n' + '\n'.join([
' %-10s %s' % (fn[3:], getdoc(Command(fn[3:])).split('\n')[0].strip())
for fn in dir(sys.modules[__name__]) if fn.startswith('CMD')]))
# "fix" the usage and the description now that we know the subcommand.
GenUsage(parser, non_switch_args[0])
new_args = list(sys.argv[1:])
new_args.remove(non_switch_args[0])
new_args.insert(0, sys.argv[0])
sys.argv = new_args
return command(parser)
else:
# Not a known command. Default to help.
print "Unrecognized command: %s\n" % non_switch_args[0]
else: # default command
CMDrun.usage_more = ('\n\nCommands are:\n' + '\n'.join([
' %-10s %s' % (fn[3:], getdoc(Command(fn[3:])).split('\n')[0].strip())
for fn in dir(sys.modules[__name__]) if fn.startswith('CMD')]))
GenUsage(parser, 'run')
return CMDrun(parser)
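This command dispatcher builds its help text by introspecting the current module via sys.modules[__name__] and collecting every attribute whose name starts with CMD. A self-contained sketch of that discovery idiom, with one sample command defined locally:

import sys

def CMDrun(parser):
    """Run the daemon."""

def list_commands():
    # Enumerate CMD-prefixed names defined in this very module.
    module = sys.modules[__name__]
    return [name[3:] for name in dir(module) if name.startswith('CMD')]

print(list_commands())  # ['run']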
Example 3
Project: deblaze Source File: imports.py
def lazyModule(modname, relativePath=None):
"""
Return module 'modname', but with its contents loaded "on demand"
This function returns 'sys.modules[modname]', if present. Otherwise
it creates a 'LazyModule' object for the specified module, caches it
in 'sys.modules', and returns it.
'LazyModule' is a subclass of the standard Python module type, that
remains empty until an attempt is made to access one of its
attributes. At that moment, the module is loaded into memory, and
any hooks that were defined via 'whenImported()' are invoked.
Note that calling 'lazyModule' with the name of a non-existent or
unimportable module will delay the 'ImportError' until the moment
access is attempted. The 'ImportError' will occur every time an
attribute access is attempted, until the problem is corrected.
This function also takes an optional second parameter, 'relativePath',
which will be interpreted as a '/'-separated path string relative to
'modname'. If a 'relativePath' is supplied, the module found by
traversing the path will be loaded instead of 'modname'. In the path,
'.' refers to the current module, and '..' to the current module's
parent. For example::
fooBaz = lazyModule('foo.bar','../baz')
will return the module 'foo.baz'. The main use of the 'relativePath'
feature is to allow relative imports in modules that are intended for
use with module inheritance. Where an absolute import would be carried
over as-is into the inheriting module, an import relative to '__name__'
will be relative to the inheriting module, e.g.::
something = lazyModule(__name__,'../path/to/something')
The above code will have different results in each module that inherits
it.
(Note: 'relativePath' can also be an absolute path (starting with '/');
this is mainly useful for module '__bases__' lists.)
"""
if relativePath:
modname = joinPath(modname, relativePath)
if modname not in sys.modules:
file_name = path = None
if '.' in modname:
splitpos = modname.rindex('.')
parent = sys.modules[modname[:splitpos]]
file_name = find_module(modname[splitpos + 1:], parent.__path__)[1]
else:
file_name = find_module(modname)[1]
if os.path.isdir(file_name):
path = [file_name]
py = os.path.join(file_name, '__init__')
for full in ('.pyo', '.pyc', '.py'):
full = py + full
if os.path.exists(full):
break
else:
raise ImportError('No module named %s' % modname)
file_name = full
getModuleHooks(modname) # force an empty hook list into existence
sys.modules[modname] = LazyModule(modname, file_name, path)
if '.' in modname:
# ensure parent module/package is in sys.modules
# and parent.modname=module, as soon as the parent is imported
splitpos = modname.rindex('.')
whenImported(
modname[:splitpos],
lambda m: setattr(m, modname[splitpos + 1:], sys.modules[modname])
)
return sys.modules[modname]
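lazyModule predates the standard library's support for this pattern; since Python 3.5, importlib.util.LazyLoader provides the same deferred execution. A sketch of the documented recipe, which likewise registers the module in sys.modules up front:

import importlib.util
import sys

def lazy_import(name):
    # Register `name` in sys.modules without executing its body; the code
    # runs on first attribute access, much like LazyModule above.
    spec = importlib.util.find_spec(name)
    loader = importlib.util.LazyLoader(spec.loader)
    spec.loader = loader
    module = importlib.util.module_from_spec(spec)
    sys.modules[name] = module
    loader.exec_module(module)
    return module

json = lazy_import('json')  # nothing executed yet (unless already imported)
json.dumps({'a': 1})        # the real import happens here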
Example 4
Project: duecredit Source File: injector.py
def activate(self, retrospect=True):
"""
Parameters
----------
retrospect : bool, optional
Whether to consider already loaded modules
"""
if not self._orig_import:
# for paranoid Yarik so we have assurance we are not somehow
# overriding our decorator
if hasattr(__builtin__.__import__, '__duecredited__'):
raise RuntimeError("__import__ is already duecredited")
self._orig_import = __builtin__.__import__
@wraps(__builtin__.__import__)
def __import(name, *args, **kwargs):
if self.__processing_queue or name in self._processed_modules or name in self.__queue_to_process:
lgr.debug("Performing undecorated import of %s", name)
# return right away without any decoration in such a case
if self._orig_import:
return _very_orig_import(name, *args, **kwargs)
else:
return self._mitigate_None_orig_import(name, *args, **kwargs)
import_level_prefix = self._import_level_prefix
lgr.log(1, "%sProcessing request to import %s", import_level_prefix, name)
# importing submodule might result in importing a new one and
# name here is not sufficient to determine which module would actually
# get imported unless level=0 (absolute import), but that one rarely used
# could be old-style or new style relative import!
# args[0] -> globals, [1] -> locals(), [2] -> fromlist, [3] -> level
level = args[3] if len(args) >= 4 else kwargs.get('level', -1)
# fromlist = args[2] if len(args) >= 3 else kwargs.get('fromlist', [])
if not retrospect and not self._processed_modules:
# we were asked to not consider those modules which were already loaded
# so let's assume that they were all processed already
self._processed_modules = set(sys.modules)
mod = None
try:
self.__import_level += 1
# TODO: safe-guard all our logic so
# if anything goes wrong post-import -- we still return imported module
if self._orig_import:
mod = self._orig_import(name, *args, **kwargs)
else:
mod = self._mitigate_None_orig_import(name, *args, **kwargs)
self._handle_fresh_imports(name, import_level_prefix, level)
finally:
self.__import_level -= 1
if self.__import_level == 0 and self.__queue_to_process:
self._process_queue()
lgr.log(1, "%sReturning %s", import_level_prefix, mod)
return mod
__import.__duecredited__ = True
self._populate_delayed_injections()
if retrospect:
lgr.debug("Considering previously loaded %d modules", len(sys.modules))
# operate on keys() (not iterator) since we might end up importing delayed injection modules etc
for mod_name in sys.modules.keys():
self.process(sys.modules[mod_name])
lgr.debug("Assigning our importer")
__builtin__.__import__ = __import
self._active = True
else:
lgr.warning("Seems that we are calling duecredit_importer twice."
" No harm is done but shouldn't happen")
Example 5
Project: multiprocess Source File: forking.py
def prepare(name, mainpath, sys_path, sys_argv, authkey,
cur_dir, orig_dir, log_args):
'''
Try to get this process ready to unpickle process object
'''
global original_main_module
original_main_module = sys.modules['__main__']
processing.currentProcess().setName(name)
processing.currentProcess().setAuthKey(authkey)
if log_args is not None:
from processing.logger import enableLogging
enableLogging(*log_args)
if orig_dir is not None:
processing.ORIGINAL_DIR = orig_dir
if cur_dir is not None:
try:
os.chdir(cur_dir)
except OSError:
raise
if sys_path is not None:
sys.path = sys_path
if mainpath is not None:
mainname = splitext(basename(mainpath))[0]
if mainname == '__init__':
mainname = basename(dirname(mainpath))
if not mainpath.lower().endswith('.exe') and mainname != 'ipython':
if mainpath is None:
dirs = None
elif basename(mainpath).startswith('__init__.py'):
dirs = [dirname(dirname(mainpath))]
else:
dirs = [dirname(mainpath)]
assert mainname not in sys.modules, mainname
file, pathname, etc = imp.find_module(mainname, dirs)
try:
# We would like to do "imp.load_module('__main__', ...)"
# here. However, that would cause 'if __name__ ==
# "__main__"' clauses to be executed.
main_module = imp.load_module(
'__parents_main__', file, pathname, etc
)
finally:
if file:
file.close()
sys.modules['__main__'] = main_module
main_module.__name__ = '__main__'
# XXX Try to make the potentially picklable objects in
# sys.modules['__main__'] realize they are in the main
# module -- ugly
for obj in main_module.__dict__.values():
try:
if obj.__module__ == '__parents_main__':
obj.__module__ = '__main__'
except (KeyboardInterrupt, SystemExit):
raise
except:
pass
if sys_argv is not None: # this needs to come last
sys.argv = sys_argv
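The delicate part of prepare() is loading the parent's main script under the throwaway name '__parents_main__' and only then publishing it as sys.modules['__main__'], so the child never re-executes `if __name__ == '__main__'` blocks. A reduced sketch of that final swap, assuming the module has already been loaded:

import sys

def install_as_main(main_module):
    # Publish an already-loaded module as __main__ and relabel its
    # contents so pickling resolves them against '__main__'.
    sys.modules['__main__'] = main_module
    main_module.__name__ = '__main__'
    for obj in main_module.__dict__.values():
        try:
            if obj.__module__ == '__parents_main__':
                obj.__module__ = '__main__'
        except Exception:
            pass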
Example 6
Project: saxo Source File: irc.py
def load(self):
# Update symlinks
common.populate(saxo_path, self.base)
# Load events
first = not self.events
self.events.clear()
def module_exists(name):
try: imp.find_module(name)
except ImportError:
return False
else: return True
if first and module_exists("plugins"):
debug("Warning: a 'plugins' module already exists")
if first and ("plugins" in sys.modules):
raise ImportError("'plugins' duplicated")
# This means we're using plugins as a namespace module
# Might have to move saxo.path's plugins/ to something else
# Otherwise it gets unionised into the namespace module
# if self.base not in sys.path:
# - not needed, because we clear up below
sys.path[:0] = [self.base]
plugins = os.path.join(self.base, "plugins")
plugins_package = importlib.__import__("plugins")
if next(iter(plugins_package.__path__)) != plugins:
# This is very unlikely to happen, because we pushed self.base
# to the front of sys.path, but perhaps some site configuration
# or other import mechanism may affect this
raise ImportError("non-saxo 'plugins' module")
setups = {}
for name in os.listdir(plugins):
if ("_" in name) or (not name.endswith(".py")):
continue
name = "plugins." + name[:-3]
if not name in sys.modules:
try: module = importlib.import_module(name)
except Exception as err:
debug("Error loading %s:" % name, err)
elif first:
raise ImportError("%r duplicated" % name)
else:
module = sys.modules[name]
try: module = imp.reload(module)
except Exception as err:
debug("Error reloading %s:" % name, err)
for attr in dir(module):
obj = getattr(module, attr)
if hasattr(obj, "saxo_event"):
try: self.events[obj.saxo_event].append(obj)
except KeyError:
self.events[obj.saxo_event] = [obj]
elif hasattr(obj, "saxo_setup"):
obj.saxo_name = module.__name__ + "." + obj.__name__
setups[obj.saxo_name] = obj
# debug("Loaded module:", name)
debug("%s setup functions" % len(setups))
graph = {}
for setup in setups.values():
deps = ["plugins." + dep for dep in setup.saxo_deps]
graph[setup.saxo_name] = deps
database_filename = os.path.join(self.base, "database.sqlite3")
with sqlite.Database(database_filename) as self.db:
for name in common.tsort(graph):
debug(name)
if name in setups:
setups[name](self)
else:
debug("Warning: Missing dependency:", name)
sys.path[:1] = []
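The import-or-reload branching above is a common plugin-loader idiom: names not yet in sys.modules are imported, cached names are reloaded so code changes take effect. A compact sketch using modern importlib:

import importlib
import sys

def import_or_reload(name):
    # Reload when already cached, otherwise import for the first time.
    if name in sys.modules:
        return importlib.reload(sys.modules[name])
    return importlib.import_module(name)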
Example 7
Project: pgi Source File: __init__.py
def load_overrides(introspection_module):
"""Loads overrides for an introspection module.
Either returns the same module again in case there are no overrides or a
proxy module including overrides. Doesn't cache the result.
"""
namespace = introspection_module.__name__.rsplit(".", 1)[-1]
module_keys = [prefix + "." + namespace for prefix in const.PREFIX]
# We use sys.modules so overrides can import from gi.repository
# but restore everything at the end so this doesn't have any side effects
for module_key in module_keys:
has_old = module_key in sys.modules
old_module = sys.modules.get(module_key)
# Create a new sub type, so we can separate descriptors like
# _DeprecatedAttribute for each namespace.
proxy_type = type(namespace + "ProxyModule", (OverridesProxyModule, ), {})
proxy = proxy_type(introspection_module)
for module_key in module_keys:
sys.modules[module_key] = proxy
try:
override_package_name = 'pgi.overrides.' + namespace
# http://bugs.python.org/issue14710
try:
override_loader = get_loader(override_package_name)
except AttributeError:
override_loader = None
# Avoid checking for an ImportError, an override might
# depend on a missing module thus causing an ImportError
if override_loader is None:
return introspection_module
override_mod = importlib.import_module(override_package_name)
finally:
for module_key in module_keys:
del sys.modules[module_key]
if has_old:
sys.modules[module_key] = old_module
override_all = []
if hasattr(override_mod, "__all__"):
override_all = override_mod.__all__
for var in override_all:
try:
item = getattr(override_mod, var)
except (AttributeError, TypeError):
# Gedit puts a non-string in __all__, so catch TypeError here
continue
# make sure new classes have a proper __module__
try:
if item.__module__.split(".")[-1] == namespace:
item.__module__ = namespace
except AttributeError:
pass
setattr(proxy, var, item)
# Replace deprecated module level attributes with a descriptor
# which emits a warning when accessed.
for attr, replacement in _deprecated_attrs.pop(namespace, []):
try:
value = getattr(proxy, attr)
except AttributeError:
raise AssertionError(
"%s was set deprecated but wasn't added to __all__" % attr)
delattr(proxy, attr)
deprecated_attr = _DeprecatedAttribute(
namespace, attr, value, replacement)
setattr(proxy_type, attr, deprecated_attr)
return proxy
Example 8
def get_parent(globals, level):
"""
parent, name = get_parent(globals, level)
Return the package that an import is being performed in. If globals comes
from the module foo.bar.bat (not itself a package), this returns the
sys.modules entry for foo.bar. If globals is from a package's __init__.py,
the package's entry in sys.modules is returned.
If globals doesn't come from a package or a module in a package, or a
corresponding entry is not found in sys.modules, None is returned.
"""
orig_level = level
if not level or not isinstance(globals, dict):
return None, ''
pkgname = globals.get('__package__', None)
if pkgname is not None:
# __package__ is set, so use it
if not hasattr(pkgname, 'rindex'):
raise ValueError('__package__ set to non-string')
if len(pkgname) == 0:
if level > 0:
raise ValueError('Attempted relative import in non-package')
return None, ''
name = pkgname
else:
# __package__ not set, so figure it out and set it
if '__name__' not in globals:
return None, ''
modname = globals['__name__']
if '__path__' in globals:
# __path__ is set, so modname is already the package name
globals['__package__'] = name = modname
else:
# Normal module, so work out the package name if any
lastdot = modname.rfind('.')
if lastdot < 0 and level > 0:
raise ValueError("Attempted relative import in non-package")
if lastdot < 0:
globals['__package__'] = None
return None, ''
globals['__package__'] = name = modname[:lastdot]
dot = len(name)
for x in xrange(level, 1, -1):
try:
dot = name.rindex('.', 0, dot)
except ValueError:
raise ValueError("attempted relative import beyond top-level "
"package")
name = name[:dot]
try:
parent = sys.modules[name]
except:
if orig_level < 1:
warn("Parent module '%.200s' not found while handling absolute "
"import" % name)
parent = None
else:
raise SystemError("Parent module '%.200s' not loaded, cannot "
"perform relative import" % name)
# We expect, but can't guarantee, if parent != None, that:
# - parent.__name__ == name
# - parent.__dict__ is globals
# If this is violated... Who cares?
return parent, name
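For the common non-relative case, this boils down to splitting the dotted name and consulting sys.modules for the parent entry. A small sketch of that lookup:

import sys

def parent_package(modname):
    # Return the already-imported parent package of a dotted module name,
    # or None for top-level modules or parents that are not loaded yet.
    if '.' not in modname:
        return None
    return sys.modules.get(modname.rsplit('.', 1)[0])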
Example 9
Project: yadapy Source File: local.py
def pyimport(self, modname=None, ensuresyspath=True):
""" return path as an imported python module.
If modname is None, look for the containing package
and construct an according module name.
The module will be put/looked up in sys.modules.
if ensuresyspath is True then the root dir for importing
the file (taking __init__.py files into account) will
be prepended to sys.path if it isn't there already.
If ensuresyspath=="append" the root dir will be appended
if it isn't already contained in sys.path.
if ensuresyspath is False no modification of syspath happens.
"""
if not self.check():
raise py.error.ENOENT(self)
pkgpath = None
if modname is None:
pkgpath = self.pypkgpath()
if pkgpath is not None:
pkgroot = pkgpath.dirpath()
names = self.new(ext="").relto(pkgroot).split(self.sep)
if names[-1] == "__init__":
names.pop()
modname = ".".join(names)
else:
pkgroot = self.dirpath()
modname = self.purebasename
self._ensuresyspath(ensuresyspath, pkgroot)
__import__(modname)
mod = sys.modules[modname]
if self.basename == "__init__.py":
return mod # we don't check anything as we might
# be in a namespace package ... too icky to check
modfile = mod.__file__
if modfile[-4:] in ('.pyc', '.pyo'):
modfile = modfile[:-1]
elif modfile.endswith('$py.class'):
modfile = modfile[:-9] + '.py'
if modfile.endswith(os.path.sep + "__init__.py"):
if self.basename != "__init__.py":
modfile = modfile[:-12]
try:
issame = self.samefile(modfile)
except py.error.ENOENT:
issame = False
if not issame:
raise self.ImportMismatchError(modname, modfile, self)
return mod
else:
try:
return sys.modules[modname]
except KeyError:
# we have a custom modname, do a pseudo-import
mod = py.std.types.ModuleType(modname)
mod.__file__ = str(self)
sys.modules[modname] = mod
try:
py.builtin.execfile(str(self), mod.__dict__)
except:
del sys.modules[modname]
raise
return mod
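The custom-modname branch performs a "pseudo-import": create an empty module object, register it, execute the file's source into its namespace, and roll back the sys.modules entry on failure. A Python 3 sketch of that fallback:

import sys
import types

def pseudo_import(modname, path):
    mod = types.ModuleType(modname)
    mod.__file__ = path
    sys.modules[modname] = mod  # register before executing, as real imports do
    try:
        with open(path) as f:
            exec(compile(f.read(), path, 'exec'), mod.__dict__)
    except BaseException:
        del sys.modules[modname]  # don't leave a half-initialized module behind
        raise
    return mod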
Example 10
def setup_logger(name='', level=twiggy.levels.DEBUG,
fmt=twiggy.formats.line_format,
fmt_name=('{0:%s}' % 10).format,
screen=None, file_name=None,
mpi_comm=None, zmq_addr=None,
log_exceptions=True, multiline=False):
"""
Setup a twiggy logger.
Parameters
----------
name : str
Logger name.
level : twiggy.levels.LogLevel
Logging level.
fmt : twiggy.formats.LineFormat
Logging formatter class instance.
fmt_name : function
Function with one parameter that formats the message name.
screen : bool
Create output stream handler to the screen if True.
file_name : str
Create output handler to specified file.
mpi_comm : mpi4py.MPI.Intracomm
If not None, use MPI I/O with the specified communicator for
output file handler. Ignored if the `file_name` parameter
is not specified.
zmq_addr : str
ZeroMQ socket address.
log_exceptions : bool
If True, exception messages are written to the logger.
multiline : bool
If True, log exception messages on multiple lines.
Returns
-------
logger : twiggy.logger.Logger
Configured logger.
Bug
---
To use the ZeroMQ output class with multiprocessing, it must be added
as an emitter within each process.
"""
fmt = copy.copy(fmt)
fmt.conversion.delete('name')
# Apply name format to the value (i.e., the name), not the key (i.e., the
# field name "name"):
fmt.conversion.add('name', str, lambda k, v: fmt_name(v))
if file_name:
if mpi_comm:
if 'mpi4py.MPI' not in sys.modules:
raise ValueError('mpi4py not available')
if not isinstance(mpi_comm, mpi4py.MPI.Intracomm):
raise ValueError('mpi_comm must be an instance of '
'mpi4py.MPI.Intracomm')
if 'neurokernel.tools.mpi' not in sys.modules:
raise ValueError('neurokernel.tools.mpi not available')
file_output = \
neurokernel.tools.mpi.MPIOutput(file_name, fmt, mpi_comm)
else:
file_output = \
twiggy.outputs.FileOutput(file_name, fmt, 'w')
twiggy.add_emitters(('file', level, None, file_output))
if screen:
screen_output = \
twiggy.outputs.StreamOutput(fmt, stream=sys.stdout)
twiggy.add_emitters(('screen', level, None, screen_output))
if zmq_addr:
if 'neurokernel.tools.zmq' not in sys.modules:
raise ValueError('neurokernel.tools.zmq not available')
zmq_output = neurokernel.tools.zmq.ZMQOutput(zmq_addr, fmt)
twiggy.add_emitters(('zmq', level, None, zmq_output))
logger = twiggy.log.name(fmt_name(name))
if log_exceptions:
set_excepthook(logger, multiline)
return logger
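Note that this function never imports mpi4py itself; it merely checks sys.modules to see whether the caller's environment has already loaded it. Such a check is cheap, but it only reports what has been imported so far, not what is installed:

import sys

def already_imported(name):
    # True only if some earlier import pulled the module in; the package
    # may well be installed even when this returns False.
    return name in sys.modules

already_imported('mpi4py.MPI')  # False until something imports mpi4py.MPI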
Example 11
Project: GitSavvy Source File: reload.py
def reload_modules(main, modules, perform_reload=True):
"""Implements the machinery for reloading a given plugin module."""
#
# Here's the approach in general:
#
# - Hide GitSavvy modules from the sys.modules temporarily;
#
# - Install a special import hook onto sys.meta_path;
#
# - Call sublime_plugin.reload_plugin(), which imports the main
# "git_savvy" module under the hood, triggering the hook;
#
# - The hook, instead of creating a new module object, peeks the saved
# one and reloads it. Once the module encounters an import statement
# requesting another module, not yet reloaded, the hook reenters and
# processes that new module recursively, then get back to the previous
# one, and so on.
#
# This makes the modules reload in the very same order as they were loaded
# initially, as if they were imported from scratch.
#
if perform_reload:
sublime_plugin.unload_module(main)
# Insert the main "git_savvy" module at the beginning to make the reload
# order be as close to the order of the "natural" import as possible.
module_names = [main.__name__] + sorted(name for name in modules
if name != main.__name__)
# First, remove all the loaded modules from the sys.modules cache,
# otherwise the reloading hook won't be called.
loaded_modules = dict(sys.modules)
for name in loaded_modules:
if name in modules:
del sys.modules[name]
stack_meter = StackMeter()
@FilteringImportHook.when(condition=lambda name: name in modules)
def module_reloader(name):
module = modules[name]
sys.modules[name] = module # restore the module back
if perform_reload:
with stack_meter as depth:
dprint("reloading", ('╿ '*depth) + '┡━─', name)
try:
return module.__loader__.load_module(name)
except:
if name in sys.modules:
del sys.modules[name] # to indicate an error
raise
else:
if name not in loaded_modules:
dprint("NO RELOAD", '╺━─', name)
return module
with intercepting_imports(module_reloader), \
importing_fromlist_aggresively(modules):
# Now, import all the modules back, in order, starting with the main
# module. This will reload all the modules directly or indirectly
# referenced by the main one, i.e. usually most of our modules.
sublime_plugin.reload_plugin(main.__name__)
# Be sure to bring back *all* the modules that used to be loaded, not
# only these imported through the main one. Otherwise, some of them
# might end up being created from scratch as new module objects in
# case of being imported after detaching the hook. In general, most of
# the imports below (if not all) are no-ops though.
for name in module_names:
importlib.import_module(name)
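Step one of this machinery, evicting the plugin's modules from the cache so the import hook fires, generalizes to any package. A sketch of prefix-based eviction:

import sys

def forget_package(prefix):
    # Remove a package and all of its submodules from sys.modules so the
    # next import executes them from scratch. Iterate over a copy because
    # the mapping is mutated while walking it.
    for name in list(sys.modules):
        if name == prefix or name.startswith(prefix + '.'):
            del sys.modules[name]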
Example 12
Project: stash Source File: pip.py
def fake_setuptools_modules():
"""
Create a bunch of stub setuptools modules
"""
setuptools_modules = [
'setuptools',
'setuptools.command',
'setuptools.command.alias',
'setuptools.command.bdist_egg',
'setuptools.command.bdist_rpm',
'setuptools.command.bdist_wininst',
'setuptools.command.build_ext',
'setuptools.command.build_py',
'setuptools.command.develop',
'setuptools.command.easy_install',
'setuptools.command.egg_info',
'setuptools.command.install',
'setuptools.depends.install_egg_info',
'setuptools.command.install_lib',
'setuptools.command.install_scripts',
'setuptools.command.register',
'setuptools.command.rotate',
'setuptools.command.saveopts',
'setuptools.command.sdist',
'setuptools.command.setopt',
'setuptools.command.test',
'setuptools.command.upload',
'setuptools.command.upload_docs',
'setuptools.extern',
'setuptools.dist',
'setuptools.extension',
'setuptools.launch',
'setuptools.lib2to3_ex',
'setuptools.msvc9_support',
'setuptools.package_index',
'setuptools.py26compat',
'setuptools.py27compat',
'setuptools.py31compat',
'setuptools.sandbox',
'setuptools.site-patch',
'setuptools.ssl_support',
'setuptools.unicode_utils',
'setuptools.utils',
'setuptools.version',
'setuptools.windows_support',
# 'pkg_resources',
# 'pkg_resources.extern',
]
for m in setuptools_modules:
fake_module(m)
# First import importable distutils
import distutils.command
import distutils.core
import distutils.util
distutils_command_modules = [
'distutils.command.bdist',
'distutils.command.bdist_dumb',
'distutils.command.bdist_msi',
'distutils.command.bdist_rpm',
'distutils.command.bdist_wininst',
'distutils.command.build',
'distutils.command.build_clib',
'distutils.command.build_ext',
'distutils.command.build_py',
'distutils.command.build_scripts',
]
for m in distutils_command_modules:
fake_module(m)
sys.modules['distutils.util'].get_platform = OmniClass()
# fix for new problem in issue 169
sys.modules['distutils.command.build_ext'].sub_commands = []
sys.modules['setuptools.command.build_ext'].sub_commands = []
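The fake_module() helper is not shown in this excerpt; a plausible minimal version simply registers an empty ModuleType under the requested name so later imports succeed (hypothetical reconstruction, not the project's actual code):

import sys
import types

def fake_module(name):
    # Hypothetical sketch: an empty stub module is enough to satisfy
    # `import name` for packages whose contents are never called.
    if name not in sys.modules:
        sys.modules[name] = types.ModuleType(name)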
Example 13
Project: rootpy Source File: phantom_import.py
def import_phantom_module(xml_file):
"""
Insert a fake Python module into sys.modules, based on an XML file.
The XML file is expected to conform to Pydocweb DTD. The fake
module will contain dummy objects, which guarantee the following:
- Docstrings are correct.
- Class inheritance relationships are correct (if present in XML).
- Function argspec is *NOT* correct (even if present in XML).
Instead, the function signature is prepended to the function docstring.
- Class attributes are *NOT* correct; instead, they are dummy objects.
Parameters
----------
xml_file : str
Name of an XML file to read
"""
import lxml.etree as etree
object_cache = {}
tree = etree.parse(xml_file)
root = tree.getroot()
# Sort items so that
# - Base classes come before classes inherited from them
# - Modules come before their contents
all_nodes = dict([(n.attrib['id'], n) for n in root])
def _get_bases(node, recurse=False):
bases = [x.attrib['ref'] for x in node.findall('base')]
if recurse:
j = 0
while True:
try:
b = bases[j]
except IndexError: break
if b in all_nodes:
bases.extend(_get_bases(all_nodes[b]))
j += 1
return bases
type_index = ['module', 'class', 'callable', 'object']
def base_cmp(a, b):
x = cmp(type_index.index(a.tag), type_index.index(b.tag))
if x != 0: return x
if a.tag == 'class' and b.tag == 'class':
a_bases = _get_bases(a, recurse=True)
b_bases = _get_bases(b, recurse=True)
x = cmp(len(a_bases), len(b_bases))
if x != 0: return x
if a.attrib['id'] in b_bases: return -1
if b.attrib['id'] in a_bases: return 1
return cmp(a.attrib['id'].count('.'), b.attrib['id'].count('.'))
nodes = root.getchildren()
nodes.sort(base_cmp)
# Create phantom items
for node in nodes:
name = node.attrib['id']
doc = (node.text or '').decode('string-escape') + "\n"
if doc == "\n": doc = ""
# create parent, if missing
parent = name
while True:
parent = '.'.join(parent.split('.')[:-1])
if not parent: break
if parent in object_cache: break
obj = imp.new_module(parent)
object_cache[parent] = obj
sys.modules[parent] = obj
# create object
if node.tag == 'module':
obj = imp.new_module(name)
obj.__doc__ = doc
sys.modules[name] = obj
elif node.tag == 'class':
bases = [object_cache[b] for b in _get_bases(node)
if b in object_cache]
bases.append(object)
init = lambda self: None
init.__doc__ = doc
obj = type(name, tuple(bases), {'__doc__': doc, '__init__': init})
obj.__name__ = name.split('.')[-1]
elif node.tag == 'callable':
funcname = node.attrib['id'].split('.')[-1]
argspec = node.attrib.get('argspec')
if argspec:
argspec = re.sub('^[^(]*', '', argspec)
doc = "%s%s\n\n%s" % (funcname, argspec, doc)
obj = lambda: 0
obj.__argspec_is_invalid_ = True
obj.func_name = funcname
obj.__name__ = name
obj.__doc__ = doc
if inspect.isclass(object_cache[parent]):
obj.__objclass__ = object_cache[parent]
else:
class Dummy(object): pass
obj = Dummy()
obj.__name__ = name
obj.__doc__ = doc
if inspect.isclass(object_cache[parent]):
obj.__get__ = lambda: None
object_cache[name] = obj
if parent:
if inspect.ismodule(object_cache[parent]):
obj.__module__ = parent
setattr(object_cache[parent], name.split('.')[-1], obj)
# Populate items
for node in root:
obj = object_cache.get(node.attrib['id'])
if obj is None: continue
for ref in node.findall('ref'):
if node.tag == 'class':
if ref.attrib['ref'].startswith(node.attrib['id'] + '.'):
setattr(obj, ref.attrib['name'],
object_cache.get(ref.attrib['ref']))
else:
setattr(obj, ref.attrib['name'],
object_cache.get(ref.attrib['ref']))
Example 14
def get_package(server_info, client):
NO_FINGERPRINT = object()
fingerprint = NO_FINGERPRINT
if server_info.is_valid():
fingerprint = server_info.get('fingerprint', fingerprint)
if fingerprint is not None:
try:
try:
if fingerprint is NO_FINGERPRINT:
schema = Schema(client)
else:
schema = Schema(client, fingerprint)
except SchemaUpToDate as e:
schema = Schema(client, e.fingerprint)
except NotAvailable:
fingerprint = None
ttl = None
except SchemaUpToDate as e:
fingerprint = e.fingerprint
ttl = e.ttl
else:
fingerprint = schema.fingerprint
ttl = schema.ttl
server_info['fingerprint'] = fingerprint
server_info.update_validity(ttl)
if fingerprint is None:
raise NotAvailable()
fingerprint = str(fingerprint)
package_name = '{}${}'.format(__name__, fingerprint)
package_dir = '{}${}'.format(os.path.splitext(__file__)[0], fingerprint)
try:
return sys.modules[package_name]
except KeyError:
pass
package = types.ModuleType(package_name)
package.__file__ = os.path.join(package_dir, '__init__.py')
package.modules = ['plugins']
sys.modules[package_name] = package
module_name = '.'.join((package_name, 'plugins'))
module = types.ModuleType(module_name)
module.__file__ = os.path.join(package_dir, 'plugins.py')
module.register = plugable.Registry()
for plugin_cls in (_SchemaCommandPlugin, _SchemaObjectPlugin):
for full_name in schema[plugin_cls.schema_key]:
plugin = plugin_cls(schema, str(full_name))
plugin = module.register()(plugin)
sys.modules[module_name] = module
for full_name, topic in six.iteritems(schema['topics']):
name = str(topic['name'])
module_name = '.'.join((package_name, name))
try:
module = sys.modules[module_name]
except KeyError:
module = sys.modules[module_name] = types.ModuleType(module_name)
module.__file__ = os.path.join(package_dir, '{}.py'.format(name))
module.__doc__ = topic.get('doc')
if 'topic_topic' in topic:
module.topic = str(topic['topic_topic']).partition('/')[0]
else:
module.topic = None
return package
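The pattern here is worth isolating: a package and its submodules are synthesized entirely in memory with types.ModuleType and registered in sys.modules, so later import statements and attribute lookups both work. A minimal sketch with hypothetical names:

import os
import sys
import types

def make_synthetic_package(package_name, package_dir):
    # Build the package object itself.
    package = types.ModuleType(package_name)
    package.__file__ = os.path.join(package_dir, '__init__.py')
    sys.modules[package_name] = package
    # Attach one in-memory submodule.
    child_name = package_name + '.plugins'
    child = types.ModuleType(child_name)
    child.__file__ = os.path.join(package_dir, 'plugins.py')
    sys.modules[child_name] = child
    setattr(package, 'plugins', child)  # make `package.plugins` work too
    return package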
Example 15
Project: pymo Source File: forking.py
def prepare(data):
'''
Try to get current process ready to unpickle process object
'''
old_main_modules.append(sys.modules['__main__'])
if 'name' in data:
process.current_process().name = data['name']
if 'authkey' in data:
process.current_process()._authkey = data['authkey']
if 'log_to_stderr' in data and data['log_to_stderr']:
util.log_to_stderr()
if 'log_level' in data:
util.get_logger().setLevel(data['log_level'])
if 'sys_path' in data:
sys.path = data['sys_path']
if 'sys_argv' in data:
sys.argv = data['sys_argv']
if 'dir' in data:
os.chdir(data['dir'])
if 'orig_dir' in data:
process.ORIGINAL_DIR = data['orig_dir']
if 'main_path' in data:
main_path = data['main_path']
main_name = os.path.splitext(os.path.basename(main_path))[0]
if main_name == '__init__':
main_name = os.path.basename(os.path.dirname(main_path))
if main_name != 'ipython':
import imp
if main_path is None:
dirs = None
elif os.path.basename(main_path).startswith('__init__.py'):
dirs = [os.path.dirname(os.path.dirname(main_path))]
else:
dirs = [os.path.dirname(main_path)]
assert main_name not in sys.modules, main_name
file, path_name, etc = imp.find_module(main_name, dirs)
try:
# We would like to do "imp.load_module('__main__', ...)"
# here. However, that would cause 'if __name__ ==
# "__main__"' clauses to be executed.
main_module = imp.load_module(
'__parents_main__', file, path_name, etc
)
finally:
if file:
file.close()
sys.modules['__main__'] = main_module
main_module.__name__ = '__main__'
# Try to make the potentially picklable objects in
# sys.modules['__main__'] realize they are in the main
# module -- somewhat ugly.
for obj in main_module.__dict__.values():
try:
if obj.__module__ == '__parents_main__':
obj.__module__ = '__main__'
except Exception:
pass
Example 16
Project: ZeroNet Source File: StatsPlugin.py
def actionStats(self):
import gc
import sys
from Ui import UiRequest
from Db import Db
from Crypt import CryptConnection
hpy = None
if self.get.get("size") == "1": # Calc obj size
try:
import guppy
hpy = guppy.hpy()
except:
pass
self.sendHeader()
if "Multiuser" in PluginManager.plugin_manager.plugin_names and not config.multiuser_local:
yield "This function is disabled on this proxy"
raise StopIteration
s = time.time()
main = sys.modules["main"]
# Style
yield """
<style>
* { font-family: monospace }
table td, table th { text-align: right; padding: 0px 10px }
.serving-False { color: gray }
</style>
"""
# Memory
try:
yield "rev%s | " % config.rev
yield "%s | " % config.ip_external
yield "Opened: %s | " % main.file_server.port_opened
yield "Crypt: %s | " % CryptConnection.manager.crypt_supported
yield "In: %.2fMB, Out: %.2fMB | " % (
float(main.file_server.bytes_recv) / 1024 / 1024,
float(main.file_server.bytes_sent) / 1024 / 1024
)
yield "Peerid: %s | " % main.file_server.peer_id
import psutil
process = psutil.Process(os.getpid())
mem = process.get_memory_info()[0] / float(2 ** 20)
yield "Mem: %.2fMB | " % mem
yield "Threads: %s | " % len(process.threads())
yield "CPU: usr %.2fs sys %.2fs | " % process.cpu_times()
yield "Files: %s | " % len(process.open_files())
yield "Sockets: %s | " % len(process.connections())
yield "Calc size <a href='?size=1'>on</a> <a href='?size=0'>off</a>"
except Exception:
pass
yield "<br>"
# Connections
yield "<b>Connections</b> (%s, total made: %s):<br>" % (
len(main.file_server.connections), main.file_server.last_connection_id
)
yield "<table><tr> <th>id</th> <th>proto</th> <th>type</th> <th>ip</th> <th>open</th> <th>crypt</th> <th>ping</th>"
yield "<th>buff</th> <th>bad</th> <th>idle</th> <th>open</th> <th>delay</th> <th>cpu</th> <th>out</th> <th>in</th> <th>last sent</th>"
yield "<th>waiting</th> <th>version</th> <th>sites</th> </tr>"
for connection in main.file_server.connections:
if "cipher" in dir(connection.sock):
cipher = connection.sock.cipher()[0]
else:
cipher = connection.crypt
yield self.formatTableRow([
("%3d", connection.id),
("%s", connection.protocol),
("%s", connection.type),
("%s:%s", (connection.ip, connection.port)),
("%s", connection.handshake.get("port_opened")),
("<span title='%s'>%s</span>", (connection.crypt, cipher)),
("%6.3f", connection.last_ping_delay),
("%s", connection.incomplete_buff_recv),
("%s", connection.bad_actions),
("since", max(connection.last_send_time, connection.last_recv_time)),
("since", connection.start_time),
("%.3f", connection.last_sent_time - connection.last_send_time),
("%.3fs", connection.cpu_time),
("%.0fkB", connection.bytes_sent / 1024),
("%.0fkB", connection.bytes_recv / 1024),
("%s", connection.last_cmd),
("%s", connection.waiting_requests.keys()),
("%s r%s", (connection.handshake.get("version"), connection.handshake.get("rev", "?"))),
("%s", connection.sites)
])
yield "</table>"
# Tor hidden services
yield "<br><br><b>Tor hidden services (status: %s):</b><br>" % main.file_server.tor_manager.status
for site_address, onion in main.file_server.tor_manager.site_onions.items():
yield "- %-34s: %s<br>" % (site_address, onion)
# Db
yield "<br><br><b>Db</b>:<br>"
for db in sys.modules["Db.Db"].opened_dbs:
yield "- %.3fs: %s<br>" % (time.time() - db.last_query_time, db.db_path)
# Sites
yield "<br><br><b>Sites</b>:"
yield "<table>"
yield "<tr><th>address</th> <th>connected</th> <th title='connected/good/total'>peers</th> <th>content.json</th> <th>out</th> <th>in</th> </tr>"
for site in sorted(self.server.sites.values(), lambda a, b: cmp(a.address,b.address)):
yield self.formatTableRow([
(
"""<a href='#' class='serving-%s' onclick='docuement.getElementById("peers_%s").style.display="initial"; return false'>%s</a>""",
(site.settings["serving"], site.address, site.address)
),
("%s", [peer.connection.id for peer in site.peers.values() if peer.connection and peer.connection.connected]),
("%s/%s/%s", (
len([peer for peer in site.peers.values() if peer.connection and peer.connection.connected]),
len(site.getConnectablePeers(100)),
len(site.peers)
)),
("%s (loaded: %s)", (
len(site.content_manager.contents),
len([key for key, val in dict(site.content_manager.contents).iteritems() if val])
)),
("%.0fkB", site.settings.get("bytes_sent", 0) / 1024),
("%.0fkB", site.settings.get("bytes_recv", 0) / 1024),
])
yield "<tr><td id='peers_%s' style='display: none; white-space: pre' colspan=6>" % site.address
for key, peer in site.peers.items():
if peer.time_found:
time_found = int(time.time() - peer.time_found) / 60
else:
time_found = "--"
if peer.connection:
connection_id = peer.connection.id
else:
connection_id = None
if site.content_manager.hashfield:
yield "Optional files: %4s " % len(peer.hashfield)
yield "(#%4s, err: %s, found: %5s min ago) %30s -<br>" % (connection_id, peer.connection_error, time_found, key)
yield "<br></td></tr>"
yield "</table>"
# No more if not in debug mode
if not config.debug:
raise StopIteration
# Object types
obj_count = {}
for obj in gc.get_objects():
obj_type = str(type(obj))
if obj_type not in obj_count:
obj_count[obj_type] = [0, 0]
obj_count[obj_type][0] += 1 # Count
obj_count[obj_type][1] += float(sys.getsizeof(obj)) / 1024 # Size
yield "<br><br><b>Objects in memory (types: %s, total: %s, %.2fkb):</b><br>" % (
len(obj_count),
sum([stat[0] for stat in obj_count.values()]),
sum([stat[1] for stat in obj_count.values()])
)
for obj, stat in sorted(obj_count.items(), key=lambda x: x[1][0], reverse=True): # Sorted by count
yield " - %.1fkb = %s x <a href=\"/Listobj?type=%s\">%s</a><br>" % (stat[1], stat[0], obj, cgi.escape(obj))
# Classes
class_count = {}
for obj in gc.get_objects():
obj_type = str(type(obj))
if obj_type != "<type 'instance'>":
continue
class_name = obj.__class__.__name__
if class_name not in class_count:
class_count[class_name] = [0, 0]
class_count[class_name][0] += 1 # Count
class_count[class_name][1] += float(sys.getsizeof(obj)) / 1024 # Size
yield "<br><br><b>Classes in memory (types: %s, total: %s, %.2fkb):</b><br>" % (
len(class_count),
sum([stat[0] for stat in class_count.values()]),
sum([stat[1] for stat in class_count.values()])
)
for obj, stat in sorted(class_count.items(), key=lambda x: x[1][0], reverse=True): # Sorted by count
yield " - %.1fkb = %s x <a href=\"/Dumpobj?class=%s\">%s</a><br>" % (stat[1], stat[0], obj, cgi.escape(obj))
from greenlet import greenlet
objs = [obj for obj in gc.get_objects() if isinstance(obj, greenlet)]
yield "<br>Greenlets (%s):<br>" % len(objs)
for obj in objs:
yield " - %.1fkb: %s<br>" % (self.getObjSize(obj, hpy), cgi.escape(repr(obj)))
from Worker import Worker
objs = [obj for obj in gc.get_objects() if isinstance(obj, Worker)]
yield "<br>Workers (%s):<br>" % len(objs)
for obj in objs:
yield " - %.1fkb: %s<br>" % (self.getObjSize(obj, hpy), cgi.escape(repr(obj)))
from Connection import Connection
objs = [obj for obj in gc.get_objects() if isinstance(obj, Connection)]
yield "<br>Connections (%s):<br>" % len(objs)
for obj in objs:
yield " - %.1fkb: %s<br>" % (self.getObjSize(obj, hpy), cgi.escape(repr(obj)))
from socket import socket
objs = [obj for obj in gc.get_objects() if isinstance(obj, socket)]
yield "<br>Sockets (%s):<br>" % len(objs)
for obj in objs:
yield " - %.1fkb: %s<br>" % (self.getObjSize(obj, hpy), cgi.escape(repr(obj)))
from msgpack import Unpacker
objs = [obj for obj in gc.get_objects() if isinstance(obj, Unpacker)]
yield "<br>Msgpack unpacker (%s):<br>" % len(objs)
for obj in objs:
yield " - %.1fkb: %s<br>" % (self.getObjSize(obj, hpy), cgi.escape(repr(obj)))
from Site import Site
objs = [obj for obj in gc.get_objects() if isinstance(obj, Site)]
yield "<br>Sites (%s):<br>" % len(objs)
for obj in objs:
yield " - %.1fkb: %s<br>" % (self.getObjSize(obj, hpy), cgi.escape(repr(obj)))
objs = [obj for obj in gc.get_objects() if isinstance(obj, self.server.log.__class__)]
yield "<br>Loggers (%s):<br>" % len(objs)
for obj in objs:
yield " - %.1fkb: %s<br>" % (self.getObjSize(obj, hpy), cgi.escape(repr(obj.name)))
objs = [obj for obj in gc.get_objects() if isinstance(obj, UiRequest)]
yield "<br>UiRequests (%s):<br>" % len(objs)
for obj in objs:
yield " - %.1fkb: %s<br>" % (self.getObjSize(obj, hpy), cgi.escape(repr(obj)))
from Peer import Peer
objs = [obj for obj in gc.get_objects() if isinstance(obj, Peer)]
yield "<br>Peers (%s):<br>" % len(objs)
for obj in objs:
yield " - %.1fkb: %s<br>" % (self.getObjSize(obj, hpy), cgi.escape(repr(obj)))
objs = [(key, val) for key, val in sys.modules.iteritems() if val is not None]
objs.sort()
yield "<br>Modules (%s):<br>" % len(objs)
for module_name, module in objs:
yield " - %.3fkb: %s %s<br>" % (self.getObjSize(module, hpy), module_name, cgi.escape(repr(module)))
gc.collect() # Explicit garbage collection
yield "Done in %.1f" % (time.time() - s)
Example 17
Project: trackpy Source File: phantom_import.py
def import_phantom_module(xml_file):
"""
Insert a fake Python module into sys.modules, based on an XML file.
The XML file is expected to conform to Pydocweb DTD. The fake
module will contain dummy objects, which guarantee the following:
- Docstrings are correct.
- Class inheritance relationships are correct (if present in XML).
- Function argspec is *NOT* correct (even if present in XML).
Instead, the function signature is prepended to the function docstring.
- Class attributes are *NOT* correct; instead, they are dummy objects.
Parameters
----------
xml_file : str
Name of an XML file to read
"""
import lxml.etree as etree
object_cache = {}
tree = etree.parse(xml_file)
root = tree.getroot()
# Sort items so that
# - Base classes come before classes inherited from them
# - Modules come before their contents
all_nodes = dict([(n.attrib['id'], n) for n in root])
def _get_bases(node, recurse=False):
bases = [x.attrib['ref'] for x in node.findall('base')]
if recurse:
j = 0
while True:
try:
b = bases[j]
except IndexError:
break
if b in all_nodes:
bases.extend(_get_bases(all_nodes[b]))
j += 1
return bases
type_index = ['module', 'class', 'callable', 'object']
def base_cmp(a, b):
x = cmp(type_index.index(a.tag), type_index.index(b.tag))
if x != 0:
return x
if a.tag == 'class' and b.tag == 'class':
a_bases = _get_bases(a, recurse=True)
b_bases = _get_bases(b, recurse=True)
x = cmp(len(a_bases), len(b_bases))
if x != 0:
return x
if a.attrib['id'] in b_bases:
return -1
if b.attrib['id'] in a_bases:
return 1
return cmp(a.attrib['id'].count('.'), b.attrib['id'].count('.'))
nodes = root.getchildren()
nodes.sort(base_cmp)
# Create phantom items
for node in nodes:
name = node.attrib['id']
doc = (node.text or '').decode('string-escape') + "\n"
if doc == "\n":
doc = ""
# create parent, if missing
parent = name
while True:
parent = '.'.join(parent.split('.')[:-1])
if not parent:
break
if parent in object_cache:
break
obj = imp.new_module(parent)
object_cache[parent] = obj
sys.modules[parent] = obj
# create object
if node.tag == 'module':
obj = imp.new_module(name)
obj.__doc__ = doc
sys.modules[name] = obj
elif node.tag == 'class':
bases = [object_cache[b] for b in _get_bases(node)
if b in object_cache]
bases.append(object)
init = lambda self: None
init.__doc__ = doc
obj = type(name, tuple(bases), {'__doc__': doc, '__init__': init})
obj.__name__ = name.split('.')[-1]
elif node.tag == 'callable':
funcname = node.attrib['id'].split('.')[-1]
argspec = node.attrib.get('argspec')
if argspec:
argspec = re.sub('^[^(]*', '', argspec)
doc = "%s%s\n\n%s" % (funcname, argspec, doc)
obj = lambda: 0
obj.__argspec_is_invalid_ = True
obj.func_name = funcname
obj.__name__ = name
obj.__doc__ = doc
if inspect.isclass(object_cache[parent]):
obj.__objclass__ = object_cache[parent]
else:
class Dummy(object):
pass
obj = Dummy()
obj.__name__ = name
obj.__doc__ = doc
if inspect.isclass(object_cache[parent]):
obj.__get__ = lambda: None
object_cache[name] = obj
if parent:
if inspect.ismodule(object_cache[parent]):
obj.__module__ = parent
setattr(object_cache[parent], name.split('.')[-1], obj)
# Populate items
for node in root:
obj = object_cache.get(node.attrib['id'])
if obj is None:
continue
for ref in node.findall('ref'):
if node.tag == 'class':
if ref.attrib['ref'].startswith(node.attrib['id'] + '.'):
setattr(obj, ref.attrib['name'],
object_cache.get(ref.attrib['ref']))
else:
setattr(obj, ref.attrib['name'],
object_cache.get(ref.attrib['ref']))
Example 18
Project: firefox-flicks Source File: forking.py
def prepare(data):
'''
Try to get current process ready to unpickle process object
'''
old_main_modules.append(sys.modules['__main__'])
if 'name' in data:
process.current_process().name = data['name']
if 'authkey' in data:
process.current_process()._authkey = data['authkey']
if 'log_to_stderr' in data and data['log_to_stderr']:
util.log_to_stderr()
if 'log_level' in data:
util.get_logger().setLevel(data['log_level'])
if 'sys_path' in data:
sys.path = data['sys_path']
if 'sys_argv' in data:
sys.argv = data['sys_argv']
if 'dir' in data:
os.chdir(data['dir'])
if 'orig_dir' in data:
process.ORIGINAL_DIR = data['orig_dir']
if 'main_path' in data:
main_path = data['main_path']
main_name = os.path.splitext(os.path.basename(main_path))[0]
if main_name == '__init__':
main_name = os.path.basename(os.path.dirname(main_path))
if main_name == '__main__':
main_module = sys.modules['__main__']
main_module.__file__ = main_path
elif main_name != 'ipython':
# Main modules not actually called __main__.py may
# contain additional code that should still be executed
import imp
if main_path is None:
dirs = None
elif os.path.basename(main_path).startswith('__init__.py'):
dirs = [os.path.dirname(os.path.dirname(main_path))]
else:
dirs = [os.path.dirname(main_path)]
assert main_name not in sys.modules, main_name
file, path_name, etc = imp.find_module(main_name, dirs)
try:
# We would like to do "imp.load_module('__main__', ...)"
# here. However, that would cause 'if __name__ ==
# "__main__"' clauses to be executed.
main_module = imp.load_module(
'__parents_main__', file, path_name, etc
)
finally:
if file:
file.close()
sys.modules['__main__'] = main_module
main_module.__name__ = '__main__'
# Try to make the potentially picklable objects in
# sys.modules['__main__'] realize they are in the main
# module -- somewhat ugly.
for obj in main_module.__dict__.values():
try:
if obj.__module__ == '__parents_main__':
obj.__module__ = '__main__'
except Exception:
pass
Example 19
Project: Pyrit Source File: cpyrit.py
def __init__(self):
"""Create a new instance that blocks calls to .enqueue() when more than
the given number of passwords are currently waiting to be scheduled
to the hardware.
"""
self.inqueue = []
self.outqueue = {}
self.workunits = []
self.slices = {}
self.in_idx = self.out_idx = 0
self.cores = []
self.CUDAs = []
self.OpCL = []
self.all = []
self.cv = threading.Condition()
# CUDA
if config.cfg['use_CUDA'] == 'true' and 'cpyrit._cpyrit_cuda' in sys.modules and config.cfg['use_OpenCL'] == 'false':
CUDA = _cpyrit_cuda.listDevices()
for dev_idx, device in enumerate(CUDA):
self.CUDAs.append(CUDACore(queue=self, dev_idx=dev_idx))
# OpenCL
if config.cfg['use_OpenCL'] == 'true' and 'cpyrit._cpyrit_opencl' in sys.modules:
for platform_idx in range(_cpyrit_opencl.numPlatforms):
p = _cpyrit_opencl.OpenCLPlatform(platform_idx)
for dev_idx in range(p.numDevices):
dev = _cpyrit_opencl.OpenCLDevice(platform_idx, dev_idx)
if dev.deviceType in ('GPU', 'ACCELERATOR'):
core = OpenCLCore(self, platform_idx, dev_idx)
self.OpCL.append(core)
# CAL++
if 'cpyrit._cpyrit_calpp' in sys.modules:
for dev_idx, device in enumerate(_cpyrit_calpp.listDevices()):
self.cores.append(CALCore(queue=self, dev_idx=dev_idx))
# CPUs
for i in xrange(util.ncpus):
self.cores.append(CPUCore(queue=self))
# Network
if config.cfg['rpc_server'] == 'true':
for port in xrange(17935, 18000):
try:
ncore = NetworkCore(queue=self, port=port)
except socket.error:
pass
else:
self.ncore_uuid = ncore.uuid
self.cores.append(ncore)
if config.cfg['rpc_announce'] == 'true':
cl = config.cfg['rpc_knownclients'].split(' ')
cl = filter(lambda x: len(x) > 0, map(str.strip, cl))
bcst = config.cfg['rpc_announce_broadcast'] == 'true'
self.announcer = network.NetworkAnnouncer(port=port, \
clients=cl, \
broadcast=bcst)
break
else:
self.ncore_uuid = None
else:
self.ncore_uuid = None
for core in self.cores:
self.all.append(core)
for OCL in self.OpCL:
self.all.append(OCL)
for CD in self.CUDAs:
self.all.append(CD)
Example 20
Project: openshot-qt Source File: project_data.py
def read_legacy_project_file(self, file_path):
"""Attempt to read a legacy version 1.x openshot project file"""
import sys, pickle
from classes.query import File, Track, Clip, Transition
from classes.app import get_app
import openshot
try:
import json
except ImportError:
import simplejson as json
# Get translation method
_ = get_app()._tr
# Append version info
v = openshot.GetVersion()
project_data = {}
project_data["version"] = { "openshot-qt" : info.VERSION,
"libopenshot" : v.ToString() }
# Get FPS from project
from classes.app import get_app
fps = get_app().project.get(["fps"])
fps_float = float(fps["num"]) / float(fps["den"])
# Import legacy openshot classes (from version 1.X)
from classes.legacy.openshot import classes as legacy_classes
from classes.legacy.openshot.classes import project as legacy_project
from classes.legacy.openshot.classes import sequences as legacy_sequences
from classes.legacy.openshot.classes import track as legacy_track
from classes.legacy.openshot.classes import clip as legacy_clip
from classes.legacy.openshot.classes import keyframe as legacy_keyframe
from classes.legacy.openshot.classes import files as legacy_files
from classes.legacy.openshot.classes import transition as legacy_transition
sys.modules['openshot.classes'] = legacy_classes
sys.modules['classes.project'] = legacy_project
sys.modules['classes.sequences'] = legacy_sequences
sys.modules['classes.track'] = legacy_track
sys.modules['classes.clip'] = legacy_clip
sys.modules['classes.keyframe'] = legacy_keyframe
sys.modules['classes.files'] = legacy_files
sys.modules['classes.transition'] = legacy_transition
# Keep track of files that failed to load
failed_files = []
with open(file_path.encode('UTF-8'), 'rb') as f:
try:
# Unpickle legacy openshot project file
v1_data = pickle.load(f, fix_imports=True, encoding="UTF-8")
file_lookup = {}
# Loop through files
for item in v1_data.project_folder.items:
# Is this item a File (i.e. ignore folders)
if isinstance(item, legacy_files.OpenShotFile):
# Create file
try:
clip = openshot.Clip(item.name)
reader = clip.Reader()
file_data = json.loads(reader.Json())
# Determine media type
if file_data["has_video"] and not self.is_image(file_data):
file_data["media_type"] = "video"
elif file_data["has_video"] and self.is_image(file_data):
file_data["media_type"] = "image"
elif file_data["has_audio"] and not file_data["has_video"]:
file_data["media_type"] = "audio"
# Save new file to the project data
file = File()
file.data = file_data
file.save()
# Keep track of new ids and old ids
file_lookup[item.unique_id] = file
except:
# Handle exception quietly
msg = ("%s is not a valid video, audio, or image file." % item.name)
log.error(msg)
failed_files.append(item.name)
# Delete all tracks
track_list = copy.deepcopy(Track.filter())
for track in track_list:
track.delete()
# Create new tracks
track_counter = 0
for legacy_t in reversed(v1_data.sequences[0].tracks):
t = Track()
t.data = {"number": track_counter, "y": 0, "label": legacy_t.name}
t.save()
track_counter += 1
# Loop through clips
track_counter = 0
for sequence in v1_data.sequences:
for track in reversed(sequence.tracks):
for clip in track.clips:
# Get associated file for this clip
if clip.file_object.unique_id in file_lookup.keys():
file = file_lookup[clip.file_object.unique_id]
else:
# Skip missing file
log.info("Skipping importing missing file: %s" % clip.file_object.unique_id)
continue
# Create clip
if (file.data["media_type"] == "video" or file.data["media_type"] == "image"):
# Determine thumb path
thumb_path = os.path.join(info.THUMBNAIL_PATH, "%s.png" % file.data["id"])
else:
# Audio file
thumb_path = os.path.join(info.PATH, "images", "AudioThumbnail.png")
# Get file name
path, filename = os.path.split(file.data["path"])
# Convert path to the correct relative path (based on this folder)
file_path = file.absolute_path()
# Create clip object for this file
c = openshot.Clip(file_path)
# Append missing attributes to Clip JSON
new_clip = json.loads(c.Json())
new_clip["file_id"] = file.id
new_clip["title"] = filename
new_clip["image"] = thumb_path
# Check for optional start and end attributes
new_clip["start"] = clip.start_time
new_clip["end"] = clip.end_time
new_clip["position"] = clip.position_on_track
new_clip["layer"] = track_counter
# Clear alpha (if needed)
if clip.video_fade_in or clip.video_fade_out:
new_clip["alpha"]["Points"] = []
# Video Fade IN
if clip.video_fade_in:
# Add keyframes
start = openshot.Point(clip.start_time * fps_float, 0.0, openshot.BEZIER)
start_object = json.loads(start.Json())
end = openshot.Point((clip.start_time + clip.video_fade_in_amount) * fps_float, 1.0, openshot.BEZIER)
end_object = json.loads(end.Json())
new_clip["alpha"]["Points"].append(start_object)
new_clip["alpha"]["Points"].append(end_object)
# Video Fade OUT
if clip.video_fade_out:
# Add keyframes
start = openshot.Point((clip.end_time - clip.video_fade_out_amount) * fps_float, 1.0, openshot.BEZIER)
start_object = json.loads(start.Json())
end = openshot.Point(clip.end_time * fps_float, 0.0, openshot.BEZIER)
end_object = json.loads(end.Json())
new_clip["alpha"]["Points"].append(start_object)
new_clip["alpha"]["Points"].append(end_object)
# Clear Audio (if needed)
if clip.audio_fade_in or clip.audio_fade_out:
new_clip["volume"]["Points"] = []
else:
p = openshot.Point(1, clip.volume / 100.0, openshot.BEZIER)
p_object = json.loads(p.Json())
new_clip["volume"] = { "Points" : [p_object]}
# Audio Fade IN
if clip.audio_fade_in:
# Add keyframes
start = openshot.Point(clip.start_time * fps_float, 0.0, openshot.BEZIER)
start_object = json.loads(start.Json())
end = openshot.Point((clip.start_time + clip.video_fade_in_amount) * fps_float, clip.volume / 100.0, openshot.BEZIER)
end_object = json.loads(end.Json())
new_clip["volume"]["Points"].append(start_object)
new_clip["volume"]["Points"].append(end_object)
# Audio Fade OUT
if clip.audio_fade_out:
# Add keyframes
start = openshot.Point((clip.end_time - clip.video_fade_out_amount) * fps_float, clip.volume / 100.0, openshot.BEZIER)
start_object = json.loads(start.Json())
end = openshot.Point(clip.end_time * fps_float, 0.0, openshot.BEZIER)
end_object = json.loads(end.Json())
new_clip["volume"]["Points"].append(start_object)
new_clip["volume"]["Points"].append(end_object)
# Save clip
clip_object = Clip()
clip_object.data = new_clip
clip_object.save()
# Loop through transitions
for trans in track.transitions:
# Fix default transition
if not trans.resource or not os.path.exists(trans.resource):
trans.resource = os.path.join(info.PATH, "transitions", "common", "fade.svg")
# Open up QtImageReader for transition Image
transition_reader = openshot.QtImageReader(trans.resource)
trans_begin_value = 1.0
trans_end_value = -1.0
if trans.reverse:
trans_begin_value = -1.0
trans_end_value = 1.0
brightness = openshot.Keyframe()
brightness.AddPoint(1, trans_begin_value, openshot.BEZIER)
brightness.AddPoint(trans.length * fps_float, trans_end_value, openshot.BEZIER)
contrast = openshot.Keyframe(trans.softness * 10.0)
# Create transition dictionary
transitions_data = {
"id": get_app().project.generate_id(),
"layer": track_counter,
"title": "Transition",
"type": "Mask",
"position": trans.position_on_track,
"start": 0,
"end": trans.length,
"brightness": json.loads(brightness.Json()),
"contrast": json.loads(contrast.Json()),
"reader": json.loads(transition_reader.Json()),
"replace_image": False
}
# Save transition
t = Transition()
t.data = transitions_data
t.save()
# Increment track counter
track_counter += 1
except Exception as ex:
# Error parsing legacy contents
msg = _("Failed to load project file %(path)s: %(error)s" % {"path": file_path, "error": ex})
log.error(msg)
raise Exception(msg)
# Show warning if some files failed to load
if failed_files:
# Throw exception
raise Exception(_("Failed to load the following files:\n%s" % ", ".join(failed_files)))
# Return mostly empty project_data dict (with just the current version #)
log.info("Successfully loaded legacy project file: %s" % file_path)
return project_data
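The core sys.modules trick in this example is the aliasing at the top: legacy dotted paths are mapped to the current modules before pickle.load() runs, so the unpickler can resolve classes whose defining modules have since moved. A minimal sketch of the same idea, with hypothetical module names:
import sys
import pickle

# Hypothetical: the project renamed "oldpkg.model" to "newpkg.model".
# Pickles written before the rename still reference the old path.
import newpkg.model as _model
sys.modules['oldpkg.model'] = _model  # alias the legacy import path

with open('legacy.pickle', 'rb') as f:
    obj = pickle.load(f)  # "oldpkg.model.Foo" now resolves via the alias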
2
Example 21
def __set_pointer(self):
# TODO: c_char_p ?
# if the host pointer size is the same as the target's, keep the ctypes pointer function.
if self.sizeof(self.__real_ctypes.c_void_p) == self.__pointersize:
# use the same pointer cache
self._pointer_type_cache = self.__real_ctypes._pointer_type_cache
# see __init__
# pylint: disable=access-member-before-definition
self.__ptrt = self.POINTER(self.c_byte).__bases__[0]
return
# get the replacement type.
if self.__pointersize == 4:
replacement_type = self.__real_ctypes.c_uint32
replacement_type_char = self.__real_ctypes.c_uint32._type_
elif self.__pointersize == 8:
replacement_type = self.__real_ctypes.c_uint64
replacement_type_char = self.__real_ctypes.c_uint64._type_
else:
raise NotImplementedError('pointer size of %d is not handled' % self.__pointersize)
POINTERSIZE = self.__pointersize
# required to access _ctypes
import _ctypes
# Emulate a pointer class using the appropriate c_int32/c_int64 type
# The new class should have :
# ['__module__', 'from_param', '_type_', '__dict__', '__weakref__', '__doc__']
my_ctypes = self
# special class for c_void_p
class _T_Simple(_ctypes._SimpleCData,):
_type_ = replacement_type_char
@property
def _sub_addr_(myself):
return myself.value
def __init__(myself, value):
myself.value = value
def __repr__(myself):
return '%s(%d)' % (type(myself).__name__, myself.value)
self._T_Simple = _T_Simple
def POINTER_T(pointee):
if pointee in my_ctypes._pointer_type_cache:
return my_ctypes._pointer_type_cache[pointee]
# specific case for c_void_p
subtype = pointee
if pointee is None: # VOID pointer type. c_void_p.
clsname = 'LP_%d_c_void_p' % POINTERSIZE
_class = type(clsname, (_T_Simple,), {})
_class._subtype_ = type(None)
my_ctypes._pointer_type_cache[pointee] = _class
# additionally register this type in this module for pickling
setattr(sys.modules[__name__], clsname, _class)
return _class
clsname = pointee.__name__
# template that creates a PointerType to pointee (clsname *)
# we have to fake the size of the structure to
# replacement_type_char's size.
# so we replace _type_ with the fake type of the expected size.
# and we had _subtype_ that will be queried by our helper
# functions.
class _T(_T_Simple,):
_subtype_ = subtype # could use _pointer_type_cache
def __repr__(myself):
return '%s(%d)' % (type(myself).__name__, myself.value)
@property
def contents(myself):
return myself._subtype_.from_address(myself.value)
# raise TypeError('This is not a ctypes pointer.')
def __init__(myself, _value=None):
if _value is None:
myself.value = 0
return
if not isinstance(_value, subtype):
raise TypeError('%s expected, not %s' % (subtype, type(_value)))
myself.value = my_ctypes.addressof(_value)
# raise TypeError('This is not a ctypes pointer.')
_class = type('LP_%d_%s' % (POINTERSIZE, clsname), (_T,), {})
my_ctypes._pointer_type_cache[pointee] = _class
# additionally register this type in this module for pickling
setattr(sys.modules[__name__], clsname, _class)
return _class
# end of POINTER_T
self.POINTER = POINTER_T
self.__ptrt = self._T_Simple
self._pointer_type_cache.clear()
self.c_void_p = self.POINTER(None)
# c_void_p is a simple type
# self.c_void_p = type('c_void_p', (_T_Simple,),{})
# other are different
self.c_char_p = self.POINTER(self.c_char)
self.c_wchar_p = self.POINTER(self.c_wchar)
setattr(sys.modules[__name__], 'c_void_p', self.c_void_p)
setattr(sys.modules[__name__], 'c_char_p', self.c_char_p)
setattr(sys.modules[__name__], 'c_wchar_p', self.c_wchar_p)
# set the casting function
self.cast = self.__cast
return
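The repeated setattr(sys.modules[__name__], clsname, _class) calls above exist because pickle can only serialize instances of classes that are reachable as module-level attributes. A stripped-down sketch of that registration trick (the class built here is purely illustrative):
import sys

def make_point_class(dims):
    # Build a class dynamically, then publish it on this module so
    # pickle can later resolve it by its qualified name.
    clsname = 'Point%dD' % dims
    cls = type(clsname, (object,), {'dims': dims})
    setattr(sys.modules[__name__], clsname, cls)
    return cls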
2
Example 22
Project: TrustRouter Source File: forking.py
def prepare(data):
'''
Try to get current process ready to unpickle process object
'''
old_main_modules.append(sys.modules['__main__'])
if 'name' in data:
process.current_process().name = data['name']
if 'authkey' in data:
process.current_process()._authkey = data['authkey']
if 'log_to_stderr' in data and data['log_to_stderr']:
util.log_to_stderr()
if 'log_level' in data:
util.get_logger().setLevel(data['log_level'])
if 'sys_path' in data:
sys.path = data['sys_path']
if 'sys_argv' in data:
sys.argv = data['sys_argv']
if 'dir' in data:
os.chdir(data['dir'])
if 'orig_dir' in data:
process.ORIGINAL_DIR = data['orig_dir']
if 'main_path' in data:
# XXX (ncoghlan): The following code makes several bogus
# assumptions regarding the relationship between __file__
# and a module's real name. See PEP 302 and issue #10845
main_path = data['main_path']
main_name = os.path.splitext(os.path.basename(main_path))[0]
if main_name == '__init__':
main_name = os.path.basename(os.path.dirname(main_path))
if main_name == '__main__':
main_module = sys.modules['__main__']
main_module.__file__ = main_path
elif main_name != 'ipython':
# Main modules not actually called __main__.py may
# contain additional code that should still be executed
import imp
if main_path is None:
dirs = None
elif os.path.basename(main_path).startswith('__init__.py'):
dirs = [os.path.dirname(os.path.dirname(main_path))]
else:
dirs = [os.path.dirname(main_path)]
assert main_name not in sys.modules, main_name
file, path_name, etc = imp.find_module(main_name, dirs)
try:
# We would like to do "imp.load_module('__main__', ...)"
# here. However, that would cause 'if __name__ ==
# "__main__"' clauses to be executed.
main_module = imp.load_module(
'__parents_main__', file, path_name, etc
)
finally:
if file:
file.close()
sys.modules['__main__'] = main_module
main_module.__name__ = '__main__'
# Try to make the potentially picklable objects in
# sys.modules['__main__'] realize they are in the main
# module -- somewhat ugly.
for obj in list(main_module.__dict__.values()):
try:
if obj.__module__ == '__parents_main__':
obj.__module__ = '__main__'
except Exception:
pass
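The sys.modules manipulation that matters here is the final reassignment of sys.modules['__main__'], which lets objects pickled against the parent process's main module unpickle correctly in the child. A much-reduced sketch of that step, using importlib instead of the long-deprecated imp shown above (the module name is hypothetical):
import sys
import importlib

def install_main(module_name):
    # Import the real main module under its own name, then alias it as
    # '__main__' so pickled references like __main__.Foo resolve here.
    mod = importlib.import_module(module_name)
    sys.modules['__main__'] = mod
    mod.__name__ = '__main__'
    return mod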
2
Example 23
Project: PokemonGo-Bot-Desktop Source File: forking.py
def prepare(data):
'''
Try to get current process ready to unpickle process object
'''
old_main_modules.append(sys.modules['__main__'])
if 'name' in data:
process.current_process().name = data['name']
if 'authkey' in data:
process.current_process()._authkey = data['authkey']
if 'log_to_stderr' in data and data['log_to_stderr']:
util.log_to_stderr()
if 'log_level' in data:
util.get_logger().setLevel(data['log_level'])
if 'sys_path' in data:
sys.path = data['sys_path']
if 'sys_argv' in data:
sys.argv = data['sys_argv']
if 'dir' in data:
os.chdir(data['dir'])
if 'orig_dir' in data:
process.ORIGINAL_DIR = data['orig_dir']
if 'main_path' in data:
# XXX (ncoghlan): The following code makes several bogus
# assumptions regarding the relationship between __file__
# and a module's real name. See PEP 302 and issue #10845
# The problem is resolved properly in Python 3.4+, as
# described in issue #19946
main_path = data['main_path']
main_name = os.path.splitext(os.path.basename(main_path))[0]
if main_name == '__init__':
main_name = os.path.basename(os.path.dirname(main_path))
if main_name == '__main__':
# For directory and zipfile execution, we assume an implicit
# "if __name__ == '__main__':" around the module, and don't
# rerun the main module code in spawned processes
main_module = sys.modules['__main__']
main_module.__file__ = main_path
elif main_name != 'ipython':
# Main modules not actually called __main__.py may
# contain additional code that should still be executed
import imp
if main_path is None:
dirs = None
elif os.path.basename(main_path).startswith('__init__.py'):
dirs = [os.path.dirname(os.path.dirname(main_path))]
else:
dirs = [os.path.dirname(main_path)]
assert main_name not in sys.modules, main_name
file, path_name, etc = imp.find_module(main_name, dirs)
try:
# We would like to do "imp.load_module('__main__', ...)"
# here. However, that would cause 'if __name__ ==
# "__main__"' clauses to be executed.
main_module = imp.load_module(
'__parents_main__', file, path_name, etc
)
finally:
if file:
file.close()
sys.modules['__main__'] = main_module
main_module.__name__ = '__main__'
# Try to make the potentially picklable objects in
# sys.modules['__main__'] realize they are in the main
# module -- somewhat ugly.
for obj in main_module.__dict__.values():
try:
if obj.__module__ == '__parents_main__':
obj.__module__ = '__main__'
except Exception:
pass
2
Example 24
Project: compoundpi Source File: binding_helper.py
def _load_pyside(required_modules, optional_modules):
os.environ['QT_API'] = 'pyside'
for module_name in required_modules:
_named_import('PySide.%s' % module_name)
for module_name in optional_modules:
_named_optional_import('PySide.%s' % module_name)
# set some names for compatibility with PyQt4
sys.modules['PySide.QtCore'].pyqtSignal = sys.modules['PySide.QtCore'].Signal
sys.modules['PySide.QtCore'].pyqtSlot = sys.modules['PySide.QtCore'].Slot
sys.modules['PySide.QtCore'].pyqtProperty = sys.modules['PySide.QtCore'].Property
try:
import PySideQwt
_register_binding_module('Qwt', PySideQwt)
except ImportError:
pass
global _loadUi
def _loadUi(uifile, baseinstance=None, custom_widgets=None):
from PySide.QtUiTools import QUiLoader
from PySide.QtCore import QMetaObject
from PySide.QtGui import QDialog
class CustomUiLoader(QUiLoader):
class_aliases = {
'Line': 'QFrame',
}
def __init__(self, baseinstance=None, custom_widgets=None):
super(CustomUiLoader, self).__init__(baseinstance)
self._base_instance = baseinstance
self._custom_widgets = custom_widgets or {}
def createWidget(self, class_name, parent=None, name=''):
# don't create the top-level widget, if a base instance is set
if self._base_instance and not parent:
return self._base_instance
if class_name in self._custom_widgets:
widget = self._custom_widgets[class_name](parent)
else:
widget = QUiLoader.createWidget(self, class_name, parent, name)
if str(type(widget)).find(self.class_aliases.get(class_name, class_name)) < 0:
sys.modules['QtCore'].qDebug(
str('PySide.loadUi(): could not find widget '
'class "%s", defaulting to "%s"' % (
class_name, type(widget)))
)
if self._base_instance:
setattr(self._base_instance, name, widget)
return widget
loader = CustomUiLoader(baseinstance)
custom_widgets = custom_widgets or {}
for custom_widget in custom_widgets.values():
loader.registerCustomWidget(custom_widget)
ui = loader.load(uifile)
QMetaObject.connectSlotsByName(ui)
# Workaround: PySide doesn't automatically center dialogs on their
# parent windows
if isinstance(baseinstance, QDialog) and (baseinstance.parentWidget() is not None):
r = baseinstance.frameGeometry()
r.moveCenter(baseinstance.parentWidget().frameGeometry().center())
baseinstance.move(r.topLeft())
return ui
import PySide
return PySide.__version__
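The three pyqtSignal/pyqtSlot/pyqtProperty assignments show that entries in sys.modules are ordinary module objects that can be monkey-patched in place, giving compatibility aliases to any code that imports them later. The same move in isolation (the alias name is made up for illustration):
import sys
import json

# Any later "from json import to_string" now succeeds, because the
# attribute was attached to the single shared module object.
sys.modules['json'].to_string = json.dumps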
2
Example 25
Project: saga-python Source File: engine.py
def _load_adaptors (self, inject_registry=None):
""" Try to load all adaptors that are registered in
saga.engine.registry.py. This method is called from the
constructor. As Engine is a singleton, this method is
called once after the module is first loaded in any python
application.
:param inject_registry: Inject a fake registry. *For unit tests only*.
"""
# get the engine config options
global_config = ruc.getConfig('saga')
engine_config = global_config.get_category('saga.engine')
saga_adaptor_path = engine_config['adaptor_path'].get_value()
# get the list of adaptors to load
registry = saga.engine.registry.adaptor_registry
# add the list of modpaths found in the config options
for path in saga_adaptor_path.split(':'):
if path:
self._logger.debug ("adding adaptor path: '%s'" % path)
registry.append(path)
self._logger.debug ("listing adaptor registry: %s" % registry)
# check if some unit test wants to use a special registry. If
# so, we reset cpi infos from the earlier singleton creation.
if inject_registry != None :
self._adaptor_registry = {}
registry = inject_registry
# attempt to load all registered modules
for module_name in registry:
self._logger.info ("loading adaptor %s" % module_name)
# first, import the module
adaptor_module = None
try :
adaptor_module = __import__ (module_name, fromlist=['Adaptor'])
except Exception as e:
self._logger.warn ("Skipping adaptor %s 1: module loading failed: %s" % (module_name, e))
continue # skip to next adaptor
# we expect the module to have an 'Adaptor' class
# implemented, which, on calling 'register()', returns
# an info dict for all implemented adaptor classes.
adaptor_instance = None
adaptor_info = None
try:
adaptor_instance = adaptor_module.Adaptor ()
adaptor_info = adaptor_instance.register ()
except se.SagaException as e:
self._logger.warn ("Skipping adaptor %s: loading failed: '%s'" % (module_name, e))
continue # skip to next adaptor
except Exception as e:
self._logger.warn ("Skipping adaptor %s: loading failed: '%s'" % (module_name, e))
continue # skip to next adaptor
# the adaptor must also provide a sanity_check() method, which should
# be used to confirm that the adaptor can function properly in the
# current runtime environment (e.g., that all pre-requisites and
# system dependencies are met).
try:
adaptor_instance.sanity_check ()
except Exception as e:
self._logger.warn ("Skipping adaptor %s: failed self test: %s" % (module_name, e))
continue # skip to next adaptor
# check if we have a valid adaptor_info
if adaptor_info is None :
self._logger.warning ("Skipping adaptor %s: adaptor meta data are invalid" \
% module_name)
continue # skip to next adaptor
if not 'name' in adaptor_info or \
not 'cpis' in adaptor_info or \
not 'version' in adaptor_info or \
not 'schemas' in adaptor_info :
self._logger.warning ("Skipping adaptor %s: adaptor meta data are incomplete" \
% module_name)
continue # skip to next adaptor
adaptor_name = adaptor_info['name']
adaptor_version = adaptor_info['version']
adaptor_schemas = adaptor_info['schemas']
adaptor_enabled = True # default unless disabled by 'enabled' option or version filter
# disable adaptors in 'alpha' or 'beta' versions -- unless
# the 'load_beta_adaptors' config option is set to True
if not self._cfg['load_beta_adaptors'].get_value () :
if 'alpha' in adaptor_version.lower() or \
'beta' in adaptor_version.lower() :
self._logger.warn ("Skipping adaptor %s: beta versions are disabled (%s)" \
% (module_name, adaptor_version))
continue # skip to next adaptor
# get the 'enabled' option in the adaptor's config
# section (saga.cpi.base ensures that the option exists,
# if it is initialized correctly in the adaptor class.)
adaptor_config = None
adaptor_enabled = False
try :
adaptor_config = global_config.get_category (adaptor_name)
adaptor_enabled = adaptor_config['enabled'].get_value ()
except se.SagaException as e:
self._logger.warn ("Skipping adaptor %s: initialization failed: %s" % (module_name, e))
continue # skip to next adaptor
except Exception as e:
self._logger.warn ("Skipping adaptor %s: initialization failed: %s" % (module_name, e))
continue # skip to next adaptor
# only load adaptor if it is not disabled via config files
if adaptor_enabled == False :
self._logger.info ("Skipping adaptor %s: 'enabled' set to False" \
% (module_name))
continue # skip to next adaptor
# check if the adaptor has anything to register
if 0 == len (adaptor_info['cpis']) :
self._logger.warn ("Skipping adaptor %s: does not register any cpis" \
% (module_name))
continue # skip to next adaptor
# we got an enabled adaptor with valid info - yay! We can
# now register all adaptor classes (cpi implementations).
for cpi_info in adaptor_info['cpis'] :
# check cpi information details for completeness
if not 'type' in cpi_info or \
not 'class' in cpi_info :
self._logger.info ("Skipping adaptor %s cpi: cpi info detail is incomplete" \
% (module_name))
continue # skip to next cpi info
# adaptor classes are registered for specific API types.
cpi_type = cpi_info['type']
cpi_cname = cpi_info['class']
cpi_class = None
try :
cpi_class = getattr (adaptor_module, cpi_cname)
except Exception as e:
# this exception likely means that the adaptor does
# not call the saga.adaptors.Base initializer (correctly)
self._logger.warning ("Skipping adaptor %s: adaptor class invalid %s: %s" \
% (module_name, cpi_info['class'], str(e)))
continue # skip to next adaptor
# make sure the cpi class is a valid cpi for the given type.
# We walk through the list of known modules, and try to find
# a module which could have that class. We do the following
# tests:
#
# cpi_class: ShellJobService
# cpi_type: saga.job.Service
# modules: saga.adaptors.cpi.job
# modules: saga.adaptors.cpi.job.service
# classes: saga.adaptors.cpi.job.Service
# classes: saga.adaptors.cpi.job.service.Service
#
# cpi_class: X509Context
# cpi_type: saga.Context
# modules: saga.adaptors.cpi.context
# classes: saga.adaptors.cpi.context.Context
#
# So, we add a 'adaptors.cpi' after the 'saga' namespace
# element, then append the rest of the given namespace. If that
# gives a module which has the requested class, fine -- if not,
# we add a lower cased version of the class name as last
# namespace element, and check again.
# -> saga . job . Service
# <- ['saga', 'job', 'Service']
cpi_type_nselems = cpi_type.split ('.')
if len(cpi_type_nselems) < 2 or \
len(cpi_type_nselems) > 3 :
self._logger.warn ("Skipping adaptor %s: cpi type not valid: '%s'" \
% (module_name, cpi_type))
continue # skip to next cpi info
if cpi_type_nselems[0] != 'saga' :
self._logger.warn ("Skipping adaptor %s: cpi namespace not valid: '%s'" \
% (module_name, cpi_type))
continue # skip to next cpi info
# -> ['saga', 'job', 'Service']
# <- ['saga', 'adaptors', 'cpi', 'job', 'Service']
cpi_type_nselems.insert (1, 'adaptors')
cpi_type_nselems.insert (2, 'cpi')
# -> ['saga', 'adaptors', 'cpi', 'job', 'Service']
# <- ['saga', 'adaptors', 'cpi', 'job'], 'Service'
cpi_type_cname = cpi_type_nselems.pop ()
# -> ['saga', 'adaptors', 'cpi', 'job'], 'Service'
# <- 'saga.adaptors.cpi.job
# <- 'saga.adaptors.cpi.job.service
cpi_type_modname_1 = '.'.join (cpi_type_nselems)
cpi_type_modname_2 = '.'.join (cpi_type_nselems + [cpi_type_cname.lower()])
# does either module exist?
cpi_type_modname = None
if cpi_type_modname_1 in sys.modules :
cpi_type_modname = cpi_type_modname_1
if cpi_type_modname_2 in sys.modules :
cpi_type_modname = cpi_type_modname_2
if not cpi_type_modname :
self._logger.warn ("Skipping adaptor %s: cpi type not known: '%s'" \
% (module_name, cpi_type))
continue # skip to next cpi info
# so, make sure the given cpi is actually
# implemented by the adaptor class
cpi_ok = False
for name, cpi_obj in inspect.getmembers (sys.modules[cpi_type_modname]) :
if name == cpi_type_cname and \
inspect.isclass (cpi_obj) :
if issubclass (cpi_class, cpi_obj) :
cpi_ok = True
if not cpi_ok :
self._logger.warn ("Skipping adaptor %s: doesn't implement cpi '%s (%s)'" \
% (module_name, cpi_class, cpi_type))
continue # skip to next cpi info
# finally, register the cpi for all its schemas!
registered_schemas = list()
for adaptor_schema in adaptor_schemas:
adaptor_schema = adaptor_schema.lower ()
# make sure we can register that cpi type
if not cpi_type in self._adaptor_registry :
self._adaptor_registry[cpi_type] = {}
# make sure we can register that schema
if not adaptor_schema in self._adaptor_registry[cpi_type] :
self._adaptor_registry[cpi_type][adaptor_schema] = []
# we register the cpi class, so that we can create
# instances as needed, and the adaptor instance,
# as that is passed to the cpi class c'tor later
# on (the adaptor instance is used to share state
# between cpi instances, amongst others)
info = {'cpi_cname' : cpi_cname,
'cpi_class' : cpi_class,
'adaptor_name' : adaptor_name,
'adaptor_instance' : adaptor_instance}
# make sure this tuple was not registered, yet
if info in self._adaptor_registry[cpi_type][adaptor_schema] :
self._logger.warn ("Skipping adaptor %s: already registered '%s - %s'" \
% (module_name, cpi_class, adaptor_instance))
continue # skip to next cpi info
self._adaptor_registry[cpi_type][adaptor_schema].append(info)
registered_schemas.append(str("%s://" % adaptor_schema))
self._logger.info("Register adaptor %s for %s API with URL scheme(s) %s" %
(module_name,
cpi_type,
registered_schemas))
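Among everything else this loader does, its use of sys.modules is the interesting part: it treats sys.modules as a registry of already-imported CPI modules, then inspects the matching module to verify that an adaptor class really subclasses the expected interface. That check, condensed (names are hypothetical):
import sys
import inspect

def implements(candidate_cls, modname, base_name):
    # Only consult modules that are already imported; never import here.
    mod = sys.modules.get(modname)
    if mod is None:
        return False
    base = getattr(mod, base_name, None)
    return inspect.isclass(base) and issubclass(candidate_cls, base)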
2
Example 26
def create_extension_list(patterns, exclude=None, ctx=None, aliases=None, quiet=False, language=None,
exclude_failures=False):
if language is not None:
print('Please put "# distutils: language=%s" in your .pyx or .pxd file(s)' % language)
if exclude is None:
exclude = []
if patterns is None:
return [], {}
elif isinstance(patterns, basestring) or not isinstance(patterns, collections.Iterable):
patterns = [patterns]
explicit_modules = set([m.name for m in patterns if isinstance(m, Extension)])
seen = set()
deps = create_dependency_tree(ctx, quiet=quiet)
to_exclude = set()
if not isinstance(exclude, list):
exclude = [exclude]
for pattern in exclude:
to_exclude.update(map(os.path.abspath, extended_iglob(pattern)))
module_list = []
module_metadata = {}
# workaround for setuptools
if 'setuptools' in sys.modules:
Extension_distutils = sys.modules['setuptools.extension']._Extension
Extension_setuptools = sys.modules['setuptools'].Extension
else:
# dummy class, in case we do not have setuptools
Extension_distutils = Extension
class Extension_setuptools(Extension): pass
for pattern in patterns:
if isinstance(pattern, str):
filepattern = pattern
template = None
name = '*'
base = None
exn_type = Extension
ext_language = language
elif isinstance(pattern, (Extension_distutils, Extension_setuptools)):
for filepattern in pattern.sources:
if os.path.splitext(filepattern)[1] in ('.py', '.pyx'):
break
else:
# ignore non-cython modules
module_list.append(pattern)
continue
template = pattern
name = template.name
base = DistutilsInfo(exn=template)
exn_type = template.__class__
ext_language = None # do not override whatever the Extension says
else:
msg = str("pattern is not of type str nor subclass of Extension (%s)"
" but of type %s and class %s" % (repr(Extension),
type(pattern),
pattern.__class__))
raise TypeError(msg)
for file in nonempty(sorted(extended_iglob(filepattern)), "'%s' doesn't match any files" % filepattern):
if os.path.abspath(file) in to_exclude:
continue
pkg = deps.package(file)
module_name = deps.fully_qualified_name(file)
if '*' in name:
if module_name in explicit_modules:
continue
elif name != module_name:
print("Warning: Extension name '%s' does not match fully qualified name '%s' of '%s'" % (
name, module_name, file))
module_name = name
if module_name not in seen:
try:
kwds = deps.distutils_info(file, aliases, base).values
except Exception:
if exclude_failures:
continue
raise
if base is not None:
for key, value in base.values.items():
if key not in kwds:
kwds[key] = value
sources = [file]
if template is not None:
sources += [m for m in template.sources if m != filepattern]
if 'sources' in kwds:
# allow users to add .c files etc.
for source in kwds['sources']:
source = encode_filename_in_py2(source)
if source not in sources:
sources.append(source)
extra_sources = kwds['sources']
del kwds['sources']
else:
extra_sources = None
if 'depends' in kwds:
depends = resolve_depends(kwds['depends'], (kwds.get('include_dirs') or []) + ["."])
if template is not None:
# Always include everything from the template.
depends = set(template.depends).union(depends)
# Sort depends to make the metadata dump in the
# Cython-generated C code predictable.
kwds['depends'] = sorted(depends)
if ext_language and 'language' not in kwds:
kwds['language'] = ext_language
module_list.append(exn_type(
name=module_name,
sources=sources,
**kwds))
if extra_sources:
kwds['sources'] = extra_sources
module_metadata[module_name] = {'distutils': kwds, 'module_name': module_name}
m = module_list[-1]
if file not in m.sources:
# Old setuptools unconditionally replaces .pyx with .c
m.sources.remove(file.rsplit('.')[0] + '.c')
m.sources.insert(0, file)
seen.add(name)
return module_list, module_metadata
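The "'setuptools' in sys.modules" test near the top is a standard way to adapt to setuptools without importing it yourself: presence in sys.modules means someone else already activated it. A minimal sketch of that probe:
import sys

if 'setuptools' in sys.modules:
    # setuptools is active; reuse its Extension class
    Extension = sys.modules['setuptools'].Extension
else:
    # fall back to distutils (available on Pythons before 3.12)
    from distutils.core import Extension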
2
Example 27
def test_frozen(self):
with captured_stdout() as stdout:
try:
import __hello__
except ImportError as x:
self.fail("import __hello__ failed:" + str(x))
self.assertEqual(__hello__.initialized, True)
expect = set(self.module_attrs)
expect.add('initialized')
self.assertEqual(set(dir(__hello__)), expect)
self.assertEqual(stdout.getvalue(), 'Hello world!\n')
with captured_stdout() as stdout:
try:
import __phello__
except ImportError as x:
self.fail("import __phello__ failed:" + str(x))
self.assertEqual(__phello__.initialized, True)
expect = set(self.package_attrs)
expect.add('initialized')
if not "__phello__.spam" in sys.modules:
self.assertEqual(set(dir(__phello__)), expect)
else:
expect.add('spam')
self.assertEqual(set(dir(__phello__)), expect)
self.assertEqual(__phello__.__path__, [__phello__.__name__])
self.assertEqual(stdout.getvalue(), 'Hello world!\n')
with captured_stdout() as stdout:
try:
import __phello__.spam
except ImportError as x:
self.fail("import __phello__.spam failed:" + str(x))
self.assertEqual(__phello__.spam.initialized, True)
spam_expect = set(self.module_attrs)
spam_expect.add('initialized')
self.assertEqual(set(dir(__phello__.spam)), spam_expect)
phello_expect = set(self.package_attrs)
phello_expect.add('initialized')
phello_expect.add('spam')
self.assertEqual(set(dir(__phello__)), phello_expect)
self.assertEqual(stdout.getvalue(), 'Hello world!\n')
try:
import __phello__.foo
except ImportError:
pass
else:
self.fail("import __phello__.foo should have failed")
try:
import __phello__.foo
except ImportError:
pass
else:
self.fail("import __phello__.foo should have failed")
del sys.modules['__hello__']
del sys.modules['__phello__']
del sys.modules['__phello__.spam']
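The closing del statements illustrate the usual test-suite reset: removing an entry from sys.modules forces the next import to execute the module body again. A small helper capturing the idea:
import sys
import importlib

def fresh_import(name):
    # Drop any cached copy so the module is executed from scratch.
    sys.modules.pop(name, None)
    return importlib.import_module(name)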
2
Example 28
def print_location(proc_obj):
"""Show where we are. GUI's and front-end interfaces often
use this to update displays. So it is helpful to make sure
we give at least some place that's located in a file.
"""
i_stack = proc_obj.curindex
if i_stack is None or proc_obj.stack is None:
return False
core_obj = proc_obj.core
dbgr_obj = proc_obj.debugger
intf_obj = dbgr_obj.intf[-1]
# Evaluation routines like "exec" don't show useful location
# info. In these cases, we will use the position before that in
# the stack. Hence the looping below, which in practice loops
# once and sometimes twice.
remapped_file = None
while i_stack >= 0:
frame_lineno = proc_obj.stack[i_stack]
i_stack -= 1
frame, lineno = frame_lineno
# # Next check to see that local variable breadcrumb exists and
# # has the magic dynamic value.
# # If so, it's us and we don't normally show this.
# if 'breadcrumb' in frame.f_locals:
# if self.run == frame.f_locals['breadcrumb']:
# break
filename = Mstack.frame2file(core_obj, frame, canonic=False)
if '<string>' == filename and dbgr_obj.eval_string:
remapped_file = filename
filename = pyficache.unmap_file(filename)
if '<string>' == filename:
remapped = cmdfns.source_tempfile_remap('eval_string',
dbgr_obj.eval_string)
pyficache.remap_file(filename, remapped)
filename = remapped
pass
pass
else:
m = re.search('^<frozen (.*)>', filename)
if m and m.group(1) in pyficache.file2file_remap:
remapped_file = pyficache.file2file_remap[m.group(1)]
pass
elif filename in pyficache.file2file_remap:
remapped_file = pyficache.unmap_file(filename)
# FIXME: a remapped_file shouldn't be the same as its unmapped version
if remapped_file == filename:
remapped_file = None
pass
pass
elif m and m.group(1) in sys.modules:
remapped_file = m.group(1)
pyficache.remap_file(filename, remapped_file)
pass
code = frame.f_code
fn_name = code.co_name
last_i = frame.f_lasti
opts = {
'reload_on_change' : proc_obj.settings('reload'),
'output' : proc_obj.settings('highlight')
}
if 'style' in proc_obj.debugger.settings:
opts['style'] = proc_obj.settings('style')
line = pyficache.getline(filename, lineno, opts)
if not line:
line = linecache.getline(filename, lineno,
proc_obj.curframe.f_globals)
if not line:
m = re.search('^<frozen (.*)>', filename)
if m and m.group(1):
remapped_file = m.group(1)
try_module = sys.modules.get(remapped_file)
if (try_module and inspect.ismodule(try_module) and
hasattr(try_module, '__file__')):
remapped_file = sys.modules[remapped_file].__file__
pyficache.remap_file(filename, remapped_file)
line = linecache.getline(remapped_file, lineno,
proc_obj.curframe.f_globals)
else:
remapped_file = m.group(1)
filename, line = cmdfns.deparse_getline(code, remapped_file,
lineno, opts)
pass
pass
print_source_location_info(intf_obj.msg, filename, lineno, fn_name,
remapped_file = remapped_file,
f_lasti = last_i)
if line and len(line.strip()) != 0:
if proc_obj.event:
print_source_line(intf_obj.msg, lineno, line,
proc_obj.event2short[proc_obj.event])
pass
if '<string>' != filename: break
pass
if proc_obj.event in ['return', 'exception']:
val = proc_obj.event_arg
intf_obj.msg('R=> %s' % proc_obj._saferepr(val))
pass
return True
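For '<frozen name>' frames, the code above recovers a usable path by looking the name up in sys.modules and reading the module's __file__. That lookup on its own:
import sys

def module_file(modname):
    # Return the source path of an already-imported module, or None.
    mod = sys.modules.get(modname)
    return getattr(mod, '__file__', None)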
2
Example 29
Project: django-mako-plus Source File: router.py
def route_request(request):
'''The main router for all calls coming in to the system.'''
# output the variables so the programmer can debug where this is routing
if log.isEnabledFor(logging.INFO):
log.info('processing: app=%s, page=%s, func=%s, urlparams=%s' % (request.dmp_router_app, request.dmp_router_page, request.dmp_router_function, request.urlparams))
# set the full function location
request.dmp_router_module = '.'.join([ request.dmp_router_app, 'views', request.dmp_router_page ])
# first try going to the view function for this request
# we look for a views/name.py file where name is the same name as the HTML file
response = None
while True: # enables the InternalRedirectExceptions to loop around
full_module_filename = os.path.normpath(os.path.join(settings.BASE_DIR, request.dmp_router_module.replace('.', '/') + '.py'))
try:
# look for the module, and if not found go straight to template
if not os.path.exists(full_module_filename):
log.warning('module %s not found; sending processing directly to template %s.html' % (request.dmp_router_module, request.dmp_router_page_full))
try:
dmp_loader = get_dmp_instance().get_template_loader(request.dmp_router_app)
return dmp_loader.get_template('%s.html' % request.dmp_router_page_full).render_to_response(request=request)
except (TemplateDoesNotExist, TemplateSyntaxError, ImproperlyConfigured) as e:
log.error('%s' % (e))
raise Http404
# find the function
module_obj = import_module(request.dmp_router_module)
if not hasattr(module_obj, request.dmp_router_function):
log.error('view function/class %s not in module %s; returning 404 not found.' % (request.dmp_router_function, request.dmp_router_module))
raise Http404
func_obj = getattr(module_obj, request.dmp_router_function)
# if the func_obj is a View, we're doing class-based views and it needs converting to a function
if isclass(func_obj) and issubclass(func_obj, View):
request.dmp_router_class = request.dmp_router_function
request.dmp_router_function = request.method.lower()
func_obj = func_obj.as_view() # this Django method wraps the view class with a function, so now we can treat it like a regular dmp call
# we don't need the @view_function security check because the class is already subclassed from "View", so we know the site means to expose this class as an endpoint.
# if the func_obj is a regular function, so ensure it is decorated with @view_function - this is for security so only certain functions can be called
elif getattr(func_obj, 'dmp_view_function', False) != True:
log.error('view function %s found successfully, but it is not decorated with @view_function; returning 404 not found. Note that if you have multiple decorators on a function, the @view_function decorator must be listed first.' % (request.dmp_router_function))
raise Http404
# send the pre-signal
if DMP_OPTIONS.get('SIGNALS', False):
for receiver, ret_response in dmp_signal_pre_process_request.send(sender=sys.modules[__name__], request=request):
if isinstance(ret_response, (HttpResponse, StreamingHttpResponse)):
return ret_response
# call view function
if request.dmp_router_class == None and log.isEnabledFor(logging.INFO):
log.info('calling view function %s.%s' % (request.dmp_router_module, request.dmp_router_function))
elif log.isEnabledFor(logging.INFO):
log.info('calling class-based view function %s.%s.%s' % (request.dmp_router_module, request.dmp_router_class, request.dmp_router_function))
response = func_obj(request)
# send the post-signal
if DMP_OPTIONS.get('SIGNALS', False):
for receiver, ret_response in dmp_signal_post_process_request.send(sender=sys.modules[__name__], request=request, response=response):
if ret_response != None:
response = ret_response # sets it to the last non-None in the signal receiver chain
# if we didn't get a correct response back, send a 404
if not isinstance(response, (HttpResponse, StreamingHttpResponse)):
if request.dmp_router_class == None:
log.error('view function %s.%s failed to return an HttpResponse (or the post-signal overwrote it). Returning Http404.' % (request.dmp_router_module, request.dmp_router_function))
else:
log.error('class-based view function %s.%s.%s failed to return an HttpResponse (or the post-signal overwrote it). Returning Http404.' % (request.dmp_router_module, request.dmp_router_class, request.dmp_router_function))
raise Http404
# return the response
return response
except InternalRedirectException as ivr:
# send the signal
if DMP_OPTIONS.get('SIGNALS', False):
dmp_signal_internal_redirect_exception.send(sender=sys.modules[__name__], request=request, exc=ivr)
# do the internal redirect
request.dmp_router_module = ivr.redirect_module
request.dmp_router_function = ivr.redirect_function
full_module_filename = os.path.normpath(os.path.join(settings.BASE_DIR, request.dmp_router_module.replace('.', '/') + '.py'))
log.info('received an InternalViewRedirect to %s -> %s' % (full_module_filename, request.dmp_router_function))
except RedirectException as e: # redirect to another page
if request.dmp_router_class == None:
log.info('view function %s.%s redirected processing to %s' % (request.dmp_router_module, request.dmp_router_function, e.redirect_to))
else:
log.info('class-based view function %s.%s.%s redirected processing to %s' % (request.dmp_router_module, request.dmp_router_class, request.dmp_router_function, e.redirect_to))
# send the signal
if DMP_OPTIONS.get('SIGNALS', False):
dmp_signal_redirect_exception.send(sender=sys.modules[__name__], request=request, exc=e)
# send the browser the redirect command
return e.get_response(request)
# the code should never get here
raise Exception("Django-Mako-Plus router error: The route_request() function should not have been able to get to this point. Please notify the owner of the DMP project. Thanks.")
2
Example 30
Project: pymel Source File: utilitytypes.py
def LazyLoadModule(name, contents):
"""
:param name: name of the module
:param contents: dictionary of initial module globals
This function returns a special module type with one method `_addattr`. The signature
of this method is:
_addattr(name, creator, *creatorArgs, **creatorKwargs)
Attributes added with this method will not be created until the first time that
they are accessed, at which point a callback function will be called to generate
the attribute's value.
:param name: name of the attribute to lazily add
:param creator: a function that creates the attribute's value
Example::
import sys
mod = LazyLoadModule(__name__, globals())
mod._addattr( 'foo', str, 'bar' )
sys.modules[__name__] = mod
One caveat of this technique is that if a user imports everything from your
lazy module (e.g. from module import *), it will cause all lazy attributes
to be evaluated.
Also, if any module-level expression needs to reference something that only
exists in the LazyLoadModule, it will need to be stuck in after the creation of the
LazyLoadModule. Then, typically, after defining all functions/classes/etc
which rely on the LazyLoadModule attributes, you will wish to update the
LazyLoadModule with the newly-created functions - typically, this is done
with the _updateLazyModule method.
Finally, any functions which reference any LazyLoadModule-only attributes,
whether they are defined after OR before the creation of the LazyLoadModule,
will have to prefix it with a reference to the LazyLoadModule.
Example::
import sys
def myFunc():
# need to preface foo with 'lazyModule',
# even though this function is defined before
# the creation of the lazy module!
print 'foo is:', lazyModule.foo
mod = LazyLoadModule(__name__, globals())
mod._addattr( 'foo', str, 'bar' )
sys.modules[__name__] = mod
# create a reference to the LazyLoadModule in this module's
# global space
lazyModule = sys.modules[__name__]
# define something which relies on something in the lazy module
fooExpanded = lazyModule.foo + '... now with MORE!'
# update the lazyModule with our new additions (ie, fooExpanded)
lazyModule._updateLazyModule(globals())
"""
class _LazyLoadModule(types.ModuleType):
class LazyLoader(object):
"""
A data descriptor that delays instantiation of an object
until it is first accessed.
"""
def __init__(self, name, creator, *creatorArgs, **creatorKwargs):
self.creator = creator
self.args = creatorArgs
self.kwargs = creatorKwargs
self.name = name
def __get__(self, obj, objtype):
# In case the LazyLoader happens to get stored on more
# than one object, cache the created object so the exact
# same one will be returned
if not hasattr(self, 'newobj'):
# use the callback to create the object that will replace us
self.newobj = self.creator(*self.args, **self.kwargs)
if isinstance(obj, types.ModuleType) and hasattr(self.newobj, '__module__'):
self.newobj.__module__ = obj.__name__
# print "Lazy-loaded object:", self.name
# delattr( obj.__class__, self.name) # should we overwrite with None?
# overwrite ourselves with the newly created object
setattr(obj, self.name, self.newobj)
return self.newobj
def __init__(self, name, contents):
types.ModuleType.__init__(self, name)
self.__dict__.update(contents)
self._lazyGlobals = contents # globals of original module
# add ourselves to sys.modules, overwriting the original module
sys.modules[name] = self
# the above line assigns a None value to all entries in the original globals.
# luckily, we have a copy on this module we can use to restore it.
self._lazyGlobals.update(self.__dict__)
@property
def __all__(self):
public = [x for x in self.__dict__.keys() + self.__class__.__dict__.keys() if not x.startswith('_')]
return public
@classmethod
def _lazyModule_addAttr(cls, name, creator, *creatorArgs, **creatorKwargs):
lazyObj = cls.LazyLoader(name, creator, *creatorArgs, **creatorKwargs)
setattr(cls, name, lazyObj)
return lazyObj
def __setitem__(self, attr, args):
"""
dynModule['attrName'] = ( callbackFunc, ( 'arg1', ), {} )
"""
# args will either be a single callable, or will be a tuple of
# ( callable, (args,), {kwargs} )
if hasattr(args, '__call__'):
callback = args
elif isinstance(args, (tuple, list)):
if len(args) >= 1:
assert hasattr(args[0], '__call__'), 'first argument must be callable'
callback = args[0]
else:
raise ValueError, "must supply at least one argument"
if len(args) >= 2:
assert hasattr(args[1], '__iter__'), 'second argument must be iterable'
cb_args = args[1]
else:
cb_args = ()
cb_kwargs = {}
if len(args) == 3:
assert operator.isMappingType(args[2]), 'third argument must be a mapping type'
cb_kwargs = args[2]
else:
cb_kwargs = {}
if len(args) > 3:
raise ValueError, "if args and kwargs are desired, they should be passed as a tuple and dictionary, respectively"
else:
raise ValueError, "the item must be set to a callable, or to a 3-tuple of (callable, (args,), {kwargs})"
self._lazyModule_addAttr(attr, callback, *cb_args, **cb_kwargs)
def __getitem__(self, attr):
"""
return a LazyLoader without initializing it, or, if a LazyLoader does not exist with this name,
a real object
"""
try:
return self.__class__.__dict__[attr]
except KeyError:
return self.__dict__[attr]
# Sort of a cumbersome name, but we want to make sure it doesn't conflict with any
# 'real' entries in the module
def _lazyModule_update(self):
"""
Used to update the contents of the LazyLoadModule with the contents of another dict.
"""
# For debugging, print out a list of things in the _lazyGlobals that
# AREN'T in __dict__
# print "_lazyModule_update:"
# print "only in dynamic module:", [x for x in
# (set(self.__class__.__dict__) | set(self.__dict__))- set(self._lazyGlobals)
# if not x.startswith('__')]
self.__dict__.update(self._lazyGlobals)
return _LazyLoadModule(name, contents)
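The decisive line is sys.modules[name] = self inside __init__: the proxy replaces the original module under the same key, so every subsequent import hands back the proxy. A compact, modern sketch of the same self-replacement (the laziness shown is illustrative):
import sys
import types

class LazyModule(types.ModuleType):
    def __getattr__(self, name):
        # Hypothetical laziness: compute an attribute on first access,
        # then cache it so the cost is paid only once.
        value = 'computed:%s' % name
        setattr(self, name, value)
        return value

# Replace the module currently executing with the lazy proxy.
sys.modules[__name__] = LazyModule(__name__)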
2
Example 31
@classmethod
def from_xml(cls, xml, parent=None):
"""Create a new object from XML data"""
# instantiate the object
if cls == Party:
cls = getattr(sys.modules[__name__], xml.get('type'))
self = cls()
for child in xml:
# convert the key to underscore notation for Python
key = child.tag.replace('-', '_')
# if this key is not recognized by pyrise, ignore it
if key not in cls.fields:
continue
# if there is no data, just set the default
if child.text == None:
self.__dict__[key] = self.fields[key].default
continue
# handle the contact-data key differently
if key == 'contact_data':
klass = getattr(sys.modules[__name__], 'ContactData')
self.contact_data = klass.from_xml(child, parent=self)
continue
# if this is an element with children, it's an object relationship
if len(list(child)) > 0:
# is this element an array of objects?
if cls.fields[key].type == list:
items = []
for item in child:
if item.tag == 'party':
class_string = item.find('type').text
else:
class_string = Highrise.key_to_class(item.tag.replace('_', '-'))
klass = getattr(sys.modules[__name__], class_string)
items.append(klass.from_xml(item, parent=self))
self.__dict__[child.tag.replace('-', '_')] = items
continue
# otherwise, let's treat it like a single object
else:
if child.tag == 'party':
class_string = child.find('type').text
else:
class_string = Highrise.key_to_class(child.tag)
klass = getattr(sys.modules[__name__], class_string)
self.__dict__[child.tag.replace('-', '_')] = klass.from_xml(child, parent=self)
continue
# get and convert attribute value based on type
data_type = child.get('type')
if data_type == 'integer':
value = int(child.text)
elif data_type == 'datetime':
value = Highrise.from_utc(datetime.strptime(child.text, '%Y-%m-%dT%H:%M:%SZ'))
else:
try:
value = unicode(child.text)
except:
value = str(child.text)
# add value to object dictionary
self.__dict__[key] = value
return self
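All the getattr(sys.modules[__name__], class_string) calls implement string-to-class dispatch within the current module: a tag name from the XML selects the class that should parse it. Reduced to its core (the two classes are illustrative):
import sys

class Person(object): pass
class Company(object): pass

def class_for(tag):
    # Resolve 'Person' or 'Company' to the class defined above.
    return getattr(sys.modules[__name__], tag)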
2
Example 32
Project: root_numpy Source File: phantom_import.py
def import_phantom_module(xml_file):
"""
Insert a fake Python module into sys.modules, based on an XML file.
The XML file is expected to conform to Pydocweb DTD. The fake
module will contain dummy objects, which guarantee the following:
- Docstrings are correct.
- Class inheritance relationships are correct (if present in XML).
- Function argspec is *NOT* correct (even if present in XML).
Instead, the function signature is prepended to the function docstring.
- Class attributes are *NOT* correct; instead, they are dummy objects.
Parameters
----------
xml_file : str
Name of an XML file to read
"""
import lxml.etree as etree
object_cache = {}
tree = etree.parse(xml_file)
root = tree.getroot()
# Sort items so that
# - Base classes come before classes inherited from them
# - Modules come before their contents
all_nodes = dict([(n.attrib['id'], n) for n in root])
def _get_bases(node, recurse=False):
bases = [x.attrib['ref'] for x in node.findall('base')]
if recurse:
j = 0
while True:
try:
b = bases[j]
except IndexError: break
if b in all_nodes:
bases.extend(_get_bases(all_nodes[b]))
j += 1
return bases
type_index = ['module', 'class', 'callable', 'object']
def base_cmp(a, b):
x = cmp(type_index.index(a.tag), type_index.index(b.tag))
if x != 0: return x
if a.tag == 'class' and b.tag == 'class':
a_bases = _get_bases(a, recurse=True)
b_bases = _get_bases(b, recurse=True)
x = cmp(len(a_bases), len(b_bases))
if x != 0: return x
if a.attrib['id'] in b_bases: return -1
if b.attrib['id'] in a_bases: return 1
return cmp(a.attrib['id'].count('.'), b.attrib['id'].count('.'))
nodes = root.getchildren()
nodes.sort(base_cmp)
# Create phantom items
for node in nodes:
name = node.attrib['id']
doc = (node.text or '').decode('string-escape') + "\n"
if doc == "\n": doc = ""
# create parent, if missing
parent = name
while True:
parent = '.'.join(parent.split('.')[:-1])
if not parent: break
if parent in object_cache: break
obj = imp.new_module(parent)
object_cache[parent] = obj
sys.modules[parent] = obj
# create object
if node.tag == 'module':
obj = imp.new_module(name)
obj.__doc__ = doc
sys.modules[name] = obj
elif node.tag == 'class':
bases = [object_cache[b] for b in _get_bases(node)
if b in object_cache]
bases.append(object)
init = lambda self: None
init.__doc__ = doc
obj = type(name, tuple(bases), {'__doc__': doc, '__init__': init})
obj.__name__ = name.split('.')[-1]
elif node.tag == 'callable':
funcname = node.attrib['id'].split('.')[-1]
argspec = node.attrib.get('argspec')
if argspec:
argspec = re.sub('^[^(]*', '', argspec)
doc = "%s%s\n\n%s" % (funcname, argspec, doc)
obj = lambda: 0
obj.__argspec_is_invalid_ = True
if sys.version_info[0] >= 3:
obj.__name__ = funcname
else:
obj.func_name = funcname
obj.__name__ = name
obj.__doc__ = doc
if inspect.isclass(object_cache[parent]):
obj.__objclass__ = object_cache[parent]
else:
class Dummy(object): pass
obj = Dummy()
obj.__name__ = name
obj.__doc__ = doc
if inspect.isclass(object_cache[parent]):
obj.__get__ = lambda: None
object_cache[name] = obj
if parent:
if inspect.ismodule(object_cache[parent]):
obj.__module__ = parent
setattr(object_cache[parent], name.split('.')[-1], obj)
# Populate items
for node in root:
obj = object_cache.get(node.attrib['id'])
if obj is None: continue
for ref in node.findall('ref'):
if node.tag == 'class':
if ref.attrib['ref'].startswith(node.attrib['id'] + '.'):
setattr(obj, ref.attrib['name'],
object_cache.get(ref.attrib['ref']))
else:
setattr(obj, ref.attrib['name'],
object_cache.get(ref.attrib['ref']))
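The essential move is inserting synthetic module objects into sys.modules, after which plain import statements succeed with no file on disk. A minimal sketch using types.ModuleType, the modern replacement for the imp.new_module call above:
import sys
import types

def install_phantom(name, doc=''):
    mod = types.ModuleType(name, doc)
    sys.modules[name] = mod  # now importable by name
    return mod

install_phantom('phantom_pkg')
import phantom_pkg  # served straight from sys.modules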
2
Example 33
Project: girder Source File: assetstore_test.py
@moto.mock_s3bucket_path
def testS3AssetstoreAdapter(self):
# Delete the default assetstore
self.model('assetstore').remove(self.assetstore)
s3Regex = r'^https://s3.amazonaws.com(:443)?/bucketname/foo/bar'
params = {
'name': 'S3 Assetstore',
'type': AssetstoreType.S3,
'bucket': '',
'accessKeyId': 'someKey',
'secret': 'someSecret',
'prefix': '/foo/bar/'
}
# Validation should fail with empty bucket name
resp = self.request(path='/assetstore', method='POST', user=self.admin, params=params)
self.assertStatus(resp, 400)
self.assertEqual(resp.json, {
'type': 'validation',
'field': 'bucket',
'message': 'Bucket must not be empty.'
})
params['bucket'] = 'bucketname'
# Validation should fail with a missing bucket
resp = self.request(path='/assetstore', method='POST', user=self.admin, params=params)
self.assertStatus(resp, 400)
self.assertEqual(resp.json, {
'type': 'validation',
'field': 'bucket',
'message': 'Unable to write into bucket "bucketname".'
})
# Validation should fail with a bogus service name
params['service'] = 'ftp://nowhere'
resp = self.request(path='/assetstore', method='POST', user=self.admin, params=params)
self.assertStatus(resp, 400)
del params['service']
# Create a bucket (mocked using moto), so that we can create an
# assetstore in it
botoParams = makeBotoConnectParams(params['accessKeyId'],
params['secret'])
bucket = mock_s3.createBucket(botoParams, 'bucketname')
# Create an assetstore
resp = self.request(path='/assetstore', method='POST', user=self.admin, params=params)
self.assertStatusOk(resp)
assetstore = self.model('assetstore').load(resp.json['_id'])
# Set the assetstore to current. This is really to test the edit
# assetstore code.
params['current'] = True
resp = self.request(path='/assetstore/%s' % assetstore['_id'],
method='PUT', user=self.admin, params=params)
self.assertStatusOk(resp)
# Test init for a single-chunk upload
folders = self.model('folder').childFolders(self.admin, 'user')
parentFolder = six.next(folders)
params = {
'parentType': 'folder',
'parentId': parentFolder['_id'],
'name': 'My File.txt',
'size': 1024,
'mimeType': 'text/plain'
}
resp = self.request(path='/file', method='POST', user=self.admin, params=params)
self.assertStatusOk(resp)
self.assertEqual(resp.json['received'], 0)
self.assertEqual(resp.json['size'], 1024)
self.assertEqual(resp.json['behavior'], 's3')
singleChunkUpload = resp.json
s3Info = singleChunkUpload['s3']
self.assertEqual(s3Info['chunked'], False)
self.assertIsInstance(s3Info['chunkLength'], int)
self.assertEqual(s3Info['request']['method'], 'PUT')
six.assertRegex(self, s3Info['request']['url'], s3Regex)
self.assertEqual(s3Info['request']['headers']['x-amz-acl'], 'private')
# Test resume of a single-chunk upload
resp = self.request(path='/file/offset', method='GET', user=self.admin,
params={'uploadId': resp.json['_id']})
self.assertStatusOk(resp)
self.assertEqual(resp.json['method'], 'PUT')
self.assertTrue('headers' in resp.json)
six.assertRegex(self, resp.json['url'], s3Regex)
# Test finalize for a single-chunk upload
resp = self.request(
path='/file/completion', method='POST', user=self.admin,
params={'uploadId': singleChunkUpload['_id']})
self.assertStatusOk(resp)
self.assertEqual(resp.json['size'], 1024)
self.assertEqual(resp.json['assetstoreId'], str(assetstore['_id']))
self.assertFalse('s3Key' in resp.json)
self.assertFalse('relpath' in resp.json)
file = self.model('file').load(resp.json['_id'], force=True)
self.assertTrue('s3Key' in file)
six.assertRegex(self, file['relpath'], '^/bucketname/foo/bar/')
# Test init for a multi-chunk upload
params['size'] = 1024 * 1024 * 1024 * 5
resp = self.request(path='/file', method='POST', user=self.admin, params=params)
self.assertStatusOk(resp)
multiChunkUpload = resp.json
s3Info = multiChunkUpload['s3']
self.assertEqual(s3Info['chunked'], True)
self.assertIsInstance(s3Info['chunkLength'], int)
self.assertEqual(s3Info['request']['method'], 'POST')
six.assertRegex(self, s3Info['request']['url'], s3Regex)
# Test uploading a chunk
resp = self.request(path='/file/chunk', method='POST',
user=self.admin, params={
'uploadId': multiChunkUpload['_id'],
'offset': 0,
'chunk': json.dumps({
'partNumber': 1,
's3UploadId': 'abcd'
})
})
self.assertStatusOk(resp)
six.assertRegex(self, resp.json['s3']['request']['url'], s3Regex)
self.assertEqual(resp.json['s3']['request']['method'], 'PUT')
# We should not be able to call file/offset with multi-chunk upload
resp = self.request(path='/file/offset', method='GET', user=self.admin,
params={'uploadId': multiChunkUpload['_id']})
self.assertStatus(resp, 400)
self.assertEqual(resp.json, {
'type': 'validation',
'message': 'You should not call requestOffset on a chunked '
'direct-to-S3 upload.'
})
# Test finalize for a multi-chunk upload
resp = self.request(
path='/file/completion', method='POST', user=self.admin,
params={'uploadId': multiChunkUpload['_id']})
largeFile = resp.json
self.assertStatusOk(resp)
six.assertRegex(self, resp.json['s3FinalizeRequest']['url'], s3Regex)
self.assertEqual(resp.json['s3FinalizeRequest']['method'], 'POST')
# Test init for an empty file (should be no-op)
params['size'] = 0
resp = self.request(path='/file', method='POST', user=self.admin, params=params)
emptyFile = resp.json
self.assertStatusOk(resp)
self.assertFalse('behavior' in resp.json)
self.assertFalse('s3' in resp.json)
# Test download for an empty file
resp = self.request(path='/file/%s/download' % emptyFile['_id'],
user=self.admin, method='GET', isJson=False)
self.assertStatusOk(resp)
self.assertEqual(self.getBody(resp), '')
self.assertEqual(resp.headers['Content-Length'], 0)
self.assertEqual(resp.headers['Content-Disposition'],
'attachment; filename="My File.txt"')
# Test download of a non-empty file
resp = self.request(path='/file/%s/download' % largeFile['_id'],
user=self.admin, method='GET', isJson=False)
self.assertStatus(resp, 303)
six.assertRegex(self, resp.headers['Location'], s3Regex)
# Test download of a non-empty file, with Content-Disposition=inline.
# Expect the special S3 header response-content-disposition.
params = {'contentDisposition': 'inline'}
inlineRegex = r'response-content-disposition=' + \
'inline%3B\+filename%3D%22My\+File.txt%22'
resp = self.request(
path='/file/%s/download' % largeFile['_id'], user=self.admin, method='GET',
isJson=False, params=params)
self.assertStatus(resp, 303)
six.assertRegex(self, resp.headers['Location'], s3Regex)
six.assertRegex(self, resp.headers['Location'], inlineRegex)
# Test download as part of a streaming zip
@httmock.all_requests
def s3_pipe_mock(url, request):
if url.netloc.startswith('s3.amazonaws.com') and url.scheme == 'https':
return 'dummy file contents'
else:
raise Exception('Unexpected url %s' % url)
with httmock.HTTMock(s3_pipe_mock):
resp = self.request(
'/folder/%s/download' % parentFolder['_id'],
method='GET', user=self.admin, isJson=False)
self.assertStatusOk(resp)
zip = zipfile.ZipFile(io.BytesIO(self.getBody(resp, text=False)), 'r')
self.assertTrue(zip.testzip() is None)
extracted = zip.read('Public/My File.txt')
self.assertEqual(extracted, b'dummy file contents')
# Attempt to import item directly into user; should fail
resp = self.request(
'/assetstore/%s/import' % assetstore['_id'], method='POST', params={
'importPath': '/foo/bar',
'destinationType': 'user',
'destinationId': self.admin['_id']
}, user=self.admin)
self.assertStatus(resp, 400)
self.assertEqual(
resp.json['message'], 'Keys cannot be imported directly underneath a user.')
# Import existing data from S3
resp = self.request('/folder', method='POST', params={
'parentType': 'folder',
'parentId': parentFolder['_id'],
'name': 'import destination'
}, user=self.admin)
self.assertStatusOk(resp)
importFolder = resp.json
resp = self.request(
'/assetstore/%s/import' % assetstore['_id'], method='POST', params={
'importPath': '',
'destinationType': 'folder',
'destinationId': importFolder['_id'],
}, user=self.admin)
self.assertStatusOk(resp)
# Data should now appear in the tree
resp = self.request('/folder', user=self.admin, params={
'parentId': importFolder['_id'],
'parentType': 'folder'
})
self.assertStatusOk(resp)
children = resp.json
self.assertEqual(len(children), 1)
self.assertEqual(children[0]['name'], 'foo')
resp = self.request('/folder', user=self.admin, params={
'parentId': children[0]['_id'],
'parentType': 'folder'
})
self.assertStatusOk(resp)
children = resp.json
self.assertEqual(len(children), 1)
self.assertEqual(children[0]['name'], 'bar')
resp = self.request('/item', user=self.admin, params={
'folderId': children[0]['_id']
})
self.assertStatusOk(resp)
self.assertEqual(len(resp.json), 1)
item = resp.json[0]
self.assertEqual(item['name'], 'test')
self.assertEqual(item['size'], 0)
resp = self.request('/item/%s/files' % str(item['_id']),
user=self.admin)
self.assertStatusOk(resp)
self.assertEqual(len(resp.json), 1)
self.assertFalse('imported' in resp.json[0])
self.assertFalse('relpath' in resp.json[0])
file = self.model('file').load(resp.json[0]['_id'], force=True)
self.assertTrue(file['imported'])
self.assertFalse('relpath' in file)
self.assertEqual(file['size'], 0)
self.assertEqual(file['assetstoreId'], assetstore['_id'])
self.assertTrue(bucket.get_key('/foo/bar/test') is not None)
# Deleting an imported file should not delete it from S3
with mock.patch('girder.events.daemon.trigger') as daemon:
resp = self.request('/item/%s' % str(item['_id']), method='DELETE', user=self.admin)
self.assertStatusOk(resp)
self.assertEqual(len(daemon.mock_calls), 0)
# Create the file key in the moto s3 store so that we can test that it
# gets deleted.
file = self.model('file').load(largeFile['_id'], user=self.admin)
bucket.initiate_multipart_upload(file['s3Key'])
key = bucket.new_key(file['s3Key'])
key.set_contents_from_string("test")
# Test delete for a non-empty file
resp = self.request(path='/file/%s' % largeFile['_id'], user=self.admin, method='DELETE')
self.assertStatusOk(resp)
# The file should be gone now
resp = self.request(path='/file/%s/download' % largeFile['_id'],
user=self.admin, method='GET', isJson=False)
self.assertStatus(resp, 400)
# The actual delete may still be in the event queue, so we want to
# check the S3 bucket directly.
startTime = time.time()
while True:
if bucket.get_key(file['s3Key']) is None:
break
if time.time() - startTime > 15:
break # give up and fail
time.sleep(0.1)
self.assertIsNone(bucket.get_key(file['s3Key']))
resp = self.request(
path='/folder/%s' % parentFolder['_id'], method='DELETE', user=self.admin)
self.assertStatusOk(resp)
# Set the assetstore to read only, attempt to delete it
assetstore['readOnly'] = True
assetstore = self.model('assetstore').save(assetstore)
def fn(*args, **kwargs):
raise Exception('get_all_multipart_uploads should not be called')
# Must mock globally (too tricky to get a direct mock.patch)
old = sys.modules['boto.s3.bucket'].Bucket.get_all_multipart_uploads
sys.modules['boto.s3.bucket'].Bucket.get_all_multipart_uploads = fn
try:
resp = self.request(
path='/assetstore/%s' % assetstore['_id'], method='DELETE', user=self.admin)
self.assertStatusOk(resp)
finally:
sys.modules['boto.s3.bucket'].Bucket.get_all_multipart_uploads = old
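The mock at the very end is the bluntest use of sys.modules: fetch an already-imported class through the module cache, swap one of its methods globally, and restore it in a finally block. A minimal sketch of that manual swap, using the stdlib json module purely for illustration (mock.patch.object would usually be the cleaner tool):

import sys
import json  # any already-imported module works for this demo

def fake_dumps(*args, **kwargs):
    return '"patched"'

old = sys.modules['json'].dumps
sys.modules['json'].dumps = fake_dumps
try:
    assert json.dumps({'a': 1}) == '"patched"'
finally:
    sys.modules['json'].dumps = old  # always restore the original

print(json.dumps({'a': 1}))  # back to normal: {"a": 1}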
2
Example 34
Project: pyunicorn Source File: mpi.py
def submit_call(name_to_call, args=(), kwargs={},
module="__main__", time_est=1, id=None, slave=None):
"""
Submit a call for parallel execution.
If called by the master and slaves are available, the call is submitted
to a slave for asynchronous execution.
If called by a slave or if no slaves are available, the call is instead
executed synchronously on this MPI node.
**Examples:**
1. Provide ids and time estimate explicitly:
.. code-block:: python
for n in range(0,10):
mpi.submit_call("doit", (n,A[n]), id=n, time_est=n**2)
for n in range(0,10):
result[n] = mpi.get_result(n)
2. Use generated ids stored in a list:
.. code-block:: python
for n in range(0,10):
ids.append(mpi.submit_call("doit", (n,A[n])))
for n in range(0,10):
results.append(mpi.get_result(ids.pop()))
3. Ignore ids altogether:
.. code-block:: python
for n in range(0,10):
mpi.submit_call("doit", (n,A[n]))
for n in range(0,10):
results.append(mpi.get_next_result())
4. Call a module function and use keyword arguments:
.. code-block:: python
mpi.submit_call("solve", (), {"a":a, "b":b},
module="numpy.linalg")
5. Call a static class method from a package:
.. code-block:: python
mpi.submit_call("Network._get_histogram", (values, n_bins),
module="pyunicorn")
Note that it is module="pyunicorn" and not
module="pyunicorn.network" here.
:arg str name_to_call: name of callable object (usually a function or
static method of a class) as contained in the namespace specified
by module.
:arg tuple args: the positional arguments to provide to the callable
object. Tuples of length 1 must be written (arg,). Default: ()
:arg dict kwargs: the keyword arguments to provide to the callable
object. Default: {}
:arg str module: optional name of the imported module or submodule in
whose namespace the callable object is contained. For objects
defined on the script level, this is "__main__", for objects
defined in an imported package, this is the package name. Must be a
key of the dictionary sys.modules (check there after import if in
doubt). Default: "__main__"
:arg float time_est: estimated relative completion time for this call;
used to find a suitable slave. Default: 1
:type id: object or None
:arg id: unique id for this call. Must be a possible dictionary key.
If None, a random id is assigned and returned. Can be re-used after
get_result() for this id. Default: None
:type slave: int > 0 and < mpi.size, or None
:arg slave: optional no. of slave to assign the call to. If None, the
call is assigned to the slave with the smallest current total time
estimate. Default: None
:return object: id of call, to be used in get_result().
"""
if id is None:
id = numpy.random.uniform()
if id in assigned:
raise MPIException("id ", str(id), " already in queue!")
if slave is not None and am_slave:
raise MPIException(
"only the master can use slave= in submit_call()")
if slave is None or slave < 1 or slave >= size:
# find slave with least estimated total time:
slave = numpy.argmin(total_time_est)
if available:
# send name to call, args, time_est to slave:
if _verbose:
print "MPI master : assigning call with id", id, "to slave", \
slave, ":", name_to_call, args, kwargs, "..."
comm.send((name_to_call, args, kwargs, module, time_est),
dest=slave)
else:
# do it myself right now:
slave = 0
if _verbose:
print "MPI master : calling", name_to_call, args, kwargs, "..."
try:
object_to_call = eval(name_to_call,
sys.modules[module].__dict__)
except NameError:
sys.stderr.write(str(sys.modules[module].__dict__.keys()))
raise
call_time = time.time()
results[id] = object_to_call(*args, **kwargs)
this_time = time.time() - call_time
n_processed[0] += 1
total_time[0] = time.time() - start_time
stats.append({"id": id, "rank": 0,
"this_time": this_time,
"time_over_est": this_time / time_est,
"n_processed": n_processed[0],
"total_time": total_time[0]})
total_time_est[slave] += time_est
queue.append(id)
slave_queue[slave].append(id)
assigned[id] = slave
return id
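The heart of this example is eval(name_to_call, sys.modules[module].__dict__): once a module has been imported anywhere in the process, sys.modules resolves callables by name without re-importing, and eval (rather than a plain getattr) also accepts dotted names such as "Network._get_histogram". A minimal Python 3 sketch of just that resolution step (resolve_callable and the use of the math module are illustrative, not part of pyunicorn):

import sys
import importlib

def resolve_callable(name, module="__main__"):
    # Import the module if it is not cached yet, then evaluate the
    # (possibly dotted) name inside that module's namespace.
    if module not in sys.modules:
        importlib.import_module(module)
    return eval(name, sys.modules[module].__dict__)

sqrt = resolve_callable("sqrt", module="math")
print(sqrt(2.0))  # 1.4142135623730951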
2
Example 35
def HandleRequest(unused_environ, handler_name, unused_url, post_data,
unused_error, application_root, python_lib,
import_hook=None):
"""Handle a single CGI request.
Handles a request for handler_name in the form 'path/to/handler.py' with the
environment contained in environ.
Args:
handler_name: A str containing the user-specified handler file to use for
this request as specified in the script field of a handler in app.yaml.
post_data: A stream containing the post data for this request.
application_root: A str containing the root path of the application.
python_lib: A str containing the root the Python App Engine library.
import_hook: Optional import hook (PEP 302 style loader).
Returns:
A dict containing zero or more of the following:
error: App Engine error code. 0 for OK, 1 for error. Defaults to OK if not
set. If set, then the other fields may be missing.
response_code: HTTP response code.
headers: A list of tuples (key, value) of HTTP headers.
body: A str of the body of the response.
"""
body = cStringIO.StringIO()
module_name = _FileToModuleName(handler_name)
parent_module, _, submodule_name = module_name.rpartition('.')
parent_module = _GetModuleOrNone(parent_module)
main = None
if module_name in sys.modules:
module = sys.modules[module_name]
main = _GetValidMain(module)
if not main:
module = imp.new_module('__main__')
if import_hook is not None:
module.__loader__ = import_hook
saved_streams = sys.stdin, sys.stdout
try:
sys.modules['__main__'] = module
module.__dict__['__name__'] = '__main__'
sys.stdin = post_data
sys.stdout = body
if main:
os.environ['PATH_TRANSLATED'] = module.__file__
main()
else:
filename = _AbsolutePath(handler_name, application_root, python_lib)
if filename.endswith(os.sep + '__init__.py'):
module.__path__ = [os.path.dirname(filename)]
if import_hook is None:
code, filename = _LoadModuleCode(filename)
else:
code = import_hook.get_code(module_name)
if not code:
return {'error': 2}
os.environ['PATH_TRANSLATED'] = filename
module.__file__ = filename
try:
sys.modules[module_name] = module
eval(code, module.__dict__)
except:
del sys.modules[module_name]
if parent_module and submodule_name in parent_module.__dict__:
del parent_module.__dict__[submodule_name]
raise
else:
if parent_module:
parent_module.__dict__[submodule_name] = module
return _ParseResponse(body)
except:
exception = sys.exc_info()
message = ''.join(traceback.format_exception(exception[0], exception[1],
exception[2].tb_next))
logging.error(message)
return {'error': 1}
finally:
sys.stdin, sys.stdout = saved_streams
module.__name__ = module_name
if '__main__' in sys.modules:
del sys.modules['__main__']
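The pattern worth isolating here is the temporary takeover of sys.modules['__main__']: a fresh module object is installed under that key so the handler code believes it is the top-level script, and the entry is cleaned up afterwards. A minimal Python 3 sketch of the same save/exec/restore dance (run_as_main and the source string are illustrative):

import sys
import types

def run_as_main(source):
    saved = sys.modules.get('__main__')
    module = types.ModuleType('__main__')
    try:
        sys.modules['__main__'] = module
        exec(source, module.__dict__)
    finally:
        # Put the real __main__ back so later code is unaffected.
        if saved is not None:
            sys.modules['__main__'] = saved
        else:
            del sys.modules['__main__']
    return module

mod = run_as_main("x = 6 * 7")
print(mod.x)  # 42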
2
Example 36
Project: scikit-rf Source File: phantom_import.py
def import_phantom_module(xml_file):
"""
Insert a fake Python module to sys.modules, based on a XML file.
The XML file is expected to conform to Pydocweb DTD. The fake
module will contain dummy objects, which guarantee the following:
- Docstrings are correct.
- Class inheritance relationships are correct (if present in XML).
- Function argspec is *NOT* correct (even if present in XML).
Instead, the function signature is prepended to the function docstring.
- Class attributes are *NOT* correct; instead, they are dummy objects.
Parameters
----------
xml_file : str
Name of an XML file to read
"""
import lxml.etree as etree
object_cache = {}
tree = etree.parse(xml_file)
root = tree.getroot()
# Sort items so that
# - Base classes come before classes inherited from them
# - Modules come before their contents
all_nodes = dict([(n.attrib['id'], n) for n in root])
def _get_bases(node, recurse=False):
bases = [x.attrib['ref'] for x in node.findall('base')]
if recurse:
j = 0
while True:
try:
b = bases[j]
except IndexError: break
if b in all_nodes:
bases.extend(_get_bases(all_nodes[b]))
j += 1
return bases
type_index = ['module', 'class', 'callable', 'object']
def base_cmp(a, b):
x = cmp(type_index.index(a.tag), type_index.index(b.tag))
if x != 0: return x
if a.tag == 'class' and b.tag == 'class':
a_bases = _get_bases(a, recurse=True)
b_bases = _get_bases(b, recurse=True)
x = cmp(len(a_bases), len(b_bases))
if x != 0: return x
if a.attrib['id'] in b_bases: return -1
if b.attrib['id'] in a_bases: return 1
return cmp(a.attrib['id'].count('.'), b.attrib['id'].count('.'))
nodes = root.getchildren()
nodes.sort(base_cmp)
# Create phantom items
for node in nodes:
name = node.attrib['id']
doc = (node.text or '').decode('string-escape') + "\n"
if doc == "\n": doc = ""
# create parent, if missing
parent = name
while True:
parent = '.'.join(parent.split('.')[:-1])
if not parent: break
if parent in object_cache: break
obj = imp.new_module(parent)
object_cache[parent] = obj
sys.modules[parent] = obj
# create object
if node.tag == 'module':
obj = imp.new_module(name)
obj.__doc__ = doc
sys.modules[name] = obj
elif node.tag == 'class':
bases = [object_cache[b] for b in _get_bases(node)
if b in object_cache]
bases.append(object)
init = lambda self: None
init.__doc__ = doc
obj = type(name, tuple(bases), {'__doc__': doc, '__init__': init})
obj.__name__ = name.split('.')[-1]
elif node.tag == 'callable':
funcname = node.attrib['id'].split('.')[-1]
argspec = node.attrib.get('argspec')
if argspec:
argspec = re.sub('^[^(]*', '', argspec)
doc = "%s%s\n\n%s" % (funcname, argspec, doc)
obj = lambda: 0
obj.__argspec_is_invalid_ = True
obj.func_name = funcname
obj.__name__ = name
obj.__doc__ = doc
if inspect.isclass(object_cache[parent]):
obj.__objclass__ = object_cache[parent]
else:
class Dummy(object): pass
obj = Dummy()
obj.__name__ = name
obj.__doc__ = doc
if inspect.isclass(object_cache[parent]):
obj.__get__ = lambda: None
object_cache[name] = obj
if parent:
if inspect.ismodule(object_cache[parent]):
obj.__module__ = parent
setattr(object_cache[parent], name.split('.')[-1], obj)
# Populate items
for node in root:
obj = object_cache.get(node.attrib['id'])
if obj is None: continue
for ref in node.findall('ref'):
if node.tag == 'class':
if ref.attrib['ref'].startswith(node.attrib['id'] + '.'):
setattr(obj, ref.attrib['name'],
object_cache.get(ref.attrib['ref']))
else:
setattr(obj, ref.attrib['name'],
object_cache.get(ref.attrib['ref']))
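Stripped of the XML handling, the enabling trick is that any module object assigned into sys.modules becomes importable, parents included. A minimal Python 3 sketch using types.ModuleType in place of the deprecated imp.new_module (the phantom.sub name is made up for illustration):

import sys
import types

def install_phantom(name, doc=""):
    # Create parent packages on demand, then register the module.
    parts = name.split('.')
    for i in range(1, len(parts) + 1):
        prefix = '.'.join(parts[:i])
        if prefix not in sys.modules:
            mod = types.ModuleType(prefix)
            sys.modules[prefix] = mod
            if i > 1:
                # Attach to the parent so attribute access works too.
                setattr(sys.modules['.'.join(parts[:i - 1])], parts[i - 1], mod)
    sys.modules[name].__doc__ = doc
    return sys.modules[name]

install_phantom('phantom.sub', doc='a fake module')
import phantom.sub
print(phantom.sub.__doc__)  # a fake module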
2
Example 37
Project: matlab_wrapper Source File: matlab_session.py
def ndarray_to_mxarray(libmx, arr):
### Prepare `arr` object (convert to ndarray if possible), assert
### data type
if isinstance(arr, str) or isinstance(arr, unicode):
pass
elif isinstance(arr, dict):
raise NotImplementedError('dicts are not supported.')
elif ('pandas' in sys.modules) and isinstance(arr, sys.modules['pandas'].DataFrame):
arr = arr.to_records()
elif ('pandas' in sys.modules) and isinstance(arr, sys.modules['pandas'].Series):
arr = arr.to_frame().to_records()
elif isinstance(arr, collections.Iterable):
arr = np.array(arr, ndmin=2)
elif np.issctype(type(arr)):
arr = np.array(arr, ndmin=2)
else:
raise NotImplementedError("Data type not supported: {}".format(type(arr)))
### Convert ndarray to mxarray
if isinstance(arr, str):
pm = libmx.mxCreateString(arr)
elif isinstance(arr, unicode):
pm = libmx.mxCreateString(arr.encode('utf-8'))
elif isinstance(arr, np.ndarray) and arr.dtype.kind in ['i','u','f','c']:
dim = arr.ctypes.shape_as(mwSize)
complex_flag = (arr.dtype.kind == 'c')
pm = libmx.mxCreateNumericArray(
arr.ndim,
dim,
dtype_to_mat(arr.dtype),
complex_flag
)
mat_data = libmx.mxGetData(pm)
np_data = arr.real.tostring('F')
ctypes.memmove(mat_data, np_data, len(np_data))
if complex_flag:
mat_data = libmx.mxGetImagData(pm)
np_data = arr.imag.tostring('F')
ctypes.memmove(mat_data, np_data, len(np_data))
elif isinstance(arr, np.ndarray) and arr.dtype.kind == 'b':
dim = arr.ctypes.shape_as(mwSize)
pm = libmx.mxCreateLogicalArray(arr.ndim, dim)
mat_data = libmx.mxGetData(pm)
np_data = arr.real.tostring('F')
ctypes.memmove(mat_data, np_data, len(np_data))
elif isinstance(arr, np.ndarray) and arr.dtype.kind in ('O', 'S', 'U'):
dim = arr.ctypes.shape_as(mwSize)
pm = libmx.mxCreateCellArray(arr.ndim, dim)
for i,el in enumerate(arr.flatten('F')):
p = ndarray_to_mxarray(libmx, el)
libmx.mxSetCell(pm, i, p)
elif isinstance(arr, np.ndarray) and len(arr.dtype) > 0:
dim = arr.ctypes.shape_as(mwSize)
name_num = len(arr.dtype.names)
names_p = (c_char_p*name_num)(*[c_char_p(name) for name in arr.dtype.names])
pm = libmx.mxCreateStructArray(
arr.ndim,
dim,
name_num,
names_p,
)
for i,record in enumerate(arr.flatten('F')):
for name in arr.dtype.names:
el = record[name]
p = ndarray_to_mxarray(libmx, el)
libmx.mxSetField(pm, i, name, p)
elif isinstance(arr, np.ndarray):
raise NotImplementedError('Unsupported dtype: {}'.format(arr.dtype))
return pm
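The ('pandas' in sys.modules) guard is a standard way to special-case types from an optional dependency without importing it yourself: if the caller never imported pandas, no DataFrame can possibly exist, so the membership test is both cheap and safe. A minimal sketch of the same guard (the describe function is illustrative):

import sys

def describe(obj):
    # Only touch pandas types if the caller already imported pandas;
    # testing sys.modules avoids paying the import cost ourselves.
    if 'pandas' in sys.modules and isinstance(obj, sys.modules['pandas'].DataFrame):
        return 'DataFrame with %d rows' % len(obj)
    return 'plain object: %r' % (obj,)

print(describe([1, 2, 3]))  # plain object: [1, 2, 3]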
2
Example 38
def load(val, import_custom_exceptions, instantiate_custom_exceptions, instantiate_oldstyle_exceptions):
"""
Loads a dumped exception (the tuple returned by :func:`dump`) info a
throwable exception object. If the exception cannot be instantiated for any
reason (i.e., the security parameters do not allow it, or the exception
class simply doesn't exist on the local machine), a :class:`GenericException`
instance will be returned instead, containing all of the original exception's
details.
:param val: the dumped exception
:param import_custom_exceptions: whether to allow this function to import custom modules
(imposes a security risk)
:param instantiate_custom_exceptions: whether to allow this function to instantiate "custom
exceptions" (i.e., not one of the built-in exceptions,
such as ``ValueError``, ``OSError``, etc.)
:param instantiate_oldstyle_exceptions: whether to allow this function to instantiate exception
classes that do not derive from ``BaseException``.
This is required to support old-style exceptions.
Not applicable for Python 3 and above.
:returns: A throwable exception object
"""
if val == consts.EXC_STOP_ITERATION:
return StopIteration # optimization
if type(val) is str:
return val # deprecated string exceptions
(modname, clsname), args, attrs, tbtext = val
if import_custom_exceptions and modname not in sys.modules:
try:
__import__(modname, None, None, "*")
except Exception:
pass
if instantiate_custom_exceptions:
if modname in sys.modules:
cls = getattr(sys.modules[modname], clsname, None)
else:
cls = None
elif modname == exceptions_module.__name__:
cls = getattr(exceptions_module, clsname, None)
else:
cls = None
if is_py3k:
if not isinstance(cls, type) or not issubclass(cls, BaseException):
cls = None
else:
if not isinstance(cls, (type, ClassType)):
cls = None
elif issubclass(cls, ClassType) and not instantiate_oldstyle_exceptions:
cls = None
elif not issubclass(cls, BaseException):
cls = None
if cls is None:
fullname = "%s.%s" % (modname, clsname)
if fullname not in _generic_exceptions_cache:
fakemodule = {"__module__" : "%s/%s" % (__name__, modname)}
if isinstance(GenericException, ClassType):
_generic_exceptions_cache[fullname] = ClassType(fullname, (GenericException,), fakemodule)
else:
_generic_exceptions_cache[fullname] = type(fullname, (GenericException,), fakemodule)
cls = _generic_exceptions_cache[fullname]
cls = _get_exception_class(cls)
# support old-style exception classes
if ClassType is not type and isinstance(cls, ClassType):
exc = InstanceType(cls)
else:
exc = cls.__new__(cls)
exc.args = args
for name, attrval in attrs:
setattr(exc, name, attrval)
exc._remote_tb = tbtext
return exc
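The lookup sequence in the middle of this function -- __import__ the module if it is absent, then getattr(sys.modules[modname], clsname, None) -- is the usual way to rebuild a class from a (module, class) name pair. A minimal sketch of just that half, with the security checks trimmed (class_by_name is illustrative):

import sys

def class_by_name(modname, clsname):
    # Import the module if needed, then pull the class out of the
    # sys.modules cache; return None if either step fails.
    if modname not in sys.modules:
        try:
            __import__(modname)
        except ImportError:
            return None
    return getattr(sys.modules.get(modname), clsname, None)

print(class_by_name('collections', 'OrderedDict'))
# <class 'collections.OrderedDict'>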
2
Example 39
def load_module(self, fullname):
logger.debug('Running load_module for {0}...'.format(fullname))
if fullname in sys.modules:
mod = sys.modules[fullname]
else:
if self.kind in (imp.PY_COMPILED, imp.C_EXTENSION, imp.C_BUILTIN,
imp.PY_FROZEN):
convert = False
# elif (self.pathname.startswith(_stdlibprefix)
# and 'site-packages' not in self.pathname):
# # We assume it's a stdlib package in this case. Is this too brittle?
# # Please file a bug report at https://github.com/PythonCharmers/python-future
# # if so.
# convert = False
# in theory, other paths could be configured to be excluded here too
elif any([fullname.startswith(path) for path in self.exclude_paths]):
convert = False
elif any([fullname.startswith(path) for path in self.include_paths]):
convert = True
else:
convert = False
if not convert:
logger.debug('Excluded {0} from translation'.format(fullname))
mod = imp.load_module(fullname, *self.found)
else:
logger.debug('Autoconverting {0} ...'.format(fullname))
mod = imp.new_module(fullname)
sys.modules[fullname] = mod
# required by PEP 302
mod.__file__ = self.pathname
mod.__name__ = fullname
mod.__loader__ = self
# This:
# mod.__package__ = '.'.join(fullname.split('.')[:-1])
# seems to result in "SystemError: Parent module '' not loaded,
# cannot perform relative import" for a package's __init__.py
# file. We use the approach below. Another option to try is the
# minimal load_module pattern from the PEP 302 text instead.
# Is the test in the next line more or less robust than the
# following one? Presumably less ...
# ispkg = self.pathname.endswith('__init__.py')
if self.kind == imp.PKG_DIRECTORY:
mod.__path__ = [ os.path.dirname(self.pathname) ]
mod.__package__ = fullname
else:
#else, regular module
mod.__path__ = []
mod.__package__ = fullname.rpartition('.')[0]
try:
cachename = imp.cache_from_source(self.pathname)
if not os.path.exists(cachename):
update_cache = True
else:
sourcetime = os.stat(self.pathname).st_mtime
cachetime = os.stat(cachename).st_mtime
update_cache = cachetime < sourcetime
# # Force update_cache to work around a problem with it being treated as Py3 code???
# update_cache = True
if not update_cache:
with open(cachename, 'rb') as f:
data = f.read()
try:
code = marshal.loads(data)
except Exception:
# pyc could be corrupt. Regenerate it
update_cache = True
if update_cache:
if self.found[0]:
source = self.found[0].read()
elif self.kind == imp.PKG_DIRECTORY:
with open(self.pathname) as f:
source = f.read()
if detect_python2(source, self.pathname):
source = self.transform(source)
with open('/tmp/futurized_code.py', 'w') as f:
f.write('### Futurized code (from %s)\n%s' %
(self.pathname, source))
code = compile(source, self.pathname, 'exec')
dirname = os.path.dirname(cachename)
if not os.path.exists(dirname):
os.makedirs(dirname)
try:
with open(cachename, 'wb') as f:
data = marshal.dumps(code)
f.write(data)
except Exception: # could be write-protected
pass
exec(code, mod.__dict__)
except Exception as e:
# must remove module from sys.modules
del sys.modules[fullname]
raise # keep it simple
if self.found[0]:
self.found[0].close()
return mod
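Two sys.modules rules from PEP 302 are visible above: the new module must be registered before its code runs, so that recursive imports of the same name find the partially initialised module instead of looping, and it must be removed again if execution fails. A minimal Python 3 sketch of just that contract (exec_module_source, the module name, and the source string are illustrative):

import sys
import types

def exec_module_source(fullname, source, filename='<string>'):
    mod = types.ModuleType(fullname)
    mod.__file__ = filename
    # Register first: code that imports `fullname` while the module
    # body is still executing must see this entry.
    sys.modules[fullname] = mod
    try:
        exec(compile(source, filename, 'exec'), mod.__dict__)
    except BaseException:
        # A failed import must not leave a broken module cached.
        del sys.modules[fullname]
        raise
    return mod

exec_module_source('demo_mod', 'VALUE = 99')
import demo_mod
print(demo_mod.VALUE)  # 99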
2
Example 40
Project: PipelineConstructionSet Source File: utilitytypes.py
def LazyLoadModule(name, contents):
"""
:param name: name of the module
:param contents: dictionary of initial module globals
This function returns a special module type with one method `_addattr`. The signature
of this method is:
_addattr(name, creator, *creatorArgs, **creatorKwargs)
Attributes added with this method will not be created until the first time that
they are accessed, at which point a callback function will be called to generate
the attribute's value.
:param name: name of the attribute to lazily add
:param creator: a function that creates the attribute's value
Example::
import sys
mod = LazyLoadModule(__name__, globals())
mod._addattr( 'foo', str, 'bar' )
sys.modules[__name__] = mod
One caveat of this technique is that if a user imports everything from your
lazy module (e.g. from module import *), it will cause all lazy attributes
to be evaluated.
Also, if any module-level expression needs to reference something that only
exists in the LazyLoadModule, it will need to be stuck in after the creation of the
LazyLoadModule. Then, typically, after defining all functions/classes/etc
which rely on the LazyLoadModule attributes, you will wish to update the
LazyLoadModule with the newly-created functions - typically, this is done
with the _lazyModule_update method.
Finally, any functions which reference any LazyLoadModule-only attributes,
whether they are defined after OR before the creation of the LazyLoadModule,
will have to prefix it with a reference to the LazyLoadModule.
Example::
import sys
def myFunc():
# need to preface foo with 'lazyModule',
# even though this function is defined before
# the creation of the lazy module!
print 'foo is:', lazyModule.foo
mod = LazyLoadModule(__name__, globals())
mod._addattr( 'foo', str, 'bar' )
sys.modules[__name__] = mod
# create a reference to the LazyLoadModule in this module's
# global space
lazyModule = sys.modules[__name__]
# define something which relies on something in the lazy module
fooExpanded = lazyModule.foo + '... now with MORE!'
# update the lazyModule with our new additions (i.e., fooExpanded)
lazyModule._lazyModule_update()
"""
class _LazyLoadModule(types.ModuleType):
class LazyLoader(object):
"""
A data descriptor that delays instantiation of an object
until it is first accessed.
"""
def __init__(self, name, creator, *creatorArgs, **creatorKwargs):
self.creator = creator
self.args = creatorArgs
self.kwargs = creatorKwargs
self.name = name
def __get__(self, obj, objtype):
# In case the LazyLoader happens to get stored on more
# than one object, cache the created object so the exact
# same one will be returned
if not hasattr(self, 'newobj'):
# use the callback to create the object that will replace us
self.newobj = self.creator(*self.args, **self.kwargs)
if isinstance(obj, types.ModuleType) and hasattr(self.newobj, '__module__'):
self.newobj.__module__ = obj.__name__
#print "Lazy-loaded object:", self.name
#delattr( obj.__class__, self.name) # should we overwrite with None?
# overwrite ourselves with the newly created object
setattr( obj, self.name, self.newobj)
return self.newobj
def __init__(self, name, contents):
types.ModuleType.__init__(self, name)
self.__dict__.update(contents)
self._lazyGlobals = contents # globals of original module
# add ourselves to sys.modules, overwriting the original module
sys.modules[name] = self
# the above line assigns a None value to all entries in the original globals.
# luckily, we have a copy on this module we can use to restore it.
self._lazyGlobals.update( self.__dict__ )
@property
def __all__(self):
public = [ x for x in self.__dict__.keys() + self.__class__.__dict__.keys() if not x.startswith('_') ]
return public
@classmethod
def _lazyModule_addAttr(cls, name, creator, *creatorArgs, **creatorKwargs):
lazyObj = cls.LazyLoader(name, creator, *creatorArgs, **creatorKwargs)
setattr( cls, name, lazyObj )
return lazyObj
def __setitem__(self, attr, args):
"""
dynModule['attrName'] = ( callbackFunc, ( 'arg1', ), {} )
"""
# args will either be a single callable, or will be a tuple of
# ( callable, (args,), {kwargs} )
if hasattr( args, '__call__'):
callback = args
elif isinstance( args, (tuple, list) ):
if len(args) >= 1:
assert hasattr( args[0], '__call__' ), 'first argument must be callable'
callback = args[0]
else:
raise ValueError, "must supply at least one argument"
if len(args) >= 2:
assert hasattr( args[1], '__iter__'), 'second argument must be iterable'
cb_args = args[1]
else:
cb_args = ()
cb_kwargs = {}
if len(args) == 3:
assert operator.isMappingType(args[2]), 'third argument must be a mapping type'
cb_kwargs = args[2]
else:
cb_kwargs = {}
if len(args) > 3:
raise ValueError, "if args and kwargs are desired, they should be passed as a tuple and dictionary, respectively"
else:
raise ValueError, "the item must be set to a callable, or to a 3-tuple of (callable, (args,), {kwargs})"
self._lazyModule_addAttr(attr, callback, *cb_args, **cb_kwargs)
def __getitem__(self, attr):
"""
return a LazyLoader without initializing it, or, if a LazyLoader does not exist with this name,
a real object
"""
try:
return self.__class__.__dict__[attr]
except KeyError:
return self.__dict__[attr]
# Sort of a cumbersome name, but we want to make sure it doesn't conflict with any
# 'real' entries in the module
def _lazyModule_update(self):
"""
Used to update the contents of the LazyLoadModule with the contents of another dict.
"""
# For debugging, print out a list of things in the _lazyGlobals that
# AREN'T in __dict__
# print "_lazyModule_update:"
# print "only in dynamic module:", [x for x in
# (set(self.__class__.__dict__) | set(self.__dict__))- set(self._lazyGlobals)
# if not x.startswith('__')]
self.__dict__.update(self._lazyGlobals)
return _LazyLoadModule(name, contents)
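The essential move -- replacing sys.modules[__name__] with an instance of a types.ModuleType subclass so that attribute access can run descriptors -- fits in a few lines. A minimal Python 3 sketch using a property in place of the full LazyLoader machinery (all names are illustrative; note a property re-runs on every access, whereas the LazyLoader above caches its result):

import sys
import types

class LazyModule(types.ModuleType):
    @property
    def expensive(self):
        # Runs when accessed through the module; the real example
        # overwrites the attribute after first use instead.
        print('computing...')
        return 42

def make_lazy(name):
    mod = LazyModule(name)
    sys.modules[name] = mod
    return mod

make_lazy('lazy_demo')
import lazy_demo
print(lazy_demo.expensive)  # prints 'computing...' then 42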
0
Example 41
Project: hellanzb Source File: Core.py
def init(options = {}):
""" initialize the app """
# Whether or not the app is in the process of shutting down
Hellanzb.SHUTDOWN = False
# Get logging going ASAP
initLogging()
# CTRL-C shutdown return code
Hellanzb.SHUTDOWN_CODE = 20
# defineServer's from the config file
Hellanzb.SERVERS = {}
# we can compare the current thread's ident to our MAIN_THREAD's to determine whether
# or not we may need to route things through twisted's callFromThread
Hellanzb.MAIN_THREAD_IDENT = thread.get_ident()
Hellanzb.BEGIN_TIME = time.time()
# Whether or not the downloader has been paused
Hellanzb.downloadPaused = False
# Troll threads
Hellanzb.postProcessors = []
Hellanzb.postProcessorLock = Lock()
# How many total NZB archives have been post processed
Hellanzb.totalPostProcessed = 0
# Whether or not we're a downloader process
Hellanzb.IS_DOWNLOADER = False
# Whether or not the queue daemon is running as a daemon process (forked)
Hellanzb.DAEMONIZE = False
# Whether or not debug logging is enabled
Hellanzb.DEBUG_MODE_ENABLED = False
# How many times CTRL-C has been pressed
Hellanzb.stopSignalCount = 0
# When the first CTRL-C was pressed
Hellanzb.firstSignal = None
# Message printed before exiting
Hellanzb.shutdownMessage = None
# Whether or not this is a hellanzb download daemon process
Hellanzb.isDaemon = False
# Whether or not we're currently downloading an NZB
Hellanzb.downloading = False
# The name of the loaded config file
Hellanzb.CONFIG_FILENAME = None
# hostname we're running on
Hellanzb.HOSTNAME = gethostname()
if isWindows():
Hellanzb.SYSNAME = None
else:
(sysname, nodename, release, version, machine) = os.uname()
# The OS in use
Hellanzb.SYSNAME = sysname
# Only add anonymous NZB files placed in the QUEUE_DIR to the NZBQueue after this
# number of seconds has passed since the file's modification time
Hellanzb.NZBQUEUE_MDELAY = 10
# Whether or not the C yenc module is installed
try:
import _yenc
Hellanzb.HAVE_C_YENC = True
except ImportError:
Hellanzb.HAVE_C_YENC = False
Hellanzb.PACKAGER = find_packager()
if isPy2App():
# Append the py2app Contents/Resources dir to the PATH
import __main__
os.environ['PATH'] = os.environ['PATH'] + ':' + \
os.path.dirname(os.path.abspath(__main__.__file__))
# Twisted will replace this with its own signal handler when initialized
signal.signal(signal.SIGINT, signalHandler)
outlineRequiredDirs() # before the config file is loaded
if hasattr(options, 'configFile') and options.configFile is not None:
findAndLoadConfig(options.configFile)
else:
findAndLoadConfig()
# FIXME: these blocks below, and some code in loadConfig should all be pulled out into
# a post-loadConfig normalizeConfig function. Could we skip any of this init stuff
# when just making an RPC call (to reduce startup time)?
for attr in ('logFile', 'debugLogFile'):
# this is really: logFile = None
setattr(sys.modules[__name__], attr, None)
if hasattr(options, attr) and getattr(options, attr) is not None:
setattr(sys.modules[__name__], attr, getattr(options, attr))
Hellanzb.Logging.initLogFile(logFile = logFile, debugLogFile = debugLogFile)
# overwrite xml rpc vars from the command line options if they were set
for option, attr in { 'rpcServer': 'XMLRPC_SERVER',
'rpcPassword': 'XMLRPC_PASSWORD',
'rpcPort': 'XMLRPC_PORT' }.iteritems():
if hasattr(options, option) and getattr(options, option) is not None:
setattr(Hellanzb, attr, getattr(options, option))
if not hasattr(Hellanzb, 'DELETE_PROCESSED'):
Hellanzb.DELETE_PROCESSED = True
if hasattr(Hellanzb, 'UMASK'):
try:
Hellanzb.UMASK = int(Hellanzb.UMASK)
except ValueError:
error('Config file option: Hellanzb.UMASK is not a valid integer')
sys.exit(1)
if not hasattr(Hellanzb, 'LIBNOTIFY_NOTIFY'):
Hellanzb.LIBNOTIFY_NOTIFY = False
elif Hellanzb.LIBNOTIFY_NOTIFY:
try:
import pynotify
except ImportError:
error('Please install notify-python or disable Hellanzb.LIBNOTIFY_NOTIFY')
sys.exit(1)
if not pynotify.init('hellanzb'):
error('Cannot initialize libnotify')
sys.exit(1)
if not hasattr(Hellanzb, 'GROWL_NOTIFY'):
error('Required option not defined in config file: Hellanzb.GROWL_NOTIFY')
sys.exit(1)
elif Hellanzb.GROWL_NOTIFY:
errors = []
for attr in ('GROWL_SERVER', 'GROWL_PASSWORD'):
if not hasattr(Hellanzb, attr):
err = 'Hellanzb.GROWL_NOTIFY enabled. Required option not defined in config file: Hellanzb.'
errors.append(err + attr)
if len(errors):
[error(err) for err in errors]
sys.exit(1)
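The loop over ('logFile', 'debugLogFile') shows that setattr(sys.modules[__name__], attr, value) is the dynamic equivalent of writing attr = value at module level, handy when the attribute names come from data. A minimal sketch (run as a script, so __name__ is '__main__'; the names are illustrative):

import sys

# Equivalent to `logFile = None` and `debugLogFile = None`, but the
# names are driven by a sequence instead of written out by hand.
for attr in ('logFile', 'debugLogFile'):
    setattr(sys.modules[__name__], attr, None)

print(logFile, debugLogFile)  # None None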
0
Example 42
def _import_hook(self, fqname, globals=None, locals=None, fromlist=None,
level=-1):
"""Python calls this hook to locate and import a module."""
parts = fqname.split('.')
# determine the context of this import
parent = self._determine_import_context(globals)
# if there is a parent, then its importer should manage this import
if parent:
module = parent.__importer__._do_import(parent, parts, fromlist)
if module:
return module
else:
#TODO: all of this hacky import stuff MUST GO!
# from now on, we will only proceed for namespaces that *may*
# need merging -- drop this ASAP for pretty much anything else
if parts[0] not in ('pyjamas',):
return self.previous_importer(fqname, globals, locals,
fromlist, level)
# has the top module already been imported?
try:
top_module = sys.modules[parts[0]]
except KeyError:
# look for the topmost module
top_module = self._import_top_module(parts[0])
if not top_module:
# the topmost module wasn't found at all.
# try previous importer.
return self.previous_importer(fqname, globals, locals, fromlist, level)
#raise ImportError, 'No module named ' + fqname
# fast-path simple imports
if len(parts) == 1:
if not fromlist:
return top_module
if not top_module.__dict__.get('__ispkg__'):
# __ispkg__ isn't defined (the module was not imported by us),
# or it is zero.
#
# In the former case, there is no way that we could import
# sub-modules that occur in the fromlist (but we can't raise an
# error because it may just be names) because we don't know how
# to deal with packages that were imported by other systems.
#
# In the latter case (__ispkg__ == 0), there can't be any sub-
# modules present, so we can just return.
#
# In both cases, since len(parts) == 1, the top_module is also
# the "bottom" which is the defined return when a fromlist
# exists.
return top_module
importer = top_module.__dict__.get('__importer__')
if importer:
return importer._finish_import(top_module, parts[1:], fromlist)
# Grrr, some people "import os.path" or do "from os.path import ..."
if len(parts) == 2 and hasattr(top_module, parts[1]):
if fromlist:
return getattr(top_module, parts[1])
else:
return top_module
# assume that the module has already been imported,
# walk from top_module to find it.
mod = top_module
for k in parts[1:]:
if not hasattr(mod, k):
#print "no mod", mod, k, parts
mod = None
break
mod = getattr(mod, k)
if mod:
return mod
# ok, pass through to previous importer
return self.previous_importer(fqname, globals, locals, fromlist, level)
# If the importer does not exist, then we have to bail. A missing
# importer means that something else imported the module, and we have
# no knowledge of how to get sub-modules out of the thing.
raise ImportError, 'No module named ' + fqname
0
Example 43
Project: pyspace Source File: time_series.py
def get_data(self, run_nr, split_nr, train_test):
""" Return the train or test data for the given split in the given run.
**Parameters**
:run_nr: The number of the run whose data should be loaded.
:split_nr: The number of the split whose data should be loaded.
:train_test: "train" if the training data should be loaded.
"test" if the test data should be loaded.
"""
# Do lazy loading of the time series objects.
if isinstance(self.data[(run_nr, split_nr, train_test)], basestring):
self._log("Lazy loading of %s time series windows from input "
"collection for run %s, split %s." % (train_test, run_nr,
split_nr))
s_format = self.meta_data["storage_format"]
if type(s_format) == list:
f_format = s_format[0]
else:
f_format = s_format
if f_format == "pickle":
# Load the time series from a pickled file
f = open(self.data[(run_nr, split_nr, train_test)], 'r')
try:
self.data[(run_nr, split_nr, train_test)] = cPickle.load(f)
except ImportError:
# code for backward compatibility
# redirection of old path
f.seek(0)
self._log("Loading deprecated data. Please transfer it " +
"to new format.",level=logging.WARNING)
from pySPACE.resources.data_types import time_series
sys.modules['abri_dp.types.time_series'] = time_series
self.data[(run_nr, split_nr, train_test)] = cPickle.load(f)
del sys.modules['abri_dp.types.time_series']
f.close()
elif f_format in ["mat", "matlab", "MATLAB"]:
from scipy.io import loadmat
from pySPACE.resources.data_types.time_series import TimeSeries
ts_fname = self.data[(run_nr, split_nr, train_test)]
dataset = loadmat(ts_fname)
channel_names = [name.strip() for name in dataset['channel_names']]
sf = dataset["sampling_frequency"][0][0]
self.data[(run_nr, split_nr, train_test)] = []
# assume third axis to be trial axis
if "channelXtime" in s_format:
for i in range(dataset["data"].shape[2]):
self.data[(run_nr, split_nr, train_test)].append(\
(TimeSeries(dataset["data"][:,:,i].T, channel_names,
sf), dataset["labels"][i].strip()))
else:
for i in range(dataset["data"].shape[2]):
self.data[(run_nr, split_nr, train_test)].append(\
(TimeSeries(dataset["data"][:,:,i], channel_names,
sf), dataset["labels"][i].strip()))
elif f_format.startswith("bci_comp"):
from scipy.io import loadmat
from pySPACE.resources.data_types.time_series import TimeSeries
if self.comp_number == "2":
if self.comp_set == "4":
ts_fname = self.data[(run_nr, split_nr, train_test)]
d = loadmat(ts_fname)
channel_names = [name[0].astype('|S3') for name in \
d["clab"][0]]
if train_test == "train":
self.data[(run_nr, split_nr, train_test)] = []
input_d = d["x_train"]
input_l = d["y_train"][0]
for i in range(input_d.shape[2]):
self.data[(run_nr, split_nr,
train_test)].append(\
(TimeSeries(input_d[:,:,i],
channel_names, float(self.sf)),
"Left" if input_l[i] == 0 else "Right"))
else:
label_fname = glob.glob(os.path.join(
os.path.dirname(ts_fname),"*.txt"))[0]
input_d = d["x_test"]
input_l = open(label_fname,'r')
self.data[(run_nr, split_nr, train_test)] = []
for i in range(input_d.shape[2]):
label = int(input_l.readline())
self.data[(run_nr, split_nr,
train_test)].append(\
(TimeSeries(input_d[:,:,i],
channel_names, float(self.sf)),
"Left" if label == 0 else "Right"))
elif self.comp_number == "3":
if self.comp_set == "2":
data = loadmat(self.data[(run_nr, split_nr, train_test)])
signal = data['Signal']
flashing = data['Flashing']
stimulus_code = data['StimulusCode']
stimulus_type = data['StimulusType']
window = 240
Fs = 240
channels = 64
epochs = signal.shape[0]
self.data[(run_nr, split_nr, train_test)] = []
self.start_offset_ms = 1000.0
self.end_offset_ms = 1000.0
whole_len = (self.start_offset_ms + self.end_offset_ms)*Fs/1000.0 + window
responses = numpy.zeros((12, 15, whole_len, channels))
for epoch in range(epochs):
rowcolcnt=numpy.ones(12)
for n in range(1, signal.shape[1]):
if (flashing[epoch,n]==0 and flashing[epoch,n-1]==1):
rowcol=stimulus_code[epoch,n-1]
if n-24-self.start_offset_ms*Fs/1000.0 < 0:
temp = signal[epoch,0:n+window+self.end_offset_ms*Fs/1000.0-24,:]
temp = numpy.vstack((numpy.zeros((whole_len - temp.shape[0], temp.shape[1])), temp))
elif n+window+self.end_offset_ms*Fs/1000.0-24> signal.shape[1]:
temp = signal[epoch,n-24-self.start_offset_ms*Fs/1000.0:signal.shape[1],:]
temp = numpy.vstack((temp, numpy.zeros((whole_len-temp.shape[0], temp.shape[1]))))
else:
temp = signal[epoch, n-24-self.start_offset_ms*Fs/1000.0:n+window+self.end_offset_ms*Fs/1000.0-24, :]
responses[rowcol-1,rowcolcnt[rowcol-1]-1,:,:]=temp
rowcolcnt[rowcol-1]=rowcolcnt[rowcol-1]+1
avgresp=numpy.mean(responses,1)
targets = stimulus_code[epoch,:]*stimulus_type[epoch,:]
target_rowcol = []
for value in targets:
if value not in target_rowcol:
target_rowcol.append(value)
target_rowcol.sort()
for i in range(avgresp.shape[0]):
temp = avgresp[i,:,:]
data = TimeSeries(input_array = temp,
channel_names = range(64),
sampling_frequency = window)
if i == target_rowcol[1]-1 or i == target_rowcol[2]-1:
self.data[(run_nr, split_nr, train_test)].append((data,"Target"))
else:
self.data[(run_nr, split_nr, train_test)].append((data,"Standard"))
if self.stream_mode and not self.data[(run_nr, split_nr, train_test)] == []:
# Create a connection to the TimeSeriesClient and return an iterator
# that passes all received data through the windower.
self.reader = TimeSeriesClient(self.data[(run_nr, split_nr, train_test)], blocksize=100)
# Creates a windower that splits the training data into windows
# based in the window definitions provided
# and assigns correct labels to these windows
self.reader.set_window_defs(self.window_definition)
self.reader.connect()
self.marker_windower = MarkerWindower(
self.reader, self.window_definition,
nullmarker_stride_ms=self.nullmarker_stride_ms,
no_overlap=self.no_overlap,
data_consistency_check=self.data_consistency_check)
return self.marker_windower
else:
return self.data[(run_nr, split_nr, train_test)]
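The pickle branch hides a useful backward-compatibility trick: pickle resolves classes by importing the dotted module path stored in the stream, so registering the new module under the old name in sys.modules lets stale pickles load, and deleting the alias afterwards keeps the namespace clean. A minimal Python 3 sketch; the old_home name is made up, and protocol 0 is used so the module name can be rewritten in the byte stream to simulate an old pickle:

import sys
import pickle
import collections

# Simulate a stale pickle that refers to a module that no longer
# exists: protocol 0 stores module names as plain text.
payload = pickle.dumps(collections.OrderedDict(a=1), protocol=0)
payload = payload.replace(b'collections', b'old_home')

sys.modules['old_home'] = collections  # redirect the old module path
try:
    obj = pickle.loads(payload)
finally:
    del sys.modules['old_home']        # keep the alias short-lived

print(obj)  # an OrderedDict with a=1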
0
Example 44
Project: fe Source File: command.py
def _call(self,
console = ExtendedConsole,
context = None
):
"""
Initialize the context and run main in the given locals
(Note: tramples on sys.argv, __main__ in sys.modules)
(Use __call__ instead)
"""
sys.modules['__main__'] = self.module__main__
md = self.module__main__.__dict__
# Establish execution context in the locals;
# iterate over all the loaders in self.context and
for path, ldesc in self.context:
ltitle, loader, xpath = ldesc
rpath = xpath(path)
li = loader(rpath)
if li is None:
sys.stderr.write(
"%s %r does not exist or cannot be read%s" %(
ltitle, rpath, os.linesep
)
)
return 1
try:
code = li.get_code(rpath)
except:
print_exception(*sys.exc_info())
return 1
self.module__main__.__file__ = getattr(
li, 'get_filename', lambda x: x
)(rpath)
self.module__main__.__loader__ = li
try:
exec(code, md, md)
except:
e, v, tb = sys.exc_info()
print_exception(e, v, tb.tb_next or tb)
return 1
if self.main == (None, None):
# It's interactive.
sys.argv = self.args or ['<console>']
# Use readline if available
try:
import readline
except ImportError:
pass
ic = console(locals = md)
try:
ic.interact()
except SystemExit as e:
return e.code
return 0
else:
# It's ultimately a code object.
path, loader = self.main
self.module__main__.__file__ = getattr(
loader, 'get_filename', lambda x: x
)(path)
sys.argv = list(self.args)
sys.argv.insert(0, self.module__main__.__file__)
try:
code = loader.get_code(path)
except:
print_exception(*sys.exc_info())
return 1
rv = 0
exe_exception = False
try:
if context is not None:
with context:
try:
exec(code, md, md)
except:
exe_exception = True
raise
else:
try:
exec(code, md, md)
except:
exe_exception = True
raise
except SystemExit as e:
# Assume it's an exe_exception as anything ran in `context`
# shouldn't cause an exception.
rv = e.code
e, v, tb = sys.exc_info()
sys.last_type = e
sys.last_value = v
sys.last_traceback = (tb.tb_next or tb)
except:
if exe_exception is False:
raise
rv = 1
e, v, tb = sys.exc_info()
print_exception(e, v, tb.tb_next or tb)
sys.last_type = e
sys.last_value = v
sys.last_traceback = (tb.tb_next or tb)
return rv
0
Example 45
Project: quality-assessment-protocol Source File: reports.py
def workflow_report(in_csv, qap_type, run_name, res_dict,
out_dir=None, out_file=None):
import datetime
if out_dir is None:
out_dir = os.getcwd()
if out_file is None:
out_file = op.join(
out_dir, qap_type + '_%s.pdf')
# Read csv file, sort and drop duplicates
df = pd.read_csv(in_csv, dtype={'subject': str}).sort(
columns=['subject', 'session', 'scan'])
try:
df.drop_duplicates(['subject', 'session', 'scan'], keep='last',
inplace=True)
except TypeError:
df.drop_duplicates(['subject', 'session', 'scan'], take_last=True,
inplace=True)
subject_list = sorted(pd.unique(df.subject.ravel()))
result = {}
func = getattr(sys.modules[__name__], qap_type)
# Identify failed subjects
failed = ['%s (%s_%s)' % (s['id'], s['session'], s['scan'])
for s in res_dict if 'failed' in s['status']]
pdf_group = []
# Generate summary page
out_sum = op.join(out_dir, run_name, 'summary_group.pdf')
summary_cover(
(qap_type,
datetime.datetime.now().strftime("%Y-%m-%d, %H:%M"),
", ".join(failed) if len(failed) > 0 else "none"),
is_group=True, out_file=out_sum)
pdf_group.append(out_sum)
# Generate group report
qc_group = op.join(out_dir, run_name, 'qc_measures_group.pdf')
# Generate violinplots. If successful, add documentation.
func(df, out_file=qc_group)
pdf_group.append(qc_group)
# Generate documentation page
doc = op.join(out_dir, run_name, 'docuementation.pdf')
# Let documentation page fail
get_docuementation(qap_type, doc)
if doc is not None:
pdf_group.append(doc)
if len(pdf_group) > 0:
out_group_file = op.join(out_dir, '%s_group.pdf' % qap_type)
# Generate final report with collected pdfs in plots
concat_pdf(pdf_group, out_group_file)
result['group'] = {'success': True, 'path': out_group_file}
# Generate individual reports for subjects
for subid in subject_list:
# Get subject-specific info
subdf = df.loc[df['subject'] == subid]
sessions = sorted(pd.unique(subdf.session.ravel()))
plots = []
sess_scans = []
# Re-build mosaic location
for sesid in sessions:
sesdf = subdf.loc[subdf['session'] == sesid]
scans = sorted(pd.unique(sesdf.scan.ravel()))
# Each scan has a volume and (optional) fd plot
for scanid in scans:
sub_info = [subid, sesid, scanid]
sub_path = op.join(out_dir, run_name, '/'.join(sub_info))
m = op.join(sub_path, 'qap_mosaic', 'mosaic.pdf')
if op.isfile(m):
plots.append(m)
fd = op.join(sub_path, 'qap_fd', 'fd.pdf')
if 'functional_temporal' in qap_type and op.isfile(fd):
plots.append(fd)
sess_scans.append('%s (%s)' % (sesid, ', '.join(scans)))
failed = ['%s (%s)' % (s['session'], s['scan'])
for s in res_dict if 'failed' in s['status'] and
subid in s['id']]
# Summary cover
out_sum = op.join(out_dir, run_name, 'summary_%s.pdf' % subid)
summary_cover(
(subid, subid, qap_type,
datetime.datetime.now().strftime("%Y-%m-%d, %H:%M"),
", ".join(sess_scans),
",".join(failed) if len(failed) > 0 else "none"),
out_file=out_sum)
plots.insert(0, out_sum)
# Summary (violinplots) of QC measures
qc_ms = op.join(out_dir, run_name, subid, 'qc_measures.pdf')
func(df, subject=subid, out_file=qc_ms)
plots.append(qc_ms)
if len(plots) > 0:
if doc is not None:
plots.append(doc)
# Generate final report with collected pdfs in plots
sub_path = out_file % subid
concat_pdf(plots, sub_path)
result[subid] = {'success': True, 'path': sub_path}
return result
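getattr(sys.modules[__name__], qap_type) turns a string from a config file or the command line into a function defined in the current module, the same dispatch idea as the CMD* lookup at the top of this page. A minimal sketch (the report_* handlers and run_report are illustrative):

import sys

def report_anatomical(path):
    return 'anatomical report for %r' % (path,)

def report_functional(path):
    return 'functional report for %r' % (path,)

def run_report(kind, path):
    # Resolve `report_<kind>` in this very module via sys.modules.
    func = getattr(sys.modules[__name__], 'report_' + kind)
    return func(path)

print(run_report('functional', 'data.csv'))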
0
Example 46
Project: pyresample Source File: utils.py
def treat_deprecations_as_exceptions():
"""
Turn all DeprecationWarnings (which indicate deprecated uses of
Python itself or Numpy, but not within Astropy, where we use our
own deprecation warning class) into exceptions so that we find
out about them early.
This completely resets the warning filters and any "already seen"
warning state.
"""
# First, totally reset the warning state
for module in list(six.itervalues(sys.modules)):
# We don't want to deal with six.MovedModules, only "real"
# modules.
if (isinstance(module, types.ModuleType) and
hasattr(module, '__warningregistry__')):
del module.__warningregistry__
if not _deprecations_as_exceptions:
return
warnings.resetwarnings()
# Hide the next couple of DeprecationWarnings
warnings.simplefilter('ignore', DeprecationWarning)
# Here's the wrinkle: a couple of our third-party dependencies
# (py.test and scipy) are still using deprecated features
# themselves, and we'd like to ignore those. Fortunately, those
# show up only at import time, so if we import those things *now*,
# before we turn the warnings into exceptions, we're golden.
try:
# A deprecated stdlib module used by py.test
import compiler # pylint: disable=W0611
except ImportError:
pass
try:
import scipy # pylint: disable=W0611
except ImportError:
pass
# Now, start over again with the warning filters
warnings.resetwarnings()
# Now, turn DeprecationWarnings into exceptions
warnings.filterwarnings("error", ".*", DeprecationWarning)
# Only turn astropy deprecation warnings into exceptions if requested
if _include_astropy_deprecations:
warnings.filterwarnings("error", ".*", AstropyDeprecationWarning)
warnings.filterwarnings("error", ".*", AstropyPendingDeprecationWarning)
if sys.version_info[:2] >= (3, 4):
# py.test reads files with the 'U' flag, which is now
# deprecated in Python 3.4.
warnings.filterwarnings(
"ignore",
r"'U' mode is deprecated",
DeprecationWarning)
# BeautifulSoup4 triggers a DeprecationWarning in stdlib's
# html module.x
warnings.filterwarnings(
"ignore",
r"The strict argument and mode are deprecated\.",
DeprecationWarning)
warnings.filterwarnings(
"ignore",
r"The value of convert_charrefs will become True in 3\.5\. "
r"You are encouraged to set the value explicitly\.",
DeprecationWarning)
if sys.version_info[:2] >= (3, 5):
# py.test raises this warning on Python 3.5.
# This can be removed when fixed in py.test.
# See https://github.com/pytest-dev/pytest/pull/1009
warnings.filterwarnings(
"ignore",
r"inspect\.getargspec\(\) is deprecated, use "
r"inspect\.signature\(\) instead",
DeprecationWarning)
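The detail that matters in the first loop is the snapshot: sys.modules can gain entries whenever code triggers an import, and mutating a dict while iterating over it raises a RuntimeError, hence the list(...) copy before touching every loaded module. A minimal Python 3 sketch of the same registry reset:

import sys
import types

def reset_warning_registries():
    # Snapshot the values first: imports triggered inside the loop
    # would otherwise mutate sys.modules during iteration.
    for module in list(sys.modules.values()):
        if isinstance(module, types.ModuleType) and hasattr(module, '__warningregistry__'):
            del module.__warningregistry__

reset_warning_registries()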
0
Example 47
Project: talk.org Source File: base.py
def __new__(cls, name, bases, attrs):
# If this isn't a subclass of Model, don't do anything special.
try:
parents = [b for b in bases if issubclass(b, Model)]
except NameError:
# 'Model' isn't defined yet, meaning we're looking at Django's own
# Model class, defined below.
parents = []
if not parents:
return super(ModelBase, cls).__new__(cls, name, bases, attrs)
# Create the class.
module = attrs.pop('__module__')
new_class = type.__new__(cls, name, bases, {'__module__': module})
attr_meta = attrs.pop('Meta', None)
abstract = getattr(attr_meta, 'abstract', False)
if not attr_meta:
meta = getattr(new_class, 'Meta', None)
else:
meta = attr_meta
base_meta = getattr(new_class, '_meta', None)
new_class.add_to_class('_meta', Options(meta))
if not abstract:
new_class.add_to_class('DoesNotExist',
subclass_exception('DoesNotExist', ObjectDoesNotExist, module))
new_class.add_to_class('MultipleObjectsReturned',
subclass_exception('MultipleObjectsReturned', MultipleObjectsReturned, module))
if base_meta and not base_meta.abstract:
# Non-abstract child classes inherit some attributes from their
# non-abstract parent (unless an ABC comes before it in the
# method resolution order).
if not hasattr(meta, 'ordering'):
new_class._meta.ordering = base_meta.ordering
if not hasattr(meta, 'get_latest_by'):
new_class._meta.get_latest_by = base_meta.get_latest_by
old_default_mgr = None
if getattr(new_class, '_default_manager', None):
# We have a parent who set the default manager.
if new_class._default_manager.model._meta.abstract:
old_default_mgr = new_class._default_manager
new_class._default_manager = None
if getattr(new_class._meta, 'app_label', None) is None:
# Figure out the app_label by looking one level up.
# For 'django.contrib.sites.models', this would be 'sites'.
model_module = sys.modules[new_class.__module__]
new_class._meta.app_label = model_module.__name__.split('.')[-2]
# Bail out early if we have already created this class.
m = get_model(new_class._meta.app_label, name, False)
if m is not None:
return m
# Add all attributes to the class.
for obj_name, obj in attrs.items():
new_class.add_to_class(obj_name, obj)
# Do the appropriate setup for any model parents.
o2o_map = dict([(f.rel.to, f) for f in new_class._meta.local_fields
if isinstance(f, OneToOneField)])
for base in parents:
if not hasattr(base, '_meta'):
# Things without _meta aren't functional models, so they're
# uninteresting parents.
continue
if not base._meta.abstract:
if base in o2o_map:
field = o2o_map[base]
field.primary_key = True
new_class._meta.setup_pk(field)
else:
attr_name = '%s_ptr' % base._meta.module_name
field = OneToOneField(base, name=attr_name,
auto_created=True, parent_link=True)
new_class.add_to_class(attr_name, field)
new_class._meta.parents[base] = field
else:
# The abstract base class case.
names = set([f.name for f in new_class._meta.local_fields + new_class._meta.many_to_many])
for field in base._meta.local_fields + base._meta.local_many_to_many:
if field.name in names:
raise FieldError('Local field %r in class %r clashes with field of similar name from abstract base class %r'
% (field.name, name, base.__name__))
new_class.add_to_class(field.name, copy.deepcopy(field))
if abstract:
# Abstract base models can't be instantiated and don't appear in
# the list of models for an app. We do the final setup for them a
# little differently from normal models.
attr_meta.abstract = False
new_class.Meta = attr_meta
return new_class
if old_default_mgr and not new_class._default_manager:
new_class._default_manager = old_default_mgr._copy_to_model(new_class)
new_class._prepare()
register_models(new_class._meta.app_label, new_class)
# Because of the way imports happen (recursively), we may or may not be
# the first time this model tries to register with the framework. There
# should only be one class for each model, so we always return the
# registered version.
return get_model(new_class._meta.app_label, name, False)
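The app_label derivation relies on sys.modules[new_class.__module__]: at class-creation time the defining module is guaranteed to be in the cache, so a metaclass can inspect the module (and hence the package) a class was written in. A minimal Python 3 sketch of the same lookup (LabelMeta and app_label are illustrative):

import sys

class LabelMeta(type):
    def __new__(mcls, name, bases, attrs):
        cls = super().__new__(mcls, name, bases, attrs)
        # The defining module is always reachable through sys.modules
        # while the class is being created.
        module = sys.modules[cls.__module__]
        cls.app_label = module.__name__.split('.')[-1]
        return cls

class Thing(metaclass=LabelMeta):
    pass

print(Thing.app_label)  # '__main__' when run as a script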
0
Example 48
Project: GAE-Bulk-Mailer Source File: base.py
def __new__(cls, name, bases, attrs):
super_new = super(ModelBase, cls).__new__
# six.with_metaclass() inserts an extra class called 'NewBase' in the
# inheritance tree: Model -> NewBase -> object. But the initialization
# should be executed only once for a given model class.
# attrs will never be empty for classes declared in the standard way
# (ie. with the `class` keyword). This is quite robust.
if name == 'NewBase' and attrs == {}:
    return super_new(cls, name, bases, attrs)

# Also ensure initialization is only performed for subclasses of Model
# (excluding the Model class itself).
parents = [b for b in bases if isinstance(b, ModelBase) and
           not (b.__name__ == 'NewBase' and b.__mro__ == (b, object))]
if not parents:
    return super_new(cls, name, bases, attrs)

# Create the class.
module = attrs.pop('__module__')
new_class = super_new(cls, name, bases, {'__module__': module})
attr_meta = attrs.pop('Meta', None)
abstract = getattr(attr_meta, 'abstract', False)
if not attr_meta:
    meta = getattr(new_class, 'Meta', None)
else:
    meta = attr_meta
base_meta = getattr(new_class, '_meta', None)

if getattr(meta, 'app_label', None) is None:
    # Figure out the app_label by looking one level up.
    # For 'django.contrib.sites.models', this would be 'sites'.
    model_module = sys.modules[new_class.__module__]
    kwargs = {"app_label": model_module.__name__.split('.')[-2]}
else:
    kwargs = {}

new_class.add_to_class('_meta', Options(meta, **kwargs))
if not abstract:
    new_class.add_to_class(
        'DoesNotExist',
        subclass_exception(
            str('DoesNotExist'),
            tuple(x.DoesNotExist
                  for x in parents if hasattr(x, '_meta') and not x._meta.abstract)
            or (ObjectDoesNotExist,),
            module, attached_to=new_class))
    new_class.add_to_class(
        'MultipleObjectsReturned',
        subclass_exception(
            str('MultipleObjectsReturned'),
            tuple(x.MultipleObjectsReturned
                  for x in parents if hasattr(x, '_meta') and not x._meta.abstract)
            or (MultipleObjectsReturned,),
            module, attached_to=new_class))
    if base_meta and not base_meta.abstract:
        # Non-abstract child classes inherit some attributes from their
        # non-abstract parent (unless an ABC comes before it in the
        # method resolution order).
        if not hasattr(meta, 'ordering'):
            new_class._meta.ordering = base_meta.ordering
        if not hasattr(meta, 'get_latest_by'):
            new_class._meta.get_latest_by = base_meta.get_latest_by

is_proxy = new_class._meta.proxy

# If the model is a proxy, ensure that the base class
# hasn't been swapped out.
if is_proxy and base_meta and base_meta.swapped:
    raise TypeError("%s cannot proxy the swapped model '%s'."
                    % (name, base_meta.swapped))

if getattr(new_class, '_default_manager', None):
    if not is_proxy:
        # Multi-table inheritance doesn't inherit the default manager
        # from parents.
        new_class._default_manager = None
        new_class._base_manager = None
    else:
        # Proxy classes do inherit the parent's default manager, if
        # none is set explicitly.
        new_class._default_manager = new_class._default_manager._copy_to_model(new_class)
        new_class._base_manager = new_class._base_manager._copy_to_model(new_class)

# Bail out early if we have already created this class.
m = get_model(new_class._meta.app_label, name,
              seed_cache=False, only_installed=False)
if m is not None:
    return m

# Add all attributes to the class.
for obj_name, obj in attrs.items():
    new_class.add_to_class(obj_name, obj)

# All the fields of any type declared on this model.
new_fields = (new_class._meta.local_fields +
              new_class._meta.local_many_to_many +
              new_class._meta.virtual_fields)
field_names = set(f.name for f in new_fields)

# Basic setup for proxy models.
if is_proxy:
    base = None
    for parent in [cls for cls in parents if hasattr(cls, '_meta')]:
        if parent._meta.abstract:
            if parent._meta.fields:
                raise TypeError("Abstract base class containing model fields "
                                "not permitted for proxy model '%s'." % name)
            else:
                continue
        if base is not None:
            raise TypeError("Proxy model '%s' has more than one "
                            "non-abstract model base class." % name)
        else:
            base = parent
    if base is None:
        raise TypeError("Proxy model '%s' has no non-abstract "
                        "model base class." % name)
    if (new_class._meta.local_fields or
            new_class._meta.local_many_to_many):
        raise FieldError("Proxy model '%s' contains model fields." % name)
    new_class._meta.setup_proxy(base)
    new_class._meta.concrete_model = base._meta.concrete_model
else:
    new_class._meta.concrete_model = new_class

# Do the appropriate setup for any model parents.
o2o_map = dict([(f.rel.to, f) for f in new_class._meta.local_fields
                if isinstance(f, OneToOneField)])

for base in parents:
    original_base = base
    if not hasattr(base, '_meta'):
        # Things without _meta aren't functional models, so they're
        # uninteresting parents.
        continue

    parent_fields = base._meta.local_fields + base._meta.local_many_to_many
    # Check for clashes between locally declared fields and those
    # on the base classes (we cannot handle shadowed fields at the
    # moment).
    for field in parent_fields:
        if field.name in field_names:
            raise FieldError('Local field %r in class %r clashes '
                             'with field of similar name from '
                             'base class %r' %
                             (field.name, name, base.__name__))
    if not base._meta.abstract:
        # Concrete classes...
        base = base._meta.concrete_model
        if base in o2o_map:
            field = o2o_map[base]
        elif not is_proxy:
            attr_name = '%s_ptr' % base._meta.module_name
            field = OneToOneField(base, name=attr_name,
                                  auto_created=True, parent_link=True)
            new_class.add_to_class(attr_name, field)
        else:
            field = None
        new_class._meta.parents[base] = field
    else:
        # ... and abstract ones.
        for field in parent_fields:
            new_class.add_to_class(field.name, copy.deepcopy(field))

        # Pass any non-abstract parent classes onto the child.
        new_class._meta.parents.update(base._meta.parents)

    # Inherit managers from the abstract base classes.
    new_class.copy_managers(base._meta.abstract_managers)

    # Proxy models inherit the non-abstract managers from their base,
    # unless they have redefined any of them.
    if is_proxy:
        new_class.copy_managers(original_base._meta.concrete_managers)

    # Inherit virtual fields (like GenericForeignKey) from the parent
    # class.
    for field in base._meta.virtual_fields:
        if base._meta.abstract and field.name in field_names:
            raise FieldError('Local field %r in class %r clashes '
                             'with field of similar name from '
                             'abstract base class %r' %
                             (field.name, name, base.__name__))
        new_class.add_to_class(field.name, copy.deepcopy(field))

if abstract:
    # Abstract base models can't be instantiated and don't appear in
    # the list of models for an app. We do the final setup for them a
    # little differently from normal models.
    attr_meta.abstract = False
    new_class.Meta = attr_meta
    return new_class

new_class._prepare()
register_models(new_class._meta.app_label, new_class)

# Because of the way imports happen (recursively), this may or may not
# be the first time this model tries to register with the framework.
# There should only be one class for each model, so we always return
# the registered version.
return get_model(new_class._meta.app_label, name,
                 seed_cache=False, only_installed=False)
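The `sys.modules` access above is the part relevant to this page: for any class, `sys.modules[cls.__module__]` returns the live module object that defined it, and splitting that module's dotted `__name__` recovers the enclosing package, which Django uses as the `app_label`. A minimal standalone sketch of the same lookup (the `app_label_for` helper is illustrative, not Django API):

import sys

def app_label_for(cls):
    # sys.modules maps dotted module names to already-imported module
    # objects, so the defining module is available without re-importing.
    defining_module = sys.modules[cls.__module__]
    parts = defining_module.__name__.split('.')
    # 'django.contrib.sites.models' -> 'sites'; top-level modules
    # (including '__main__') have no parent package, so fall back to
    # the module name itself.
    return parts[-2] if len(parts) > 1 else parts[-1]

Unlike the sketch, the metaclass can index `[-2]` unguarded because it relies on Django's convention that models live in an '<app>.models' module.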
0
Example 49
Project: pyblish-base Source File: plugin.py
def discover(type=None, regex=None, paths=None):
    """Find and return available plug-ins

    This function looks for files within paths registered via
    :func:`register_plugin_path` and those added to `PYBLISHPLUGINPATH`.

    It determines *type* - :class:`Selector`, :class:`Validator`,
    :class:`Extractor` or :class:`Conform` - based on whether it
    matches its corresponding regular expression; e.g.
    "$validator_*^" for plug-ins of type Validator.

    Arguments:
        type (str, optional): !DEPRECATED! Only return plugins of the
            specified type, e.g. validators or extractors. If None is
            specified, return all plugins. Available options are
            "selectors", "validators", "extractors", "conformers",
            "collectors" and "integrators".
        regex (str, optional): Limit results to those matching `regex`.
            Matching is done on classes, as opposed to
            filenames, due to a file possibly hosting
            multiple plugins.
        paths (list, optional): Paths to discover plug-ins from.
            If no paths are provided, all paths are searched.

    """

    if type is not None:
        warnings.warn("type argument has been deprecated and does nothing")

    if regex is not None:
        warnings.warn("discover(): regex argument "
                      "has been deprecated and does nothing")

    plugins = dict()

    # Include plug-ins from registered paths
    for path in paths or plugin_paths():
        path = os.path.normpath(path)
        if not os.path.isdir(path):
            continue

        for fname in os.listdir(path):
            if fname.startswith("_"):
                continue

            abspath = os.path.join(path, fname)

            if not os.path.isfile(abspath):
                continue

            mod_name, mod_ext = os.path.splitext(fname)

            if mod_ext != ".py":
                continue

            module = types.ModuleType(mod_name)
            module.__file__ = abspath

            try:
                with open(abspath) as f:
                    six.exec_(f.read(), module.__dict__)

                # Store a reference to the module, to keep garbage
                # collection from clearing its global imports, such
                # as `import os`.
                sys.modules[mod_name] = module

            except Exception as err:
                log.debug("Skipped: \"%s\" (%s)", mod_name, err)
                continue

            for plugin in plugins_from_module(module):
                if plugin.__name__ in plugins:
                    log.debug("Duplicate plug-in found: %s", plugin)
                    continue

                plugins[plugin.__name__] = plugin

    # Include directly registered plug-ins; these take precedence
    # over those discovered on disk.
    for plugin in registered_plugins():
        if plugin.__name__ in plugins:
            log.debug("Duplicate plug-in found: %s", plugin)
            continue

        plugins[plugin.__name__] = plugin

    plugins = list(plugins.values())
    sort(plugins)  # In-place

    return plugins
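The `sys.modules[mod_name] = module` line here is load-bearing: a module built with `types.ModuleType` and populated via `exec` has no other references, so once the local `module` name goes out of scope CPython may clear the module's namespace, and plug-in classes that reference module-level imports would then break. Parking the module in `sys.modules` keeps it alive. A trimmed sketch of the same load-by-path pattern, assuming Python 3 and a made-up `load_module_from_path` name:

import sys
import types

def load_module_from_path(mod_name, abspath):
    module = types.ModuleType(mod_name)
    module.__file__ = abspath
    with open(abspath) as f:
        # Compile against the real filename so tracebacks point at it.
        code = compile(f.read(), abspath, "exec")
        exec(code, module.__dict__)
    # Keep a strong reference; without it, the module (and the globals
    # its classes close over) could be garbage-collected.
    sys.modules[mod_name] = module
    return module

One caveat carried over from the original: if `mod_name` collides with an already-imported module, this silently replaces it in `sys.modules`.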
0
Example 50
def run(self):
    try:
        resource_class = self.get_resource_class(self.options['classname'])
    except ResourceNotFound as e:
        return e.error_node

    # Add the class's file and this extension to the dependencies.
    self.state.document.settings.env.note_dependency(__file__)
    self.state.document.settings.env.note_dependency(
        sys.modules[resource_class.__module__].__file__)

    resource = get_resource_from_class(resource_class)

    is_list = 'is-list' in self.options

    docname = 'webapi2.0-%s-resource' % \
        get_resource_docname(resource, is_list)

    resource_title = get_resource_title(resource, is_list)

    targetnode = nodes.target('', '', ids=[docname], names=[docname])
    self.state.document.note_explicit_target(targetnode)
    main_section = nodes.section(ids=[docname])

    # Main section
    main_section += nodes.title(text=resource_title)
    main_section += parse_text(
        self, inspect.getdoc(resource),
        where='%s class docstring' % self.options['classname'])

    # Details section
    details_section = nodes.section(ids=['details'])
    main_section += details_section

    details_section += nodes.title(text='Details')
    details_section += self.build_details_table(resource)

    # Fields section
    if (resource.fields and
        (not is_list or resource.singleton)):
        fields_section = nodes.section(ids=['fields'])
        main_section += fields_section

        fields_section += nodes.title(text='Fields')
        fields_section += self.build_fields_table(resource.fields)

    # Links section
    if 'hide-links' not in self.options:
        fields_section = nodes.section(ids=['links'])
        main_section += fields_section

        fields_section += nodes.title(text='Links')
        fields_section += self.build_links_table(resource)

    # HTTP method descriptions
    for http_method in self.get_http_methods(resource, is_list):
        method_section = nodes.section(ids=[http_method])
        main_section += method_section

        method_section += nodes.title(text='HTTP %s' % http_method)
        method_section += self.build_http_method_section(resource,
                                                         http_method)

    if 'hide-examples' not in self.options:
        examples_section = nodes.section(ids=['examples'])
        examples_section += nodes.title(text='Examples')

        has_examples = False

        if is_list:
            mimetype_key = 'list'
        else:
            mimetype_key = 'item'

        for mimetype in resource.allowed_mimetypes:
            try:
                mimetype = mimetype[mimetype_key]
            except KeyError:
                continue

            if mimetype in self.FILTERED_MIMETYPES:
                # Resources have more specific mimetypes. We want to
                # filter out the general ones (like application/json)
                # so we don't show redundant examples.
                continue

            if mimetype.endswith('xml'):
                # JSON is preferred. While we support XML, let's not
                # continue to advertise it.
                continue

            url, headers, data = \
                self.fetch_resource_data(resource, mimetype)
            example_node = build_example(headers, data, mimetype)

            if example_node:
                example_section = \
                    nodes.section(ids=['example_' + mimetype],
                                  classes=['examples', 'requests-example'])
                examples_section += example_section

                example_section += nodes.title(text=mimetype)

                accept_mimetype = mimetype

                if (mimetype.startswith('application/') and
                    mimetype.endswith('+json')):
                    # Instead of telling the user to ask for a specific
                    # mimetype on the request, show them that asking for
                    # application/json works fine.
                    accept_mimetype = 'application/json'

                curl_text = (
                    '$ curl http://reviews.example.com%s -H "Accept: %s"'
                    % (url, accept_mimetype)
                )

                example_section += nodes.literal_block(
                    curl_text, curl_text, classes=['cmdline'])

                example_section += nodes.literal_block(
                    headers, headers, classes=['http-headers'])

                example_section += example_node

                has_examples = True

        if has_examples:
            main_section += examples_section

    return [targetnode, main_section]
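Here `sys.modules` answers a different question: given a class object, where on disk is it defined? Reading `__file__` off `sys.modules[cls.__module__]` gives Sphinx a concrete path to register as a rebuild dependency. A standalone sketch of that lookup (the `inspect` fallback is an addition for robustness, not part of the extension above):

import inspect
import sys

def source_file_of(cls):
    module = sys.modules[cls.__module__]
    try:
        # __file__ can point at a .pyc on some interpreters, so prefer
        # inspect.getsourcefile(), which resolves back to the .py file.
        return inspect.getsourcefile(module) or module.__file__
    except TypeError:
        # Built-in modules have no source file at all.
        return getattr(module, '__file__', None)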