Here are examples of the Python API twisted.internet.reactor.callWhenRunning, taken from open source projects.
67 Examples
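Before the project-specific examples below, here is a minimal, self-contained sketch of the pattern they all share: reactor.callWhenRunning(f, *args, **kwargs) schedules f(*args, **kwargs) to be invoked as soon as the reactor is running, which is the usual way to kick off work that must not start before reactor.run() is called. The announce function and its message argument are illustrative only, not taken from any of the projects listed here.

from twisted.internet import reactor

def announce(message):
    # Invoked during reactor startup; stop the reactor so the script exits.
    print(message)
    reactor.stop()

# Scheduled before the reactor starts; fires once it is running.
reactor.callWhenRunning(announce, "reactor is running")
reactor.run()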
Example 1
def initialize():
    log.info('Starting the Pixelated user agent')
    args = arguments.parse_user_agent_args()
    logger.init(debug=args.debug)
    services_factory = _create_service_factory(args)
    resource = RootResource(services_factory)

    def start():
        start_async = _start_mode(args, resource, services_factory)
        add_top_level_system_callbacks(start_async, services_factory)

    log.info('Running the reactor')
    reactor.callWhenRunning(start)
    reactor.run()
Example 2
def initialize():
    args = arguments.parse_maintenance_args()
    logger.init(debug=args.debug)

    @defer.inlineCallbacks
    def _run():
        leap_session = yield initialize_leap_single_user(
            args.leap_provider_cert,
            args.leap_provider_cert_fingerprint,
            args.credentials_file,
            leap_home=args.leap_home)
        execute_command(args, leap_session)

    reactor.callWhenRunning(_run)
    reactor.run()
Example 3
def test_twistedstatsd_write(self):
    self.client = TwistedStatsDClient('127.0.0.1', 8000)
    self.build_protocol()
    self.client.host_resolved('127.0.0.1')

    def ensure_bytes_sent(bytes_sent):
        self.assertEqual(bytes_sent, len('message'))

    def exercise(callback):
        self.client.write('message', callback=callback)

    d = Deferred()
    d.addCallback(ensure_bytes_sent)
    reactor.callWhenRunning(exercise, d.callback)
    return d
Example 4
@inlineCallbacks
def test_twistedstatsd_write_with_host_resolved(self):
    self.client = TwistedStatsDClient.create('localhost', 8000)
    self.build_protocol()
    yield self.client.resolve_later

    def ensure_bytes_sent(bytes_sent):
        self.assertEqual(bytes_sent, len('message'))
        self.assertEqual(self.client.host, '127.0.0.1')

    def exercise(callback):
        self.client.write('message', callback=callback)

    d = Deferred()
    d.addCallback(ensure_bytes_sent)
    reactor.callWhenRunning(exercise, d.callback)
    yield d
Example 5
def main():
    import config
    import tornado.platform.twisted
    tornado.platform.twisted.install()
    bnw.core.base.config.register(config)
    bnw.core.bnw_mongo.open_db()
    log.startLogging(sys.stdout)
    from twisted.internet import reactor
    reactor.callWhenRunning(twistedrun, reactor)
    reactor.run()
Example 6
def run(self):
    log.msg(".run() called")
    self._add_worker()
    self._start()
    d = self.deferred
    from twisted.internet import reactor
    d.addCallback(self.monitor_result, reactor)
    reactor.callWhenRunning(d.callback, reactor)
    return d
Example 7
def __init__(self, MEMCACHE_SERVERS):
    self.threadID = thread.get_ident
    self.MEMCACHE_SERVERS = MEMCACHE_SERVERS

    # connect to memcache servers
    self.mc_connections = {}
    # self.threadpool = deferedThreadPool(10, 10)

    # need to start up thread pools
    self.running = False
    from twisted.internet import reactor
    self.startID = reactor.callWhenRunning(self._start)
    self.shutdownID = None
Example 10
@skipIf(os.name != "posix", "Sending SIGINT with os.kill is posix only")
def test_fast_keyboard_interrupt_stops_test_run(self):
    # If we get a SIGINT during a test run, the test stops and no more
    # tests run.
    SIGINT = getattr(signal, 'SIGINT', None)
    if not SIGINT:
        raise self.skipTest("SIGINT unavailable")

    class SomeCase(TestCase):
        def test_pause(self):
            return defer.Deferred()

    test = SomeCase('test_pause')
    reactor = self.make_reactor()
    timeout = self.make_timeout()
    runner = self.make_runner(test, timeout * 5)
    result = self.make_result()
    reactor.callWhenRunning(os.kill, os.getpid(), SIGINT)
    self.assertThat(
        lambda: runner.run(result),
        Raises(MatchesException(KeyboardInterrupt)))
Example 11
@skipIf(os.name != "posix", "Sending SIGINT with os.kill is posix only")
def test_fast_sigint_raises_no_result_error(self):
    # If we get a SIGINT during a run, we raise _spinner.NoResultError.
    SIGINT = getattr(signal, 'SIGINT', None)
    if not SIGINT:
        self.skipTest("SIGINT not available")
    reactor = self.make_reactor()
    spinner = self.make_spinner(reactor)
    timeout = self.make_timeout()
    reactor.callWhenRunning(os.kill, os.getpid(), SIGINT)
    self.assertThat(
        lambda: spinner.run(timeout * 5, defer.Deferred),
        Raises(MatchesException(_spinner.NoResultError)))
    self.assertEqual([], spinner._clean())
Example 12
def main():
    plugins_dir = FilePath("/run/docker/plugins/")
    if not plugins_dir.exists():
        plugins_dir.makedirs()

    dvol_path = FilePath("/var/lib/dvol/volumes")
    if not dvol_path.exists():
        dvol_path.makedirs()
    voluminous = Voluminous(dvol_path.path)

    sock = plugins_dir.child("%s.sock" % (VOLUME_DRIVER_NAME,))
    if sock.exists():
        sock.remove()

    adapterServer = internet.UNIXServer(sock.path, getAdapter(voluminous))
    reactor.callWhenRunning(adapterServer.startService)
    reactor.run()
Example 13
def start(self):
    reactor.callWhenRunning(self._on_reactor_start)

    # Initialize gdk threading
    gtk.gdk.threads_enter()
    reactor.run()
    # Reactor is not running. Any async callbacks (Deferreds) can no longer
    # be processed from this point on.
    gtk.gdk.threads_leave()
Example 14
def run(self, use_rpc=False):
    try:
        # Import reactor locally because it also installs it and GUI
        # requires Qt reactor version.
        from twisted.internet import reactor
        if use_rpc:
            config = self.client.config_desc
            reactor.callWhenRunning(self._start_rpc_server,
                                    config.rpc_address,
                                    config.rpc_port)
        reactor.run()
    except Exception as ex:
        logger = logging.getLogger("gnr.app")
        logger.error("Reactor error: {}".format(ex))
    finally:
        self.client.quit()
        sys.exit(0)
Example 15
def opt_remote(self, remote):
    try:
        host, port = remote.split(":")
    except ValueError:
        host, port = remote, DEFAULT_REMOTE_PORT
    reactor.callWhenRunning(
        log.info,
        "Subscribing to remote broker %s:%d" % (host, int(port)))
    self['remotes'].append((host, int(port)))
Example 17
def attachService(reactor, loadsim, service):
    """
    Attach a given L{IService} provider to the given L{IReactorCore}; cause
    it to be started when the reactor starts, and stopped when the reactor
    stops.
    """
    reactor.callWhenRunning(service.startService)
    reactor.addSystemEventTrigger('before', 'shutdown', loadsim.shutdown)
Example 19
def main():
    port = int(sys.argv[1])

    if issubclass(EchoServer, ServiceSOAPBinding):
        AsServer(port, (Service('test'),))
        return

    #from ZSI.twisted.WSresource import WSResource
    #if issubclass(EchoServer, WSResource):
    from twisted.internet import reactor
    reactor.callWhenRunning(twisted_main, port)
    reactor.run()
Example 20
def _TwistedMain():
    """Gets tests to run from configuration file."""
    from twisted.internet import reactor
    reactor.callWhenRunning(_TwistedTestProgram, defaultTest="all")
    reactor.run(installSignalHandlers=0)
Example 23
def opt_wsgi(self, name):
    """
    The FQPN of a WSGI application object to serve as the root resource of
    the webserver.
    """
    pool = threadpool.ThreadPool()
    reactor.callWhenRunning(pool.start)
    reactor.addSystemEventTrigger('after', 'shutdown', pool.stop)
    try:
        application = reflect.namedAny(name)
    except (AttributeError, ValueError):
        raise usage.UsageError("No such WSGI application: %r" % (name,))
    self['root'] = wsgi.WSGIResource(reactor, pool, application)
Example 26
def main(jupyter=False, argv=sys.argv, server_factory=splash_server):
    opts, _ = parse_opts(jupyter, argv)
    if opts.version:
        print(__version__)
        sys.exit(0)

    if not jupyter:
        start_logging(opts)
    log_splash_version()
    bump_nofile_limit()

    with xvfb.autostart(opts.disable_xvfb) as x:
        xvfb.log_options(x)

        install_qtreactor(opts.verbosity >= 5)

        monitor_maxrss(opts.maxrss)
        if opts.manhole:
            manhole_server()

        default_splash_server(
            portnum=opts.port,
            slots=opts.slots,
            proxy_profiles_path=opts.proxy_profiles_path,
            js_profiles_path=opts.js_profiles_path,
            js_disable_cross_domain_access=not opts.js_cross_domain_enabled,
            filters_path=opts.filters_path,
            allowed_schemes=opts.allowed_schemes,
            private_mode=not opts.disable_private_mode,
            ui_enabled=not opts.disable_ui,
            lua_enabled=not opts.disable_lua,
            lua_sandbox_enabled=not opts.disable_lua_sandbox,
            lua_package_path=opts.lua_package_path.strip(";"),
            lua_sandbox_allowed_modules=opts.lua_sandbox_allowed_modules.split(";"),
            verbosity=opts.verbosity,
            max_timeout=opts.max_timeout,
            argument_cache_max_entries=opts.argument_cache_max_entries,
            server_factory=server_factory,
        )
        signal.signal(signal.SIGUSR1, lambda s, f: traceback.print_stack(f))

        if not jupyter:
            from twisted.internet import reactor
            reactor.callWhenRunning(splash_started, opts, sys.stderr)
            reactor.run()
Example 27
def run(self):
    """
    We've overloaded the run method to return a Deferred task object.
    """
    log.msg(".run() called")

    # This is the default behavior
    super(ReactorlessCommando, self).run()

    # Setup a deferred to hold the delayed result and not return it until
    # it's done. This object will be populated with the value of the
    # results once all commands have been executed on all devices.
    d = defer.Deferred()

    # Add monitor_result as a callback
    from twisted.internet import reactor
    d.addCallback(self.monitor_result, reactor)

    # Tell the reactor to call the callback above when it starts
    reactor.callWhenRunning(d.callback, reactor)

    return d
Example 28
def start(self):
    reactor.callWhenRunning(self._start)
    return self.start_deferred
Example 29
def _setupLogging(self, log_file, log_directory, log_level):
    if log_directory is None:
        self.logging_handler = logging.StreamHandler()
    else:
        self.logging_handler = logging.handlers.TimedRotatingFileHandler(
            os.path.join(log_directory, log_file), when='D', interval=1)
    log_format = "%(levelname)s: %(message)s %(pathname)s:%(lineno)d"
    self.logging_handler.setFormatter(logging.Formatter(log_format))
    LOGGER.addHandler(self.logging_handler)
    log_level = log_level.lower()
    log_levels = {
        "debug": logging.DEBUG,
        "info": logging.INFO,
        "warning": logging.WARNING,
        "error": logging.ERROR,
        "critical": logging.CRITICAL,
    }
    if log_level in log_levels:
        LOGGER.setLevel(log_levels[log_level])
    else:
        LOGGER.setLevel(logging.DEBUG)

def start(self):
    reactor.callWhenRunning(self._start)
    return self.start_deferred
Example 30
def start(self):
    reactor.callWhenRunning(self._baseStart)
    return self.start_deferred
Example 39
def run(self):
    """Run/validate/dry-run the given command with options."""
    checks = build_checks(self.descriptions,
                          self.options.connect_timeout,
                          self.options.include_tags,
                          self.options.exclude_tags,
                          self.options.dry_run)

    if not self.options.validate:
        if not self.options.dry_run:
            load_tls_certs(self.options.cacerts_path)

        self.setup_reactor()
        reactor.callWhenRunning(run_checks, checks, self.patterns,
                                self.results)
        reactor.run()

        # Flush output, this really only has an effect when running
        # buffered output
        self.output.flush()

        if not self.options.dry_run and self.results.any_failed():
            return 2

    return 0
Example 41
def run():
    if not hasattr(tcp.Client, 'abortConnection'):
        print "Twisted doesn't have abortConnection! Upgrade to a newer version of Twisted to avoid memory leaks!"
        print 'Pausing for 3 seconds...'
        time.sleep(3)

    realnets = dict((name, net) for name, net in networks.nets.iteritems() if '_testnet' not in name)

    parser = fixargparse.FixedArgumentParser(description='p2pool (version %s)' % (p2pool.__version__,), fromfile_prefix_chars='@')
    parser.add_argument('--version', action='version', version=p2pool.__version__)
    parser.add_argument('--net', help='use specified network (default: bitcoin)', action='store', choices=sorted(realnets), default='bitcoin', dest='net_name')
    parser.add_argument('--testnet', help='''use the network's testnet''', action='store_const', const=True, default=False, dest='testnet')
    parser.add_argument('--debug', help='enable debugging mode', action='store_const', const=True, default=False, dest='debug')
    parser.add_argument('-a', '--address', help='generate payouts to this address (default: <address requested from bitcoind>)', type=str, action='store', default=None, dest='address')
    parser.add_argument('--datadir', help='store data in this directory (default: <directory run_p2pool.py is in>/data)', type=str, action='store', default=None, dest='datadir')
    parser.add_argument('--logfile', help='''log to this file (default: data/<NET>/log)''', type=str, action='store', default=None, dest='logfile')
    parser.add_argument('--merged', help='call getauxblock on this url to get work for merged mining (example: http://ncuser:[email protected]:10332/)', type=str, action='append', default=[], dest='merged_urls')
    parser.add_argument('--give-author', metavar='DONATION_PERCENTAGE', help='donate this percentage of work towards the development of p2pool (default: 1.0)', type=float, action='store', default=1.0, dest='donation_percentage')
    parser.add_argument('--iocp', help='use Windows IOCP API in order to avoid errors due to large number of sockets being open', action='store_true', default=False, dest='iocp')
    parser.add_argument('--irc-announce', help='announce any blocks found on irc://irc.freenode.net/#p2pool', action='store_true', default=False, dest='irc_announce')
    parser.add_argument('--no-bugreport', help='disable submitting caught exceptions to the author', action='store_true', default=False, dest='no_bugreport')

    p2pool_group = parser.add_argument_group('p2pool interface')
    p2pool_group.add_argument('--p2pool-port', metavar='PORT', help='use port PORT to listen for connections (forward this port from your router!) (default: %s)' % ', '.join('%s:%i' % (name, net.P2P_PORT) for name, net in sorted(realnets.items())), type=int, action='store', default=None, dest='p2pool_port')
    p2pool_group.add_argument('-n', '--p2pool-node', metavar='ADDR[:PORT]', help='connect to existing p2pool node at ADDR listening on port PORT (defaults to default p2pool P2P port) in addition to builtin addresses', type=str, action='append', default=[], dest='p2pool_nodes')
    parser.add_argument('--disable-upnp', help='''don't attempt to use UPnP to forward p2pool's P2P port from the Internet to this computer''', action='store_false', default=True, dest='upnp')
    p2pool_group.add_argument('--max-conns', metavar='CONNS', help='maximum incoming connections (default: 40)', type=int, action='store', default=40, dest='p2pool_conns')
    p2pool_group.add_argument('--outgoing-conns', metavar='CONNS', help='outgoing connections (default: 6)', type=int, action='store', default=6, dest='p2pool_outgoing_conns')
    parser.add_argument('--disable-advertise', help='''don't advertise local IP address as being available for incoming connections. useful for running a dark node, along with multiple -n ADDR's and --outgoing-conns 0''', action='store_false', default=True, dest='advertise_ip')

    worker_group = parser.add_argument_group('worker interface')
    worker_group.add_argument('-w', '--worker-port', metavar='PORT or ADDR:PORT', help='listen on PORT on interface with ADDR for RPC connections from miners (default: all interfaces, %s)' % ', '.join('%s:%i' % (name, net.WORKER_PORT) for name, net in sorted(realnets.items())), type=str, action='store', default=None, dest='worker_endpoint')
    worker_group.add_argument('-f', '--fee', metavar='FEE_PERCENTAGE', help='''charge workers mining to their own bitcoin address (by setting their miner's username to a bitcoin address) this percentage fee to mine on your p2pool instance.
Amount displayed at http://127.0.0.1:WORKER_PORT/fee (default: 0)''', type=float, action='store', default=0, dest='worker_fee')

    bitcoind_group = parser.add_argument_group('bitcoind interface')
    bitcoind_group.add_argument('--bitcoind-address', metavar='BITCOIND_ADDRESS', help='connect to this address (default: 127.0.0.1)', type=str, action='store', default='127.0.0.1', dest='bitcoind_address')
    bitcoind_group.add_argument('--bitcoind-rpc-port', metavar='BITCOIND_RPC_PORT', help='''connect to JSON-RPC interface at this port (default: %s <read from bitcoin.conf if password not provided>)''' % ', '.join('%s:%i' % (name, net.PARENT.RPC_PORT) for name, net in sorted(realnets.items())), type=int, action='store', default=None, dest='bitcoind_rpc_port')
    bitcoind_group.add_argument('--bitcoind-rpc-ssl', help='connect to JSON-RPC interface using SSL', action='store_true', default=False, dest='bitcoind_rpc_ssl')
    bitcoind_group.add_argument('--bitcoind-p2p-port', metavar='BITCOIND_P2P_PORT', help='''connect to P2P interface at this port (default: %s <read from bitcoin.conf if password not provided>)''' % ', '.join('%s:%i' % (name, net.PARENT.P2P_PORT) for name, net in sorted(realnets.items())), type=int, action='store', default=None, dest='bitcoind_p2p_port')
    bitcoind_group.add_argument(metavar='BITCOIND_RPCUSERPASS', help='bitcoind RPC interface username, then password, space-separated (only one being provided will cause the username to default to being empty, and none will cause P2Pool to read them from bitcoin.conf)', type=str, action='store', default=[], nargs='*', dest='bitcoind_rpc_userpass')

    args = parser.parse_args()

    if args.debug:
        p2pool.DEBUG = True
        defer.setDebugging(True)
    else:
        p2pool.DEBUG = False

    net_name = args.net_name + ('_testnet' if args.testnet else '')
    net = networks.nets[net_name]

    datadir_path = os.path.join((os.path.join(os.path.dirname(sys.argv[0]), 'data') if args.datadir is None else args.datadir), net_name)
    if not os.path.exists(datadir_path):
        os.makedirs(datadir_path)

    if len(args.bitcoind_rpc_userpass) > 2:
        parser.error('a maximum of two arguments are allowed')
    args.bitcoind_rpc_username, args.bitcoind_rpc_password = ([None, None] + args.bitcoind_rpc_userpass)[-2:]

    if args.bitcoind_rpc_password is None:
        conf_path = net.PARENT.CONF_FILE_FUNC()
        if not os.path.exists(conf_path):
            parser.error('''Bitcoin configuration file not found. Manually enter your RPC password.\r\n'''
                '''If you actually haven't created a configuration file, you should create one at %s with the text:\r\n'''
                '''\r\n'''
                '''server=1\r\n'''
                '''rpcpassword=%x\r\n'''
                '''\r\n'''
                '''Keep that password secret! After creating the file, restart Bitcoin.''' % (conf_path, random.randrange(2**128)))
        conf = open(conf_path, 'rb').read()
        contents = {}
        for line in conf.splitlines(True):
            if '#' in line:
                line = line[:line.index('#')]
            if '=' not in line:
                continue
            k, v = line.split('=', 1)
            contents[k.strip()] = v.strip()
        for conf_name, var_name, var_type in [
            ('rpcuser', 'bitcoind_rpc_username', str),
            ('rpcpassword', 'bitcoind_rpc_password', str),
            ('rpcport', 'bitcoind_rpc_port', int),
            ('port', 'bitcoind_p2p_port', int),
        ]:
            if getattr(args, var_name) is None and conf_name in contents:
                setattr(args, var_name, var_type(contents[conf_name]))
        if args.bitcoind_rpc_password is None:
            parser.error('''Bitcoin configuration file didn't contain an rpcpassword= line!
Add one!''')

    if args.bitcoind_rpc_username is None:
        args.bitcoind_rpc_username = ''

    if args.bitcoind_rpc_port is None:
        args.bitcoind_rpc_port = net.PARENT.RPC_PORT

    if args.bitcoind_p2p_port is None:
        args.bitcoind_p2p_port = net.PARENT.P2P_PORT

    if args.p2pool_port is None:
        args.p2pool_port = net.P2P_PORT

    if args.p2pool_outgoing_conns > 10:
        parser.error('''--outgoing-conns can't be more than 10''')

    if args.worker_endpoint is None:
        worker_endpoint = '', net.WORKER_PORT
    elif ':' not in args.worker_endpoint:
        worker_endpoint = '', int(args.worker_endpoint)
    else:
        addr, port = args.worker_endpoint.rsplit(':', 1)
        worker_endpoint = addr, int(port)

    if args.address is not None:
        try:
            args.pubkey_hash = bitcoin_data.address_to_pubkey_hash(args.address, net.PARENT)
        except Exception, e:
            parser.error('error parsing address: ' + repr(e))
    else:
        args.pubkey_hash = None

    def separate_url(url):
        s = urlparse.urlsplit(url)
        if '@' not in s.netloc:
            parser.error('merged url netloc must contain an "@"')
        userpass, new_netloc = s.netloc.rsplit('@', 1)
        return urlparse.urlunsplit(s._replace(netloc=new_netloc)), userpass
    merged_urls = map(separate_url, args.merged_urls)

    if args.logfile is None:
        args.logfile = os.path.join(datadir_path, 'log')

    logfile = logging.LogFile(args.logfile)
    pipe = logging.TimestampingPipe(logging.TeePipe([logging.EncodeReplacerPipe(sys.stderr), logfile]))
    sys.stdout = logging.AbortPipe(pipe)
    sys.stderr = log.DefaultObserver.stderr = logging.AbortPipe(logging.PrefixPipe(pipe, '> '))
    if hasattr(signal, "SIGUSR1"):
        def sigusr1(signum, frame):
            print 'Caught SIGUSR1, closing %r...' % (args.logfile,)
            logfile.reopen()
            print '...and reopened %r after catching SIGUSR1.' % (args.logfile,)
        signal.signal(signal.SIGUSR1, sigusr1)
    deferral.RobustLoopingCall(logfile.reopen).start(5)

    class ErrorReporter(object):
        def __init__(self):
            self.last_sent = None

        def emit(self, eventDict):
            if not eventDict["isError"]:
                return

            if self.last_sent is not None and time.time() < self.last_sent + 5:
                return
            self.last_sent = time.time()

            if 'failure' in eventDict:
                text = ((eventDict.get('why') or 'Unhandled Error')
                    + '\n' + eventDict['failure'].getTraceback())
            else:
                text = " ".join([str(m) for m in eventDict["message"]]) + "\n"

            from twisted.web import client
            client.getPage(
                url='http://u.forre.st/p2pool_error.cgi',
                method='POST',
                postdata=p2pool.__version__ + ' ' + net.NAME + '\n' + text,
                timeout=15,
            ).addBoth(lambda x: None)
    if not args.no_bugreport:
        log.addObserver(ErrorReporter().emit)

    reactor.callWhenRunning(main, args, net, datadir_path, merged_urls, worker_endpoint)
    reactor.run()
Example 42
def bootstrap(parser=None, args=None, descriptors=None):
    """Bootstrap a feat process, handling command line arguments.

    @param parser: the option parser to use; more options will be added to
                   the parser; if not specified or None a new one will be
                   created
    @type  parser: optparse.OptionParser or None
    @param args: the command line arguments to parse; if not specified or
                 None, sys.argv[1:] will be used
    @type  args: [str()] or None
    @param descriptors: the descriptors of the agent to starts in addition
                        of the host agent; if not specified or None no
                        additional agents will be started
    @type  descriptors: [Descriptor()] or None
    @return: the deferred of the bootstrap chain
    @rtype:  defer.Deferred()"""

    tee = log.init()
    # The purpose of having log buffer here, is to be able to dump the
    # log lines to a journal after establishing connection with it.
    # This is done in stage_configure() of net agency Startup procedure.
    tee.add_keeper('buffer', log.LogBuffer(limit=10000))

    # use the resolver from twisted.names instead of the default
    # the reason for this is that ThreadedResolver behaves strangely
    # after the reconnection - raises the DNSLookupError for names
    # which have been resolved while there was no connection
    resolver.installResolver(reactor)

    if parser is None:
        parser = optparse.OptionParser()
        options.add_options(parser)
    try:
        opts, args = check_options(*parser.parse_args(args))
    except Exception as e:
        error.handle_exception('bootstrap', e, "Failed parsing config")
        sys.exit(1)

    if opts.standalone:
        cls = standalone.Agency
    else:
        cls = net_agency.Agency
    config = config_module.Config()
    config.load(os.environ, opts)
    agency = cls(config)

    applications.load('feat.agents.application', 'feat')
    applications.load('feat.gateway.application', 'featmodels')

    d = defer.Deferred()
    reactor.callWhenRunning(d.callback, None)

    if not opts.standalone:
        # specific to running normal agency
        hostdef = opts.hostdef

        if opts.hostres or opts.hostcat or opts.hostports:
            from feat.agents.common import host
            hostdef = host.HostDef()
            for resdef in opts.hostres:
                parts = resdef.split(":", 1)
                name = parts[0]
                value = 1
                if len(parts) > 1:
                    try:
                        value = int(parts[1])
                    except ValueError:
                        raise OptionError(
                            "Invalid host resource: %s" % resdef), \
                            None, sys.exc_info()[2]
                hostdef.resources[name] = value

            for catdef in opts.hostcat:
                name, value = check_category(catdef)
                hostdef.categories[name] = value

            if opts.hostports:
                hostdef.ports_ranges = dict()
                for ports in opts.hostports:
                    group, start, stop = tuple(ports.split(":"))
                    hostdef.ports_ranges[group] = (int(start), int(stop))

        agency.set_host_def(hostdef)

        d.addCallback(defer.drop_param, agency.initiate)
        for desc, kwargs, name in opts.agents:
            d.addCallback(defer.drop_param, agency.add_static_agent,
                          desc, kwargs, name)
    else:
        # standalone specific
        kwargs = opts.standalone_kwargs or dict()
        to_spawn = opts.agent_id or opts.agents[0][0]
        d.addCallback(defer.drop_param, agency.initiate)
        d.addCallback(defer.drop_param, agency.spawn_agent,
                      to_spawn, **kwargs)

    queue = None
    if opts.agency_daemonize:
        import multiprocessing
        queue = multiprocessing.Queue()

    d.addCallbacks(_bootstrap_success, _bootstrap_failure,
                   callbackArgs=(queue, ), errbackArgs=(agency, queue))

    if not opts.agency_daemonize:
        reactor.run()
    else:
        logname = "%s.%s.log" % ('feat', agency.agency_id)
        logfile = os.path.join(config.agency.logdir, logname)
        log.info("bootstrap", "Daemon processs will be logging to %s",
                 logfile)

        try:
            pid = os.fork()
        except OSError, e:
            sys.stderr.write("Failed to fork: (%d) %s\n" %
                             (e.errno, e.strerror))
            os._exit(1)

        if pid > 0:
            # original process waits for information about what status code
            # to use on exit
            log.info('bootstrap',
                     "Waiting for deamon process to intialize the agency")
            try:
                exit_code, reason = queue.get(timeout=20)
            except multiprocessing.queues.Empty:
                log.error('bootstrap',
                          "20 seconds timeout expires waiting for agency"
                          " in child process to initiate.")
                os._exit(1)
            else:
                log.info('bootstrap', "Process exiting with %d status",
                         exit_code)
                if exit_code:
                    log.info('bootstrap', 'Reason for failure: %s', reason)
                sys.exit(exit_code)
        else:
            # child process performs second fork
            try:
                pid = os.fork()
            except OSError, e:
                sys.stderr.write("Failed to fork: (%d) %s\n" %
                                 (e.errno, e.strerror))
                os._exit(1)
            if pid > 0:
                # child process just exits
                sys.exit(0)
            else:
                # grandchild runs the reactor and logs to an external log file
                log.FluLogKeeper.redirect_to(logfile, logfile)
                reactor.run()

    global _exit_code
    log.info('bootstrap', 'Process exiting with %d status', _exit_code)
    sys.exit(_exit_code)
Example 43
def setupPipeline(pipeline, root_service, settings):
    state.pipeline_processors = []
    for processor in pipeline:
        args = []
        if ':' in processor:
            processor, arglist = processor.split(':', 1)
            args = arglist.split(',')
        if processor == 'aggregate':
            setupAggregatorProcessor(root_service, settings)
        elif processor == 'rewrite':
            setupRewriterProcessor(root_service, settings)
        elif processor == 'relay':
            setupRelayProcessor(root_service, settings)
        elif processor == 'write':
            setupWriterProcessor(root_service, settings)
        else:
            raise ValueError("Invalid pipeline processor '%s'" % processor)
        plugin_class = Processor.plugins[processor]
        state.pipeline_processors.append(plugin_class(*args))
        if processor == 'relay':
            state.pipeline_processors_generated.append(plugin_class(*args))

    events.metricReceived.addHandler(run_pipeline)
    events.metricGenerated.addHandler(run_pipeline_generated)

    def activate_processors():
        for processor in state.pipeline_processors:
            processor.pipeline_ready()

    from twisted.internet import reactor
    reactor.callWhenRunning(activate_processors)
Example 44
def run(self):
    # parse the server path
    self.get_server_path()

    # get server name
    if self.server_name is None:
        self.server_name = os.path.basename(self.server_path)
    if self.server_name in self.servers:
        raise Mark2Error("server already running: %s" % self.server_name)

    # check for mark2.properties
    self.check_config()

    # check we own the server dir
    self.check_ownership()

    # clear old stuff
    for x in ('log', 'sock', 'pid'):
        if os.path.exists(self.shared(x)):
            os.remove(self.shared(x))

    i = 1
    while True:
        p = self.shared("log.%d" % i)
        if not os.path.exists(p):
            break
        os.remove(p)
        i += 1

    if self.daemonize() == 0:
        with open(self.shared('pid'), 'w') as f:
            f.write("{0}\n".format(os.getpid()))

        mgr = manager.Manager(self.shared_path, self.server_name,
                              self.server_path, self.jar_file)
        reactor.callWhenRunning(mgr.startup)
        reactor.run()

        sys.exit(0)

    self.wait = '# mark2 started|stopped\.'
    self.wait_from_start = True
Example 45
def start_client_process(queue, start_ranking, datadir=None,
                         transaction_system=False, client=None):
    if datadir:
        log_name = path.join(datadir, CLIENT_LOG_NAME)
    else:
        log_name = CLIENT_LOG_NAME
    config_logging(log_name)
    logger = logging.getLogger("golem.client")

    environments = load_environments()

    if not client:
        try:
            client = Client(datadir=datadir, transaction_system=transaction_system)
            client.start()
        except Exception as exc:
            logger.error("Client process error: {}".format(exc))
            queue.put(exc)
            return

    for env in environments:
        client.environments_manager.add_environment(env)
    client.environments_manager.load_config(client.datadir)

    def listen():
        rpc_server = WebSocketRPCServerFactory(interface='localhost')
        rpc_server.listen()

        client_service_info = client.set_rpc_server(rpc_server)

        queue.put(client_service_info)
        queue.close()

    from twisted.internet import reactor

    if start_ranking:
        client.ranking.run(reactor)

    reactor.callWhenRunning(listen)
    if not reactor.running:
        reactor.run()
Example 46
def execute(self, *args, **kwargs):
    from twisted.internet import reactor, threads

    self.rpc = WebSocketRPCClientFactory(self.address, self.port,
                                         on_disconnect=self.shutdown)

    def on_connected(_):
        rpc_client = self.rpc.build_simple_client()
        self.cli.register_client(rpc_client)
        threads.deferToThread(self.cli.execute, *args, **kwargs).addBoth(self.shutdown)

    def on_error(_):
        self.cli.register_client(WebSocketCLI.NoConnection())
        self.cli.execute(*args, **kwargs)
        self.shutdown()

    def connect():
        self.rpc.connect().addCallbacks(on_connected, on_error)

    reactor.callWhenRunning(connect)
    reactor.run()
Example 47
def __init__(self, dbapiName, *connargs, **connkw):
    """Create a new ConnectionPool.

    Any positional or keyword arguments other than those documented here
    are passed to the DB-API object when connecting. Use these arguments to
    pass database names, usernames, passwords, etc.

    @param dbapiName: an import string to use to obtain a DB-API compatible
                      module (e.g. 'pyPgSQL.PgSQL')
    @param cp_min: the minimum number of connections in pool (default 3)
    @param cp_max: the maximum number of connections in pool (default 5)
    @param cp_noisy: generate informational log messages during operation
                     (default False)
    @param cp_openfun: a callback invoked after every connect() on the
                       underlying DB-API object. The callback is passed a
                       new DB-API connection object. This callback can
                       setup per-connection state such as charset,
                       timezone, etc.
    @param cp_reconnect: detect connections which have failed and reconnect
                         (default False). Failed connections may result in
                         ConnectionLost exceptions, which indicate the
                         query may need to be re-sent.
    @param cp_good_sql: an sql query which should always succeed and change
                        no state (default 'select 1')
    """
    self.dbapiName = dbapiName
    self.dbapi = reflect.namedModule(dbapiName)

    if getattr(self.dbapi, 'apilevel', None) != '2.0':
        log.msg('DB API module not DB API 2.0 compliant.')

    if getattr(self.dbapi, 'threadsafety', 0) < 1:
        log.msg('DB API module not sufficiently thread-safe.')

    self.connargs = connargs
    self.connkw = connkw

    for arg in self.CP_ARGS:
        cp_arg = 'cp_%s' % arg
        if connkw.has_key(cp_arg):
            setattr(self, arg, connkw[cp_arg])
            del connkw[cp_arg]

    self.min = min(self.min, self.max)
    self.max = max(self.min, self.max)

    self.connections = {}  # all connections, hashed on thread id

    # these are optional so import them here
    from twisted.python import threadpool
    import thread

    self.threadID = thread.get_ident
    self.threadpool = threadpool.ThreadPool(self.min, self.max)

    from twisted.internet import reactor
    self.startID = reactor.callWhenRunning(self._start)
Example 49
def opt_author_whitelist(self, network):
    reactor.callWhenRunning(log.info,
                            "Whitelisting %s for submission" % network)
    self['running_author-whitelist'].append(ip_network(network, strict=False))
Example 50
def opt_subscriber_whitelist(self, network):
    reactor.callWhenRunning(log.info,
                            "Whitelisting %s for subscription" % network)
    self['running_subscriber-whitelist'].append(ip_network(network, strict=False))