Here are examples of the Python API twisted.internet.reactor.callWhenRunning, taken from open source projects. By voting up, you can indicate which examples are most useful and appropriate.
43 Examples
3
Example 1
Project: pixelated-user-agent Source File: application.py
def initialize():
    """Entry point: configure the Pixelated user agent and run the reactor."""
    log.info('Starting the Pixelated user agent')
    cli_args = arguments.parse_user_agent_args()
    logger.init(debug=cli_args.debug)
    factory = _create_service_factory(cli_args)
    root = RootResource(factory)

    def _boot():
        # Executed once the reactor is up: start the chosen mode and hook
        # system-level callbacks onto the resulting deferred.
        start_async = _start_mode(cli_args, root, factory)
        add_top_level_system_callbacks(start_async, factory)

    log.info('Running the reactor')
    reactor.callWhenRunning(_boot)
    reactor.run()
3
Example 2
def initialize():
    """Maintenance entry point: open a LEAP session, run one command."""
    cli_args = arguments.parse_maintenance_args()
    logger.init(debug=cli_args.debug)

    @defer.inlineCallbacks
    def _run():
        # Establish the single-user LEAP session, then execute the
        # requested maintenance command against it.
        leap_session = yield initialize_leap_single_user(
            cli_args.leap_provider_cert,
            cli_args.leap_provider_cert_fingerprint,
            cli_args.credentials_file,
            leap_home=cli_args.leap_home)
        execute_command(cli_args, leap_session)

    reactor.callWhenRunning(_run)
    reactor.run()
3
Example 3
Project: txstatsd Source File: test_client.py
def test_twistedstatsd_write(self):
    """write() after host resolution reports the byte count via callback."""
    self.client = TwistedStatsDClient('127.0.0.1', 8000)
    self.build_protocol()
    self.client.host_resolved('127.0.0.1')

    def _check_sent(num_bytes):
        self.assertEqual(num_bytes, len('message'))

    def _send(cb):
        self.client.write('message', callback=cb)

    done = Deferred()
    done.addCallback(_check_sent)
    # Perform the write once the reactor runs; the deferred closes the test.
    reactor.callWhenRunning(_send, done.callback)
    return done
3
Example 4
Project: txstatsd Source File: test_client.py
@inlineCallbacks
def test_twistedstatsd_write_with_host_resolved(self):
    """write() works once deferred host resolution has completed."""
    self.client = TwistedStatsDClient.create(
        'localhost', 8000)
    self.build_protocol()
    # Wait for 'localhost' to resolve before exercising write().
    yield self.client.resolve_later

    def _check_sent(num_bytes):
        self.assertEqual(num_bytes, len('message'))
        self.assertEqual(self.client.host, '127.0.0.1')

    def _send(cb):
        self.client.write('message', callback=cb)

    done = Deferred()
    done.addCallback(_check_sent)
    reactor.callWhenRunning(_send, done.callback)
    yield done
3
Example 5
Project: bnw Source File: admin.py
def main():
    """Bootstrap the bnw admin process and run the twisted reactor."""
    import config
    import tornado.platform.twisted
    # NOTE: install() must run before twisted.internet.reactor is imported
    # below — it installs the tornado-backed reactor, and importing the
    # reactor first would lock in the default one.
    tornado.platform.twisted.install()
    bnw.core.base.config.register(config)
    bnw.core.bnw_mongo.open_db()
    log.startLogging(sys.stdout)
    from twisted.internet import reactor
    reactor.callWhenRunning(twistedrun, reactor)
    reactor.run()
3
Example 6
def run(self):
    """Start the worker and return a deferred fired at reactor startup."""
    log.msg(".run() called")
    self._add_worker()
    self._start()
    result = self.deferred
    from twisted.internet import reactor
    result.addCallback(self.monitor_result, reactor)
    # Fire the chain (with the reactor as its value) once the loop is up.
    reactor.callWhenRunning(result.callback, reactor)
    return result
3
Example 7
def __init__(self, MEMCACHE_SERVERS):
    """Prepare the memcache client pool; real startup is deferred to _start."""
    # Callable used to identify the current thread.
    self.threadID = thread.get_ident
    self.MEMCACHE_SERVERS = MEMCACHE_SERVERS
    # connect to memcache servers
    # Lazily-populated connection map — presumably keyed per thread/server;
    # verify against _start() and the lookup sites.
    self.mc_connections = {}
    # self.threadpool = deferedThreadPool(10, 10)
    # need to start up thread pools
    self.running = False
    from twisted.internet import reactor
    # Defer _start until the reactor runs; keep the handle so the pending
    # call can be cancelled before startup if needed.
    self.startID = reactor.callWhenRunning(self._start)
    self.shutdownID = None
3
Example 8
Project: pth-toolkit Source File: test_deferredruntest.py
@skipIf(os.name != "posix", "Sending SIGINT with os.kill is posix only")
def test_fast_keyboard_interrupt_stops_test_run(self):
    """A SIGINT delivered as soon as the reactor starts aborts the run.

    The runner must surface it as KeyboardInterrupt instead of running
    further tests.
    """
    SIGINT = getattr(signal, 'SIGINT', None)
    if not SIGINT:
        # skipTest raises SkipTest itself; the original `raise` in front of
        # it was dead code (and inconsistent with the sibling SIGINT test).
        self.skipTest("SIGINT unavailable")

    class SomeCase(TestCase):
        def test_pause(self):
            # Never fires: keeps the test "running" until interrupted.
            return defer.Deferred()

    test = SomeCase('test_pause')
    reactor = self.make_reactor()
    timeout = self.make_timeout()
    runner = self.make_runner(test, timeout * 5)
    result = self.make_result()
    # Deliver SIGINT to our own process the moment the reactor spins up.
    reactor.callWhenRunning(os.kill, os.getpid(), SIGINT)
    self.assertThat(lambda: runner.run(result),
                    Raises(MatchesException(KeyboardInterrupt)))
3
Example 9
Project: pth-toolkit Source File: test_spinner.py
@skipIf(os.name != "posix", "Sending SIGINT with os.kill is posix only")
def test_fast_sigint_raises_no_result_error(self):
    """SIGINT at reactor startup makes the spinner raise NoResultError."""
    SIGINT = getattr(signal, 'SIGINT', None)
    if not SIGINT:
        self.skipTest("SIGINT not available")
    reactor = self.make_reactor()
    spinner = self.make_spinner(reactor)
    timeout = self.make_timeout()
    # Interrupt our own process as soon as the event loop starts.
    reactor.callWhenRunning(os.kill, os.getpid(), SIGINT)
    self.assertThat(lambda: spinner.run(timeout * 5, defer.Deferred),
                    Raises(MatchesException(_spinner.NoResultError)))
    # The spinner must leave no junk behind after the aborted run.
    self.assertEqual([], spinner._clean())
3
Example 10
Project: dvol Source File: plugin.py
def main():
    """Set up the dvol docker-plugin socket and serve it on the reactor."""
    plugins_dir = FilePath("/run/docker/plugins/")
    if not plugins_dir.exists():
        plugins_dir.makedirs()

    dvol_path = FilePath("/var/lib/dvol/volumes")
    if not dvol_path.exists():
        dvol_path.makedirs()

    voluminous = Voluminous(dvol_path.path)

    # Remove any stale socket left over from a previous run.
    sock = plugins_dir.child("%s.sock" % (VOLUME_DRIVER_NAME,))
    if sock.exists():
        sock.remove()

    unix_server = internet.UNIXServer(
        sock.path, getAdapter(voluminous))
    reactor.callWhenRunning(unix_server.startService)
    reactor.run()
3
Example 11
Project: deluge Source File: gtkui.py
def start(self):
    """Run the GTK UI: bracket the reactor loop with gdk thread locking."""
    reactor.callWhenRunning(self._on_reactor_start)
    # Initialize gdk threading
    gtk.gdk.threads_enter()
    reactor.run()
    # Reactor is not running. Any async callbacks (Deferreds) can no longer
    # be processed from this point on.
    gtk.gdk.threads_leave()
3
Example 12
def run(self, use_rpc=False):
    """Run the application's event loop, optionally starting the RPC server.

    Always quits the client and exits the process when the loop ends.
    """
    try:
        # Import reactor locally because it also installs it and GUI
        # requires Qt reactor version.
        from twisted.internet import reactor
        if use_rpc:
            config = self.client.config_desc
            reactor.callWhenRunning(self._start_rpc_server,
                                    config.rpc_address,
                                    config.rpc_port)
        reactor.run()
    except Exception as ex:
        logging.getLogger("gnr.app").error("Reactor error: {}".format(ex))
    finally:
        self.client.quit()
        sys.exit(0)
3
Example 13
Project: Comet Source File: broker.py
def opt_remote(self, remote):
    """Register a remote broker ("host" or "host:port") for subscription."""
    try:
        host, port = remote.split(":")
    except ValueError:
        # No port supplied: use the default remote port.
        host, port = remote, DEFAULT_REMOTE_PORT
    port_num = int(port)
    # Log the subscription once the reactor is actually running.
    reactor.callWhenRunning(
        log.info,
        "Subscribing to remote broker %s:%d" % (host, port_num)
    )
    self['remotes'].append((host, port_num))
3
Example 14
Project: ccs-calendarserver Source File: sim.py
def attachService(reactor, loadsim, service):
    """
    Attach a given L{IService} provider to the given L{IReactorCore}: the
    service is started when the reactor starts, and the load simulator is
    shut down just before the reactor shuts down.  (Note: it is the
    simulator, not the service, that gets the shutdown hook.)
    """
    reactor.callWhenRunning(service.startService)
    reactor.addSystemEventTrigger('before', 'shutdown', loadsim.shutdown)
3
Example 15
def main():
    """Serve EchoServer: SOAP binding via AsServer, else a twisted resource."""
    port = int(sys.argv[1])
    if issubclass(EchoServer, ServiceSOAPBinding):
        AsServer(port, (Service('test'),))
        return
    # Not a SOAP binding: fall through to the twisted-based server.
    from twisted.internet import reactor
    reactor.callWhenRunning(twisted_main, port)
    reactor.run()
3
Example 16
def _TwistedMain():
    """Gets tests to run from configuration file.

    Runs the test program inside the twisted reactor; signal handlers are
    left uninstalled (installSignalHandlers=0) so the process keeps its own.
    """
    from twisted.internet import reactor
    reactor.callWhenRunning(_TwistedTestProgram, defaultTest="all")
    reactor.run(installSignalHandlers=0)
3
Example 17
Project: SubliminalCollaborator Source File: tap.py
def opt_wsgi(self, name):
    """
    The FQPN of a WSGI application object to serve as the root resource of
    the webserver.
    """
    pool = threadpool.ThreadPool()
    # Start the pool with the reactor; stop it only after reactor shutdown
    # so in-flight WSGI calls can finish.
    reactor.callWhenRunning(pool.start)
    reactor.addSystemEventTrigger('after', 'shutdown', pool.stop)
    try:
        app = reflect.namedAny(name)
    except (AttributeError, ValueError):
        raise usage.UsageError("No such WSGI application: %r" % (name,))
    self['root'] = wsgi.WSGIResource(reactor, pool, app)
0
Example 18
def start():
    # Defer main() until the reactor is running, then enter the event loop.
    reactor.callWhenRunning(main)
    reactor.run()
0
Example 19
Project: splash Source File: server.py
def main(jupyter=False, argv=sys.argv, server_factory=splash_server):
    """Splash entry point: parse options, start Xvfb and the qt reactor,
    then serve until the reactor stops."""
    opts, _ = parse_opts(jupyter, argv)
    if opts.version:
        print(__version__)
        sys.exit(0)
    if not jupyter:
        start_logging(opts)
    log_splash_version()
    bump_nofile_limit()
    # The Xvfb display must stay alive for the whole server lifetime, so
    # everything below runs inside this context manager.
    with xvfb.autostart(opts.disable_xvfb) as x:
        xvfb.log_options(x)
        # qt reactor has to be installed before twisted.internet.reactor
        # is imported below.
        install_qtreactor(opts.verbosity >= 5)
        monitor_maxrss(opts.maxrss)
        if opts.manhole:
            manhole_server()
        default_splash_server(
            portnum=opts.port,
            slots=opts.slots,
            proxy_profiles_path=opts.proxy_profiles_path,
            js_profiles_path=opts.js_profiles_path,
            js_disable_cross_domain_access=not opts.js_cross_domain_enabled,
            filters_path=opts.filters_path,
            allowed_schemes=opts.allowed_schemes,
            private_mode=not opts.disable_private_mode,
            ui_enabled=not opts.disable_ui,
            lua_enabled=not opts.disable_lua,
            lua_sandbox_enabled=not opts.disable_lua_sandbox,
            lua_package_path=opts.lua_package_path.strip(";"),
            lua_sandbox_allowed_modules=opts.lua_sandbox_allowed_modules.split(";"),
            verbosity=opts.verbosity,
            max_timeout=opts.max_timeout,
            argument_cache_max_entries=opts.argument_cache_max_entries,
            server_factory=server_factory,
        )
        # SIGUSR1 dumps the current stack — handy for debugging hangs.
        signal.signal(signal.SIGUSR1, lambda s, f: traceback.print_stack(f))
        if not jupyter:
            from twisted.internet import reactor
            reactor.callWhenRunning(splash_started, opts, sys.stderr)
            reactor.run()
0
Example 20
def run(self):
    """
    We've overloaded the run method to return a Deferred task object.
    """
    log.msg(".run() called")
    # Default behavior from the parent class first.
    super(ReactorlessCommando, self).run()
    from twisted.internet import reactor
    # Deferred that will eventually carry the collected device results;
    # it is not returned fired — callers wait for the chain to complete.
    results_d = defer.Deferred()
    results_d.addCallback(self.monitor_result, reactor)
    # Kick the chain off (with the reactor as its value) at reactor start.
    reactor.callWhenRunning(results_d.callback, reactor)
    return results_d
0
Example 21
def start(self):
    # Schedule _start for reactor startup; callers wait on start_deferred.
    reactor.callWhenRunning(self._start)
    return self.start_deferred
0
Example 22
Project: awspider Source File: base.py
def _setupLogging(self, log_file, log_directory, log_level):
    """Attach a logging handler and set the global logger's level.

    With no log_directory, logs go to stderr via StreamHandler; otherwise
    to a daily-rotating file under log_directory.  log_level is matched
    case-insensitively against the standard level names, falling back to
    DEBUG for anything unrecognized.
    """
    if log_directory is None:
        self.logging_handler = logging.StreamHandler()
    else:
        # Rotate once per day.
        self.logging_handler = logging.handlers.TimedRotatingFileHandler(
            os.path.join(log_directory, log_file),
            when='D',
            interval=1)
    log_format = "%(levelname)s: %(message)s %(pathname)s:%(lineno)d"
    self.logging_handler.setFormatter(logging.Formatter(log_format))
    LOGGER.addHandler(self.logging_handler)
    log_levels = {
        "debug": logging.DEBUG,
        "info": logging.INFO,
        "warning": logging.WARNING,
        "error": logging.ERROR,
        "critical": logging.CRITICAL,
    }
    # dict.get with a default replaces the original membership if/else.
    LOGGER.setLevel(log_levels.get(log_level.lower(), logging.DEBUG))
def start(self):
    # Defer _start to reactor startup; callers wait on start_deferred.
    reactor.callWhenRunning(self._start)
    return self.start_deferred
0
Example 23
def start(self):
    # Defer _baseStart to reactor startup; callers wait on start_deferred.
    reactor.callWhenRunning(self._baseStart)
    return self.start_deferred
0
Example 24
def start( self ):
    # Defer _start to reactor startup; callers wait on start_deferred.
    reactor.callWhenRunning( self._start )
    return self.start_deferred
0
Example 25
def run(self):
    """Run, validate, or dry-run the configured checks.

    Returns 2 when any check failed in a real run, otherwise 0.
    """
    opts = self.options
    checks = build_checks(self.descriptions, opts.connect_timeout,
                          opts.include_tags, opts.exclude_tags,
                          opts.dry_run)
    if not opts.validate:
        if not opts.dry_run:
            load_tls_certs(opts.cacerts_path)
        self.setup_reactor()
        reactor.callWhenRunning(run_checks, checks, self.patterns,
                                self.results)
        reactor.run()
    # Flushing only matters when output is buffered.
    self.output.flush()
    if not opts.dry_run and self.results.any_failed():
        return 2
    return 0
0
Example 26
def run(self):
    # Schedule self.start for reactor startup; the reactor itself is
    # presumably started elsewhere — confirm against the caller.
    reactor.callWhenRunning(self.start)
0
Example 27
Project: p2pool-n Source File: main.py
def run():
    """p2pool entry point: parse CLI options, resolve bitcoind credentials
    (falling back to bitcoin.conf), set up logging and error reporting,
    then hand off to main() once the reactor is running."""
    # Old Twisted lacks abortConnection, which p2pool relies on to avoid
    # leaking half-open connections.
    if not hasattr(tcp.Client, 'abortConnection'):
        print "Twisted doesn't have abortConnection! Upgrade to a newer version of Twisted to avoid memory leaks!"
        print 'Pausing for 3 seconds...'
        time.sleep(3)

    # All non-testnet networks, selectable via --net.
    realnets = dict((name, net) for name, net in networks.nets.iteritems() if '_testnet' not in name)

    # ----- command-line interface -----
    parser = fixargparse.FixedArgumentParser(description='p2pool (version %s)' % (p2pool.__version__,), fromfile_prefix_chars='@')
    parser.add_argument('--version', action='version', version=p2pool.__version__)
    parser.add_argument('--net',
        help='use specified network (default: bitcoin)',
        action='store', choices=sorted(realnets), default='bitcoin', dest='net_name')
    parser.add_argument('--testnet',
        help='''use the network's testnet''',
        action='store_const', const=True, default=False, dest='testnet')
    parser.add_argument('--debug',
        help='enable debugging mode',
        action='store_const', const=True, default=False, dest='debug')
    parser.add_argument('-a', '--address',
        help='generate payouts to this address (default: <address requested from bitcoind>)',
        type=str, action='store', default=None, dest='address')
    parser.add_argument('--datadir',
        help='store data in this directory (default: <directory run_p2pool.py is in>/data)',
        type=str, action='store', default=None, dest='datadir')
    parser.add_argument('--logfile',
        help='''log to this file (default: data/<NET>/log)''',
        type=str, action='store', default=None, dest='logfile')
    parser.add_argument('--merged',
        help='call getauxblock on this url to get work for merged mining (example: http://ncuser:[email protected]:10332/)',
        type=str, action='append', default=[], dest='merged_urls')
    parser.add_argument('--give-author', metavar='DONATION_PERCENTAGE',
        help='donate this percentage of work towards the development of p2pool (default: 1.0)',
        type=float, action='store', default=1.0, dest='donation_percentage')
    parser.add_argument('--iocp',
        help='use Windows IOCP API in order to avoid errors due to large number of sockets being open',
        action='store_true', default=False, dest='iocp')
    parser.add_argument('--irc-announce',
        help='announce any blocks found on irc://irc.freenode.net/#p2pool',
        action='store_true', default=False, dest='irc_announce')
    parser.add_argument('--no-bugreport',
        help='disable submitting caught exceptions to the author',
        action='store_true', default=False, dest='no_bugreport')
    p2pool_group = parser.add_argument_group('p2pool interface')
    p2pool_group.add_argument('--p2pool-port', metavar='PORT',
        help='use port PORT to listen for connections (forward this port from your router!) (default: %s)' % ', '.join('%s:%i' % (name, net.P2P_PORT) for name, net in sorted(realnets.items())),
        type=int, action='store', default=None, dest='p2pool_port')
    p2pool_group.add_argument('-n', '--p2pool-node', metavar='ADDR[:PORT]',
        help='connect to existing p2pool node at ADDR listening on port PORT (defaults to default p2pool P2P port) in addition to builtin addresses',
        type=str, action='append', default=[], dest='p2pool_nodes')
    parser.add_argument('--disable-upnp',
        help='''don't attempt to use UPnP to forward p2pool's P2P port from the Internet to this computer''',
        action='store_false', default=True, dest='upnp')
    p2pool_group.add_argument('--max-conns', metavar='CONNS',
        help='maximum incoming connections (default: 40)',
        type=int, action='store', default=40, dest='p2pool_conns')
    p2pool_group.add_argument('--outgoing-conns', metavar='CONNS',
        help='outgoing connections (default: 6)',
        type=int, action='store', default=6, dest='p2pool_outgoing_conns')
    parser.add_argument('--disable-advertise',
        help='''don't advertise local IP address as being available for incoming connections. useful for running a dark node, along with multiple -n ADDR's and --outgoing-conns 0''',
        action='store_false', default=True, dest='advertise_ip')
    worker_group = parser.add_argument_group('worker interface')
    worker_group.add_argument('-w', '--worker-port', metavar='PORT or ADDR:PORT',
        help='listen on PORT on interface with ADDR for RPC connections from miners (default: all interfaces, %s)' % ', '.join('%s:%i' % (name, net.WORKER_PORT) for name, net in sorted(realnets.items())),
        type=str, action='store', default=None, dest='worker_endpoint')
    worker_group.add_argument('-f', '--fee', metavar='FEE_PERCENTAGE',
        help='''charge workers mining to their own bitcoin address (by setting their miner's username to a bitcoin address) this percentage fee to mine on your p2pool instance. Amount displayed at http://127.0.0.1:WORKER_PORT/fee (default: 0)''',
        type=float, action='store', default=0, dest='worker_fee')
    bitcoind_group = parser.add_argument_group('bitcoind interface')
    bitcoind_group.add_argument('--bitcoind-address', metavar='BITCOIND_ADDRESS',
        help='connect to this address (default: 127.0.0.1)',
        type=str, action='store', default='127.0.0.1', dest='bitcoind_address')
    bitcoind_group.add_argument('--bitcoind-rpc-port', metavar='BITCOIND_RPC_PORT',
        help='''connect to JSON-RPC interface at this port (default: %s <read from bitcoin.conf if password not provided>)''' % ', '.join('%s:%i' % (name, net.PARENT.RPC_PORT) for name, net in sorted(realnets.items())),
        type=int, action='store', default=None, dest='bitcoind_rpc_port')
    bitcoind_group.add_argument('--bitcoind-rpc-ssl',
        help='connect to JSON-RPC interface using SSL',
        action='store_true', default=False, dest='bitcoind_rpc_ssl')
    bitcoind_group.add_argument('--bitcoind-p2p-port', metavar='BITCOIND_P2P_PORT',
        help='''connect to P2P interface at this port (default: %s <read from bitcoin.conf if password not provided>)''' % ', '.join('%s:%i' % (name, net.PARENT.P2P_PORT) for name, net in sorted(realnets.items())),
        type=int, action='store', default=None, dest='bitcoind_p2p_port')
    bitcoind_group.add_argument(metavar='BITCOIND_RPCUSERPASS',
        help='bitcoind RPC interface username, then password, space-separated (only one being provided will cause the username to default to being empty, and none will cause P2Pool to read them from bitcoin.conf)',
        type=str, action='store', default=[], nargs='*', dest='bitcoind_rpc_userpass')
    args = parser.parse_args()

    # ----- option post-processing -----
    if args.debug:
        p2pool.DEBUG = True
        defer.setDebugging(True)
    else:
        p2pool.DEBUG = False
    net_name = args.net_name + ('_testnet' if args.testnet else '')
    net = networks.nets[net_name]
    datadir_path = os.path.join((os.path.join(os.path.dirname(sys.argv[0]), 'data') if args.datadir is None else args.datadir), net_name)
    if not os.path.exists(datadir_path):
        os.makedirs(datadir_path)
    if len(args.bitcoind_rpc_userpass) > 2:
        parser.error('a maximum of two arguments are allowed')
    # Right-align: a single positional becomes the password, two become
    # username + password, zero leaves both None (read from bitcoin.conf).
    args.bitcoind_rpc_username, args.bitcoind_rpc_password = ([None, None] + args.bitcoind_rpc_userpass)[-2:]

    # ----- credential fallback: parse bitcoin.conf -----
    if args.bitcoind_rpc_password is None:
        conf_path = net.PARENT.CONF_FILE_FUNC()
        if not os.path.exists(conf_path):
            parser.error('''Bitcoin configuration file not found. Manually enter your RPC password.\r\n'''
                '''If you actually haven't created a configuration file, you should create one at %s with the text:\r\n'''
                '''\r\n'''
                '''server=1\r\n'''
                '''rpcpassword=%x\r\n'''
                '''\r\n'''
                '''Keep that password secret! After creating the file, restart Bitcoin.''' % (conf_path, random.randrange(2**128)))
        conf = open(conf_path, 'rb').read()
        contents = {}
        # Minimal key=value parser; '#' starts a comment.
        for line in conf.splitlines(True):
            if '#' in line:
                line = line[:line.index('#')]
            if '=' not in line:
                continue
            k, v = line.split('=', 1)
            contents[k.strip()] = v.strip()
        # Only fill in values the user did not supply on the command line.
        for conf_name, var_name, var_type in [
            ('rpcuser', 'bitcoind_rpc_username', str),
            ('rpcpassword', 'bitcoind_rpc_password', str),
            ('rpcport', 'bitcoind_rpc_port', int),
            ('port', 'bitcoind_p2p_port', int),
        ]:
            if getattr(args, var_name) is None and conf_name in contents:
                setattr(args, var_name, var_type(contents[conf_name]))
        if args.bitcoind_rpc_password is None:
            parser.error('''Bitcoin configuration file didn't contain an rpcpassword= line! Add one!''')
    if args.bitcoind_rpc_username is None:
        args.bitcoind_rpc_username = ''
    if args.bitcoind_rpc_port is None:
        args.bitcoind_rpc_port = net.PARENT.RPC_PORT
    if args.bitcoind_p2p_port is None:
        args.bitcoind_p2p_port = net.PARENT.P2P_PORT
    if args.p2pool_port is None:
        args.p2pool_port = net.P2P_PORT
    if args.p2pool_outgoing_conns > 10:
        parser.error('''--outgoing-conns can't be more than 10''')

    # ----- worker endpoint: "", PORT, or ADDR:PORT -----
    if args.worker_endpoint is None:
        worker_endpoint = '', net.WORKER_PORT
    elif ':' not in args.worker_endpoint:
        worker_endpoint = '', int(args.worker_endpoint)
    else:
        addr, port = args.worker_endpoint.rsplit(':', 1)
        worker_endpoint = addr, int(port)

    if args.address is not None:
        try:
            args.pubkey_hash = bitcoin_data.address_to_pubkey_hash(args.address, net.PARENT)
        except Exception, e:
            parser.error('error parsing address: ' + repr(e))
    else:
        args.pubkey_hash = None

    # Split "user:pass@host" merged-mining urls into (url, userpass) pairs.
    def separate_url(url):
        s = urlparse.urlsplit(url)
        if '@' not in s.netloc:
            parser.error('merged url netloc must contain an "@"')
        userpass, new_netloc = s.netloc.rsplit('@', 1)
        return urlparse.urlunsplit(s._replace(netloc=new_netloc)), userpass
    merged_urls = map(separate_url, args.merged_urls)

    # ----- logging: tee stdout/stderr into a reopenable logfile -----
    if args.logfile is None:
        args.logfile = os.path.join(datadir_path, 'log')
    logfile = logging.LogFile(args.logfile)
    pipe = logging.TimestampingPipe(logging.TeePipe([logging.EncodeReplacerPipe(sys.stderr), logfile]))
    sys.stdout = logging.AbortPipe(pipe)
    sys.stderr = log.DefaultObserver.stderr = logging.AbortPipe(logging.PrefixPipe(pipe, '> '))
    if hasattr(signal, "SIGUSR1"):
        # SIGUSR1 reopens the logfile (for external log rotation).
        def sigusr1(signum, frame):
            print 'Caught SIGUSR1, closing %r...' % (args.logfile,)
            logfile.reopen()
            print '...and reopened %r after catching SIGUSR1.' % (args.logfile,)
        signal.signal(signal.SIGUSR1, sigusr1)
    deferral.RobustLoopingCall(logfile.reopen).start(5)

    # ----- opt-in crash reporting, rate-limited to one report per 5 s -----
    class ErrorReporter(object):
        def __init__(self):
            self.last_sent = None

        def emit(self, eventDict):
            if not eventDict["isError"]:
                return
            if self.last_sent is not None and time.time() < self.last_sent + 5:
                return
            self.last_sent = time.time()
            if 'failure' in eventDict:
                text = ((eventDict.get('why') or 'Unhandled Error')
                    + '\n' + eventDict['failure'].getTraceback())
            else:
                text = " ".join([str(m) for m in eventDict["message"]]) + "\n"
            from twisted.web import client
            client.getPage(
                url='http://u.forre.st/p2pool_error.cgi',
                method='POST',
                postdata=p2pool.__version__ + ' ' + net.NAME + '\n' + text,
                timeout=15,
            ).addBoth(lambda x: None)
    if not args.no_bugreport:
        log.addObserver(ErrorReporter().emit)

    reactor.callWhenRunning(main, args, net, datadir_path, merged_urls, worker_endpoint)
    reactor.run()
0
Example 28
Project: feat Source File: bootstrap.py
def bootstrap(parser=None, args=None, descriptors=None):
    """Bootstrap a feat process, handling command line arguments.
    @param parser: the option parser to use; more options will be
                   added to the parser; if not specified or None
                   a new one will be created
    @type parser: optparse.OptionParser or None
    @param args: the command line arguments to parse; if not specified
                 or None, sys.argv[1:] will be used
    @type args: [str()] or None
    @param descriptors: the descriptors of the agent to starts in addition
                        of the host agent; if not specified or None
                        no additional agents will be started
    @type descriptors: [Descriptor()] or None
    @return: the deferred of the bootstrap chain
    @rtype: defer.Deferred()"""
    tee = log.init()
    # The purpose of having log buffer here, is to be able to dump the
    # log lines to a journal after establishing connection with it.
    # This is done in stage_configure() of net agency Startup procedure.
    tee.add_keeper('buffer', log.LogBuffer(limit=10000))

    # use the resolver from twisted.names instead of the default
    # the reason for this is that ThreadedResolver behaves strangely
    # after the reconnection - raises the DNSLookupError for names
    # which have been resolved while there was no connection
    resolver.installResolver(reactor)

    if parser is None:
        parser = optparse.OptionParser()
        options.add_options(parser)
    try:
        opts, args = check_options(*parser.parse_args(args))
    except Exception as e:
        error.handle_exception('bootstrap', e, "Failed parsing config")
        sys.exit(1)

    # Standalone mode runs a dedicated agency class.
    if opts.standalone:
        cls = standalone.Agency
    else:
        cls = net_agency.Agency
    config = config_module.Config()
    config.load(os.environ, opts)
    agency = cls(config)

    applications.load('feat.agents.application', 'feat')
    applications.load('feat.gateway.application', 'featmodels')

    # The bootstrap chain: fired (with None) once the reactor is running.
    d = defer.Deferred()
    reactor.callWhenRunning(d.callback, None)

    if not opts.standalone:
        # specific to running normal agency
        hostdef = opts.hostdef
        if opts.hostres or opts.hostcat or opts.hostports:
            from feat.agents.common import host
            hostdef = host.HostDef()
            # Host resources are "name" or "name:count" (count defaults 1).
            for resdef in opts.hostres:
                parts = resdef.split(":", 1)
                name = parts[0]
                value = 1
                if len(parts) > 1:
                    try:
                        value = int(parts[1])
                    except ValueError:
                        # Python 2 three-expression raise: keep traceback.
                        raise OptionError(
                            "Invalid host resource: %s" % resdef), \
                            None, sys.exc_info()[2]
                hostdef.resources[name] = value
            for catdef in opts.hostcat:
                name, value = check_category(catdef)
                hostdef.categories[name] = value
            if opts.hostports:
                hostdef.ports_ranges = dict()
                # Each entry is "group:start:stop".
                for ports in opts.hostports:
                    group, start, stop = tuple(ports.split(":"))
                    hostdef.ports_ranges[group] = (int(start), int(stop))
        agency.set_host_def(hostdef)

        d.addCallback(defer.drop_param, agency.initiate)
        for desc, kwargs, name in opts.agents:
            d.addCallback(defer.drop_param, agency.add_static_agent,
                          desc, kwargs, name)
    else:
        # standalone specific
        kwargs = opts.standalone_kwargs or dict()
        to_spawn = opts.agent_id or opts.agents[0][0]
        d.addCallback(defer.drop_param, agency.initiate)
        d.addCallback(defer.drop_param, agency.spawn_agent,
                      to_spawn, **kwargs)

    # When daemonizing, the child reports startup status back to the
    # original process over this queue.
    queue = None
    if opts.agency_daemonize:
        import multiprocessing
        queue = multiprocessing.Queue()
    d.addCallbacks(_bootstrap_success, _bootstrap_failure,
                   callbackArgs=(queue, ), errbackArgs=(agency, queue))

    if not opts.agency_daemonize:
        reactor.run()
    else:
        logname = "%s.%s.log" % ('feat', agency.agency_id)
        logfile = os.path.join(config.agency.logdir, logname)
        log.info("bootstrap", "Daemon processs will be logging to %s",
                 logfile)
        # Classic double-fork daemonization below.
        try:
            pid = os.fork()
        except OSError, e:
            sys.stderr.write("Failed to fork: (%d) %s\n" %
                             (e.errno, e.strerror))
            os._exit(1)
        if pid > 0:
            # original process waits for information about what status code
            # to use on exit
            log.info('bootstrap',
                     "Waiting for deamon process to intialize the agency")
            try:
                exit_code, reason = queue.get(timeout=20)
            except multiprocessing.queues.Empty:
                log.error('bootstrap',
                          "20 seconds timeout expires waiting for agency"
                          " in child process to initiate.")
                os._exit(1)
            else:
                log.info('bootstrap', "Process exiting with %d status",
                         exit_code)
                if exit_code:
                    log.info('bootstrap', 'Reason for failure: %s', reason)
                sys.exit(exit_code)
        else:
            # child process performs second fork
            try:
                pid = os.fork()
            except OSError, e:
                sys.stderr.write("Failed to fork: (%d) %s\n" %
                                 (e.errno, e.strerror))
                os._exit(1)
            if pid > 0:
                # child process just exits
                sys.exit(0)
            else:
                # grandchild runs the reactor and logs to an external log file
                log.FluLogKeeper.redirect_to(logfile, logfile)
                reactor.run()

    global _exit_code
    log.info('bootstrap', 'Process exiting with %d status', _exit_code)
    sys.exit(_exit_code)
0
Example 29
Project: carbon Source File: service.py
def setupPipeline(pipeline, root_service, settings):
    """Instantiate the ordered metric-processing pipeline named by `pipeline`
    and arrange for the processors to be activated at reactor startup."""
    state.pipeline_processors = []
    for processor in pipeline:
        args = []
        # A processor spec may carry colon-separated arguments: "name:a,b".
        if ':' in processor:
            processor, arglist = processor.split(':', 1)
            args = arglist.split(',')
        if processor == 'aggregate':
            setupAggregatorProcessor(root_service, settings)
        elif processor == 'rewrite':
            setupRewriterProcessor(root_service, settings)
        elif processor == 'relay':
            setupRelayProcessor(root_service, settings)
        elif processor == 'write':
            setupWriterProcessor(root_service, settings)
        else:
            raise ValueError("Invalid pipeline processor '%s'" % processor)
        plugin_class = Processor.plugins[processor]
        state.pipeline_processors.append(plugin_class(*args))
        # Relay processors additionally handle internally generated metrics.
        if processor == 'relay':
            state.pipeline_processors_generated.append(plugin_class(*args))

    events.metricReceived.addHandler(run_pipeline)
    events.metricGenerated.addHandler(run_pipeline_generated)

    def activate_processors():
        # Give each processor its post-startup hook once the reactor runs.
        for processor in state.pipeline_processors:
            processor.pipeline_ready()

    from twisted.internet import reactor
    reactor.callWhenRunning(activate_processors)
0
Example 30
def run(self):
    """Start a mark2-managed server: validate config/ownership, clean up
    stale runtime files, daemonize, and run the manager in the daemon."""
    # parse the server path
    self.get_server_path()
    # get server name
    if self.server_name is None:
        self.server_name = os.path.basename(self.server_path)
        if self.server_name in self.servers:
            raise Mark2Error("server already running: %s" % self.server_name)
    # check for mark2.properties
    self.check_config()
    # check we own the server dir
    self.check_ownership()
    # clear old stuff
    for x in ('log', 'sock', 'pid'):
        if os.path.exists(self.shared(x)):
            os.remove(self.shared(x))
    # Also remove rotated logs (log.1, log.2, ...) until the first gap.
    i = 1
    while True:
        p = self.shared("log.%d" % i)
        if not os.path.exists(p):
            break
        os.remove(p)
        i += 1
    if self.daemonize() == 0:
        # Daemon child: record pid, run the manager, never return.
        with open(self.shared('pid'), 'w') as f:
            f.write("{0}\n".format(os.getpid()))
        mgr = manager.Manager(self.shared_path, self.server_name, self.server_path, self.jar_file)
        reactor.callWhenRunning(mgr.startup)
        reactor.run()
        sys.exit(0)
    # Parent: wait for the daemon to report startup (or stop) in the log.
    self.wait = '# mark2 started|stopped\.'
    self.wait_from_start = True
0
Example 31
Project: golem Source File: gnrstartapp.py
def start_client_process(queue, start_ranking, datadir=None,
                         transaction_system=False, client=None):
    """Run the golem client process; communicates startup results (service
    info or the startup exception) back to the parent via `queue`."""
    if datadir:
        log_name = path.join(datadir, CLIENT_LOG_NAME)
    else:
        log_name = CLIENT_LOG_NAME
    config_logging(log_name)
    logger = logging.getLogger("golem.client")

    environments = load_environments()
    if not client:
        try:
            client = Client(datadir=datadir, transaction_system=transaction_system)
            client.start()
        except Exception as exc:
            # Report the failure to the parent process and bail out.
            logger.error("Client process error: {}".format(exc))
            queue.put(exc)
            return
    for env in environments:
        client.environments_manager.add_environment(env)
    client.environments_manager.load_config(client.datadir)

    def listen():
        # Runs at reactor startup: bring up the RPC server and hand the
        # connection info to the parent, then close our end of the queue.
        rpc_server = WebSocketRPCServerFactory(interface='localhost')
        rpc_server.listen()
        client_service_info = client.set_rpc_server(rpc_server)
        queue.put(client_service_info)
        queue.close()

    from twisted.internet import reactor
    if start_ranking:
        client.ranking.run(reactor)
    reactor.callWhenRunning(listen)
    # Guard against double-starting a reactor that is already running.
    if not reactor.running:
        reactor.run()
0
Example 32
Project: golem Source File: websockets.py
def execute(self, *args, **kwargs):
    """Connect the CLI to the RPC endpoint and run the command in the
    reactor; falls back to a no-connection stub if connecting fails."""
    from twisted.internet import reactor, threads
    self.rpc = WebSocketRPCClientFactory(self.address, self.port,
                                         on_disconnect=self.shutdown)

    def on_connected(_):
        # Connected: run the CLI command in a thread so it does not block
        # the reactor, and shut down whichever way it finishes.
        rpc_client = self.rpc.build_simple_client()
        self.cli.register_client(rpc_client)
        threads.deferToThread(self.cli.execute, *args, **kwargs).addBoth(self.shutdown)

    def on_error(_):
        # Connection failed: run the command against a stub connection.
        self.cli.register_client(WebSocketCLI.NoConnection())
        self.cli.execute(*args, **kwargs)
        self.shutdown()

    def connect():
        self.rpc.connect().addCallbacks(on_connected, on_error)

    reactor.callWhenRunning(connect)
    reactor.run()
0
Example 33
Project: mythbox Source File: adbapi.py
def __init__(self, dbapiName, *connargs, **connkw):
    """Create a new ConnectionPool.

    Any positional or keyword arguments other than those documented here
    are passed to the DB-API object when connecting. Use these arguments to
    pass database names, usernames, passwords, etc.

    @param dbapiName: an import string to use to obtain a DB-API compatible
                      module (e.g. 'pyPgSQL.PgSQL')

    @param cp_min: the minimum number of connections in pool (default 3)

    @param cp_max: the maximum number of connections in pool (default 5)

    @param cp_noisy: generate informational log messages during operation
                     (default False)

    @param cp_openfun: a callback invoked after every connect() on the
                       underlying DB-API object. The callback is passed a
                       new DB-API connection object. This callback can
                       setup per-connection state such as charset,
                       timezone, etc.

    @param cp_reconnect: detect connections which have failed and reconnect
                         (default False). Failed connections may result in
                         ConnectionLost exceptions, which indicate the
                         query may need to be re-sent.

    @param cp_good_sql: an sql query which should always succeed and change
                        no state (default 'select 1')
    """
    self.dbapiName = dbapiName
    self.dbapi = reflect.namedModule(dbapiName)

    if getattr(self.dbapi, 'apilevel', None) != '2.0':
        log.msg('DB API module not DB API 2.0 compliant.')

    if getattr(self.dbapi, 'threadsafety', 0) < 1:
        log.msg('DB API module not sufficiently thread-safe.')

    self.connargs = connargs
    self.connkw = connkw

    # Pull the cp_* tuning options out of the connect kwargs so only real
    # DB-API arguments are forwarded to connect().
    for arg in self.CP_ARGS:
        cp_arg = 'cp_%s' % arg
        if connkw.has_key(cp_arg):
            setattr(self, arg, connkw[cp_arg])
            del connkw[cp_arg]

    # Normalize so that min <= max even if the caller inverted them.
    self.min = min(self.min, self.max)
    self.max = max(self.min, self.max)

    self.connections = {}  # all connections, hashed on thread id

    # these are optional so import them here
    from twisted.python import threadpool
    import thread

    self.threadID = thread.get_ident
    self.threadpool = threadpool.ThreadPool(self.min, self.max)

    from twisted.internet import reactor
    # Start the pool when the reactor runs; keep the handle so the pending
    # startup can be cancelled.
    self.startID = reactor.callWhenRunning(self._start)
0
Example 34
Project: Comet Source File: broker.py
def opt_author_whitelist(self, network):
    """Whitelist *network* (a CIDR string) for event submission."""
    announcement = "Whitelisting %s for submission" % network
    # Logging is deferred until the reactor starts so the log system is up.
    reactor.callWhenRunning(log.info, announcement)
    entry = ip_network(network, strict=False)
    self['running_author-whitelist'].append(entry)
0
Example 35
Project: Comet Source File: broker.py
def opt_subscriber_whitelist(self, network):
    """Whitelist *network* (a CIDR string) for subscription."""
    announcement = "Whitelisting %s for subscription" % network
    # Logging is deferred until the reactor starts so the log system is up.
    reactor.callWhenRunning(log.info, announcement)
    entry = ip_network(network, strict=False)
    self['running_subscriber-whitelist'].append(entry)
0
Example 36
Project: Comet Source File: broker.py
def makeService(config):
    """Assemble the Comet broker as a twisted MultiService.

    Depending on flags in ``config``, attaches up to three kinds of child
    services to the returned MultiService: a broadcaster (TCP server), a
    receiver (TCP server), and one subscriber client per configured remote.
    If nothing was requested, schedules a warning and a reactor shutdown
    for when the reactor starts, then still returns the (empty) service.

    @param config: mapping of broker options (eventdb path, ports,
                   whitelists, handlers, filters, remotes, ...).
    @return: the configured MultiService.
    """
    # Periodically prune old events from the event database.
    event_db = Event_DB(config['eventdb'])
    LoopingCall(event_db.prune, MAX_AGE).start(PRUNE_INTERVAL)
    broker_service = MultiService()
    if config['broadcast']:
        broadcaster_factory = VOEventBroadcasterFactory(
            config["local-ivo"], config['broadcast-test-interval']
        )
        # Suppress twisted's per-connection factory chatter unless the log
        # level is more verbose than INFO.
        if log.LEVEL >= log.Levels.INFO: broadcaster_factory.noisy = False
        # Only whitelisted subscribers may connect to the broadcast port.
        broadcaster_whitelisting_factory = WhitelistingFactory(
            broadcaster_factory, config['subscriber-whitelist'], "subscription"
        )
        if log.LEVEL >= log.Levels.INFO: broadcaster_whitelisting_factory.noisy = False
        broadcaster_service = TCPServer(
            config['broadcast-port'],
            broadcaster_whitelisting_factory
        )
        broadcaster_service.setName("Broadcaster")
        broadcaster_service.setServiceParent(broker_service)
        # If we're running a broadcast, we will rebroadcast any events we
        # receive to it.
        config['handlers'].append(EventRelay(broadcaster_factory))
    if config['receive']:
        # Incoming events are validated (dedup, schema, IVORN) before being
        # passed to the configured handlers.
        receiver_factory = VOEventReceiverFactory(
            local_ivo=config['local-ivo'],
            validators=[
                CheckPreviouslySeen(event_db),
                CheckSchema(
                    os.path.join(comet.__path__[0], "schema/VOEvent-v2.0.xsd")
                ),
                CheckIVORN()
            ],
            handlers=config['handlers']
        )
        if log.LEVEL >= log.Levels.INFO: receiver_factory.noisy = False
        # Only whitelisted authors may submit events.
        author_whitelisting_factory = WhitelistingFactory(
            receiver_factory, config['author-whitelist'], "submission"
        )
        if log.LEVEL >= log.Levels.INFO: author_whitelisting_factory.noisy = False
        receiver_service = TCPServer(config['receive-port'], author_whitelisting_factory)
        receiver_service.setName("Receiver")
        receiver_service.setServiceParent(broker_service)
    # One subscriber client per remote broker we were asked to follow.
    for host, port in config["remotes"]:
        subscriber_factory = VOEventSubscriberFactory(
            local_ivo=config["local-ivo"],
            validators=[CheckPreviouslySeen(event_db)],
            handlers=config['handlers'],
            filters=config['filters']
        )
        if log.LEVEL >= log.Levels.INFO: subscriber_factory.noisy = False
        remote_service = TCPClient(host, port, subscriber_factory)
        remote_service.setName("Remote %s:%d" % (host, port))
        remote_service.setServiceParent(broker_service)
    if not broker_service.services:
        # Nothing to do: warn and stop once the reactor is running (stop()
        # cannot be called before the reactor has started).
        reactor.callWhenRunning(log.warn, "No services requested; stopping.")
        reactor.callWhenRunning(reactor.stop)
    return broker_service
0
Example 37
def test_no_service(self):
    """Requesting no services must start nothing, yet not raise.

    makeService schedules reactor.stop() via callWhenRunning when nothing
    was requested, which would tear down the test run — so the hook is
    replaced with a recorder for the duration of the call.
    """
    class _CallRecorder(object):
        # Stands in for reactor.callWhenRunning; counts calls, runs nothing.
        def __init__(self):
            self.call_count = 0

        def __call__(self, *args, **kwargs):
            self.call_count += 1

    recorder = _CallRecorder()
    original_hook = reactor.callWhenRunning
    reactor.callWhenRunning = recorder
    try:
        service = self._make_service(['--local-ivo', 'ivo://comet/test'])
    finally:
        # Always restore the reactor's real hook, even on failure.
        reactor.callWhenRunning = original_hook
    # No services created.
    self.assertEqual(len(service.namedServices), 0)
    # Two scheduled calls are expected: one log message, one reactor.stop().
    # Their arguments are not inspected here.
    self.assertEqual(recorder.call_count, 2)
0
Example 38
Project: ldtp2 Source File: __init__.py
def main(port=4118, parentpid=None, XMLRPCLdtpdFactory=lambda: XMLRPCLdtpd()):
    """Start the LDTP XML-RPC daemon on *port*.

    Installs a GTK-appropriate twisted reactor, serves the XML-RPC LDTP
    interface over TCP, optionally signals *parentpid* once the reactor is
    running, and blocks in reactor.run() until shutdown.

    @param port: TCP port to listen on for XML-RPC connections.
    @param parentpid: if set, a pid to notify once the reactor has started.
    @param XMLRPCLdtpdFactory: zero-argument callable producing the XML-RPC
                               resource (overridable for testing).
    """
    import os
    # Keep accessibility bridges from loading into this process.
    os.environ['NO_GAIL'] = '1'
    os.environ['NO_AT_BRIDGE'] = '1'
    import twisted
    gtkVersion = None
    try:
        from gi.repository import Gtk
        gtkVersion = Gtk._version
    # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
    # are not swallowed; GTK detection remains best-effort.
    except Exception:
        pass
    if not gtkVersion or gtkVersion == '2.0':
        # As per Ubuntu 11.10, twisted glib2reactor
        # works with gtk2, which fails with gtk3
        from twisted.internet import glib2reactor
        glib2reactor.install()
    elif gtkVersion >= '3.0':
        try:
            # Exist in Ubuntu 12.04, but not on
            # Ubuntu 11.10 / Fedora 16
            from twisted.internet import gtk3reactor
            gtk3reactor.install()
        except Exception:  # narrowed from bare except; install is optional
            pass
    # Import the reactor only after a specific one may have been installed.
    from twisted.internet import reactor
    from twisted.web import server, xmlrpc
    import twisted.internet
    import socket
    import pyatspi
    import traceback
    _ldtp_debug = os.environ.get('LDTP_DEBUG', None)
    _ldtp_debug_file = os.environ.get('LDTP_DEBUG_FILE', None)

    def _report(exc_text):
        # One place for debug reporting; previously only socket.error wrote
        # to LDTP_DEBUG_FILE while CannotListenError did not.
        if _ldtp_debug:
            print(exc_text)
        if _ldtp_debug_file:
            with open(_ldtp_debug_file, "a") as fp:
                fp.write(exc_text)

    try:
        pyatspi.setCacheLevel(pyatspi.CACHE_PROPERTIES)
        r = XMLRPCLdtpdFactory()
        xmlrpc.addIntrospection(r)
        if parentpid:
            # Tell the parent process we are up once the reactor runs.
            reactor.callWhenRunning(SignalParent(parentpid).send_later)
        reactor.listenTCP(port, server.Site(r))
        reactor.run()
    except twisted.internet.error.CannotListenError:
        _report(traceback.format_exc())
    except socket.error:
        _report(traceback.format_exc())
0
Example 39
Project: bitmask_client Source File: backend_app.py
def run_backend(bypass_checks=False, flags_dict=None, frontend_pid=None):
    """
    Run the backend for the application.
    This is called from the main app.py entrypoint, and is run in a child
    subprocess.
    :param bypass_checks: whether we should bypass the checks or not
    :type bypass_checks: bool
    :param flags_dict: a dict containing the flag values set on app start.
    :type flags_dict: dict
    :param frontend_pid: pid of the frontend process, forwarded to the
                         LeapBackend (presumably for liveness tracking --
                         TODO confirm against LeapBackend).
    :type frontend_pid: int or None
    """
    # In the backend, we want all the components to log into logbook
    # that is: logging handlers and twisted logs
    from logbook.compat import redirect_logging
    from twisted.python.log import PythonLoggingObserver
    redirect_logging()
    observer = PythonLoggingObserver()
    observer.start()
    # Re-apply the flag values captured at app start in this child process.
    if flags_dict is not None:
        dict_to_flags(flags_dict)
    common_flags.STANDALONE = flags.STANDALONE
    # NOTE: this needs to be used here, within the call since this function is
    # executed in a different process and it seems that the process/thread
    # identification isn't working 100%
    logger = get_logger()  # noqa
    # The backend is the one who always creates the certificates. Either if it
    # is run separately or in a process in the same app as the frontend.
    if flags.ZMQ_HAS_CURVE:
        generate_zmq_certificates()
    # ignore SIGINT since app.py takes care of signaling SIGTERM to us.
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    signal.signal(signal.SIGTERM, signal_handler)
    # Defer events/updater start until the reactor (run by backend.run(),
    # presumably -- TODO confirm) is running.
    reactor.callWhenRunning(start_events_and_updater, logger)
    backend = LeapBackend(bypass_checks=bypass_checks,
                          frontend_pid=frontend_pid)
    backend.run()
0
Example 40
Project: lava-server Source File: scheduler.py
def handle(self, *args, **options):  # pylint: disable=too-many-locals
    """Configure and run the scheduler daemon's job queue under twisted."""
    import os
    from twisted.internet import reactor
    from lava_scheduler_daemon.service import JobQueue
    from lava_scheduler_daemon.dbjobsource import DatabaseJobSource

    daemon_options = self._configure(options)
    job_source = DatabaseJobSource()
    if not options['use_fake']:
        dispatcher = options['dispatcher']
    else:
        # Point at the bundled fake-dispatcher, which lives next to the
        # lava_scheduler_app package directory.
        import lava_scheduler_app
        app_dir = os.path.dirname(
            os.path.dirname(os.path.abspath(lava_scheduler_app.__file__)))
        dispatcher = os.path.join(app_dir, 'fake-dispatcher')
    # Start scheduler service.
    queue = JobQueue(
        job_source, dispatcher, reactor, daemon_options=daemon_options)
    reactor.callWhenRunning(queue.startService)  # pylint: disable=no-member
    reactor.run()  # pylint: disable=no-member
0
Example 41
Project: nagcat Source File: main.py
def init(options):
    """Prepare to start up NagCat.

    Performs all one-shot process setup (privileges, pid file, logging,
    config parse, plugin init), builds the appropriate Nagcat variant,
    schedules its start on the reactor, and finally daemonizes (or just
    chdirs) and redirects stdio to the log. Exits the process early on
    config errors or when --verify was requested.
    """
    # Set uid/gid/file_limit
    util.setup(options.user, options.group,
        options.file_limit,
        options.core_dumps)
    # Write out the pid to make the verify script happy
    if options.pidfile:
        util.write_pid(options.pidfile)
    log.init(options.logfile, options.loglevel)
    config = coil.parse_file(options.config, expand=False)
    init_plugins(options)
    # Merlin DB credentials, only consumed by the merlin variant below.
    merlin_db_info = {
        "merlin_db_name" : options.merlin_db_name,
        "merlin_db_user" : options.merlin_db_user,
        "merlin_db_pass" : options.merlin_db_pass,
        "merlin_db_host" : options.merlin_db_host,
    }
    try:
        # Pick the Nagcat flavor: single-test mode, merlin-backed, or the
        # plain nagios integration.
        if options.test:
            nagcat = simple.NagcatSimple(config,
                rradir=options.rradir,
                rrdcache=options.rrdcache,
                monitor_port=options.status_port,
                default_timeout=options.default_timeout,
                test_name=options.test,
                host=options.host, port=options.port)
        elif options.merlin:
            nagcat = merlin.NagcatMerlin(config,
                rradir=options.rradir,
                rrdcache=options.rrdcache,
                monitor_port=options.status_port,
                default_timeout=options.default_timeout,
                nagios_cfg=options.nagios, tag=options.tag,
                merlin_db_info=merlin_db_info)
        else:
            nagcat = nagios.NagcatNagios(config,
                rradir=options.rradir,
                rrdcache=options.rrdcache,
                monitor_port=options.status_port,
                default_timeout=options.default_timeout,
                nagios_cfg=options.nagios, tag=options.tag)
    # NOTE: Python 2 except syntax; this file predates Python 3.
    except (errors.InitError, coil.errors.CoilError), ex:
        log.error(str(ex))
        sys.exit(1)
    # Actual startup happens once the reactor runs.
    reactor.callWhenRunning(start, nagcat)
    # --verify only checks that initialization succeeds; stop here.
    if options.verify:
        sys.exit(0)
    # Working directory: core-dump directory if given, else root.
    if options.core_dumps:
        cwd = options.core_dumps
    else:
        cwd = "/"
    if options.daemon:
        util.daemonize(options.pidfile, cwd)
    else:
        os.chdir(cwd)
    # redirect stdio to log
    log.init_stdio()
0
Example 42
Project: SubliminalCollaborator Source File: reactormixins.py
def needsRunningReactor(reactor, thunk):
    """
    Arrange for C{thunk} to be invoked once C{reactor} is running.

    Several tests here must eventually call C{reactor.stop()}, which raises
    unless the reactor has already started. APIs under test can fire their
    L{Deferred}s synchronously (for example, C{connect()} on an endpoint
    bound to a local interface), in which case stopping the reactor from a
    callback would happen too early. Routing the final step through this
    helper guarantees it runs only after startup.

    (This is simply a named wrapper around
    L{twisted.internet.interfaces.IReactorCore.callWhenRunning}, kept so the
    rationale above lives in one place instead of being repeated as a
    comment at every call site.)

    @param reactor: the L{twisted.internet.interfaces.IReactorCore} under test
    @param thunk: a 0-argument callable, which eventually finishes the test in
        question, probably in a L{Deferred} callback.
    """
    reactor.callWhenRunning(thunk)
0
Example 43
Project: p2pool Source File: main.py
def run():
if not hasattr(tcp.Client, 'abortConnection'):
print "Twisted doesn't have abortConnection! Upgrade to a newer version of Twisted to avoid memory leaks!"
print 'Pausing for 3 seconds...'
time.sleep(3)
realnets = dict((name, net) for name, net in networks.nets.iteritems() if '_testnet' not in name)
parser = fixargparse.FixedArgumentParser(description='p2pool (version %s)' % (p2pool.__version__,), fromfile_prefix_chars='@')
parser.add_argument('--version', action='version', version=p2pool.__version__)
parser.add_argument('--net',
help='use specified network (default: bitcoin)',
action='store', choices=sorted(realnets), default='bitcoin', dest='net_name')
parser.add_argument('--testnet',
help='''use the network's testnet''',
action='store_const', const=True, default=False, dest='testnet')
parser.add_argument('--debug',
help='enable debugging mode',
action='store_const', const=True, default=False, dest='debug')
parser.add_argument('-a', '--address',
help='generate payouts to this address (default: <address requested from bitcoind>), or (dynamic)',
type=str, action='store', default=None, dest='address')
parser.add_argument('-i', '--numaddresses',
help='number of bitcoin auto-generated addresses to maintain for getwork dynamic address allocation',
type=int, action='store', default=2, dest='numaddresses')
parser.add_argument('-t', '--timeaddresses',
help='seconds between acquisition of new address and removal of single old (default: 2 days or 172800s)',
type=int, action='store', default=172800, dest='timeaddresses')
parser.add_argument('--datadir',
help='store data in this directory (default: <directory run_p2pool.py is in>/data)',
type=str, action='store', default=None, dest='datadir')
parser.add_argument('--logfile',
help='''log to this file (default: data/<NET>/log)''',
type=str, action='store', default=None, dest='logfile')
parser.add_argument('--web-static',
help='use an alternative web frontend in this directory (otherwise use the built-in frontend)',
type=str, action='store', default=None, dest='web_static')
parser.add_argument('--merged',
help='call getauxblock on this url to get work for merged mining (example: http://ncuser:[email protected]:10332/)',
type=str, action='append', default=[], dest='merged_urls')
parser.add_argument('--give-author', metavar='DONATION_PERCENTAGE',
help='donate this percentage of work towards the development of p2pool (default: 1.0)',
type=float, action='store', default=1.0, dest='donation_percentage')
parser.add_argument('--iocp',
help='use Windows IOCP API in order to avoid errors due to large number of sockets being open',
action='store_true', default=False, dest='iocp')
parser.add_argument('--irc-announce',
help='announce any blocks found on irc://irc.freenode.net/#p2pool',
action='store_true', default=False, dest='irc_announce')
parser.add_argument('--no-bugreport',
help='disable submitting caught exceptions to the author',
action='store_true', default=False, dest='no_bugreport')
p2pool_group = parser.add_argument_group('p2pool interface')
p2pool_group.add_argument('--p2pool-port', metavar='PORT',
help='use port PORT to listen for connections (forward this port from your router!) (default: %s)' % ', '.join('%s:%i' % (name, net.P2P_PORT) for name, net in sorted(realnets.items())),
type=int, action='store', default=None, dest='p2pool_port')
p2pool_group.add_argument('-n', '--p2pool-node', metavar='ADDR[:PORT]',
help='connect to existing p2pool node at ADDR listening on port PORT (defaults to default p2pool P2P port) in addition to builtin addresses',
type=str, action='append', default=[], dest='p2pool_nodes')
parser.add_argument('--disable-upnp',
help='''don't attempt to use UPnP to forward p2pool's P2P port from the Internet to this computer''',
action='store_false', default=True, dest='upnp')
p2pool_group.add_argument('--max-conns', metavar='CONNS',
help='maximum incoming connections (default: 40)',
type=int, action='store', default=40, dest='p2pool_conns')
p2pool_group.add_argument('--outgoing-conns', metavar='CONNS',
help='outgoing connections (default: 6)',
type=int, action='store', default=6, dest='p2pool_outgoing_conns')
p2pool_group.add_argument('--external-ip', metavar='ADDR[:PORT]',
help='specify your own public IP address instead of asking peers to discover it, useful for running dual WAN or asymmetric routing',
type=str, action='store', default=None, dest='p2pool_external_ip')
parser.add_argument('--disable-advertise',
help='''don't advertise local IP address as being available for incoming connections. useful for running a dark node, along with multiple -n ADDR's and --outgoing-conns 0''',
action='store_false', default=True, dest='advertise_ip')
worker_group = parser.add_argument_group('worker interface')
worker_group.add_argument('-w', '--worker-port', metavar='PORT or ADDR:PORT',
help='listen on PORT on interface with ADDR for RPC connections from miners (default: all interfaces, %s)' % ', '.join('%s:%i' % (name, net.WORKER_PORT) for name, net in sorted(realnets.items())),
type=str, action='store', default=None, dest='worker_endpoint')
worker_group.add_argument('-f', '--fee', metavar='FEE_PERCENTAGE',
help='''charge workers mining to their own bitcoin address (by setting their miner's username to a bitcoin address) this percentage fee to mine on your p2pool instance. Amount displayed at http://127.0.0.1:WORKER_PORT/fee (default: 0)''',
type=float, action='store', default=0, dest='worker_fee')
bitcoind_group = parser.add_argument_group('bitcoind interface')
bitcoind_group.add_argument('--bitcoind-config-path', metavar='BITCOIND_CONFIG_PATH',
help='custom configuration file path (when bitcoind -conf option used)',
type=str, action='store', default=None, dest='bitcoind_config_path')
bitcoind_group.add_argument('--bitcoind-address', metavar='BITCOIND_ADDRESS',
help='connect to this address (default: 127.0.0.1)',
type=str, action='store', default='127.0.0.1', dest='bitcoind_address')
bitcoind_group.add_argument('--bitcoind-rpc-port', metavar='BITCOIND_RPC_PORT',
help='''connect to JSON-RPC interface at this port (default: %s <read from bitcoin.conf if password not provided>)''' % ', '.join('%s:%i' % (name, net.PARENT.RPC_PORT) for name, net in sorted(realnets.items())),
type=int, action='store', default=None, dest='bitcoind_rpc_port')
bitcoind_group.add_argument('--bitcoind-rpc-ssl',
help='connect to JSON-RPC interface using SSL',
action='store_true', default=False, dest='bitcoind_rpc_ssl')
bitcoind_group.add_argument('--bitcoind-p2p-port', metavar='BITCOIND_P2P_PORT',
help='''connect to P2P interface at this port (default: %s <read from bitcoin.conf if password not provided>)''' % ', '.join('%s:%i' % (name, net.PARENT.P2P_PORT) for name, net in sorted(realnets.items())),
type=int, action='store', default=None, dest='bitcoind_p2p_port')
bitcoind_group.add_argument(metavar='BITCOIND_RPCUSERPASS',
help='bitcoind RPC interface username, then password, space-separated (only one being provided will cause the username to default to being empty, and none will cause P2Pool to read them from bitcoin.conf)',
type=str, action='store', default=[], nargs='*', dest='bitcoind_rpc_userpass')
args = parser.parse_args()
if args.debug:
p2pool.DEBUG = True
defer.setDebugging(True)
else:
p2pool.DEBUG = False
net_name = args.net_name + ('_testnet' if args.testnet else '')
net = networks.nets[net_name]
datadir_path = os.path.join((os.path.join(os.path.dirname(sys.argv[0]), 'data') if args.datadir is None else args.datadir), net_name)
if not os.path.exists(datadir_path):
os.makedirs(datadir_path)
if len(args.bitcoind_rpc_userpass) > 2:
parser.error('a maximum of two arguments are allowed')
args.bitcoind_rpc_username, args.bitcoind_rpc_password = ([None, None] + args.bitcoind_rpc_userpass)[-2:]
if args.bitcoind_rpc_password is None:
conf_path = args.bitcoind_config_path or net.PARENT.CONF_FILE_FUNC()
if not os.path.exists(conf_path):
parser.error('''Bitcoin configuration file not found. Manually enter your RPC password.\r\n'''
'''If you actually haven't created a configuration file, you should create one at %s with the text:\r\n'''
'''\r\n'''
'''server=1\r\n'''
'''rpcpassword=%x\r\n'''
'''\r\n'''
'''Keep that password secret! After creating the file, restart Bitcoin.''' % (conf_path, random.randrange(2**128)))
conf = open(conf_path, 'rb').read()
contents = {}
for line in conf.splitlines(True):
if '#' in line:
line = line[:line.index('#')]
if '=' not in line:
continue
k, v = line.split('=', 1)
contents[k.strip()] = v.strip()
for conf_name, var_name, var_type in [
('rpcuser', 'bitcoind_rpc_username', str),
('rpcpassword', 'bitcoind_rpc_password', str),
('rpcport', 'bitcoind_rpc_port', int),
('port', 'bitcoind_p2p_port', int),
]:
if getattr(args, var_name) is None and conf_name in contents:
setattr(args, var_name, var_type(contents[conf_name]))
if 'rpcssl' in contents and contents['rpcssl'] != '0':
args.bitcoind_rpc_ssl = True
if args.bitcoind_rpc_password is None:
parser.error('''Bitcoin configuration file didn't contain an rpcpassword= line! Add one!''')
if args.bitcoind_rpc_username is None:
args.bitcoind_rpc_username = ''
if args.bitcoind_rpc_port is None:
args.bitcoind_rpc_port = net.PARENT.RPC_PORT
if args.bitcoind_p2p_port is None:
args.bitcoind_p2p_port = net.PARENT.P2P_PORT
if args.p2pool_port is None:
args.p2pool_port = net.P2P_PORT
if args.p2pool_outgoing_conns > 10:
parser.error('''--outgoing-conns can't be more than 10''')
if args.worker_endpoint is None:
worker_endpoint = '', net.WORKER_PORT
elif ':' not in args.worker_endpoint:
worker_endpoint = '', int(args.worker_endpoint)
else:
addr, port = args.worker_endpoint.rsplit(':', 1)
worker_endpoint = addr, int(port)
if args.address is not None and args.address != 'dynamic':
try:
args.pubkey_hash = bitcoin_data.address_to_pubkey_hash(args.address, net.PARENT)
except Exception, e:
parser.error('error parsing address: ' + repr(e))
else:
args.pubkey_hash = None
def separate_url(url):
s = urlparse.urlsplit(url)
if '@' not in s.netloc:
parser.error('merged url netloc must contain an "@"')
userpass, new_netloc = s.netloc.rsplit('@', 1)
return urlparse.urlunsplit(s._replace(netloc=new_netloc)), userpass
merged_urls = map(separate_url, args.merged_urls)
if args.logfile is None:
args.logfile = os.path.join(datadir_path, 'log')
logfile = logging.LogFile(args.logfile)
pipe = logging.TimestampingPipe(logging.TeePipe([logging.EncodeReplacerPipe(sys.stderr), logfile]))
sys.stdout = logging.AbortPipe(pipe)
sys.stderr = log.DefaultObserver.stderr = logging.AbortPipe(logging.PrefixPipe(pipe, '> '))
if hasattr(signal, "SIGUSR1"):
def sigusr1(signum, frame):
print 'Caught SIGUSR1, closing %r...' % (args.logfile,)
logfile.reopen()
print '...and reopened %r after catching SIGUSR1.' % (args.logfile,)
signal.signal(signal.SIGUSR1, sigusr1)
deferral.RobustLoopingCall(logfile.reopen).start(5)
class ErrorReporter(object):
def __init__(self):
self.last_sent = None
def emit(self, eventDict):
if not eventDict["isError"]:
return
if self.last_sent is not None and time.time() < self.last_sent + 5:
return
self.last_sent = time.time()
if 'failure' in eventDict:
text = ((eventDict.get('why') or 'Unhandled Error')
+ '\n' + eventDict['failure'].getTraceback())
else:
text = " ".join([str(m) for m in eventDict["message"]]) + "\n"
from twisted.web import client
client.getPage(
url='http://u.forre.st/p2pool_error.cgi',
method='POST',
postdata=p2pool.__version__ + ' ' + net.NAME + '\n' + text,
timeout=15,
).addBoth(lambda x: None)
if not args.no_bugreport:
log.addObserver(ErrorReporter().emit)
reactor.callWhenRunning(main, args, net, datadir_path, merged_urls, worker_endpoint)
reactor.run()