sys.stderr.flush

Here are examples of the Python API sys.stderr.flush, taken from open source projects. Each snippet shows the call in context, so you can see when and why the stream is flushed.
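
Before the examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the most common reason to call sys.stderr.flush(): making sure buffered diagnostics actually reach the terminal before the process redirects a file descriptor, forks, or exits abruptly.

import os
import sys

def log_and_redirect_to_devnull():
    # Write a diagnostic and flush it so nothing is left in Python's
    # userspace buffer before we touch the underlying file descriptor.
    sys.stderr.write('about to silence stderr\n')
    sys.stderr.flush()

    # Point file descriptor 2 at /dev/null; any buffered-but-unflushed
    # bytes would otherwise be lost or appear after the redirection.
    devnull = os.open(os.devnull, os.O_WRONLY)
    os.dup2(devnull, sys.stderr.fileno())
    os.close(devnull)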

46 Examples

Example 1

Project: asynq Source File: debug.py
Function: dump
def dump(state):
    if not options.DUMP_PRE_ERROR_STATE:
        return
    stdout.flush()
    stderr.flush()
    stdout.write('\n--- Pre-error state dump: --------------------------------------------\n')
    try:
        state.dump()
    finally:
        stdout.write('----------------------------------------------------------------------\n')
        stderr.write('\n')
        stdout.flush()
        stderr.flush()

Example 2

Project: asynq Source File: debug.py
def async_exception_hook(type, error, tb):
    """Exception hook capable of printing async stack traces."""
    global original_hook

    stdout.flush()
    stderr.flush()
    if original_hook is not None:
        original_hook(type, error, tb)
    dump_error(error, tb=tb)

Example 3

Project: asynq Source File: scheduler.py
    def try_time_based_dump(self, last_task=None):
        current_time = time.time()
        if (current_time - self._last_dump_time) < _debug_options.SCHEDULER_STATE_DUMP_INTERVAL:
            return
        self._last_dump_time = current_time
        debug.write('\n--- Scheduler state dump: --------------------------------------------')
        try:
            self.dump()
            if last_task is not None:
                debug.write('Last task: %s' % debug.str(last_task), 1)
        finally:
            debug.write('----------------------------------------------------------------------\n')
            stdout.flush()
            stderr.flush()

Example 4

Project: hey-athena-client Source File: tts.py
Function: ignore_stderr
@contextlib.contextmanager
def ignore_stderr():
    devnull = os.open(os.devnull, os.O_WRONLY)
    old_stderr = os.dup(2)
    sys.stderr.flush()
    os.dup2(devnull, 2)
    os.close(devnull)
    try:
        yield
    finally:
        os.dup2(old_stderr, 2)
        os.close(old_stderr)
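
A typical call site for such a context manager (hypothetical, not part of the hey-athena-client source above) wraps a library call that writes noise directly to file descriptor 2:

with ignore_stderr():
    synthesize_speech(text)  # hypothetical noisy native call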

Example 5

Project: pystan Source File: misc.py
Function: redirect_stderr
def _redirect_stderr():
    """Redirect stderr for subprocesses to /dev/null

    Silences copious compilation messages.

    Returns
    -------
    orig_stderr : file descriptor
        Copy of original stderr file descriptor
    """
    sys.stderr.flush()
    stderr_fileno = sys.stderr.fileno()
    orig_stderr = os.dup(stderr_fileno)
    devnull = os.open(os.devnull, os.O_WRONLY)
    os.dup2(devnull, stderr_fileno)
    os.close(devnull)
    return orig_stderr
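
The pystan snippet returns the duplicated descriptor so the caller can undo the redirection later. A minimal companion sketch (assumed here, not shown in the pystan source) might look like this:

def _restore_stderr(orig_stderr):
    # Flush anything Python buffered while stderr pointed at /dev/null,
    # then put the saved descriptor back and release the duplicate.
    sys.stderr.flush()
    os.dup2(orig_stderr, sys.stderr.fileno())
    os.close(orig_stderr)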

Example 6

Project: guv Source File: abc.py
    def _squelch_exception(self, exc_info):
        if self._debug_exceptions and not issubclass(exc_info[0], NOT_ERROR):
            traceback.print_exception(*exc_info)
            sys.stderr.flush()

        if issubclass(exc_info[0], SYSTEM_ERROR):
            self._handle_system_error(exc_info)

Example 7

Project: guv Source File: gunicorn_worker.py
Function: handle_quit
    def handle_quit(self, sig, frame):
        """
        We override this because sys.exit() shouldn't be called. Instead, we should let the
        worker gracefully quit on its own.
        """
        # sys.stderr.write('handle_quit() frame: {0}, '
        #                  '{0.f_code.co_filename}:{0.f_code.co_name}:{0.f_lineno}\n'
        #                  .format(frame))
        sys.stderr.flush()
        self.alive = False
        # worker_int callback
        self.cfg.worker_int(self)

Example 8

Project: bup Source File: wvtest.py
Function: result
    def _result(msg, tb, code):
        global _tests, _fails
        _tests += 1
        if code != 'ok':
            _fails += 1
        (filename, line, func, text) = tb
        filename = os.path.basename(filename)
        msg = re.sub(r'\s+', ' ', str(msg))
        sys.stderr.flush()
        print '! %-70s %s' % ('%s:%-4d %s' % (filename, line, msg),
                              code)
        sys.stdout.flush()

Example 9

Project: filesync-server Source File: test_oops.py
    def get_oops_data(self):
        """Read oops data for first oops from stderr."""
        sys.stderr.flush()
        with open(self.stderr_filename) as f:
            oops_data = serializer.read(f)

        return oops_data

Example 10

Project: gitian-builder Source File: github-merge.py
def ask_prompt(text):
    print(text,end=" ",file=stderr)
    stderr.flush()
    reply = stdin.readline().rstrip()
    print("",file=stderr)
    return reply

Example 11

Project: pybikes Source File: filler.py
Function: print_status
def print_status(i, total, status):
    progress = "".join(["#" for step in range(i)]) + \
               "".join([" " for step in range(total-i)])
    status_pattern = "\r{0}/{1}: [{2}] {3}"
    output = status_pattern.format(i, total, progress, status)
    sys.stderr.flush()
    sys.stderr.write(unicode(output))
    sys.stderr.flush()
    if (i == total):
        sys.stderr.write('\n')
    return len(output)

Example 12

Project: labmanager Source File: __init__.py
Function: debug
def _debug(msg):
    sys.stderr.flush()
    sys.stdout.flush()
    print u"[%s] - %s" % (time.ctime(), msg)
    sys.stdout.flush()
    sys.stderr.flush()

Example 13

Project: econ-project-templates Source File: cpplint.py
Function: enter
    def __enter__(self):
        with cpplint_wrapper.lock:
            cpplint_wrapper.tasks_count += 1
            if cpplint_wrapper.tasks_count == 1:
                sys.stderr.flush()
                cpplint_wrapper.stream = sys.stderr
                sys.stderr = self
            return self

Example 14

Project: golem Source File: imunes_helper.py
Function: clean_up
    def _cleanup(self, context):
        if context.get('state') == SimulatorState.started:
            self.commands.get('stop').execute(context)
        sys.stderr.flush()
        sys.stdout.flush()
        sys.stdout = sys.__stdout__

Example 15

Project: prescons Source File: prescons.py
    def get_next_line(self, prompt):
        self.write(prompt)
        sys.stderr.flush()
        if prompt == sys.ps1:
            self.wait_for_user_input()
        line = self.file.readline()
        if len(line) == 0:
            self.file.close()
            raise EOFError
        return line

Example 16

Project: W.I.L.L Source File: config.py
@memoize
def _load_config_json():
    try:
        with open(CONFIG_FILE_PATH, 'r') as config:
            return json.load(config)
    except IOError:
        error("Couldn't load config file. Exiting.")
        sys.stderr.flush()
        sys.exit(1)
    except ValueError:  # Error on loading the json itself.
        error("Couldn't load config file JSON.  Formatting error?")
        error("system shutting down.")
        sys.stderr.flush()
        sys.exit(1)

Example 17

Project: OCRmyPDF Source File: leptonica.py
Function: enter
    def __enter__(self):
        from io import UnsupportedOperation
        self.tmpfile = TemporaryFile()

        # Save the old stderr, and redirect stderr to temporary file
        sys.stderr.flush()
        try:
            self.copy_of_stderr = os.dup(sys.stderr.fileno())
            os.dup2(self.tmpfile.fileno(), sys.stderr.fileno(),
                    inheritable=False)
        except UnsupportedOperation:
            self.copy_of_stderr = None
        return

Example 18

Project: anvio Source File: terminal.py
Function: enter
    def __enter__(self):
        sys.stderr.flush()
        self.old_stderr = sys.stderr
        sys.stderr = open('/dev/null', 'a+', 0)
        sys.stdout.flush()
        self.old_stdout = sys.stdout
        sys.stdout = open('/dev/null', 'a+', 0)

Example 19

Project: click Source File: _compat.py
Function: raw_input
        def raw_input(prompt=''):
            sys.stderr.flush()
            if prompt:
                stdout = _default_text_stdout()
                stdout.write(prompt)
            stdin = _default_text_stdin()
            return stdin.readline().rstrip('\r\n')

Example 20

Project: asyncio Source File: crawl.py
def main():
    """Main program.

    Parse arguments, set up event loop, run crawler, print report.
    """
    args = ARGS.parse_args()
    if not args.roots:
        print('Use --help for command line help')
        return

    log = Logger(args.level)

    if args.iocp:
        from asyncio.windows_events import ProactorEventLoop
        loop = ProactorEventLoop()
        asyncio.set_event_loop(loop)
    elif args.select:
        loop = asyncio.SelectorEventLoop()
        asyncio.set_event_loop(loop)
    else:
        loop = asyncio.get_event_loop()

    roots = {fix_url(root) for root in args.roots}

    crawler = Crawler(log,
                      roots, exclude=args.exclude,
                      strict=args.strict,
                      max_redirect=args.max_redirect,
                      max_tries=args.max_tries,
                      max_tasks=args.max_tasks,
                      max_pool=args.max_pool,
                      )
    try:
        loop.run_until_complete(crawler.crawl())  # Crawler gonna crawl.
    except KeyboardInterrupt:
        sys.stderr.flush()
        print('\nInterrupted\n')
    finally:
        crawler.report()
        crawler.close()
        loop.close()

Example 21

Project: performance Source File: run.py
Function: run_command
def run_command(command, hide_stderr=True):
    if hide_stderr:
        kw = {'stderr': subprocess.PIPE}
    else:
        kw = {}

    logging.info("Running `%s`",
                 " ".join(list(map(str, command))))

    proc = subprocess.Popen(command,
                            stdout=subprocess.PIPE,
                            universal_newlines=True,
                            **kw)
    try:
        stdout, stderr = proc.communicate()
    except:
        proc.stdout.close()
        if proc.stderr:
            proc.stderr.close()
        try:
            proc.kill()
        except OSError:
            # process already exited
            pass
        proc.wait()
        raise

    if proc.returncode != 0:
        if hide_stderr:
            sys.stderr.flush()
            sys.stderr.write(stderr)
            sys.stderr.flush()
        raise RuntimeError("Benchmark died")
    return stdout

Example 22

Project: Eventlet Source File: hub.py
Function: squelch_generic_exception
    def squelch_generic_exception(self, exc_info):
        if self.debug_exceptions:
            traceback.print_exception(*exc_info)
            sys.stderr.flush()

Example 23

Project: Eventlet Source File: hub.py
Function: squelch_timer_exception
    def squelch_timer_exception(self, timer, exc_info):
        if self.debug_exceptions:
            traceback.print_exception(*exc_info)
            sys.stderr.flush()

Example 24

Project: colorclass Source File: windows.py
    @classmethod
    def enable(cls, auto_colors=False, reset_atexit=False):
        """Enable color text with print() or sys.stdout.write() (stderr too).

        :param bool auto_colors: Automatically selects dark or light colors based on current terminal's background
            color. Only works with {autored} and related tags.
        :param bool reset_atexit: Resets original colors upon Python exit (in case you forget to reset it yourself with
            a closing tag). Does nothing on native ANSI consoles.

        :return: If streams replaced successfully.
        :rtype: bool
        """
        if not IS_WINDOWS:
            return False  # Windows only.

        # Get values from init_kernel32().
        kernel32, stderr, stdout = init_kernel32()
        if stderr == INVALID_HANDLE_VALUE and stdout == INVALID_HANDLE_VALUE:
            return False  # No valid handles, nothing to do.

        # Get console info.
        bg_color, native_ansi = bg_color_native_ansi(kernel32, stderr, stdout)

        # Set auto colors:
        if auto_colors:
            if bg_color in (112, 96, 240, 176, 224, 208, 160):
                ANSICodeMapping.set_light_background()
            else:
                ANSICodeMapping.set_dark_background()

        # Don't replace streams if ANSI codes are natively supported.
        if native_ansi:
            return False

        # Reset on exit if requested.
        if reset_atexit:
            atexit.register(cls.disable)

        # Overwrite stream references.
        if stderr != INVALID_HANDLE_VALUE:
            sys.stderr.flush()
            sys.stderr = WindowsStream(kernel32, stderr, sys.stderr)
        if stdout != INVALID_HANDLE_VALUE:
            sys.stdout.flush()
            sys.stdout = WindowsStream(kernel32, stdout, sys.stdout)

        return True

Example 25

Project: pgctl Source File: debug.py
Function: debug
def debug(msg, *args, **kwargs):
    level = kwargs.pop('level', 1)
    if level <= VERBOSE:  # pragma: no cover
        print('[pgctl] DEBUG:', msg % args, file=stderr)
        stderr.flush()

Example 26

Project: pyscaffold Source File: wsgi.py
Function: application
def application(env, start_response, data):
    sys.stderr.flush()  # Force the previous request log to be written.
    start_response('200 OK', [('Content-Type', 'text/html')])
    return [data.encode('utf-8')]

Example 27

Project: bundlewrap Source File: __init__.py
def suppress_broken_pipe_msg(f):
    """
    Oh boy.

    CPython does funny things with SIGPIPE. By default, it is caught and
    raised as a BrokenPipeError. When do we get a SIGPIPE? Most commonly
    when piping into head:

        bw nodes | head -n 1

    head will exit after receiving the first line, causing the kernel to
    send SIGPIPE to our process. Since in most cases, we can't just quit
    early, we simply ignore BrokenPipeError in utils.ui.write_to_stream.

    Unfortunately, Python will still print a message:

        Exception ignored in: <_io.TextIOWrapper name='<stdout>'
                               mode='w' encoding='UTF-8'>
        BrokenPipeError: [Errno 32] Broken pipe

    See also http://bugs.python.org/issue11380. The crazy try/finally
    construct below is taken from there and I quote:

        This will:
         - capture any exceptions *you've* raised as the context for the
           errors raised in this handler
         - expose any exceptions generated during this thing itself
         - prevent the interpreter dying during shutdown in
           flush_std_files by closing the files (you can't easily wipe
           out the pending writes that have failed)

    CAVEAT: There is a seemingly easier method floating around on the
    net (http://stackoverflow.com/a/16865106) that restores the default
    behavior for SIGPIPE (i.e. not turning it into a BrokenPipeError):

        from signal import signal, SIGPIPE, SIG_DFL
        signal(SIGPIPE,SIG_DFL)

    This worked fine for a while but broke when using
    multiprocessing.Manager() to share the list of jobs in utils.ui
    between processes. When the main process terminated, it quit with
    return code 141 (indicating a broken pipe), and the background
    process used for the manager continued to hang around indefinitely.
    Bonus fun: This was observed only on Ubuntu Trusty (14.04).
    """
    @wraps(f)
    def wrapper(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except SystemExit:
            raise
        except:
            print_exc()
            exit(1)
        finally:
            try:
                stdout.flush()
            finally:
                try:
                    stdout.close()
                finally:
                    try:
                        stderr.flush()
                    finally:
                        stderr.close()
    return wrapper

Example 28

Project: pth-toolkit Source File: __init__.py
Function: run_isolated
def run_isolated(klass, self, result):
    """Run a test suite or case in a subprocess, using the run method on klass.
    """
    c2pread, c2pwrite = os.pipe()
    # fixme - error -> result
    # now fork
    pid = os.fork()
    if pid == 0:
        # Child
        # Close parent's pipe ends
        os.close(c2pread)
        # Dup fds for child
        os.dup2(c2pwrite, 1)
        # Close pipe fds.
        os.close(c2pwrite)

        # at this point, sys.stdin is redirected, now we want
        # to filter it to escape ]'s.
        ### XXX: test and write that bit.
        stream = os.fdopen(1, 'wb')
        result = TestProtocolClient(stream)
        klass.run(self, result)
        stream.flush()
        sys.stderr.flush()
        # exit HARD, exit NOW.
        os._exit(0)
    else:
        # Parent
        # Close child pipe ends
        os.close(c2pwrite)
        # hookup a protocol engine
        protocol = TestProtocolServer(result)
        fileobj = os.fdopen(c2pread, 'rb')
        protocol.readFrom(fileobj)
        os.waitpid(pid, 0)
        # TODO return code evaluation.
    return result

Example 29

Project: debomatic Source File: process.py
    def _daemonize(self):
        try:
            pid = os.fork()
            if pid > 0:
                exit()
        except OSError as e:
            error(_('Error entering daemon mode: %s') % e.strerror)
            exit()
        os.chdir('/')
        os.setsid()
        os.umask(0)
        stdout.flush()
        stderr.flush()
        si = open(os.devnull, 'r')
        so = open(os.devnull, 'a+')
        se = open(os.devnull, 'a+')
        os.dup2(si.fileno(), stdin.fileno())
        os.dup2(so.fileno(), stdout.fileno())
        os.dup2(se.fileno(), stderr.fileno())
        on_exit(self._quit)
        old_log = getLogger()
        if old_log.handlers:
            for handler in old_log.handlers:
                old_log.removeHandler(handler)
        log(filename=self.logfile, level=self.loglevel,
            format='%(asctime)s %(levelname)-8s %(message)s')
        self._set_pid()

Example 30

Project: win-unicode-console Source File: console.py
Function: run_code
	def runcode(self, code):
		# PY3 # super().runcode(code)
		super(InteractiveConsole, self).runcode(code)
		sys.stderr.flush()
		sys.stdout.flush()

Example 31

Project: win-unicode-console Source File: raw_input.py
Function: raw_input
def raw_input(prompt=""):
	"""raw_input([prompt]) -> string

Read a string from standard input.  The trailing newline is stripped.
If the user hits EOF (Unix: Ctl-D, Windows: Ctl-Z+Return), raise EOFError.
On Unix, GNU readline is used if enabled.  The prompt string, if given,
is printed without a trailing newline before reading."""
	
	sys.stderr.flush()
	
	tty = STDIN.is_a_TTY() and STDOUT.is_a_TTY()
	
	if RETURN_UNICODE:
		if tty:
			line_bytes = readline(prompt)
			line = stdin_decode(line_bytes)
		else:
			line = stdio_readline(prompt)
		
	else:
		if tty:
			line = readline(prompt)
		else:
			line_unicode = stdio_readline(prompt)
			line = stdin_encode(line_unicode)
	
	if line:
		return line[:-1] # strip trailing "\n"
	else:
		raise EOFError

Example 32

Project: pybikes Source File: filler.py
Function: clear_line
def clearline(length):
    clearline = "\r" + "".join([" " for i in range(length)])
    sys.stderr.flush()
    sys.stderr.write(clearline)
    sys.stderr.flush()

Example 33

Project: eventlet Source File: hub.py
Function: squelch_generic_exception
    def squelch_generic_exception(self, exc_info):
        if self.debug_exceptions:
            traceback.print_exception(*exc_info)
            sys.stderr.flush()
            clear_sys_exc_info()

Example 34

Project: eventlet Source File: hub.py
Function: squelch_timer_exception
    def squelch_timer_exception(self, timer, exc_info):
        if self.debug_exceptions:
            traceback.print_exception(*exc_info)
            sys.stderr.flush()
            clear_sys_exc_info()

Example 35

Project: exaddos Source File: snmp.py
	def _get (self,key):
		from pysnmp.entity.rfc3413.oneliner import cmdgen
		from pysnmp.error import PySnmpError
		from pysnmp.proto.rfc1905 import NoSuchInstance

		try:
			if self.interface.snmp_version == 2:
				errorIndication, errorStatus, errorIndex, varBinds = cmdgen.CommandGenerator().getCmd(
					cmdgen.CommunityData('exaddos', self.interface.snmp_password),
					cmdgen.UdpTransportTarget((self.interface.router, 161)),
					self.collection[key]
				)
			elif self.interface.snmp_version == 3:
				from pysnmp.entity import config

				mapping_auth = {
					'MD5' : config.usmHMACMD5AuthProtocol,
					'SHA' : config.usmHMACSHAAuthProtocol,
					''    : config.usmNoAuthProtocol,
				}

				mapping_privacy = {
					'DES'     : config.usmDESPrivProtocol,
					'3DES'    : config.usm3DESEDEPrivProtocol,
					'AES-128' : config.usmAesCfb128Protocol,
					'AES-192' : config.usmAesCfb192Protocol,
					'AES-256' : config.usmAesCfb256Protocol,
					''        : config.usmNoPrivProtocol,
				}

				user = cmdgen.UsmUserData(
						self.interface.snmp_user,
						self.interface.snmp_auth_key,
						self.interface.snmp_privacy_key,
						authProtocol=mapping_auth[self.interface.snmp_auth_method],
						privProtocol=mapping_privacy[self.interface.snmp_privacy_method])

				transport = cmdgen.UdpTransportTarget((self.interface.router, 161))

				errorIndication, errorStatus, errorIndex, varBinds = cmdgen.CommandGenerator().getCmd(
					user, transport,
					self.collection[key]
				)
#					cmdgen.MibVariable('.'.join(str(_) for _ in self.collection[key]))
			else:
				raise NotImplementedError('Feel free to add support for this SNMP version and send us the patch - thanks')
		except PySnmpError:
			err('SNMP collection failed for %s %s' % (self.name,key))
			return None

		if (errorIndication,errorStatus,errorIndex) == (None,0,0):
			result = varBinds[0][1]

			if isinstance(result,NoSuchInstance):
				err('SNMP: %s did not have %s' % (self.name,key))
				sys.stderr.flush()
				return None

			try:
				return varBinds[0][1]
			except AttributeError:
				err('SNMP: %s did not have %s' % (self.name,key))
				return None
		else:
			err('SNMP collection failed for %s %s' % (self.name,key))
			return None

Example 36

Project: portage Source File: ForkProcess.py
	def _spawn(self, args, fd_pipes=None, **kwargs):
		"""
		Fork a subprocess, apply local settings, and call fetch().
		"""

		parent_pid = os.getpid()
		pid = None
		try:
			pid = os.fork()

			if pid != 0:
				if not isinstance(pid, int):
					raise AssertionError(
						"fork returned non-integer: %s" % (repr(pid),))
				return [pid]

			rval = 1
			try:

				# Use default signal handlers in order to avoid problems
				# killing subprocesses as reported in bug #353239.
				signal.signal(signal.SIGINT, signal.SIG_DFL)
				signal.signal(signal.SIGTERM, signal.SIG_DFL)

				portage.locks._close_fds()
				# We don't exec, so use close_fds=False
				# (see _setup_pipes docstring).
				portage.process._setup_pipes(fd_pipes, close_fds=False)

				rval = self._run()
			except SystemExit:
				raise
			except:
				traceback.print_exc()
				# os._exit() skips stderr flush!
				sys.stderr.flush()
			finally:
				os._exit(rval)

		finally:
			if pid == 0 or (pid is None and os.getpid() != parent_pid):
				# Call os._exit() from a finally block in order
				# to suppress any finally blocks from earlier
				# in the call stack (see bug #345289). This
				# finally block has to be setup before the fork
				# in order to avoid a race condition.
				os._exit(1)

Example 37

Project: itools Source File: _unix.py
def become_daemon():
    try:
        pid = fork()
    except OSError:
        print 'unable to fork'
        exit(1)

    if pid == 0:
        # Daemonize
        setsid()
        # We redirect only the 3 first descriptors
        file_desc = os_open(devnull, O_RDWR)
        stdin.close()
        dup2(file_desc, 0)
        stdout.flush()
        dup2(file_desc, 1)
        stderr.flush()
        dup2(file_desc, 2)
    else:
        exit()

Example 38

Project: OCRmyPDF Source File: leptonica.py
Function: exit
    def __exit__(self, exc_type, exc_value, traceback):
        # Restore old stderr
        sys.stderr.flush()
        if self.copy_of_stderr is not None:
            os.dup2(self.copy_of_stderr, sys.stderr.fileno())
            os.close(self.copy_of_stderr)

        # Get data from tmpfile (in with block to ensure it is closed)
        with self.tmpfile as tmpfile:
            tmpfile.seek(0)  # Cursor will be at end, so move back to beginning
            leptonica_output = tmpfile.read().decode(errors='replace')

        assert self.tmpfile.closed
        assert not sys.stderr.closed

        # If there are Python errors, let them bubble up
        if exc_type:
            logger.warning(leptonica_output)
            return False

        # If there are Leptonica errors, wrap them in Python exceptions
        if 'Error' in leptonica_output:
            if 'image file not found' in leptonica_output:
                raise FileNotFoundError()
            if 'pixWrite: stream not opened' in leptonica_output:
                raise LeptonicaIOError()
            raise LeptonicaError(leptonica_output)

        return False

Example 39

Project: DragonPy Source File: simple_debugger.py
def print_exc_plus():
    """
    Print the usual traceback information, followed by a listing of all the
    local variables in each frame.
    """
    sys.stderr.flush() # for eclipse
    sys.stdout.flush() # for eclipse

    tb = sys.exc_info()[2]
    while True:
        if not tb.tb_next:
            break
        tb = tb.tb_next
    stack = []
    f = tb.tb_frame
    while f:
        stack.append(f)
        f = f.f_back

    txt = traceback.format_exc()
    txt_lines = txt.splitlines()
    first_line = txt_lines.pop(0)
    last_line = txt_lines.pop(-1)
    click.secho(first_line, fg="red")
    for line in txt_lines:
        if line.strip().startswith("File"):
            click.echo(line)
        else:
            click.secho(line, fg="white", bold=True)
    click.secho(last_line, fg="red")

    click.echo()
    click.secho(
        "Locals by frame, most recent call first:",
        fg="blue", bold=True
    )
    for frame in stack:
        msg = 'File "%s", line %i, in %s' % (
            frame.f_code.co_filename,
            frame.f_lineno,
            frame.f_code.co_name,
        )
        msg = click.style(msg, fg="white", bold=True, underline=True)
        click.echo("\n *** %s" % msg)

        for key, value in list(frame.f_locals.items()):
            click.echo("%30s = " % click.style(key, bold=True), nl=False)
            # We have to be careful not to cause a new error in our error
            # printer! Calling str() on an unknown object could cause an
            # error we don't want.
            if isinstance(value, int):
                value = "$%x (decimal: %i)" % (value, value)
            else:
                value = repr(value)

            if len(value) > MAX_CHARS:
                value = "%s..." % value[:MAX_CHARS]

            try:
                click.echo(value)
            except:
                click.echo("<ERROR WHILE PRINTING VALUE>")

Example 40

Project: karl Source File: scripting.py
Function: run_daemon
def run_daemon(name, func, interval=300,
               retry_period=30*60, retry_interval=60, retryable=None,
               proceed=None):
    logger = getLogger('karl')

    if retryable is None:
        retryable = (ConflictError,)

    if proceed == None: #pragma NO COVERAGE
        def proceed():
            return True

    while proceed():
        start_trying = _time_time()
        tries = 0
        logger.info("Running %s", name)
        while True:
            try:
                tries += 1
                func()
                logger.info("Finished %s", name)
                break
            except retryable:
                if _time_time() - start_trying > retry_period:
                    logger.error("Retried for %d seconds, count = %d",
                                 retry_period, tries,
                                 exc_info=True)
                    break
                logger.info("Retrying in %d seconds, count = %d",
                            retry_interval, tries,
                            exc_info=True)
                _time_sleep(retry_interval)
            except:
                logger.error("Error in daemon process", exc_info=True)
                break
        if _debug_object_refs: #pragma NO COVERAGE
            _count_object_refs()
        sys.stderr.flush()
        sys.stdout.flush()
        _time_sleep(interval)

Example 41

Project: clint Source File: progress.py
Function: dots
def dots(it, label='', hide=None, every=1):
    """Progress iterator. Prints a dot for each item being iterated"""

    count = 0

    if not hide:
        STREAM.write(label)

    for i, item in enumerate(it):
        if not hide:
            if i % every == 0:         # True every "every" updates
                STREAM.write(DOTS_CHAR)
                sys.stderr.flush()

        count += 1

        yield item

    STREAM.write('\n')
    STREAM.flush()

Example 42

Project: sky Source File: crawl.py
def start(config, crawler_class=Crawler, save_data_result_fn=None, save_bulk_data_fn=None,
          logging_level=2, cache=None):
    """Main program.

    Parse arguments, set up event loop, run crawler, print report.
    """

    logging_levels = [logging.ERROR, logging.WARN, logging.INFO, logging.DEBUG]
    logging.basicConfig(level=logging_levels[min(logging_level, len(logging_levels) - 1)])

    loop = asyncio.SelectorEventLoop()

    asyncio.set_event_loop(loop)
    conf = get_config(config, loop)

    crawler = crawler_class(conf, cache)

    if save_data_result_fn is not None:
        crawler.save_data = save_data_result_fn

    if save_bulk_data_fn is not None:
        crawler.save_bulk_data = save_bulk_data_fn

    if crawler.login_url:
        loop.run_until_complete(crawler.login())

    try:
        loop.run_until_complete(crawler.crawl())  # Crawler gonna crawl.
    except KeyboardInterrupt:
        sys.stderr.flush()
        print('\nInterrupted\n')
    except Exception as e:
        print('CRITICAL ERROR main loop exception: %r', e)
    finally:
        result = crawler.finish_leftovers()
        report(crawler)
        crawler.close()
        loop.stop()
        loop.run_forever()
        loop.close()
    return result

Example 43

Project: infrastructure-puppet Source File: loggy.py
    def run(self):
        global gotindex, config, json_pending
        random.seed(time.time())
        #print("Pushing %u json objects" % len(json_pending))
        iname = time.strftime("loggy-%Y.%m.%d")
        sys.stderr.flush()
        if not iname in gotindex:
            gotindex[iname] = True
            if not self.xes.indices.exists(iname):
                mappings = {}
                for entry in config.options('RawFields'):
                    js = {
                        "_all" : {"enabled" : True},
                        "properties": {
                            "@timestamp" : { "store": True, "type" : "date", "format": "yyyy/MM/dd HH:mm:ss"},
                            "@node" : { "store": True, "type" : "string", "index": "not_analyzed"},
                            "geo_location" : { "type": "geo_point", "geohash": True }
                        }
                    }
                    for field in config.get('RawFields', entry).split(","):
                        x = field.strip()
                        js['properties'][x] = {"store": True, "type": "string", "index": "not_analyzed"}
                    
                    mappings[entry] = js
                    
                res = self.xes.indices.create(index = iname, body = {
                        "mappings" : mappings
                    }
                )
                if not 'loggy-indices' in json_pending:
                    json_pending['loggy-indices'] = []
                    last_push['loggy-indices'] = time.time()
                json_pending['loggy-indices'].append({
                    '@node': hostname,
                    'index_created': iname,
                    'logtype': 'loggy-indices',
                    '@timestamp': time.strftime("%Y/%m/%d %H:%M:%S", time.gmtime()),
                    'res': res,
                    'mappings': mappings
                    })
            
        js_arr = []
        for entry in self.json:
            js = entry
            # GeoHash conversion
            if 'geo_lat' in js and 'geo_long' in js:
                try:
                    js['geo_location'] = {
                        "lat": float(js['geo_lat']),
                        "lon": float(js['geo_long'])
                    }
                except:
                    pass
            js['@version'] = 2
            js['@timestamp'] = time.strftime("%Y/%m/%d %H:%M:%S", time.gmtime())
            js['host'] = hostname
            js['@node'] = hostname
            if 'request' in js and not 'url' in js:
                match = re.match(r"(GET|POST)\s+(.+)\s+HTTP/.+", js['request'])
                if match:
                    js['url'] = match.group(2)
            if 'bytes' in js and isinstance(js['bytes'], basestring) and js['bytes'].isdigit():
                js['bytes_int'] = int(js['bytes'])
            
            js_arr.append({
                '_op_type': 'index',
                '_index': iname,
                '_type': self.logtype,
                'doc': js,
                '_source': js
            })
            
        if len(js_arr) > 0:
            #es.bulk(index=iname, doc_type=self.logtype, body = js_arr )
            helpers.bulk(self.xes, js_arr)

Example 44

Project: anvio Source File: terminal.py
Function: exit
    def __exit__(self, exc_type, exc_value, traceback):
        sys.stderr.flush()
        sys.stderr = self.old_stderr
        sys.stdout.flush()
        sys.stdout = self.old_stdout

Example 45

Project: oligotyping Source File: utils.py
def get_quals_dict(quals_file, alignment_file, output_file_path = None, verbose = True):
    """This function takes qual scores file in FASTA format, expands each
       entry to match base calls in the corresponding aligned read in the
       FASTA file (which requires deflines to be identical), and finally
       returns a dictionary that contains qual scores as a list of integer
       values that are bound to deflines as key/value pairs"""

    quals_dict = {}
    quals_aligned_dict = {}

    progress = Progress()
    progress.verbose = verbose
    progress.new('Quality scores dictionary is being generated')
 
    alignment = u.SequenceSource(alignment_file)
    qual = u.QualSource(quals_file)

    while qual.next():
        if qual.pos % 1000 == 0:
            progress.update('Step 1 of 2 :: Quality scores read: %s' % (pretty_print(qual.pos)))
        quals_dict[qual.id] = qual.quals_int

    while alignment.next():
        if alignment.pos % 1000 == 0:
            progress.update('Step 2 of 2 :: Alignments matched: %s' % (pretty_print(alignment.pos)))
            sys.stderr.flush()

        matching_qual = quals_dict[alignment.id] 

        qual_aligned = []
        for i in range(0, len(alignment.seq)):
            if alignment.seq[i] != '-':
                qual_aligned.append(matching_qual.pop(0))
            else:
                qual_aligned.append(None)

        quals_aligned_dict[alignment.id] = qual_aligned
    progress.end()

    if output_file_path:
        cPickle.dump(quals_aligned_dict, open(output_file_path, 'w'))

    return quals_aligned_dict

Example 46

Project: mpi4py Source File: runtests.py
Function: writeln
def writeln(message='', endl='\n'):
    sys.stderr.flush()
    sys.stderr.write(message+endl)
    sys.stderr.flush()