asyncio.coroutine

Here are examples of the Python API asyncio.coroutine, taken from open source projects.

177 Examples
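
Before the project examples, here is a minimal sketch (not taken from any of the projects below) of how the legacy @asyncio.coroutine decorator is used. It marks a generator function as a coroutine, and `yield from` plays the role that `await` plays in native coroutines; the decorator was deprecated in Python 3.8 in favour of async def / await.

import asyncio

@asyncio.coroutine
def fetch_answer(delay):
    # yield from suspends this coroutine until asyncio.sleep() completes
    yield from asyncio.sleep(delay)
    return 42

loop = asyncio.get_event_loop()
print(loop.run_until_complete(fetch_answer(0.1)))  # prints 42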

Example 51

Project: aiocouchdb Source File: document.py
Function: update
    @asyncio.coroutine
    def update(self, doc, *,
               atts=None,
               auth=None,
               batch=None,
               new_edits=None,
               rev=None):
        """`Updates a docuement`_ on server.

        :param dict doc: Docuement object. Should implement
                        :class:`~collections.abc.MutableMapping` interface

        :param auth: :class:`aiocouchdb.authn.AuthProvider` instance

        :param dict atts: Attachments mapping where keys are attachment
                          names and values are file-like objects or bytes
        :param str batch: Updates in batch mode (asynchronously).
                          This argument accepts only the ``"ok"`` value.
        :param bool new_edits: Signals a new document edition. When ``False``,
                               allows conflicts to be created manually
        :param str rev: Document revision. Optional, since the document's
                        ``_rev`` field is also respected

        :rtype: dict

        .. _Updates a document: http://docs.couchdb.org/en/latest/api/document/common.html#put--db-docid
        """
        params = dict((key, value)
                      for key, value in locals().items()
                      if (key not in {'self', 'doc', 'auth', 'atts'} and
                          value is not None))

        if not isinstance(doc, MutableMapping):
            raise TypeError('MutableMapping instance expected, like a dict')

        if '_id' in doc and doc['_id'] != self.id:
            raise ValueError('Attempt to store document with different ID: '
                             '%r ; expected: %r. Maybe you want to .copy() it?'
                             % (doc['_id'], self.id))

        if atts:
            writer = MultipartWriter('related')
            writer.append_json(doc)

            doc.setdefault('_attachments', {})

            # A little hack to sync the order of attachments definition
            # between JSON and multipart body parts
            for name in atts:
                doc['_attachments'][name] = {}

            for name, stub in doc['_attachments'].items():
                if stub:
                    continue
                att = atts[name]
                if not isinstance(att, (bytes, io.BytesIO, io.BufferedIOBase)):
                    raise TypeError('attachment payload should be a source of'
                                    ' binary data (bytes, BytesIO, file '
                                    ' opened in binary mode), got %r' % att)
                part = writer.append(att)
                part.set_content_disposition('attachment', filename=name)
                doc['_attachments'][name] = {
                    'length': int(part.headers[CONTENT_LENGTH]),
                    'follows': True,
                    'content_type': part.headers[CONTENT_TYPE]
                }

            writer.parts[0].headers[CONTENT_LENGTH] = \
                str(len(json.dumps(doc).encode('utf-8')))

            # workaround of COUCHDB-2295
            writer.headers[CONTENT_LENGTH] = str(writer.calc_content_length())

            resp = yield from self.resource.put(auth=auth,
                                                data=writer,
                                                params=params)

            for info in doc['_attachments'].values():
                info.pop('follows')
                info['stub'] = True
        else:
            resp = yield from self.resource.put(auth=auth,
                                                data=doc,
                                                params=params)
        yield from resp.maybe_raise_error()
        return (yield from resp.json())
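
A hypothetical call sketch for the method above (the field names and the attachment payload are made up; `doc` is assumed to be an aiocouchdb Document instance):

@asyncio.coroutine
def save_with_attachment(doc):
    # update() accepts the document body plus an optional attachments mapping
    result = yield from doc.update(
        {'_id': doc.id, 'title': 'hello'},
        atts={'note.txt': b'attachment body'})
    return result  # dict with the new revision, per the docstring above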

Example 52

Project: aiokafka Source File: group_coordinator.py
    @asyncio.coroutine
    def commit_offsets(self, offsets):
        """Commit specific offsets asynchronously.

        Arguments:
            offsets (dict {TopicPartition: OffsetAndMetadata}): what to commit

        Raises error on failure
        """
        self._subscription.needs_fetch_committed_offsets = True
        if not offsets:
            log.debug('No offsets to commit')
            return True

        if (yield from self.coordinator_unknown()):
            raise Errors.GroupCoordinatorNotAvailableError()
        node_id = self.coordinator_id

        # create the offset commit request
        offset_data = collections.defaultdict(list)
        for tp, offset in offsets.items():
            offset_data[tp.topic].append(
                (tp.partition,
                 offset.offset,
                 offset.metadata))

        request = OffsetCommitRequest(
            self.group_id,
            self.generation,
            self.member_id,
            OffsetCommitRequest.DEFAULT_RETENTION_TIME,
            [(topic, tp_offsets) for topic, tp_offsets in offset_data.items()]
        )

        log.debug(
            "Sending offset-commit request with %s to %s", offsets, node_id)

        response = yield from self._send_req(node_id, request)

        unauthorized_topics = set()
        for topic, partitions in response.topics:
            for partition, error_code in partitions:
                tp = TopicPartition(topic, partition)
                offset = offsets[tp]

                error_type = Errors.for_code(error_code)
                if error_type is Errors.NoError:
                    log.debug(
                        "Committed offset %s for partition %s", offset, tp)
                    if self._subscription.is_assigned(tp):
                        partition = self._subscription.assignment[tp]
                        partition.committed = offset.offset
                elif error_type is Errors.GroupAuthorizationFailedError:
                    log.error("OffsetCommit failed for group %s - %s",
                              self.group_id, error_type.__name__)
                    raise error_type()
                elif error_type is Errors.TopicAuthorizationFailedError:
                    unauthorized_topics.add(topic)
                elif error_type in (Errors.OffsetMetadataTooLargeError,
                                    Errors.InvalidCommitOffsetSizeError):
                    # raise the error to the user
                    log.info(
                        "OffsetCommit failed for group %s on partition %s"
                        " due to %s, will retry", self.group_id, tp,
                        error_type.__name__)
                    raise error_type()
                elif error_type is Errors.GroupLoadInProgressError:
                    # just retry
                    log.info(
                        "OffsetCommit failed for group %s because group is"
                        " initializing (%s), will retry", self.group_id,
                        error_type.__name__)
                    raise error_type()
                elif error_type in (Errors.GroupCoordinatorNotAvailableError,
                                    Errors.NotCoordinatorForGroupError,
                                    Errors.RequestTimedOutError):
                    log.info(
                        "OffsetCommit failed for group %s due to a"
                        " coordinator error (%s), will find new coordinator"
                        " and retry", self.group_id, error_type.__name__)
                    self.coordinator_dead()
                    raise error_type()
                elif error_type in (Errors.UnknownMemberIdError,
                                    Errors.IllegalGenerationError,
                                    Errors.RebalanceInProgressError):
                    # need to re-join group
                    error = error_type(self.group_id)
                    log.error(
                        "OffsetCommit failed for group %s due to group"
                        " error (%s), will rejoin", self.group_id, error)
                    self._subscription.mark_for_reassignment()
                    raise error
                else:
                    log.error(
                        "OffsetCommit failed for group %s on partition %s"
                        " with offset %s: %s", self.group_id, tp, offset,
                        error_type.__name__)
                    raise error_type()

        if unauthorized_topics:
            log.error("OffsetCommit failed for unauthorized topics %s",
                      unauthorized_topics)
            raise Errors.TopicAuthorizationFailedError(unauthorized_topics)
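
A hedged sketch of the offsets mapping this coroutine expects, following its docstring (topic name, partition and offset are placeholders; TopicPartition and OffsetAndMetadata are the structures named in that docstring):

@asyncio.coroutine
def commit_example(coordinator):
    # `coordinator` is assumed to be an instance of the group coordinator above
    offsets = {TopicPartition('my-topic', 0): OffsetAndMetadata(42, '')}
    yield from coordinator.commit_offsets(offsets)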

Example 53

Project: peru Source File: cache.py
    @asyncio.coroutine
    def export_tree(self, tree, dest, previous_tree=None, *, force=False,
                    previous_index_file=None):
        '''This method is the core of `peru sync`. If the contents of "dest"
        match "previous_tree", then export_tree() updates them to match "tree".
        If not, it raises an error and doesn't touch any files.

        Because it's important for the no-op `peru sync` to be fast, we make an
        extra optimization for this case. The caller passes in the path to the
        index file used during the last sync, which should already reflect
        "previous_tree". That allows us to skip the read-tree and update-index
        calls, so all we have to do is a single diff-files operation to check
        for cleanliness.

        It's difficult to predict all the different states the index file might
        end up in under different error conditions, not only now but also in
        past and future git versions. For safety and simplicity, if any
        operation returns an error code, we delete the supplied index file.
        Right now this includes expected errors, like "sync would overwrite
        existing files," and unexpected errors, like "index is on fire."'''

        tree = tree or (yield from self.get_empty_tree())
        previous_tree = previous_tree or (yield from self.get_empty_tree())

        if not os.path.exists(dest):
            os.makedirs(dest)

        with contextlib.ExitStack() as stack:

            # If the caller gave us an index file, create a git session around
            # it. Otherwise, create a clean one. Note that because we delete
            # the index file whenever there are errors, we also allow the
            # caller to pass in a path to a nonexistent file. In that case we
            # have to pay the cost to recreate it.
            did_refresh = False
            if previous_index_file:
                session = GitSession(
                    self.trees_path, previous_index_file, dest)
                stack.enter_context(delete_if_error(previous_index_file))
                if not os.path.exists(previous_index_file):
                    did_refresh = True
                    yield from session.read_tree_and_stats_into_index(
                        previous_tree)
            else:
                session = stack.enter_context(self.clean_git_session(dest))
                did_refresh = True
                yield from session.read_tree_and_stats_into_index(
                    previous_tree)

            # The fast path. If the previous tree is the same as the current
            # one, and no files have changed at all, short-circuit.
            if previous_tree == tree:
                if (yield from session.working_copy_matches_index()):
                    return

            # Everything below is the slow path. Some files have changed, or
            # the tree has changed, or both. If we didn't refresh the index
            # file above, we must do so now.
            if not did_refresh:
                yield from session.read_tree_and_stats_into_index(
                    previous_tree)
            modified = yield from session.get_modified_files_skipping_deletes()
            if modified and not force:
                raise DirtyWorkingCopyError(
                    'Imported files have been modified ' +
                    '(use --force to overwrite):\n\n' +
                    _format_file_lines(modified))

            # Do all the file updates and deletions needed to produce `tree`.
            try:
                yield from session.read_tree_updating_working_copy(tree, force)
            except GitError:
                # Give a more informative error if we failed because files that
                # are new in `tree` already existed in the working copy.
                new_files = yield from session.get_new_files_in_tree(
                    previous_tree, tree)
                existing_new_files = [f for f in new_files if f and
                                      os.path.exists(os.path.join(dest, f))]
                existing_new_files.sort()
                if existing_new_files:
                    raise DirtyWorkingCopyError(
                        'Imports would overwrite preexisting files '
                        '(use --force to write anyway):\n\n' +
                        _format_file_lines(existing_new_files))
                else:
                    # We must've failed for some other reason. Let the error
                    # keep going.
                    raise

            # Recreate any missing files.
            yield from session.checkout_files_from_index()

Example 54

Project: aiobotocore Source File: paginate.py
Function: next_page
    @asyncio.coroutine
    def next_page(self):
        if self._is_stop:
            return None

        response = yield from self._make_request(self._current_kwargs)
        parsed = self._extract_parsed_response(response)
        if self._first_request:
            # The first request is handled differently.  We could
            # possibly have a resume/starting token that tells us where
            # to index into the retrieved page.
            if self._starting_token is not None:
                self._starting_truncation = self._handle_first_request(
                    parsed, self._primary_result_key,
                    self._starting_truncation)
            self._first_request = False
            self._record_non_aggregate_key_values(parsed)
        current_response = self._primary_result_key.search(parsed)
        if current_response is None:
            current_response = []
        num_current_response = len(current_response)
        truncate_amount = 0
        if self._max_items is not None:
            truncate_amount = (self._total_items + num_current_response) \
                - self._max_items

        if truncate_amount > 0:
            self._truncate_response(parsed, self._primary_result_key,
                                    truncate_amount, self._starting_truncation,
                                    self._next_token)
            self._is_stop = True
            return response
        else:
            self._total_items += num_current_response
            self._next_token = self._get_next_token(parsed)
            if all(t is None for t in self._next_token.values()):
                self._is_stop = True
                return response
            if self._max_items is not None and \
                    self._total_items == self._max_items:
                # We're on a page boundary so we can set the current
                # next token to be the resume token.
                self.resume_token = self._next_token
                self._is_stop = True
                return response
            if self._previous_next_token is not None and \
                    self._previous_next_token == self._next_token:
                message = ("The same next token was received "
                           "twice: %s" % self._next_token)
                raise PaginationError(message=message)
            self._inject_token_into_kwargs(self._current_kwargs,
                                           self._next_token)
            self._previous_next_token = self._next_token
            return response
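
Based only on the behaviour visible above (next_page() returns None once pagination stops), a small consumption sketch; `pager` is an assumed instance of this paginator class:

@asyncio.coroutine
def collect_pages(pager):
    pages = []
    while True:
        page = yield from pager.next_page()
        if page is None:  # pagination finished
            break
        pages.append(page)
    return pages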

Example 55

Project: CloudBot Source File: brainfuck.py
@asyncio.coroutine
@hook.command("braincuem", "bf")
def bf(text):
    """<prog> - executes <prog> as Brainfeck code
    :type text: str
    """

    program = re.sub('[^][<>+-.,]', '', text)

    # create a dict of bracket pairs, for speed later on
    brackets = {}
    open_brackets = []
    for pos in range(len(program)):
        if program[pos] == '[':
            open_brackets.append(pos)
        elif program[pos] == ']':
            if len(open_brackets) > 0:
                brackets[pos] = open_brackets[-1]
                brackets[open_brackets[-1]] = pos
                open_brackets.pop()
            else:
                return "Unbalanced brackets"
    if len(open_brackets) != 0:
        return "Unbalanced brackets"

    # now we can start interpreting
    ip = 0  # instruction pointer
    mp = 0  # memory pointer
    steps = 0
    memory = [0] * BUFFER_SIZE  # initial memory area
    rightmost = 0
    output = ""  # we'll save the output here

    # the main program loop:
    while ip < len(program):
        c = program[ip]
        if c == '+':
            memory[mp] = (memory[mp] + 1) % 256
        elif c == '-':
            memory[mp] = (memory[mp] - 1) % 256
        elif c == '>':
            mp += 1
            if mp > rightmost:
                rightmost = mp
                if mp >= len(memory):
                    # no restriction on memory growth!
                    memory.extend([0] * BUFFER_SIZE)
        elif c == '<':
            mp -= 1 % len(memory)
        elif c == '.':
            output += chr(memory[mp])
            if len(output) > 500:
                break
        elif c == ',':
            memory[mp] = random.randint(1, 255)
        elif c == '[':
            if memory[mp] == 0:
                ip = brackets[ip]
        elif c == ']':
            if memory[mp] != 0:
                ip = brackets[ip]

        ip += 1
        steps += 1
        if steps > MAX_STEPS:
            if not output:
                output = "(no output)"
            output += "(exceeded {} iterations)".format(MAX_STEPS)
            break

    stripped_output = re.sub(r'[\x00-\x1F]', '', output)

    if not stripped_output:
        if output:
            return "No printable output"
        return "No output"

    return stripped_output[:430]

Example 56

Project: pycon2014 Source File: child_process.py
Function: main
@asyncio.coroutine
def main(loop):
    # program which prints evaluation of each expression from stdin
    code = r'''if 1:
                   import os
                   def writeall(fd, buf):
                       while buf:
                           n = os.write(fd, buf)
                           buf = buf[n:]
                   while True:
                       s = os.read(0, 1024)
                       if not s:
                           break
                       s = s.decode('ascii')
                       s = repr(eval(s)) + '\n'
                       s = s.encode('ascii')
                       writeall(1, s)
                   '''

    # commands to send to input
    commands = iter([b"1+1\n",
                     b"2**16\n",
                     b"1/3\n",
                     b"'x'*50",
                     b"1/0\n"])

    # start subprocess and wrap stdin, stdout, stderr
    p = Popen([sys.executable, '-c', code],
              stdin=PIPE, stdout=PIPE, stderr=PIPE)

    stdin = yield from connect_write_pipe(p.stdin)
    stdout, stdout_transport = yield from connect_read_pipe(p.stdout)
    stderr, stderr_transport = yield from connect_read_pipe(p.stderr)

    # interact with subprocess
    name = {stdout:'OUT', stderr:'ERR'}
    registered = {asyncio.Task(stderr.readline()): stderr,
                  asyncio.Task(stdout.readline()): stdout}
    while registered:
        # write command
        cmd = next(commands, None)
        if cmd is None:
            stdin.close()
        else:
            print('>>>', cmd.decode('ascii').rstrip())
            stdin.write(cmd)

        # get and print lines from stdout, stderr
        timeout = None
        while registered:
            done, pending = yield from asyncio.wait(
                registered, timeout=timeout,
                return_when=asyncio.FIRST_COMPLETED)
            if not done:
                break
            for f in done:
                stream = registered.pop(f)
                res = f.result()
                print(name[stream], res.decode('ascii').rstrip())
                if res != b'':
                    registered[asyncio.Task(stream.readline())] = stream
            timeout = 0.0

    stdout_transport.close()
    stderr_transport.close()
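
main() relies on connect_read_pipe()/connect_write_pipe() helpers defined elsewhere in child_process.py. As a rough sketch (an assumption, not the project's code), the read side can be built from asyncio's stream machinery like this:

@asyncio.coroutine
def connect_read_pipe(file):
    loop = asyncio.get_event_loop()
    reader = asyncio.StreamReader(loop=loop)
    protocol = asyncio.StreamReaderProtocol(reader, loop=loop)
    # connect_read_pipe wraps an existing pipe file object in a transport
    transport, _ = yield from loop.connect_read_pipe(lambda: protocol, file)
    return reader, transport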

Example 57

Project: aiohttp-cors Source File: cors_config.py
    @asyncio.coroutine
    def _preflight_handler(self, request: web.Request):
        """CORS preflight request handler"""

        # Handle according to part 6.2 of the CORS specification.

        origin = request.headers.get(hdrs.ORIGIN)
        if origin is None:
            # Terminate CORS according to CORS 6.2.1.
            raise web.HTTPForbidden(
                text="CORS preflight request failed: "
                     "origin header is not specified in the request")

        # CORS 6.2.3. Doing it out of order is not an error.
        request_method = self._parse_request_method(request)

        # CORS 6.2.5. Doing it out of order is not an error.

        try:
            config = \
                yield from self._router_adapter.get_preflight_request_config(
                    request, origin, request_method)
        except KeyError:
            raise web.HTTPForbidden(
                text="CORS preflight request failed: "
                     "request method {!r} is not allowed "
                     "for {!r} origin".format(request_method, origin))

        if not config:
            # No allowed origins for the route.
            # Terminate CORS according to CORS 6.2.1.
            raise web.HTTPForbidden(
                text="CORS preflight request failed: "
                     "no origins are allowed")

        options = config.get(origin, config.get("*"))
        if options is None:
            # No configuration for the origin - deny.
            # Terminate CORS according to CORS 6.2.2.
            raise web.HTTPForbidden(
                text="CORS preflight request failed: "
                     "origin '{}' is not allowed".format(origin))

        # CORS 6.2.4
        request_headers = self._parse_request_headers(request)

        # CORS 6.2.6
        if options.allow_headers == "*":
            pass
        else:
            disallowed_headers = request_headers - options.allow_headers
            if disallowed_headers:
                raise web.HTTPForbidden(
                    text="CORS preflight request failed: "
                         "headers are not allowed: {}".format(
                             ", ".join(disallowed_headers)))

        # OK, the actual CORS request with the parameters specified in the
        # preflight request is allowed.
        # Set appropriate headers and return 200 response.

        response = web.Response()

        # CORS 6.2.7
        response.headers[hdrs.ACCESS_CONTROL_ALLOW_ORIGIN] = origin
        if options.allow_credentials:
            # Set allowed credentials.
            response.headers[hdrs.ACCESS_CONTROL_ALLOW_CREDENTIALS] = _TRUE

        # CORS 6.2.8
        if options.max_age is not None:
            response.headers[hdrs.ACCESS_CONTROL_MAX_AGE] = \
                str(options.max_age)

        # CORS 6.2.9
        # TODO: it would be more optimal for the client's preflight request
        # cache to respond with ALL allowed methods.
        response.headers[hdrs.ACCESS_CONTROL_ALLOW_METHODS] = request_method

        # CORS 6.2.10
        if request_headers:
            # Note: case of the headers in the request is changed, but this
            # shouldn't be a problem, since the headers should be compared in
            # the case-insensitive way.
            response.headers[hdrs.ACCESS_CONTROL_ALLOW_HEADERS] = \
                ",".join(request_headers)

        return response
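
_preflight_handler is installed by aiohttp-cors itself; applications normally only configure it. A minimal setup sketch using the library's public API (the route, handler and allowed origin are placeholders):

import aiohttp_cors
from aiohttp import web

@asyncio.coroutine
def handler(request):
    return web.Response(text="hello")

app = web.Application()
cors = aiohttp_cors.setup(app, defaults={
    "http://client.example.org": aiohttp_cors.ResourceOptions(
        allow_credentials=True, allow_headers=("X-Requested-With",)),
})
# Preflight OPTIONS requests for this route are now answered by the handler above
cors.add(app.router.add_route("GET", "/resource", handler))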

Example 58

Project: python-steemlib Source File: async_monitor.py
Function: start
@asyncio.coroutine
def start(steem):
    with aiohttp.ClientSession() as session:
        futures = {"time": None, "exchange_price": None, "witness_price": None, "db": None}
        last_witness_update_time, last_witness_price = yield from get_witness_price_feed(steem, account)
        r = yield from steem.db.get_dynamic_global_properties()
        last_time = read_time(r["time"])
        cur_time = last_time
        first_time = True
        steem_price = yield from get_steem_price(session)
        futures["time"] = asyncio.async(asyncio.sleep(0))
        needs_updating = False
        while True:
            ftrs = []
            for f in futures.values():
                if f:
                    ftrs.append(f)
            done, pending = yield from asyncio.wait(ftrs, return_when=asyncio.FIRST_COMPLETED)

            old_futures = {}
            for k, f in futures.items():
                old_futures[k] = futures[k]
            for k, f in old_futures.items():
                if f in done:
                    futures[k] = None
                    if k == "time":
                        futures["time"] = asyncio.async(asyncio.sleep(3))
                        if futures["db"]:
                            futures["db"].cancel()
                        futures["db"]   = yield from steem.db.get_dynamic_global_properties(future=True)
                    elif k == "exchange_price":
                        steem_price = f.result()
                        if abs(1 - last_witness_price / steem_price) > 0.03 and (cur_time - last_witness_update_time) > 60 * 60:
                            if not needs_updating:
                                needs_updating = True
                                print("Price feed needs to be updated due to change in price.")
                                print("Current witness price: {} $/STEEM   Current exchange price: {} $/STEEM".format(last_witness_price, steem_price))
                        else:
                            if needs_updating and cur_time - last_witness_update_time < 24 * 60 * 60:
                                needs_updating = False
                                print("Price feed no longer needs to be updated")

                    elif k == "witness_price":
                        new_last_witness_update_time, new_last_witness_price = f.result()
                        if new_last_witness_update_time != last_witness_update_time:
                            last_witness_update_time = new_last_witness_update_time
                            last_witness_price = new_last_witness_price
                            print("Price feed has been updated")
                            needs_updating = False
                    elif k == "db":
                        r = f.result()
                        cur_time = read_time(r["time"])
                        if first_time or cur_time - last_time > 28:  # seconds
                            first_time = False
                            print("Block number {} at time: {}".format(r["head_block_number"], r["time"]))
                            if needs_updating:
                                print("Price feed still needs updating to {} $/STEEM".format(steem_price))
                            futures["exchange_price"] = asyncio.async(get_steem_price(session))
                            futures["witness_price"] = asyncio.async(get_witness_price_feed(steem, account))
                            last_time = cur_time
                        if cur_time - last_witness_update_time >= 24 * 60 * 60:
                            if not needs_updating:
                                needs_updating = True
                                print("Price feed needs to be updated because it is too old.")
            old_futures = {}

Example 59

Project: aiomcache Source File: client.py
Function: multi_get
    @asyncio.coroutine
    def _multi_get(self, conn, *keys):
        # req  - get <key> [<key> ...]\r\n
        # resp - VALUE <key> <flags> <bytes> [<cas unique>]\r\n
        #        <data block>\r\n (if exists)
        #        [...]
        #        END\r\n
        if not keys:
            return []

        [self._validate_key(key) for key in keys]
        if len(set(keys)) != len(keys):
            raise ClientException('duplicate keys passed to multi_get')

        conn.writer.write(b'get ' + b' '.join(keys) + b'\r\n')

        received = {}
        line = yield from conn.reader.readline()

        while line != b'END\r\n':
            terms = line.split()

            if len(terms) == 4 and terms[0] == b'VALUE':  # exists
                key = terms[1]
                flags = int(terms[2])
                length = int(terms[3])

                if flags != 0:
                    raise ClientException('received non zero flags')

                val = (yield from conn.reader.readexactly(length+2))[:-2]
                if key in received:
                    raise ClientException('duplicate results from server')

                received[key] = val
            else:
                raise ClientException('get failed', line)

            line = yield from conn.reader.readline()

        if len(received) > len(keys):
            raise ClientException('received too many responses')
        return [received.get(k, None) for k in keys]
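
A hypothetical call sketch; _multi_get() is reached through the client's public multi_get(), keys are bytes, and missing keys come back as None (host, port and keys below are placeholders):

import aiomcache

@asyncio.coroutine
def demo():
    client = aiomcache.Client("127.0.0.1", 11211)
    yield from client.set(b"k1", b"v1")
    values = yield from client.multi_get(b"k1", b"k2")  # [b"v1", None]
    return values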

Example 60

Project: fcatalog_server Source File: fcatalog_logic.py
Function: client_handler
    @asyncio.coroutine
    def client_handler(self):
        """
        Main logic of the catalog1 server, for one client.
        Communication with the client is done through the msg_endpoint class
        instance.
        """
        logger.debug('New connection {}'.format(id(self._msg_endpoint)))
        msg_inst = ( yield from self._msg_endpoint.recv() )

        if msg_inst is None:
            # Remote peer has disconnected or sent invalid data. We disconnect.
            return

        if msg_inst.msg_name != 'ChooseDB':
            # If the first message is not ChooseDB, we disconnect.
            logger.debug('Connection {} has {} as first message.'
                    ' Closing connection.'.\
                            format(id(self._msg_endpoint), msg_inst.msg_name))
            return

        # Database name:
        db_name = msg_inst.get_field('db_name')

        # Validate database name:
        if not is_good_db_name(db_name):
            logger.info('Invalid db name {} was chosen at connection {}'.\
                    format(db_name,id(self._msg_endpoint)))
            # Disconnect the client:
            return

        # Conclude database path:
        db_path = os.path.join(self._db_base_path,db_name)

        logger.debug('db_path = {} at connection {}'.\
                format(db_path,id(self._msg_endpoint)))

        # Build a Functions DB interface:
        self._fdb = FuncsDB(db_path,self._num_hashes)
        try:
            msg_inst = ( yield from self._msg_endpoint.recv() )
            while msg_inst is not None:
                if msg_inst.msg_name == 'ChooseDB':
                    # We can't have two ChooseDB messages in a connection. We
                    # close the connection:
                    return
                elif msg_inst.msg_name == 'AddFunction':
                    yield from self._handle_add_function(msg_inst)
                elif msg_inst.msg_name == 'RequestSimilars':
                    yield from self._handle_request_similars(msg_inst)
                else:
                    # This should never happen:
                    raise ServerLogicError('Unknown message name {}'.\
                            format(msg_inst.msg_name))

                # Receive the next message:
                msg_inst = ( yield from self._msg_endpoint.recv() )

            logger.debug('Received a None message on connection {}'.\
                    format(id(self._msg_endpoint)))

        finally:
            # We make sure to eventually close the fdb interface (To commit all
            # changes that might be pending).
            self._fdb.close()

Example 61

Project: ika Source File: server.py
    @asyncio.coroutine
    def connect(self):
        self.reader, self.writer = yield from asyncio.open_connection(self.link.host, self.link.port)
        logger.debug('Connected')
        self.writeline('SERVER {} {} 0 {} :{}',
            self.name, self.link.password, self.sid, self.description
        )
        while 1:
            line = yield from self.readline()
            if not line:
                continue
            if RE_SERVER.match(line):
                server, command, *params = ircutils.parseline(line)
                sender = server
                if command == 'PING':
                    self.writeserverline('PONG {} {}', params[1], params[0])
                elif command == 'BURST':
                    self._ev = self.ev
                    self.ev = None
                elif command == 'ENDBURST':
                    self.ev = self._ev
                    self._ev = None
                    params = [self.linked_once]
                    if not self.linked_once:
                        self.linked_once = True
                        if settings.admin_channel in self.channels:
                            timestamp = self.channels[settings.admin_channel].timestamp
                            modes = self.channels[settings.admin_channel].modes
                        else:
                            timestamp = timeutils.unixtime()
                            modes = ''
                        self.writeserverline('FJOIN {} {} +{} :{}', settings.admin_channel, timestamp, modes,
                            ' '.join(map(lambda x: 'a,{}'.format(x), self.services.keys())))
                elif command == 'UID':
                    self.users[params[0]] = User(*params)
                elif command == 'METADATA':
                    if params[0] == '*':
                        pass
                    elif params[0].startswith('#'):
                        self.channels[params[0].lower()].metadata[params[1]] = params[-1]
                    else:
                        if params[1] == 'accountname':
                            account = Account.find_by_nick(params[-1])
                            if (account is not None) and (account.name.name == params[-1]):
                                self.users[params[0]].metadata['accountname'] = account.name.name
                            else:
                                self.writeserverline('METADATA {} accountname :', params[0])
                        else:
                            self.users[params[0]].metadata[params[1]] = params[-1]
                elif command == 'FJOIN':
                    channel = params[0].lower()
                    if channel in self.channels:
                        self.channels[channel].process_fjoin(self.users, *params)
                    else:
                        self.channels[channel] = Channel(self.users, *params)
            elif RE_USER.match(line):
                uid, command, *params = ircutils.parseline(line)
                user = self.users[uid]
                sender = user
                if command == 'PRIVMSG':
                    target = params[0]
                    if target.startswith(self.sid):
                        self.services[target].process_command(user, *params[1:])
                elif command == 'OPERTYPE':
                    user.opertype = params[0]
                elif command == 'IDLE':
                    service = self.services[params[0]]
                    self.writeuserline(service.uid, 'IDLE {} {} 0', uid, timeutils.unixtime())
                elif command == 'NICK':
                    user.nick = params[0]
                elif command == 'FHOST':
                    user.dhost = params[0]
                elif command == 'FMODE':
                    if len(params) == 3: # channel/user mode
                        pass # TODO: To be implemented
                    elif len(params) == 4: # channel user mode
                        modes = params[2]
                        method = 'update' if modes[0] == '+' else 'difference_update'
                        if params[3] in self.channels[params[0].lower()].usermodes.keys():
                            getattr(self.channels[params[0].lower()].usermodes[params[3]], method)(modes[1:])
                elif command == 'KICK':
                    channel = params[0].lower()
                    target = self.users[params[1]]
                    self.channels[channel].remove_user(target)
                    if len(self.channels[channel].users) == 0:
                        del self.channels[channel]
                elif command == 'PART':
                    channel = params[0].lower()
                    self.channels[channel].remove_user(user)
                    if len(self.channels[channel].users) == 0:
                        del self.channels[channel]
                elif command == 'QUIT':
                    for channel in self.users[uid].channels:
                        self.channels[channel].remove_user(user)
                        if len(self.channels[channel].users) == 0:
                            del self.channels[channel]
                    del self.users[uid]
            else:
                command, *params = ircutils.parseline(line)
                sender = None
                if command == 'SERVER':
                    try:
                        assert params[0] == self.link.name
                        assert params[1] == self.link.password
                    except AssertionError:
                        self.writeline('ERROR :Server information doesn\'t match.')
                        break
                    else:
                        self.link.sid = params[3]
                        self.writeserverline('BURST {}', timeutils.unixtime())
                        self.writeserverline('VERSION :{} {}', Versions.IKA, self.name)
                        idx = 621937810  # int('AAAAAA', 36)
                        for service in self.services_instances.values():
                            service.id = ircutils.base36encode(idx)
                            names = list(service.aliases)
                            names.insert(0, service.name)
                            for name in names:
                                uid = '{}{}'.format(self.sid, ircutils.base36encode(idx))
                                self.writeserverline('UID {uid} {timestamp} {nick} {host} {host} {ident} 0.0.0.0 {timestamp} +Iiko :{gecos}',
                                    uid=uid,
                                    nick=name,
                                    ident=service.ident,
                                    host=self.name,
                                    gecos=service.gecos,
                                    timestamp=timeutils.unixtime(),
                                )
                                self.writeuserline(uid, 'OPERTYPE Services')
                                self.services[uid] = service
                                idx += 1
                        self.writeserverline('ENDBURST')
                elif command == 'ERROR':
                    raise RuntimeError('Remote server has returned an error: {}'.format(params[-1]))
            if hasattr(self.ev, command):
                getattr(self.ev, command).fire(sender, *params)
            # TODO: Implement each functions
        logger.debug('Disconnected')

Example 62

Project: aiogremlin Source File: client.py
Function: submit
@asyncio.coroutine
def submit(gremlin, *,
           url='http://localhost:8182/',
           bindings=None,
           lang="gremlin-groovy",
           rebindings=None,
           op="eval",
           processor="",
           timeout=None,
           session=None,
           loop=None,
           conn_timeout=None,
           username="",
           password=""):
    """
    :ref:`coroutine<coroutine>`

    Submit a script to the Gremlin Server.

    :param str gremlin: The Gremlin script.
    :param str url: url for Gremlin Server (optional). 'http://localhost:8182/'
        by default
    :param dict bindings: A mapping of bindings for Gremlin script.
    :param str lang: Language of scripts submitted to the server.
        "gremlin-groovy" by default
    :param dict rebindings: Rebind ``Graph`` and ``TraversalSource``
        objects to different variable names in the current request
    :param str op: Gremlin Server op argument. "eval" by default.
    :param str processor: Gremlin Server processor argument. "" by default.
    :param float timeout: timeout for establishing connection (optional).
        Values ``0`` or ``None`` mean no timeout
    :param str session: Session id (optional). Typically a uuid
    :param loop: :ref:`event loop<asyncio-event-loop>` (optional). If ``None``,
        `asyncio.get_event_loop` is used to obtain the default event loop
    :param float conn_timeout: timeout for establishing connection (seconds)
        (optional). Values ``0`` or ``None`` mean no timeout
    :param username: Username for SASL auth
    :param password: Password for SASL auth
    :returns: :py:class:`aiogremlin.client.GremlinResponse` object
    """

    if loop is None:
        loop = asyncio.get_event_loop()

    connector = aiohttp.TCPConnector(force_close=True, loop=loop,
                                     verify_ssl=False,
                                     conn_timeout=conn_timeout)

    client_session = aiohttp.ClientSession(
        connector=connector, loop=loop,
        ws_response_class=GremlinClientWebSocketResponse)

    gremlin_client = GremlinClient(url=url, loop=loop,
                                   ws_connector=client_session,
                                   username=username, password=password)

    try:
        resp = yield from gremlin_client.submit(
            gremlin, bindings=bindings, lang=lang, rebindings=rebindings,
            op=op, processor=processor, session=session, timeout=timeout)

        return resp

    finally:
        gremlin_client.detach()
        client_session.detach()
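
A small call sketch (the Gremlin script and binding values are placeholders); per the docstring, the result is an aiogremlin.client.GremlinResponse, which can then be read using the client's response API:

@asyncio.coroutine
def run_script():
    resp = yield from submit("x + x", bindings={"x": 2})
    return resp  # GremlinResponse; consume it per aiogremlin's docs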

Example 63

Project: ooi3 Source File: frontend.py
Function: log_in
    @asyncio.coroutine
    def login(self, request):
        """接受登录表单提交的数据,登录后跳转或登录失败后展示错误信息。

        :param request: aiohttp.web.Request
        :return: aiohttp.web.HTTPFound or aiohttp.web.Response
        """
        post = yield from request.post()
        session = yield from get_session(request)

        login_id = post.get('login_id', None)
        password = post.get('password', None)
        mode = int(post.get('mode', 1))

        session['mode'] = mode

        if login_id and password:
            kancolle = KancolleAuth(login_id, password)
            if mode in (1, 2, 3):
                try:
                    yield from kancolle.get_flash()
                    session['api_token'] = kancolle.api_token
                    session['api_starttime'] = kancolle.api_starttime
                    session['world_ip'] = kancolle.world_ip
                    if mode == 2:
                        return aiohttp.web.HTTPFound('/kcv')
                    elif mode == 3:
                        return aiohttp.web.HTTPFound('/poi')
                    else:
                        return aiohttp.web.HTTPFound('/kancolle')

                except OOIAuthException as e:
                    context = {'errmsg': e.message, 'mode': mode}
                    return aiohttp_jinja2.render_template('form.html', request, context)
            elif mode == 4:
                try:
                    osapi_url = yield from kancolle.get_osapi()
                    session['osapi_url'] = osapi_url
                    return aiohttp.web.HTTPFound('/connector')
                except OOIAuthException as e:
                    context = {'errmsg': e.message, 'mode': mode}
                    return aiohttp_jinja2.render_template('form.html', request, context)
            else:
                raise aiohttp.web.HTTPBadRequest()
        else:
            context = {'errmsg': '请输入完整的登录ID和密码', 'mode': mode}  # "Please enter your full login ID and password"
            return aiohttp_jinja2.render_template('form.html', request, context)

Example 64

Project: aiocouchdb Source File: database.py
    @asyncio.coroutine
    def changes(self, *doc_ids,
                auth=None,
                feed_buffer_size=None,
                att_encoding_info=None,
                attachments=None,
                conflicts=None,
                descending=None,
                feed=None,
                filter=None,
                headers=None,
                heartbeat=None,
                include_docs=None,
                limit=None,
                params=None,
                since=None,
                style=None,
                timeout=None,
                view=None):
        """Emits :ref:`database changes events<api/db/changes>`.

        :param str doc_ids: Document IDs to filter by. This method is smart
                            enough to use a `GET` or `POST` request depending
                            on whether any ``doc_ids`` were provided, and it
                            automatically sets the ``filter`` param to the
                            ``_doc_ids`` value.

        :param auth: :class:`aiocouchdb.authn.AuthProvider` instance
        :param int feed_buffer_size: Internal buffer size for fetched feed items

        :param bool att_encoding_info: Includes encoding information in
                                       attachment stubs
        :param bool attachments: Includes the Base64-encoded content of
                                 attachments in the documents
        :param bool conflicts: Includes conflict information in the documents
        :param bool descending: Return changes in descending order
        :param str feed: :ref:`Changes feed type <changes>`
        :param str filter: Filter function name
        :param dict headers: Custom request headers
        :param int heartbeat: Period in milliseconds after which an empty
                              line will be sent by the server to keep the
                              connection alive
        :param bool include_docs: Includes the associated document for each
                                  emitted event
        :param int limit: Limits the number of returned events to the
                          specified value
        :param since: Starts listening to the changes feed from the given
                      `update sequence` value
        :param dict params: Custom request query parameters
        :param str style: Changes feed output style: ``all_docs``, ``main_only``
        :param int timeout: Period in milliseconds to wait for new changes
                            before closing the feed. Applies to continuous
                            feeds
        :param str view: View function name to be used as a filter.
                         Implicitly sets the ``filter`` param to ``_view``

        :rtype: :class:`aiocouchdb.feeds.ChangesFeed`
        """
        params = dict(params or {})
        params.update((key, value)
                      for key, value in locals().items()
                      if key not in {'self', 'doc_ids', 'auth', 'headers',
                                     'params'} and value is not None)

        if doc_ids:
            data = {'doc_ids': doc_ids}
            if 'filter' not in params:
                params['filter'] = '_doc_ids'
            else:
                assert params['filter'] == '_doc_ids'
            request = self.resource.post
        else:
            data = None
            request = self.resource.get

        if 'view' in params:
            if 'filter' not in params:
                params['filter'] = '_view'
            else:
                assert params['filter'] == '_view'

        resp = yield from request('_changes', auth=auth, data=data,
                                  headers=headers, params=params)
        yield from resp.maybe_raise_error()

        if feed == 'continuous':
            return ContinuousChangesFeed(resp, buffer_size=feed_buffer_size)
        elif feed == 'eventsource':
            return EventSourceChangesFeed(resp, buffer_size=feed_buffer_size)
        elif feed == 'longpoll':
            return LongPollChangesFeed(resp, buffer_size=feed_buffer_size)
        else:
            return ChangesFeed(resp, buffer_size=feed_buffer_size)
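
A hedged consumption sketch; `db` is assumed to be an aiocouchdb Database instance, and the feed-iteration calls below (next() returning None when the feed closes) are assumptions about the feeds API rather than verbatim library usage:

@asyncio.coroutine
def watch(db):
    feed = yield from db.changes(feed='continuous', include_docs=True,
                                 heartbeat=10000)
    while True:
        event = yield from feed.next()  # assumed feed API
        if event is None:
            break
        print(event)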

Example 65

Project: backy2 Source File: nbdserver.py
    @asyncio.coroutine
    def handler(self, reader, writer):
        """Handle the connection"""
        try:
            host, port = writer.get_extra_info("peername")
            version, cow_version_uid = None, None
            self.log.info("Incoming connection from %s:%s" % (host,port))

            # initial handshake
            writer.write(b"NBDMAGIC" + struct.pack(">QH", self.NBD_HANDSHAKE, self.NBD_HANDSHAKE_FLAGS))
            yield from writer.drain()

            data = yield from reader.readexactly(4)
            try:
                client_flag = struct.unpack(">L", data)[0]
            except struct.error:
                raise IOError("Handshake failed, disconnecting")

            # we support both fixed and unfixed new-style handshake
            if client_flag == 0:
                fixed = False
                self.log.warning("Client using new-style non-fixed handshake")
            elif client_flag & 1:
                fixed = True
            else:
                raise IOError("Handshake failed, disconnecting")

            # negotiation phase
            while True:
                header = yield from reader.readexactly(16)
                try:
                    (magic, opt, length) = struct.unpack(">QLL", header)
                except struct.error as ex:
                    raise IOError("Negotiation failed: Invalid request, disconnecting")

                if magic != self.NBD_HANDSHAKE:
                    raise IOError("Negotiation failed: bad magic number: %s" % magic)

                if length:
                    data = yield from reader.readexactly(length)
                    if(len(data) != length):
                        raise IOError("Negotiation failed: %s bytes expected" % length)
                else:
                    data = None

                self.log.debug("[%s:%s]: opt=%s, len=%s, data=%s" % (host, port, opt, length, data))

                if opt == self.NBD_OPT_EXPORTNAME:
                    if not data:
                        raise IOError("Negotiation failed: no export name was provided")

                    data = data.decode("utf-8")
                    if data not in [v.uid for v in self.store.get_versions()]:
                        if not fixed:
                            raise IOError("Negotiation failed: unknown export name")

                        writer.write(struct.pack(">QLLL", self.NBD_REPLY, opt, self.NBD_REP_ERR_UNSUP, 0))
                        yield from writer.drain()
                        continue

                    # we have negotiated a version and it will be used
                    # until the client disconnects
                    version = self.store.get_version(data)

                    self.log.info("[%s:%s] Negotiated export: %s" % (host, port, version.uid))

                    export_flags = self.NBD_EXPORT_FLAGS
                    if self.read_only:
                        export_flags ^= self.NBD_RO_FLAG
                        self.log.info("nbd is read only.")
                    else:
                        self.log.info("nbd is read/write.")

                    # in case size_bytes is not a multiple of 4096, we need to
                    # round it up to match the block size.
                    size_bytes = math.ceil(version.size_bytes/4096)*4096
                    writer.write(struct.pack('>QH', size_bytes, export_flags))
                    writer.write(b"\x00"*124)
                    yield from writer.drain()

                    break

                elif opt == self.NBD_OPT_LIST:
                    for _version in self.store.get_versions():
                        writer.write(struct.pack(">QLLL", self.NBD_REPLY, opt, self.NBD_REP_SERVER, len(_version.uid) + 4))
                        version_encoded = _version.uid.encode("utf-8")
                        writer.write(struct.pack(">L", len(version_encoded)))
                        writer.write(version_encoded)
                        yield from writer.drain()

                    writer.write(struct.pack(">QLLL", self.NBD_REPLY, opt, self.NBD_REP_ACK, 0))
                    yield from writer.drain()

                elif opt == self.NBD_OPT_ABORT:
                    writer.write(struct.pack(">QLLL", self.NBD_REPLY, opt, self.NBD_REP_ACK, 0))
                    yield from writer.drain()

                    raise AbortedNegotiationError()
                else:
                    # we don't support any other option
                    if not fixed:
                        raise IOError("Unsupported option")

                    writer.write(struct.pack(">QLLL", self.NBD_REPLY, opt, self.NBD_REP_ERR_UNSUP, 0))
                    yield from writer.drain()

            # operation phase
            while True:
                header = yield from reader.readexactly(28)
                try:
                    (magic, cmd, handle, offset, length) = struct.unpack(">LLQQL", header)
                except struct.error:
                    raise IOError("Invalid request, disconnecting")

                if magic != self.NBD_REQUEST:
                    raise IOError("Bad magic number, disconnecting")

                self.log.debug("[%s:%s]: cmd=%s, handle=%s, offset=%s, len=%s" % (host, port, cmd, handle, offset, length))

                if cmd == self.NBD_CMD_DISC:
                    self.log.info("[%s:%s] disconnecting" % (host, port))
                    break

                elif cmd == self.NBD_CMD_WRITE:
                    data = yield from reader.readexactly(length)
                    if(len(data) != length):
                        raise IOError("%s bytes expected, disconnecting" % length)
                    if not cow_version_uid:
                        cow_version_uid = self.store.get_cow_version(version)
                    try:
                        self.store.write(cow_version_uid, offset, data)
                    # TODO: Fix exception
                    except Exception as ex:
                        self.log.error("[%s:%s] %s" % (host, port, ex))
                        yield from self.nbd_response(writer, handle, error=ex.errno)
                        continue

                    yield from self.nbd_response(writer, handle)

                elif cmd == self.NBD_CMD_READ:
                    try:
                        if cow_version_uid:
                            data = self.store.read(cow_version_uid, offset, length)
                        else:
                            data = self.store.read(version.uid, offset, length)
                    # TODO: Fix exception
                    except Exception as ex:
                        self.log.error("[%s:%s] %s" % (host, port, ex))
                        yield from self.nbd_response(writer, handle, error=ex.errno)
                        continue

                    yield from self.nbd_response(writer, handle, data=data)

                elif cmd == self.NBD_CMD_FLUSH:
                    self.store.flush()
                    yield from self.nbd_response(writer, handle)

                else:
                    self.log.warning("[%s:%s] Unknown cmd %s, disconnecting" % (host, port, cmd))
                    break

        except AbortedNegotiationError:
            self.log.info("[%s:%s] Client aborted negotiation" % (host, port))

        except (asyncio.IncompleteReadError, IOError) as ex:
            self.log.error("[%s:%s] %s" % (host, port, ex))

        finally:
            if cow_version_uid:
                self.store.fixate(cow_version_uid)
            writer.close()
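
handler() is a per-connection coroutine; a rough sketch of wiring it into asyncio's server machinery (the host, port and `server` object are assumptions, not backy2's actual entry point):

loop = asyncio.get_event_loop()
# `server` is assumed to be an instance of the NBD server class shown above
coro = asyncio.start_server(server.handler, '127.0.0.1', 10809, loop=loop)
nbd_server = loop.run_until_complete(coro)
try:
    loop.run_forever()
finally:
    nbd_server.close()
    loop.run_until_complete(nbd_server.wait_closed())
    loop.close()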

Example 66

Project: aioes Source File: transport.py
    @asyncio.coroutine
    def sniff_endpoints(self):
        """Obtain a list of nodes from the cluster and create a new connection
        pool using the information retrieved.

        To extract the node connection parameters use the
        `nodes_to_endpoint_callback`.

        """
        previous_sniff = self._last_sniff
        try:
            # reset last_sniff timestamp
            self._last_sniff = time.monotonic()
            # go through all current connections as well as the
            # seed_connections for good measure
            for c in itertools.chain(self._pool.connections,
                                     self._seed_connections):
                try:
                    # use small timeout for the sniffing request,
                    # should be a fast api call
                    _, headers, node_info = yield from asyncio.wait_for(
                        c.perform_request(
                            'GET',
                            '/_nodes/_all/clear',
                            None,
                            None),
                        timeout=self._sniffer_timeout,
                        loop=self._loop)
                except ConnectionError:
                    continue
                try:
                    node_info = json.loads(node_info)
                except (TypeError, ValueError):
                    continue
                break
            else:
                raise TransportError("N/A", "Unable to sniff endpoints.")
        except:
            # keep the previous value on error
            self._last_sniff = previous_sniff
            raise

        endpoints = []
        address = 'http_address'
        for n in node_info['nodes'].values():
            match = self.ADDRESS_RE.search(n.get(address, ''))
            if not match:
                continue

            dct = match.groupdict()
            host = dct['host']
            if 'port' in dct:
                port = int(dct['port'])
            else:
                port = 9200
            scheme = dct.get('scheme') or DEFAULT_SCHEME
            attrs = n.get('attributes', {})
            if not (attrs.get('data', 'true') == 'false' and
                    attrs.get('client', 'false') == 'false' and
                    attrs.get('master', 'true') == 'true'):
                endpoints.append(Endpoint(scheme, host, port))

        # we weren't able to get any nodes, maybe using an incompatible
        # transport_schema or host_info_callback blocked all - raise error.
        if not endpoints:
            raise TransportError(
                "N/A",
                "Unable to sniff endpoints - no viable endpoints found.")

        self.endpoints = endpoints
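
Each sniffing request above is wrapped in asyncio.wait_for(), which cancels the pending coroutine and raises asyncio.TimeoutError when the node does not answer within the sniffer timeout. A minimal, self-contained sketch of that pattern (the coroutine names and timeout are illustrative):

import asyncio

@asyncio.coroutine
def slow_call():
    yield from asyncio.sleep(2)
    return 'done'

@asyncio.coroutine
def main():
    try:
        result = yield from asyncio.wait_for(slow_call(), timeout=0.5)
    except asyncio.TimeoutError:
        result = None  # treat a slow node like an unreachable one and move on
    return result

loop = asyncio.get_event_loop()
print(loop.run_until_complete(main()))  # prints None: the call timed out
loop.close()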

Example 67

Project: pycon2014 Source File: test_streams.py
Function: test_start_server
    def test_start_server(self):

        class MyServer:

            def __init__(self, loop):
                self.server = None
                self.loop = loop

            @asyncio.coroutine
            def handle_client(self, client_reader, client_writer):
                data = yield from client_reader.readline()
                client_writer.write(data)

            def start(self):
                sock = socket.socket()
                sock.bind(('127.0.0.1', 0))
                self.server = self.loop.run_until_complete(
                    asyncio.start_server(self.handle_client,
                                         sock=sock,
                                         loop=self.loop))
                return sock.getsockname()

            def handle_client_callback(self, client_reader, client_writer):
                task = asyncio.Task(client_reader.readline(), loop=self.loop)

                def done(task):
                    client_writer.write(task.result())

                task.add_done_callback(done)

            def start_callback(self):
                sock = socket.socket()
                sock.bind(('127.0.0.1', 0))
                addr = sock.getsockname()
                sock.close()
                self.server = self.loop.run_until_complete(
                    asyncio.start_server(self.handle_client_callback,
                                         host=addr[0], port=addr[1],
                                         loop=self.loop))
                return addr

            def stop(self):
                if self.server is not None:
                    self.server.close()
                    self.loop.run_until_complete(self.server.wait_closed())
                    self.server = None

        @asyncio.coroutine
        def client(addr):
            reader, writer = yield from asyncio.open_connection(
                *addr, loop=self.loop)
            # send a line
            writer.write(b"hello world!\n")
            # read it back
            msgback = yield from reader.readline()
            writer.close()
            return msgback

        # test the server variant with a coroutine as client handler
        server = MyServer(self.loop)
        addr = server.start()
        msg = self.loop.run_until_complete(asyncio.Task(client(addr),
                                                        loop=self.loop))
        server.stop()
        self.assertEqual(msg, b"hello world!\n")

        # test the server variant with a callback as client handler
        server = MyServer(self.loop)
        addr = server.start_callback()
        msg = self.loop.run_until_complete(asyncio.Task(client(addr),
                                                        loop=self.loop))
        server.stop()
        self.assertEqual(msg, b"hello world!\n")

Example 68

Project: aiohttp-cors Source File: test_main.py
    @asynctest
    @asyncio.coroutine
    def test_preflight_default(self):
        """Test CORS preflight requests with a route with the default
        configuration.

        The default configuration means that:
          * no credentials are allowed,
          * no headers are exposed,
          * no client headers are allowed.
        """

        client1 = "http://client1.example.org"
        client2 = "http://client2.example.org"

        tests_descriptions = [
            {
                "name": "default",
                "defaults": None,
                "route_config":
                    {
                        client1: ResourceOptions(),
                    },
                "tests": [
                    {
                        "name": "no origin",
                        "response_status": 403,
                        "in_response": "origin header is not specified",
                        "not_in_response_headers": {
                            hdrs.ACCESS_CONTROL_ALLOW_ORIGIN,
                            hdrs.ACCESS_CONTROL_ALLOW_CREDENTIALS,
                            hdrs.ACCESS_CONTROL_MAX_AGE,
                            hdrs.ACCESS_CONTROL_EXPOSE_HEADERS,
                            hdrs.ACCESS_CONTROL_ALLOW_METHODS,
                            hdrs.ACCESS_CONTROL_ALLOW_HEADERS,
                        },
                    },
                    {
                        "name": "no method",
                        "request_headers": {
                            hdrs.ORIGIN: client1,
                        },
                        "response_status": 403,
                        "in_response": "'Access-Control-Request-Method' "
                                       "header is not specified",
                        "not_in_response_headers": {
                            hdrs.ACCESS_CONTROL_ALLOW_ORIGIN,
                            hdrs.ACCESS_CONTROL_ALLOW_CREDENTIALS,
                            hdrs.ACCESS_CONTROL_MAX_AGE,
                            hdrs.ACCESS_CONTROL_EXPOSE_HEADERS,
                            hdrs.ACCESS_CONTROL_ALLOW_METHODS,
                            hdrs.ACCESS_CONTROL_ALLOW_HEADERS,
                        },
                    },
                    {
                        "name": "origin and method",
                        "request_headers": {
                            hdrs.ORIGIN: client1,
                            hdrs.ACCESS_CONTROL_REQUEST_METHOD: "GET",
                        },
                        "in_response_headers": {
                            hdrs.ACCESS_CONTROL_ALLOW_ORIGIN: client1,
                            hdrs.ACCESS_CONTROL_ALLOW_METHODS: "GET",
                        },
                        "not_in_response_headers": {
                            hdrs.ACCESS_CONTROL_ALLOW_CREDENTIALS,
                            hdrs.ACCESS_CONTROL_MAX_AGE,
                            hdrs.ACCESS_CONTROL_EXPOSE_HEADERS,
                            hdrs.ACCESS_CONTROL_ALLOW_HEADERS,
                        },
                    },
                    {
                        "name": "disallowed origin",
                        "request_headers": {
                            hdrs.ORIGIN: client2,
                            hdrs.ACCESS_CONTROL_REQUEST_METHOD: "GET",
                        },
                        "response_status": 403,
                        "in_response": "origin '{}' is not allowed".format(
                            client2),
                        "not_in_response_headers": {
                            hdrs.ACCESS_CONTROL_ALLOW_ORIGIN,
                            hdrs.ACCESS_CONTROL_ALLOW_CREDENTIALS,
                            hdrs.ACCESS_CONTROL_MAX_AGE,
                            hdrs.ACCESS_CONTROL_EXPOSE_HEADERS,
                            hdrs.ACCESS_CONTROL_ALLOW_METHODS,
                            hdrs.ACCESS_CONTROL_ALLOW_HEADERS,
                        },
                    },
                    {
                        "name": "disallowed method",
                        "request_headers": {
                            hdrs.ORIGIN: client1,
                            hdrs.ACCESS_CONTROL_REQUEST_METHOD: "POST",
                        },
                        "response_status": 403,
                        "in_response": "request method 'POST' is not allowed",
                        "not_in_response_headers": {
                            hdrs.ACCESS_CONTROL_ALLOW_ORIGIN,
                            hdrs.ACCESS_CONTROL_ALLOW_CREDENTIALS,
                            hdrs.ACCESS_CONTROL_MAX_AGE,
                            hdrs.ACCESS_CONTROL_EXPOSE_HEADERS,
                            hdrs.ACCESS_CONTROL_ALLOW_METHODS,
                            hdrs.ACCESS_CONTROL_ALLOW_HEADERS,
                        },
                    },
                    ],
            },
        ]

        yield from self._run_preflight_requests_tests(
            tests_descriptions, False)
        yield from self._run_preflight_requests_tests(
            tests_descriptions, True)

Example 69

Project: sofi Source File: sample.py
@asyncio.coroutine
def oninit(event):
    logging.info("MAIN")
    v = View()

    n = Navbar(brand="SOFI", fixed='top')
    n.addlink("LINK 1")
    n.addlink("LINK 2")
    n.addtext("Just some Text with a " + str(Anchor("link", cl='navbar-link')))
    n.addlink("LINK 2", active=True)

    b = Dropdown("Dropdown", align='right')
    b.addelement(DropdownItem('Item Header', header=True))
    b.addelement(DropdownItem('Item 1'))
    b.addelement(DropdownItem('Item 2', disabled=True))
    b.addelement(DropdownItem('', divider=True))
    b.addelement(DropdownItem('Item 3'))

    n.adddropdown(b)

    v.addelement(n)

    c = Container()
    tb = ButtonToolbar()
    bgrp = ButtonGroup()
    btnDe = Button("Default")
    btnP = Button("Primary", "primary", ident='clickme')
    btnI = Button("Info", "info")
    bgrp2 = ButtonGroup()
    btnS = Button("Success", "success")
    btnW = Button("Warning", "warning")
    btnDa = Button("Danger", "danger")

    r = Row()
    bgrp.addelement(btnDe)
    bgrp.addelement(btnP)
    bgrp.addelement(btnI)
    bgrp2.addelement(btnS)
    bgrp2.addelement(btnW)
    bgrp2.addelement(btnDa)
    tb.addelement(bgrp)
    tb.addelement(bgrp2)
    r.addelement(tb)
    c.addelement(r)

    c.newrow(Heading(2, "Dude!"))
    c.newrow(Paragraph("Where's My Car?", ident="fiddle"))

    bd = ButtonDropdown('A Dropdown', size='xs', dropup=True, split=True, severity="success")
    bd.addelement(DropdownItem('Item Header', header=True))
    bd.addelement(DropdownItem('Item 1'))
    bd.addelement(DropdownItem('Item 2', disabled=True))
    bd.addelement(DropdownItem('', divider=True))
    bd.addelement(DropdownItem('Item 3'))
    c.newrow(bd)

    r = Row()
    col = Column(count=3)
    p = Panel("Panel 1")
    col.addelement(p)
    r.addelement(col)

    col = Column(count=3)
    p = Panel("Panel 2", 'danger')
    col.addelement(p)
    r.addelement(col)

    c.newrow(Paragraph())
    c.addelement(r)

    v.addelement(c)

    app.load(str(v))

Example 70

Project: aiokafka Source File: producer.py
    @asyncio.coroutine
    def _send_produce_req(self, node_id, batches):
        """Create produce request to node
        If the producer is configured with `retries` > 0 and the produce
        response contains "failed" partitions, the produce request for those
        partitions is resent to the broker up to `retries` times, waiting
        `retry_timeout_ms` between attempts.

        Arguments:
            node_id (int): kafka broker identifier
            batches (dict): dictionary of {TopicPartition: MessageBatch}
        """
        self._in_flight.add(node_id)
        t0 = self._loop.time()
        while True:
            topics = collections.defaultdict(list)
            for tp, batch in batches.items():
                topics[tp.topic].append((tp.partition, batch.data()))

            if self.client.api_version >= (0, 10):
                version = 2
            elif self.client.api_version == (0, 9):
                version = 1
            else:
                version = 0

            request = ProduceRequest[version](
                required_acks=self._acks,
                timeout=self._request_timeout_ms,
                topics=list(topics.items()))

            try:
                response = yield from self.client.send(node_id, request)
            except KafkaError as err:
                for batch in batches.values():
                    if not err.retriable or batch.expired():
                        batch.done(exception=err)
                log.warning(
                    "Got error produce response: %s", err)
                if not err.retriable:
                    break
            else:
                if response is None:
                    # noacks, just "done" batches
                    for batch in batches.values():
                        batch.done()
                    break

                for topic, partitions in response.topics:
                    for partition_info in partitions:
                        if response.API_VERSION < 2:
                            partition, error_code, offset = partition_info
                        else:
                            partition, error_code, offset, _ = partition_info
                        tp = TopicPartition(topic, partition)
                        error = Errors.for_code(error_code)
                        batch = batches.pop(tp, None)
                        if batch is None:
                            continue

                        if error is Errors.NoError:
                            batch.done(offset)
                        elif not getattr(error, 'retriable', False) or \
                                batch.expired():
                            batch.done(exception=error())
                        else:
                            # Ok, we can retry this batch
                            batches[tp] = batch
                            log.warning(
                                "Got error produce response on topic-partition"
                                " %s, retrying. Error: %s", tp, error)

            if batches:
                yield from asyncio.sleep(
                    self._retry_backoff, loop=self._loop)
            else:
                break

        # if the batches for this node were processed in less than the linger
        # time, wait out the remaining time
        sleep_time = self._linger_time - (self._loop.time() - t0)
        if sleep_time > 0:
            yield from asyncio.sleep(sleep_time, loop=self._loop)

        self._in_flight.remove(node_id)
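
The send loop above combines two timing behaviours: a fixed backoff sleep between retries of failed batches, and a final "linger" sleep so a node is not hit more often than the configured linger interval. A reduced sketch of that retry-then-linger shape, with illustrative names and timings rather than aiokafka's actual configuration:

import asyncio

@asyncio.coroutine
def send_with_retry(do_send, retry_backoff=0.1, linger=0.05, loop=None):
    # do_send is a coroutine function returning True on success
    loop = loop or asyncio.get_event_loop()
    t0 = loop.time()
    while True:
        ok = yield from do_send()
        if ok:
            break
        yield from asyncio.sleep(retry_backoff, loop=loop)
    # if sending finished faster than the linger window, wait out the rest
    remaining = linger - (loop.time() - t0)
    if remaining > 0:
        yield from asyncio.sleep(remaining, loop=loop)

@asyncio.coroutine
def flaky_send(state={'attempts': 0}):
    state['attempts'] += 1
    return state['attempts'] >= 3  # succeed on the third attempt

loop = asyncio.get_event_loop()
loop.run_until_complete(send_with_retry(flaky_send, loop=loop))
loop.close()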

Example 71

Project: CloudBot Source File: bot.py
    @asyncio.coroutine
    def process(self, event):
        """
        :type event: Event
        """
        run_before_tasks = []
        tasks = []
        command_prefix = event.conn.config.get('command_prefix', '.')

        # Raw IRC hook
        for raw_hook in self.plugin_manager.catch_all_triggers:
            # run catch-all coroutine hooks before all others - TODO: Make this a plugin argument
            if not raw_hook.threaded:
                run_before_tasks.append(
                    self.plugin_manager.launch(raw_hook, Event(hook=raw_hook, base_event=event)))
            else:
                tasks.append(self.plugin_manager.launch(raw_hook, Event(hook=raw_hook, base_event=event)))
        if event.irc_command in self.plugin_manager.raw_triggers:
            for raw_hook in self.plugin_manager.raw_triggers[event.irc_command]:
                tasks.append(self.plugin_manager.launch(raw_hook, Event(hook=raw_hook, base_event=event)))

        # Event hooks
        if event.type in self.plugin_manager.event_type_hooks:
            for event_hook in self.plugin_manager.event_type_hooks[event.type]:
                tasks.append(self.plugin_manager.launch(event_hook, Event(hook=event_hook, base_event=event)))

        if event.type is EventType.message:
            # Commands
            if event.chan.lower() == event.nick.lower():  # private message, no command prefix
                command_re = r'(?i)^(?:[{}]?|{}[,;:]+\s+)(\w+)(?:$|\s+)(.*)'.format(command_prefix, event.conn.nick)
            else:
                command_re = r'(?i)^(?:[{}]|{}[,;:]+\s+)(\w+)(?:$|\s+)(.*)'.format(command_prefix, event.conn.nick)

            cmd_match = re.match(command_re, event.content)

            if cmd_match:
                command = cmd_match.group(1).lower()
                if command in self.plugin_manager.commands:
                    command_hook = self.plugin_manager.commands[command]
                    command_event = CommandEvent(hook=command_hook, text=cmd_match.group(2).strip(),
                                             triggered_command=command, base_event=event)
                    tasks.append(self.plugin_manager.launch(command_hook, command_event))
                else:
                    potential_matches = []
                    for potential_match, plugin in self.plugin_manager.commands.items():
                        if potential_match.startswith(command):
                            potential_matches.append((potential_match, plugin))
                    if potential_matches:
                        if len(potential_matches) == 1:
                            command_hook = potential_matches[0][1]
                            command_event = CommandEvent(hook=command_hook, text=cmd_match.group(2).strip(),
                                                     triggered_command=command, base_event=event)
                            tasks.append(self.plugin_manager.launch(command_hook, command_event))
                        else:
                            event.notice("Possible matches: {}".format(
                                formatting.get_text_list([command for command, plugin in potential_matches])))

            # Regex hooks
            for regex, regex_hook in self.plugin_manager.regex_hooks:
                if not regex_hook.run_on_cmd and cmd_match:
                    pass
                else:
                    regex_match = regex.search(event.content)
                    if regex_match:
                        regex_event = RegexEvent(hook=regex_hook, match=regex_match, base_event=event)
                        tasks.append(self.plugin_manager.launch(regex_hook, regex_event))

        # Run the tasks
        yield from asyncio.gather(*run_before_tasks, loop=self.loop)
        yield from asyncio.gather(*tasks, loop=self.loop)
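
The command dispatch above builds a regular expression from the configured command prefix and the bot's nick, then reads the command word and the trailing text out of the two capture groups. A reduced sketch of that matching in isolation (the prefix, nick and message are illustrative):

import re

command_prefix = '.'
nick = 'mybot'
command_re = r'(?i)^(?:[{}]|{}[,;:]+\s+)(\w+)(?:$|\s+)(.*)'.format(command_prefix, nick)

match = re.match(command_re, '.weather Paris')
assert match.group(1).lower() == 'weather'   # the command
assert match.group(2).strip() == 'Paris'     # the argument text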

Example 72

Project: CloudBot Source File: plugin.py
Function: load_plugin
    @asyncio.coroutine
    def load_plugin(self, path):
        """
        Loads a plugin from the given path, then registers all hooks from that plugin.

        Won't load any plugins listed in "disabled_plugins".

        :type path: str
        """

        file_path = os.path.abspath(path)
        file_name = os.path.basename(path)
        title = os.path.splitext(file_name)[0]

        if "plugin_loading" in self.bot.config:
            pl = self.bot.config.get("plugin_loading")

            if pl.get("use_whitelist", False):
                if title not in pl.get("whitelist", []):
                    logger.info('Not loading plugin module "{}": plugin not whitelisted'.format(file_name))
                    return
            else:
                if title in pl.get("blacklist", []):
                    logger.info('Not loading plugin module "{}": plugin blacklisted'.format(file_name))
                    return

        # make sure to unload the previously loaded plugin from this path, if it was loaded.
        if file_name in self.plugins:
            yield from self.unload_plugin(file_path)

        module_name = "plugins.{}".format(title)
        try:
            plugin_module = importlib.import_module(module_name)
            # if this plugin was loaded before, reload it
            if hasattr(plugin_module, "_cloudbot_loaded"):
                importlib.reload(plugin_module)
        except Exception:
            logger.exception("Error loading {}:".format(file_name))
            return

        # create the plugin
        plugin = Plugin(file_path, file_name, title, plugin_module)

        # proceed to register hooks

        # create database tables
        yield from plugin.create_tables(self.bot)

        # run on_start hooks
        for on_start_hook in plugin.run_on_start:
            success = yield from self.launch(on_start_hook, Event(bot=self.bot, hook=on_start_hook))
            if not success:
                logger.warning("Not registering hooks from plugin {}: on_start hook errored".format(plugin.title))

                # unregister databases
                plugin.unregister_tables(self.bot)
                return

        self.plugins[plugin.file_name] = plugin

        for periodic_hook in plugin.periodic:
            asyncio.async(self._start_periodic(periodic_hook))
            self._log_hook(periodic_hook)


        # register commands
        for command_hook in plugin.commands:
            for alias in command_hook.aliases:
                if alias in self.commands:
                    logger.warning(
                        "Plugin {} attempted to register command {} which was already registered by {}. "
                        "Ignoring new assignment.".format(plugin.title, alias, self.commands[alias].plugin.title))
                else:
                    self.commands[alias] = command_hook
            self._log_hook(command_hook)

        # register raw hooks
        for raw_hook in plugin.raw_hooks:
            if raw_hook.is_catch_all():
                self.catch_all_triggers.append(raw_hook)
            else:
                for trigger in raw_hook.triggers:
                    if trigger in self.raw_triggers:
                        self.raw_triggers[trigger].append(raw_hook)
                    else:
                        self.raw_triggers[trigger] = [raw_hook]
            self._log_hook(raw_hook)

        # register events
        for event_hook in plugin.events:
            for event_type in event_hook.types:
                if event_type in self.event_type_hooks:
                    self.event_type_hooks[event_type].append(event_hook)
                else:
                    self.event_type_hooks[event_type] = [event_hook]
            self._log_hook(event_hook)

        # register regexps
        for regex_hook in plugin.regexes:
            for regex_match in regex_hook.regexes:
                self.regex_hooks.append((regex_match, regex_hook))
            self._log_hook(regex_hook)

        # register sieves
        for sieve_hook in plugin.sieves:
            self.sieves.append(sieve_hook)
            self._log_hook(sieve_hook)

        # sort sieve hooks by priority
        self.sieves.sort(key=lambda x: x.priority)

        # we don't need this anymore
        del plugin.run_on_start
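
The loader relies on importlib both for the initial import and for re-importing a plugin module that was loaded before. The load-or-reload step in isolation looks roughly like this (the module name and marker attribute are illustrative, not CloudBot's):

import importlib

def load_or_reload(module_name):
    """Import a module by dotted name, reloading it if it was imported before."""
    module = importlib.import_module(module_name)
    if getattr(module, '_loaded_before', False):
        module = importlib.reload(module)
    module._loaded_before = True
    return module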

Example 73

Project: haproxystats Source File: pull.py
Function: get
@asyncio.coroutine
def get(socket_file, cmd, storage_dir, loop, executor, config):
    """
    Fetch data from a UNIX socket.

    Sends a command to HAProxy over UNIX socket, reads the response and then
    offloads the writing of the received data to a thread, so we don't block
    this coroutine.

    Arguments:
        socket_file (str): The full path of the UNIX socket file to connect to.
        cmd (str): The command to send.
        storage_dir (str): The full path of the directory to save the response.
        loop (obj): A base event loop from asyncio module.
        executor (obj): An executor (e.g. a thread pool) used to run blocking
                        calls asynchronously.
        config (obj): A configParser object which holds configuration.

    Returns:
        True if statistics from the UNIX socket are saved, False otherwise.
    """
    # try to connect to the UNIX socket
    log.debug('connecting to UNIX socket %s', socket_file)
    retries = config.getint('pull', 'retries')
    timeout = config.getfloat('pull', 'timeout')
    interval = config.getfloat('pull', 'interval')
    attempt = 0  # times to attempt a connect after a failure
    raised = None

    if retries == -1:
        attempt = -1  # -1 means retry indefinitely
    elif retries == 0:
        attempt = 1  # Zero means don't retry
    else:
        attempt = retries + 1  # any other value means retry N times
    while attempt != 0:
        if raised:  # an exception was raised; sleep before the next retry
            log.error('caught "%s" when connecting to UNIX socket %s, '
                      'remaining tries %s, sleeping for %.2f seconds',
                      raised, socket_file, attempt, interval)
            yield from asyncio.sleep(interval)
        try:
            connect = asyncio.open_unix_connection(socket_file)
            reader, writer = yield from asyncio.wait_for(connect, timeout)
        except (ConnectionRefusedError, PermissionError, asyncio.TimeoutError,
                OSError) as exc:
            raised = exc
        else:
            log.debug('connection established to UNIX socket %s', socket_file)
            raised = None
            break

        attempt -= 1

    if raised is not None:
        log.error('failed to connect to UNIX socket %s after %s retries',
                  socket_file, retries)
        return False
    else:
        log.debug('connection established to UNIX socket %s', socket_file)

    log.debug('sending command "%s" to UNIX socket %s', cmd, socket_file)
    writer.write('{c}\n'.format(c=cmd).encode())
    data = yield from reader.read()
    writer.close()

    if len(data) == 0:
        log.critical('received zero data')
        return False

    log.debug('received data from UNIX socket %s', socket_file)

    suffix = CMD_SUFFIX_MAP.get(cmd.split()[1])
    filename = os.path.basename(socket_file) + suffix
    filename = os.path.join(storage_dir, filename)
    log.debug('going to save data to %s', filename)
    # Offload the writing to a thread so we don't block ourselves.

    def write_file():
        """
        Write data to a file.

        Returns:
            True on success, False otherwise.
        """
        try:
            with open(filename, 'w') as file_handle:
                file_handle.write(data.decode())
        except OSError as exc:
            log.critical('failed to write data %s', exc)
            return False
        else:
            log.debug('data saved in %s', filename)
            return True

    result = yield from loop.run_in_executor(executor, write_file)

    return result
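
The blocking file write above is handed to loop.run_in_executor(), so the event loop keeps serving other coroutines while the write runs in a worker thread. The same pattern in a self-contained form (the path, payload and pool size are illustrative):

import asyncio
from concurrent.futures import ThreadPoolExecutor

def blocking_write(path, payload):
    # plain synchronous I/O; must not be called directly from a coroutine
    with open(path, 'w') as handle:
        handle.write(payload)
    return True

@asyncio.coroutine
def save(path, payload, loop, executor):
    ok = yield from loop.run_in_executor(executor, blocking_write, path, payload)
    return ok

loop = asyncio.get_event_loop()
executor = ThreadPoolExecutor(max_workers=2)
print(loop.run_until_complete(save('/tmp/example.txt', 'hello\n', loop, executor)))
loop.close()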

Example 74

Project: ktcal2 Source File: ssh_brute.py
Function: check_credentials
@asyncio.coroutine
def _check_credentials(target, credentials, delay, port=22, display_func=None,
                       verbosity_level=0):
    """
    Check a concrete credential.
    
    :param target: target with SSH service running
    :type target: str

    :param credentials: Generator as format: (user, password)
    :type credentials: generator

    :param port: target port
    :type port: int
    """
    
    while 1:
        try:
            global counter
            
            try:
                user, password = next(itertools.islice(credentials, counter, counter + 1))
            except StopIteration:
                break
            counter += 1
            
            if user is None or password is None:
                break
            
            if display_func is not None:
                if verbosity_level > 1:
                    msg = "\r [*] Testing... %s credentials (%s:%s)%s" % (counter, user, password, " " * 4)
                    display_func(msg)
                else:
                    msg = "\r [*] Testing... %s credentials%s" % (counter, " " * 4)
                    if counter % 4 == 0:
                        display_func(msg)
            
            conn, client = yield from asyncssh.create_connection(None,
                                                                 host=target,
                                                                 port=port,
                                                                 username=user,
                                                                 password=password,
                                                                 server_host_keys=None)
            
            # If no exception was raised -> user/password found
            global result_password, result_user, loop
            result_user = user
            result_password = password
            
            loop.stop()
        except (asyncssh.misc.DisconnectError, ConnectionResetError) as e:
            yield from asyncio.sleep(delay)

Example 75

Project: fcatalog_server Source File: test_frame_endpoint.py
def test_tcp_adapter_frame_struct(tloop):
    """
    Test send/recv of length prefixed frames over TCP with the
    TCPFrameEndpoint.
    """
    # List of results:
    res = []

    addr,port = 'localhost',8767

    @asyncio.coroutine
    def server_handler(reader,writer):
        """Echo server"""

        tfe = TCPFrameEndpoint(reader,writer)

        # Read a frame:
        frame = yield from tfe.recv()
        assert frame == b'abc'

        # Read a frame:
        frame = yield from tfe.recv()
        assert frame == b''

        # Read a frame:
        frame = yield from tfe.recv()
        assert frame == b'abcd'

        # Read a frame:
        frame = yield from tfe.recv()
        assert frame == b'abcd'
        
        res.append('sending_frame')
        # Write a frame:
        yield from tfe.send(b'1234')

        # Last frame was cut in the middle (The connection was closed),
        # therefore we expect to get a None here:
        frame = yield from tfe.recv()
        assert frame is None

        res.append('got_none')

    @asyncio.coroutine
    def client():
        reader, writer = yield from \
                asyncio.open_connection(host=addr,port=port)

        # Write b'abc':
        writer.write(b'\x03\x00\x00\x00abc')
        yield from writer.drain()

        # Write empty frame:
        writer.write(b'\x00\x00\x00\x00')
        yield from writer.drain()

        # Write b'abcd':
        writer.write(b'\x04\x00\x00\x00abcd')
        yield from writer.drain()

        # Write b'abcd' in two parts:
        writer.write(b'\x04\x00\x00')
        yield from writer.drain()
        writer.write(b'\x00abcd')
        yield from writer.drain()

        # Read a frame from the server:
        len_prefix = yield from reader.readexactly(4)
        msg_len = struct.unpack('I',len_prefix)[0]
        frame = yield from reader.readexactly(msg_len)
        assert frame == b'1234'

        # Send half a frame:
        writer.write(b'\x00\x00\x00')
        yield from writer.drain()

        # Append True to list of results:
        res.append('client_close')

        # Close client:
        writer.close()


    # Start server:
    start_server = asyncio.start_server(server_handler,host=addr,port=port,reuse_address=True)
    server_task = run_timeout(start_server,tloop)

    # Start client:
    run_timeout(client(),tloop)

    # Close server:
    server_task.close()
    # Wait until server is closed:
    run_timeout(server_task.wait_closed(),loop=tloop)

    assert res == ['sending_frame','client_close','got_none']
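
The raw byte strings the client writes (b'\x03\x00\x00\x00abc' and so on) are frames with a 4-byte little-endian length prefix followed by the payload, which is the framing the TCPFrameEndpoint on the server side appears to parse. Building and splitting such frames with struct looks like this (a sketch of the assumed framing, not part of the fcatalog API):

import struct

def pack_frame(payload):
    # 4-byte little-endian length prefix followed by the payload bytes
    return struct.pack('<I', len(payload)) + payload

def unpack_frame(data):
    (length,) = struct.unpack('<I', data[:4])
    return data[4:4 + length]

assert pack_frame(b'abc') == b'\x03\x00\x00\x00abc'
assert unpack_frame(b'\x04\x00\x00\x00abcd') == b'abcd'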

Example 76

Project: aioh2 Source File: test_aioh2.py
Function: test_priority
    @async_test(timeout=8)
    def test_priority(self):
        self.conn.update_settings({
            MAX_FRAME_SIZE: 16384,
            INITIAL_WINDOW_SIZE: 16384 * 1024 * 32,
        })
        event = yield from self._expect_events()
        self.assertIsInstance(event[0], SettingsAcknowledged)
        event = yield from self.server.events.get()
        self.assertIsInstance(event, RemoteSettingsChanged)

        stream_1 = yield from self._send_headers()
        yield from self.server.start_response(stream_1, {'a': '1'})
        events = yield from self._expect_events()
        self.assertIsInstance(events[0], ResponseReceived)

        stream_2 = yield from self._send_headers()
        yield from self.server.start_response(stream_2, {'a': '2'})
        events = yield from self._expect_events()
        self.assertIsInstance(events[0], ResponseReceived)

        p1 = 32
        p2 = 20

        self.server.reprioritize(stream_1, weight=p1)
        self.server.reprioritize(stream_2, weight=p2)
        self.server.pause_writing()

        running = [True]

        @asyncio.coroutine
        def _write(stream_id):
            count = 0
            while running[0]:
                yield from self.server.send_data(stream_id, b'x')
                count += 1
            yield from self.server.end_stream(stream_id)
            return count

        task_1 = asyncio.async(_write(stream_1))
        task_2 = asyncio.async(_write(stream_2))

        for i in range(1000):
            self.server.resume_writing()
            yield from asyncio.sleep(0.004)
            self.server.pause_writing()
            yield from asyncio.sleep(0.001)

        running[0] = False
        self.server.resume_writing()

        count_1 = yield from task_1
        count_2 = yield from task_2

        self.assertAlmostEqual(count_1 / count_2, p1 / p2, 1)

Example 77

Project: dcos Source File: action_lib.py
@asyncio.coroutine
def install_dcos(config, block=False, state_json_dir=None, hosts=None, async_delegate=None, try_remove_stale_dcos=False,
                 **kwargs):
    if hosts is None:
        hosts = []
    assert isinstance(hosts, list)

    # Role specific parameters
    role_params = {
        'master': {
            'tags': {'role': 'master', 'dcos_install_param': 'master'},
            'hosts': config['master_list']
        },
        'agent': {
            'tags': {'role': 'agent', 'dcos_install_param': 'slave'},
            'hosts': config.hacky_default_get('agent_list', [])
        },
        'public_agent': {
            'tags': {'role': 'public_agent', 'dcos_install_param': 'slave_public'},
            'hosts': config.hacky_default_get('public_agent_list', [])
        }
    }

    bootstrap_tarball = _get_bootstrap_tarball()
    log.debug("Local bootstrap found: %s", bootstrap_tarball)

    targets = []
    if hosts:
        targets = hosts
    else:
        for role, params in role_params.items():
            targets += [Node(node, params['tags']) for node in params['hosts']]

    runner = get_async_runner(config, targets, async_delegate=async_delegate)
    chains = []
    if try_remove_stale_dcos:
        pkgpanda_uninstall_chain = ssh.utils.CommandChain('remove_stale_dcos')
        pkgpanda_uninstall_chain.add_execute(['sudo', '-i', '/opt/mesosphere/bin/pkgpanda', 'uninstall'],
                                             stage='Trying pkgpanda uninstall')
        chains.append(pkgpanda_uninstall_chain)

        remove_dcos_chain = ssh.utils.CommandChain('remove_stale_dcos')
        remove_dcos_chain.add_execute(['rm', '-rf', '/opt/mesosphere', '/etc/mesosphere'],
                                      stage="Removing DC/OS files")
        chains.append(remove_dcos_chain)

    chain = ssh.utils.CommandChain('deploy')
    chains.append(chain)

    add_pre_action(chain, runner.ssh_user)
    _add_copy_dcos_install(chain)
    _add_copy_packages(chain)
    _add_copy_bootstap(chain, bootstrap_tarball)

    chain.add_execute(
        lambda node: (
            'sudo bash {}/dcos_install.sh {}'.format(REMOTE_TEMP_DIR, node.tags['dcos_install_param'])).split(),
        stage=lambda node: 'Installing DC/OS'
    )

    # UI expects total_masters, total_agents to be top level keys in deploy.json
    delegate_extra_params = nodes_count_by_type(config)
    if kwargs.get('retry') and state_json_dir:
        state_file_path = os.path.join(state_json_dir, 'deploy.json')
        log.debug('retry executed for a state file deploy.json')
        for _host in hosts:
            _remove_host(state_file_path, '{}:{}'.format(_host.ip, _host.port))

        # We also need to update total number of hosts
        json_state = _read_state_file(state_file_path)
        delegate_extra_params['total_hosts'] = json_state['total_hosts']

    # Setup the cleanup chain
    cleanup_chain = ssh.utils.CommandChain('deploy_cleanup')
    add_post_action(cleanup_chain)
    chains.append(cleanup_chain)

    result = yield from runner.run_commands_chain_async(chains, block=block, state_json_dir=state_json_dir,
                                                        delegate_extra_params=delegate_extra_params)
    return result

Example 78

Project: pyzmq Source File: asyncio-ironhouse.py
@asyncio.coroutine
def run():
    ''' Run Ironhouse example '''

    # These directories are generated by the generate_certificates script
    base_dir = os.path.dirname(__file__)
    keys_dir = os.path.join(base_dir, 'certificates')
    public_keys_dir = os.path.join(base_dir, 'public_keys')
    secret_keys_dir = os.path.join(base_dir, 'private_keys')

    if not (os.path.exists(keys_dir) and
            os.path.exists(public_keys_dir) and
            os.path.exists(secret_keys_dir)):
        logging.critical("Certificates are missing - run generate_certificates.py script first")
        sys.exit(1)

    ctx = Context.instance()

    # Start an authenticator for this context.
    auth = AsyncioAuthenticator(ctx)
    auth.start()
    auth.allow('127.0.0.1')
    # Tell the authenticator to use the certificates in this directory
    auth.configure_curve(domain='*', location=public_keys_dir)

    server = ctx.socket(zmq.PUSH)

    server_secret_file = os.path.join(secret_keys_dir, "server.key_secret")
    server_public, server_secret = zmq.auth.load_certificate(server_secret_file)
    server.curve_secretkey = server_secret
    server.curve_publickey = server_public
    server.curve_server = True  # must come before bind
    server.bind('tcp://*:9000')

    client = ctx.socket(zmq.PULL)

    # We need two certificates, one for the client and one for
    # the server. The client must know the server's public key
    # to make a CURVE connection.
    client_secret_file = os.path.join(secret_keys_dir, "client.key_secret")
    client_public, client_secret = zmq.auth.load_certificate(client_secret_file)
    client.curve_secretkey = client_secret
    client.curve_publickey = client_public

    server_public_file = os.path.join(public_keys_dir, "server.key")
    server_public, _ = zmq.auth.load_certificate(server_public_file)
    # The client must know the server's public key to make a CURVE connection.
    client.curve_serverkey = server_public
    client.connect('tcp://127.0.0.1:9000')

    yield from server.send(b"Hello")

    if (yield from client.poll(1000)):
        msg = yield from client.recv()
        if msg == b"Hello":
            logging.info("Ironhouse test OK")
    else:
        logging.error("Ironhouse test FAIL")


    # close sockets
    server.close()
    client.close()
    # stop auth task
    auth.stop()

Example 79

Project: ika Source File: channel_flags.py
Function: execute
    @asyncio.coroutine
    def execute(self, user, name, target, flags):
        session = Session()

        channel = Channel.find_by_name(name)

        if channel is None:
            if user.is_operator:
                # "The channel {} is not registered on the Ozinger IRC network."
                self.service.msg(user, '해당 채널 \x02{}\x02 은 오징어 IRC 네트워크에 등록되어 있지 않습니다.', name)
            else:
                # "You do not have permission to run this command."
                self.service.msg(user, '해당 명령을 실행할 권한이 없습니다.')
            return

        if (channel.get_flags_by_user(user) & Flags.OWNER) == 0:
            if not user.is_operator:
                # "You do not have permission to run this command."
                self.service.msg(user, '해당 명령을 실행할 권한이 없습니다.')
                return

        if (target is None) or (flags is None):
            # "=== Flag information for channel {} ==="
            self.service.msg(user, '\x02=== {} 채널 권한 정보 ===\x02', channel.name)
            self.service.msg(user, ' ')

            for flag in channel.flags:
                flags_str = ''.join(map(lambda x: x[1] if (flag.type & x[0]) != 0 else '', self.flagmap.items()))
                # "{target}  {flags}  (last changed on {timestamp})"
                self.service.msg(user, '  \x02{:<32}\x02 {:<16} ({} 에 마지막으로 변경됨)',
                                 flag.target, flags_str, flag.created_on)
        else:
            flag = session.query(Flag).filter(
                (Flag.channel_id == channel.id) & (func.lower(Flag.target) == func.lower(target))).first()
            if flag is None:
                type = 0
            else:
                type = flag.type

            for _flag in flags:
                if _flag == '+':
                    is_adding = True
                elif _flag == '-':
                    is_adding = False
                else:
                    flagnum = int(self.reverse_flagmap[_flag])
                    if is_adding:
                        type |= flagnum
                    else:
                        type &= ~flagnum

            if type == 0:
                if flag is not None:
                    session.delete(flag)
                    session.commit()
                    # "Removed the flags for target {} on channel {}."
                    self.service.msg(user, '\x02{}\x02 채널의 \x02{}\x02 대상의 권한을 제거했습니다.', name, target)
                else:
                    # "There are no flags that can be set."
                    self.service.msg(user, '설정될 수 있는 권한이 없습니다.')
            else:
                if flag is None:
                    flag = Flag()
                    flag.channel = channel
                    flag.target = target
                flag.type = type

                session.add(flag)
                session.commit()

                # "Set the requested flags for target {} on channel {}."
                self.service.msg(user, '\x02{}\x02 채널의 \x02{}\x02 대상에게 해당 권한을 설정했습니다.', name, target)

Example 80

Project: Vase Source File: handlers.py
Function: handle
    @asyncio.coroutine
    def handle(self, request, writer):
        new = self._session.is_new
        if self._session.is_new:
            self._session.is_new = False
            self._session.endpoint.on_connect()

        origin = request.get('origin', 'null')
        if origin == 'null':
            origin = '*'

        writer.status = 200
        writer.add_headers(
            ('Content-Type', 'application/javascript;charset=UTF-8'),
            ('Access-Control-Allow-Origin', origin),
            ('Access-Control-Allow-Credentials', 'true'),
            ('Transfer-Encoding', 'chunked'),
            ('Cache-Control', 'no-store, no-cache, must-revalidate, max-age=0')
        )

        body = (hex(2049)[2:] + '\r\n' + 'h' * 2048 + '\n\r\n').encode('utf-8')
        writer.write_body(body)
        if new:
            writer.write_body(b'2\r\no\n\r\n')

        if self._session.closed:
            msg = b'c[3000,"Go away!"]\n'
            length = hex(len(msg)).encode('utf-8')
            writer.write_body(length + b'\r\n' + msg + b'\r\n')
            writer.write_body(b'0\r\n\r\n')
            writer.close()
            return

        written = 0
        written += self._send_messages(writer)
        while True:
            self._session.waiter = Future()
            yield from self._session.waiter
            written += self._send_messages(writer)
            if written >= 4096:
                writer.write(b'0\r\n\r\n')
                writer.close()
                break
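
The byte strings written above are hand-rolled HTTP chunked-transfer chunks: a hexadecimal length, CRLF, the chunk body, CRLF, with a zero-length chunk (b'0\r\n\r\n') terminating the stream. A small helper showing the same encoding (illustrative, not part of the Vase API):

def http_chunk(payload):
    """Encode one chunk for a Transfer-Encoding: chunked response body."""
    return hex(len(payload))[2:].encode('ascii') + b'\r\n' + payload + b'\r\n'

assert http_chunk(b'o\n') == b'2\r\no\n\r\n'   # the open frame written above
terminator = b'0\r\n\r\n'                      # sent when the chunked body is complete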

Example 81

Project: aiocouchdb Source File: client.py
Function: request
@asyncio.coroutine
def request(method, url, *,
            allow_redirects=True,
            compress=None,
            connector=None,
            cookies=None,
            data=None,
            encoding='utf-8',
            expect100=False,
            headers=None,
            loop=None,
            max_redirects=10,
            params=None,
            read_until_eof=True,
            request_class=None,
            response_class=None,
            version=aiohttp.HttpVersion11):

    redirects = 0
    method = method.upper()
    connector = connector or aiohttp.TCPConnector(force_close=True, loop=loop)
    request_class = request_class or HttpRequest
    response_class = response_class or HttpResponse

    while True:
        req = request_class(method, url,
                            compress=compress,
                            cookies=cookies,
                            data=data,
                            encoding=encoding,
                            expect100=expect100,
                            headers=headers,
                            loop=loop,
                            params=params,
                            response_class=response_class,
                            version=version)

        conn = yield from connector.connect(req)
        try:
            resp = req.send(conn.writer, conn.reader)
            try:
                yield from resp.start(conn, read_until_eof)
            except:
                resp.close()
                conn.close()
                raise
        except (aiohttp.HttpProcessingError,
                aiohttp.ServerDisconnectedError) as exc:
            raise aiohttp.ClientResponseError() from exc
        except OSError as exc:
            raise aiohttp.ClientOSError() from exc

        # redirects
        if allow_redirects and resp.status in {301, 302, 303, 307}:
            redirects += 1
            if max_redirects and redirects >= max_redirects:
                resp.close(force=True)
                break

            # For 301 and 302, mimic IE behaviour, now changed in RFC.
            # Details: https://github.com/kennethreitz/requests/pull/269
            if resp.status != 307:
                method = METH_GET
                data = None

            r_url = (resp.headers.get(LOCATION) or
                     resp.headers.get(URI))

            scheme = urllib.parse.urlsplit(r_url)[0]
            if scheme not in ('http', 'https', ''):
                resp.close(force=True)
                raise ValueError('Can redirect only to http or https')
            elif not scheme:
                r_url = urllib.parse.urljoin(url, r_url)

            url = urllib.parse.urldefrag(r_url)[0]
            if url:
                yield from asyncio.async(resp.release(), loop=loop)
                continue

        break

    return resp
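
The redirect branch above normalises the Location header before re-issuing the request: only http(s) targets are accepted, relative URLs are resolved against the original request URL, and the fragment is stripped. The same normalisation in isolation (URLs are illustrative):

import urllib.parse

def normalize_redirect(base_url, location):
    scheme = urllib.parse.urlsplit(location)[0]
    if scheme not in ('http', 'https', ''):
        raise ValueError('Can redirect only to http or https')
    if not scheme:  # relative redirect
        location = urllib.parse.urljoin(base_url, location)
    return urllib.parse.urldefrag(location)[0]

assert normalize_redirect('http://example.org/a/b', '/c#frag') == 'http://example.org/c'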

Example 82

Project: induction Source File: app.py
Function: handle_request
    @asyncio.coroutine
    def handle_request(self, request, response, payload):
        # Apply request processors
        for func in self._before_request:
            before = func(request, response)
            if yields(before):
                before = yield from before
            if before is not None:
                data = before
                fn_name = func.__name__
                need_response = True
                break
        else:
            match = self._routes.match(request.path)
            _self = 0
            if match is None:
                handler = self.handle_404
                _self = 1
            else:
                handler = match.pop('_induction_handler')
            request.kwargs = match or {}

            # 3 arities supported in handlers:
            #
            # - handler(request)
            #   Handler must return response data or a response tuple.
            #
            # - handler(request, response)
            #   Handler can write stuff in response or return data that gets
            #   written to the response (str or bytes, or tuple of (response,
            #   status, headers) or (response, headers)).
            #
            # - handler(request, response, payload)
            #   The payload is passed when the handler needs it.

            args = [request]
            need_response = False
            fn_name = handler.__name__

            spec = inspect.getargspec(handler)
            argc = len(spec.args) - _self
            if argc == 1:
                need_response = True
            elif argc >= 2:
                args.append(response)
            if argc == 3:
                args.append(payload)

            data = handler(*args)

        try:
            yield from self.handle_data(data, response, need_response, fn_name)
        except Exception as e:
            handler = self._error_handlers.get(type(e))
            if handler is None:
                handler = self._error_handlers.get(None)
                if handler is None:
                    raise
            data = handler(request, response, e)
            if data is None:
                data, headers = error(500)
                response.set_status(500)
                response.add_headers(*headers.items())
                response.write(data)
            else:
                yield from self.handle_data(data, response, True,
                                            handler.__name__)

        for func in self._after_request:
            after = func(request, response)
            if yields(after):
                yield from after
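
The dispatch above inspects the handler's signature and passes only as many of (request, response, payload) as the handler declares. A reduced sketch of that arity-based call, using inspect.signature instead of the getargspec call used above (plain functions only; the original also adjusts for bound methods):

import inspect

def call_with_arity(handler, *available_args):
    """Call handler with as many of available_args as its signature accepts."""
    argc = len(inspect.signature(handler).parameters)
    return handler(*available_args[:argc])

assert call_with_arity(lambda req: req, 'request', 'response') == 'request'
assert call_with_arity(lambda req, resp: resp, 'request', 'response') == 'response'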

Example 83

Project: aiocouchdb Source File: database.py
    @asyncio.coroutine
    def temp_view(self, map_fun,
                  red_fun=None,
                  language=None,
                  *,
                  auth=None,
                  feed_buffer_size=None,
                  att_encoding_info=None,
                  attachments=None,
                  conflicts=None,
                  descending=None,
                  endkey=...,
                  endkey_docid=None,
                  group=None,
                  group_level=None,
                  include_docs=None,
                  inclusive_end=None,
                  keys=...,
                  limit=None,
                  reduce=None,
                  skip=None,
                  stale=None,
                  startkey=...,
                  startkey_docid=None,
                  update_seq=None):
        """Executes :ref:`temporary view <api/db/temp_view>` and returns
        it results according specified parameters.

        :param str map_fun: Map function source code
        :param str red_fun: Reduce function source code
        :param str language: Query server language to process the view

        :param auth: :class:`aiocouchdb.authn.AuthProvider` instance
        :param int feed_buffer_size: Internal buffer size for fetched feed items

        :param bool att_encoding_info: Includes encoding information in
                                       attachment stubs
        :param bool attachments: Includes attachment content into documents.
                                 **Warning**: use with caution!
        :param bool conflicts: Includes conflicts information into documents
        :param bool descending: Return rows in descending order by key
        :param endkey: Stop fetching rows when the specified key is reached
        :param str endkey_docid: Stop fetching rows when the specified
                                 document ID is reached
        :param bool group: Reduces the view result grouping by unique keys
        :param int group_level: Reduces the view result grouping the keys
                                with defined level
        :param str include_docs: Include the document body for each row
        :param bool inclusive_end: When ``False``, doesn't include ``endkey``
                                   in returned rows
        :param list keys: List of view keys to fetch
        :param int limit: Limits the number of the returned rows by
                          the specified number
        :param bool reduce: Defines whether the reduce function needs to be
                            applied or not
        :param int skip: Skips specified number of rows before starting
                         to return the actual result
        :param str stale: Allows fetching the rows from a stale view, without
                          triggering an index update. Supported values: ``ok``
                          and ``update_after``
        :param startkey: Return rows starting with the specified key
        :param str startkey_docid: Return rows starting with the specified
                                   document ID
        :param bool update_seq: Include an ``update_seq`` value into view
                                results header

        :rtype: :class:`aiocouchdb.feeds.ViewFeed`
        """
        params = locals()
        for key in ('self', 'auth', 'map_fun', 'red_fun', 'language',
                    'feed_buffer_size'):
            params.pop(key)

        data = {'map': map_fun}
        if red_fun is not None:
            data['reduce'] = red_fun
        if language is not None:
            data['language'] = language

        view = self.view_class(self.resource('_temp_view'))
        return (yield from view.request(auth=auth,
                                        feed_buffer_size=feed_buffer_size,
                                        data=data,
                                        params=params))

Example 84

Project: gunicorn Source File: _gaiohttp.py
Function: run
    @asyncio.coroutine
    def _run(self):
        for sock in self.sockets:
            factory = self.get_factory(sock.sock, sock.cfg_addr)
            self.servers.append(
                (yield from self._create_server(factory, sock)))

        # If our parent changed then we shut down.
        pid = os.getpid()
        try:
            while self.alive or self.connections:
                self.notify()

                if (self.alive and
                        pid == os.getpid() and self.ppid != os.getppid()):
                    self.log.info("Parent changed, shutting down: %s", self)
                    self.alive = False

                # stop accepting requests
                if not self.alive:
                    if self.servers:
                        self.log.info(
                            "Stopping server: %s, connections: %s",
                            pid, len(self.connections))
                        for server in self.servers:
                            server.close()
                        self.servers.clear()

                    # prepare connections for closing
                    for conn in self.connections.values():
                        if hasattr(conn, 'closing'):
                            conn.closing()

                yield from asyncio.sleep(1.0, loop=self.loop)
        except KeyboardInterrupt:
            pass

        if self.servers:
            for server in self.servers:
                server.close()

        yield from self.close()

Example 85

Project: aioes Source File: nodes.py
Function: stats
    @asyncio.coroutine
    def stats(self, node_id=None, metric=None, index_metric=None, *,
              completion_fields=default, fielddata_fields=default,
              fields=default, groups=default, human=default, level=default,
              types=default):
        """
        The cluster nodes stats API allows retrieving one or more (or all) of
        the cluster nodes statistics.
        `<http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/cluster-nodes-stats.html>`_

        :arg node_id: A comma-separated list of node IDs or names to limit the
            returned information; use `_local` to return information from the
            node you're connecting to, leave empty to get information from all
            nodes
        :arg metric: Limit the information returned to the specified metrics.
            Possible options are: "_all", "breaker", "fs", "http", "indices",
            "jvm", "network", "os", "process", "thread_pool", "transport"
        :arg index_metric: Limit the information returned for `indices` metric
            to the specific index metrics. Isn't used if `indices` (or `all`)
            metric isn't specified. Possible options are: "_all", "completion",
            "docs", "fielddata", "filter_cache", "flush", "get", "id_cache",
            "indexing", "merge", "percolate", "refresh", "search", "segments",
            "store", "warmer"
        :arg completion_fields: A comma-separated list of fields
            for `fielddata` and `suggest` index metric (supports wildcards)
        :arg fielddata_fields: A comma-separated list of fields for `fielddata`
            index metric (supports wildcards)
        :arg fields: A comma-separated list of fields for `fielddata` and
            `completion` index metric (supports wildcards)
        :arg groups: A comma-separated list of search groups for `search` index
            metric
        :arg human: Whether to return time and byte values in human-readable
            format, default False
        :arg level: Return indices stats aggregated at node, index or shard
            level, default 'node'
        :arg types: A comma-separated list of document types for the `indexing`
            index metric
        """
        params = {}

        if completion_fields is not default:
            params['completion_fields'] = completion_fields
        if fielddata_fields is not default:
            params['fielddata_fields'] = fielddata_fields
        if fields is not default:
            params['fields'] = fields
        if groups is not default:
            params['groups'] = groups
        if human is not default:
            params['human'] = bool(human)
        if level is not default:
            params['level'] = level
        if types is not default:
            params['types'] = types

        _, data = yield from self.transport.perform_request(
            'GET',
            _make_path('_nodes', node_id, 'stats', metric, index_metric),
            params=params
        )
        return data
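
As a rough usage illustration (not part of the project listing), the coroutine above would typically be reached through an aioes Elasticsearch client; the endpoint, node selector, and metric below are placeholders, and connection cleanup is omitted:

import asyncio
from aioes import Elasticsearch

@asyncio.coroutine
def show_jvm_stats():
    es = Elasticsearch(['localhost:9200'])
    # Ask only the local node for its JVM metrics, in human-readable units.
    stats = yield from es.nodes.stats(node_id='_local', metric='jvm',
                                      human=True)
    print(stats['nodes'])

loop = asyncio.get_event_loop()
loop.run_until_complete(show_jvm_stats())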

Example 86

Project: pycon2014 Source File: crawl.py
Function: fetch
    @asyncio.coroutine
    def fetch(self):
        """Attempt to fetch the contents of the URL.

        If successful, and the data is HTML, extract further links and
        add them to the crawler.  Redirects are also added back there.
        """
        while self.tries < self.max_tries:
            self.tries += 1
            self.request = None
            try:
                self.request = Request(self.log, self.url, self.crawler.pool)
                yield from self.request.connect()
                yield from self.request.send_request()
                self.response = yield from self.request.get_response()
                self.body = yield from self.response.read()
                h_conn = self.response.get_header('connection').lower()
                h_t_enc = self.response.get_header('transfer-encoding').lower()
                if h_conn != 'close':
                    self.request.close(recycle=True)
                    self.request = None
                if self.tries > 1:
                    self.log(1, 'try', self.tries, 'for', self.url, 'success')
                break
            except (BadStatusLine, OSError) as exc:
                self.exceptions.append(exc)
                self.log(1, 'try', self.tries, 'for', self.url,
                            'raised', repr(exc))
                ##import pdb; pdb.set_trace()
                # Don't reuse the connection in this case.
            finally:
                if self.request is not None:
                    self.request.close()
        else:
            # We never broke out of the while loop, i.e. all tries failed.
            self.log(0, 'no success for', self.url,
                        'in', self.max_tries, 'tries')
            return
        next_url = self.response.get_redirect_url()
        if next_url:
            self.next_url = urllib.parse.urljoin(self.url, next_url)
            if self.max_redirect > 0:
                self.log(1, 'redirect to', self.next_url, 'from', self.url)
                self.crawler.add_url(self.next_url, self.max_redirect-1)
            else:
                self.log(0, 'redirect limit reached for', self.next_url,
                            'from', self.url)
        else:
            if self.response.status == 200:
                self.ctype = self.response.get_header('content-type')
                self.pdict = {}
                if self.ctype:
                    self.ctype, self.pdict = cgi.parse_header(self.ctype)
                self.encoding = self.pdict.get('charset', 'utf-8')
                if self.ctype == 'text/html':
                    body = self.body.decode(self.encoding, 'replace')
                    # Replace href with (?:href|src) to follow image links.
                    self.urls = set(re.findall(r'(?i)href=["\']?([^\s"\'<>]+)',
                                               body))
                    if self.urls:
                        self.log(1, 'got', len(self.urls),
                                    'distinct urls from', self.url)
                    self.new_urls = set()
                    for url in self.urls:
                        url = unescape(url)
                        url = urllib.parse.urljoin(self.url, url)
                        url, frag = urllib.parse.urldefrag(url)
                        if self.crawler.add_url(url):
                            self.new_urls.add(url)

Example 87

Project: aioes Source File: test_client.py
Function: test_search
    def test_search(self):
        """ search """
        @asyncio.coroutine
        def go():
            yield from self.cl.index(self._index, 'testdoc',
                                     MESSAGES[0], '1',
                                     refresh=True)
            yield from self.cl.index(self._index, 'testdoc',
                                     MESSAGES[1], '2',
                                     refresh=True)
            yield from self.cl.index(self._index, 'testdoc',
                                     MESSAGES[2], '3',
                                     refresh=True)
            data = yield from self.cl.search(self._index,
                                             'testdoc',
                                             q='skills:Python',
                                             _source=False,
                                             _source_include='skills')
            self.assertEqual(data['hits']['total'], 2, data)
            self.assertIn('skills', data['hits']['hits'][0]['_source'])
            self.assertIn('skills', data['hits']['hits'][1]['_source'])
            data = yield from self.cl.search(self._index,
                                             'testdoc',
                                             q='skills:Python',
                                             _source_exclude='skills',
                                             analyzer='standard',
                                             default_operator='AND',
                                             analyze_wildcard=True,
                                             version=2,
                                             timeout=30000,
                                             allow_no_indices=True,
                                             ignore_unavailable=True,
                                             df='_all',
                                             explain=True,
                                             fields='skills,user',
                                             from_=0,
                                             expand_wildcards='open',
                                             lenient=True,
                                             lowercase_expanded_terms=True,
                                             preference='random',
                                             scroll='1s',
                                             search_type='query_then_fetch',
                                             size=100,
                                             sort='user:true',
                                             stats=True
                                             )
            self.assertNotIn('skills', data['hits']['hits'][0]['_source'])
            self.assertNotIn('skills', data['hits']['hits'][1]['_source'])
            with self.assertRaises(TypeError):
                yield from self.cl.search(default_operator=1,
                                          indices_boost=False)
            with self.assertRaises(ValueError):
                yield from self.cl.search(doc_type='testdoc',
                                          q='skills:Python',
                                          routing='Sidor',
                                          source='Query DSL',
                                          suggest_field='user',
                                          suggest_text='test',
                                          suggest_mode='missing',
                                          suggest_size=100,
                                          default_operator='1')

            with self.assertRaises(TypeError):
                yield from self.cl.search(self._index,
                                          'testdoc',
                                          q='skills:Python',
                                          suggest_mode=1)
            with self.assertRaises(ValueError):
                yield from self.cl.search(self._index,
                                          'testdoc',
                                          q='skills:Python',
                                          suggest_mode='1')

            with self.assertRaises(TypeError):
                yield from self.cl.search(self._index,
                                          'testdoc',
                                          q='skills:Python',
                                          search_type=1)
            with self.assertRaises(ValueError):
                yield from self.cl.search(self._index,
                                          'testdoc',
                                          q='skills:Python',
                                          search_type='1')

            with self.assertRaises(TypeError):
                yield from self.cl.search(self._index,
                                          'testdoc',
                                          q='skills:Python',
                                          expand_wildcards=1)
            with self.assertRaises(ValueError):
                yield from self.cl.search(self._index,
                                          'testdoc',
                                          q='skills:Python',
                                          expand_wildcards='1')
        self.loop.run_until_complete(go())

Example 88

Project: HangoutsBot Source File: Dispatcher.py
Function: run
    @asyncio.coroutine
    def run(self, bot, event, bot_command_char, *args, **kwds):

        bot_command_char = bot_command_char.strip()  # For cases like "/bot " or " / "

        if args[0] == bot_command_char:  # Either the command char is like "/bot" or the user did "/ ping"
            args = list(args[1:])
        if args[0].startswith(bot_command_char):
            command = args[0][len(bot_command_char):]
        else:
            command = args[0]
        try:
            func = self.commands[command]
        except KeyError:
            try:
                if event.user.is_self:
                    func = self.hidden_commands[command]
                else:
                    raise KeyError
            except KeyError:
                if self.unknown_command:
                    func = self.unknown_command
                else:
                    raise NoCommandFoundError(
                        "Command {} is not registered. Furthermore, no command found to handle unknown commands.".format
                        (command))

        func = asyncio.coroutine(func)

        args = list(args[1:])

        # For help cases.
        if len(args) > 0 and args[0] == '?':
            if func.__doc__:
                bot.send_message_segments(event.conv, UtilBot.text_to_segments(func.__doc__))
                return

        try:
            asyncio.async(func(bot, event, *args, **kwds))
        except Exception as e:
            log = open('log.txt', 'a+')
            log.writelines(str(datetime.now()) + ":\n " + traceback.format_exc() + "\n\n")
            log.close()
            print(traceback.format_exc())

Example 89

Project: aiohttp-cors Source File: test_main.py
    @asynctest
    @asyncio.coroutine
    def test_simple_default(self):
        """Test CORS simple requests with a route with the default
        configuration.

        The default configuration means that:
          * no credentials are allowed,
          * no headers are exposed,
          * no client headers are allowed.
        """

        client1 = "http://client1.example.org"
        client2 = "http://client2.example.org"
        client1_80 = "http://client1.example.org:80"
        client1_https = "https://client2.example.org"

        tests_descriptions = [
            {
                "name": "default",
                "defaults": None,
                "route_config":
                    {
                        client1: ResourceOptions(),
                    },
                "tests": [
                    {
                        "name": "no origin header",
                        "not_in_response_headers": {
                            hdrs.ACCESS_CONTROL_ALLOW_ORIGIN,
                            hdrs.ACCESS_CONTROL_EXPOSE_HEADERS,
                            hdrs.ACCESS_CONTROL_ALLOW_CREDENTIALS,
                        }
                    },
                    {
                        "name": "allowed origin",
                        "request_headers": {
                            hdrs.ORIGIN: client1,
                        },
                        "in_response_headers": {
                            hdrs.ACCESS_CONTROL_ALLOW_ORIGIN: client1,
                        },
                        "not_in_response_headers": {
                            hdrs.ACCESS_CONTROL_EXPOSE_HEADERS,
                            hdrs.ACCESS_CONTROL_ALLOW_CREDENTIALS,
                        }
                    },
                    {
                        "name": "not allowed origin",
                        "request_headers": {
                            hdrs.ORIGIN: client2,
                        },
                        "not_in_response_headers": {
                            hdrs.ACCESS_CONTROL_ALLOW_ORIGIN,
                            hdrs.ACCESS_CONTROL_EXPOSE_HEADERS,
                            hdrs.ACCESS_CONTROL_ALLOW_CREDENTIALS,
                        }
                    },
                    {
                        "name": "explicitly specified default port",
                        # The CORS specification says that origins may be
                        # compared as strings, so "example.org:80" is not the
                        # same as "example.org".
                        "request_headers": {
                            hdrs.ORIGIN: client1_80,
                        },
                        "not_in_response_headers": {
                            hdrs.ACCESS_CONTROL_ALLOW_ORIGIN,
                            hdrs.ACCESS_CONTROL_EXPOSE_HEADERS,
                            hdrs.ACCESS_CONTROL_ALLOW_CREDENTIALS,
                        }
                    },
                    {
                        "name": "different scheme",
                        "request_headers": {
                            hdrs.ORIGIN: client1_https,
                        },
                        "not_in_response_headers": {
                            hdrs.ACCESS_CONTROL_ALLOW_ORIGIN,
                            hdrs.ACCESS_CONTROL_EXPOSE_HEADERS,
                            hdrs.ACCESS_CONTROL_ALLOW_CREDENTIALS,
                        }
                    },
                    ],
            },
        ]

        yield from self._run_simple_requests_tests(tests_descriptions, False)
        yield from self._run_simple_requests_tests(tests_descriptions, True)
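
For context, a minimal sketch of the kind of route configuration this test exercises, using aiohttp_cors with the default (restrictive) options; the handler and paths are placeholders:

import asyncio
from aiohttp import web
import aiohttp_cors

@asyncio.coroutine
def handler(request):
    return web.Response(text='hello')

app = web.Application()
cors = aiohttp_cors.setup(app)
# Allow simple requests from client1 only; the defaults mean no credentials,
# no exposed headers, and no extra client headers are allowed.
cors.add(app.router.add_route('GET', '/resource', handler),
         {'http://client1.example.org': aiohttp_cors.ResourceOptions()})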

Example 90

Project: twtxt Source File: twhttp.py
Function: retrieve_file
@asyncio.coroutine
def retrieve_file(client, source, limit, cache):
    is_cached = cache.is_cached(source.url) if cache else None
    headers = {"If-Modified-Since": cache.last_modified(source.url)} if is_cached else {}

    try:
        response = yield from client.get(source.url, headers=headers)
        content = yield from response.text()
    except Exception as e:
        if is_cached:
            logger.debug("{0}: {1} - using cached content".format(source.url, e))
            return cache.get_tweets(source.url, limit)
        else:
            logger.debug("{0}: {1}".format(source.url, e))
            return []

    if response.status == 200:
        tweets = parse_tweets(content.splitlines(), source)

        if cache:
            last_modified_header = response.headers.get("Last-Modified")
            if last_modified_header:
                logger.debug("{0} returned 200 and Last-Modified header - adding content to cache".format(source.url))
                cache.add_tweets(source.url, last_modified_header, tweets)
            else:
                logger.debug("{0} returned 200 but no Last-Modified header - can’t cache content".format(source.url))
        else:
            logger.debug("{0} returned 200".format(source.url))

        return sorted(tweets, reverse=True)[:limit]

    elif response.status == 410 and is_cached:
        # 410 Gone:
        # The resource requested is no longer available,
        # and will not be available again.
        logger.debug("{0} returned 410 - deleting cached content".format(source.url))
        cache.remove_tweets(source.url)
        return []

    elif is_cached:
        logger.debug("{0} returned {1} - using cached content".format(source.url, response.status))
        return cache.get_tweets(source.url, limit)

    else:
        logger.debug("{0} returned {1}".format(source.url, response.status))
        return []

Example 91

Project: aiokafka Source File: fetcher.py
    @asyncio.coroutine
    def _proc_fetch_request(self, node_id, request):
        needs_wakeup = False
        try:
            response = yield from self._client.send(node_id, request)
        except Errors.KafkaError as err:
            log.error("Failed fetch messages from %s: %s", node_id, err)
            return False
        finally:
            self._in_flight.remove(node_id)

        fetch_offsets = {}
        for topic, partitions in request.topics:
            for partition, offset, _ in partitions:
                fetch_offsets[TopicPartition(topic, partition)] = offset

        for topic, partitions in response.topics:
            for partition, error_code, highwater, messages in partitions:
                tp = TopicPartition(topic, partition)
                error_type = Errors.for_code(error_code)
                if not self._subscriptions.is_fetchable(tp):
                    # this can happen when a rebalance happened
                    log.debug("Ignoring fetched records for partition %s"
                              " since it is no longer fetchable", tp)

                elif error_type is Errors.NoError:
                    self._subscriptions.assignment[tp].highwater = highwater

                    # we are interested in this fetch only if the beginning
                    # offset matches the current consumed position
                    fetch_offset = fetch_offsets[tp]
                    partial = None
                    if messages and \
                            isinstance(messages[-1][-1], PartialMessage):
                        partial = messages.pop()

                    if messages:
                        log.debug(
                            "Adding fetched record for partition %s with"
                            " offset %d to buffered record list",
                            tp, fetch_offset)
                        try:
                            messages = collections.deque(
                                self._unpack_message_set(tp, messages))
                        except Errors.InvalidMessageError as err:
                            self._set_error(tp, err)
                            continue

                        self._records[tp] = FetchResult(
                            tp, messages=messages,
                            subscriptions=self._subscriptions,
                            backoff=self._prefetch_backoff,
                            loop=self._loop)
                        # We added at least 1 successful record
                        needs_wakeup = True
                    elif partial:
                        # we did not read a single message from a non-empty
                        # buffer because that message's size is larger than the
                        # fetch size; in this case, record this exception
                        err = RecordTooLargeError(
                            "There are some messages at [Partition=Offset]: "
                            "%s=%s whose size is larger than the fetch size %s"
                            " and hence cannot be ever returned. "
                            "Increase the fetch size, or decrease the maximum "
                            "message size the broker will allow.",
                            tp, fetch_offset, self._max_partition_fetch_bytes)
                        self._set_error(tp, err)
                        needs_wakeup = True
                        self._subscriptions.assignment[tp].position += 1

                elif error_type in (Errors.NotLeaderForPartitionError,
                                    Errors.UnknownTopicOrPartitionError):
                    self._client.force_metadata_update()
                elif error_type is Errors.OffsetOutOfRangeError:
                    fetch_offset = fetch_offsets[tp]
                    if self._subscriptions.has_default_offset_reset_policy():
                        self._subscriptions.need_offset_reset(tp)
                    else:
                        err = Errors.OffsetOutOfRangeError({tp: fetch_offset})
                        self._set_error(tp, err)
                        needs_wakeup = True
                    log.info(
                        "Fetch offset %s is out of range, resetting offset",
                        fetch_offset)
                elif error_type is Errors.TopicAuthorizationFailedError:
                    log.warn("Not authorized to read from topic %s.", tp.topic)
                    err = Errors.TopicAuthorizationFailedError(tp.topic)
                    self._set_error(tp, err)
                    needs_wakeup = True
                else:
                    log.warn('Unexpected error while fetching data: %s',
                             error_type.__name__)
        return needs_wakeup

Example 92

Project: py-jsonapi Source File: collection.py
Function: get
    @asyncio.coroutine
    def get(self):
        """
        Handles a GET request. This means fetching many resources from the
        collection and returning them.

        http://jsonapi.org/format/#fetching-resources
        """
        # Fetch the requested resources.
        if self.request.japi_paginate:
            offset = self.request.japi_page_offset
            limit = self.request.japi_page_limit
        else:
            offset = self.request.japi_offset
            limit = self.request.japi_limit

        resources = yield from self.db.query(
            self.typename, order=self.request.japi_sort, limit=limit,
            offset=offset, filters=self.request.japi_filters
        )

        # Fetch all related resources, which should be included.
        included_resources = yield from self.db.get_relatives(
            resources, self.request.japi_include
        )

        # Build the response.
        data = serialize_many(resources, fields=self.request.japi_fields)
        included = serialize_many(
            included_resources.values(), fields=self.request.japi_fields
        )
        meta = OrderedDict()
        links = OrderedDict()

        # Add the pagination links, if necessary.
        if self.request.japi_paginate:
            total_resources = yield from self.db.query_size(
                self.typename, filters=self.request.japi_filters
            )

            pagination = Pagination(self.request, total_resources)
            meta.update(pagination.json_meta)
            links.update(pagination.json_links)

        # Put all together
        self.response.headers["content-type"] = "application/vnd.api+json"
        self.response.status_code = 200
        self.response.body = self.api.dump_json(OrderedDict([
            ("data", data),
            ("included", included),
            ("meta", meta),
            ("links", links),
            ("jsonapi", self.api.jsonapi_object)
        ]))
        return None

Example 93

Project: aiokafka Source File: group_coordinator.py
    @asyncio.coroutine
    def _perform_group_join(self):
        """Join the group and return the assignment for the next generation.

        This function handles both JoinGroup and SyncGroup, delegating to
        _perform_assignment() if elected leader by the coordinator.

        Returns:
            Future: resolves to the encoded-bytes assignment returned from the
                group leader
        """
        # send a join group request to the coordinator
        log.debug("(Re-)joining group %s", self.group_id)

        topics = self._subscription.subscription
        assert topics is not None, 'Consumer has not subscribed to topics'
        metadata_list = []
        for assignor in self._assignors:
            metadata = assignor.metadata(topics)
            if not isinstance(metadata, bytes):
                metadata = metadata.encode()
            group_protocol = (assignor.name, metadata)
            metadata_list.append(group_protocol)

        request = JoinGroupRequest(
            self.group_id,
            self._session_timeout_ms,
            self.member_id,
            ConsumerProtocol.PROTOCOL_TYPE,
            metadata_list)

        # create the request for the coordinator
        log.debug("Issuing request (%s) to coordinator %s",
                  request, self.coordinator_id)
        try:
            response = yield from self._send_req(self.coordinator_id, request)
        except Errors.GroupLoadInProgressError:
            log.debug("Attempt to join group %s rejected since coordinator is"
                      " loading the group.", self.group_id)
        except Errors.UnknownMemberIdError:
            # reset the member id and retry immediately
            self.member_id = JoinGroupRequest.UNKNOWN_MEMBER_ID
            log.info(
                "Attempt to join group %s failed due to unknown member id,"
                " resetting and retrying.", self.group_id)
            return
        except (Errors.GroupCoordinatorNotAvailableError,
                Errors.NotCoordinatorForGroupError):
            # re-discover the coordinator and retry with backoff
            self.coordinator_dead()
            log.info("Attempt to join group %s failed due to obsolete "
                     "coordinator information, retrying.", self.group_id)
        except Errors.KafkaError as err:
            log.error(
                "Error in join group '%s' response: %s", self.group_id, err)
        else:
            log.debug("Join group response %s", response)
            self.member_id = response.member_id
            self.generation = response.generation_id
            self.rejoin_needed = False
            self.protocol = response.group_protocol
            log.info("Joined group '%s' (generation %s) with member_id %s",
                     self.group_id, self.generation, self.member_id)

            if response.leader_id == response.member_id:
                log.info("Elected group leader -- performing partition"
                         " assignments using %s", self.protocol)
                cor = self._on_join_leader(response)
            else:
                cor = self._on_join_follower()

            try:
                member_assignment_bytes = yield from cor
            except (Errors.UnknownMemberIdError,
                    Errors.RebalanceInProgressError,
                    Errors.IllegalGenerationError):
                pass
            except Errors.KafkaError as err:
                if err.retriable is False:
                    raise err
            else:
                self._on_join_complete(self.generation, self.member_id,
                                       self.protocol, member_assignment_bytes)
                self.needs_join_prepare = True
                return

        # backoff wait - failure case
        yield from asyncio.sleep(
            self._retry_backoff_ms / 1000, loop=self.loop)

Example 94

Project: peru Source File: cache.py
    @asyncio.coroutine
    def modify_tree(self, tree, modifications):
        '''The modifications are a map of the form {path: TreeEntry}. The tree
        can be None to indicate an empty starting tree. The entries can be
        either blobs or trees, or None to indicate a deletion. The return value
        is either the hash of the resulting tree, or None if the resulting tree
        is empty. Modifications in parent directories are done before
        modifications in subdirectories below them, so for example you can
        insert a tree at a given path and also insert more new stuff beneath
        that path, without fear of overwriting the new stuff.'''

        # Read the original contents of the base tree.
        if tree is None:
            entries = {}
        else:
            entries = yield from self.ls_tree(tree, '.')

        # Separate the modifications into two groups, those that refer to
        # entries at the base of this tree (e.g. 'foo'), and those that refer
        # to entries in subtrees (e.g. 'foo/bar').
        modifications_at_base = dict()
        modifications_in_subtrees = collections.defaultdict(dict)
        for path_str, entry in modifications.items():
            # Canonicalize paths to get rid of duplicate/trailing slashes.
            path = pathlib.PurePosixPath(path_str)

            # Check for nonsense paths.
            # TODO: Maybe stop recursive calls from repeating these checks.
            if len(path.parts) == 0:
                raise ModifyTreeError('Cannot modify an empty path.')
            elif path.parts[0] == '/':
                raise ModifyTreeError('Cannot modify an absolute path.')
            elif '..' in path.parts:
                raise ModifyTreeError('.. is not allowed in tree paths.')

            if len(path.parts) == 1:
                modifications_at_base[str(path)] = entry
            else:
                first_dir = path.parts[0]
                rest = str(pathlib.PurePosixPath(*path.parts[1:]))
                modifications_in_subtrees[first_dir][rest] = entry

        # Insert or delete entries in the base tree. Note that this happens
        # before any subtree operations.
        for name, entry in modifications_at_base.items():
            if entry is None:
                entries.pop(name, None)
            else:
                entries[name] = entry

        # Recurse to compute modified subtrees. Note how we handle deletions:
        # If 'a' is a file, inserting a new file at 'a/b' will implicitly
        # delete 'a', but trying to delete 'a/b' will be a no-op and will not
        # delete 'a'.
        empty_tree = (yield from self.get_empty_tree())
        for name, sub_modifications in modifications_in_subtrees.items():
            subtree_base = None
            if name in entries and entries[name].type == TREE_TYPE:
                subtree_base = entries[name].hash
            new_subtree = yield from self.modify_tree(
                subtree_base, sub_modifications)
            if new_subtree != empty_tree:
                entries[name] = TreeEntry(TREE_MODE, TREE_TYPE, new_subtree)
            # Delete an empty tree if it was actually a tree to begin with.
            elif name in entries and entries[name].type == TREE_TYPE:
                del entries[name]

        # Return the resulting tree, or None if empty.
        if entries:
            session = self.no_index_git_session()
            tree = yield from session.make_tree_from_entries(entries)
            return tree
        else:
            return empty_tree
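
To make the shape of the modifications argument concrete, here is a hypothetical caller sketch; the cache object, base tree, and blob entry are stand-ins and are not taken from peru itself:

import asyncio

@asyncio.coroutine
def replace_readme(cache, base_tree, readme_blob_entry):
    modifications = {
        'docs/README.md': readme_blob_entry,  # insert or overwrite a blob
        'docs/old_notes.txt': None,           # delete an entry (no-op if absent)
    }
    # Returns the resulting tree (see the docstring above for the empty case).
    new_tree = yield from cache.modify_tree(base_tree, modifications)
    return new_tree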

Example 95

Project: python-steemlib Source File: steemasyncclient.py
Function: handle
    @asyncio.coroutine
    def _handle(self, coroutines):
        if hasattr(self._config, "wallet"):
            self._wallet_ws  = yield from websockets.connect(self._config.wallet["url"])
            wallet_ws_recv_task = asyncio.async(self._wallet_ws.recv())
        if hasattr(self._config, "witness"):
            self._witness_ws = yield from websockets.connect(self._config.witness["url"])
            witness_ws_recv_task = asyncio.async(self._witness_ws.recv())
        try:
            main_task   = asyncio.async(self._initialize(coroutines))
            while True:
                tasks = [main_task]
                if hasattr(self._config, "wallet"):
                    tasks.append(wallet_ws_recv_task)
                if hasattr(self._config, "witness"):
                    tasks.append(witness_ws_recv_task)

                done, pending = yield from asyncio.wait(tasks, return_when=asyncio.FIRST_COMPLETED)
                if main_task in done:
                    if hasattr(self._config, "wallet"):
                        wallet_ws_recv_task.cancel()
                        self._wallet_pending_rpc = {}
                    if hasattr(self._config, "witness"):
                        witness_ws_recv_task.cancel()
                        self._witness_pending_rpc = {}
                    break

                if hasattr(self._config, "wallet") and wallet_ws_recv_task in done:
                    try:
                        r = json.loads(wallet_ws_recv_task.result())
                    except ValueError:
                        raise RPCClientError("Wallet server returned invalid format via websocket. Expected JSON!")
                    call_id = r["id"]
                    if call_id in self._wallet_pending_rpc:
                        future = self._wallet_pending_rpc[call_id]
                        del self._wallet_pending_rpc[call_id]
                        if "error" in r:
                            if "detail" in r["error"]:
                                future.set_exception(RPCServerError(r["error"]["detail"]))
                            else:
                                future.set_exception(RPCServerError(r["error"]["message"]))
                        else:
                            # Only set a result when no exception was set above.
                            future.set_result(r["result"])
                    else:
                        raise RPCClientError("Not expecting a response via websocket from wallet server with call id {}".format(call_id))
                    wallet_ws_recv_task  = asyncio.async(self._wallet_ws.recv())

                if hasattr(self._config, "witness") and witness_ws_recv_task in done:
                    try:
                        r = json.loads(witness_ws_recv_task.result())
                    except ValueError:
                        raise RPCClientError("Witness server returned invalid format via websocket. Expected JSON!")
                    call_id = r["id"]
                    if call_id in self._witness_pending_rpc:
                        future = self._witness_pending_rpc[call_id]
                        del self._witness_pending_rpc[call_id]
                        if "error" in r:
                            if "detail" in r["error"]:
                                future.set_exception(RPCServerError(r["error"]["detail"]))
                            else:
                                future.set_exception(RPCServerError(r["error"]["message"]))
                        else:
                            # Only set a result when no exception was set above.
                            future.set_result(r["result"])
                    else:
                        raise RPCClientError("Not expecting a response via websocket from witness server with call id {}".format(call_id))
                    witness_ws_recv_task = asyncio.async(self._witness_ws.recv())

        finally:
            if hasattr(self._config, "wallet"):
                yield from self._wallet_ws.close()
            if hasattr(self._config, "witness"):
                yield from self._witness_ws.close()

Example 96

Project: hangups Source File: conversation.py
@asyncio.coroutine
def build_user_conversation_list(client):
    """Return UserList from initial contact data and an additional request.

    The initial data contains the user's contacts, but there may be conversations
    containing users that are not in the contacts. This function takes care of
    requesting data for those users and constructing the UserList.
    """

    # Retrieve recent conversations so we can preemptively look up their
    # participants.
    sync_recent_conversations_response = (
        yield from client.sync_recent_conversations(
            hangouts_pb2.SyncRecentConversationsRequest(
                request_header=client.get_request_header(),
                max_conversations=100,
                max_events_per_conversation=1,
                sync_filter=[hangouts_pb2.SYNC_FILTER_INBOX],
            )
        )
    )
    conv_states = sync_recent_conversations_response.conversation_state
    sync_timestamp = parsers.from_timestamp(
        # syncrecentconversations seems to return a sync_timestamp 4 minutes
        # before the present. To prevent a later syncallnewevents call from
        # breaking by requesting events older than what we already have, use
        # current_server_time instead.
        sync_recent_conversations_response.response_header.current_server_time
    )

    # Retrieve entities participating in all conversations.
    required_user_ids = set()
    for conv_state in conv_states:
        required_user_ids |= {
            user.UserID(chat_id=part.id.chat_id, gaia_id=part.id.gaia_id)
            for part in conv_state.conversation.participant_data
        }
    required_entities = []
    if required_user_ids:
        logger.debug('Need to request additional users: {}'
                     .format(required_user_ids))
        try:
            response = yield from client.get_entity_by_id(
                hangouts_pb2.GetEntityByIdRequest(
                    request_header=client.get_request_header(),
                    batch_lookup_spec=[
                        hangouts_pb2.EntityLookupSpec(
                            gaia_id=user_id.gaia_id,
                            create_offnetwork_gaia=True,
                        )
                        for user_id in required_user_ids
                    ],
                )
            )
            for entity_result in response.entity_result:
                required_entities.extend(entity_result.entity)
        except exceptions.NetworkError as e:
            logger.warning('Failed to request missing users: {}'.format(e))

    # Build list of conversation participants.
    conv_part_list = []
    for conv_state in conv_states:
        conv_part_list.extend(conv_state.conversation.participant_data)

    # Retrieve self entity.
    get_self_info_response = yield from client.get_self_info(
        hangouts_pb2.GetSelfInfoRequest(
            request_header=client.get_request_header(),
        )
    )
    self_entity = get_self_info_response.self_entity

    user_list = user.UserList(client, self_entity, required_entities,
                              conv_part_list)
    conversation_list = ConversationList(client, conv_states, user_list,
                                         sync_timestamp)
    return (user_list, conversation_list)
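
A hedged sketch of how the coroutine above is usually driven: hangups fires an on_connect event once the client is connected, and the observer can then build both lists. The token file path is a placeholder, and the ConversationList accessor names are assumed:

import asyncio
import hangups

@asyncio.coroutine
def on_connect(client):
    user_list, conv_list = yield from build_user_conversation_list(client)
    for conv in conv_list.get_all():
        print(conv.id_)

cookies = hangups.auth.get_auth_stdin('refresh_token.txt')  # placeholder path
client = hangups.Client(cookies)
client.on_connect.add_observer(lambda: on_connect(client))
asyncio.get_event_loop().run_until_complete(client.connect())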

Example 97

Project: asynqp Source File: __init__.py
Function: connect
@asyncio.coroutine
def connect(host='localhost',
            port=5672,
            username='guest', password='guest',
            virtual_host='/',
            on_connection_close=None, *,
            loop=None, sock=None, **kwargs):
    """
    Connect to an AMQP server on the given host and port.

    Log in to the given virtual host using the supplied credentials.
    This function is a :ref:`coroutine <coroutine>`.

    :param str host: the host server to connect to.
    :param int port: the port which the AMQP server is listening on.
    :param str username: the username to authenticate with.
    :param str password: the password to authenticate with.
    :param str virtual_host: the AMQP virtual host to connect to.
    :param func on_connection_close: function called after connection lost.
    :keyword BaseEventLoop loop: An instance of :class:`~asyncio.BaseEventLoop` to use.
        (Defaults to :func:`asyncio.get_event_loop()`)
    :keyword socket sock: A :func:`~socket.socket` instance to use for the connection.
        This is passed on to :meth:`loop.create_connection() <asyncio.BaseEventLoop.create_connection>`.
        If ``sock`` is supplied then ``host`` and ``port`` will be ignored.

    Further keyword arguments are passed on to :meth:`loop.create_connection() <asyncio.BaseEventLoop.create_connection>`.

    This function will set TCP_NODELAY on TCP and TCP6 sockets, either on the supplied ``sock`` or on the created one.

    :return: the :class:`Connection` object.
    """
    from .protocol import AMQP
    from .routing import Dispatcher
    from .connection import open_connection

    loop = asyncio.get_event_loop() if loop is None else loop

    if sock is None:
        kwargs['host'] = host
        kwargs['port'] = port
    else:
        kwargs['sock'] = sock

    dispatcher = Dispatcher()

    def protocol_factory():
        return AMQP(dispatcher, loop, close_callback=on_connection_close)
    transport, protocol = yield from loop.create_connection(protocol_factory, **kwargs)

    # RPC-like applications require TCP_NODELAY in order to achieve
    # minimal response time. In practice this library sends data in one
    # big chunk, so this will not affect TCP performance.
    sk = transport.get_extra_info('socket')
    # 1. Unfortunately we cannot check the socket type (sk.type == socket.SOCK_STREAM). https://bugs.python.org/issue21327
    # 2. proto remains zero if not specified at socket creation
    if (sk.family in (socket.AF_INET, socket.AF_INET6)) and (sk.proto in (0, socket.IPPROTO_TCP)):
        sk.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)

    connection_info = {
        'username': username,
        'password': password,
        'virtual_host': virtual_host
    }
    connection = yield from open_connection(
        loop, transport, protocol, dispatcher, connection_info)
    return connection
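
A short usage sketch for the coroutine documented above; the exchange name, routing key, and message body are placeholders, and the channel helpers are assumed from asynqp's public API:

import asyncio
import asynqp

@asyncio.coroutine
def send_hello():
    connection = yield from asynqp.connect('localhost', 5672,
                                           username='guest', password='guest')
    try:
        channel = yield from connection.open_channel()
        exchange = yield from channel.declare_exchange('test.exchange', 'direct')
        # publish() is synchronous; the broker round-trips happen above.
        exchange.publish(asynqp.Message('hello'), 'routing.key')
        yield from channel.close()
    finally:
        yield from connection.close()

asyncio.get_event_loop().run_until_complete(send_hello())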

Example 98

Project: haproxystats Source File: pull.py
@asyncio.coroutine
def pull_stats(config, storage_dir, loop, executor):
    """
    Launch coroutines for pulling statistics from UNIX sockets.

    This is a delegating routine.

    Arguments:
        config (obj): A configParser object which holds configuration.
        storage_dir (str): The absolute directory path to save the statistics.
        loop (obj): A base event loop.
        executor(obj): A ThreadPoolExecutor object.

    Returns:
        True if statistics from *all* UNIX sockets are fetched, False otherwise.
    """
    # absolute directory path which contains UNIX socket files.
    results = []  # stores the result of finished tasks
    socket_dir = config.get('pull', 'socket-dir')
    pull_timeout = config.getfloat('pull', 'pull-timeout')
    if int(pull_timeout) == 0:
        pull_timeout = None

    socket_files = [f for f in glob.glob(socket_dir + '/*')
                    if is_unix_socket(f)]
    if not socket_files:
        log.error("found zero UNIX sockets under %s to connect to", socket_dir)
        return False

    log.debug('pull statistics')
    coroutines = [get(socket_file, cmd, storage_dir, loop, executor, config)
                  for socket_file in socket_files
                  for cmd in CMDS]
    # Launch all connections.
    done, pending = yield from asyncio.wait(coroutines,
                                            timeout=pull_timeout,
                                            return_when=ALL_COMPLETED)
    for task in done:
        log.debug('task status: %s', task)
        results.append(task.result())

    log.debug('task report, done:%s pending:%s succeed:%s failed:%s',
              len(done),
              len(pending),
              results.count(True),
              results.count(False))

    for task in pending:
        log.warning('cancelling task %s as it reached its timeout threshold of'
                    ' %.2f seconds', task, pull_timeout)
        task.cancel()

    # We claim success only when all tasks finished successfully.
    return not pending and len(set(results)) == 1 and True in set(results)
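
A hypothetical caller sketch showing how this delegating routine might be driven; the config file path, storage directory, and pool size are placeholders and are not taken from haproxystats itself:

import asyncio
from concurrent.futures import ThreadPoolExecutor
from configparser import ConfigParser

config = ConfigParser()
config.read('/etc/haproxystats.conf')  # placeholder path

loop = asyncio.get_event_loop()
executor = ThreadPoolExecutor(max_workers=4)
ok = loop.run_until_complete(
    pull_stats(config, '/var/lib/haproxystats', loop, executor))
print('all sockets pulled' if ok else 'some sockets failed')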

Example 99

Project: fcatalog_server Source File: test_frame_endpoint.py
def test_tcp_adapter_max_frame_len(tloop):
    """
    Test send/recv of length prefixed frames over TCP with the
    TCPFrameEndpoint.
    """
    # List of results:
    res = []

    addr,port = 'localhost',8767

    @asyncio.coroutine
    def server_handler(reader,writer):
        """Echo server"""

        tfe = TCPFrameEndpoint(reader,writer,max_frame_len=4)

        # Read a frame:
        frame = yield from tfe.recv()
        assert frame == b'abc'

        # Read a frame:
        frame = yield from tfe.recv()
        assert frame == b''

        # Read a frame. The frame should be too large for the chosen
        # max_frame_len=4, so we expect to get None here:
        frame = yield from tfe.recv()
        assert frame == None

        res.append('got_none')

    @asyncio.coroutine
    def client():
        reader, writer = yield from \
                asyncio.open_connection(host=addr,port=port)

        # Write b'abc':
        writer.write(b'\x03\x00\x00\x00abc')
        yield from writer.drain()

        # Write empty frame:
        writer.write(b'\x00\x00\x00\x00')
        yield from writer.drain()

        res.append('send_large_frame')

        # Write b'abcdef', which is too large for our chosen max_frame_len=4:
        writer.write(b'\x06\x00\x00\x00abcdef')
        yield from writer.drain()

        # We expect the server to disconnect us:
        with pytest.raises(asyncio.IncompleteReadError):
            yield from reader.readexactly(4)

        res.append('got_disconnected')

        # Close client:
        writer.close()

    # Start server:
    start_server = asyncio.start_server(server_handler,host=addr,port=port,reuse_address=True)
    server_task = run_timeout(start_server,tloop)

    # Start client:
    run_timeout(client(),tloop)

    # Close server:
    server_task.close()
    # Wait until server is closed:
    run_timeout(server_task.wait_closed(),loop=tloop)

    assert res == ['send_large_frame','got_none','got_disconnected']

Example 100

Project: aioh2 Source File: protocol.py
Function: send_data
    @asyncio.coroutine
    def send_data(self, stream_id, data, *, end_stream=False):
        """
        Send request or response body on the given stream.

        This will block until either the whole data is sent or the stream gets
        closed. A paused underlying transport or a closed flow control window
        will also cause this method to wait. If the peer increases the flow
        control window, this method will resume sending automatically.

        This can be called multiple times, but it must be called after a
        `start_request` or `start_response` with the returned stream ID, and
        before any `end_stream` instructions; otherwise it will fail.

        The given data may be automatically split into smaller frames in order
        to fit in the configured frame size or flow control window.

        Each stream can only have one `send_data` running at a time; other
        callers will block on a per-stream lock (wlock), so that coroutines
        sending data concurrently won't interfere with each other.

        Similarly, the completion of the call to this method does not mean the
        data is delivered.

        :param stream_id: Which stream to send data on
        :param data: Bytes to send
        :param end_stream: To finish sending a request or response, set this to
                           `True` to close the given stream locally after data
                           is sent (default `False`).
        :raise: `SendException` if there is an error sending data. Data left
                unsent can be found in `data` of the exception.
        """
        try:
            with (yield from self._get_stream(stream_id).wlock):
                while True:
                    yield from _wait_for_events(
                        self._resumed, self._get_stream(stream_id).window_open)
                    self._priority.unblock(stream_id)
                    waiter = asyncio.Future()
                    if not self._priority_events:
                        self._loop.call_soon(self._priority_step)
                    self._priority_events[stream_id] = waiter
                    try:
                        yield from waiter
                        data_size = len(data)
                        size = min(
                            data_size,
                            self._conn.local_flow_control_window(stream_id),
                            self._conn.max_outbound_frame_size)
                        if data_size == 0 or size == data_size:
                            self._conn.send_data(stream_id, data,
                                                 end_stream=end_stream)
                            self._flush()
                            break
                        elif size > 0:
                            self._conn.send_data(stream_id, data[:size])
                            data = data[size:]
                            self._flush()
                    finally:
                        self._priority_events.pop(stream_id, None)
                        self._priority.block(stream_id)
                        if self._priority_events:
                            self._loop.call_soon(self._priority_step)
        except ProtocolError:
            raise exceptions.SendException(data)
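
Finally, a rough client-side sketch around send_data, modelled on aioh2's README example; the host, path, and body are placeholders, and open_connection/start_request/recv_response are assumed to behave as documented there:

import asyncio
import aioh2

@asyncio.coroutine
def post_body(host, port, body):
    proto = yield from aioh2.open_connection(host, port)
    # Open a new stream for the request.
    stream_id = yield from proto.start_request([
        (':method', 'POST'),
        (':path', '/upload'),
    ])
    # send_data may split the body into several frames to respect the flow
    # control window; end_stream=True half-closes the stream once it is sent.
    yield from proto.send_data(stream_id, body, end_stream=True)
    headers = yield from proto.recv_response(stream_id)
    resp = yield from proto.read_stream(stream_id, -1)
    return headers, resp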