sys.maxsize

Here are examples of the Python API sys.maxsize, taken from open source projects. By voting up you can indicate which examples are most useful and appropriate.

156 Examples
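
For context: sys.maxsize is the largest value a variable of type Py_ssize_t can hold, typically 2**63 - 1 on 64-bit builds and 2**31 - 1 on 32-bit builds. Python 3 integers themselves are unbounded, which is why many of the examples below use sys.maxsize as a very large sentinel or "no limit" value rather than a true ceiling. A quick illustration:

import sys

print(sys.maxsize)               # 9223372036854775807 on a 64-bit build
print(sys.maxsize == 2**63 - 1)  # True on 64-bit builds
print(sys.maxsize + 1)           # no overflow: Python 3 ints are unbounded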

Example 51

Project: hubblemon Source File: chart_data.py
Function: sampling
	def sampling(self, max_resolution):
		new_items = []
		for item in self.items:
			#print('## len: %d' % len(item.data))

			if len(item.data) < (max_resolution * 2):
				new_items.append(item)
			else:
				per = int(len(item.data) / max_resolution)
				#print('#### per: %d' % per)
				new_data = []

				min = [0, sys.maxsize]
				max = [0, -sys.maxsize]
				idx = 0
				for data in item.data:
					if data != None and data[1] > max[1]:
						max = data
					if data != None and data[1] < min[1]:
						min = data

					idx += 1
					if idx % per == 0 and min[0] > 0:
						#print('## %d, %d' % (idx, per))

						if min[0] < max[0]:
							new_data.append(min)
							if max[1] != min[1]: # skip when min == max (append only once)
								new_data.append(max)
						else:
							new_data.append(max)
							if max[1] != min[1]: # skip when min == max (append only once)
								new_data.append(min)
						
					
						min = [0, sys.maxsize]
						max = [0, -sys.maxsize]


				if len(new_data) < (max_resolution / 2): # too many null data points exist; skip sampling
					pass
				else:
					item.data = new_data
					#print(item.data)
					#print('## new len: %d' % len(item.data))

				new_items.append(item)


		self.items = new_items
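
The pattern to note in this example is the sentinel initialization: the running minimum starts at sys.maxsize and the running maximum at -sys.maxsize, so the first real data point replaces both. A stripped-down sketch of the same idea (the helper name and data are illustrative, not from hubblemon):

import sys

def min_max(values):
    # Sentinels: any real value compares below/above these.
    lo, hi = sys.maxsize, -sys.maxsize
    for v in values:
        if v is not None:
            lo = min(lo, v)
            hi = max(hi, v)
    return lo, hi

print(min_max([3, None, -7, 12]))  # (-7, 12)

float('inf') is the more general sentinel, but sys.maxsize is a common idiom when the values are known to stay within machine-integer range.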

Example 52

Project: nidaba Source File: test_questions_util.py
def test_get_weekday():
    """
    Test the get_weekday() _util function.
    :return: None
    """

    # Regular data
    assert question.get_weekday(1416654427) == 5
    assert question.get_weekday(1417000158) != 5

    # Ensure negative dates are processed properly
    assert question.get_weekday(-100000000) == 0
    assert question.get_weekday(-99913600) != 0

    # Ensure that days tick over properly (and that UTC timezone is being used)
    assert question.get_weekday(345599) == 6
    assert question.get_weekday(345600) == 0

    # Make sure the various out of range exceptions are thrown
    with pytest.raises(FeatureException):
        question.get_weekday(sys.maxsize+1)  # OverflowError

    with pytest.raises(FeatureException):
        question.get_weekday(-sys.maxsize-1)  # OSError

    with pytest.raises(FeatureException):
        # If replace is not used, then the timezone is naively converted into the test machine's timezone
        question.get_weekday(datetime.datetime.max.replace(tzinfo=pytz.utc).timestamp() + 1)  # ValueError (> 31st December 9999 23:59:59)

    with pytest.raises(FeatureException):
        # If replace is not used, then the timezone is naively converted into the test machine's timezone
        question.get_weekday(datetime.datetime.min.replace(tzinfo=pytz.utc).timestamp() - 1)  # ValueError (< 1st January 1 00:00:00)
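
The boundary values are chosen because datetime ultimately calls into platform C functions that reject out-of-range timestamps. A quick way to observe the same exception types the test expects (the exact exception and message vary by platform; this is a sketch, not nidaba code):

import sys
import datetime

for ts in (sys.maxsize + 1, -sys.maxsize - 1):
    try:
        datetime.datetime.fromtimestamp(ts, tz=datetime.timezone.utc)
    except (OverflowError, OSError, ValueError) as e:
        print(type(e).__name__, e)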

Example 53

Project: git-pandas Source File: repository.py
    def file_change_history(self, branch='master', limit=None, days=None, ignore_globs=None, include_globs=None):
        """
        Returns a DataFrame of all file changes (via the commit history) for the specified branch.  This is similar to
        the commit history DataFrame, but is one row per file edit rather than one row per commit (which may encapsulate
        many file changes). Included in the DataFrame will be the columns:

         * date (index)
         * author
         * committer
         * message
         * filename
         * insertions
         * deletions

        :param branch: the branch to return commits for
        :param limit: (optional, default=None) a maximum number of commits to return, None for no limit
        :param days: (optional, default=None) number of days to return if limit is None
        :param ignore_globs: (optional, default=None) a list of globs to ignore, default of None excludes nothing
        :param include_globs: (optional, default=None) a list of globs to include, default of None includes everything.
        :return: DataFrame
        """

        # set up the dataset of commits
        if limit is None:
            if days is None:
                ds = [[
                          x.author.name,
                          x.committer.name,
                          x.committed_date,
                          x.message,
                          x.name_rev.split()[0],
                          self.__check_extension(x.stats.files, ignore_globs=ignore_globs, include_globs=include_globs)
                      ] for x in self.repo.iter_commits(branch, max_count=sys.maxsize)]
            else:
                ds = []
                c_date = time.time()
                commits = self.repo.iter_commits(branch, max_count=sys.maxsize)
                dlim = time.time() - days * 24 * 3600
                while c_date > dlim:
                    try:
                        if sys.version_info.major == 2:
                            x = commits.next()
                        else:
                            x = commits.__next__()
                    except StopIteration:
                        break

                    c_date = x.committed_date
                    if c_date > dlim:
                        ds.append([
                            x.author.name,
                            x.committer.name,
                            x.committed_date,
                            x.message,
                            x.name_rev.split()[0],
                            self.__check_extension(x.stats.files, ignore_globs=ignore_globs,
                                                   include_globs=include_globs)
                        ])

        else:
            ds = [[
                      x.author.name,
                      x.committer.name,
                      x.committed_date,
                      x.message,
                      x.name_rev.split()[0],
                      self.__check_extension(x.stats.files, ignore_globs=ignore_globs, include_globs=include_globs)
                  ] for x in self.repo.iter_commits(branch, max_count=limit)]

        ds = [x[:-1] + [fn, x[-1][fn]['insertions'], x[-1][fn]['deletions']] for x in ds for fn in x[-1].keys() if
              len(x[-1].keys()) > 0]

        # make it a pandas dataframe
        df = DataFrame(ds,
                       columns=['author', 'committer', 'date', 'message', 'rev', 'filename', 'insertions', 'deletions'])

        # format the date col and make it the index
        df['date'] = to_datetime(df['date'].map(datetime.datetime.fromtimestamp))
        df.set_index(keys=['date'], drop=True, inplace=True)

        return df
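
Note the idiom max_count=sys.maxsize: the method accepts limit=None to mean "no limit", but the underlying call wants a number, so sys.maxsize stands in for infinity. The same translation in isolation (the take helper is illustrative, not from git-pandas):

import sys
from itertools import islice

def take(iterable, limit=None):
    # Translate "no limit" into the numeric cap the downstream API expects.
    cap = sys.maxsize if limit is None else limit
    return list(islice(iterable, cap))

print(take(range(10), 3))  # [0, 1, 2]
print(take(range(10)))     # [0, 1, ..., 9]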

Example 54

Project: couchdb-python Source File: http.py
Function: request
    def request(self, method, url, body=None, headers=None, credentials=None,
                num_redirects=0):
        if url in self.perm_redirects:
            url = self.perm_redirects[url]
        method = method.upper()

        if headers is None:
            headers = {}
        headers.setdefault('Accept', 'application/json')
        headers['User-Agent'] = self.user_agent

        cached_resp = None
        if method in ('GET', 'HEAD'):
            cached_resp = self.cache.get(url)
            if cached_resp is not None:
                etag = cached_resp[1].get('etag')
                if etag:
                    headers['If-None-Match'] = etag

        if (body is not None and not isinstance(body, util.strbase) and
                not hasattr(body, 'read')):
            body = json.encode(body).encode('utf-8')
            headers.setdefault('Content-Type', 'application/json')

        if body is None:
            headers.setdefault('Content-Length', '0')
        elif isinstance(body, util.strbase):
            headers.setdefault('Content-Length', str(len(body)))
        else:
            headers['Transfer-Encoding'] = 'chunked'

        authorization = basic_auth(credentials)
        if authorization:
            headers['Authorization'] = authorization

        path_query = util.urlunsplit(('', '') + util.urlsplit(url)[2:4] + ('',))
        conn = self.connection_pool.get(url)

        def _try_request_with_retries(retries):
            while True:
                try:
                    return _try_request()
                except socket.error as e:
                    ecode = e.args[0]
                    if ecode not in self.retryable_errors:
                        raise
                    try:
                        delay = next(retries)
                    except StopIteration:
                        # No more retries, raise last socket error.
                        raise e
                    finally:
                        time.sleep(delay)
                        conn.close()

        def _try_request():
            try:
                conn.putrequest(method, path_query, skip_accept_encoding=True)
                for header in headers:
                    conn.putheader(header, headers[header])
                if body is None:
                    conn.endheaders()
                else:
                    if isinstance(body, util.strbase):
                        if isinstance(body, util.utype):
                            conn.endheaders(body.encode('utf-8'))
                        else:
                            conn.endheaders(body)
                    else: # assume a file-like object and send in chunks
                        conn.endheaders()
                        while 1:
                            chunk = body.read(CHUNK_SIZE)
                            if not chunk:
                                break
                            if isinstance(chunk, util.utype):
                                chunk = chunk.encode('utf-8')
                            status = ('%x\r\n' % len(chunk)).encode('utf-8')
                            conn.send(status + chunk + b'\r\n')
                        conn.send(b'0\r\n\r\n')
                return conn.getresponse()
            except BadStatusLine as e:
                # httplib raises a BadStatusLine when it cannot read the status
                # line saying, "Presumably, the server closed the connection
                # before sending a valid response."
                # Raise as ECONNRESET to simplify retry logic.
                if e.line == '' or e.line == "''":
                    raise socket.error(errno.ECONNRESET)
                else:
                    raise

        resp = _try_request_with_retries(iter(self.retry_delays))
        status = resp.status

        # Handle conditional response
        if status == 304 and method in ('GET', 'HEAD'):
            resp.read()
            self.connection_pool.release(url, conn)
            status, msg, data = cached_resp
            if data is not None:
                data = util.StringIO(data)
            return status, msg, data
        elif cached_resp:
            self.cache.remove(url)

        # Handle redirects
        if status == 303 or \
                method in ('GET', 'HEAD') and status in (301, 302, 307):
            resp.read()
            self.connection_pool.release(url, conn)
            if num_redirects > self.max_redirects:
                raise RedirectLimit('Redirection limit exceeded')
            location = resp.getheader('location')

            # in case of relative location: add scheme and host to the location
            location_split = util.urlsplit(location)

            if not location_split[0]:
                orig_url_split = util.urlsplit(url)
                location = util.urlunsplit(orig_url_split[:2] + location_split[2:])

            if status == 301:
                self.perm_redirects[url] = location
            elif status == 303:
                method = 'GET'
            return self.request(method, location, body, headers,
                                num_redirects=num_redirects + 1)

        data = None
        streamed = False

        # Read the full response for empty responses so that the connection is
        # in good state for the next request
        if method == 'HEAD' or resp.getheader('content-length') == '0' or \
                status < 200 or status in (204, 304):
            resp.read()
            self.connection_pool.release(url, conn)

        # Buffer small non-JSON response bodies
        elif int(resp.getheader('content-length', sys.maxsize)) < CHUNK_SIZE:
            data = resp.read()
            self.connection_pool.release(url, conn)

        # For large or chunked response bodies, do not buffer the full body,
        # and instead return a minimal file-like object
        else:
            data = ResponseBody(resp, self.connection_pool, url, conn)
            streamed = True

        # Handle errors
        if status >= 400:
            ctype = resp.getheader('content-type')
            if data is not None and 'application/json' in ctype:
                data = json.decode(data.decode('utf-8'))
                error = data.get('error'), data.get('reason')
            elif method != 'HEAD':
                error = resp.read()
                self.connection_pool.release(url, conn)
            else:
                error = ''
            if status == 401:
                raise Unauthorized(error)
            elif status == 403:
                raise Forbidden(error)
            elif status == 404:
                raise ResourceNotFound(error)
            elif status == 409:
                raise ResourceConflict(error)
            elif status == 412:
                raise PreconditionFailed(error)
            else:
                raise ServerError((status, error))

        # Store cachable responses
        if not streamed and method == 'GET' and 'etag' in resp.msg:
            self.cache.put(url, (status, resp.msg, data))

        if not streamed and data is not None:
            data = util.StringIO(data)

        return status, resp.msg, data
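
One detail worth noting: resp.getheader('content-length', sys.maxsize) defaults a missing header to sys.maxsize, which can never pass the < CHUNK_SIZE test, so responses of unknown length fall through to the streaming branch. The trick in isolation (a dict stands in for the response headers, and the CHUNK_SIZE value is illustrative):

import sys

CHUNK_SIZE = 1024 * 8
headers = {}  # no Content-Length header present

# A missing header defaults to sys.maxsize, which is never "small".
if int(headers.get('content-length', sys.maxsize)) < CHUNK_SIZE:
    print("buffer the whole body in memory")
else:
    print("return a streaming file-like object")  # taken here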

Example 55

Project: vscode-lldb Source File: debugsession.py
    def DEBUG_stackTrace(self, args):
        thread = self.process.GetThreadByID(args['threadId'])
        start_frame = args.get('startFrame', 0)
        levels = args.get('levels', sys.maxsize)
        if start_frame + levels > thread.num_frames:
            levels = thread.num_frames - start_frame
        stack_frames = []
        for i in range(start_frame, start_frame + levels):
            frame = thread.frames[i]
            stack_frame = { 'id': self.var_refs.create(frame, (thread.GetThreadID(), i), None) }
            fn_name = frame.GetFunctionName()
            if fn_name is None:
                fn_name = str(frame.GetPCAddress())
            stack_frame['name'] = fn_name

            if not self.in_disassembly(frame):
                le = frame.GetLineEntry()
                if le.IsValid():
                    fs = le.GetFileSpec()
                    # VSCode gets confused if the path contains funky stuff like a double-slash
                    full_path = os.path.normpath(fs.fullpath)
                    stack_frame['source'] = { 'name': fs.basename, 'path': full_path }
                    stack_frame['line'] = le.GetLine()
                    stack_frame['column'] = le.GetColumn()
            else:
                pc_sbaddr = frame.GetPCAddress()
                pc_addr = pc_sbaddr.GetLoadAddress(self.target)
                dasm = disassembly.find(self.disassembly_by_addr, pc_addr)
                if dasm is None:
                    log.info('Creating new disassembly for %x', pc_addr)
                    dasm = disassembly.Disassembly(pc_sbaddr, self.target)
                    disassembly.insert(self.disassembly_by_addr, dasm)
                    dasm.source_ref = self.disassembly_by_handle.create(dasm)
                stack_frame['source'] = dasm.get_source_ref()
                stack_frame['line'] = dasm.line_num_by_address(pc_addr)
                stack_frame['column'] = 0

            stack_frames.append(stack_frame)
        return { 'stackFrames': stack_frames, 'totalFrames': len(thread) }

Example 56

Project: Django--an-app-at-a-time Source File: response.py
    def _convert_to_charset(self, value, charset, mime_encode=False):
        """Converts headers key/value to ascii/latin-1 native strings.

        `charset` must be 'ascii' or 'latin-1'. If `mime_encode` is True and
        `value` can't be represented in the given charset, MIME-encoding
        is applied.
        """
        if not isinstance(value, (bytes, six.text_type)):
            value = str(value)
        if ((isinstance(value, bytes) and (b'\n' in value or b'\r' in value)) or
                isinstance(value, six.text_type) and ('\n' in value or '\r' in value)):
            raise BadHeaderError("Header values can't contain newlines (got %r)" % value)
        try:
            if six.PY3:
                if isinstance(value, str):
                    # Ensure string is valid in given charset
                    value.encode(charset)
                else:
                    # Convert bytestring using given charset
                    value = value.decode(charset)
            else:
                if isinstance(value, str):
                    # Ensure string is valid in given charset
                    value.decode(charset)
                else:
                    # Convert unicode string to given charset
                    value = value.encode(charset)
        except UnicodeError as e:
            if mime_encode:
                # Wrapping in str() is a workaround for #12422 under Python 2.
                value = str(Header(value, 'utf-8', maxlinelen=sys.maxsize).encode())
            else:
                e.reason += ', HTTP response headers must be in %s format' % charset
                raise
        return value
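
Here sys.maxsize is passed as maxlinelen so that email.header.Header never folds the encoded value across multiple lines; folded headers would contain exactly the newlines this method forbids. The effect in isolation:

import sys
from email.header import Header

value = 'café and friends'
# maxlinelen=sys.maxsize disables MIME line folding entirely.
encoded = Header(value, 'utf-8', maxlinelen=sys.maxsize).encode()
print(encoded)          # '=?utf-8?...?=' with no folding
print('\n' in encoded)  # False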

Example 57

Project: scikit-bio Source File: _subsample.py
@experimental(as_of="0.4.0")
def isubsample(items, maximum, minimum=1, buf_size=1000, bin_f=None):
    """Randomly subsample items from bins, without replacement.

    Randomly subsample items without replacement from an unknown number of
    input items that may fall into an unknown number of bins. This method is
    intended for data that either a) cannot fit into memory or b) consist of
    collections of arbitrary datatypes.

    Parameters
    ----------
    items : Iterable
        The items to evaluate.
    maximum : unsigned int
        The maximum number of items per bin.
    minimum : unsigned int, optional
        The minimum number of items per bin. The default is 1.
    buf_size : unsigned int, optional
        The size of the random value buffer. This buffer holds the random
        values assigned to each item from items. In practice, it is unlikely
        that this value will need to change. Increasing it will require more
        resident memory, but potentially reduce the number of function calls
        made to the PRNG, whereas decreasing it will result in more function
        calls and lower memory overhead. The default is 1000.
    bin_f : function, optional
        Method to determine what bin an item is associated with. If None (the
        default), then all items are considered to be part of the same bin.
        This function will be provided with each entry in items, and must
        return a hashable value indicating the bin that that entry should be
        placed in.

    Returns
    -------
    generator
        (bin, item)

    Raises
    ------
    ValueError
        If ``minimum`` is > ``maximum``.
    ValueError
        If ``minimum`` < 1 or if ``maximum`` < 1.

    See Also
    --------
    subsample_counts

    Notes
    -----
    Randomly keep up to ``maximum`` items for each bin. Only bins that
    contain at least ``minimum`` items are returned.

    This method will hold at most ``maximum`` * N items, where N is the number
    of bins.

    All items associated with a bin have an equal probability of being retained.

    Examples
    --------
    Randomly keep up to 2 sequences per sample from a set of demultiplexed
    sequences:

    >>> from skbio.stats import isubsample
    >>> import numpy as np
    >>> np.random.seed(123)
    >>> seqs = [('sampleA', 'AATTGG'),
    ...         ('sampleB', 'ATATATAT'),
    ...         ('sampleC', 'ATGGCC'),
    ...         ('sampleB', 'ATGGCT'),
    ...         ('sampleB', 'ATGGCG'),
    ...         ('sampleA', 'ATGGCA')]
    >>> bin_f = lambda item: item[0]
    >>> for bin_, item in sorted(isubsample(seqs, 2, bin_f=bin_f)):
    ...     print(bin_, item[1])
    sampleA AATTGG
    sampleA ATGGCA
    sampleB ATATATAT
    sampleB ATGGCG
    sampleC ATGGCC

    Now, let's set the minimum to 2:

    >>> bin_f = lambda item: item[0]
    >>> for bin_, item in sorted(isubsample(seqs, 2, 2, bin_f=bin_f)):
    ...     print(bin_, item[1])
    sampleA AATTGG
    sampleA ATGGCA
    sampleB ATATATAT
    sampleB ATGGCG
    """
    if minimum > maximum:
        raise ValueError("minimum cannot be > maximum.")
    if minimum < 1 or maximum < 1:
        raise ValueError("minimum and maximum must be > 0.")
    if bin_f is None:
        def bin_f(x):
            return True

    # buffer some random values
    random_values = np.random.randint(0, sys.maxsize, buf_size)
    random_idx = 0

    result = defaultdict(list)
    for item in items:
        bin_ = bin_f(item)
        heap = result[bin_]

        # pull a random value, and recompute random values if we've consumed
        # our buffer
        random_value = random_values[random_idx]
        random_idx += 1
        if random_idx >= buf_size:
            random_values = np.random.randint(0, sys.maxsize, buf_size)
            random_idx = 0

        # push our item on to the heap and drop the smallest if necessary
        heappush(heap, (random_value, copy(item)))
        if len(heap) > maximum:
            heappop(heap)

    # yield items
    for bin_, heap in result.items():
        if len(heap) < minimum:
            continue

        for _, item in heap:
            yield (bin_, item)
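
The heap logic above is a reservoir sample: every item is tagged with a random key drawn from [0, sys.maxsize), and keeping the ``maximum`` largest-keyed items in a min-heap yields a uniform without-replacement sample per bin. A minimal standalone version of the core idea (not scikit-bio code):

import sys
import random
from heapq import heappush, heappop

def reservoir_sample(items, k):
    heap = []
    for item in items:
        # Random key in [0, sys.maxsize); the heap keeps the k largest keys.
        heappush(heap, (random.randrange(sys.maxsize), item))
        if len(heap) > k:
            heappop(heap)  # evict the smallest key
    return [item for _, item in heap]

print(sorted(reservoir_sample(range(100), 5)))  # 5 uniformly chosen items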

Example 58

Project: RxPy Source File: test_replaysubject.py
Function: test_replay_subject_dies_out
def test_replay_subject_dies_out():
    scheduler = TestScheduler()

    xs = scheduler.create_hot_observable(
        on_next(70, 1),
        on_next(110, 2),
        on_next(220, 3),
        on_next(270, 4),
        on_next(340, 5),
        on_next(410, 6),
        on_next(520, 7),
        on_completed(580)
    )

    subject = None

    results1 = scheduler.create_observer()
    results2 = scheduler.create_observer()
    results3 = scheduler.create_observer()
    results4 = scheduler.create_observer()

    def action1(scheduler, state=None):
        nonlocal subject
        subject = ReplaySubject(sys.maxsize, 100, scheduler)
    scheduler.schedule_absolute(100, action1)
    
    def action2(scheduler, state=None):
        xs.subscribe(subject)
    scheduler.schedule_absolute(200, action2)
    
    def action3(scheduler, state=None):
        subject.subscribe(results1)
    scheduler.schedule_absolute(300, action3)
    
    def action4(scheduler, state=None):
        subject.subscribe(results2)
    scheduler.schedule_absolute(400, action4)
    
    def action5(scheduler, state=None):
        subject.subscribe(results3)
    scheduler.schedule_absolute(600, action5)
    
    def action6(scheduler, state=None):
        subject.subscribe(results4)
    scheduler.schedule_absolute(900, action6)
    
    scheduler.start()

    results1.messages.assert_equal(
        on_next(301, 3),
        on_next(302, 4),
        on_next(341, 5),
        on_next(411, 6),
        on_next(521, 7),
        on_completed(581)
    )

    results2.messages.assert_equal(
        on_next(401, 5),
        on_next(411, 6),
        on_next(521, 7),
        on_completed(581)
    )

    results3.messages.assert_equal(
        on_next(601, 7),
        on_completed(602)
    )

    results4.messages.assert_equal(
        on_completed(901)
    )

Example 59

Project: ripe-atlas-tools Source File: probe_search.py
    def run(self):

        if not self.arguments.field:
            self.arguments.field = (
                "id", "asn_v4", "asn_v6", "country", "status")

        if self.arguments.all:
            self.arguments.limit = sys.maxsize if six.PY3 else sys.maxint

        filters = self.build_request_args()

        if not filters and not self.arguments.all:
            raise RipeAtlasToolsException(colourise(
                "Typically you'd want to run this with some arguments to "
                "filter the probe \nlist, as fetching all of the probes can "
                "take a Very Long Time.  However, if you \ndon't care about "
                "the wait, you can use --all and go get yourself a coffee.",
                "blue"
            ))

        self.set_aggregators()
        probes = ProbeRequest(
            return_objects=True, user_agent=self.user_agent, **filters)
        truncated_probes = itertools.islice(probes, self.arguments.limit)

        if self.arguments.ids_only:
            for probe in truncated_probes:
                print(probe.id)
            return

        hr = self._get_horizontal_rule()

        print(self._get_filter_display(filters))
        print(colourise(self._get_header(), "bold"))
        print(colourise(hr, "bold"))

        if self.arguments.aggregate_by:

            buckets = aggregate(list(truncated_probes), self.aggregators)
            self.render_aggregation(buckets)

        else:

            for probe in truncated_probes:
                print(self._get_line(probe))

        print(colourise(hr, "bold"))

        # Print total count of found probes
        print(("{:>" + str(len(hr)) + "}\n").format(
            "Showing {} of {} total probes".format(
                min(self.arguments.limit, probes.total_count) or "all",
                probes.total_count
            )
        ))
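
The six.PY3 branch exists because sys.maxint was removed in Python 3, where plain integers are unbounded; sys.maxsize is the spelling that works on both lines. A version guard without six might look like this (a sketch, not ripe-atlas-tools code):

import sys

try:
    NO_LIMIT = sys.maxint   # Python 2 only
except AttributeError:
    NO_LIMIT = sys.maxsize  # Python 3

print(NO_LIMIT)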

Example 60

Project: Live-Blog Source File: parts.py
    def build_contents(self, node, level=0):
        level += 1
        sections = [sect for sect in node if isinstance(sect, nodes.section)]
        entries = []
        autonum = 0
        depth = self.startnode.details.get('depth', sys.maxsize)
        for section in sections:
            title = section[0]
            auto = title.get('auto')    # May be set by SectNum.
            entrytext = self.copy_and_filter(title)
            reference = nodes.reference('', '', refid=section['ids'][0],
                                        *entrytext)
            ref_id = self.docuement.set_id(reference)
            entry = nodes.paragraph('', '', reference)
            item = nodes.list_item('', entry)
            if ( self.backlinks in ('entry', 'top')
                 and title.next_node(nodes.reference) is None):
                if self.backlinks == 'entry':
                    title['refid'] = ref_id
                elif self.backlinks == 'top':
                    title['refid'] = self.toc_id
            if level < depth:
                subsects = self.build_contents(section, level)
                item += subsects
            entries.append(item)
        if entries:
            contents = nodes.bullet_list('', *entries)
            if auto:
                contents['classes'].append('auto-toc')
            return contents
        else:
            return []

Example 61

Project: OmniMarkupPreviewer Source File: tableparser.py
    def check_columns(self, lines, first_line, columns):
        """
        Check for text in column margins and text overflow in the last column.
        Raise TableMarkupError if anything but whitespace is in column margins.
        Adjust the end value for the last column if there is text overflow.
        """
        # "Infinite" value for a dummy last column's beginning, used to
        # check for text overflow:
        columns.append((sys.maxsize, None))
        lastcol = len(columns) - 2
        # combining characters do not contribute to the column width
        lines = [strip_combining_chars(line) for line in lines]

        for i in range(len(columns) - 1):
            start, end = columns[i]
            nextstart = columns[i+1][0]
            offset = 0
            for line in lines:
                if i == lastcol and line[end:].strip():
                    text = line[start:].rstrip()
                    new_end = start + len(text)
                    columns[i] = (start, new_end)
                    main_start, main_end = self.columns[-1]
                    if new_end > main_end:
                        self.columns[-1] = (main_start, new_end)
                elif line[end:nextstart].strip():
                    raise TableMarkupError('Text in column margin '
                        'in table line %s.' % (first_line+offset+1),
                        offset=first_line+offset)
                offset += 1
        columns.pop()

Example 62

Project: Ultros Source File: public.py
    @run_async_threadpool
    def submit_metrics(self):
        self.log.trace(_("Firing task."))
        compiled = {"plugins": [], "packages": [], "protocols": []}
        if self.status is True:
            self.log.debug(_("Submitting metrics."))
            compiled["plugins"] = [
                obj.info.name for obj in
                self.manager.plugman.plugin_objects.values()
            ]
            compiled["packages"] = self.packages.get_installed_packages()

            for name in self.manager.factories.keys():
                proto = self.manager.get_protocol(name)
                compiled["protocols"].append(proto.TYPE)

            try:
                compiled["enabled"] = True

                is_64bits = sys.maxsize > 2 ** 32

                cpu = platform.processor().strip() or "Unknown"
                _os = platform.system()

                if _os.lower() == "linux":
                    nix = list(platform.linux_distribution())

                    if nix[2]:
                        nix[2] = "({})".format(nix[2])

                    nix = filter(None, nix)

                    if nix:
                        _os = "{}: {}".format(_os, " ".join(nix))
                    else:
                        _os = "{}: Unknown".format(_os)
                else:
                    release = platform.release()

                    if release:
                        _os = "{} {}".format(_os, release)

                ram = psutil.virtual_memory().total / 1048576.0

                python = "%s %s %s" % (
                    platform.python_implementation(),
                    platform.python_version(),
                    "x64" if is_64bits else "x86"
                )

                release = version_info["release"]
                _hash = version_info["hash"] or "Zipball (%s)" % release

                compiled["system"] = {
                    "cpu": cpu,
                    "os": _os,
                    "python": python,
                    "ram": ram,
                    "release": release,
                    "hash": _hash
                }

                r = self.post(self.submit_url % self.data["uuid"], compiled)
                r = json.loads(r)

                self.log.trace(_("Submitted. Result: %s") % r)

                if r["result"] == "error":
                    self.log.error(_("Error submitting metrics: %s")
                                   % r["error"])
            except Exception:
                self.log.exception(_("Error submitting metrics"))
        elif self.status is False:
            self.log.debug(_("Submitting disable message."))
            try:
                compiled["enabled"] = False
                r = self.post(self.submit_url % self.data["uuid"], compiled)
                r = json.loads(r)

                self.log.trace(_("Submitted. Result: %s") % r)

                if r["result"] == "error":
                    self.log.error(_("Error submitting disable message: %s")
                                   % r["error"])
            except Exception:
                self.log.exception(_("Error submitting disable message"))
            else:
                with self.data:
                    self.data["status"] = "disabled"
            finally:
                self.task.stop()
        elif self.status == "destroy":
            self.log.debug(_("Submitting destruction message."))
            try:
                r = self.get(self.destroy_url % self.data["uuid"])
                r = json.loads(r)

                self.log.trace("Submitted. Result: %s" % r)

                if r["result"] == "success":
                    self.log.info(_("Metrics data has been removed from the "
                                    "server."))
                else:
                    self.log.warn(_("Unknown UUID, data was already removed "
                                    "from the server."))
            except Exception:
                self.log.exception(_("Error submitting destruction message"))
            else:
                with self.data:
                    del self.data["uuid"]
                    self.data["status"] = "disabled"
            finally:
                self.task.stop()
        else:
            self.log.warn(_("Unknown status: %s") % self.status)
            self.task.stop()
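
The check is_64bits = sys.maxsize > 2 ** 32 is the approach the platform module documentation recommends for detecting a 64-bit interpreter, since platform.architecture() can be unreliable (notably on macOS). Standalone:

import sys
import struct

is_64bits = sys.maxsize > 2 ** 32
print("x64" if is_64bits else "x86")

# Cross-check: size of a pointer ("P") in bits.
print(struct.calcsize("P") * 8)  # 64 on a 64-bit build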

Example 63

Project: stolos Source File: qbcli_redis.py
    def __init__(self, path):
        assert self.SCRIPTS, 'child class must define SCRIPTS'
        assert self._EXTEND_LOCK_SCRIPT_NAME, (
            'child class must define _EXTEND_LOCK_SCRIPT_NAME')

        self._client_id = str(random.randint(0, sys.maxsize))
        self._path = path

        self._lock_timeout = get_NS().qb_redis_lock_timeout
        self._max_network_delay = get_NS().qb_redis_max_network_delay

        if not BaseStolosRedis._BASE_INITIALIZED:
            BaseStolosRedis._BASE_INITIALIZED = True

            # use signals to trigger main thread to exit if child thread errors
            # be nice and don't replace any signal handlers if already set
            for sig in [signal.SIGUSR1, signal.SIGUSR2,
                        'fail: no user-level signals available']:
                if signal.getsignal(sig) == 0:
                    BaseStolosRedis._SIGNAL = sig
                    break
            signal.signal(BaseStolosRedis._SIGNAL, _raise_err)

        if not self._INITIALIZED:
            self._INITIALIZED = True
            self.LOCKS = dict()

            # submit class's lua scripts to redis and store the SHA's
            self._SHAS = dict()
            for k in self.SCRIPTS:
                self._SHAS[k] = raw_client().script_load(
                    self.SCRIPTS[k]['script'])

            # initialize a lock extender thread for each class type that exists
            # we could just group all together, but this seems like a good idea
            # start extending locks in the background
            t = threading.Thread(
                name=("stolos.queue_backend.qbcli_redis.%s Extender"
                      % self.__class__.__name__),
                target=self._extend_lock_in_background)
            t.daemon = True
            t.start()

Example 64

Project: Vase Source File: handlers.py
    @asyncio.coroutine
    def handle(self, request, writer):
        origin = request.get('origin', 'null')
        if origin == 'null':
            origin = '*'

        if request.method == 'GET':
            data = {
                'websocket': self._allow_websocket,
                'cookie_needed': False,
                'origins': ['*:*'],
                'entropy': random.SystemRandom().randint(1, sys.maxsize)
            }
            content = json.dumps(data).encode('utf-8')
            writer.status = 200
            writer.add_headers(
                ('Content-Type', 'application/json;charset=UTF-8'),
                ('Cache-Control', 'no-store, no-cache, must-revalidate, max-age=0'),
                ('Content-Length', str(len(content))),
                ('Access-Control-Allow-Origin', origin),
                ('Access-Control-Allow-Credentials', 'true'),
            )
            writer.write_body(content)
        elif request.method == 'OPTIONS':
            writer.status = 204
            date = datetime.utcnow() + timedelta(milliseconds=31536000)

            methods = 'OPTIONS, {}'.format(', '.join(self.allowed_methods)).encode('utf-8')
            writer.add_headers(
                ('Content-Type', 'application/json;charset=UTF-8'),
                ('Cache-Control', 'public, max-age=31536000'),
                ('Expires', email.utils.format_datetime(date)),
                ('Content-Length', '0'),
                ('Access-Control-Allow-Origin', origin),
                ('Access-Control-Allow-Credentials', 'true'),
                ('Access-Control-Allow-Methods', methods),
                ('Access-Control-Max-Age', '31536000'),
            )
            writer.write_body('')
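
Unlike the module-level random functions, random.SystemRandom draws from os.urandom, so the entropy value handed to SockJS clients here is not predictable from previous outputs; the (1, sys.maxsize) range simply fills roughly a machine word. In isolation:

import sys
import random

rng = random.SystemRandom()  # backed by os.urandom, not a seeded PRNG
print(rng.randint(1, sys.maxsize))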

Example 65

Project: aws-iot-device-sdk-python Source File: mqttCore.py
    def __init__(self, clientID, cleanSession, protocol, srcUseWebsocket=False):
        if clientID is None or cleanSession is None or protocol is None:
            raise TypeError("None type inputs detected.")
        # All internal data members should be unique per mqttCore instance
        # Tool handler
        self._log = logging.getLogger(__name__)
        self._clientID = clientID
        self._pahoClient = self.createPahoClient(clientID, cleanSession, None, protocol, srcUseWebsocket)  # User data is set to None as default
        self._log.debug("Paho MQTT Client init.")
        self._log.info("ClientID: " + str(clientID))
        protocolType = "MQTTv3.1.1"
        if protocol == 3:
            protocolType = "MQTTv3.1"
        self._log.info("Protocol: " + protocolType)
        self._pahoClient.on_connect = self.on_connect
        self._pahoClient.on_disconnect = self.on_disconnect
        self._pahoClient.on_message = self.on_message
        self._pahoClient.on_subscribe = self.on_subscribe
        self._pahoClient.on_unsubscribe = self.on_unsubscribe
        self._log.debug("Register Paho MQTT Client callbacks.")
        # Tool data structure
        self._connectResultCode = sys.maxsize
        self._disconnectResultCode = sys.maxsize
        self._subscribeSent = False
        self._unsubscribeSent = False
        self._connectdisconnectTimeout = 30  # Default connect/disconnect timeout set to 30 seconds
        self._mqttOperationTimeout = 5  # Default MQTT operation timeout set to 5 seconds
        # Use Websocket
        self._useWebsocket = srcUseWebsocket
        # Subscribe record
        self._subscribePool = dict()
        self._resubscribeCount = -1  # Ensure that initial value for _resubscribeCount does not trigger draining on each SUBACK
        # Broker information
        self._host = ""
        self._port = -1
        self._cafile = ""
        self._key = ""
        self._cert = ""
        self._stsToken = ""
        # Operation mutex
        self._publishLock = Lock()
        self._subscribeLock = Lock()
        self._unsubscribeLock = Lock()
        # OfflinePublishQueue
        self._offlinePublishQueueLock = Lock()
        self._offlinePublishQueue = offlinePublishQueue.offlinePublishQueue(20, 1)
        # Draining interval in seconds
        self._drainingIntervalSecond = 0.5
        # Is Draining complete
        self._drainingComplete = True
        self._log.debug("mqttCore init.")
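
Initializing _connectResultCode and _disconnectResultCode to sys.maxsize makes them "no result yet" sentinels: real MQTT result codes are small non-negative integers, so sys.maxsize can never collide with one. The pattern in miniature (names are illustrative):

import sys

UNSET = sys.maxsize  # real result codes are small ints, so no collision

connect_result = UNSET
# ... an on_connect callback would assign a real code here ...
if connect_result == UNSET:
    print("connect has not completed yet")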

Example 66

Project: ores Source File: precached.py
def run(stream_url, ores_url, metrics_collector, config, delay, notify, verbose):

    if verbose:
        log_level = logging.DEBUG
    else:
        log_level = logging.INFO

    logging.basicConfig(
        level=log_level,
        format='%(asctime)s %(levelname)s:%(name)s -- %(message)s'
    )

    # Make requests and socketIO_client be quiet.  They are very noisy.
    logging.getLogger("requests").setLevel(logging.WARNING)
    logging.getLogger("socketIO_client").setLevel(logging.ERROR)  # SHUT UP!
    requests.packages.urllib3.disable_warnings()
    # If we're using logging for metrics collection, show it.
    logging.getLogger("ores.metrics_collectors").setLevel(logging.DEBUG)

    if not notify:
        logger.info('Not being run as a service, watchdog disabled')

    score_on = build_score_on(config)
    RCNamespace = build_RCNamespace(
        stream_url, score_on, ores_url, MAX_WORKERS, metrics_collector, delay,
        notify)

    socketIO = socketIO_client.SocketIO(stream_url, 80)
    socketIO.define(RCNamespace, '/rc')

    try:
        socketIO.wait(seconds=sys.maxsize)
    except KeyboardInterrupt:
        print("Keyboard interrupt detected.  Shutting down.")
        socketIO.disconnect()

Example 67

Project: ultisnips Source File: _diff.py
Function: diff
def diff(a, b, sline=0):
    """
    Return a list of deletions and insertions that will turn 'a' into 'b'. This
    is done by traversing an implicit edit graph and searching for the shortest
    route. The basic idea is as follows:

        - Matching a character is free as long as there was no
          deletion/insertion before it; otherwise, matching is treated as
          delete + insert [1].
        - Deleting one character has the same cost everywhere. Each additional
          character costs only half of the first deletion.
        - Insertion is cheaper the earlier it happens. The first character is
          more expensive than any later one [2].

    [1] This means that world -> aolsa will be "D" world + "I" aolsa instead of
        "D" w, "D" rld, "I" a, "I" lsa
    [2] This means that "hello\n\n" -> "hello\n\n\n" will insert a newline after
        hello and not after \n
    """
    d = defaultdict(list)  # pylint:disable=invalid-name
    seen = defaultdict(lambda: sys.maxsize)

    d[0] = [(0, 0, sline, 0, ())]
    cost = 0
    deletion_cost = len(a) + len(b)
    insertion_cost = len(a) + len(b)
    while True:
        while len(d[cost]):
            x, y, line, col, what = d[cost].pop()

            if a[x:] == b[y:]:
                return what

            if x < len(a) and y < len(b) and a[x] == b[y]:
                ncol = col + 1
                nline = line
                if a[x] == '\n':
                    ncol = 0
                    nline += 1
                lcost = cost + 1
                if (what and what[-1][0] == 'D' and what[-1][1] == line and
                        what[-1][2] == col and a[x] != '\n'):
                    # Matching directly after a deletion should be as costly as
                    # DELETE + INSERT + a bit
                    lcost = (deletion_cost + insertion_cost) * 1.5
                if seen[x + 1, y + 1] > lcost:
                    d[lcost].append((x + 1, y + 1, nline, ncol, what))
                    seen[x + 1, y + 1] = lcost
            if y < len(b):  # INSERT
                ncol = col + 1
                nline = line
                if b[y] == '\n':
                    ncol = 0
                    nline += 1
                if (what and what[-1][0] == 'I' and what[-1][1] == nline and
                            what[-1][2] + len(what[-1][-1]) == col and b[y] != '\n' and
                            seen[x, y + 1] > cost + (insertion_cost + ncol) // 2
                        ):
                    seen[x, y + 1] = cost + (insertion_cost + ncol) // 2
                    d[cost + (insertion_cost + ncol) // 2].append(
                        (x, y + 1, line, ncol, what[:-1] + (
                            ('I', what[-1][1], what[-1][2],
                             what[-1][-1] + b[y]),)
                         )
                    )
                elif seen[x, y + 1] > cost + insertion_cost + ncol:
                    seen[x, y + 1] = cost + insertion_cost + ncol
                    d[cost + ncol + insertion_cost].append((x, y + 1, nline, ncol,
                                                            what + (('I', line, col, b[y]),))
                                                           )
            if x < len(a):  # DELETE
                if (what and what[-1][0] == 'D' and what[-1][1] == line and
                            what[-1][2] == col and a[x] != '\n' and
                            what[-1][-1] != '\n' and
                            seen[x + 1, y] > cost + deletion_cost // 2
                        ):
                    seen[x + 1, y] = cost + deletion_cost // 2
                    d[cost + deletion_cost // 2].append(
                        (x + 1, y, line, col, what[:-1] + (
                            ('D', line, col, what[-1][-1] + a[x]),))
                    )
                elif seen[x + 1, y] > cost + deletion_cost:
                    seen[x + 1, y] = cost + deletion_cost
                    d[cost + deletion_cost].append((x + 1, y, line, col, what +
                                                    (('D', line, col, a[x]),))
                                                   )
        cost += 1
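
The line seen = defaultdict(lambda: sys.maxsize) gives every state an "infinite" best-known cost until it is first reached, the standard initialization for Dijkstra-style shortest-path searches like this edit-graph traversal. The same idiom on a toy graph (not ultisnips code):

import sys
from heapq import heappush, heappop
from collections import defaultdict

def dijkstra(graph, start):
    # Unvisited nodes report sys.maxsize, i.e. "infinitely far".
    dist = defaultdict(lambda: sys.maxsize)
    dist[start] = 0
    heap = [(0, start)]
    while heap:
        d, node = heappop(heap)
        if d > dist[node]:
            continue  # stale entry
        for neighbor, weight in graph.get(node, []):
            nd = d + weight
            if nd < dist[neighbor]:
                dist[neighbor] = nd
                heappush(heap, (nd, neighbor))
    return dist

graph = {'a': [('b', 1), ('c', 4)], 'b': [('c', 2)]}
print(dict(dijkstra(graph, 'a')))  # {'a': 0, 'b': 1, 'c': 3}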

Example 68

Project: brython Source File: test_struct.py
    def test_integers(self):
        # Integer tests (bBhHiIlLqQnN).
        import binascii

        class IntTester(unittest.TestCase):
            def __init__(self, format):
                super(IntTester, self).__init__(methodName='test_one')
                self.format = format
                self.code = format[-1]
                self.byteorder = format[:-1]
                if not self.byteorder in byteorders:
                    raise ValueError("unrecognized packing byteorder: %s" %
                                     self.byteorder)
                self.bytesize = struct.calcsize(format)
                self.bitsize = self.bytesize * 8
                if self.code in tuple('bhilqn'):
                    self.signed = True
                    self.min_value = -(2**(self.bitsize-1))
                    self.max_value = 2**(self.bitsize-1) - 1
                elif self.code in tuple('BHILQN'):
                    self.signed = False
                    self.min_value = 0
                    self.max_value = 2**self.bitsize - 1
                else:
                    raise ValueError("unrecognized format code: %s" %
                                     self.code)

            def test_one(self, x, pack=struct.pack,
                                  unpack=struct.unpack,
                                  unhexlify=binascii.unhexlify):

                format = self.format
                if self.min_value <= x <= self.max_value:
                    expected = x
                    if self.signed and x < 0:
                        expected += 1 << self.bitsize
                    self.assertGreaterEqual(expected, 0)
                    expected = '%x' % expected
                    if len(expected) & 1:
                        expected = "0" + expected
                    expected = expected.encode('ascii')
                    expected = unhexlify(expected)
                    expected = (b"\x00" * (self.bytesize - len(expected)) +
                                expected)
                    if (self.byteorder == '<' or
                        self.byteorder in ('', '@', '=') and not ISBIGENDIAN):
                        expected = string_reverse(expected)
                    self.assertEqual(len(expected), self.bytesize)

                    # Pack work?
                    got = pack(format, x)
                    self.assertEqual(got, expected)

                    # Unpack work?
                    retrieved = unpack(format, got)[0]
                    self.assertEqual(x, retrieved)

                    # Adding any byte should cause a "too big" error.
                    self.assertRaises((struct.error, TypeError), unpack, format,
                                                                 b'\x01' + got)
                else:
                    # x is out of range -- verify pack realizes that.
                    self.assertRaises((OverflowError, ValueError, struct.error),
                                      pack, format, x)

            def run(self):
                from random import randrange

                # Create all interesting powers of 2.
                values = []
                for exp in range(self.bitsize + 3):
                    values.append(1 << exp)

                # Add some random values.
                for i in range(self.bitsize):
                    val = 0
                    for j in range(self.bytesize):
                        val = (val << 8) | randrange(256)
                    values.append(val)

                # Values absorbed from other tests
                values.extend([300, 700000, sys.maxsize*4])

                # Try all those, and their negations, and +-1 from
                # them.  Note that this tests all power-of-2
                # boundaries in range, and a few out of range, plus
                # +-(2**n +- 1).
                for base in values:
                    for val in -base, base:
                        for incr in -1, 0, 1:
                            x = val + incr
                            self.test_one(x)

                # Some error cases.
                class NotAnInt:
                    def __int__(self):
                        return 42

                # Objects with an '__index__' method should be allowed
                # to pack as integers.  That is assuming the implemented
                # '__index__' method returns an 'int'.
                class Indexable(object):
                    def __init__(self, value):
                        self._value = value

                    def __index__(self):
                        return self._value

                # If the '__index__' method raises a type error, then
                # '__int__' should be used with a deprecation warning.
                class BadIndex(object):
                    def __index__(self):
                        raise TypeError

                    def __int__(self):
                        return 42

                self.assertRaises((TypeError, struct.error),
                                  struct.pack, self.format,
                                  "a string")
                self.assertRaises((TypeError, struct.error),
                                  struct.pack, self.format,
                                  randrange)
                self.assertRaises((TypeError, struct.error),
                                  struct.pack, self.format,
                                  3+42j)
                self.assertRaises((TypeError, struct.error),
                                  struct.pack, self.format,
                                  NotAnInt())
                self.assertRaises((TypeError, struct.error),
                                  struct.pack, self.format,
                                  BadIndex())

                # Check for legitimate values from '__index__'.
                for obj in (Indexable(0), Indexable(10), Indexable(17),
                            Indexable(42), Indexable(100), Indexable(127)):
                    try:
                        struct.pack(format, obj)
                    except:
                        self.fail("integer code pack failed on object "
                                  "with '__index__' method")

                # Check for bogus values from '__index__'.
                for obj in (Indexable(b'a'), Indexable('b'), Indexable(None),
                            Indexable({'a': 1}), Indexable([1, 2, 3])):
                    self.assertRaises((TypeError, struct.error),
                                      struct.pack, self.format,
                                      obj)

        for code, byteorder in iter_integer_formats():
            format = byteorder+code
            t = IntTester(format)
            t.run()
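
sys.maxsize * 4 appears among the "absorbed" values because it is guaranteed to overflow every native integer format, exercising the out-of-range branch. A quick demonstration (output assumes a 64-bit build):

import sys
import struct

try:
    struct.pack('q', sys.maxsize * 4)  # 'q' is a signed 64-bit integer
except struct.error as e:
    print("struct.error:", e)

print(struct.pack('q', sys.maxsize))   # 2**63 - 1 fits 'q' exactly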

Example 69

Project: pychess Source File: scorePanel.py
    def game_changed(self, model, ply):
        if len(self.plot) + model.lowply > ply:
            return

        for i in range(len(self.plot) + model.lowply, ply):
            if i in model.scores:
                points = model.scores[i][1]
            else:
                points = leval.evaluateComplete(
                    model.getBoardAtPly(i).board, WHITE)
            self.plot.addScore(points)

        if model.status == DRAW:
            points = 0
        elif model.status == WHITEWON:
            points = sys.maxsize
        elif model.status == BLACKWON:
            points = -sys.maxsize
        else:
            if ply in model.scores:
                points = model.scores[ply][1]
            else:
                points = leval.evaluateComplete(
                    model.getBoardAtPly(ply).board, WHITE)
        self.plot.addScore(points)

        # As shownChanged will normally be emitted just after game_changed -
        # if we are viewing the latest position - we can do the selection change
        # now, and thereby avoid redraw being called twice
        if self.plot.selected == ply - model.lowply - 1:
            self.plot.select(ply - model.lowply)
        self.plot.redraw()

        # Remove the return below to debug the eval function
        return

        board = model.boards[-1].board
        opboard = model.boards[-1].clone().board
        opboard.setColor(1 - opboard.color)
        material, phase = leval.evalMaterial(board)
        if board.color == WHITE:
            print("material", -material)
            e1 = leval.evalKingTropism(board)
            e2 = leval.evalKingTropism(opboard)
            print("evaluation: %d + %d = %d " % (e1, e2, e1 + e2))
            p1 = leval.evalPawnStructure(board, phase)
            p2 = leval.evalPawnStructure(opboard, phase)
            print("pawns: %d + %d = %d " % (p1, p2, p1 + p2))
            print("knights:", -leval.evalKnights(board))
            print("king:", -leval.evalKing(board, phase))
        else:
            print("material", material)
            print("evaluation:", leval.evalKingTropism(board))
            print("pawns:", leval.evalPawnStructure(board, phase))
            print("pawns2:", leval.evalPawnStructure(opboard, phase))
            print("pawns3:", leval.evalPawnStructure(board, phase) +
                  leval.evalPawnStructure(opboard, phase))
            print("knights:", leval.evalKnights(board))
            print("king:", leval.evalKing(board, phase))
        print("----------------------")

Example 70

Project: editxt Source File: window.py
    def focus(self, value, offset=1):
        """Change the current docuement by navigating the tree or recent docuements

        :param value: One of the direction constants in
        `editxt.constants` or an editor's file path. `NEXT` and
        `PREVIOUS` select items in the recent editors stack. `UP` and
        `DOWN` move up or down in the tree.
        :param offset: The number of positions to move in direction.
        :returns: True if a new editor was focused, otherwise false.
        """
        def focus(ident):
            for project in self.projects:
                if project.id == ident:
                    self.current_editor = project
                    return True
                else:
                    for editor in project.editors:
                        if editor.id == ident:
                            self.current_editor = editor
                            return True
            return False
        def get_item_in_tree(current, offset):
            if current is not None:
                items = []
                index = 0
                stop = sys.maxsize
                for project in self.projects:
                    items.append(project)
                    if current.id == project.id:
                        stop = index + offset
                        if stop <= index:
                            break
                    index += 1
                    if project.expanded:
                        for editor in project.editors:
                            items.append(editor)
                            if current.id == editor.id:
                                stop = index + offset
                                if stop <= index:
                                    break
                            index += 1
                if 0 <= stop < len(items):
                    return items[stop]
            return None
        if isinstance(value, const.Constant):
            if value == const.PREVIOUS or value == const.NEXT:
                history = ((list(reversed(self.recent)) + [0])
                           if self._recent_history is None
                           else self._recent_history)
                if value == const.PREVIOUS:
                    offset = offset + history[-1]
                else:
                    offset = history[-1] - offset
                if 0 <= offset < len(history) - 1:
                    ok = focus(history[offset])
                    if ok:
                        history[-1] = offset
                        self._recent_history = history
                    return ok
                return False
            if value == const.UP:
                offset = -offset
            editor = get_item_in_tree(self.current_editor, offset)
            if editor is not None:
                self.current_editor = editor
                return True
        if isinstance(value, (Editor, Project)):
            return focus(value.id)
        return False
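
The `stop = sys.maxsize` assignment above is a "not found yet" sentinel: it is larger than any valid list index, so the final `0 <= stop < len(items)` check can only pass once the current item has actually been seen. A minimal sketch of the same idiom (the `neighbor` helper and its inputs are invented for illustration, not part of editxt):

    import sys

    def neighbor(items, current, offset):
        """Return the item `offset` positions away from `current`, or None."""
        stop = sys.maxsize  # sentinel: larger than any valid index
        for index, item in enumerate(items):
            if item == current:
                stop = index + offset
                break
        if 0 <= stop < len(items):
            return items[stop]
        return None

    print(neighbor(["a", "b", "c"], "b", 1))   # c
    print(neighbor(["a", "b", "c"], "x", 1))   # None -- stop stayed at sys.maxsize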

Example 71

Project: RxPY Source File: test_replaysubject.py
def test_replay_subject_dies_out():
    scheduler = TestScheduler()

    xs = scheduler.create_hot_observable(
        on_next(70, 1),
        on_next(110, 2),
        on_next(220, 3),
        on_next(270, 4),
        on_next(340, 5),
        on_next(410, 6),
        on_next(520, 7),
        on_completed(580)
    )

    subject = [None]

    results1 = scheduler.create_observer()
    results2 = scheduler.create_observer()
    results3 = scheduler.create_observer()
    results4 = scheduler.create_observer()

    def action1(scheduler, state=None):
        subject[0] = ReplaySubject(sys.maxsize, 100, scheduler)
    scheduler.schedule_absolute(100, action1)

    def action2(scheduler, state=None):
        xs.subscribe(subject[0])
    scheduler.schedule_absolute(200, action2)

    def action3(scheduler, state=None):
        subject[0].subscribe(results1)
    scheduler.schedule_absolute(300, action3)

    def action4(scheduler, state=None):
        subject[0].subscribe(results2)
    scheduler.schedule_absolute(400, action4)

    def action5(scheduler, state=None):
        subject[0].subscribe(results3)
    scheduler.schedule_absolute(600, action5)

    def action6(scheduler, state=None):
        subject[0].subscribe(results4)
    scheduler.schedule_absolute(900, action6)

    scheduler.start()

    results1.messages.assert_equal(
        on_next(301, 3),
        on_next(302, 4),
        on_next(341, 5),
        on_next(411, 6),
        on_next(521, 7),
        on_completed(581)
    )

    results2.messages.assert_equal(
        on_next(401, 5),
        on_next(411, 6),
        on_next(521, 7),
        on_completed(581)
    )

    results3.messages.assert_equal(
        on_next(601, 7),
        on_completed(602)
    )

    results4.messages.assert_equal(
        on_completed(901)
    )
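
Passing `sys.maxsize` as the ReplaySubject buffer size means "no count limit", so only the 100-tick time window trims the replay buffer. A rough sketch of that two-limit buffering idea, using a hypothetical `ReplayBuffer` class rather than RxPY's real one:

    import sys
    import time
    from collections import deque

    class ReplayBuffer:
        """Keep at most `size` items that are no older than `window` seconds."""

        def __init__(self, size=sys.maxsize, window=float("inf")):
            self.size, self.window = size, window
            self._items = deque()  # (timestamp, value) pairs

        def push(self, value):
            now = time.time()
            self._items.append((now, value))
            while self._items and (len(self._items) > self.size or
                                   now - self._items[0][0] > self.window):
                self._items.popleft()

        def replay(self):
            return [value for _, value in self._items]

    buf = ReplayBuffer(window=100)  # unbounded count, 100-second window
    buf.push("hello")
    print(buf.replay())             # ['hello']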

Example 72

Project: curtsies Source File: input.py
Function: send
    def _send(self, timeout):
        def find_key():
            """Returns keypress identified by adding unprocessed bytes or None"""
            current_bytes = []
            while self.unprocessed_bytes:
                current_bytes.append(self.unprocessed_bytes.pop(0))
                e = events.get_key(current_bytes,
                                   getpreferredencoding(),
                                   keynames=self.keynames,
                                   full=len(self.unprocessed_bytes)==0)
                if e is not None:
                    self.current_bytes = []
                    return e
            if current_bytes:  # incomplete keys shouldn't happen
                raise ValueError("Couldn't identify key sequence: %r" % self.current_bytes)

        if self.sigints:
            return self.sigints.pop()
        if self.queued_events:
            return self.queued_events.pop(0)
        if self.queued_interrupting_events:
            return self.queued_interrupting_events.pop(0)

        if self.queued_scheduled_events:
            self.queued_scheduled_events.sort()  #TODO use a data structure that inserts sorted
            when, _ = self.queued_scheduled_events[0]
            if when < time.time():
                logger.warning('popping an event! %r %r',
                               self.queued_scheduled_events[0],
                               self.queued_scheduled_events[1:])
                return self.queued_scheduled_events.pop(0)[1]
            else:
                time_until_check = min(max(0, when - time.time()), timeout if timeout is not None else sys.maxsize)
        else:
            time_until_check = timeout

        # try to find an already pressed key from prev input
        e = find_key()
        if e is not None:
            return e

        stdin_ready_for_read, event = self._wait_for_read_ready_or_timeout(time_until_check)
        if event:
            return event
        if self.queued_scheduled_events and when < time.time():  # when should always be defined
            # because queued_scheduled_events should not be modified during this time
            logger.warning('popping an event! %r %r', self.queued_scheduled_events[0],
                           self.queued_scheduled_events[1:])
            return self.queued_scheduled_events.pop(0)[1]
        if not stdin_ready_for_read:
            return None

        num_bytes = self._nonblocking_read()
        if num_bytes == 0:
            # thought stdin was ready, but there were no bytes to read; this
            # is triggered when SIGTSTP is sent by dsusp
            return None

        if self.paste_threshold is not None and num_bytes > self.paste_threshold:
            paste = events.PasteEvent()
            while True:
                if len(self.unprocessed_bytes) < events.MAX_KEYPRESS_SIZE:
                    self._nonblocking_read()  # may need to read to get the rest of a keypress
                e = find_key()
                if e is None:
                    return paste
                else:
                    paste.events.append(e)
        else:
            e = find_key()
            assert e is not None
            return e
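
The `timeout if timeout is not None else sys.maxsize` expression substitutes a huge number for "wait forever" so that `min()` always compares two numbers. The computation in isolation (function name and arguments are illustrative):

    import sys

    def seconds_until_check(next_event_in, timeout=None):
        """Cap a wait on the earlier of a scheduled event and a caller timeout."""
        # sys.maxsize stands in for "no timeout" so min() has two operands
        return min(max(0, next_event_in),
                   timeout if timeout is not None else sys.maxsize)

    print(seconds_until_check(5.0))        # 5.0 -- no caller timeout
    print(seconds_until_check(5.0, 2.0))   # 2.0 -- the caller timeout wins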

Example 73

Project: pychess Source File: PyChess.py
    def __go(self, ondone=None):
        """ Finds and prints the best move from the current position """

        mv = False if self.outOfBook else self.__getBestOpening()
        if mv:
            mvs = [mv]

        if not mv:

            lsearch.skipPruneChance = self.skipPruneChance
            lsearch.searching = True

            timed = self.basetime > 0

            if self.searchtime > 0:
                usetime = self.searchtime
            else:
                usetime = self.clock[self.playingAs] / self.__remainingMovesA()
                if self.clock[self.playingAs] > 10:
                    # If we have time, we assume 40 moves rather than 80
                    usetime *= 2
                # The increment is a constant. We'll use this always
                usetime += self.increment[self.playingAs]

            prevtime = 0
            starttime = time()
            lsearch.endtime = starttime + usetime if timed else sys.maxsize
            if self.debug:
                if timed:
                    print("# Time left: %3.2f s; Planing to think for %3.2f s" %
                          (self.clock[self.playingAs], usetime))
                else:
                    print("# Searching to depth %d without timelimit" % self.sd)

            for depth in range(1, self.sd + 1):
                # Heuristic time saving
                # Don't waste time if the estimated time isn't enough to
                # complete the next depth
                if timed and usetime <= prevtime * 4 and usetime > 1:
                    break
                lsearch.timecheck_counter = lsearch.TIMECHECK_FREQ
                search_result = alphaBeta(self.board, depth)
                if lsearch.searching:
                    mvs, self.scr = search_result
                    if time() > lsearch.endtime:
                        break
                    if self.post:
                        pv1 = " ".join(listToSan(self.board, mvs))
                        time_cs = int(100 * (time() - starttime))
                        print("%s %s %s %s %s" % (
                            depth, self.scr, time_cs, lsearch.nodes, pv1))
                else:
                    # We were interrupted
                    if depth == 1:
                        mvs, self.scr = search_result
                    break
                prevtime = time() - starttime - prevtime

                self.clock[self.playingAs] -= time(
                ) - starttime - self.increment[self.playingAs]

            if not mvs:
                if not lsearch.searching:
                    # We were interrupted
                    lsearch.nodes = 0
                    return

                # This should only happen in terminal mode

                if self.scr == 0:
                    print("result %s" % reprResult[DRAW])
                elif self.scr < 0:
                    if self.board.color == WHITE:
                        print("result %s" % reprResult[BLACKWON])
                    else:
                        print("result %s" % reprResult[WHITEWON])
                else:
                    if self.board.color == WHITE:
                        print("result %s" % reprResult[WHITEWON])
                    else:
                        print("result %s" % reprResult[BLACKWON])
                return

            lsearch.nodes = 0
            lsearch.searching = False

        move = mvs[0]
        sanmove = toSAN(self.board, move)
        if ondone:
            ondone(sanmove)
        return sanmove
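
`lsearch.endtime = starttime + usetime if timed else sys.maxsize` puts the deadline so far in the future that `time() > lsearch.endtime` is effectively never true in untimed mode. A toy sketch of the same pattern, with a placeholder standing in for the real search:

    import sys
    from time import time

    def iterative_deepening(depth_limit, usetime=None):
        """Deepen until the depth limit, or until a soft deadline when timed."""
        endtime = time() + usetime if usetime else sys.maxsize  # "never" if untimed
        best = None
        for depth in range(1, depth_limit + 1):
            best = ("move", depth)   # placeholder for a real search call
            if time() > endtime:     # effectively unreachable when untimed
                break
        return best

    print(iterative_deepening(4))         # searches all 4 plies
    print(iterative_deepening(4, 1e-9))   # deadline already passed: stops at depth 1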

Example 74

Project: batch-shipyard Source File: graph.py
def graph_data(data: dict, sizes: dict, offer: str, sku: str):
    """Graph data via gnuplot
    :param dict data: timing data
    :param dict sizes: size data
    :param str offer: offer
    :param str sku: sku
    """
    print(sizes)
    # create data file
    dat_fname = _PARTITION_KEY.replace('$', '-') + '.dat'
    mintime = float(sys.maxsize)
    maxtime = 0.0
    rdata = {}
    for nodeid in data:
        start = data[nodeid]['start']
        if start in rdata:
            raise RuntimeError('cannot create reverse mapping')
        rdata[start] = nodeid
        if start < mintime:
            mintime = start
        if start > maxtime:
            maxtime = start
    print('nodeready variance:', maxtime - mintime)
    total_gr = 0
    total_ac = 0
    with open(dat_fname, 'w') as f:
        f.write(
            'NodePrepStartTime NodeId NodePrep+DockerInstall '
            'PrivateRegistrySetup ShipyardContainerPull GlobalResourcesLoad '
            'TotalPull TotalSave TotalLoad TotalTorrent\n')
        for start in sorted(rdata):
            nodeid = rdata[start]
            pull = 0
            save = 0
            load = 0
            torrent = 0
            for event in data[nodeid]['timing']:
                if event.startswith('pull:'):
                    pull += data[nodeid]['timing'][event]
                elif event.startswith('save:'):
                    save += data[nodeid]['timing'][event]
                elif event.startswith('load:'):
                    load += data[nodeid]['timing'][event]
                elif event.startswith('torrent:'):
                    torrent += data[nodeid]['timing'][event]
            acquisition = pull + torrent + load
            total_ac += acquisition
            print(nodeid, data[nodeid]['timing'])
            f.write(
                ('{0} {1} {2} {3} {4} {5} {6:.5f} {7:.5f} {8:.5f} '
                 '{9:.5f}\n').format(
                     datetime.datetime.fromtimestamp(start).strftime(
                         '%Y-%m-%d-%H:%M:%S.%f'),
                     nodeid,
                     data[nodeid]['timing']['docker_install'],
                     data[nodeid]['timing']['private_registry_setup'],
                     data[nodeid]['timing']['docker_shipyard_container_pull'],
                     data[nodeid]['timing']['global_resources_loaded'],
                     pull,
                     save,
                     load,
                     torrent)
            )
            total_gr += data[nodeid]['timing']['global_resources_loaded']
    print('total gr: {} avg: {}'.format(total_gr, total_gr / len(data)))
    print('total acq: {} avg: {}'.format(total_ac, total_ac / len(data)))
    # create plot file
    plot_fname = _PARTITION_KEY.replace('$', '-') + '.plot'
    with open(plot_fname, 'w') as f:
        f.write('set terminal pngcairo enhanced transparent crop\n')
        f.write(
            ('set title "Shipyard Performance for {} ({} {})" '
             'font ", 10" \n').format(
                 _PARTITION_KEY.split('$')[-1], offer, sku))
        f.write(
            'set key top right horizontal autotitle columnhead '
            'font ", 7"\n')
        f.write('set xtics rotate by 45 right font ", 7"\n')
        f.write('set ytics font ", 8"\n')
        f.write('set xlabel "Node Prep Start Time" font ", 8"\n')
        f.write('set ylabel "Seconds" font ", 8"\n')
        f.write('set format x "%H:%M:%.3S"\n')
        f.write('set xdata time\n')
        f.write('set timefmt "%Y-%m-%d-%H:%M:%S"\n')
        f.write('set style fill solid\n')
        f.write('set boxwidth {0:.5f} absolute\n'.format(
            (maxtime - mintime) / 100.0))
        f.write('plot "{}" using 1:($3+$4+$5+$6) with boxes, \\\n'.format(
            dat_fname))
        f.write('\t"" using 1:($3+$4+$5) with boxes, \\\n')
        f.write('\t"" using 1:($3+$4) with boxes, \\\n')
        f.write('\t"" using 1:3 with boxes\n')
    png_fname = _PARTITION_KEY.replace('$', '-') + '.png'
    subprocess.check_call(
        'gnuplot {} > {}'.format(plot_fname, png_fname), shell=True)
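
`mintime = float(sys.maxsize)` initialises a running minimum with a value that every real timestamp will undercut. The scan in miniature (timestamps invented); for float data, `float('inf')` or the `min()`/`max()` builtins are the more conventional choice:

    import sys

    starts = [1467001.25, 1467000.50, 1467003.75]

    mintime = float(sys.maxsize)  # larger than any real start time
    maxtime = 0.0
    for start in starts:
        mintime = min(mintime, start)
        maxtime = max(maxtime, start)
    print(maxtime - mintime)      # the "nodeready variance" printed above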

Example 75

Project: semisup-learn Source File: qns3vm.py
Function: set_parameters
    def __setParameters(self,  ** kw):
        for attr, val in kw.items():
            self.parameters[attr] = val
        self.__lam = float(self.parameters['lam'])
        assert self.__lam > 0
        self.__lamU = float(self.parameters['lamU'])
        assert self.__lamU > 0
        self.__lam_Uvec = [float(self.__lamU)*i for i in [0,0.000001,0.0001,0.01,0.1,0.5,1]]
        self.__sigma = float(self.parameters['sigma'])
        assert self.__sigma > 0
        self.__kernel_type = str(self.parameters['kernel_type'])
        if self.parameters['numR'] != None:
            self.__numR = int(self.parameters['numR'])
            assert (self.__numR <= len(self.__X)) and (self.__numR > 0)
        else:
            self.__numR = len(self.__X)
        self.__regressors_indices = sorted(self.__random_generator.sample( range(0,len(self.__X)), self.__numR ))
        self.__dim = self.__numR + 1 # add bias term b
        self.__minimum_labeled_patterns_for_estimate_r = float(self.parameters['minimum_labeled_patterns_for_estimate_r'])
        # If reliable estimate is available or can be estimated, use it, otherwise
        # assume classes to be balanced (i.e., estimate_r=0.0)
        if self.parameters['estimate_r'] != None:
            self.__estimate_r = float(self.parameters['estimate_r'])
        elif len(self.__L_l) >= self.__minimum_labeled_patterns_for_estimate_r:
            self.__estimate_r = (1.0 / len(self.__L_l)) * np.sum(self.__L_l)
        else:
            self.__estimate_r = 0.0
        self.__BFGS_m = int(self.parameters['BFGS_m'])
        self.__BFGS_maxfun = int(self.parameters['BFGS_maxfun'])
        self.__BFGS_factr = float(self.parameters['BFGS_factr'])
        # This is a hack for 64 bit systems (Linux). The machine precision 
        # is different for the BFGS optimizer (Fortran code) and we fix this by:
        is_64bits = sys.maxsize > 2**32
        if is_64bits:
            logging.debug("64-bit system detected, modifying BFGS_factr!")
            self.__BFGS_factr = 0.000488288*self.__BFGS_factr
        self.__BFGS_pgtol = float(self.parameters['BFGS_pgtol'])
        self.__BFGS_verbose = int(self.parameters['BFGS_verbose'])
        self.__surrogate_gamma = float(self.parameters['surrogate_gamma'])
        self.__s = float(self.parameters['surrogate_s'])
        self.__breakpoint_for_exp = float(self.parameters['breakpoint_for_exp'])
        self.__b = self.__estimate_r
        # size of unlabeled patterns to estimate mean (used for balancing constraint)
        self.__max_unlabeled_subset_size = 1000
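
`sys.maxsize > 2**32` tests the interpreter's pointer width: `sys.maxsize` is 2**63 - 1 on 64-bit builds and 2**31 - 1 on 32-bit builds. Note that it detects the Python build, not the operating system:

    import sys

    # 2**63 - 1 (64-bit build) is above 2**32; 2**31 - 1 (32-bit build) is below
    is_64bits = sys.maxsize > 2**32
    print("64-bit" if is_64bits else "32-bit")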

Example 76

Project: TrustRouter Source File: configparser.py
Function: read
    def _read(self, fp, fpname):
        """Parse a sectioned configuration file.

        Each section in a configuration file contains a header, indicated by
        a name in square brackets (`[]'), plus key/value options, indicated by
        `name' and `value' delimited with a specific substring (`=' or `:' by
        default).

        Values can span multiple lines, as long as they are indented deeper
        than the first line of the value. Depending on the parser's mode, blank
        lines may be treated as parts of multiline values or ignored.

        Configuration files may include comments, prefixed by specific
        characters (`#' and `;' by default). Comments may appear on their own
        in an otherwise empty line or may be entered in lines holding values or
        section names.
        """
        elements_added = set()
        cursect = None                        # None, or a dictionary
        sectname = None
        optname = None
        lineno = 0
        indent_level = 0
        e = None                              # None, or an exception
        for lineno, line in enumerate(fp, start=1):
            comment_start = None
            # strip inline comments
            for prefix in self._inline_comment_prefixes:
                index = line.find(prefix)
                if index == 0 or (index > 0 and line[index-1].isspace()):
                    comment_start = index
                    break
            # strip full line comments
            for prefix in self._comment_prefixes:
                if line.strip().startswith(prefix):
                    comment_start = 0
                    break
            value = line[:comment_start].strip()
            if not value:
                if self._empty_lines_in_values:
                    # add empty line to the value, but only if there was no
                    # comment on the line
                    if (comment_start is None and
                        cursect is not None and
                        optname and
                        cursect[optname] is not None):
                        cursect[optname].append('') # newlines added at join
                else:
                    # empty line marks end of value
                    indent_level = sys.maxsize
                continue
            # continuation line?
            first_nonspace = self.NONSPACECRE.search(line)
            cur_indent_level = first_nonspace.start() if first_nonspace else 0
            if (cursect is not None and optname and
                cur_indent_level > indent_level):
                cursect[optname].append(value)
            # a section header or option header?
            else:
                indent_level = cur_indent_level
                # is it a section header?
                mo = self.SECTCRE.match(value)
                if mo:
                    sectname = mo.group('header')
                    if sectname in self._sections:
                        if self._strict and sectname in elements_added:
                            raise DuplicateSectionError(sectname, fpname,
                                                        lineno)
                        cursect = self._sections[sectname]
                        elements_added.add(sectname)
                    elif sectname == self.default_section:
                        cursect = self._defaults
                    else:
                        cursect = self._dict()
                        self._sections[sectname] = cursect
                        self._proxies[sectname] = SectionProxy(self, sectname)
                        elements_added.add(sectname)
                    # So sections can't start with a continuation line
                    optname = None
                # no section header in the file?
                elif cursect is None:
                    raise MissingSectionHeaderError(fpname, lineno, line)
                # an option line?
                else:
                    mo = self._optcre.match(value)
                    if mo:
                        optname, vi, optval = mo.group('option', 'vi', 'value')
                        if not optname:
                            e = self._handle_error(e, fpname, lineno, line)
                        optname = self.optionxform(optname.rstrip())
                        if (self._strict and
                            (sectname, optname) in elements_added):
                            raise DuplicateOptionError(sectname, optname,
                                                       fpname, lineno)
                        elements_added.add((sectname, optname))
                        # This check is fine because the OPTCRE cannot
                        # match if it would set optval to None
                        if optval is not None:
                            optval = optval.strip()
                            cursect[optname] = [optval]
                        else:
                            # valueless option handling
                            cursect[optname] = None
                    else:
                        # a non-fatal parsing error occurred. set up the
                        # exception but keep going. the exception will be
                        # raised at the end of the file and will contain a
                        # list of all bogus lines
                        e = self._handle_error(e, fpname, lineno, line)
        # if any parsing errors occurred, raise an exception
        if e:
            raise e
        self._join_multiline_values()
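
Setting `indent_level = sys.maxsize` after a value-terminating blank line guarantees that the continuation test `cur_indent_level > indent_level` fails for every following line, which closes the multiline value. The comparison in isolation:

    import sys

    def is_continuation(cur_indent_level, indent_level):
        # No real indentation can exceed sys.maxsize, so a value closed by
        # a blank line (indent_level = sys.maxsize) never continues.
        return cur_indent_level > indent_level

    print(is_continuation(8, 4))            # True  -- indented deeper, continues
    print(is_continuation(8, sys.maxsize))  # False -- the value was closed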

Example 77

Project: TrustRouter Source File: test_struct.py
    def test_integers(self):
        # Integer tests (bBhHiIlLqQ).
        import binascii

        class IntTester(unittest.TestCase):
            def __init__(self, format):
                super(IntTester, self).__init__(methodName='test_one')
                self.format = format
                self.code = format[-1]
                self.byteorder = format[:-1]
                if not self.byteorder in byteorders:
                    raise ValueError("unrecognized packing byteorder: %s" %
                                     self.byteorder)
                self.bytesize = struct.calcsize(format)
                self.bitsize = self.bytesize * 8
                if self.code in tuple('bhilq'):
                    self.signed = True
                    self.min_value = -(2**(self.bitsize-1))
                    self.max_value = 2**(self.bitsize-1) - 1
                elif self.code in tuple('BHILQ'):
                    self.signed = False
                    self.min_value = 0
                    self.max_value = 2**self.bitsize - 1
                else:
                    raise ValueError("unrecognized format code: %s" %
                                     self.code)

            def test_one(self, x, pack=struct.pack,
                                  unpack=struct.unpack,
                                  unhexlify=binascii.unhexlify):

                format = self.format
                if self.min_value <= x <= self.max_value:
                    expected = x
                    if self.signed and x < 0:
                        expected += 1 << self.bitsize
                    self.assertGreaterEqual(expected, 0)
                    expected = '%x' % expected
                    if len(expected) & 1:
                        expected = "0" + expected
                    expected = expected.encode('ascii')
                    expected = unhexlify(expected)
                    expected = (b"\x00" * (self.bytesize - len(expected)) +
                                expected)
                    if (self.byteorder == '<' or
                        self.byteorder in ('', '@', '=') and not ISBIGENDIAN):
                        expected = string_reverse(expected)
                    self.assertEqual(len(expected), self.bytesize)

                    # Pack work?
                    got = pack(format, x)
                    self.assertEqual(got, expected)

                    # Unpack work?
                    retrieved = unpack(format, got)[0]
                    self.assertEqual(x, retrieved)

                    # Adding any byte should cause a "too big" error.
                    self.assertRaises((struct.error, TypeError), unpack, format,
                                                                 b'\x01' + got)
                else:
                    # x is out of range -- verify pack realizes that.
                    self.assertRaises((OverflowError, ValueError, struct.error),
                                      pack, format, x)

            def run(self):
                from random import randrange

                # Create all interesting powers of 2.
                values = []
                for exp in range(self.bitsize + 3):
                    values.append(1 << exp)

                # Add some random values.
                for i in range(self.bitsize):
                    val = 0
                    for j in range(self.bytesize):
                        val = (val << 8) | randrange(256)
                    values.append(val)

                # Values absorbed from other tests
                values.extend([300, 700000, sys.maxsize*4])

                # Try all those, and their negations, and +-1 from
                # them.  Note that this tests all power-of-2
                # boundaries in range, and a few out of range, plus
                # +-(2**n +- 1).
                for base in values:
                    for val in -base, base:
                        for incr in -1, 0, 1:
                            x = val + incr
                            self.test_one(x)

                # Some error cases.
                class NotAnInt:
                    def __int__(self):
                        return 42

                # Objects with an '__index__' method should be allowed
                # to pack as integers.  That is assuming the implemented
                # '__index__' method returns an 'int' or 'long'.
                class Indexable(object):
                    def __init__(self, value):
                        self._value = value

                    def __index__(self):
                        return self._value

                # If the '__index__' method raises a type error, then
                # '__int__' should be used with a deprecation warning.
                class BadIndex(object):
                    def __index__(self):
                        raise TypeError

                    def __int__(self):
                        return 42

                self.assertRaises((TypeError, struct.error),
                                  struct.pack, self.format,
                                  "a string")
                self.assertRaises((TypeError, struct.error),
                                  struct.pack, self.format,
                                  randrange)
                self.assertRaises((TypeError, struct.error),
                                  struct.pack, self.format,
                                  3+42j)
                self.assertRaises((TypeError, struct.error),
                                  struct.pack, self.format,
                                  NotAnInt())
                self.assertRaises((TypeError, struct.error),
                                  struct.pack, self.format,
                                  BadIndex())

                # Check for legitimate values from '__index__'.
                for obj in (Indexable(0), Indexable(10), Indexable(17),
                            Indexable(42), Indexable(100), Indexable(127)):
                    try:
                        struct.pack(format, obj)
                    except:
                        self.fail("integer code pack failed on object "
                                  "with '__index__' method")

                # Check for bogus values from '__index__'.
                for obj in (Indexable(b'a'), Indexable('b'), Indexable(None),
                            Indexable({'a': 1}), Indexable([1, 2, 3])):
                    self.assertRaises((TypeError, struct.error),
                                      struct.pack, self.format,
                                      obj)

        for code in integer_codes:
            for byteorder in byteorders:
                if (byteorder in ('', '@') and code in ('q', 'Q') and
                    not HAVE_LONG_LONG):
                    continue
                format = byteorder+code
                t = IntTester(format)
                t.run()
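
`sys.maxsize * 4` is a convenient value guaranteed to exceed every native integer format code, so the test can assert that `pack` rejects it. A standalone check of the same behaviour (catching the same exception types the test expects):

    import struct
    import sys

    # sys.maxsize * 4 cannot fit even in a signed 64-bit field ('q'),
    # so pack() must refuse it.
    try:
        struct.pack('q', sys.maxsize * 4)
    except (OverflowError, struct.error) as exc:
        print("rejected:", exc)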

Example 78

Project: freetype-py Source File: texture_font.py
Function: get_region
    def get_region(self, width, height):
        '''
        Get a free region of given size and allocate it

        Parameters
        ----------

        width : int
            Width of region to allocate

        height : int
            Height of region to allocate

        Return
        ------
            A newly allocated region as (x,y,width,height) or (-1,-1,0,0)
        '''

        best_height = sys.maxsize
        best_index = -1
        best_width = sys.maxsize
        region = 0, 0, width, height

        for i in range(len(self.nodes)):
            y = self.fit(i, width, height)
            if y >= 0:
                node = self.nodes[i]
                if (y+height < best_height or
                    (y+height == best_height and node[2] < best_width)):
                    best_height = y+height
                    best_index = i
                    best_width = node[2]
                    region = node[0], y, width, height

        if best_index == -1:
            return -1,-1,0,0

        node = region[0], region[1]+height, width
        self.nodes.insert(best_index, node)

        i = best_index+1
        while i < len(self.nodes):
            node = self.nodes[i]
            prev_node = self.nodes[i-1]
            if node[0] < prev_node[0]+prev_node[2]:
                shrink = prev_node[0]+prev_node[2] - node[0]
                x,y,w = self.nodes[i]
                self.nodes[i] = x+shrink, y, w-shrink
                if self.nodes[i][2] <= 0:
                    del self.nodes[i]
                    i -= 1
                else:
                    break
            else:
                break
            i += 1

        self.merge()
        self.used += width*height
        return region

Example 79

Project: ursgal Source File: simple_example_search.py
def main():
    '''
    Executes a search with OMSSA, XTandem and MS-GF+ on the BSA1.mzML
    input_file

    usage:
        ./simple_example_search.py

    Note:
        myrimatch does not work with this file in this case

    '''
    uc = ursgal.UController(
        profile = 'LTQ XL low res',
        params = {
            'database' : os.path.join(
                os.pardir,
                'example_data',
                'BSA.fasta'
            ),
            'modifications' : [
                'M,opt,any,Oxidation',        # Met oxidation
                'C,fix,any,Carbamidomethyl',  # Carbamidomethylation
                '*,opt,Prot-N-term,Acetyl'    # N-Acetylation
            ],
        }
    )

    if sys.maxsize > 2 ** 32:
        xtandem = 'xtandem_vengeance'
    else:
        xtandem = 'xtandem_sledgehammer'

    if sys.platform == 'win32':
        msamanda = 'msamanda_1_0_0_7503'
    elif sys.platform == 'darwin':
        pass
    else:
        msamanda = 'msamanda_1_0_0_7504'

    engine_list = [
        'omssa',
        xtandem,
        'msgfplus_v2016_09_16',
        # msamanda,
    ]

    mzML_file = os.path.join(
        os.pardir,
        'example_data',
        'BSA_simple_example_search',
        'BSA1.mzML'
    )
    if os.path.exists(mzML_file) is False:
        uc.params['http_url'] = 'http://sourceforge.net/p/open-ms/code/HEAD/tree/OpenMS/share/OpenMS/examples/BSA/BSA1.mzML?format=raw'
        uc.params['http_output_folder'] = os.path.dirname(mzML_file)
        uc.fetch_file(
            engine     = 'get_http_files_1_0_0',
        )
        try:
            shutil.move(
                '{0}?format=raw'.format(mzML_file),
                mzML_file
            )
        except:
            shutil.move(
                '{0}format=raw'.format(mzML_file),
                mzML_file
            )

    unified_file_list = []

    for engine in engine_list:
        unified_search_result_file = uc.search(
            input_file = mzML_file,
            engine     = engine,
            force      = False
        )
        unified_file_list.append(unified_search_result_file)

    uc.visualize(
        input_files    = unified_file_list,
        engine         = 'venndiagram',
    )
    return
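
This is the same pointer-width probe as in Example 75, here used to pick a 64-bit or 32-bit X!Tandem build. An equivalent check asks `struct` for the size of a C pointer; both lines below agree on any CPython build:

    import struct
    import sys

    print(sys.maxsize > 2 ** 32)            # True on 64-bit interpreter builds
    print(struct.calcsize("P") * 8 == 64)   # size of a C pointer, in bits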

Example 80

Project: pulsar Source File: httpurl.py
Function: parse_headers
    def _parse_headers(self, data):
        if data == b'\r\n':
            self.__on_headers_complete = True
            self._buf = []
            return 0
        idx = data.find(b'\r\n\r\n')
        if idx < 0:  # we don't have all headers
            return False
        chunk = to_string(data[:idx], DEFAULT_CHARSET)
        # Split lines on \r\n keeping the \r\n on each line
        lines = deque(('%s\r\n' % line for line in chunk.split('\r\n')))
        # Parse headers into key/value pairs paying attention
        # to continuation lines.
        while len(lines):
            # Parse initial header name : value pair.
            curr = lines.popleft()
            if curr.find(":") < 0:
                continue
            name, value = curr.split(":", 1)
            name = name.rstrip(" \t").upper()
            if HEADER_RE.search(name):
                raise InvalidHeader("invalid header name %s" % name)
            name, value = header_field(name.strip()), [value.lstrip()]
            # Consume value continuation lines
            while len(lines) and lines[0].startswith((" ", "\t")):
                value.append(lines.popleft())
            value = ''.join(value).rstrip()
            if name in self._headers:
                self._headers[name].append(value)
            else:
                self._headers[name] = [value]
        # detect now if body is sent by chunks.
        clen = self._headers.get('Content-Length')
        if 'Transfer-Encoding' in self._headers:
            te = self._headers['Transfer-Encoding'][0].lower()
            self._chunked = (te == 'chunked')
        else:
            self._chunked = False
        #
        status = self._status_code
        if status and has_empty_content(status, self._method):
            clen = 0
        elif clen is not None:
            try:
                clen = int(clen[0])
            except ValueError:
                clen = None
            else:
                if clen < 0:  # ignore nonsensical negative lengths
                    clen = None
        #
        if clen is None:
            self._clen_rest = sys.maxsize
        else:
            self._clen_rest = self._clen = clen
        #
        # detect encoding and set decompress object
        if self.decompress and 'Content-Encoding' in self._headers:
            encoding = self._headers['Content-Encoding'][0]
            if encoding == "gzip":
                self.__decompress_obj = zlib.decompressobj(16+zlib.MAX_WBITS)
                self.__decompress_first_try = False
            elif encoding == "deflate":
                self.__decompress_obj = zlib.decompressobj()

        rest = data[idx+4:]
        self._buf = [rest]
        self.__on_headers_complete = True
        self.__on_message_begin = True
        return len(rest)
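
With no usable Content-Length, `self._clen_rest = sys.maxsize` makes the remaining-bytes budget effectively unbounded, i.e. "read until the connection closes". A toy sketch of that bookkeeping (the helper below is illustrative, not pulsar's API):

    import sys

    def remaining_after(chunk_len, clen=None):
        """Bytes still expected after reading `chunk_len` bytes of body."""
        rest = clen if clen is not None else sys.maxsize  # no length: read to EOF
        return rest - chunk_len

    print(remaining_after(1024, clen=4096))  # 3072
    print(remaining_after(1024))             # huge -- never "done" by count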

Example 81

Project: viewfinder Source File: run-ui-tests.py
def CreateSummaryResults():
  """Process the resulting .plist file from the test and generate the html results
  """
  errors = []
  passes = []
  current_path = os.path.join(_RESULTS_PATH, 'current')
  schemes = GetCurrentSchemes()
  test_path = os.path.join(current_path, options.conf)
  print 'Creating summary results.'
  for testname in _SUMMARY.keys():
    # Strip .js extension if present.
    if testname.endswith('.js'):
      testname = testname[:-3]
    temp_details = ''
    filepath = test_path + '/' + testname + r'/Run 1/Automation Results.plist'
    print filepath
    xmldoc = ElementTree.parse(filepath)
    dicts = xmldoc.findall('*/array/dict')
    for tmpdict in dicts:
      error = {}
      tmppass = {}
      if tmpdict.find('string').text == 'Error' and int(tmpdict.find('integer').text) == 4:
        error['testname'] = tmpdict[3].text
        error['timestamp'] = tmpdict.find('date').text
        error['status'] = tmpdict[1].text
        errors.append(error)
      elif tmpdict.find('string').text == 'Pass' and int(tmpdict.find('integer').text) == 4:
        tmppass['testname'] = tmpdict[3].text
        tmppass['timestamp'] = tmpdict.find('date').text
        tmppass['status'] = tmpdict[1].text
        passes.append(tmppass)
      elif tmpdict[1].text == 'Debug':
        temp_details += tmpdict[3].text + '\n'

    _SUMMARY[testname]['details'] = temp_details
    if not options.regen:
      ProcessScreenshots(testname)
    for image_name in GetImageNames(testname):
      if IsImageEqual(testname, image_name) is False:
        _SUMMARY[testname]['warnings'][image_name] = 'Warning: The screenshot does not match the Baseline.  ' \
          'Do you want to Accept the Current image as the new Baseline?'
        _SUMMARY[testname]['alert'] = True
      else:
        _SUMMARY[testname]['warnings'][image_name] = None

  fmt_args = {'errors': errors,
                'passes': passes,
                'summary': _SUMMARY,
                'random_num': random.randint(1,sys.maxsize),
                'schemes': schemes,
                'scheme': options.conf
                }

  # Setup the templates directories.
  resources_path = os.path.dirname('%s/testing' % _BASE_PATH)
  template_path = os.path.join(resources_path, 'templates')
  _loader = template.Loader(template_path)
  summary_html = _loader.load('summary_results.test').generate(**fmt_args)

  f = open('%s/index.html' % test_path,'w')
  f.write(summary_html)
  f.close()
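
`random.randint(1, sys.maxsize)` draws a very large random token, which the template can append to asset URLs as a cache-buster. The core of it:

    import random
    import sys

    token = random.randint(1, sys.maxsize)     # large random cache-busting token
    print("index.html?nocache=%d" % token)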

Example 82

Project: SALib Source File: morris_util.py
def find_most_distant(input_sample, N, num_params, k_choices, groups=None):
    '''
    Finds the 'k_choices' most distant choices from the
    'N' trajectories contained in 'input_sample'
    '''    
    # Now evaluate the (N choose k_choices) possible combinations
    if nchoosek(N, k_choices) >= sys.maxsize:
        raise ValueError("Number of combinations is too large")
    number_of_combinations = int(nchoosek(N, k_choices))

    # First compute the distance matrix for each possible pairing
    # of trajectories and store in a shared-memory array
    distance_matrix = compute_distance_matrix(input_sample,
                                              N,
                                              num_params,
                                              groups)


    # Initialise the output array

    chunk = int(1e6)
    if chunk > number_of_combinations:
        chunk = number_of_combinations

    counter = 0
    # Generate a list of all the possible combinations
    # combos = np.array([x for x in combinations(range(N),k_choices)])
    combo_gen = combinations(list(range(N)), k_choices)
    scores = np.empty(number_of_combinations, dtype=np.float32)
    # Generate the pairwise indices once
    pairwise = np.array([y for y in combinations(list(range(k_choices)), 2)])

    for combos in grouper(chunk, combo_gen):
        scores[(counter * chunk):((counter + 1) * chunk)] = mappable(combos, pairwise, distance_matrix)
        counter += 1
    return scores
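
Since Python integers never overflow, the `>= sys.maxsize` guard is a practical cap rather than an overflow check: the count must remain usable as a single array length (`np.empty` takes it as one a few lines later). A sketch using `math.comb`, the stdlib equivalent (Python 3.8+) of the example's `nchoosek` helper:

    import sys
    from math import comb  # Python 3.8+

    def checked_combination_count(n, k):
        count = comb(n, k)
        if count >= sys.maxsize:  # too many results to index into one array
            raise ValueError("Number of combinations is too large")
        return count

    print(checked_combination_count(10, 4))  # 210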

Example 83

Project: git-pandas Source File: repository.py
    def commit_history(self, branch='master', limit=None, days=None, ignore_globs=None, include_globs=None):
        """
        Returns a pandas DataFrame containing all of the commits for a given branch. Included in that DataFrame will be
        the columns:

         * date (index)
         * author
         * committer
         * message
         * lines
         * insertions
         * deletions
         * net

        :param branch: the branch to return commits for
        :param limit: (optional, default=None) a maximum number of commits to return, None for no limit
        :param days: (optional, default=None) number of days to return, if limit is None
        :param ignore_globs: (optional, default=None) a list of globs to ignore; the default of None excludes nothing
        :param include_globs: (optional, default=None) a list of globs to include; the default of None includes everything.
        :return: DataFrame
        """

        # setup the data-set of commits
        if limit is None:
            if days is None:
                ds = [[
                          x.author.name,
                          x.committer.name,
                          x.committed_date,
                          x.message,
                          self.__check_extension(x.stats.files, ignore_globs=ignore_globs, include_globs=include_globs)
                      ] for x in self.repo.iter_commits(branch, max_count=sys.maxsize)]
            else:
                ds = []
                c_date = time.time()
                commits = self.repo.iter_commits(branch, max_count=sys.maxsize)
                dlim = time.time() - days * 24 * 3600
                while c_date > dlim:
                    try:
                        if sys.version_info.major == 2:
                            x = commits.next()
                        else:
                            x = commits.__next__()
                    except StopIteration:
                        break
                    c_date = x.committed_date
                    if c_date > dlim:
                        ds.append([
                            x.author.name,
                            x.committer.name,
                            x.committed_date,
                            x.message,
                            self.__check_extension(x.stats.files, ignore_globs=ignore_globs,
                                                   include_globs=include_globs)
                        ])

        else:
            ds = [[
                      x.author.name,
                      x.committer.name,
                      x.committed_date,
                      x.message,
                      self.__check_extension(x.stats.files, ignore_globs=ignore_globs, include_globs=include_globs)
                  ] for x in self.repo.iter_commits(branch, max_count=limit)]

        # aggregate stats
        ds = [x[:-1] + [sum([x[-1][key]['lines'] for key in x[-1].keys()]),
                        sum([x[-1][key]['insertions'] for key in x[-1].keys()]),
                        sum([x[-1][key]['deletions'] for key in x[-1].keys()]),
                        sum([x[-1][key]['insertions'] for key in x[-1].keys()]) - sum(
                            [x[-1][key]['deletions'] for key in x[-1].keys()])
                        ] for x in ds if len(x[-1].keys()) > 0]

        # make it a pandas dataframe
        df = DataFrame(ds,
                       columns=['author', 'committer', 'date', 'message', 'lines', 'insertions', 'deletions', 'net'])

        # format the date col and make it the index
        df['date'] = to_datetime(df['date'].map(datetime.datetime.fromtimestamp))
        df.set_index(keys=['date'], drop=True, inplace=True)

        return df
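
`max_count=sys.maxsize` hands GitPython a concrete integer that behaves like "no limit", for a parameter that expects a number. The same default-argument trick in miniature (the `take` helper is invented for illustration):

    import sys

    def take(iterable, max_count=sys.maxsize):
        """Yield at most `max_count` items; the default is effectively unlimited."""
        for i, item in enumerate(iterable):
            if i >= max_count:
                return
            yield item

    print(list(take("abcdef", 3)))  # ['a', 'b', 'c']
    print(list(take("abcdef")))     # all six characters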

Example 84

Project: arcade Source File: sound.py
def load_sound_library():
    """
    Special code for Windows so we grab the proper avbin from our directory.
    Otherwise hope the correct package is installed.
    """

    # lazy loading
    if not load_sound_library._sound_library_loaded:
        load_sound_library._sound_library_loaded = True
    else:
        return

    import os
    appveyor = not os.environ.get('APPVEYOR') is None

    import platform
    system = platform.system()
    if system == 'Windows':

        import sys
        is64bit = sys.maxsize > 2**32

        import site
        packages = site.getsitepackages()

        if appveyor:
            if is64bit:
                path = "Win64/avbin"
            else:
                path = "Win32/avbin"

        else:
            if is64bit:
                path = packages[0] + "/lib/site-packages/arcade/Win64/avbin"
            else:
                path = packages[0] + "/lib/site-packages/arcade/Win32/avbin"
    elif system == 'Darwin':
        from distutils.sysconfig import get_python_lib
        path = get_python_lib() + '/lib/site-packages/arcade/lib/libavbin.10.dylib'
        pyglet.options['audio'] = ('openal', 'pulse', 'silent')

    else:
        path = "avbin"
        pyglet.options['audio'] = ('openal', 'pulse', 'silent')

    pyglet.lib.load_library(path)
    pyglet.have_avbin = True

Example 85

Project: ro-manager Source File: RdfReport.py
def process_query(qitem, rdfgraph, initvars, outstr, escape):
    """
    Process a single query+template structure
    """
    # do query
    log.debug("process_query:")
    log.debug(" - initvars: "+repr(initvars))
    query       = qitem.get('query', None)
    newbindings = [initvars]
    if query:
        for ql in query.split('\n'):
            if not re.match("\s*(PREFIX|$)", ql):
                log.debug(" - query: "+ql);
        resp = rdfgraph.query(qitem['query'],initBindings=initvars)
        if resp.type == 'ASK':
            if not resp.askAnswer: newbindings = []
        elif resp.type == 'SELECT':
            newbindings = resp.bindings
        else:
            raise "Unexpected query response type %s"%resp.type
    log.debug(" - newbindings: "+repr(newbindings))
    # Apply limit to result set
    maxrepeat   = qitem.get('max', sys.maxsize)
    newbindings = takefirst(maxrepeat, newbindings)
    # Process each binding in the result set
    output  = qitem.get('output', None)
    report  = qitem.get('report', None)
    alt     = qitem.get('alt', None)
    altrep  = qitem.get('altreport', None)
    sep     = qitem.get('sep', None)
    usealt  = altrep or alt
    nextsep = None
    for b in newbindings:
        newbinding = initvars.copy()
        for k in b:
            if not isinstance(k,rdflib.BNode):
                newbinding[str(k)]        = b[k]
                newbinding[str(k)+"_esc"] = escape(b[k])
        if nextsep:
            outstr.write(nextsep%newbinding)
        if output:
            outstr.write(output%newbinding)
        if report:
            process_item(report, rdfgraph, newbinding, outstr, escape)
        usealt  = False
        nextsep = sep
    if usealt:
        if altrep:
            process_item(altrep, rdfgraph, initvars, outstr, escape)
        if alt:
            outstr.write(alt%initvars)
    return

Example 86

Project: cmonkey2 Source File: microbes_online.py
def make_operon_pairs(operon, features):
    """take an operon as a list of gene names, determines the head out of
    these gene names and generates a (head, gene) for each gene in the
    operon.
    features is a map from a gene alias/feature id to a Feature object
    The head is is determined as follows:
    1. retrieve the gene coordinates for each gene in the operon
    2. if most genes are on the forward strand, the head is the one
       with the lowest start position
    3. if most genes are on the reverse strand, the head is the one
       with the highest end position
    This function returns an empty result if
    1. the same amount of genes are on the forward and reverse strand
    2. the gene coordinates can't be retrieved
    """
    def get_reverse_head(feature_map):
        """determine reverse head of the operon"""
        max_gene = None
        max_end = 0
        for (gene, feature) in feature_map.items():
            if feature.location.end > max_end:
                max_end = feature.location.end
                max_gene = gene
        return max_gene

    def get_forward_head(feature_map):
        """determine forward head of the operon"""
        min_gene = None
        min_start = sys.maxsize
        for (gene, feature) in feature_map.items():
            if feature.location.start < min_start:
                min_start = feature.location.start
                min_gene = gene
        return min_gene

    feature_map = {}  # mapping from VNG name to feature
    num_reverse = 0

    # make sure we only take the genes that we have genomic information
    # for, and ignore the rest
    available_operon_genes = []
    for gene in operon:
        if gene in features.keys():
            available_operon_genes.append(gene)
        else:
            logging.warn("Microbes Online operon gene '%s' not found in " +
                         "RSAT features", gene)

    for gene in available_operon_genes:
        feature_map[gene] = features[gene]
        if feature_map[gene].location.reverse:
            num_reverse += 1

    num_total = len(available_operon_genes)
    if num_total > 0:
        percent_reverse = float(num_reverse) / float(num_total)
        if percent_reverse > 0.6:
            head = get_reverse_head(feature_map)
        elif percent_reverse < 0.4:
            head = get_forward_head(feature_map)
        else:
            logging.warning("can't determine head of operon - amounts " +
                            "of reverse and forward genes are too similar " +
                            "(%f-%f)",
                            percent_reverse, 1.0 - percent_reverse)
            return []
        return [(head, gene) for gene in available_operon_genes]
    else:
        logging.warning("Operon did not contain any available genes")
        return []

Example 87

Project: winpython Source File: disthelpers.py
def get_msvc_dlls(msvc_version, architecture=None):
    """Get the list of Microsoft Visual C++ DLLs associated to 
    architecture and Python version, create the manifest file.
    
    architecture: integer (32 or 64) -- if None, take the Python build arch
    python_version: X.Y"""
    current_architecture = 64 if sys.maxsize > 2**32 else 32
    if architecture is None:
        architecture = current_architecture

    filelist = []

    # simple vs2015 situation: nothing (system dll)
    if msvc_version == '14.0':
        return filelist
    
    msvc_major = msvc_version.split('.')[0]
    msvc_minor = msvc_version.split('.')[1]

    if msvc_major == '9':
        key = "1fc8b3b9a1e18e3b"
        atype = "" if architecture == 64 else "win32"
        arch = "amd64" if architecture == 64 else "x86"
        
        groups = {
                  'CRT': ('msvcr90.dll', 'msvcp90.dll', 'msvcm90.dll'),
#                  'OPENMP': ('vcomp90.dll',)
                  }

        for group, dll_list in groups.items():
            dlls = ''
            for dll in dll_list:
                dlls += '    <file name="%s" />%s' % (dll, os.linesep)
        
            manifest =\
"""<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<!-- Copyright (c) Microsoft Corporation.  All rights reserved. -->
<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
    <noInheritable/>
    <assemblyIdentity
        type="%(atype)s"
        name="Microsoft.VC90.%(group)s"
        version="%(version)s"
        processorArchitecture="%(arch)s"
        publicKeyToken="%(key)s"
    />
%(dlls)s</assembly>
""" % dict(version=msvc_version, key=key, atype=atype, arch=arch,
           group=group, dlls=dlls)

            vc90man = "Microsoft.VC90.%s.manifest" % group
            open(vc90man, 'w').write(manifest)
            _remove_later(vc90man)
            filelist += [vc90man]
    
            winsxs = osp.join(os.environ['windir'], 'WinSxS')
            vcstr = '%s_Microsoft.VC90.%s_%s_%s' % (arch, group,
                                                    key, msvc_version)
            for fname in os.listdir(winsxs):
                path = osp.join(winsxs, fname)
                if osp.isdir(path) and fname.lower().startswith(vcstr.lower()):
                    for dllname in os.listdir(path):
                        filelist.append(osp.join(path, dllname))
                    break
            else:
                raise RuntimeError("Microsoft Visual C++ %s DLLs version %s "\
                                    "were not found" % (group, msvc_version))

    elif msvc_major == '10' or msvc_major == '15':  # 15 for vs 2015
        namelist = [name % (msvc_major + msvc_minor) for name in 
                    (
                     'msvcp%s.dll', 'msvcr%s.dll',
                     'vcomp%s.dll',
                     )]
        if msvc_major == '15':
            namelist = [name % ('14' + msvc_minor) for name in
                        (
                         'vcruntime%s.dll', 'msvcp%s.dll', 'vccorlib%s.dll',
                         'concrt%s.dll', 'vcomp%s.dll',
                        )]
        windir = os.environ['windir']
        is_64bit_windows = osp.isdir(osp.join(windir, "SysWOW64"))

        # Reminder: WoW64 (*W*indows 32-bit *o*n *W*indows *64*-bit) is a 
        # subsystem of the Windows operating system capable of running 32-bit 
        # applications and is included on all 64-bit versions of Windows
        # (source: http://en.wikipedia.org/wiki/WoW64)
        #
        # In other words, "SysWOW64" contains 64-bit DLL and applications, 
        # whereas "System32" contains 64-bit DLL and applications on a 64-bit 
        # system.
        sysdir = "System32"
        if not is_64bit_windows and architecture == 64:
            raise RuntimeError("Can't find 64-bit MSVC DLLs on a 32-bit OS")
        if is_64bit_windows and architecture == 32:
            sysdir = "SysWOW64"

        for dllname in namelist:
            fname = osp.join(windir, sysdir, dllname)
            print('searching', fname )
            if osp.exists(fname):
                filelist.append(fname)
            else:
                raise RuntimeError("Microsoft Visual C++ DLLs version %s "\
                                   "were not found" % msvc_version)

    else:
        raise RuntimeError("Unsupported MSVC version %s" % msvc_version)
    
    return filelist

Example 88

Project: gensim Source File: lsimodel.py
    def __init__(self, m, k, docs=None, use_svdlibc=False, power_iters=P2_EXTRA_ITERS, extra_dims=P2_EXTRA_DIMS):
        """
        Construct the (U, S) projection from a corpus `docs`. The projection can
        be later updated by merging it with another Projection via `self.merge()`.

        This is the class taking care of the 'core math'; interfacing with corpora,
        splitting large corpora into chunks and merging them etc. is done through
        the higher-level `LsiModel` class.
        """
        self.m, self.k = m, k
        self.power_iters = power_iters
        self.extra_dims = extra_dims
        if docs is not None:
            # base case decomposition: given a job `docs`, compute its decomposition,
            # *in-core*.
            if not use_svdlibc:
                u, s = stochastic_svd(
                    docs, k, chunksize=sys.maxsize,
                    num_terms=m, power_iters=self.power_iters,
                    extra_dims=self.extra_dims)
            else:
                try:
                    import sparsesvd
                except ImportError:
                    raise ImportError("`sparsesvd` module requested but not found; run `easy_install sparsesvd`")
                logger.info("computing sparse SVD of %s matrix", str(docs.shape))
                if not scipy.sparse.issparse(docs):
                    docs = matutils.corpus2csc(docs)
                ut, s, vt = sparsesvd.sparsesvd(docs, k + 30)  # ask for extra factors, because for some reason SVDLIBC sometimes returns fewer factors than requested
                u = ut.T
                del ut, vt
                k = clip_spectrum(s**2, self.k)
            self.u = u[:, :k].copy()
            self.s = s[:k].copy()
        else:
            self.u, self.s = None, None
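
`chunksize=sys.maxsize` forces `stochastic_svd` to treat the whole corpus as a single in-core chunk. A tiny illustration of a chunking helper (invented here) whose default degenerates to one chunk:

    import sys

    def chunks(seq, chunksize=sys.maxsize):
        """Split `seq` into chunks; the default yields everything at once."""
        for i in range(0, len(seq), chunksize):
            yield seq[i:i + chunksize]

    print(list(chunks([1, 2, 3, 4], 2)))  # [[1, 2], [3, 4]]
    print(list(chunks([1, 2, 3, 4])))     # [[1, 2, 3, 4]]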

Example 89

Project: ClusterRunner Source File: build_runner.py
    def _block_until_finished(self, timeout=None):
        """
        Poll the build status endpoint until the build is finished or until the timeout is reached.

        :param timeout: The maximum number of seconds to wait until giving up, or None for no timeout
        :type timeout: int|None
        """
        timeout_time = time.time() + timeout if timeout else sys.maxsize
        build_status_url = self._master_api.url('build', self._build_id)
        self._logger.debug('Polling build status url: {}', build_status_url)

        while time.time() <= timeout_time:
            response = self._network.get(build_status_url)
            response_data = response.json()

            if 'build' not in response_data or 'status' not in response_data['build']:
                raise _BuildRunnerError('Status response does not contain a "build" object with a "status" value. '
                                        'URL: {}, Content: {}'.format(build_status_url, response_data))

            build_data = response_data['build']
            if build_data['status'] == BuildStatus.FINISHED:
                self._logger.info('Build is finished. (Build id: {})', self._build_id)
                completion_message = 'Build {} result was {}'.format(self._build_id, build_data['result'])
                is_success = build_data['result'] == BuildResult.NO_FAILURES
                if is_success:
                    self._logger.info(completion_message)
                else:
                    self._logger.error(completion_message)
                    if build_data['failed_atoms']:
                        self._logger.error('These atoms had non-zero exit codes (failures):')
                        for failure in build_data['failed_atoms']:
                            self._logger.error(failure)
                    return False

                return True

            if build_data['status'] == BuildStatus.ERROR:
                message = 'Build aborted due to error: {}'.format(build_data.get('error_message'))
                raise _BuildRunnerError(message)

            if build_data['status'] == BuildStatus.BUILDING:
                if build_data['details'] != self._last_build_status_details:
                    self._last_build_status_details = build_data['details']
                    self._logger.info(build_data['details'])

            time.sleep(1)

        raise _BuildRunnerError('Build timed out after {} seconds.'.format(timeout))

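The `time.time() + timeout if timeout else sys.maxsize` line is a common idiom: with no timeout the deadline lies so far in the future that the loop condition never fails. A stripped-down sketch of the same pattern, with hypothetical names:

import sys
import time

def poll_until(check, timeout=None, interval=1):
    # With timeout=None the deadline is sys.maxsize seconds since the
    # epoch, i.e. effectively never, so the loop runs until check() passes.
    deadline = time.time() + timeout if timeout else sys.maxsize
    while time.time() <= deadline:
        if check():
            return True
        time.sleep(interval)
    raise RuntimeError('timed out after {} seconds'.format(timeout))
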
Example 90

Project: spiderfoot Source File: str_tools.py
Function: crop
def crop(msg, size, min_word_length = 4, min_crop = 0, ending = Ending.ELLIPSE, get_remainder = False):
  """
  Shortens a string to a given length.

  If we crop content then a given ending is included (counting itself toward
  the size limitation). This crops on word breaks so we only include a word if
  we can display at least **min_word_length** characters of it.

  If there isn't room for even a truncated single word (or one word plus the
  ellipse if including those) then this provides an empty string.

  If a cropped string ends with a comma or period then it's stripped (unless
  we're providing the remainder back). For example...

    >>> crop('This is a looooong message', 17)
    'This is a looo...'

    >>> crop('This is a looooong message', 12)
    'This is a...'

    >>> crop('This is a looooong message', 3)
    ''

  The whole point of this method is to provide human friendly croppings, and as
  such details of how this works might change in the future. Callers should not
  rely on the details of how this crops.

  .. versionadded:: 1.3.0

  :param str msg: text to be processed
  :param int size: space available for text
  :param int min_word_length: minimum characters before which a word is
    dropped, requires whole word if **None**
  :param int min_crop: minimum characters that must be dropped if a word is
    cropped
  :param Ending ending: type of ending used when truncating, no special
    truncation is used if **None**
  :param bool get_remainder: returns a tuple with the second part being the
    cropped portion of the message

  :returns: **str** of the text truncated to the given length
  """

  # checks if there's room for the whole message

  if len(msg) <= size:
    return (msg, '') if get_remainder else msg

  if size < 0:
    raise ValueError("Crop size can't be negative (received %i)" % size)
  elif min_word_length and min_word_length < 0:
    raise ValueError("Crop's min_word_length can't be negative (received %i)" % min_word_length)
  elif min_crop < 0:
    raise ValueError("Crop's min_crop can't be negative (received %i)" % min_crop)

  # since we're cropping, the effective space available is less with an
  # ellipse, and cropping words requires an extra space for hyphens

  if ending == Ending.ELLIPSE:
    size -= 3
  elif min_word_length and ending == Ending.HYPHEN:
    min_word_length += 1

  if min_word_length is None:
    min_word_length = sys.maxsize

  # checks if there isn't the minimum space needed to include anything

  last_wordbreak = msg.rfind(' ', 0, size + 1)

  if last_wordbreak == -1:
    # we're splitting the first word

    if size < min_word_length:
      return ('', msg) if get_remainder else ''

    include_crop = True
  else:
    last_wordbreak = len(msg[:last_wordbreak].rstrip())  # drops extra ending whitespaces
    include_crop = size - last_wordbreak - 1 >= min_word_length

  # if there's a min crop size then make sure we're cropping at least that many characters

  if include_crop and min_crop:
    next_wordbreak = msg.find(' ', size)

    if next_wordbreak == -1:
      next_wordbreak = len(msg)

    include_crop = next_wordbreak - size + 1 >= min_crop

  if include_crop:
    return_msg, remainder = msg[:size], msg[size:]

    if ending == Ending.HYPHEN:
      remainder = return_msg[-1] + remainder
      return_msg = return_msg[:-1].rstrip() + '-'
  else:
    return_msg, remainder = msg[:last_wordbreak], msg[last_wordbreak:]

  # if this is ending with a comma or period then strip it off

  if not get_remainder and return_msg and return_msg[-1] in (',', '.'):
    return_msg = return_msg[:-1]

  if ending == Ending.ELLIPSE:
    return_msg = return_msg.rstrip() + '...'

  return (return_msg, remainder) if get_remainder else return_msg

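Note how sys.maxsize serves as the "require the whole word" sentinel: when min_word_length is None it is replaced by a value no real word length can reach, so the `size < min_word_length` branch always refuses to split. The same trick in isolation (hypothetical helper name):

import sys

def effective_min_word_length(min_word_length):
    # None means "never crop part of a word"; sys.maxsize guarantees
    # every candidate split fails the length test.
    return sys.maxsize if min_word_length is None else min_word_length

assert effective_min_word_length(4) == 4
assert effective_min_word_length(None) > len('antidisestablishmentarianism')
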
Example 91

Project: scikit-neuralnetwork Source File: mlp.py
Function: fit
    def _fit(self, X, y, w=None):
        assert X.shape[0] == y.shape[0],\
            "Expecting same number of input and output samples."
        data_shape = X.shape
        known_size = hasattr(X, 'size') and hasattr(y, 'size')
        data_size = '{:,}'.format(X.size+y.size) if known_size else 'N/A'
        X, y = self._reshape(X, y)

        if not self.is_initialized:
            X, y = self._initialize(X, y, w)

        log.info("Training on dataset of {:,} samples with {} total size.".format(data_shape[0], data_size))
        if data_shape[1:] != X.shape[1:]:
            log.warning("  - Reshaping input array from {} to {}.".format(data_shape, X.shape))
        if self.valid_set is not None:
            X_v, _ = self.valid_set
            log.debug("  - Train: {: <9,}  Valid: {: <4,}".format(X.shape[0], X_v.shape[0]))
        regularize = self.regularize or self.auto_enabled.get('regularize', None)
        if regularize is not None:
            comment = ", auto-enabled from layers" if 'regularize' in self.auto_enabled else "" 
            log.debug("  - Using `%s` for regularization%s." % (regularize, comment))
        normalize = self.normalize or self.auto_enabled.get('normalize', None)
        if normalize is not None:
            comment = ", auto-enabled from layers" if 'normalize' in self.auto_enabled else ""
            log.debug("  - Using `%s` normalization%s." % (normalize, comment))
        if self.n_iter is not None:
            log.debug("  - Terminating loop after {} total iterations.".format(self.n_iter))
        if self.n_stable is not None and self.n_stable < (self.n_iter or sys.maxsize):
            log.debug("  - Early termination after {} stable iterations.".format(self.n_stable))

        if self.verbose:
            log.debug("\nEpoch       Training Error       Validation Error       Time"
                      "\n------------------------------------------------------------")

        try:
            self._train(X, y, w)
        except RuntimeError as e:
            log.error("\n{}{}{}\n\n{}\n".format(
                ansi.RED,
                "A runtime exception was caught during training. This likely occurred due to\n"
                "a divergence of the SGD algorithm, and NaN floats were found by the backend.",
                ansi.ENDC,
                "Try setting the `learning_rate` 10x lower to resolve this, for example:\n"
                "    learning_rate=%f" % (self.learning_rate * 0.1)))
            raise e

        return self

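The `self.n_stable < (self.n_iter or sys.maxsize)` comparison treats a missing iteration cap as unbounded, so the early-termination message is only logged when n_stable is genuinely the tighter limit. A compressed sketch with illustrative names:

import sys

def early_stopping_governs(n_stable, n_iter=None):
    # n_iter=None (or 0) behaves like "no cap": n_stable always wins.
    return n_stable is not None and n_stable < (n_iter or sys.maxsize)

assert early_stopping_governs(10)          # no cap at all
assert early_stopping_governs(10, 100)     # cap of 100, 10 is tighter
assert not early_stopping_governs(10, 5)   # cap of 5 wins
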
Example 92

Project: esimport Source File: esimport.py
def import_data(filename,
	index_name,
	type_name,
	delimiter,
	server,
	delete_type=False,
	field_translations=None,
	mapping=None,
	username=None,
	password=None,
	bulk_index_count=BULKINDEX_COUNT,
	timeout=None,
	verify=True):

	if server is None:
		server = SERVER_DEFAULT

	if bulk_index_count is None:
		bulk_index_count = BULKINDEX_COUNT

	data_lines = utils.retrieve_file_lines(filename)

	if len(data_lines) < 2:
		print "there is no data to import in " + filename
		return

	es = ElasticSearchConnection(server, username, password, timeout, verify)
	full_url = server + "/" + index_name + "/" + type_name

	if delete_type:
		print "clearing existing docuements from " + full_url
		es.clear_docuements(index_name, type_name)

	if es.ensure_index(index_name):
		if mapping is not None:
			print "applying mapping from " + mapping + " to " + full_url
			try:
				mapping_def = json.loads(utils.retrieve_file(mapping))
				es.ensure_mapping(index_name, type_name, mapping_def)
			except ValueError:
				print "supplied JSON was not formatted correctly, skipping this step"

		start_time = time.time()

		# ensure large fields can be parsed
		csv.field_size_limit(sys.maxsize)

		# translate field names if applicable
		if field_translations is not None:
			reader = translate_fields_reader(data_lines, field_translations, delimiter)
		else:
			reader = csv.DictReader(data_lines, delimiter=delimiter)

		# closure for displaying status of operation
		def show_status(current_count, total_count):
			percent_complete = current_count * 100 / total_count
			sys.stdout.write("\rstatus: %d%%" % percent_complete)
			sys.stdout.flush()

		print "importing data into " + full_url + " (" + str(bulk_index_count) + " rows at a time) from file " + filename
		count = es.bulk_index_docs(reader,
			index_name,
			type_name,
			bulk_index_count,
			show_status)

		# indicate completion
		show_status(100, 100)
		end_time = time.time() - start_time
		print ", import of " + str(count) + " docuements completed in %.2f seconds" % end_time

	else:
		print "index at " + server + "/" + index_name + " can't be written to"

	return

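`csv.field_size_limit(sys.maxsize)` lifts the default 128 KB field cap, but the limit is stored in a C long, which is 32 bits on Windows even under 64-bit Python, so the call can raise OverflowError there. A defensive variant often seen in the wild:

import csv
import sys

# Halve the requested limit until the platform's C long accepts it.
limit = sys.maxsize
while True:
    try:
        csv.field_size_limit(limit)
        break
    except OverflowError:
        limit //= 2
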
Example 93

Project: gitsome Source File: github.py
    def trending(self, language, weekly, monthly,
                 devs=False, browser=False, pager=False):
        """List trending repos for the given language.

        :type language: str
        :param language: The language (optional).
            If blank, shows 'Overall'.

        :type weekly: bool
        :param weekly: Determines whether to show the weekly rankings.
            Daily is the default.

        :type monthly: bool
        :param monthly: Determines whether to show the monthly rankings.
            Daily is the default.
            If both `monthly` and `weekly` are set, `monthly` takes precedence.

        :type devs: bool
        :param devs: determines whether to display the trending
                devs or repos.  Only valid with the -b/--browser option.

        :type browser: bool
        :param browser: Determines whether to view the profile
                in a browser, or in the terminal.

        :type pager: bool
        :param pager: Determines whether to show the output in a pager,
            if available.
        """
        language = language.lower()
        if language in language_rss_map:
            language = language_rss_map[language]
        if monthly:
            period = 'monthly'
            url_param = '?since=monthly'
        elif weekly:
            period = 'weekly'
            url_param = '?since=weekly'
        else:
            period = 'daily'
            url_param = ''
        if browser:
            webbrowser.open(
                ('https://github.com/trending' +
                 ('/developers' if devs else '') +
                 ('/' + language if language != 'overall' else '') +
                 url_param))
        else:
            click.secho(
                'Listing {p} trending {l} repos...'.format(l=language,
                                                           p=period),
                fg=self.config.clr_message)
            url = ('http://github-trends.ryotarai.info/rss/github_trends_' +
                   language + '_')
            url += period + '.rss'
            items = self.trend_parser.parse(url)
            self.table.build_table_setup_trending(
                items.entries,
                self.formatter.format_trending_entry,
                limit=sys.maxsize,
                pager=pager)

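Here limit=sys.maxsize tells the table builder "show everything" through an API that insists on an integer cap. The same convention in a tiny sketch (hypothetical helper):

import sys
from itertools import islice

def take(items, limit=sys.maxsize):
    # sys.maxsize acts as a practical "no limit" default where the
    # API requires an integer rather than accepting None.
    return list(islice(items, limit))

assert take(range(5)) == [0, 1, 2, 3, 4]
assert take(range(5), 2) == [0, 1]
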
Example 94

Project: sugar Source File: notifications.py
    @dbus.service.method(_DBUS_IFACE,
                         in_signature='susssava{sv}i', out_signature='u')
    def Notify(self, app_name, replaces_id, app_icon, summary, body, actions,
               hints, expire_timeout):

        logging.debug('Received notification: %r',
                      [app_name, replaces_id,
                       '<app_icon>', summary, body, actions, '<hints>',
                       expire_timeout])

        if replaces_id > 0:
            notification_id = replaces_id
        else:
            if self._notification_counter == sys.maxsize:
                self._notification_counter = 1
            else:
                self._notification_counter += 1
            notification_id = self._notification_counter

        if app_name not in self._buffer:
            self._buffer[app_name] = []
        self._buffer[app_name].append({'app_name': app_name,
                                       'replaces_id': replaces_id,
                                       'app_icon': app_icon,
                                       'summary': summary,
                                       'body': body,
                                       'actions': actions,
                                       'hints': hints,
                                       'expire_timeout': expire_timeout})

        self.notification_received.send(self,
                                        app_name=app_name,
                                        replaces_id=replaces_id,
                                        app_icon=app_icon,
                                        summary=summary,
                                        body=body,
                                        actions=actions,
                                        hints=hints,
                                        expire_timeout=expire_timeout)

        return notification_id

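The counter wraps back to 1 when it reaches sys.maxsize. Python 3 integers never overflow, so the wrap is a policy choice that keeps ids small and bounded rather than a safety requirement. Minimal sketch:

import sys

class IdCounter:
    def __init__(self):
        self._value = 0

    def next_id(self):
        # Wrap to 1 instead of growing past sys.maxsize; Python would
        # keep counting happily, but bounded ids are easier on consumers.
        self._value = 1 if self._value >= sys.maxsize else self._value + 1
        return self._value

counter = IdCounter()
assert counter.next_id() == 1
assert counter.next_id() == 2
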
Example 95

Project: TrustRouter Source File: test_format.py
    def test_format(self):
        testformat("%.1d", (1,), "1")
        testformat("%.*d", (sys.maxsize,1), overflowok=True)  # expect overflow
        testformat("%.100d", (1,), '00000000000000000000000000000000000000'
                 '000000000000000000000000000000000000000000000000000000'
                 '00000001', overflowok=True)
        testformat("%#.117x", (1,), '0x00000000000000000000000000000000000'
                 '000000000000000000000000000000000000000000000000000000'
                 '0000000000000000000000000001',
                 overflowok=True)
        testformat("%#.118x", (1,), '0x00000000000000000000000000000000000'
                 '000000000000000000000000000000000000000000000000000000'
                 '00000000000000000000000000001',
                 overflowok=True)

        testformat("%f", (1.0,), "1.000000")
        # these are trying to test the limits of the internal magic-number-length
        # formatting buffer, if that number changes then these tests are less
        # effective
        testformat("%#.*g", (109, -1.e+49/3.))
        testformat("%#.*g", (110, -1.e+49/3.))
        testformat("%#.*g", (110, -1.e+100/3.))
        # test some ridiculously large precision, expect overflow
        testformat('%12.*f', (123456, 1.0))

        # check for internal overflow validation on length of precision
        # these tests should no longer cause overflow in Python
        # 2.7/3.1 and later.
        testformat("%#.*g", (110, -1.e+100/3.))
        testformat("%#.*G", (110, -1.e+100/3.))
        testformat("%#.*f", (110, -1.e+100/3.))
        testformat("%#.*F", (110, -1.e+100/3.))
        # Formatting of integers. Overflow is not ok
        testformat("%x", 10, "a")
        testformat("%x", 100000000000, "174876e800")
        testformat("%o", 10, "12")
        testformat("%o", 100000000000, "1351035564000")
        testformat("%d", 10, "10")
        testformat("%d", 100000000000, "100000000000")
        big = 123456789012345678901234567890
        testformat("%d", big, "123456789012345678901234567890")
        testformat("%d", -big, "-123456789012345678901234567890")
        testformat("%5d", -big, "-123456789012345678901234567890")
        testformat("%31d", -big, "-123456789012345678901234567890")
        testformat("%32d", -big, " -123456789012345678901234567890")
        testformat("%-32d", -big, "-123456789012345678901234567890 ")
        testformat("%032d", -big, "-0123456789012345678901234567890")
        testformat("%-032d", -big, "-123456789012345678901234567890 ")
        testformat("%034d", -big, "-000123456789012345678901234567890")
        testformat("%034d", big, "0000123456789012345678901234567890")
        testformat("%0+34d", big, "+000123456789012345678901234567890")
        testformat("%+34d", big, "   +123456789012345678901234567890")
        testformat("%34d", big, "    123456789012345678901234567890")
        testformat("%.2d", big, "123456789012345678901234567890")
        testformat("%.30d", big, "123456789012345678901234567890")
        testformat("%.31d", big, "0123456789012345678901234567890")
        testformat("%32.31d", big, " 0123456789012345678901234567890")
        testformat("%d", float(big), "123456________________________", 6)
        big = 0x1234567890abcdef12345  # 21 hex digits
        testformat("%x", big, "1234567890abcdef12345")
        testformat("%x", -big, "-1234567890abcdef12345")
        testformat("%5x", -big, "-1234567890abcdef12345")
        testformat("%22x", -big, "-1234567890abcdef12345")
        testformat("%23x", -big, " -1234567890abcdef12345")
        testformat("%-23x", -big, "-1234567890abcdef12345 ")
        testformat("%023x", -big, "-01234567890abcdef12345")
        testformat("%-023x", -big, "-1234567890abcdef12345 ")
        testformat("%025x", -big, "-0001234567890abcdef12345")
        testformat("%025x", big, "00001234567890abcdef12345")
        testformat("%0+25x", big, "+0001234567890abcdef12345")
        testformat("%+25x", big, "   +1234567890abcdef12345")
        testformat("%25x", big, "    1234567890abcdef12345")
        testformat("%.2x", big, "1234567890abcdef12345")
        testformat("%.21x", big, "1234567890abcdef12345")
        testformat("%.22x", big, "01234567890abcdef12345")
        testformat("%23.22x", big, " 01234567890abcdef12345")
        testformat("%-23.22x", big, "01234567890abcdef12345 ")
        testformat("%X", big, "1234567890ABCDEF12345")
        testformat("%#X", big, "0X1234567890ABCDEF12345")
        testformat("%#x", big, "0x1234567890abcdef12345")
        testformat("%#x", -big, "-0x1234567890abcdef12345")
        testformat("%#.23x", -big, "-0x001234567890abcdef12345")
        testformat("%#+.23x", big, "+0x001234567890abcdef12345")
        testformat("%# .23x", big, " 0x001234567890abcdef12345")
        testformat("%#+.23X", big, "+0X001234567890ABCDEF12345")
        testformat("%#-+.23X", big, "+0X001234567890ABCDEF12345")
        testformat("%#-+26.23X", big, "+0X001234567890ABCDEF12345")
        testformat("%#-+27.23X", big, "+0X001234567890ABCDEF12345 ")
        testformat("%#+27.23X", big, " +0X001234567890ABCDEF12345")
        # next one gets two leading zeroes from precision, and another from the
        # 0 flag and the width
        testformat("%#+027.23X", big, "+0X0001234567890ABCDEF12345")
        # same, except no 0 flag
        testformat("%#+27.23X", big, " +0X001234567890ABCDEF12345")
        testformat("%x", float(big), "123456_______________", 6)
        big = 0o12345670123456701234567012345670  # 32 octal digits
        testformat("%o", big, "12345670123456701234567012345670")
        testformat("%o", -big, "-12345670123456701234567012345670")
        testformat("%5o", -big, "-12345670123456701234567012345670")
        testformat("%33o", -big, "-12345670123456701234567012345670")
        testformat("%34o", -big, " -12345670123456701234567012345670")
        testformat("%-34o", -big, "-12345670123456701234567012345670 ")
        testformat("%034o", -big, "-012345670123456701234567012345670")
        testformat("%-034o", -big, "-12345670123456701234567012345670 ")
        testformat("%036o", -big, "-00012345670123456701234567012345670")
        testformat("%036o", big, "000012345670123456701234567012345670")
        testformat("%0+36o", big, "+00012345670123456701234567012345670")
        testformat("%+36o", big, "   +12345670123456701234567012345670")
        testformat("%36o", big, "    12345670123456701234567012345670")
        testformat("%.2o", big, "12345670123456701234567012345670")
        testformat("%.32o", big, "12345670123456701234567012345670")
        testformat("%.33o", big, "012345670123456701234567012345670")
        testformat("%34.33o", big, " 012345670123456701234567012345670")
        testformat("%-34.33o", big, "012345670123456701234567012345670 ")
        testformat("%o", big, "12345670123456701234567012345670")
        testformat("%#o", big, "0o12345670123456701234567012345670")
        testformat("%#o", -big, "-0o12345670123456701234567012345670")
        testformat("%#.34o", -big, "-0o0012345670123456701234567012345670")
        testformat("%#+.34o", big, "+0o0012345670123456701234567012345670")
        testformat("%# .34o", big, " 0o0012345670123456701234567012345670")
        testformat("%#+.34o", big, "+0o0012345670123456701234567012345670")
        testformat("%#-+.34o", big, "+0o0012345670123456701234567012345670")
        testformat("%#-+37.34o", big, "+0o0012345670123456701234567012345670")
        testformat("%#+37.34o", big, "+0o0012345670123456701234567012345670")
        # next one gets one leading zero from precision
        testformat("%.33o", big, "012345670123456701234567012345670")
        # base marker shouldn't change that, since "0" is redundant
        testformat("%#.33o", big, "0o012345670123456701234567012345670")
        # but reduce precision, and base marker should add a zero
        testformat("%#.32o", big, "0o12345670123456701234567012345670")
        # one leading zero from precision, and another from "0" flag & width
        testformat("%034.33o", big, "0012345670123456701234567012345670")
        # base marker shouldn't change that
        testformat("%0#34.33o", big, "0o012345670123456701234567012345670")
        testformat("%o", float(big), "123456__________________________", 6)
        # Some small ints, in both Python int and long flavors.
        testformat("%d", 42, "42")
        testformat("%d", -42, "-42")
        testformat("%d", 42, "42")
        testformat("%d", -42, "-42")
        testformat("%d", 42.0, "42")
        testformat("%#x", 1, "0x1")
        testformat("%#x", 1, "0x1")
        testformat("%#X", 1, "0X1")
        testformat("%#X", 1, "0X1")
        testformat("%#x", 1.0, "0x1")
        testformat("%#o", 1, "0o1")
        testformat("%#o", 1, "0o1")
        testformat("%#o", 0, "0o0")
        testformat("%#o", 0, "0o0")
        testformat("%o", 0, "0")
        testformat("%o", 0, "0")
        testformat("%d", 0, "0")
        testformat("%d", 0, "0")
        testformat("%#x", 0, "0x0")
        testformat("%#x", 0, "0x0")
        testformat("%#X", 0, "0X0")
        testformat("%#X", 0, "0X0")
        testformat("%x", 0x42, "42")
        testformat("%x", -0x42, "-42")
        testformat("%x", 0x42, "42")
        testformat("%x", -0x42, "-42")
        testformat("%x", float(0x42), "42")
        testformat("%o", 0o42, "42")
        testformat("%o", -0o42, "-42")
        testformat("%o", 0o42, "42")
        testformat("%o", -0o42, "-42")
        testformat("%o", float(0o42), "42")
        testformat("%r", "\u0378", "'\\u0378'")  # non printable
        testformat("%a", "\u0378", "'\\u0378'")  # non printable
        testformat("%r", "\u0374", "'\u0374'")   # printable
        testformat("%a", "\u0374", "'\\u0374'")  # printable

        # alternate float formatting
        testformat('%g', 1.1, '1.1')
        testformat('%#g', 1.1, '1.10000')

        # Test exception for unknown format characters
        if verbose:
            print('Testing exceptions')
        def test_exc(formatstr, args, exception, excmsg):
            try:
                testformat(formatstr, args)
            except exception as exc:
                if str(exc) == excmsg:
                    if verbose:
                        print("yes")
                else:
                    if verbose: print('no')
                    print('Unexpected ', exception, ':', repr(str(exc)))
            except:
                if verbose: print('no')
                print('Unexpected exception')
                raise
            else:
                raise TestFailed('did not get expected exception: %s' % excmsg)
        test_exc('abc %b', 1, ValueError,
                 "unsupported format character 'b' (0x62) at index 5")
        #test_exc(unicode('abc %\u3000','raw-unicode-escape'), 1, ValueError,
        #         "unsupported format character '?' (0x3000) at index 5")
        test_exc('%d', '1', TypeError, "%d format: a number is required, not str")
        test_exc('%g', '1', TypeError, "a float is required")
        test_exc('no format', '1', TypeError,
                 "not all arguments converted during string formatting")
        test_exc('no format', '1', TypeError,
                 "not all arguments converted during string formatting")

        if maxsize == 2**31-1:
            # crashes 2.2.1 and earlier:
            try:
                "%*d"%(maxsize, -127)
            except MemoryError:
                pass
            else:
                raise TestFailed('"%*d"%(maxsize, -127) should fail')

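Feeding sys.maxsize as the `*` precision probes the formatter's overflow handling; testformat's overflowok=True tolerates the resulting error. Outside the test harness the failure is easy to reproduce, though the exact exception type varies across CPython versions and platforms:

import sys

try:
    "%.*d" % (sys.maxsize, 1)
except (OverflowError, MemoryError, ValueError) as exc:
    # A precision this large cannot be satisfied; which error is
    # raised depends on the interpreter build.
    print(type(exc).__name__)
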
Example 96

Project: Pyrseas Source File: table.py
Function: to_map
    def to_map(self, opts):
        """Convert a sequence definition to a YAML-suitable format

        :param opts: options to include/exclude tables, etc.
        :return: dictionary
        """
        if hasattr(opts, 'tables') and opts.tables and \
                (self.name not in opts.tables and
                 not hasattr(self, 'owner_table') or
                 self.owner_table not in opts.tables) or (
                     hasattr(opts, 'excl_tables') and opts.excl_tables and
                     self.name in opts.excl_tables):
            return None
        seq = {}
        for key, val in list(self.__dict__.items()):
            if key in self.keylist or key == 'dependent_table' or (
                    key == 'owner' and opts.no_owner) or (
                    key == 'privileges' and opts.no_privs) or (
                    key == 'description' and self.description is None):
                continue
            if key == 'privileges':
                privs = self.map_privs()
                if privs != []:
                    seq[key] = privs
            elif key == 'max_value' and val == MAX_BIGINT:
                seq[key] = None
            elif key == 'min_value' and val == 1:
                seq[key] = None
            else:
                if PY2:
                    if isinstance(val, (int, long)) and val <= sys.maxsize:
                        seq[key] = int(val)
                    else:
                        seq[key] = str(val)
                else:
                    if isinstance(val, int):
                        seq[key] = int(val)
                    else:
                        seq[key] = str(val)

        return seq

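On Python 2 the `val <= sys.maxsize` test checked whether a long still fits a platform int before converting it; on Python 3 the int/long split is gone, which is why the else-branch reduces to a plain isinstance check. The dispatch boiled down, with illustrative names:

import sys

def coerce_for_yaml(val):
    # Under Python 3 all ints are arbitrary precision, so sys.maxsize
    # is merely a cutoff beyond which we serialize as a string.
    if isinstance(val, int) and val <= sys.maxsize:
        return int(val)
    return str(val)

assert coerce_for_yaml(42) == 42
assert coerce_for_yaml(sys.maxsize + 1) == str(sys.maxsize + 1)
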
Example 97

Project: pretix Source File: event.py
def get_grouped_items(event):
    items = event.items.all().filter(
        Q(active=True)
        & Q(Q(available_from__isnull=True) | Q(available_from__lte=now()))
        & Q(Q(available_until__isnull=True) | Q(available_until__gte=now()))
        & Q(hide_without_voucher=False)
    ).select_related(
        'category',  # for re-grouping
    ).prefetch_related(
        'variations__quotas',  # for .availability()
        Prefetch('quotas',
                 queryset=event.quotas.all()),
        Prefetch('variations', to_attr='available_variations',
                 queryset=ItemVariation.objects.filter(active=True, quotas__isnull=False).distinct()),
    ).annotate(
        quotac=Count('quotas'),
        has_variations=Count('variations')
    ).filter(
        quotac__gt=0
    ).order_by('category__position', 'category_id', 'position', 'name')
    display_add_to_cart = False
    quota_cache = {}
    for item in items:
        if not item.has_variations:
            item.cached_availability = list(item.check_quotas(_cache=quota_cache))
            item.order_max = min(item.cached_availability[1]
                                 if item.cached_availability[1] is not None else sys.maxsize,
                                 int(event.settings.max_items_per_order))
            item.price = item.default_price
            display_add_to_cart = display_add_to_cart or item.order_max > 0
        else:
            for var in item.available_variations:
                var.cached_availability = list(var.check_quotas(_cache=quota_cache))
                var.order_max = min(var.cached_availability[1]
                                    if var.cached_availability[1] is not None else sys.maxsize,
                                    int(event.settings.max_items_per_order))
                display_add_to_cart = display_add_to_cart or var.order_max > 0
                var.price = var.default_price if var.default_price is not None else item.default_price
            if len(item.available_variations) > 0:
                item.min_price = min([v.price for v in item.available_variations])
                item.max_price = max([v.price for v in item.available_variations])

    items = [item for item in items if len(item.available_variations) > 0 or not item.has_variations]
    return items, display_add_to_cart

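The min() calls substitute sys.maxsize for a None availability so that an unlimited quota simply defers to the per-order cap. The pattern in isolation (hypothetical names):

import sys

def order_max(available, per_order_cap):
    # available is None when the quota is unlimited; sys.maxsize
    # makes the per-order cap win in min().
    return min(available if available is not None else sys.maxsize,
               per_order_cap)

assert order_max(None, 10) == 10
assert order_max(3, 10) == 3
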
Example 98

Project: pychess Source File: lsort.py
def getMoveValue(board, table, depth, move):
    """ Sort criteria is as follows.
        1.  The move from the hash table
        2.  Captures as above.
        3.  Killers.
        4.  History.
        5.  Moves to the centre. """

    # As we only return directly from the transposition table if hashf == hashfEXACT,
    # there could be a non-hashfEXACT yet very promising move for us to test

    if table.isHashMove(depth, move):
        return sys.maxsize

    fcord = (move >> 6) & 63
    tcord = move & 63
    flag = move >> 12

    arBoard = board.arBoard
    fpiece = fcord if flag == DROP else arBoard[fcord]
    tpiece = arBoard[tcord]

    if tpiece != EMPTY:
        if board.variant == ATOMICCHESS:
            if kingExplode(board, move, board.color):
                return MATE_VALUE
        # We add some extra to ensure also bad captures will be searched early
        if board.variant in ASEAN_VARIANTS:
            return ASEAN_PIECE_VALUES[tpiece] - PIECE_VALUES[fpiece] + 1000
        else:
            return PIECE_VALUES[tpiece] - PIECE_VALUES[fpiece] + 1000

    if flag in PROMOTIONS:
        if board.variant in ASEAN_VARIANTS:
            return ASEAN_PIECE_VALUES[flag - 3] - PAWN_VALUE + 1000
        else:
            return PIECE_VALUES[flag - 3] - PAWN_VALUE + 1000

    if flag == DROP:
        return PIECE_VALUES[tpiece] + 1000

    killervalue = table.isKiller(depth, move)
    if killervalue:
        return 1000 + killervalue

    # King tropism - a move that brings us nearer to the enemy king, is probably
    # a good move
    # opking = board.kings[1-board.color]
    # score = distance[fpiece][fcord][opking] - distance[fpiece][tcord][opking]

    if fpiece not in position_values:
        # That is, fpiece == EMPTY
        print(fcord, tcord)
        print(board)

    if board.variant in ASEAN_VARIANTS:
        score = 0
    else:
        score = position_values[fpiece][board.color][tcord] - \
            position_values[fpiece][board.color][fcord]

    # History heuristic
    score += table.getButterfly(move)

    return score

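Returning sys.maxsize for the hash move guarantees it sorts ahead of every capture, promotion, and killer, whose scores stay within piece values plus 1000. The ordering idea in miniature:

import sys

HASH_MOVE_SCORE = sys.maxsize  # unbeatable by any material-based score

moves = [('quiet', 12), ('capture', 1450), ('hash', HASH_MOVE_SCORE)]
moves.sort(key=lambda m: m[1], reverse=True)
assert [name for name, _ in moves] == ['hash', 'capture', 'quiet']
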
Example 99

Project: floto Source File: activity_worker.py
    def __init__(self, *, domain, task_list, swf=None, task_heartbeat_in_seconds=None,
            identity=None):
        """
        Parameters
        ----------
        domain: str
        task_list: str
            The task_list of the activity worker
        swf: Optional[floto.api.Swf]
            If None a new instance is initiated
        task_heartbeat_in_seconds: Optional[int]
            Heartbeats are sent every <task_heartbeat_in_seconds> to SWF during the execution. If
            set to 0 no heartbeats will be sent. Default is 90.
        identity: Optional[str]
            Identity of the worker making the request, recorded in the ActivityTaskStarted event in
            the workflow history. This enables diagnostic tracing when problems arise. The form of
            this identity is user defined. Default is the fully qualified domain name.
        """
        self.task_token = None
        self.last_response = None
        self._terminate_activity_worker = False
        self.max_polls = sys.maxsize
        self.input = None
        self.result = None
        self.swf = swf or floto.api.Swf()
        self.task_list = task_list
        self.domain = domain
        self.task_heartbeat_in_seconds = task_heartbeat_in_seconds
        if self.task_heartbeat_in_seconds is None:
            self.task_heartbeat_in_seconds = 90

        self.identity = identity
        if self.identity is None:
            self.identity = socket.getfqdn(socket.gethostname())

        self.heartbeat_sender = floto.HeartbeatSender()

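max_polls = sys.maxsize lets a single bounded loop double as "poll forever" in production while tests can override it with a small number. Sketch:

import sys

def run_polling_loop(poll_once, max_polls=sys.maxsize):
    # range(sys.maxsize) is lazy in Python 3, so this costs nothing
    # up front and behaves as an effectively infinite loop.
    for _ in range(max_polls):
        if poll_once():
            return True
    return False

assert run_polling_loop(lambda: True, max_polls=3)
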
Example 100

Project: TrustRouter Source File: test_long.py
    def test_mixed_compares(self):
        eq = self.assertEqual

        # We're mostly concerned that mixing floats and longs does the
        # right thing, even when longs are too large to fit in a float.
        # The safest way to check the results is to use an entirely different
        # method, which we do here via a skeletal rational class (which
        # represents all Python ints, longs and floats exactly).
        class Rat:
            def __init__(self, value):
                if isinstance(value, int):
                    self.n = value
                    self.d = 1
                elif isinstance(value, float):
                    # Convert to exact rational equivalent.
                    f, e = math.frexp(abs(value))
                    assert f == 0 or 0.5 <= f < 1.0
                    # |value| = f * 2**e exactly

                    # Suck up CHUNK bits at a time; 28 is enough so that we suck
                    # up all bits in 2 iterations for all known binary double-
                    # precision formats, and small enough to fit in an int.
                    CHUNK = 28
                    top = 0
                    # invariant: |value| = (top + f) * 2**e exactly
                    while f:
                        f = math.ldexp(f, CHUNK)
                        digit = int(f)
                        assert digit >> CHUNK == 0
                        top = (top << CHUNK) | digit
                        f -= digit
                        assert 0.0 <= f < 1.0
                        e -= CHUNK

                    # Now |value| = top * 2**e exactly.
                    if e >= 0:
                        n = top << e
                        d = 1
                    else:
                        n = top
                        d = 1 << -e
                    if value < 0:
                        n = -n
                    self.n = n
                    self.d = d
                    assert float(n) / float(d) == value
                else:
                    raise TypeError("can't deal with %r" % value)

            def _cmp__(self, other):
                if not isinstance(other, Rat):
                    other = Rat(other)
                x, y = self.n * other.d, self.d * other.n
                return (x > y) - (x < y)
            def __eq__(self, other):
                return self._cmp__(other) == 0
            def __ne__(self, other):
                return self._cmp__(other) != 0
            def __ge__(self, other):
                return self._cmp__(other) >= 0
            def __gt__(self, other):
                return self._cmp__(other) > 0
            def __le__(self, other):
                return self._cmp__(other) <= 0
            def __lt__(self, other):
                return self._cmp__(other) < 0

        cases = [0, 0.001, 0.99, 1.0, 1.5, 1e20, 1e200]
        # 2**48 is an important boundary in the internals.  2**53 is an
        # important boundary for IEEE double precision.
        for t in 2.0**48, 2.0**50, 2.0**53:
            cases.extend([t - 1.0, t - 0.3, t, t + 0.3, t + 1.0,
                          int(t-1), int(t), int(t+1)])
        cases.extend([0, 1, 2, sys.maxsize, float(sys.maxsize)])
        # 1 << 20000 should exceed all double formats.  int(1e200) is to
        # check that we get equality with 1e200 above.
        t = int(1e200)
        cases.extend([0, 1, 2, 1 << 20000, t-1, t, t+1])
        cases.extend([-x for x in cases])
        for x in cases:
            Rx = Rat(x)
            for y in cases:
                Ry = Rat(y)
                Rcmp = (Rx > Ry) - (Rx < Ry)
                xycmp = (x > y) - (x < y)
                eq(Rcmp, xycmp, Frm("%r %r %d %d", x, y, Rcmp, xycmp))
                eq(x == y, Rcmp == 0, Frm("%r == %r %d", x, y, Rcmp))
                eq(x != y, Rcmp != 0, Frm("%r != %r %d", x, y, Rcmp))
                eq(x < y, Rcmp < 0, Frm("%r < %r %d", x, y, Rcmp))
                eq(x <= y, Rcmp <= 0, Frm("%r <= %r %d", x, y, Rcmp))
                eq(x > y, Rcmp > 0, Frm("%r > %r %d", x, y, Rcmp))
                eq(x >= y, Rcmp >= 0, Frm("%r >= %r %d", x, y, Rcmp))
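
The float(sys.maxsize) entry in `cases` is pointed: on a 64-bit build sys.maxsize is 2**63 - 1, which has no exact double representation and rounds up to 2.0**63, so the exact mixed-type comparison must report the int as strictly smaller than its own float conversion:

import sys

if sys.maxsize == 2**63 - 1:  # typical 64-bit build
    assert float(sys.maxsize) == 2.0**63
    assert sys.maxsize < float(sys.maxsize)      # exact comparison
    assert int(float(sys.maxsize)) == sys.maxsize + 1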