sys.version_info

Here are examples of the Python API sys.version_info, taken from open source projects. Each one shows a real-world version check used to keep code portable between Python 2 and 3.
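
As a quick reference before the examples, here is a minimal, self-contained sketch of the idioms they rely on (this sketch is ours, not taken from any of the projects below):

import sys

# sys.version_info behaves like a tuple: (major, minor, micro, releaselevel, serial)
if sys.version_info[0] == 2:        # index access, the most common check below
    text_type = unicode             # name only looked up on Python 2
else:
    text_type = str

if sys.version_info < (2, 6):       # lexicographic tuple comparison (see Example 4)
    pass                            # e.g. fall back to an older API here

major = sys.version_info.major      # named attributes, available since 2.7/3.1 (see Example 2)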

121 Examples

Example 1

Project: hope Source File: _generator.py
def _run_fkt_code(modtoken):
    code = ""
    code += "\tPyObject * run(PyObject * self, PyObject * args) {\n"

    for fkt in modtoken.functions[modtoken.main]:
        code += "\t\t{"
        for arg in fkt.signature:
            if isinstance(arg, Object):
                code += "\n\t\t\tPyObject * p{0};".format(arg.name)
                code += "\n\t\t\t{0} c{1};".format(arg.getId("t"), arg.name)
            elif len(arg.shape) > 0:
                code += "\n\t\t\tPyObj p{0};".format(arg.name)
            else:
                code += "\n\t\t\tPyObject * p{0};".format(arg.name)
                code += " {0} c{1};".format(PY_C_TYPE[arg.dtype], arg.name)
        if len(fkt.signature) > 0:
            code += "\n\t\t\tif ("
            code += "\n\t\t\t\tPyTuple_CheckExact(args) and PyTuple_GET_SIZE(args) == {0}".format(len(fkt.signature))
            for idx, arg in enumerate(fkt.signature):
                code += "\n\t\t\t\tand (p{0} = PyTuple_GET_ITEM(args, {1})) ".format(arg.name, idx)
                if isinstance(arg, Object):
                    code += "and c{0}.initialize(p{0})".format(arg.name)
                elif len(arg.shape) > 0:
                    code += "and PyArray_CheckExact(p{0})".format(arg.name)
                    code += "\n\t\t\t\tand PyArray_TYPE((PyArrayObject *)p{0}) == {1} and PyArray_NDIM((PyArrayObject *)p{0}) == {2}".format(arg.name, NPY_TYPEENUM[arg.dtype], len(arg.shape))
                elif arg.dtype is int:
                    if sys.version_info[0] == 2:
                        code += "and PyInt_CheckExact(p{0})".format(arg.name)
                    else:
                        code += "and PyLong_CheckExact(p{0})".format(arg.name)
                elif arg.dtype is float:
                    code += "and PyFloat_CheckExact(p{0})".format(arg.name)
                elif arg.dtype in NPY_SCALAR_TAG:
                    code += "and PyArray_IsScalar(p{0}, {1})".format(arg.name, NPY_SCALAR_TAG[arg.dtype])
                else:
                    raise Exception("Unknown type: {0!s}".format(arg.dtype))

            code += "\n\t\t\t) {\n"
            for arg in fkt.signature:
                if isinstance(arg, Object): pass
                elif len(arg.shape) > 0:
                    code += "\t\t\t\tif (!(p{0}.incref((PyObject *)PyArray_GETCONTIGUOUS((PyArrayObject *)p{0})))) {{\n".format(arg.name)
                    code += "\t\t\t\t\tPyErr_SetString(PyExc_ValueError, \"Invalid Argument type on {0}!\");\n".format(arg.name)
                    code += "\t\t\t\t\treturn NULL;\n"
                    code += "\t\t\t\t}\n"
                elif arg.dtype is int:
                    if sys.version_info[0] == 2:
                        code += "\t\t\t\tc{0} = PyInt_AS_LONG(p{0});\n".format(arg.name)
                    else:
                        code += "\t\t\t\tc{0} = PyLong_AS_LONG(p{0});\n".format(arg.name)
                elif arg.dtype is float:
                    code += "\t\t\t\tc{0} = PyFloat_AS_DOUBLE(p{0});\n".format(arg.name)
                elif arg.dtype in NPY_SCALAR_TAG:
                    code += "\t\t\t\tc{0} = PyArrayScalar_VAL(p{0}, {1});\n".format(arg.name, NPY_SCALAR_TAG[arg.dtype])

            args = []
            for arg in fkt.signature:
                if not isinstance(arg, Object) and len(arg.shape) > 0:
                    args.append("p{1}, PyArray_SHAPE((PyArrayObject *)p{1}), ({0} *)PyArray_DATA((PyArrayObject *)p{1})".format(PY_C_TYPE[arg.dtype], arg.name))
                else:
                    args.append("c{0}".format(arg.name))

            call  = "{0}_{1}(".format(modtoken.main, fkt.getId())
            call += "\n\t\t\t\t\t\t  {0}".format("\n\t\t\t\t\t\t, ".join(args))
            call += "\n\t\t\t\t\t)"
        else:
            call  = "{0}_{1}()".format(modtoken.main, fkt.getId())

        code += "\t\t\t\ttry {\n"
        if fkt.dtype is None:
            code += "\t\t\t\t\t{0};\n".format(call)
            code += "\t\t\t\t\tPy_INCREF(Py_None);\n"
            code += "\t\t\t\t\treturn Py_None;\n"
        elif len(fkt.shape) == 0 and fkt.dtype is bool:
            code += "\t\t\t\t\tPyObject* res = {0} ? Py_True : Py_False;\n".format(call)
            code += "\t\t\t\t\tPy_INCREF(res);\n"
            code += "\t\t\t\t\treturn res;\n"
        elif len(fkt.shape) == 0 and fkt.dtype is int:
            code += "\t\t\t\t\treturn Py_BuildValue(\"{0}\", {1});\n".format(PY_TYPE_CHAR[np.int_], call)
        elif len(fkt.shape) == 0 and fkt.dtype is float:
            code += "\t\t\t\t\treturn Py_BuildValue(\"{0}\", {1});\n".format(PY_TYPE_CHAR[np.float_], call)
        elif len(fkt.shape) == 0 and fkt.dtype in NPY_SCALAR_TAG:
            code += "\t\t\t\t\tPyObject* res = PyArrayScalar_New({0});\n".format(NPY_SCALAR_TAG[fkt.dtype])
            code += "\t\t\t\t\tPyArrayScalar_ASSIGN(res, {0}, {1});\n".format(NPY_SCALAR_TAG[fkt.dtype], call)
            code += "\t\t\t\t\treturn res;\n"
        else:
            code += "\t\t\t\t\tPyObject * res = std::get<0>({0});\n".format(call)
            if fkt.return_allocated:
                # to avoid mem leak or segfault
                code += "\n\t\t\t\t\tPy_INCREF(res);\n"
                
            code += "\t\t\t\t\treturn res;\n"
        code += "\t\t\t\t} catch (...) {\n"
        code += "\t\t\t\t\treturn NULL;\n"
        code += "\t\t\t\t}\n"

        if len(fkt.signature) > 0:
            code += "\t\t\t} else\n"
            code += "\t\t\t\tPyErr_Clear();\n"

        code += "\t\t}\n"
        
    def stripArg(arg):
        if isinstance(arg, Object):
            delattr(arg, "parent")
            if hasattr(arg, "instance"):
                delattr(arg, "instance")
            for name, value in list(arg.attrs.items()):
                arg.attrs[name] = stripArg(value)
        else:
            if not arg.dtype in [bool, int, float]:
                arg.dtype = NPY_TYPE[arg.dtype]
            arg.dims = len(arg.shape)
            delattr(arg, "shape")
            if not isinstance(arg, ObjectAttr):
                delattr(arg, "scope")
                delattr(arg, "allocated")
            else:
                delattr(arg, "parent")
        return arg

    signatures = []
    for fkt in modtoken.functions[modtoken.main]:
        signatures.append([stripArg(copy.deepcopy(arg)) for arg in fkt.signature])

    if sys.version_info[0] == 2:
        pickled = pickle.dumps(signatures).replace("\n", "\\n")
    else:
        import base64
        pickled = base64.encodebytes(pickle.dumps(signatures)).decode('ascii').replace("\n", "\\n")

    code += "\t\tPyObject * signatures = Py_BuildValue(\"(sO)\", \"{0}\", args);\n".format(pickled)
    code += "\t\tif (!signatures) {\n"

    # TODO: make all exceptions reasonable: http://docs.python.org/2/c-api/exceptions.html
    code += "\t\t\tPyErr_SetString(PyExc_ValueError, \"Error building signature string for {0}\");\n".format(modtoken.main)
    code += "\t\t\treturn NULL;\n"
    code += "\t\t}\n"
    code += "\t\treturn PyObject_Call(create_signature, signatures, NULL);\n"

    return code

Example 2

Project: tensorlayer Source File: files.py
Function: load_cifar10_dataset
def load_cifar10_dataset(shape=(-1, 32, 32, 3), plotable=False, second=3):
    """The CIFAR-10 dataset consists of 60000 32x32 colour images in 10 classes, with
    6000 images per class. There are 50000 training images and 10000 test images.

    The dataset is divided into five training batches and one test batch, each with
    10000 images. The test batch contains exactly 1000 randomly-selected images from
    each class. The training batches contain the remaining images in random order,
    but some training batches may contain more images from one class than another.
    Between them, the training batches contain exactly 5000 images from each class.

    Parameters
    ----------
    shape : tuple
        The shape of the images, e.g. (-1, 3, 32, 32), (-1, 32, 32, 3), (-1, 32*32*3)
    plotable : boolean
        Whether to plot some example images.
    second : int
        If ``plotable`` is True, ``second`` is the display time.

    Examples
    --------
    >>> X_train, y_train, X_test, y_test = tl.files.load_cifar10_dataset(shape=(-1, 32, 32, 3), plotable=True)

    Notes
    ------
    CIFAR-10 images can only be displayed without color change when cast to uint8.
    >>> X_train = np.asarray(X_train, dtype=np.uint8)
    >>> plt.ion()
    >>> fig = plt.figure(1232)
    >>> count = 1
    >>> for row in range(10):
    >>>     for col in range(10):
    >>>         a = fig.add_subplot(10, 10, count)
    >>>         plt.imshow(X_train[count-1], interpolation='nearest')
    >>>         plt.gca().xaxis.set_major_locator(plt.NullLocator())    # hide axis ticks
    >>>         plt.gca().yaxis.set_major_locator(plt.NullLocator())
    >>>         count = count + 1
    >>> plt.draw()
    >>> plt.pause(3)

    References
    ----------
    - `CIFAR website <https://www.cs.toronto.edu/~kriz/cifar.html>`_
    - `Data download link <https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz>`_
    - `Code references <https://teratail.com/questions/28932>`_
    """
    import os
    import sys
    import pickle
    import numpy as np

    # We first define a download function, supporting both Python 2 and 3.
    filename = 'cifar-10-python.tar.gz'
    if sys.version_info[0] == 2:
        from urllib import urlretrieve
    else:
        from urllib.request import urlretrieve

    def download(filename, source='https://www.cs.toronto.edu/~kriz/'):
        print("Downloading %s" % filename)
        urlretrieve(source + filename, filename)

    # After downloading cifar-10-python.tar.gz, we need to extract it.
    import tarfile
    def un_tar(file_name):
        print("Extracting %s" % file_name)
        tar = tarfile.open(file_name)
        names = tar.getnames()
        # if os.path.isdir(file_name + "_files"):
        #     pass
        # else:
        #     os.mkdir(file_name + "_files")
        for name in names:
            tar.extract(name) #, file_name.split('.')[0])
        tar.close()
        print("Extracted to %s" % names[0])

    if not os.path.exists('cifar-10-batches-py'):
        download(filename)
        un_tar(filename)

    def unpickle(file):
        fp = open(file, 'rb')
        if sys.version_info.major == 2:
            data = pickle.load(fp)
        elif sys.version_info.major == 3:
            data = pickle.load(fp, encoding='latin-1')
        fp.close()
        return data

    X_train = None
    y_train = []

    path = '' # you can set a directory for the data here

    for i in range(1,6):
        data_dic = unpickle(path+"cifar-10-batches-py/data_batch_{}".format(i))
        if i == 1:
            X_train = data_dic['data']
        else:
            X_train = np.vstack((X_train, data_dic['data']))
        y_train += data_dic['labels']

    test_data_dic = unpickle(path+"cifar-10-batches-py/test_batch")
    X_test = test_data_dic['data']
    y_test = np.array(test_data_dic['labels'])

    if shape == (-1, 3, 32, 32):
        X_test = X_test.reshape(shape)
        X_train = X_train.reshape(shape)
        # X_train = np.transpose(X_train, (0, 1, 3, 2))
    elif shape == (-1, 32, 32, 3):
        X_test = X_test.reshape(shape, order='F')
        X_train = X_train.reshape(shape, order='F')
        X_test = np.transpose(X_test, (0, 2, 1, 3))
        X_train = np.transpose(X_train, (0, 2, 1, 3))
    else:
        X_test = X_test.reshape(shape)
        X_train = X_train.reshape(shape)

    y_train = np.array(y_train)

    if plotable:
        print('\nCIFAR-10')
        import matplotlib.pyplot as plt
        fig = plt.figure(1)

        print('Shape of a training image: X_train[0]',X_train[0].shape)

        plt.ion()       # interactive mode
        count = 1
        for row in range(10):
            for col in range(10):
                a = fig.add_subplot(10, 10, count)
                if shape == (-1, 3, 32, 32):
                    # plt.imshow(X_train[count-1], interpolation='nearest')
                    plt.imshow(np.transpose(X_train[count-1], (1, 2, 0)), interpolation='nearest')
                    # plt.imshow(np.transpose(X_train[count-1], (2, 1, 0)), interpolation='nearest')
                elif shape == (-1, 32, 32, 3):
                    plt.imshow(X_train[count-1], interpolation='nearest')
                    # plt.imshow(np.transpose(X_train[count-1], (1, 0, 2)), interpolation='nearest')
                else:
                    raise Exception("Do not support the given 'shape' to plot the image examples")
                plt.gca().xaxis.set_major_locator(plt.NullLocator())    # hide axis ticks
                plt.gca().yaxis.set_major_locator(plt.NullLocator())
                count = count + 1
        plt.draw()      # interactive mode
        plt.pause(3)   # interactive mode

        print("X_train:",X_train.shape)
        print("y_train:",y_train.shape)
        print("X_test:",X_test.shape)
        print("y_test:",y_test.shape)

    X_train = np.asarray(X_train, dtype=np.float32)
    X_test = np.asarray(X_test, dtype=np.float32)
    y_train = np.asarray(y_train, dtype=np.int32)
    y_test = np.asarray(y_test, dtype=np.int32)

    return X_train, y_train, X_test, y_test

Example 3

Project: ShaniXBMCWork2 Source File: test_random.py
    def runTest(self):
        """Crypto.Random.new()"""
        # Import the Random module and try to use it
        from Crypto import Random
        randobj = Random.new()
        x = randobj.read(16)
        y = randobj.read(16)
        self.assertNotEqual(x, y)
        z = Random.get_random_bytes(16)
        self.assertNotEqual(x, z)
        self.assertNotEqual(y, z)
        # Test the Random.random module, which
        # implements a subset of Python's random API
        # Not implemented:
        # seed(), getstate(), setstate(), jumpahead()
        # random(), uniform(), triangular(), betavariate()
        # expovariate(), gammavariate(), gauss(),
        # lognormvariate(), normalvariate(),
        # vonmisesvariate(), paretovariate()
        # weibullvariate()
        # WichmannHill(), whseed(), SystemRandom()
        from Crypto.Random import random
        x = random.getrandbits(16*8)
        y = random.getrandbits(16*8)
        self.assertNotEqual(x, y)
        # Test randrange
        if x>y:
            start = y
            stop = x
        else:
            start = x
            stop = y
        for step in range(1,10):
            x = random.randrange(start,stop,step)
            y = random.randrange(start,stop,step)
            self.assertNotEqual(x, y)
            self.assertEqual(start <= x < stop, True)
            self.assertEqual(start <= y < stop, True)
            self.assertEqual((x - start) % step, 0)
            self.assertEqual((y - start) % step, 0)
        for i in range(10):
            self.assertEqual(random.randrange(1,2), 1)
        self.assertRaises(ValueError, random.randrange, start, start)
        self.assertRaises(ValueError, random.randrange, stop, start, step)
        self.assertRaises(TypeError, random.randrange, start, stop, step, step)
        self.assertRaises(TypeError, random.randrange, start, stop, "1")
        self.assertRaises(TypeError, random.randrange, "1", stop, step)
        self.assertRaises(TypeError, random.randrange, 1, "2", step)
        self.assertRaises(ValueError, random.randrange, start, stop, 0)
        # Test randint
        x = random.randint(start,stop)
        y = random.randint(start,stop)
        self.assertNotEqual(x, y)
        self.assertEqual(start <= x <= stop, True)
        self.assertEqual(start <= y <= stop, True)
        for i in range(10):
            self.assertEqual(random.randint(1,1), 1)
        self.assertRaises(ValueError, random.randint, stop, start)
        self.assertRaises(TypeError, random.randint, start, stop, step)
        self.assertRaises(TypeError, random.randint, "1", stop)
        self.assertRaises(TypeError, random.randint, 1, "2")
        # Test choice
        seq = range(10000)
        x = random.choice(seq)
        y = random.choice(seq)
        self.assertNotEqual(x, y)
        self.assertEqual(x in seq, True)
        self.assertEqual(y in seq, True)
        for i in range(10):
            self.assertEqual(random.choice((1,2,3)) in (1,2,3), True)
        self.assertEqual(random.choice([1,2,3]) in [1,2,3], True)
        if sys.version_info[0] == 3:
            self.assertEqual(random.choice(bytearray(b('123'))) in bytearray(b('123')), True)
        self.assertEqual(1, random.choice([1]))
        self.assertRaises(IndexError, random.choice, [])
        self.assertRaises(TypeError, random.choice, 1)
        # Test shuffle. Lacks random parameter to specify function.
        # Make copies of seq
        seq = range(500)
        x = list(seq)
        y = list(seq)
        random.shuffle(x)
        random.shuffle(y)
        self.assertNotEqual(x, y)
        self.assertEqual(len(seq), len(x))
        self.assertEqual(len(seq), len(y))
        for i in range(len(seq)):
           self.assertEqual(x[i] in seq, True)
           self.assertEqual(y[i] in seq, True)
           self.assertEqual(seq[i] in x, True)
           self.assertEqual(seq[i] in y, True)
        z = [1]
        random.shuffle(z)
        self.assertEqual(z, [1])
        if sys.version_info[0] == 3:
            z = bytearray(b('12'))
            random.shuffle(z)
            self.assertEqual(b('1') in z, True)
            self.assertRaises(TypeError, random.shuffle, b('12'))
        self.assertRaises(TypeError, random.shuffle, 1)
        self.assertRaises(TypeError, random.shuffle, "1")
        self.assertRaises(TypeError, random.shuffle, (1,2))
        # 2to3 wraps a list() around it, alas - but I want to shoot
        # myself in the foot here! :D
        # if sys.version_info[0] == 3:
            # self.assertRaises(TypeError, random.shuffle, range(3))
        # Test sample
        x = random.sample(seq, 20)
        y = random.sample(seq, 20)
        self.assertNotEqual(x, y)
        for i in range(20):
           self.assertEqual(x[i] in seq, True)
           self.assertEqual(y[i] in seq, True)
        z = random.sample([1], 1)
        self.assertEqual(z, [1])
        z = random.sample((1,2,3), 1)
        self.assertEqual(z[0] in (1,2,3), True)
        z = random.sample("123", 1)
        self.assertEqual(z[0] in "123", True)
        z = random.sample(range(3), 1)
        self.assertEqual(z[0] in range(3), True)
        if sys.version_info[0] == 3:
            z = random.sample(b("123"), 1)
            self.assertEqual(z[0] in b("123"), True)
            z = random.sample(bytearray(b("123")), 1)
            self.assertEqual(z[0] in bytearray(b("123")), True)
        self.assertRaises(TypeError, random.sample, 1)

Example 4

Project: PythonScript Source File: test_basics.py
Function: test03_simplecursorstuff
    def test03_SimpleCursorStuff(self, get_raises_error=0, set_raises_error=0):
        if verbose:
            print '\n', '-=' * 30
            print "Running %s.test03_SimpleCursorStuff (get_error %s, set_error %s)..." % \
                  (self.__class__.__name__, get_raises_error, set_raises_error)

        if self.env and self.dbopenflags & db.DB_AUTO_COMMIT:
            txn = self.env.txn_begin()
        else:
            txn = None
        c = self.d.cursor(txn=txn)

        rec = c.first()
        count = 0
        while rec is not None:
            count = count + 1
            if verbose and count % 100 == 0:
                print rec
            try:
                rec = c.next()
            except db.DBNotFoundError, val:
                if get_raises_error:
                    if sys.version_info < (2, 6) :
                        self.assertEqual(val[0], db.DB_NOTFOUND)
                    else :
                        self.assertEqual(val.args[0], db.DB_NOTFOUND)
                    if verbose: print val
                    rec = None
                else:
                    self.fail("unexpected DBNotFoundError")
            self.assertEqual(c.get_current_size(), len(c.current()[1]),
                    "%s != len(%r)" % (c.get_current_size(), c.current()[1]))

        self.assertEqual(count, self._numKeys)


        rec = c.last()
        count = 0
        while rec is not None:
            count = count + 1
            if verbose and count % 100 == 0:
                print rec
            try:
                rec = c.prev()
            except db.DBNotFoundError, val:
                if get_raises_error:
                    if sys.version_info < (2, 6) :
                        self.assertEqual(val[0], db.DB_NOTFOUND)
                    else :
                        self.assertEqual(val.args[0], db.DB_NOTFOUND)
                    if verbose: print val
                    rec = None
                else:
                    self.fail("unexpected DBNotFoundError")

        self.assertEqual(count, self._numKeys)

        rec = c.set('0505')
        rec2 = c.current()
        self.assertEqual(rec, rec2)
        self.assertEqual(rec[0], '0505')
        self.assertEqual(rec[1], self.makeData('0505'))
        self.assertEqual(c.get_current_size(), len(rec[1]))

        # make sure we get empty values properly
        rec = c.set('empty value')
        self.assertEqual(rec[1], '')
        self.assertEqual(c.get_current_size(), 0)

        try:
            n = c.set('bad key')
        except db.DBNotFoundError, val:
            if sys.version_info < (2, 6) :
                self.assertEqual(val[0], db.DB_NOTFOUND)
            else :
                self.assertEqual(val.args[0], db.DB_NOTFOUND)
            if verbose: print val
        else:
            if set_raises_error:
                self.fail("expected exception")
            if n is not None:
                self.fail("expected None: %r" % (n,))

        rec = c.get_both('0404', self.makeData('0404'))
        self.assertEqual(rec, ('0404', self.makeData('0404')))

        try:
            n = c.get_both('0404', 'bad data')
        except db.DBNotFoundError, val:
            if sys.version_info < (2, 6) :
                self.assertEqual(val[0], db.DB_NOTFOUND)
            else :
                self.assertEqual(val.args[0], db.DB_NOTFOUND)
            if verbose: print val
        else:
            if get_raises_error:
                self.fail("expected exception")
            if n is not None:
                self.fail("expected None: %r" % (n,))

        if self.d.get_type() == db.DB_BTREE:
            rec = c.set_range('011')
            if verbose:
                print "searched for '011', found: ", rec

            rec = c.set_range('011',dlen=0,doff=0)
            if verbose:
                print "searched (partial) for '011', found: ", rec
            if rec[1] != '': self.fail('expected empty data portion')

            ev = c.set_range('empty value')
            if verbose:
                print "search for 'empty value' returned", ev
            if ev[1] != '': self.fail('empty value lookup failed')

        c.set('0499')
        c.delete()
        try:
            rec = c.current()
        except db.DBKeyEmptyError, val:
            if get_raises_error:
                if sys.version_info < (2, 6) :
                    self.assertEqual(val[0], db.DB_KEYEMPTY)
                else :
                    self.assertEqual(val.args[0], db.DB_KEYEMPTY)
                if verbose: print val
            else:
                self.fail("unexpected DBKeyEmptyError")
        else:
            if get_raises_error:
                self.fail('DBKeyEmptyError exception expected')

        c.next()
        c2 = c.dup(db.DB_POSITION)
        self.assertEqual(c.current(), c2.current())

        c2.put('', 'a new value', db.DB_CURRENT)
        self.assertEqual(c.current(), c2.current())
        self.assertEqual(c.current()[1], 'a new value')

        c2.put('', 'er', db.DB_CURRENT, dlen=0, doff=5)
        self.assertEqual(c2.current()[1], 'a newer value')

        c.close()
        c2.close()
        if txn:
            txn.commit()

        # time to abuse the closed cursors and hope we don't crash
        methods_to_test = {
            'current': (),
            'delete': (),
            'dup': (db.DB_POSITION,),
            'first': (),
            'get': (0,),
            'next': (),
            'prev': (),
            'last': (),
            'put':('', 'spam', db.DB_CURRENT),
            'set': ("0505",),
        }
        for method, args in methods_to_test.items():
            try:
                if verbose:
                    print "attempting to use a closed cursor's %s method" % \
                          method
                # a bug may cause a NULL pointer dereference...
                getattr(c, method)(*args)
            except db.DBError, val:
                if sys.version_info < (2, 6) :
                    self.assertEqual(val[0], 0)
                else :
                    self.assertEqual(val.args[0], 0)
                if verbose: print val
            else:
                self.fail("no exception raised when using a buggy cursor's"
                          "%s method" % method)

        #
        # free cursor referencing a closed database, it should not barf:
        #
        oldcursor = self.d.cursor(txn=txn)
        self.d.close()

        # this would originally cause a segfault when the cursor for a
        # closed database was cleaned up.  it should not anymore.
        # SF pybsddb bug id 667343
        del oldcursor

Example 5

Project: pymobiledevice Source File: ccl_bplist.py
def __decode_object(f, offset, collection_offset_size, offset_table):
    # Move to offset and read type
    #print("Decoding object at offset {0}".format(offset))
    f.seek(offset)
    # A little hack to keep the script portable between py2.x and py3k
    if sys.version_info[0] < 3:
        type_byte = ord(f.read(1)[0])
    else:
        type_byte = f.read(1)[0]
    #print("Type byte: {0}".format(hex(type_byte)))
    if type_byte == 0x00: # Null      0000 0000
        return None
    elif type_byte == 0x08: # False   0000 1000
        return False
    elif type_byte == 0x09: # True    0000 1001
        return True
    elif type_byte == 0x0F: # Fill    0000 1111
        raise BplistError("Fill type not currently supported at offset {0}".format(f.tell())) # Not sure what to return really...
    elif type_byte & 0xF0 == 0x10: # Int    0001 xxxx
        int_length = 2 ** (type_byte & 0x0F)
        int_bytes = f.read(int_length)
        return __decode_multibyte_int(int_bytes)
    elif type_byte & 0xF0 == 0x20: # Float   0010 nnnn
        float_length = 2 ** (type_byte & 0x0F)
        float_bytes = f.read(float_length)
        return __decode_float(float_bytes)
    elif type_byte & 0xFF == 0x33: # Date   0011 0011
        date_bytes = f.read(8)
        date_value = __decode_float(date_bytes)
        return datetime.datetime(2001,1,1) + datetime.timedelta(seconds = date_value)
    elif type_byte & 0xF0 == 0x40: # Data   0100 nnnn
        if type_byte & 0x0F != 0x0F:
            # length in 4 lsb
            data_length = type_byte & 0x0F
        else:
            # A little hack to keep the script portable between py2.x and py3k
            if sys.version_info[0] < 3:
                int_type_byte = ord(f.read(1)[0])
            else:
                int_type_byte = f.read(1)[0]
            if int_type_byte & 0xF0 != 0x10:
                raise BplistError("Long Data field definition not followed by int type at offset {0}".format(f.tell()))
            int_length = 2 ** (int_type_byte & 0x0F)
            int_bytes = f.read(int_length)
            data_length = __decode_multibyte_int(int_bytes, False)
        return f.read(data_length)
    elif type_byte & 0xF0 == 0x50: # ASCII  0101 nnnn
        if type_byte & 0x0F != 0x0F:
            # length in 4 lsb
            ascii_length = type_byte & 0x0F
        else:
            # A little hack to keep the script portable between py2.x and py3k
            if sys.version_info[0] < 3:
                int_type_byte = ord(f.read(1)[0])
            else:
                int_type_byte = f.read(1)[0]
            if int_type_byte & 0xF0 != 0x10:
                raise BplistError("Long ASCII field definition not followed by int type at offset {0}".format(f.tell()))
            int_length = 2 ** (int_type_byte & 0x0F)
            int_bytes = f.read(int_length)
            ascii_length = __decode_multibyte_int(int_bytes, False)
        return f.read(ascii_length).decode("ascii")
    elif type_byte & 0xF0 == 0x60: # UTF-16  0110 nnnn
        if type_byte & 0x0F != 0x0F:
            # length in 4 lsb
            utf16_length = (type_byte & 0x0F) * 2 # Length is characters - 16bit width
        else:
            # A little hack to keep the script portable between py2.x and py3k
            if sys.version_info[0] < 3:
                int_type_byte = ord(f.read(1)[0])
            else:
                int_type_byte = f.read(1)[0]
            if int_type_byte & 0xF0 != 0x10:
                raise BplistError("Long UTF-16 field definition not followed by int type at offset {0}".format(f.tell()))
            int_length = 2 ** (int_type_byte & 0x0F)
            int_bytes = f.read(int_length)
            utf16_length = __decode_multibyte_int(int_bytes, False) * 2
        return f.read(utf16_length).decode("utf_16_be")
    elif type_byte & 0xF0 == 0x80: # UID    1000 nnnn
        uid_length = (type_byte & 0x0F) + 1
        uid_bytes = f.read(uid_length)
        return BplistUID(__decode_multibyte_int(uid_bytes, signed=False))
    elif type_byte & 0xF0 == 0xA0: # Array  1010 nnnn
        if type_byte & 0x0F != 0x0F:
            # length in 4 lsb
            array_count = type_byte & 0x0F
        else:
            # A little hack to keep the script portable between py2.x and py3k
            if sys.version_info[0] < 3:
                int_type_byte = ord(f.read(1)[0])
            else:
                int_type_byte = f.read(1)[0]
            if int_type_byte & 0xF0 != 0x10:
                raise BplistError("Long Array field definition not followed by int type at offset {0}".format(f.tell()))
            int_length = 2 ** (int_type_byte & 0x0F)
            int_bytes = f.read(int_length)
            array_count = __decode_multibyte_int(int_bytes, signed=False)
        array_refs = []
        for i in range(array_count):
            array_refs.append(__decode_multibyte_int(f.read(collection_offset_size), False))
        return [__decode_object(f, offset_table[obj_ref], collection_offset_size, offset_table) for obj_ref in array_refs]
    elif type_byte & 0xF0 == 0xC0: # Set  1010 nnnn
        if type_byte & 0x0F != 0x0F:
            # length in 4 lsb
            set_count = type_byte & 0x0F
        else:
            # A little hack to keep the script portable between py2.x and py3k
            if sys.version_info[0] < 3:
                int_type_byte = ord(f.read(1)[0])
            else:
                int_type_byte = f.read(1)[0]
            if int_type_byte & 0xF0 != 0x10:
                raise BplistError("Long Set field definition not followed by int type at offset {0}".format(f.tell()))
            int_length = 2 ** (int_type_byte & 0x0F)
            int_bytes = f.read(int_length)
            set_count = __decode_multibyte_int(int_bytes, signed=False)
        set_refs = []
        for i in range(set_count):
            set_refs.append(__decode_multibyte_int(f.read(collection_offset_size), False))
        return [__decode_object(f, offset_table[obj_ref], collection_offset_size, offset_table) for obj_ref in set_refs]
    elif type_byte & 0xF0 == 0xD0: # Dict  1011 nnnn
        if type_byte & 0x0F != 0x0F:
            # length in 4 lsb
            dict_count = type_byte & 0x0F
        else:
            # A little hack to keep the script portable between py2.x and py3k
            if sys.version_info[0] < 3:
                int_type_byte = ord(f.read(1)[0])
            else:
                int_type_byte = f.read(1)[0]
            #print("Dictionary length int byte: {0}".format(hex(int_type_byte)))
            if int_type_byte & 0xF0 != 0x10:
                raise BplistError("Long Dict field definition not followed by int type at offset {0}".format(f.tell()))
            int_length = 2 ** (int_type_byte & 0x0F)
            int_bytes = f.read(int_length)
            dict_count = __decode_multibyte_int(int_bytes, signed=False)
        key_refs = []
        #print("Dictionary count: {0}".format(dict_count))
        for i in range(dict_count):
            key_refs.append(__decode_multibyte_int(f.read(collection_offset_size), False))
        value_refs = []
        for i in range(dict_count):
            value_refs.append(__decode_multibyte_int(f.read(collection_offset_size), False))
        
        dict_result = {}
        for i in range(dict_count):
            #print("Key ref: {0}\tVal ref: {1}".format(key_refs[i], value_refs[i]))
            key = __decode_object(f, offset_table[key_refs[i]], collection_offset_size, offset_table)
            val = __decode_object(f, offset_table[value_refs[i]], collection_offset_size, offset_table)
            dict_result[key] = val
        return dict_result

Example 6

Project: ck Source File: module.py
def process_ck_web_request(i):

    """

    Input:  {
              http - Python http object
            }

    Output: { None }
    """

    # http object
    http=i['http']

    # Parse GET variables and path
    xget={}
    xpath={'host':'', 'port':'', 'first':'', 'rest':'', 'query':''} # May be used in the future

    xt='json'

    xpath['host']=i.get('host','')
    xpath['port']=i.get('port','')

    # Check GET variables
    if http.path!='':
       http.send_response(200)

       a=urlparse.urlparse(http.path)
       xp=a.path
       xr=''

       if xp.startswith('/'): xp=xp[1:]

       u=xp.find('/')
       if u>=0:
          xr=xp[u+1:]
          xp=xp[:u]

       xt=xp

       xpath['first']=xp
       xpath['rest']=xr
       xpath['query']=a.query
       b=urlparse.parse_qs(a.query, keep_blank_values=True, )

       xget={}
       for k in b:
#           xget[k]=b[k][0]
            xget[k]=urlunquote(b[k][0])
            if sys.version_info[0]<3:
               xget[k]=xget[k].decode('utf8')

    # Check POST
    xpost={}
    xpost1={}

    try:
       headers = http.headers
       content_type = headers.get('content-type')
       ctype=''
       if content_type != None:
          ctype, pdict = cgi.parse_header(content_type)
          # Python3 cgi.parse_multipart expects boundary to be bytes, not str.
          if sys.version_info[0]<3 and 'boundary' in pdict:
             pdict['boundary'] = pdict['boundary'].encode()

       if ctype == 'multipart/form-data':
          if sys.version_info[0]<3:
             xpost1 = cgi.parse_multipart(http.rfile, pdict)
          else:
             xxpost1 = cgi.FieldStorage(fp=http.rfile, headers=headers, environ={'REQUEST_METHOD':'POST'})
             for k in xxpost1.keys():
                 xpost1[k]=[xxpost1[k].value]
       elif ctype == 'application/x-www-form-urlencoded':
          length = int(http.headers.get('content-length'))
          s=http.rfile.read(length)
          if sys.version_info[0]>2: s=s.decode('utf8')
          xpost1 = cgi.parse_qs(s, keep_blank_values=1)

    except Exception as e:
       bin=b'internal CK web service error [7101] ('+format(e).encode('utf8')+')'
       web_err({'http':http, 'type':xt, 'bin':bin})
       ck.out(ck.cfg['error']+bin.decode('utf8'))
       return

    # Post processing
    for k in xpost1:
        v=xpost1[k]
        if k.endswith('[]'):
           k1=k[:-2]
           xpost[k1]=[]
           for l in v:
               xpost[k1].append(urlunquote(l))
        else:
           if k!='file_content':
              xpost[k]=urlunquote(v[0])
           else:
              xpost[k]=v[0]

        if k=='file_content':
           fcrt=xpost1.get('file_content_record_to_tmp','')
           if (type(fcrt)==list and len(fcrt)>0 and fcrt[0]=='yes') or fcrt=='yes':
              fd, fn=tempfile.mkstemp(suffix='.tmp', prefix='ck-') # suffix is important - CK will delete such file!
              os.close(fd)

              f=open(fn,'wb')
              f.write(xpost[k])
              f.close()

              xpost[k+'_uploaded']=fn
              del(xpost[k])
              k+='_uploaded'
           else:
              import base64
              xpost[k+'_base64']=base64.urlsafe_b64encode(xpost[k]).decode('utf8')
              del(xpost[k])
              k+='_base64'

        if sys.version_info[0]<3:
           xpost[k]=xpost[k].decode('utf8')

    # Prepare input and check if CK json present
    ii=xget
    ii.update(xpost)

    cj=ii.get('ck_json','').strip()
    if cj!='':
       r=ck.convert_json_str_to_dict({'str':cj, 'skip_quote_replacement':'yes'})
       if r['return']>0:
          bin=b'internal CK web service error [7102] ('+r['error'].encode('utf8')+b')'
          web_err({'http':http, 'type':xt, 'bin':bin})
          ck.out(ck.cfg['error']+bin.decode('utf8'))
          return

       del(ii['ck_json'])
       ii.update(r['dict'])

    # Misc parameters
    dc=ii.get('detach_console','')
    act=ii.get('action','')

    # Check output type
    if ii.get('out','')!='':
       xt=ii['out']

    if xt=='': xt='web'

    if xt!='json' and xt!='con' and xt!='web':
       web_out({'http':http,
                'type':'web',
                'bin':b'Unknown CK request ('+xt.encode('utf8')+b')!'})
       return

    # Prepare temporary output file
    fd, fn=tempfile.mkstemp(prefix='ck-')
    os.close(fd)
    os.remove(fn)

    # Check output
    if dc=='yes':
       if ck.cfg.get('forbid_detached_console','')=='yes':
          web_out({'http':http,
                   'type':'web',
                   'bin':b'Detached console is forbidden!'})
          return
    else:
       ii['out_file']=fn
       ii['web']='yes'
       if xt=='json' or xt=='web':
          ii['out']='json_file'
       # else output to console (for remote access for example)

    ii['con_encoding']='utf8'

    ii['host']=wfe_host
    ii['port']=wfe_port

    # Execute command *********************************************************
    if act=='':
       if cfg.get('if_web_action_not_defined','')!='' and cfg.get('if_web_module_not_defined','')!='':
          ii['module_uoa']=cfg['if_web_module_not_defined']
          ii['action']=cfg['if_web_action_not_defined']

    r=call_ck(ii)

    # Process output
    if r['return']>0:
       if os.path.isfile(fn): os.remove(fn)

       bout=r['error']

       try: bout=bout.encode('utf-8')
       except Exception as e: pass

       web_err({'http':http,
                'type':xt,
                'bin':bout})
       return

    # If output to console or detached console
    if xt=='con' or dc=='yes':
       if os.path.isfile(fn): os.remove(fn)

       bout=r.get('std','').encode('utf8')

       web_out({'http':http, 'type':xt, 'bin':bout})

       return

    # If json or web
    # Try to load output file
    if not os.path.isfile(fn):
       web_err({'http':http,
                'type':xt,
                'bin':b'Output json file was not created, see output ('+r['std'].encode('utf8')+b')!'})
       return

    r=ck.load_text_file({'text_file':fn, 'keep_as_bin':'yes'})
    if r['return']>0:
       bout=r['error']

       try: bout=bout.encode('utf-8')
       except Exception as e: pass

       web_err({'http':http, 'type':xt, 'bin':bout})

       return

    bin=r['bin']

    if os.path.isfile(fn): os.remove(fn)

    # Process JSON output from file
    fx=''

    if sys.version_info[0]>2: bin=bin.decode('utf-8')

    ru=ck.convert_json_str_to_dict({'str':bin, 'skip_quote_replacement':'yes'})
    if ru['return']>0:
       bout=ru['error']

       try: bout=bout.encode('utf-8')
       except Exception as e: pass

       web_err({'http':http, 'type':xt, 'bin':bout})

       return

    rr=ru['dict']
    if rr['return']>0:
       bout=rr['error']

       try: bout=bout.encode('utf-8')
       except Exception as e: pass

       web_err({'http':http, 'type':xt, 'bin':bout})
       return

    # Check if file was returned
    fr=False

    if 'file_content_base64' in rr and rr.get('filename','')!='':
       fr=True

    # Check if download
    if (xt=='web' and fr) or (act=='pull' and xt!='json'):
       import base64
       x=rr.get('file_content_base64','')

       fx=rr.get('filename','')
       if fx=='': fx=ck.cfg['default_archive_name']

       # work around base64 requiring bytes input on Python 3.0-3.2
       if sys.version_info[0]==3 and sys.version_info[1]<3:
          x=x.encode('utf-8')
       else:
          x=str(x)
       bin=base64.urlsafe_b64decode(x) # convert from unicode to str since base64 works on strings
                                       # should be safe in Python 2.x and 3.x

       # Process extension
       fn1, fne = os.path.splitext(fx)
       if fne.startswith('.'): fne=fne[1:]
       if fne!='': xt=fne
       else: xt='unknown'
    else:
       # Check and output html
       if rr.get('html','')!='':
          bin=rr['html'].encode('utf-8')
       else:
          if sys.version_info[0]>2: # Unknown output
             bin=bin.encode('utf-8')

    web_out({'http':http, 'type':xt, 'bin':bin, 'filename':fx})

    return {'return':0}

Example 7

Project: pywws Source File: Template.py
    def process(self, live_data, template_file):
        def jump(idx, count):
            while count > 0:
                new_idx = data_set.after(idx + SECOND)
                if new_idx == None:
                    break
                idx = new_idx
                count -= 1
            while count < 0:
                new_idx = data_set.before(idx)
                if new_idx == None:
                    break
                idx = new_idx
                count += 1
            return idx, count == 0

        params = self.params
        if not live_data:
            idx = self.calib_data.before(datetime.max)
            if not idx:
                self.logger.error("No calib data - run pywws.Process first")
                return
            live_data = self.calib_data[idx]
        # get default character encoding of template input & output files
        self.encoding = params.get('config', 'template encoding', 'iso-8859-1')
        file_encoding = self.encoding
        if file_encoding == 'html':
            file_encoding = 'ascii'
        # get conversions module to create its 'private' wind dir text
        # array, then copy it to deprecated wind_dir_text variable
        winddir_text(0)
        wind_dir_text = conversions._winddir_text_array
        hour_diff = self._hour_diff
        rain_hour = self._rain_hour
        rain_day = self._rain_day
        pressure_offset = eval(self.params.get('config', 'pressure offset'))
        fixed_block = eval(self.status.get('fixed', 'fixed block'))
        # start off with no time rounding
        round_time = None
        # start off in hourly data mode
        data_set = self.hourly_data
        # start off in utc
        time_zone = utc
        # start off with default use_locale setting
        use_locale = self.use_locale
        # jump to last item
        idx, valid_data = jump(datetime.max, -1)
        if not valid_data:
            self.logger.error("No summary data - run pywws.Process first")
            return
        data = data_set[idx]
        # open template file, if not already a file(like) object
        if hasattr(template_file, 'readline'):
            tmplt = template_file
        else:
            tmplt = open(template_file, 'rb')
        # do the text processing
        while True:
            line = tmplt.readline().decode(file_encoding)
            if not line:
                break
            parts = line.split('#')
            for i in range(len(parts)):
                if i % 2 == 0:
                    # not a processing directive
                    if i == 0 or parts[i] != '\n':
                        yield parts[i]
                    continue
                if parts[i] and parts[i][0] == '!':
                    # comment
                    continue
                # Python 2 shlex can't handle unicode
                if sys.version_info[0] < 3:
                    parts[i] = parts[i].encode(file_encoding)
                command = shlex.split(parts[i])
                if sys.version_info[0] < 3:
                    command = map(lambda x: x.decode(file_encoding), command)
                if command == []:
                    # empty command == print a single '#'
                    yield u'#'
                elif command[0] in data.keys() + ['calc']:
                    # output a value
                    if not valid_data:
                        continue
                    # format is: key fmt_string no_value_string conversion
                    # get value
                    if command[0] == 'calc':
                        x = eval(command[1])
                        del command[1]
                    else:
                        x = data[command[0]]
                    # adjust time
                    if isinstance(x, datetime):
                        if round_time:
                            x += round_time
                        x = x.replace(tzinfo=utc)
                        x = x.astimezone(time_zone)
                    # convert data
                    if x is not None and len(command) > 3:
                        x = eval(command[3])
                    # get format
                    fmt = u'%s'
                    if len(command) > 1:
                        fmt = command[1]
                    # write output
                    if x is None:
                        if len(command) > 2:
                            yield command[2]
                    elif isinstance(x, datetime):
                        if sys.version_info[0] < 3:
                            fmt = fmt.encode(file_encoding)
                        x = x.strftime(fmt)
                        if sys.version_info[0] < 3:
                            x = x.decode(file_encoding)
                        yield x
                    elif not use_locale:
                        yield fmt % (x)
                    elif sys.version_info >= (2, 7) or '%%' not in fmt:
                        yield locale.format_string(fmt, x)
                    else:
                        yield locale.format_string(
                            fmt.replace('%%', '##'), x).replace('##', '%')
                elif command[0] == 'monthly':
                    data_set = self.monthly_data
                    idx, valid_data = jump(datetime.max, -1)
                    data = data_set[idx]
                elif command[0] == 'daily':
                    data_set = self.daily_data
                    idx, valid_data = jump(datetime.max, -1)
                    data = data_set[idx]
                elif command[0] == 'hourly':
                    data_set = self.hourly_data
                    idx, valid_data = jump(datetime.max, -1)
                    data = data_set[idx]
                elif command[0] == 'raw':
                    data_set = self.calib_data
                    idx, valid_data = jump(datetime.max, -1)
                    data = data_set[idx]
                elif command[0] == 'live':
                    data_set = self.calib_data
                    idx = datetime.max
                    valid_data = True
                    data = live_data
                elif command[0] == 'timezone':
                    if command[1] == 'utc':
                        time_zone = utc
                    elif command[1] == 'local':
                        time_zone = Local
                    else:
                        self.logger.error("Unknown time zone: %s", command[1])
                        return
                elif command[0] == 'locale':
                    use_locale = eval(command[1])
                elif command[0] == 'encoding':
                    self.encoding = command[1]
                    file_encoding = self.encoding
                    if file_encoding == 'html':
                        file_encoding = 'ascii'
                elif command[0] == 'roundtime':
                    if eval(command[1]):
                        round_time = timedelta(seconds=30)
                    else:
                        round_time = None
                elif command[0] == 'jump':
                    prevdata = data
                    idx, valid_data = jump(idx, int(command[1]))
                    data = data_set[idx]
                elif command[0] == 'goto':
                    prevdata = data
                    time_str = command[1]
                    if '%' in time_str:
                        lcl = idx.replace(tzinfo=utc).astimezone(time_zone)
                        time_str = lcl.strftime(time_str)
                    new_idx = DataStore.safestrptime(time_str)
                    new_idx = new_idx.replace(tzinfo=time_zone).astimezone(utc)
                    new_idx = data_set.after(new_idx.replace(tzinfo=None))
                    if new_idx:
                        idx = new_idx
                        data = data_set[idx]
                        valid_data = True
                    else:
                        valid_data = False
                elif command[0] == 'loop':
                    loop_count = int(command[1])
                    loop_start = tmplt.tell()
                elif command[0] == 'endloop':
                    loop_count -= 1
                    if valid_data and loop_count > 0:
                        tmplt.seek(loop_start, 0)
                else:
                    self.logger.error(
                        "Unknown processing directive: #%s#", parts[i])
                    return

Example 8

Project: ganga Source File: feedback_report.py
def report(job=None):
    """ Upload error reports (snapshot of configuration,job parameters, input/output files, command history etc.). Job argument is optional. """
    import mimetypes
    import urllib
    import urllib2
    import httplib
    import string
    import random
    import sys
    import os
    import platform

    import Ganga.GPIDev.Lib.Config.config as config
    from Ganga.GPIDev.Base.VPrinter import full_print

    import Ganga

    # global variables that will print a summary report to the user along with
    # the download link
    global JOB_REPORT, GANGA_VERSION, BACKEND_NAME, APPLICATION_NAME, PYTHON_PATH
    JOB_REPORT = False
    GANGA_VERSION = ''
    BACKEND_NAME = ''
    APPLICATION_NAME = ''
    PYTHON_PATH = ''

    def random_string(length):
        return ''.join([random.choice(string.letters) for ii in range(length + 1)])

    def encode_multipart_formdata(files):
        boundary = random_string(30)
        retnl = '\r\n'
        lines = []

        def get_content_type(filename):
            return mimetypes.guess_type(filename)[0] or 'application/octet-stream'

        fields = {'title': 'Ganga Error Report'}

        for (key, value) in fields.iteritems():
            lines.append('--' + boundary)
            lines.append('Content-Disposition: form-data; name="%s"' % key)
            lines.append('')
            lines.append(value)
        for field_name, file in files.iteritems():
            lines.append('--' + boundary)
            lines.append(
                'Content-Disposition: form-data; name="file"; filename="%s"' % (file))
            lines.append('Content-Type: %s' % get_content_type(file))
            lines.append('')
            lines.append(open(file, 'rb').read())
        lines.append('--' + boundary + '--')
        lines.append('')
        body = retnl.join(lines)

        headers = {'content-type': 'multipart/form-data; boundary=%s' %
                   boundary, 'content-length': str(len(body))}

        return body, headers

    def make_upload_file(server):

        def upload_file(path):

            # print 'Uploading %r to %r' % (path, server)

            data = {'MAX_FILE_SIZE': '3145728',
                    'sub': '',
                    'mode': 'regist'}
            files = {'file': path}

            send_post(server, files)

        return upload_file

    def send_post(url, files):
        logger.debug("Sending Post to %s ,  containing %s" % (url, files))

        encoded_data = encode_multipart_formdata(files)

        data = urllib.urlencode(encoded_data[1])
        req = urllib2.Request(url, data=data)
        if req.has_data():
            logger.debug("urllib2: Success!")
        else:
            logger.debug("urllib2: Fail!!!")

        connection = httplib.HTTPConnection(req.get_host())
        # connection.set_debuglevel(1)
        logger.debug("Requesting: 'POST', %s, %s " % (url, encoded_data[1]))
#                connection.request( method='POST', url=req.get_selector(), body=encoded_data[0], headers=encoded_data[1] )
        connection.request(
            method='POST', url=url, body=encoded_data[0], headers=encoded_data[1])
        response = connection.getresponse()

        logger.debug("httplib POST request response was: %s , because: %s" % (
            response.status, response.reason))

        responseResult = response.read()

        #logger.debug("Responce.read(): --%s--" % responseResult )

        responseResult = responseResult[
            responseResult.find("<span id=\"download_path\""):]
        startIndex = responseResult.find("path:") + 5
        endIndex = responseResult.find("</span>")

        logger.debug("Responce.read(): --%s--" %
                     responseResult[startIndex:endIndex])

        logger.info(
            'Your error report was uploaded to ganga developers with the following URL. ')
        logger.info(
            'You may include this URL and the following summary information in your bug report or in the support email to the developers.')
        logger.info('')
        logger.info('***' + str(responseResult[startIndex:endIndex]) + '***')
        logger.info('')
        global GANGA_VERSION, JOB_REPORT, APPLICATION_NAME, BACKEND_NAME, PYTHON_PATH
        logger.info('Ganga Version : ' + GANGA_VERSION)
        logger.info('Python Version : ' + "%s.%s.%s" %
                    (sys.version_info[0], sys.version_info[1], sys.version_info[2]))
        logger.info('Operating System Version : ' + platform.platform())

        if JOB_REPORT:
            logger.info('Application Name : ' + APPLICATION_NAME)
            logger.info('Backend Name : ' + BACKEND_NAME)

        logger.info('Python Path : ' + PYTHON_PATH)
        logger.info('')

        JOB_REPORT = False
        GANGA_VERSION = ''
        BACKEND_NAME = ''
        APPLICATION_NAME = ''
        PYTHON_PATH = ''

    def run_upload(server, path):

        upload_file = make_upload_file(server)
        upload_file(path)

    def report_inner(job=None, isJob=False, isTask=False):

        userInfoDirName = "userreport"
        tempDirName = "reportsRepository"
        # job relevant info
        jobSummaryFileName = "jobsummary.txt"
        jobFullPrintFileName = "jobfullprint.txt"
        repositoryPath = "repository/$usr/LocalXML/6.0/jobs/$thousandsNumxxx"
        # task relevant info
        taskSummaryFileName = "tasksummary.txt"
        taskFullPrintFileName = "taskfullprint.txt"
        tasksRepositoryPath = "repository/$usr/LocalXML/6.0/tasks/$thousandsNumxxx"
        # user's info
        environFileName = "environ.txt"
        userConfigFileName = "userconfig.txt"
        defaultConfigFileName = "gangarc.txt"
        ipythonHistoryFileName = "ipythonhistory.txt"
        gangaLogFileName = "gangalog.txt"
        jobsListFileName = "jobslist.txt"
        tasksListFileName = "taskslist.txt"
        thread_trace_file_name = 'thread_trace.html'
        from Ganga.Utility import Config
        uploadFileServer = Config.getConfig('Feedback')['uploadServer']
        #uploadFileServer= "http://gangamon.cern.ch/django/errorreports/"
        #uploadFileServer= "http://ganga-ai-02.cern.ch/django/errorreports/"
        #uploadFileServer= "http://127.0.0.1:8000/errorreports"

        def printDictionary(dictionary, file=sys.stdout):
            for k, v in dictionary.iteritems():
                print('%s: %s' % (k, v), file=file)

                if k == 'PYTHONPATH':
                    global PYTHON_PATH
                    PYTHON_PATH = v

        def extractFileObjects(fileName, targetDirectoryName):
            try:
                fileToRead = open(fileName, 'r')
                try:
                    fileText = fileToRead.read()
                    import re
                    pattern = "File\(name=\'(.+?)\'"
                    matches = re.findall(pattern, fileText)

                    for fileName in matches:
                        fileName = os.path.expanduser(fileName)
                        targetFileName = os.path.join(
                            targetDirectoryName, os.path.basename(fileName))
                        shutil.copyfile(fileName, targetFileName)

                finally:
                    fileToRead.close()
            # except IOError, OSError:
            except Exception as err:
                logger.debug("Err: %s" % err)
                writeErrorLog(str(sys.exc_info()[1]))

        def writeErrorLog(errorMessage):
            try:
                fileToWrite = open(errorLogPath, 'a')
                try:
                    fileToWrite.write(errorMessage)
                    fileToWrite.write("\n")
                except Exception as err:
                    logger.debug("Err: %s" % err)
                    raise
                finally:
                    fileToWrite.close()
            except Exception as err2:
                logger.debug("Err: %s" % err2)
                pass

        def writeStringToFile(fileName, stringToWrite):

            try:
                # uncomment this to try the error logger
                #fileName = '~/' + fileName
                fileToWrite = open(fileName, 'w')
                try:
                    fileToWrite.write(stringToWrite)
                except Exception as err:
                    logger.debug("Err: %s" % err)
                    raise err
                finally:
                    fileToWrite.close()
            # except IOError:
            except Exception as err:
                logger.debug("Err2: %s" % err)
                writeErrorLog(str(sys.exc_info()[1]))

        def renameDataFiles(directory):

            for fileName in os.listdir(directory):
                fullFileName = os.path.join(directory, fileName)
                if os.path.isfile(fullFileName):
                    if fileName == 'data':
                        os.rename(fullFileName, fullFileName + '.txt')
                else:
                    renameDataFiles(fullFileName)

        import shutil
        import tarfile
        import tempfile
        import os

        userHomeDir = os.getenv("HOME")
        tempDir = tempfile.mkdtemp()

        errorLogPath = os.path.join(tempDir, 'reportErrorLog.txt')

        fullPathTempDir = os.path.join(tempDir, tempDirName)
        fullLogDirName = ''
        # create temp dir and specific dir for the job/user

        try:
            if not os.path.exists(fullPathTempDir):
                os.mkdir(fullPathTempDir)

            import datetime
            now = datetime.datetime.now()
            userInfoDirName = userInfoDirName + \
                now.strftime("%Y-%m-%d-%H:%M:%S")
            fullLogDirName = os.path.join(fullPathTempDir, userInfoDirName)

            # if the report directory exists, delete its contents (we want
            # the latest version of the report)
            if os.path.exists(fullLogDirName):
                shutil.rmtree(fullLogDirName)

            os.mkdir(fullLogDirName)
        # except OSError:
        except Exception as err:
            logger.debug("Err: %s" % err)
            writeErrorLog(str(sys.exc_info()[1]))

        # dump os.environ to a file
        fullEnvironFileName = os.path.join(fullLogDirName, environFileName)

        try:
            inputFile = open(fullEnvironFileName, 'w')
            try:
                printDictionary(os.environ, file=inputFile)

                print('OS VERSION : ' + platform.platform(), file=inputFile)

            finally:
                inputFile.close()
        # except IOError
        except Exception as err:
            logger.debug("Err: %s" % err)
            writeErrorLog(str(sys.exc_info()[1]))

        # dump the user config to a file
        userConfigFullFileName = os.path.join(
            fullLogDirName, userConfigFileName)

        try:
            inputFile = open(userConfigFullFileName, 'w')
            try:

                print("#GANGA_VERSION = %s" %
                      config.System.GANGA_VERSION, file=inputFile)

                global GANGA_VERSION
                GANGA_VERSION = config.System.GANGA_VERSION

                # this gets the default values
                # Ganga.GPIDev.Lib.Config.Config.print_config_file()

                # this should get the changed values
                for c in config:
                    print(config[c], file=inputFile)

            finally:
                inputFile.close()
        # except IOError does not catch the exception ???
        except Exception as err:
            logger.debug("Err: %s" % err)
            writeErrorLog(str(sys.exc_info()[1]))

        # write gangarc - default configuration
        defaultConfigFullFileName = os.path.join(
            fullLogDirName, defaultConfigFileName)

        try:
            outputFile = open(os.path.join(userHomeDir, '.gangarc'), 'r')

            try:
                writeStringToFile(defaultConfigFullFileName, outputFile.read())
            finally:
                outputFile.close()

        # except IOError does not catch the exception ???
        except Exception as err:
            logger.debug("Err: %s" % err)
            writeErrorLog(str(sys.exc_info()[1]))

        # dump the ipython history to a file
        try:
            ipythonFile = open(
                os.path.join(os.environ['IPYTHONDIR'], 'history'), 'r')

            try:
                lastIPythonCommands = ipythonFile.readlines()[-20:]
                writeStringToFile(os.path.join(
                    fullLogDirName, ipythonHistoryFileName), '\n'.join(lastIPythonCommands))
                #writeStringToFile(os.path.join(fullLogDirName, ipythonHistoryFileName), ipythonFile.read())
            finally:
                ipythonFile.close()
        # except IOError does not catch the exception ???
        except Exception as err:
            logger.debug("Err: %s" % err)
            writeErrorLog(str(sys.exc_info()[1]))

        # dump the ganga log to a file
        userLogFileLocation = config["Logging"]._logfile
        userLogFileLocation = os.path.expanduser(userLogFileLocation)

        try:
            gangaLogFile = open(userLogFileLocation, 'r')
            try:
                writeStringToFile(
                    os.path.join(fullLogDirName, gangaLogFileName), gangaLogFile.read())
            finally:
                gangaLogFile.close()
        # except IOError:
        except Exception as err:
            logger.debug("Err: %s" % err)
            writeErrorLog(str(sys.exc_info()[1]))

        # include the output of the jobs command in the report
        jobsListFullFileName = os.path.join(fullLogDirName, jobsListFileName)

        try:
            outputFile = open(jobsListFullFileName, 'w')
            try:

                from Ganga.Core.GangaRegistry import getRegistryProxy
                print(getRegistryProxy('jobs'), file=outputFile)

            finally:
                outputFile.close()

        # except IOError does not catch the exception ???
        except Exception as err:
            logger.debug("Err: %s" % err)
            writeErrorLog(str(sys.exc_info()[1]))

        # include the output of the tasks command in the report
        tasksListFullFileName = os.path.join(fullLogDirName, tasksListFileName)

        try:
            outputFile = open(tasksListFullFileName, 'w')
            try:

                from Ganga.Core.GangaRegistry import getRegistryProxy
                print(getRegistryProxy('tasks'), file=outputFile)

            finally:
                outputFile.close()

        # except IOError does not catch the exception ???
        except Exception as err:
            logger.debug("Err: %s" % err)
            writeErrorLog(str(sys.exc_info()[1]))

        # save it here because we will change fullLogDirName, but we want this
        # to be the archive and to be deleted
        folderToArchive = fullLogDirName

        # collect job-relevant info
        if (job is not None and isJob):

            global JOB_REPORT, APPLICATION_NAME, BACKEND_NAME

            JOB_REPORT = True
            APPLICATION_NAME = getName(job.application)
            BACKEND_NAME = getName(job.backend)

            # create job folder
            jobFolder = 'job_%s' % job.fqid
            fullLogDirName = os.path.join(fullLogDirName, jobFolder)
            os.mkdir(fullLogDirName)

            # write the job summary to a file
            fullJobSummaryFileName = os.path.join(
                fullLogDirName, jobSummaryFileName)
            writeStringToFile(fullJobSummaryFileName, job)

            # write the full job printout to a file
            fullJobPrintFileName = os.path.join(
                fullLogDirName, jobFullPrintFileName)

            try:
                inputFile = open(fullJobPrintFileName, 'w')
                try:
                    full_print(job, inputFile)
                finally:
                    inputFile.close()
            # except IOError, OSError:
            except Exception as err:
                logger.debug("Err: %s" % err)
                writeErrorLog(str(sys.exc_info()[1]))

            # extract file objects
            try:
                fileObjectsPath = os.path.join(fullLogDirName, 'fileobjects')
                os.mkdir(fileObjectsPath)
                extractFileObjects(fullJobSummaryFileName, fileObjectsPath)
            # except OSError:
            except Exception as err:
                logger.debug("Err: %s" % err)
                writeErrorLog(str(sys.exc_info()[1]))

            # copy dir of the job ->input/output and subjobs
            try:
                parentDir, currentDir = os.path.split(job.inputdir[:-1])
                workspaceDir = os.path.join(fullLogDirName, 'workspace')
                shutil.copytree(parentDir, workspaceDir)
            # except IOError, OSError
            except Exception as err:
                logger.debug("Err: %s" % err)
                writeErrorLog(str(sys.exc_info()[1]))

            # copy shared area of the job
            try:

                if hasattr(job.application, 'is_prepared'):
                    if job.application.is_prepared is not None and job.application.is_prepared is not True:
                        import os
                        from Ganga.Utility.Config import getConfig
                        from Ganga.Utility.files import expandfilename
                        shared_path = os.path.join(expandfilename(getConfig(
                            'Configuration')['gangadir']), 'shared', getConfig('Configuration')['user'])
                        shareddir = os.path.join(
                            shared_path, job.application.is_prepared.name)
                        if os.path.isdir(shareddir):

                            sharedAreaDir = os.path.join(
                                fullLogDirName, 'sharedarea')
                            shutil.copytree(shareddir, sharedAreaDir)
            # except IOError, OSError
            except Exception as err:
                logger.debug("Err: %s" % err)
                writeErrorLog(str(sys.exc_info()[1]))

            # copy repository job file
            try:
                indexFileName = str(job.id) + '.index'

                repositoryPath = repositoryPath.replace(
                    '$usr', os.getenv("USER"))

                # check if the job is subjob -> different way of forming the
                # path to the repository
                is_subjob = job.fqid.find('.') > -1

                if is_subjob:

                    jobid, subjobid = job.fqid.split(
                        '.')[0], job.fqid.split('.')[1]
                    # floor division keeps the thousands bucket an int on Python 3
                    repositoryPath = repositoryPath.replace(
                        '$thousandsNum', str(int(jobid) // 1000))
                    repositoryPath = os.path.join(repositoryPath, jobid)

                else:
                    repositoryPath = repositoryPath.replace(
                        '$thousandsNum', str(job.id // 1000))

                repositoryFullPath = os.path.join(
                    config.Configuration.gangadir, repositoryPath)
                indexFileSourcePath = os.path.join(
                    repositoryFullPath, indexFileName)
                repositoryFullPath = os.path.join(
                    repositoryFullPath, str(job.id))

                repositoryTargetPath = os.path.join(
                    fullLogDirName, 'repository', str(job.id))

                os.mkdir(os.path.join(fullLogDirName, 'repository'))

                shutil.copytree(repositoryFullPath, repositoryTargetPath)
                # data files are copied but cannot be opened, so add .txt to
                # their file names
                renameDataFiles(repositoryTargetPath)

                if not is_subjob:
                    # copy .index file
                    indexFileTargetPath = os.path.join(
                        fullLogDirName, 'repository', indexFileName)
                    shutil.copyfile(indexFileSourcePath, indexFileTargetPath)

            # except OSError, IOError:
            except Exception as err:
                logger.debug("Err: %s" % err)
                writeErrorLog(str(sys.exc_info()[1]))

        # collect task-relevant info
        if (job is not None and isTask):
            # job is actually a task object
            task = job
            # create task folder
            taskFolder = 'task_%s' % task.id
            fullLogDirName = os.path.join(fullLogDirName, taskFolder)
            os.mkdir(fullLogDirName)

            # write the task summary to a file
            fullTaskSummaryFileName = os.path.join(
                fullLogDirName, taskSummaryFileName)
            writeStringToFile(fullTaskSummaryFileName, str(task))

            # write the full task printout to a file
            fullTaskPrintFileName = os.path.join(
                fullLogDirName, taskFullPrintFileName)

            try:
                inputFile = open(fullTaskPrintFileName, 'w')
                try:
                    full_print(task, inputFile)
                except Exception as err:
                    logger.debug("Err: %s" % err)
                    raise err
                finally:
                    inputFile.close()
            # except IOError, OSError:
            except Exception as err:
                logger.debug("Err2: %s" % err)
                writeErrorLog(str(sys.exc_info()[1]))

            # copy shared area of the task
            try:
                if len(task.transforms) > 0:
                    if hasattr(task.transforms[0], 'application') and hasattr(task.transforms[0].application, 'is_prepared'):
                        if task.transforms[0].application.is_prepared is not None and task.transforms[0].application.is_prepared is not True:
                            import os
                            from Ganga.Utility.Config import getConfig
                            from Ganga.Utility.files import expandfilename
                            shared_path = os.path.join(expandfilename(getConfig(
                                'Configuration')['gangadir']), 'shared', getConfig('Configuration')['user'])
                            shareddir = os.path.join(
                                shared_path, task.transforms[0].application.is_prepared.name)
                            if os.path.isdir(shareddir):

                                sharedAreaDir = os.path.join(
                                    fullLogDirName, 'sharedarea')
                                shutil.copytree(shareddir, sharedAreaDir)
            # except IOError, OSError
            except Exception as err:
                logger.debug("Err: %s" % err)
                writeErrorLog(str(sys.exc_info()[1]))

            # copy repository task file
            try:
                indexFileName = str(task.id) + '.index'

                tasksRepositoryPath = tasksRepositoryPath.replace(
                    '$usr', os.getenv("USER"))
                tasksRepositoryPath = tasksRepositoryPath.replace(
                    '$thousandsNum', str(task.id // 1000))

                repositoryFullPath = os.path.join(
                    config.Configuration.gangadir, tasksRepositoryPath)
                indexFileSourcePath = os.path.join(
                    repositoryFullPath, indexFileName)
                repositoryFullPath = os.path.join(
                    repositoryFullPath, str(task.id))

                repositoryTargetPath = os.path.join(
                    fullLogDirName, 'repository', str(task.id))

                os.mkdir(os.path.join(fullLogDirName, 'repository'))

                shutil.copytree(repositoryFullPath, repositoryTargetPath)
                # data files are copied but cannot be opened, so add .txt to
                # their file names
                renameDataFiles(repositoryTargetPath)

                # copy .index file
                indexFileTargetPath = os.path.join(
                    fullLogDirName, 'repository', indexFileName)
                shutil.copyfile(indexFileSourcePath, indexFileTargetPath)

            # except OSError, IOError:
            except Exception as err:
                logger.debug("Err %s" % err)
                writeErrorLog(str(sys.exc_info()[1]))

        # Copy thread stack trace file
        try:
            thread_trace_source_path = os.path.join(getConfig('Configuration')['gangadir'], thread_trace_file_name)
            thread_trace_target_path = os.path.join(fullLogDirName, thread_trace_file_name)
            shutil.copyfile(thread_trace_source_path, thread_trace_target_path)
        except (OSError, IOError) as err:
            logger.debug('Err %s', err)
            writeErrorLog(str(sys.exc_info()[1]))

        resultArchive = '%s.tar.gz' % folderToArchive

        try:
            resultFile = tarfile.TarFile.open(resultArchive, 'w:gz')
            try:
                resultFile.add(
                    folderToArchive, arcname=os.path.basename(folderToArchive))
                # put the error log in the archive
                if os.path.exists(errorLogPath):
                    resultFile.add(
                        errorLogPath, arcname=os.path.basename(errorLogPath))
            except Exception as err:
                logger.debug("Err: %s" % err)
                raise
            finally:
                resultFile.close()
        except Exception as err:
            logger.debug("Err2: %s" % err)
            raise  # pass

        # remove temp dir
        if os.path.exists(folderToArchive):
            shutil.rmtree(folderToArchive)

        # print the error if there is something
        if os.path.exists(errorLogPath):
            logger.error('')
            logger.error('An error occurred while collecting report information: ' + open(errorLogPath, 'r').read())
            logger.error('')

        # delete the errorfile from user's pc
        if os.path.exists(errorLogPath):
            os.remove(errorLogPath)

        # return the path to the archive and the path to the upload server
        return (resultArchive, uploadFileServer, tempDir)

    def removeTempFiles(tempDir):
        import shutil

        # remove temp dir
        if os.path.exists(tempDir):
            shutil.rmtree(tempDir)

        # remove temp files from django upload-> if the file is bigger than 2.5
        # mb django internally stores it in tmp file during the upload
        userTempDir = '/tmp/'

        for fileName in os.listdir(userTempDir):
            if fileName.find('.upload') > -1:
                os.remove(os.path.join(userTempDir, fileName))

    tempDir = ''

    # call the report function
    try:
        isJob = isTask = False

        # make typecheck of the param passed
        if job is not None:
            from Ganga.GPIDev.Lib.Job.Job import Job
            from Ganga.GPIDev.Base.Proxy import stripProxy
            isJob = isinstance(stripProxy(job), Job)
            if hasattr(stripProxy(job), '_category') and (stripProxy(job)._category == 'tasks'):
                isTask = True

            if not (isJob or isTask):
                logger.error("report() function argument should be reference to a job or task object")
                return

        resultArchive, uploadFileServer, tempDir = report_inner(
            job, isJob, isTask)

        report_bytes = os.path.getsize(resultArchive)

        if report_bytes > 1024 * 1024 * 100:  # if bigger than 100MB
            logger.error(
                'The report is larger than 100MB and cannot be uploaded')
        else:
            run_upload(server=uploadFileServer, path=resultArchive)

    except Exception as err:
        logger.debug("Err: %s" % err)
        removeTempFiles(tempDir)
        raise  # pass
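
The report code above assembles the interpreter version from the first three fields of sys.version_info before logging it. A minimal, standalone sketch of that pattern (the printed labels are illustrative, not part of Ganga):

import sys
import platform

# sys.version_info behaves like a tuple:
# (major, minor, micro, releaselevel, serial)
version_string = "%s.%s.%s" % (
    sys.version_info[0], sys.version_info[1], sys.version_info[2])

# the same fields are available by name on Python 2.7+ / 3.1+:
# "%d.%d.%d" % (sys.version_info.major, sys.version_info.minor,
#               sys.version_info.micro)

print('Python Version : ' + version_string)
print('Operating System Version : ' + platform.platform())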

Example 9

Project: numba Source File: bytecode.py
Function: make_bytecode_table
def _make_bytecode_table():
    # Note some opcodes are supported here for analysis but not later
    # in the compilation pipeline.
    version_specific = []

    if sys.version_info[0] == 2:
        version_specific += [
            ('BINARY_DIVIDE', 0),
            ('DELETE_SLICE+0', 0),
            ('DELETE_SLICE+1', 0),
            ('DELETE_SLICE+2', 0),
            ('DELETE_SLICE+3', 0),
            ('DUP_TOPX', 2),
            ('INPLACE_DIVIDE', 0),
            ('PRINT_ITEM', 0),
            ('PRINT_NEWLINE', 0),
            ('ROT_FOUR', 0),
            ('SLICE+0', 0),
            ('SLICE+1', 0),
            ('SLICE+2', 0),
            ('SLICE+3', 0),
            ('STORE_SLICE+0', 0),
            ('STORE_SLICE+1', 0),
            ('STORE_SLICE+2', 0),
            ('STORE_SLICE+3', 0),
        ]

    if sys.version_info[0] == 3:
        version_specific += [
            ('DUP_TOP_TWO', 0)
        ]

    if sys.version_info[:2] <= (3, 4):
        version_specific += [
            ('STORE_MAP', 0),
        ]

    if sys.version_info[:2] >= (3, 5):   # python 3.5+
        version_specific += [
            ('BINARY_MATRIX_MULTIPLY', 0),
            ('INPLACE_MATRIX_MULTIPLY', 0),
        ]

    bytecodes = [
        # opname, operandlen
        ('BINARY_ADD', 0),
        ('BINARY_TRUE_DIVIDE', 0),
        ('BINARY_MULTIPLY', 0),
        ('BINARY_SUBSCR', 0),
        ('BINARY_SUBTRACT', 0),
        ('BINARY_FLOOR_DIVIDE', 0),
        ('BINARY_MODULO', 0),
        ('BINARY_POWER', 0),
        ('BINARY_AND', 0),
        ('BINARY_OR', 0),
        ('BINARY_XOR', 0),
        ('BINARY_LSHIFT', 0),
        ('BINARY_RSHIFT', 0),
        ('BREAK_LOOP', 0),
        ('BUILD_LIST', 2),
        ('BUILD_MAP', 2),
        ('BUILD_SET', 2),
        ('BUILD_SLICE', 2),
        ('BUILD_TUPLE', 2),
        ('CALL_FUNCTION', 2),
        ('CALL_FUNCTION_VAR', 2),
        ('COMPARE_OP', 2),
        ('DELETE_ATTR', 2),
        ('DELETE_SUBSCR', 0),
        ('DUP_TOP', 0),
        ('EXTENDED_ARG', 2),
        ('FOR_ITER', 2),
        ('GET_ITER', 0),
        ('INPLACE_ADD', 0),
        ('INPLACE_SUBTRACT', 0),
        ('INPLACE_MULTIPLY', 0),
        ('INPLACE_TRUE_DIVIDE', 0),
        ('INPLACE_FLOOR_DIVIDE', 0),
        ('INPLACE_MODULO', 0),
        ('INPLACE_POWER', 0),
        ('INPLACE_AND', 0),
        ('INPLACE_OR', 0),
        ('INPLACE_XOR', 0),
        ('INPLACE_LSHIFT', 0),
        ('INPLACE_RSHIFT', 0),
        ('JUMP_ABSOLUTE', 2),
        ('JUMP_FORWARD', 2),
        ('JUMP_IF_TRUE_OR_POP', 2),
        ('JUMP_IF_FALSE_OR_POP', 2),
        ('LOAD_ATTR', 2),
        ('LOAD_CLOSURE', 2),
        ('LOAD_CONST', 2),
        ('LOAD_FAST', 2),
        ('LOAD_GLOBAL', 2),
        ('LOAD_DEREF', 2),
        ('MAKE_CLOSURE', 2),
        ('MAKE_FUNCTION', 2),
        ('POP_BLOCK', 0),
        ('POP_JUMP_IF_FALSE', 2),
        ('POP_JUMP_IF_TRUE', 2),
        ('POP_TOP', 0),
        ('RAISE_VARARGS', 2),
        ('RETURN_VALUE', 0),
        ('ROT_THREE', 0),
        ('ROT_TWO', 0),
        ('SETUP_LOOP', 2),
        ('STORE_ATTR', 2),
        ('STORE_DEREF', 2),
        ('STORE_FAST', 2),
        ('STORE_SUBSCR', 0),
        ('UNARY_POSITIVE', 0),
        ('UNARY_NEGATIVE', 0),
        ('UNARY_INVERT', 0),
        ('UNARY_NOT', 0),
        ('UNPACK_SEQUENCE', 2),
        ('YIELD_VALUE', 0),
    ] + version_specific

    return dict((dis.opmap[opname], opcode_info(argsize=argsize))
                for opname, argsize in bytecodes)
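
This table builder shows the two most common comparison styles side by side: testing the major version with sys.version_info[0] and comparing a (major, minor) slice against a literal tuple. A minimal sketch of both, with illustrative opcode names:

import sys

supported = ['BINARY_ADD']          # illustrative opcode names

if sys.version_info[0] == 2:
    supported.append('PRINT_ITEM')  # Python-2-only opcode

# Slicing gives a (major, minor) tuple; tuples compare element-wise,
# so (3, 5) <= (3, 6) and (3, 5) < (4, 0) both hold.
if sys.version_info[:2] >= (3, 5):
    supported.append('BINARY_MATRIX_MULTIPLY')

print(supported)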

Example 10

Project: databus Source File: dbtables.py
Function: select
    def __Select(self, table, columns, conditions):
        """__Select() - Used to implement Select and Delete (above)
        Returns a dictionary keyed on rowids containing dicts
        holding the row data for columns listed in the columns param
        that match the given conditions.
        * conditions is a dictionary keyed on column names
        containing callable conditions expecting the data string as an
        argument and returning a boolean.
        """
        # check the validity of each column name
        if not self.__tablecolumns.has_key(table):
            self.__load_column_info(table)
        if columns is None:
            columns = self.__tablecolumns[table]
        for column in (columns + conditions.keys()):
            if not self.__tablecolumns[table].count(column):
                raise TableDBError, "unknown column: %r" % (column,)

        # keyed on rowids that match so far, containing dicts keyed on
        # column names with the data for that row and column.
        matching_rowids = {}
        # keys are rowids that do not match
        rejected_rowids = {}

        # attempt to sort the conditions in such a way as to minimize full
        # column lookups
        def cmp_conditions(atuple, btuple):
            a = atuple[1]
            b = btuple[1]
            if type(a) is type(b):
                if isinstance(a, PrefixCond) and isinstance(b, PrefixCond):
                    # longest prefix first
                    return cmp(len(b.prefix), len(a.prefix))
                if isinstance(a, LikeCond) and isinstance(b, LikeCond):
                    # longest likestr first
                    return cmp(len(b.likestr), len(a.likestr))
                return 0
            if isinstance(a, ExactCond):
                return -1
            if isinstance(b, ExactCond):
                return 1
            if isinstance(a, PrefixCond):
                return -1
            if isinstance(b, PrefixCond):
                return 1
            # leave all unknown condition callables alone as equals
            return 0

        if sys.version_info[0] < 3 :
            conditionlist = conditions.items()
            conditionlist.sort(cmp_conditions)
        else :  # Insertion Sort. Please, improve
            conditionlist = []
            for i in conditions.items() :
                for j, k in enumerate(conditionlist) :
                    r = cmp_conditions(k, i)
                    if r == 1 :
                        conditionlist.insert(j, i)
                        break
                else :
                    conditionlist.append(i)

        # Apply conditions to column data to find what we want
        cur = self.db.cursor()
        column_num = -1
        for column, condition in conditionlist:
            column_num = column_num + 1
            searchkey = _search_col_data_key(table, column)
            # speedup: don't linear search columns within loop
            if column in columns:
                savethiscolumndata = 1  # save the data for return
            else:
                savethiscolumndata = 0  # data only used for selection

            try:
                key, data = cur.set_range(searchkey)
                while key[:len(searchkey)] == searchkey:
                    # extract the rowid from the key
                    rowid = key[-_rowid_str_len:]

                    if not rejected_rowids.has_key(rowid):
                        # if no condition was specified or the condition
                        # succeeds, add row to our match list.
                        if not condition or condition(data):
                            if not matching_rowids.has_key(rowid):
                                matching_rowids[rowid] = {}
                            if savethiscolumndata:
                                matching_rowids[rowid][column] = data
                        else:
                            if matching_rowids.has_key(rowid):
                                del matching_rowids[rowid]
                            rejected_rowids[rowid] = rowid

                    key, data = cur.next()

            except db.DBError, dberror:
                if sys.version_info[0] < 3 :
                    if dberror[0] != db.DB_NOTFOUND:
                        raise
                else :
                    if dberror.args[0] != db.DB_NOTFOUND:
                        raise
                continue

        cur.close()

        # we're done selecting rows, garbage collect the reject list
        del rejected_rowids

        # extract any remaining desired column data from the
        # database for the matching rows.
        if len(columns) > 0:
            for rowid, rowdata in matching_rowids.items():
                for column in columns:
                    if rowdata.has_key(column):
                        continue
                    try:
                        rowdata[column] = self.db.get(
                            _data_key(table, column, rowid))
                    except db.DBError, dberror:
                        if sys.version_info[0] < 3 :
                            if dberror[0] != db.DB_NOTFOUND:
                                raise
                        else :
                            if dberror.args[0] != db.DB_NOTFOUND:
                                raise
                        rowdata[column] = None

        # return the matches
        return matching_rowids
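
Here sys.version_info[0] < 3 guards a Python-2-only API: list.sort() accepted a comparison function, which Python 3 removed, hence the hand-rolled insertion sort in the else branch. A common alternative on Python 2.7+ and 3.2+ is functools.cmp_to_key; a minimal sketch with an illustrative comparator:

import sys
import functools

def cmp_lengths(a, b):
    # old-style comparator: negative, zero or positive, like cmp()
    return (len(a) > len(b)) - (len(a) < len(b))

items = ['ccc', 'a', 'bb']
if sys.version_info[0] < 3:
    items.sort(cmp_lengths)          # Python 2: sort(cmp=...)
else:
    items.sort(key=functools.cmp_to_key(cmp_lengths))
print(items)                         # ['a', 'bb', 'ccc']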

Example 11

Project: esky Source File: f_py2app.py
def freeze(dist):
    """Freeze the given distribution data using py2app."""
    includes = dist.includes
    excludes = dist.excludes
    options = dist.freezer_options
    #  Merge in any includes/excludes given in freezer_options
    includes.append("esky")
    for inc in options.pop("includes",()):
        includes.append(inc)
    for exc in options.pop("excludes",()):
        excludes.append(exc)
    if "pypy" not in includes and "pypy" not in excludes:
        excludes.append("pypy")
    options["includes"] = includes
    options["excludes"] = excludes
    # The control info (name, icon, etc) for the app will be taken from
    # the first script in the list.  Subsequent scripts will be passed
    # as the extra_scripts argument.
    exes = list(dist.get_executables())
    if not exes:
        raise RuntimeError("no scripts specified")
    cmd = _make_py2app_cmd(dist.freeze_dir,dist.distribution,options,exes)
    cmd.run()
    #  Remove any .pyc files with a corresponding .py file.
    #  This helps avoid timestamp changes that might interfere with
    #  the generation of useful patches between versions.
    appnm = dist.distribution.get_name()+".app"
    app_dir = os.path.join(dist.freeze_dir,appnm)
    resdir = os.path.join(app_dir,"Contents/Resources")
    for (dirnm,_,filenms) in os.walk(resdir):
        for nm in filenms:
            if nm.endswith(".pyc"):
                pyfile = os.path.join(dirnm,nm[:-1])
                if os.path.exists(pyfile):
                    os.unlink(pyfile+"c")
            if nm.endswith(".pyo"):
                pyfile = os.path.join(dirnm,nm[:-1])
                if os.path.exists(pyfile):
                    os.unlink(pyfile+"o")
    #  Copy data files into the freeze dir
    for (src,dst) in dist.get_data_files():
        dst = os.path.join(app_dir,"Contents","Resources",dst)
        dstdir = os.path.dirname(dst)
        if not os.path.isdir(dstdir):
            dist.mkpath(dstdir)
        dist.copy_file(src,dst)
    #  Copy package data into site-packages.zip
    zfpath = os.path.join(cmd.lib_dir,get_zipfile(dist.distribution))
    lib = zipfile.ZipFile(zfpath,"a")
    for (src,arcnm) in dist.get_package_data():
        lib.write(src,arcnm)
    lib.close()
    #  Create the bootstrapping code, using custom code if specified.
    esky_name = dist.distribution.get_name()
    code_source = ["__esky_name__ = %r" % (esky_name,)]
    code_source.append(inspect.getsource(esky.bootstrap))
    if not dist.compile_bootstrap_exes:
        code_source.append(_FAKE_ESKY_BOOTSTRAP_MODULE)
        code_source.append(_EXTRA_BOOTSTRAP_CODE)
    code_source.append(dist.get_bootstrap_code())
    code_source.append("if not __rpython__:")
    code_source.append("    bootstrap()")
    code_source = "\n".join(code_source)
    def copy_to_bootstrap_env(src,dst=None):
        if dst is None:
            dst = src
        src = os.path.join(appnm,src)
        dist.copy_to_bootstrap_env(src,dst)
    if dist.compile_bootstrap_exes:
        for exe in dist.get_executables(normalise=False):
            if not exe.include_in_bootstrap_env:
                continue
            relpath = os.path.join("Contents","MacOS",exe.name)
            dist.compile_to_bootstrap_exe(exe,code_source,relpath)
    else:
        #  Copy the core dependencies into the bootstrap env.
        pydir = "python%d.%d" % sys.version_info[:2]
        for nm in ("Python.framework","lib"+pydir+".dylib",):
            try:
                copy_to_bootstrap_env("Contents/Frameworks/" + nm)
            except Exception as e:
                #  Distutils does its own crazy exception-raising which I
                #  have no interest in examining right now.  Eventually this
                #  guard will be more conservative.
                pass
        bsdir = dist.bootstrap_dir
        copy_to_bootstrap_env("Contents/Resources/include")
        if sys.version_info[:2] < (3, 3):
            copy_to_bootstrap_env("Contents/Resources/lib/"+pydir+"/config")
        else:
            copy_to_bootstrap_env("Contents/Resources/lib/"+pydir+"/config-%d.%dm"
                                   % sys.version_info[:2])
            # copy across the zip file that we need to run the bootstrap
            # application from the inner package. This only needs to contain
            # a minimal set of files for the bootstrap
            # handle the bootstrap lib dependencies
            python_name = 'python%d%d' % sys.version_info[:2]
            zip_name = os.path.join('Contents', 'Resources', 'lib', '{}.zip'.format(python_name))

            app_zfname = os.path.join(app_dir, zip_name)
            zfname = os.path.join(bsdir, zip_name)
            with tempfile.TemporaryDirectory() as tdir:
                esky.util.extract_zipfile(app_zfname, tdir)
                member_list = ['_weakrefset.pyc', 'abc.pyc', 'codecs.pyc', 'io.pyc']
                for enc in os.listdir(os.path.join(tdir, 'encodings')):
                    member_list.append(os.path.join('encodings',enc))
                esky.util.create_zipfile(tdir, zfname, members=member_list)

        if sys.version_info[:2] < (3, 3):
            required_libs = ['fcntl']
        else:
            required_libs = ['fcntl', 'zlib']

        for req_lib in required_libs:
            if req_lib not in sys.builtin_module_names:
                dynload = "Contents/Resources/lib/"+pydir+"/lib-dynload"
                for nm in os.listdir(os.path.join(app_dir,dynload)):
                    if nm.startswith(req_lib):
                        copy_to_bootstrap_env(os.path.join(dynload,nm))

        copy_to_bootstrap_env("Contents/Resources/__error__.sh")
        # Copy site.py/site.pyc into the bootstrap env, then zero them out.
        if os.path.exists(os.path.join(app_dir, "Contents/Resources/site.py")):
            copy_to_bootstrap_env("Contents/Resources/site.py")
            with open(bsdir + "/Contents/Resources/site.py", "wt") as f:
                pass
        if os.path.exists(os.path.join(app_dir, "Contents/Resources/site.pyc")):
            copy_to_bootstrap_env("Contents/Resources/site.pyc")
            with open(bsdir + "/Contents/Resources/site.pyc", "wb") as f:
                f.write(esky.util.compile_to_bytecode("", "site.py"))
        if os.path.exists(os.path.join(app_dir, "Contents/Resources/site.pyo")):
            copy_to_bootstrap_env("Contents/Resources/site.pyo")
            with open(bsdir + "/Contents/Resources/site.pyo", "wb") as f:
                f.write(imp.get_magic() + struct.pack("<i", 0))
        #  Copy the bootstrapping code into the __boot__.py file.
        copy_to_bootstrap_env("Contents/Resources/__boot__.py")
        with open(bsdir+"/Contents/Resources/__boot__.py","wt") as f:
            f.write(code_source)
        #  Copy the loader program for each script into the bootstrap env.
        copy_to_bootstrap_env("Contents/MacOS/python")
        for exe in dist.get_executables(normalise=False):
            if not exe.include_in_bootstrap_env:
                continue
            exepath = copy_to_bootstrap_env("Contents/MacOS/"+exe.name)
    #  Copy non-python resources (e.g. icons etc) into the bootstrap dir
    copy_to_bootstrap_env("Contents/Info.plist")
    # Include Icon
    if exe.icon is not None:
        copy_to_bootstrap_env("Contents/Resources/"+exe.icon)
    copy_to_bootstrap_env("Contents/PkgInfo")
    with open(os.path.join(app_dir,"Contents","Info.plist"),"rt") as f:
        infotxt = f.read()
    for nm in os.listdir(os.path.join(app_dir,"Contents","Resources")):
        if "<string>%s</string>" % (nm,) in infotxt:
            copy_to_bootstrap_env("Contents/Resources/"+nm)

Example 12

Project: PokemonGo-DesktopMap Source File: test_random.py
    def runTest(self):
        """Cryptodome.Random.new()"""
        # Import the Random module and try to use it
        from Cryptodome import Random
        randobj = Random.new()
        x = randobj.read(16)
        y = randobj.read(16)
        self.assertNotEqual(x, y)
        z = Random.get_random_bytes(16)
        self.assertNotEqual(x, z)
        self.assertNotEqual(y, z)
        # Test the Random.random module, which
        # implements a subset of Python's random API
        # Not implemented:
        # seed(), getstate(), setstate(), jumpahead()
        # random(), uniform(), triangular(), betavariate()
        # expovariate(), gammavariate(), gauss(),
        # lognormvariate(), normalvariate(),
        # vonmisesvariate(), paretovariate()
        # weibullvariate()
        # WichmannHill(), whseed(), SystemRandom()
        from Cryptodome.Random import random
        x = random.getrandbits(16*8)
        y = random.getrandbits(16*8)
        self.assertNotEqual(x, y)
        # Test randrange
        if x>y:
            start = y
            stop = x
        else:
            start = x
            stop = y
        for step in range(1,10):
            x = random.randrange(start,stop,step)
            y = random.randrange(start,stop,step)
            self.assertNotEqual(x, y)
            self.assertEqual(start <= x < stop, True)
            self.assertEqual(start <= y < stop, True)
            self.assertEqual((x - start) % step, 0)
            self.assertEqual((y - start) % step, 0)
        for i in range(10):
            self.assertEqual(random.randrange(1,2), 1)
        self.assertRaises(ValueError, random.randrange, start, start)
        self.assertRaises(ValueError, random.randrange, stop, start, step)
        self.assertRaises(TypeError, random.randrange, start, stop, step, step)
        self.assertRaises(TypeError, random.randrange, start, stop, "1")
        self.assertRaises(TypeError, random.randrange, "1", stop, step)
        self.assertRaises(TypeError, random.randrange, 1, "2", step)
        self.assertRaises(ValueError, random.randrange, start, stop, 0)
        # Test randint
        x = random.randint(start,stop)
        y = random.randint(start,stop)
        self.assertNotEqual(x, y)
        self.assertEqual(start <= x <= stop, True)
        self.assertEqual(start <= y <= stop, True)
        for i in range(10):
            self.assertEqual(random.randint(1,1), 1)
        self.assertRaises(ValueError, random.randint, stop, start)
        self.assertRaises(TypeError, random.randint, start, stop, step)
        self.assertRaises(TypeError, random.randint, "1", stop)
        self.assertRaises(TypeError, random.randint, 1, "2")
        # Test choice
        seq = range(10000)
        x = random.choice(seq)
        y = random.choice(seq)
        self.assertNotEqual(x, y)
        self.assertEqual(x in seq, True)
        self.assertEqual(y in seq, True)
        for i in range(10):
            self.assertEqual(random.choice((1,2,3)) in (1,2,3), True)
        self.assertEqual(random.choice([1,2,3]) in [1,2,3], True)
        if sys.version_info[0] == 3:
            self.assertEqual(random.choice(bytearray(b('123'))) in bytearray(b('123')), True)
        self.assertEqual(1, random.choice([1]))
        self.assertRaises(IndexError, random.choice, [])
        self.assertRaises(TypeError, random.choice, 1)
        # Test shuffle. Lacks random parameter to specify function.
        # Make copies of seq
        seq = range(500)
        x = list(seq)
        y = list(seq)
        random.shuffle(x)
        random.shuffle(y)
        self.assertNotEqual(x, y)
        self.assertEqual(len(seq), len(x))
        self.assertEqual(len(seq), len(y))
        for i in range(len(seq)):
           self.assertEqual(x[i] in seq, True)
           self.assertEqual(y[i] in seq, True)
           self.assertEqual(seq[i] in x, True)
           self.assertEqual(seq[i] in y, True)
        z = [1]
        random.shuffle(z)
        self.assertEqual(z, [1])
        if sys.version_info[0] == 3:
            z = bytearray(b('12'))
            random.shuffle(z)
            self.assertEqual(b('1') in z, True)
            self.assertRaises(TypeError, random.shuffle, b('12'))
        self.assertRaises(TypeError, random.shuffle, 1)
        self.assertRaises(TypeError, random.shuffle, "11")
        self.assertRaises(TypeError, random.shuffle, (1,2))
        # 2to3 wraps a list() around it, alas - but I want to shoot
        # myself in the foot here! :D
        # if sys.version_info[0] == 3:
            # self.assertRaises(TypeError, random.shuffle, range(3))
        # Test sample
        x = random.sample(seq, 20)
        y = random.sample(seq, 20)
        self.assertNotEqual(x, y)
        for i in range(20):
           self.assertEqual(x[i] in seq, True)
           self.assertEqual(y[i] in seq, True)
        z = random.sample([1], 1)
        self.assertEqual(z, [1])
        z = random.sample((1,2,3), 1)
        self.assertEqual(z[0] in (1,2,3), True)
        z = random.sample("123", 1)
        self.assertEqual(z[0] in "123", True)
        z = random.sample(range(3), 1)
        self.assertEqual(z[0] in range(3), True)
        if sys.version_info[0] == 3:
                z = random.sample(b("123"), 1)
                self.assertEqual(z[0] in b("123"), True)
                z = random.sample(bytearray(b("123")), 1)
                self.assertEqual(z[0] in bytearray(b("123")), True)
        self.assertRaises(TypeError, random.sample, 1)
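
A recurring pattern in this test is gating bytes/bytearray behaviour on the major version. Note that the comparison must use ==, never is: identity of small integers is a CPython implementation detail, not a language guarantee. A minimal sketch of such a version-gated branch:

import sys

if sys.version_info[0] == 3:
    data = bytearray(b'123')   # bytes-like types behave differently on 3.x
else:
    data = '123'
print(repr(data))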

Example 13

Project: weboob Source File: setup.py
def install_weboob():
    scripts = set(os.listdir('scripts'))
    packages = set(find_packages(exclude=['modules']))

    hildon_scripts = set(('masstransit',))
    qt_scripts = set(('qboobmsg',
                      'qhavedate',
                      'qvideoob',
                      'weboob-config-qt',
                      'qwebcontentedit',
                      'qflatboob',
                      'qcineoob',
                      'qcookboob',
                      'qbooblyrics',
                      'qhandjoob'))

    if not options.hildon:
        scripts = scripts - hildon_scripts
    if options.qt:
        build_qt()
    else:
        scripts = scripts - qt_scripts

    hildon_packages = set((
        'weboob.applications.masstransit',
    ))
    qt_packages = set((
        'weboob.applications.qboobmsg',
        'weboob.applications.qboobmsg.ui',
        'weboob.applications.qcineoob',
        'weboob.applications.qcineoob.ui',
        'weboob.applications.qcookboob',
        'weboob.applications.qcookboob.ui',
        'weboob.applications.qbooblyrics',
        'weboob.applications.qbooblyrics.ui',
        'weboob.applications.qhandjoob',
        'weboob.applications.qhandjoob.ui',
        'weboob.applications.qhavedate',
        'weboob.applications.qhavedate.ui',
        'weboob.applications.qvideoob',
        'weboob.applications.qvideoob.ui',
        'weboob.applications.qweboobcfg',
        'weboob.applications.qweboobcfg.ui',
        'weboob.applications.qwebcontentedit',
        'weboob.applications.qwebcontentedit.ui',
        'weboob.applications.qflatboob',
        'weboob.applications.qflatboob.ui',
    ))

    if not options.hildon:
        packages = packages - hildon_packages
    if not options.qt:
        packages = packages - qt_packages

    data_files = [
        ('share/man/man1', glob.glob('man/*')),
    ]
    if options.xdg:
        data_files.extend([
            ('share/applications', glob.glob('desktop/*')),
            ('share/icons/hicolor/64x64/apps', glob.glob('icons/*')),
        ])

    # Do not put PyQt, it does not work properly.
    requirements = [
        'lxml',
        'feedparser',
        'requests>=2.0.0',
        'python-dateutil',
        'PyYAML',
        'prettytable',
        'google-api-python-client',
    ]
    try:
        import Image
    except ImportError:
        requirements.append('Pillow')
    else:
        # detect Pillow-only feature, or weird Debian stuff
        if hasattr(Image, 'alpha_composite') or 'PILcompat' in Image.__file__:
            requirements.append('Pillow')
        else:
            requirements.append('PIL')

    if sys.version_info < (3, 0):
        requirements.append('mechanize')

    if sys.version_info < (3, 2):
        requirements.append('futures')

    if sys.version_info < (2, 6):
        print('Python older than 2.6 is not supported.', file=sys.stderr)
        sys.exit(1)

    if not options.deps:
        requirements = []

    try:
        if sys.argv[1] == 'requirements':
            print('\n'.join(requirements))
            sys.exit(0)
    except IndexError:
        pass

    setup(
        name='weboob',
        version='1.2',
        description='Weboob, Web Outside Of Browsers',
        long_description=open('README').read(),
        author='Romain Bignon',
        author_email='[email protected]',
        maintainer='Romain Bignon',
        maintainer_email='[email protected]',
        url='http://weboob.org/',
        license='GNU AGPL 3',
        classifiers=[
            'Environment :: Console',
            'Environment :: X11 Applications :: Qt',
            'License :: OSI Approved :: GNU Affero General Public License v3',
            'Programming Language :: Python :: 2.6',
            'Programming Language :: Python :: 2.7',
            'Programming Language :: Python',
            'Topic :: Communications :: Email',
            'Topic :: Internet :: WWW/HTTP',
        ],

        packages=packages,
        scripts=[os.path.join('scripts', script) for script in scripts],
        data_files=data_files,

        install_requires=requirements,
    )
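
This setup script is the classic packaging use of sys.version_info: compare the whole tuple against a literal to decide on backport dependencies, and bail out early on unsupported interpreters. A minimal sketch with illustrative package names:

from __future__ import print_function
import sys

requirements = ['lxml', 'requests>=2.0.0']   # illustrative

if sys.version_info < (3, 0):
    requirements.append('mechanize')         # Python-2-only dependency
if sys.version_info < (3, 2):
    requirements.append('futures')           # concurrent.futures backport

if sys.version_info < (2, 6):
    print('Python older than 2.6 is not supported.', file=sys.stderr)
    sys.exit(1)

print('\n'.join(requirements))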

Example 14

Project: pywws Source File: Plot.py
Function: plot_data
    def PlotData(self, plot_no, plot, source):
        _ = Localisation.translation.ugettext
        subplot_list = plot.get_children('subplot')
        subplot_count = len(subplot_list)
        if subplot_count < 1:
            return u''
        result = u''
        pressure_offset = self.pressure_offset
        # label x axis of last plot
        if plot_no == self.plot_count - 1:
            x_lo = (self.x_lo -
                    self.utcoffset).replace(tzinfo=utc).astimezone(Local)
            x_hi = (self.x_hi -
                    self.utcoffset).replace(tzinfo=utc).astimezone(Local)
            if self.duration <= timedelta(hours=24):
                # TX_NOTE Keep the "(%Z)" formatting string
                xlabel = _('Time (%Z)')
            elif self.duration <= timedelta(days=7):
                xlabel = _('Day')
            else:
                xlabel = _('Date')
            xlabel = self.graph.get_value('xlabel', xlabel)
            if sys.version_info[0] < 3:
                xlabel = xlabel.encode(self.encoding[0])
            xlabel = x_hi.strftime(xlabel)
            if sys.version_info[0] < 3:
                xlabel = xlabel.decode(self.encoding[0])
            result += u'set xlabel "%s"\n' % xlabel
            dateformat = '%Y/%m/%d'
            dateformat = self.graph.get_value('dateformat', dateformat)
            if sys.version_info[0] < 3:
                dateformat = dateformat.encode(self.encoding[0])
            ldat = x_lo.strftime(dateformat)
            rdat = x_hi.strftime(dateformat)
            if sys.version_info[0] < 3:
                ldat = ldat.decode(self.encoding[0])
                rdat = rdat.decode(self.encoding[0])
            if ldat:
                result += u'set label "%s" at "%s", graph -0.3 left\n' % (
                    ldat, self.x_lo.isoformat())
            if rdat != ldat:
                result += u'set label "%s" at "%s", graph -0.3 right\n' % (
                    rdat, self.x_hi.isoformat())
        # set bottom margin
        bmargin = eval(plot.get_value('bmargin', '-1'))
        result += u'set bmargin %g\n' % (bmargin)
        # set y ranges and tics
        yrange = plot.get_value('yrange', None)
        y2range = plot.get_value('y2range', None)
        ytics = plot.get_value('ytics', 'autofreq')
        y2tics = plot.get_value('y2tics', '')
        if y2tics and not y2range:
            y2range = yrange
        elif y2range and not y2tics:
            y2tics = 'autofreq'
        if yrange:
            result += u'set yrange [%s]\n' % (yrange.replace(',', ':'))
        else:
            result += u'set yrange [*:*]\n'
        if y2range:
            result += u'set y2range [%s]\n' % (y2range.replace(',', ':'))
        if y2tics:
            result += u'set ytics nomirror %s; set y2tics %s\n' % (ytics, y2tics)
        else:
            result += u'unset y2tics; set ytics mirror %s\n' % (ytics)
        # set grid
        result += u'unset grid\n'
        grid = plot.get_value('grid', None)
        if grid is not None:
            result += u'set grid %s\n' % grid
        # x_lo & x_hi are in local time, data is indexed in UTC
        start = self.x_lo - self.utcoffset
        stop = self.x_hi - self.utcoffset
        cumu_start = start
        if source == self.raw_data:
            boxwidth = 240      # assume 5 minute data interval
            start = source.before(start)
        elif source == self.hourly_data:
            boxwidth = 2800
            start = source.before(start)
            interval = timedelta(minutes=90)
        elif source == self.monthly_data:
            boxwidth = 2800 * 24 * 30
            interval = timedelta(days=46)
        else:
            interval = timedelta(hours=36)
            boxwidth = 2800 * 24
        boxwidth = eval(plot.get_value('boxwidth', str(boxwidth)))
        result += u'set boxwidth %d\n' % boxwidth
        for command in plot.get_values('command'):
            result += u'%s\n' % command
        stop = source.after(stop)
        if stop:
            stop = stop + timedelta(minutes=1)
        # write data files
        subplots = []
        for subplot_no in range(subplot_count):
            subplot = Record()
            subplot.subplot = subplot_list[subplot_no]
            subplot.dat_file = os.path.join(self.work_dir, 'plot_%d_%d.dat' % (
                plot_no, subplot_no))
            self.tmp_files.append(subplot.dat_file)
            subplot.dat = open(subplot.dat_file, 'w')
            subplot.xcalc = subplot.subplot.get_value('xcalc', None)
            subplot.ycalc = subplot.subplot.get_value('ycalc', None)
            subplot.cummulative = 'last_ycalc' in subplot.ycalc
            if subplot.xcalc:
                subplot.xcalc = compile(subplot.xcalc, '<string>', 'eval')
            subplot.ycalc = compile(subplot.ycalc, '<string>', 'eval')
            subplot.last_ycalcs = 0.0
            subplot.last_idx = None
            subplots.append(subplot)
        for data in source[start:stop]:
            for subplot in subplots:
                if subplot.xcalc:
                    idx = eval(subplot.xcalc)
                    if idx is None:
                        continue
                else:
                    idx = data['idx']
                idx += self.utcoffset
                if not subplot.cummulative and subplot.last_idx:
                    if source == self.raw_data:
                        interval = timedelta(minutes=((data['delay']*3)+1)//2)
                    if idx - subplot.last_idx > interval:
                        # missing data
                        subplot.dat.write('%s ?\n' % (idx.isoformat()))
                subplot.last_idx = idx
                try:
                    if subplot.cummulative and data['idx'] <= cumu_start:
                        value = 0.0
                    else:
                        last_ycalc = subplot.last_ycalcs
                        value = eval(subplot.ycalc)
                    if not isinstance(value, tuple):
                        value = (value,)
                    values = (idx.isoformat(),) + value
                    vformat = '%s' + (' %g' * len(value)) + '\n'
                    subplot.dat.write(vformat % values)
                    subplot.last_ycalcs = value[0]
                except TypeError:
                    if not subplot.cummulative:
                        subplot.dat.write('%s ?\n' % (idx.isoformat()))
                    subplot.last_ycalcs = 0.0
        for subplot in subplots:
            # ensure the data file isn't empty
            idx = self.x_hi + self.duration
            subplot.dat.write('%s ?\n' % (idx.isoformat()))
            subplot.dat.close()
        # plot data
        result += u'plot '
        colour_idx = 0
        for subplot_no in range(subplot_count):
            subplot = subplots[subplot_no]
            colour_idx += 1
            colour = subplot.subplot.get_value('colour', str(colour_idx))
            style = subplot.subplot.get_value(
                'style', 'smooth unique lc %s lw 1' % (colour))
            words = style.split()
            if len(words) > 1 and words[0] in ('+', 'x', 'line', 'candlesticks', 'candlesticksw'):
                width = float(words[1])
            else:
                width = 1
            if len(words) > 2 and words[0] in ('candlesticksw'):
                whiskerwidth = float(words[2])
            else:
                whiskerwidth = 1
            whiskerbars = ''
            if style == 'box':
                style = 'lc %s lw 0 with boxes' % (colour)
            elif words[0] == 'candlesticks':
                style = 'lc %s lw %g with candlesticks' % (colour, width)
            elif words[0] == 'candlesticksw':
                style = 'lc %s lw %g with candlesticks' % (colour, width)
                whiskerbars = ' whiskerbars %g' % (whiskerwidth)
            elif words[0] == '+':
                style = 'lc %s lw %g pt 1 with points' % (colour, width)
            elif words[0] == 'x':
                style = 'lc %s lw %g pt 2 with points' % (colour, width)
            elif words[0] == 'line':
                style = 'smooth unique lc %s lw %g' % (colour, width)
            axes = subplot.subplot.get_value('axes', 'x1y1')
            title = subplot.subplot.get_value('title', '')
            using = ':'.join('($%d)' % x for x in range(2, len(values)+1))
            result += u' "%s" using 1:%s axes %s %s title "%s"%s' % (
                subplot.dat_file, using, axes, style, title, whiskerbars)
            if subplot_no != subplot_count - 1:
                result += u', \\'
            result += u'\n'
        return result
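
Example 14 wraps each strftime() call in encode/decode pairs guarded by sys.version_info[0] < 3, because Python 2's strftime expects byte strings while Python 3's works on text directly. A minimal sketch of that guard as a helper (the function name and default encoding are illustrative):

import sys
import datetime

def safe_strftime(when, fmt, encoding='utf-8'):
    # Python 2: strftime wants byte strings, so round-trip through
    # the chosen encoding; Python 3: strftime accepts text directly.
    if sys.version_info[0] < 3:
        return when.strftime(fmt.encode(encoding)).decode(encoding)
    return when.strftime(fmt)

print(safe_strftime(datetime.datetime.now(), '%Y/%m/%d'))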

Example 15

Project: pymo Source File: test_basics.py
Function: test03_simplecursorstuff
    def test03_SimpleCursorStuff(self, get_raises_error=0, set_raises_error=0):
        if verbose:
            print '\n', '-=' * 30
            print "Running %s.test03_SimpleCursorStuff (get_error %s, set_error %s)..." % \
                  (self.__class__.__name__, get_raises_error, set_raises_error)

        if self.env and self.dbopenflags & db.DB_AUTO_COMMIT:
            txn = self.env.txn_begin()
        else:
            txn = None
        c = self.d.cursor(txn=txn)

        rec = c.first()
        count = 0
        while rec is not None:
            count = count + 1
            if verbose and count % 100 == 0:
                print rec
            try:
                rec = c.next()
            except db.DBNotFoundError, val:
                if get_raises_error:
                    import sys
                    if sys.version_info < (2, 6) :
                        self.assertEqual(val[0], db.DB_NOTFOUND)
                    else :
                        self.assertEqual(val.args[0], db.DB_NOTFOUND)
                    if verbose: print val
                    rec = None
                else:
                    self.fail("unexpected DBNotFoundError")
            self.assertEqual(c.get_current_size(), len(c.current()[1]),
                    "%s != len(%r)" % (c.get_current_size(), c.current()[1]))

        self.assertEqual(count, self._numKeys)


        rec = c.last()
        count = 0
        while rec is not None:
            count = count + 1
            if verbose and count % 100 == 0:
                print rec
            try:
                rec = c.prev()
            except db.DBNotFoundError, val:
                if get_raises_error:
                    import sys
                    if sys.version_info < (2, 6) :
                        self.assertEqual(val[0], db.DB_NOTFOUND)
                    else :
                        self.assertEqual(val.args[0], db.DB_NOTFOUND)
                    if verbose: print val
                    rec = None
                else:
                    self.fail("unexpected DBNotFoundError")

        self.assertEqual(count, self._numKeys)

        rec = c.set('0505')
        rec2 = c.current()
        self.assertEqual(rec, rec2)
        self.assertEqual(rec[0], '0505')
        self.assertEqual(rec[1], self.makeData('0505'))
        self.assertEqual(c.get_current_size(), len(rec[1]))

        # make sure we get empty values properly
        rec = c.set('empty value')
        self.assertEqual(rec[1], '')
        self.assertEqual(c.get_current_size(), 0)

        try:
            n = c.set('bad key')
        except db.DBNotFoundError, val:
            import sys
            if sys.version_info < (2, 6) :
                self.assertEqual(val[0], db.DB_NOTFOUND)
            else :
                self.assertEqual(val.args[0], db.DB_NOTFOUND)
            if verbose: print val
        else:
            if set_raises_error:
                self.fail("expected exception")
            if n is not None:
                self.fail("expected None: %r" % (n,))

        rec = c.get_both('0404', self.makeData('0404'))
        self.assertEqual(rec, ('0404', self.makeData('0404')))

        try:
            n = c.get_both('0404', 'bad data')
        except db.DBNotFoundError, val:
            import sys
            if sys.version_info < (2, 6) :
                self.assertEqual(val[0], db.DB_NOTFOUND)
            else :
                self.assertEqual(val.args[0], db.DB_NOTFOUND)
            if verbose: print val
        else:
            if get_raises_error:
                self.fail("expected exception")
            if n is not None:
                self.fail("expected None: %r" % (n,))

        if self.d.get_type() == db.DB_BTREE:
            rec = c.set_range('011')
            if verbose:
                print "searched for '011', found: ", rec

            rec = c.set_range('011',dlen=0,doff=0)
            if verbose:
                print "searched (partial) for '011', found: ", rec
            if rec[1] != '': self.fail('expected empty data portion')

            ev = c.set_range('empty value')
            if verbose:
                print "search for 'empty value' returned", ev
            if ev[1] != '': self.fail('empty value lookup failed')

        c.set('0499')
        c.delete()
        try:
            rec = c.current()
        except db.DBKeyEmptyError, val:
            if get_raises_error:
                import sys
                if sys.version_info < (2, 6) :
                    self.assertEqual(val[0], db.DB_KEYEMPTY)
                else :
                    self.assertEqual(val.args[0], db.DB_KEYEMPTY)
                if verbose: print val
            else:
                self.fail("unexpected DBKeyEmptyError")
        else:
            if get_raises_error:
                self.fail('DBKeyEmptyError exception expected')

        c.next()
        c2 = c.dup(db.DB_POSITION)
        self.assertEqual(c.current(), c2.current())

        c2.put('', 'a new value', db.DB_CURRENT)
        self.assertEqual(c.current(), c2.current())
        self.assertEqual(c.current()[1], 'a new value')

        c2.put('', 'er', db.DB_CURRENT, dlen=0, doff=5)
        self.assertEqual(c2.current()[1], 'a newer value')

        c.close()
        c2.close()
        if txn:
            txn.commit()

        # time to abuse the closed cursors and hope we don't crash
        methods_to_test = {
            'current': (),
            'delete': (),
            'dup': (db.DB_POSITION,),
            'first': (),
            'get': (0,),
            'next': (),
            'prev': (),
            'last': (),
            'put':('', 'spam', db.DB_CURRENT),
            'set': ("0505",),
        }
        for method, args in methods_to_test.items():
            try:
                if verbose:
                    print "attempting to use a closed cursor's %s method" % \
                          method
                # a bug may cause a NULL pointer dereference...
                getattr(c, method)(*args)
            except db.DBError, val:
                import sys
                if sys.version_info < (2, 6) :
                    self.assertEqual(val[0], 0)
                else :
                    self.assertEqual(val.args[0], 0)
                if verbose: print val
            else:
                self.fail("no exception raised when using a buggy cursor's"
                          "%s method" % method)

        #
        # free cursor referencing a closed database, it should not barf:
        #
        oldcursor = self.d.cursor(txn=txn)
        self.d.close()

        # this would originally cause a segfault when the cursor for a
        # closed database was cleaned up.  it should not anymore.
        # SF pybsddb bug id 667343
        del oldcursor
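
A note on the version gate above: Python 2.6 deprecated indexing an
exception object directly in favour of its args attribute, which is why
the test branches on sys.version_info < (2, 6). A minimal, self-contained
sketch of the same gate (the KeyError and its 'detail' payload are
illustrative, not taken from the test):

import sys

try:
    raise KeyError('detail')
except KeyError as exc:
    if sys.version_info < (2, 6):
        detail = exc[0]        # pre-2.6 style: exceptions were indexable
    else:
        detail = exc.args[0]   # 2.6+: the supported accessor
    print(detail)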

Example 16

Project: tensorlayer-chinese Source File: files.py
Function: load_cifar10_dataset
def load_cifar10_dataset(shape=(-1, 32, 32, 3), plotable=False, second=3):
    """The CIFAR-10 dataset consists of 60000 32x32 colour images in 10 classes, with
    6000 images per class. There are 50000 training images and 10000 test images.

    The dataset is divided into five training batches and one test batch, each with
    10000 images. The test batch contains exactly 1000 randomly-selected images from
    each class. The training batches contain the remaining images in random order,
    but some training batches may contain more images from one class than another.
    Between them, the training batches contain exactly 5000 images from each class.

    Parameters
    ----------
    shape : tuple
        The shape of digit images: e.g. (-1, 3, 32, 32) , (-1, 32, 32, 3) , (-1, 32*32*3)
    plotable : True, False
        Whether to plot some image examples.
    second : int
        If ``plotable`` is True, ``second`` is the display time.

    Examples
    --------
    >>> X_train, y_train, X_test, y_test = tl.files.load_cifar10_dataset(shape=(-1, 32, 32, 3), plotable=True)

    Notes
    ------
    CIFAR-10 images can only be displayed without color change under uint8.
    >>> X_train = np.asarray(X_train, dtype=np.uint8)
    >>> plt.ion()
    >>> fig = plt.figure(1232)
    >>> count = 1
    >>> for row in range(10):
    >>>     for col in range(10):
    >>>         a = fig.add_subplot(10, 10, count)
    >>>         plt.imshow(X_train[count-1], interpolation='nearest')
    >>>         plt.gca().xaxis.set_major_locator(plt.NullLocator())    # hide the tick marks
    >>>         plt.gca().yaxis.set_major_locator(plt.NullLocator())
    >>>         count = count + 1
    >>> plt.draw()
    >>> plt.pause(3)

    References
    ----------
    - `CIFAR website <https://www.cs.toronto.edu/~kriz/cifar.html>`_
    - `Data download link <https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz>`_
    - `Code references <https://teratail.com/questions/28932>`_
    """
    import sys
    import pickle
    import numpy as np


    # We first define a download function, supporting both Python 2 and 3.
    filename = 'cifar-10-python.tar.gz'
    if sys.version_info[0] == 2:
        from urllib import urlretrieve
    else:
        from urllib.request import urlretrieve

    def download(filename, source='https://www.cs.toronto.edu/~kriz/'):
        print("Downloading %s" % filename)
        urlretrieve(source + filename, filename)

    # After downloading the cifar-10-python.tar.gz, we need to unzip it.
    import tarfile
    def un_tar(file_name):
        print("Extracting %s" % file_name)
        tar = tarfile.open(file_name)
        names = tar.getnames()
        # if os.path.isdir(file_name + "_files"):
        #     pass
        # else:
        #     os.mkdir(file_name + "_files")
        for name in names:
            tar.extract(name) #, file_name.split('.')[0])
        tar.close()
        print("Extracted to %s" % names[0])


    if not os.path.exists('cifar-10-batches-py'):
        download(filename)
        un_tar(filename)


    def unpickle(file):
        fp = open(file, 'rb')
        if sys.version_info.major == 2:
            data = pickle.load(fp)
        elif sys.version_info.major == 3:
            data = pickle.load(fp, encoding='latin-1')
        fp.close()
        return data

    X_train = None
    y_train = []

    path = '' # you can set a dir to the data here.

    for i in range(1,6):
        data_dic = unpickle(path+"cifar-10-batches-py/data_batch_{}".format(i))
        if i == 1:
            X_train = data_dic['data']
        else:
            X_train = np.vstack((X_train, data_dic['data']))
        y_train += data_dic['labels']

    test_data_dic = unpickle(path+"cifar-10-batches-py/test_batch")
    X_test = test_data_dic['data']
    y_test = np.array(test_data_dic['labels'])

    if shape == (-1, 3, 32, 32):
        X_test = X_test.reshape(shape)
        X_train = X_train.reshape(shape)
        # X_train = np.transpose(X_train, (0, 1, 3, 2))
    elif shape == (-1, 32, 32, 3):
        X_test = X_test.reshape(shape, order='F')
        X_train = X_train.reshape(shape, order='F')
        X_test = np.transpose(X_test, (0, 2, 1, 3))
        X_train = np.transpose(X_train, (0, 2, 1, 3))
    else:
        X_test = X_test.reshape(shape)
        X_train = X_train.reshape(shape)

    y_train = np.array(y_train)

    if plotable == True:
        print('\nCIFAR-10')
        import matplotlib.pyplot as plt
        fig = plt.figure(1)

        print('Shape of a training image: X_train[0]',X_train[0].shape)

        plt.ion()       # interactive mode
        count = 1
        for row in range(10):
            for col in range(10):
                a = fig.add_subplot(10, 10, count)
                if shape == (-1, 3, 32, 32):
                    # plt.imshow(X_train[count-1], interpolation='nearest')
                    plt.imshow(np.transpose(X_train[count-1], (1, 2, 0)), interpolation='nearest')
                    # plt.imshow(np.transpose(X_train[count-1], (2, 1, 0)), interpolation='nearest')
                elif shape == (-1, 32, 32, 3):
                    plt.imshow(X_train[count-1], interpolation='nearest')
                    # plt.imshow(np.transpose(X_train[count-1], (1, 0, 2)), interpolation='nearest')
                else:
                    raise Exception("Do not support the given 'shape' to plot the image examples")
                plt.gca().xaxis.set_major_locator(plt.NullLocator())    # hide the tick marks
                plt.gca().yaxis.set_major_locator(plt.NullLocator())
                count = count + 1
        plt.draw()      # interactive mode
        plt.pause(3)   # interactive mode

        print("X_train:",X_train.shape)
        print("y_train:",y_train.shape)
        print("X_test:",X_test.shape)
        print("y_test:",y_test.shape)

    X_train = np.asarray(X_train, dtype=np.float32)
    X_test = np.asarray(X_test, dtype=np.float32)
    y_train = np.asarray(y_train, dtype=np.int32)
    y_test = np.asarray(y_test, dtype=np.int32)

    return X_train, y_train, X_test, y_test
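
Two gates are at work in this loader: sys.version_info[0] == 2 selects
the right location for urlretrieve (it moved from urllib to
urllib.request in Python 3), and sys.version_info.major decides whether
pickle.load receives an encoding argument. The named field major and the
index [0] are interchangeable; sys.version_info gained named attributes
in Python 2.7/3.1. A condensed sketch of the same pattern (load_batch
and its path argument are illustrative names):

import sys
import pickle

if sys.version_info[0] == 2:
    from urllib import urlretrieve          # Python 2 location
else:
    from urllib.request import urlretrieve  # moved in Python 3

def load_batch(path):
    # Python 3 needs encoding='latin-1' to read pickles written by
    # Python 2; passing that argument on Python 2 raises TypeError.
    with open(path, 'rb') as fp:
        if sys.version_info.major == 2:
            return pickle.load(fp)
        return pickle.load(fp, encoding='latin-1')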

Example 17

Project: TACTIC Source File: test_tools.py
def setup_server():
    
    # Put check_access in a custom toolbox with its own namespace
    myauthtools = cherrypy._cptools.Toolbox("myauth")
    
    def check_access(default=False):
        if not getattr(cherrypy.request, "userid", default):
            raise cherrypy.HTTPError(401)
    myauthtools.check_access = cherrypy.Tool('before_request_body', check_access)
    
    def numerify():
        def number_it(body):
            for chunk in body:
                for k, v in cherrypy.request.numerify_map:
                    chunk = chunk.replace(k, v)
                yield chunk
        cherrypy.response.body = number_it(cherrypy.response.body)
    
    class NumTool(cherrypy.Tool):
        def _setup(self):
            def makemap():
                m = self._merged_args().get("map", {})
                cherrypy.request.numerify_map = m.items()
            cherrypy.request.hooks.attach('on_start_resource', makemap)
            
            def critical():
                cherrypy.request.error_response = cherrypy.HTTPError(502).set_response
            critical.failsafe = True
            
            cherrypy.request.hooks.attach('on_start_resource', critical)
            cherrypy.request.hooks.attach(self._point, self.callable)
    
    tools.numerify = NumTool('before_finalize', numerify)
    
    # It's not mandatory to inherit from cherrypy.Tool.
    class NadsatTool:
        
        def __init__(self):
            self.ended = {}
            self._name = "nadsat"
        
        def nadsat(self):
            def nadsat_it_up(body):
                for chunk in body:
                    chunk = chunk.replace("good", "horrorshow")
                    chunk = chunk.replace("piece", "lomtick")
                    yield chunk
            cherrypy.response.body = nadsat_it_up(cherrypy.response.body)
        nadsat.priority = 0
        
        def cleanup(self):
            # This runs after the request has been completely written out.
            cherrypy.response.body = "razdrez"
            id = cherrypy.request.params.get("id")
            if id:
                self.ended[id] = True
        cleanup.failsafe = True
        
        def _setup(self):
            cherrypy.request.hooks.attach('before_finalize', self.nadsat)
            cherrypy.request.hooks.attach('on_end_request', self.cleanup)
    tools.nadsat = NadsatTool()
    
    def pipe_body():
        cherrypy.request.process_request_body = False
        clen = int(cherrypy.request.headers['Content-Length'])
        cherrypy.request.body = cherrypy.request.rfile.read(clen)
    
    # Assert that we can use a callable object instead of a function.
    class Rotator(object):
        def __call__(self, scale):
            r = cherrypy.response
            r.collapse_body()
            r.body = [chr((ord(x) + scale) % 256) for x in r.body[0]]
    cherrypy.tools.rotator = cherrypy.Tool('before_finalize', Rotator())
    
    def stream_handler(next_handler, *args, **kwargs):
        cherrypy.response.output = o = StringIO()
        try:
            response = next_handler(*args, **kwargs)
            # Ignore the response and return our accumulated output instead.
            return o.getvalue()
        finally:
            o.close()
    cherrypy.tools.streamer = cherrypy._cptools.HandlerWrapperTool(stream_handler)
    
    class Root:
        def index(self):
            return "Howdy earth!"
        index.exposed = True
        
        def tarfile(self):
            cherrypy.response.output.write('I am ')
            cherrypy.response.output.write('a tarfile')
        tarfile.exposed = True
        tarfile._cp_config = {'tools.streamer.on': True}
        
        def euro(self):
            hooks = list(cherrypy.request.hooks['before_finalize'])
            hooks.sort()
            cbnames = [x.callback.__name__ for x in hooks]
            assert cbnames == ['gzip'], cbnames
            priorities = [x.priority for x in hooks]
            assert priorities == [80], priorities
            yield u"Hello,"
            yield u"world"
            yield europoundUnicode
        euro.exposed = True
        
        # Bare hooks
        def pipe(self):
            return cherrypy.request.body
        pipe.exposed = True
        pipe._cp_config = {'hooks.before_request_body': pipe_body}
        
        # Multiple decorators; include kwargs just for fun.
        # Note that rotator must run before gzip.
        def decorated_euro(self, *vpath):
            yield u"Hello,"
            yield u"world"
            yield europoundUnicode
        decorated_euro.exposed = True
        decorated_euro = tools.gzip(compress_level=6)(decorated_euro)
        decorated_euro = tools.rotator(scale=3)(decorated_euro)
    
    root = Root()
    
    
    class TestType(type):
        """Metaclass which automatically exposes all functions in each subclass,
        and adds an instance of the subclass as an attribute of root.
        """
        def __init__(cls, name, bases, dct):
            type.__init__(cls, name, bases, dct)
            for value in dct.itervalues():
                if isinstance(value, types.FunctionType):
                    value.exposed = True
            setattr(root, name.lower(), cls())
    class Test(object):
        __metaclass__ = TestType
    
    
    # METHOD ONE:
    # Declare Tools in _cp_config
    class Demo(Test):
        
        _cp_config = {"tools.nadsat.on": True}
        
        def index(self, id=None):
            return "A good piece of cherry pie"
        
        def ended(self, id):
            return repr(tools.nadsat.ended[id])
        
        def err(self, id=None):
            raise ValueError()
        
        def errinstream(self, id=None):
            yield "nonconfidential"
            raise ValueError()
            yield "confidential"
        
        # METHOD TWO: decorator using Tool()
        # We support Python 2.3, but the @-deco syntax would look like this:
        # @tools.check_access()
        def restricted(self):
            return "Welcome!"
        restricted = myauthtools.check_access()(restricted)
        userid = restricted
        
        def err_in_onstart(self):
            return "success!"
        
        def stream(self, id=None):
            for x in xrange(100000000):
                yield str(x)
        stream._cp_config = {'response.stream': True}
    
    
    conf = {
        # METHOD THREE:
        # Declare Tools in detached config
        '/demo': {
            'tools.numerify.on': True,
            'tools.numerify.map': {"pie": "3.14159"},
        },
        '/demo/restricted': {
            'request.show_tracebacks': False,
        },
        '/demo/userid': {
            'request.show_tracebacks': False,
            'myauth.check_access.default': True,
        },
        '/demo/errinstream': {
            'response.stream': True,
        },
        '/demo/err_in_onstart': {
            # Because this isn't a dict, on_start_resource will error.
            'tools.numerify.map': "pie->3.14159"
        },
        # Combined tools
        '/euro': {
            'tools.gzip.on': True,
            'tools.encode.on': True,
        },
        # Priority specified in config
        '/decorated_euro/subpath': {
            'tools.gzip.priority': 10,
        },
        # Handler wrappers
        '/tarfile': {'tools.streamer.on': True}
    }
    app = cherrypy.tree.mount(root, config=conf)
    app.request_class.namespaces['myauth'] = myauthtools
    
    if sys.version_info >= (2, 5):
        from cherrypy.test import py25
        root.tooldecs = py25.ToolExamples()
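
Note that sys.version_info >= (2, 5) here guards an import, not a call:
py25 contains syntax the project's oldest supported interpreters could
not even parse, so an unconditional import would raise SyntaxError at
import time before any runtime check could run. The same pattern against
a current stdlib module (importlib.metadata appeared in Python 3.8; the
None fallback is an assumption for illustration):

import sys

if sys.version_info >= (3, 8):
    import importlib.metadata as metadata
else:
    metadata = None  # feature disabled on older interpreters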

Example 18

Project: CredNinja Source File: CredNinja.py
def main():
    global output_file_handler, settings, text_green, text_blue, text_yellow, text_red, text_end
    print(text_blue + """


   .d8888b.                       888 888b    888 d8b           d8b          
  d88P  Y88b                      888 8888b   888 Y8P           Y8P          
  888    888                      888 88888b  888                            
  888        888d888 .d88b.   .d88888 888Y88b 888 888 88888b.  8888  8888b.  
  888        888P"  d8P  Y8b d88" 888 888 Y88b888 888 888 "88b "888     "88b 
  888    888 888    88888888 888  888 888  Y88888 888 888  888  888 .d888888 
  Y88b  d88P 888    Y8b.     Y88b 888 888   Y8888 888 888  888  888 888  888 
   "Y8888P"  888     "Y8888   "Y88888 888    Y888 888 888  888  888 "Y888888 
                                                                888          
                                                               d88P          
                                                             888P"           

                    v{} (Built {}) - Chris King (@raikiasec)

                         For help: ./CredNinja.py -h
""".format(version_number,version_build) + text_end)


    if sys.version_info < (3,0):
        print("ERROR: CredNinja runs on Python 3.  Run as \"./CredNinja.py\" or \"python3 CredNinja.py\"!")
        sys.exit(1)
    args = parse_cli_args()
    settings['os'] = args.os
    settings['domain'] = args.domain
    settings['timeout'] = args.timeout
    settings['delay'] = args.delay
    settings['users'] = args.users
    settings['users_time'] = args.users_time
    settings['scan'] = args.scan
    settings['scan_timeout'] = args.scan_timeout
    settings['no_color'] = args.no_color
    hosts_to_check = []
    creds_to_check = []
    mode = 'all'
    if settings['no_color']:
        text_blue = ''
        text_green = ''
        text_red = ''
        text_yellow = ''
        text_end = ''
    if os.path.isfile(args.accounts):
        with open(args.accounts) as accountfile:
            for line in accountfile:
                if line.strip():
                    parts = line.strip().split(args.passdelimiter,1)
                    if len(parts) != 2:
                        print(text_red + "ERROR: Credential '" + line.strip() + "' did not have the password delimiter" + text_end)
                        sys.exit(1)
                    creds_to_check.append(parts)
    else:
        parts = args.accounts.strip().split(args.passdelimiter,1)
        if len(parts) != 2:
            print(text_red + "ERROR: Credential '" + args.accounts.strip() + "' did not have the password delimiter" + text_end)
            sys.exit(1)
        creds_to_check.append(parts)

    if os.path.isfile(args.servers):
        with open(args.servers) as serverfile:
            for line in serverfile:
                if line.strip():
                    hosts_to_check.append(line.strip())
    else:
        hosts_to_check.append(args.servers)
    if len(hosts_to_check) == 0 or len(creds_to_check) == 0:
        print(text_red + "ERROR: You must supply hosts and credentials at least!" + text_end)
        sys.exit(1)
    
    mode = 'a'
    if args.invalid:
        mode = 'i'
    if args.valid:
        mode = 'v'
    if args.invalid and args.valid:
        mode = 'a'

    if args.output:
        output_file_handler = open(args.output, 'w')
    
    command_list = ['smbclient', '-U', '', '', '', '-c', 'dir']
    if args.ntlm and shutil.which('pth-smbclient') is None:
        print(text_red + "ERROR: pth-smbclient is not found!  Make sure you install it (or use Kali!)" + text_end)
        sys.exit(1)
    elif args.ntlm:
        command_list[0] = 'pth-smbclient'
        command_list.append('--pw-nt-hash')
    passwd_header = 'Password'
    if command_list[0] == 'pth-smbclient':
        passwd_header = 'Hash'

    if (len(hosts_to_check) * len(creds_to_check)) < args.threads:
        args.threads = len(hosts_to_check) * len(creds_to_check)

    try:
        if settings['os'] or settings['domain'] or settings['users']:
            print(text_yellow + ("%-35s %-35s %-35s %-25s %s" % ("Server", "Username", passwd_header, "Response", "Info")) + text_end)
        else:
            print(text_yellow + ("%-35s %-35s %-35s %-25s " % ("Server", "Username", passwd_header, "Response")) + text_end)
        print(text_yellow + "------------------------------------------------------------------------------------------------------------------------------------------------------" + text_end)

        if args.stripe == None:
            total = len(hosts_to_check)
            done = -1
            last_status_report = -1
            if settings['scan']:
                print(text_green + "[!] Starting scan of port 445 on all " + str(len(hosts_to_check)) + " hosts...." +  text_end)
            for host in hosts_to_check:
                done += 1
                if settings['scan']:
                    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                    s.settimeout(settings['scan_timeout'])
                    percent_done = int((done / total) * 100)
                    if (percent_done%5 == 0 and percent_done != last_status_report):
                        print(text_green + "[*] " + str(percent_done) + "% done... [" + str(done) + "/" + str(total) + "]" + text_end)
                        last_status_report = percent_done
                    try:
                        s.connect((host,445))
                        s.close()
                    except Exception:
                        print("%-35s %-35s %-35s %-25s" % (host, "N/A", "N/A", text_red + "Failed Portscan" + text_end))
                        continue
                for cred in creds_to_check:
                    credQueue.put([host, cred])
        else:
            if len(hosts_to_check) < len(creds_to_check):
                print(text_red + "ERROR: For striping to work, you must have the same number or more hosts than you do creds!"  + text_end)
                sys.exit(1)
            if (len(creds_to_check) < args.threads):
                args.threads = len(creds_to_check)
            random.shuffle(hosts_to_check)
            for i in range(len(creds_to_check)):
                credQueue.put([hosts_to_check[i], creds_to_check[i]])

        thread_list = []
        for i in range(args.threads):
            thread_list.append(CredThread(mode, command_list))
        for t in thread_list:
            t.daemon = True
            t.start()

        for t in thread_list:
            t.join()
    except KeyboardInterrupt:
        print("\nQuitting!")
        sys.exit(1)
    if output_file_handler is not None:
        output_file_handler.close()
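
The check above is a fail-fast interpreter guard: it prints an explicit
message and exits instead of letting Python 2 die later on 3-only code.
Note that for the guard to execute at all, the whole file must still
parse under Python 2, so such scripts avoid 3-only syntax and place the
check as early as possible. A minimal sketch (the message text is
illustrative):

import sys

def require_python3():
    # Abort with a readable message rather than an obscure SyntaxError
    # or NameError somewhere deeper in the program.
    if sys.version_info < (3, 0):
        sys.stderr.write("This tool requires Python 3.\n")
        sys.exit(1)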

Example 19

Project: HTPC-Manager Source File: test_core.py
    def setup_server():
        class Root:
            
            def index(self):
                return "hello"
            index.exposed = True
            
            favicon_ico = tools.staticfile.handler(filename=favicon_path)
            
            def defct(self, newct):
                newct = "text/%s" % newct
                cherrypy.config.update({'tools.response_headers.on': True,
                                        'tools.response_headers.headers':
                                        [('Content-Type', newct)]})
            defct.exposed = True
            
            def baseurl(self, path_info, relative=None):
                return cherrypy.url(path_info, relative=bool(relative))
            baseurl.exposed = True
        
        root = Root()
                
        if sys.version_info >= (2, 5):
            from cherrypy.test._test_decorators import ExposeExamples
            root.expose_dec = ExposeExamples()


        class TestType(type):
            """Metaclass which automatically exposes all functions in each subclass,
            and adds an instance of the subclass as an attribute of root.
            """
            def __init__(cls, name, bases, dct):
                type.__init__(cls, name, bases, dct)
                for value in itervalues(dct):
                    if isinstance(value, types.FunctionType):
                        value.exposed = True
                setattr(root, name.lower(), cls())
        Test = TestType('Test', (object, ), {})
        
        
        class URL(Test):
            
            _cp_config = {'tools.trailing_slash.on': False}
            
            def index(self, path_info, relative=None):
                if relative != 'server':
                    relative = bool(relative)
                return cherrypy.url(path_info, relative=relative)
            
            def leaf(self, path_info, relative=None):
                if relative != 'server':
                    relative = bool(relative)
                return cherrypy.url(path_info, relative=relative)


        def log_status():
            Status.statuses.append(cherrypy.response.status)
        cherrypy.tools.log_status = cherrypy.Tool('on_end_resource', log_status)


        class Status(Test):
            
            def index(self):
                return "normal"
            
            def blank(self):
                cherrypy.response.status = ""
            
            # According to RFC 2616, new status codes are OK as long as they
            # are between 100 and 599.
            
            # Here is an illegal code...
            def illegal(self):
                cherrypy.response.status = 781
                return "oops"
            
            # ...and here is an unknown but legal code.
            def unknown(self):
                cherrypy.response.status = "431 My custom error"
                return "funky"
            
            # Non-numeric code
            def bad(self):
                cherrypy.response.status = "error"
                return "bad news"
            
            statuses = []
            def on_end_resource_stage(self):
                return repr(self.statuses)
            on_end_resource_stage._cp_config = {'tools.log_status.on': True}


        class Redirect(Test):
            
            class Error:
                _cp_config = {"tools.err_redirect.on": True,
                              "tools.err_redirect.url": "/errpage",
                              "tools.err_redirect.internal": False,
                              }
                
                def index(self):
                    raise NameError("redirect_test")
                index.exposed = True
            error = Error()
            
            def index(self):
                return "child"
            
            def custom(self, url, code):
                raise cherrypy.HTTPRedirect(url, code)
            
            def by_code(self, code):
                raise cherrypy.HTTPRedirect("somewhere%20else", code)
            by_code._cp_config = {'tools.trailing_slash.extra': True}
            
            def nomodify(self):
                raise cherrypy.HTTPRedirect("", 304)
            
            def proxy(self):
                raise cherrypy.HTTPRedirect("proxy", 305)
            
            def stringify(self):
                return str(cherrypy.HTTPRedirect("/"))
            
            def fragment(self, frag):
                raise cherrypy.HTTPRedirect("/some/url#%s" % frag)
        
        def login_redir():
            if not getattr(cherrypy.request, "login", None):
                raise cherrypy.InternalRedirect("/internalredirect/login")
        tools.login_redir = _cptools.Tool('before_handler', login_redir)
        
        def redir_custom():
            raise cherrypy.InternalRedirect("/internalredirect/custom_err")
        
        class InternalRedirect(Test):
            
            def index(self):
                raise cherrypy.InternalRedirect("/")
            
            def choke(self):
                return 3 / 0
            choke.exposed = True
            choke._cp_config = {'hooks.before_error_response': redir_custom}
            
            def relative(self, a, b):
                raise cherrypy.InternalRedirect("cousin?t=6")
            
            def cousin(self, t):
                assert cherrypy.request.prev.closed
                return cherrypy.request.prev.query_string
            
            def petshop(self, user_id):
                if user_id == "parrot":
                    # Trade it for a slug when redirecting
                    raise cherrypy.InternalRedirect('/image/getImagesByUser?user_id=slug')
                elif user_id == "terrier":
                    # Trade it for a fish when redirecting
                    raise cherrypy.InternalRedirect('/image/getImagesByUser?user_id=fish')
                else:
                    # This should pass the user_id through to getImagesByUser
                    raise cherrypy.InternalRedirect(
                        '/image/getImagesByUser?user_id=%s' % str(user_id))
            
            # We support Python 2.3, but the @-deco syntax would look like this:
            # @tools.login_redir()
            def secure(self):
                return "Welcome!"
            secure = tools.login_redir()(secure)
            # Since calling the tool returns the same function you pass in,
            # you could skip binding the return value, and just write:
            # tools.login_redir()(secure)
            
            def login(self):
                return "Please log in"
            
            def custom_err(self):
                return "Something went horribly wrong."
            
            def early_ir(self, arg):
                return "whatever"
            early_ir._cp_config = {'hooks.before_request_body': redir_custom}
        
        
        class Image(Test):
            
            def getImagesByUser(self, user_id):
                return "0 images for %s" % user_id


        class Flatten(Test):
            
            def as_string(self):
                return "content"
            
            def as_list(self):
                return ["con", "tent"]
            
            def as_yield(self):
                yield ntob("content")
            
            def as_dblyield(self):
                yield self.as_yield()
            as_dblyield._cp_config = {'tools.flatten.on': True}
            
            def as_refyield(self):
                for chunk in self.as_yield():
                    yield chunk
        
        
        class Ranges(Test):
            
            def get_ranges(self, bytes):
                return repr(httputil.get_ranges('bytes=%s' % bytes, 8))
            
            def slice_file(self):
                path = os.path.join(os.getcwd(), os.path.dirname(__file__))
                return static.serve_file(os.path.join(path, "static/index.html"))


        class Cookies(Test):
            
            def single(self, name):
                cookie = cherrypy.request.cookie[name]
                # Python2's SimpleCookie.__setitem__ won't take unicode keys.
                cherrypy.response.cookie[str(name)] = cookie.value
            
            def multiple(self, names):
                for name in names:
                    cookie = cherrypy.request.cookie[name]
                    # Python2's SimpleCookie.__setitem__ won't take unicode keys.
                    cherrypy.response.cookie[str(name)] = cookie.value

        def append_headers(header_list, debug=False):
            if debug:
                cherrypy.log(
                    "Extending response headers with %s" % repr(header_list),
                    "TOOLS.APPEND_HEADERS")
            cherrypy.serving.response.header_list.extend(header_list)
        cherrypy.tools.append_headers = cherrypy.Tool('on_end_resource', append_headers)
        
        class MultiHeader(Test):
            
            def header_list(self):
                pass
            header_list = cherrypy.tools.append_headers(header_list=[
                (ntob('WWW-Authenticate'), ntob('Negotiate')),
                (ntob('WWW-Authenticate'), ntob('Basic realm="foo"')),
                ])(header_list)
            
            def commas(self):
                cherrypy.response.headers['WWW-Authenticate'] = 'Negotiate,Basic realm="foo"'


        cherrypy.tree.mount(root)

Example 20

Project: sympy Source File: importtools.py
Function: import_module
def import_module(module, min_module_version=None, min_python_version=None,
        warn_not_installed=None, warn_old_version=None,
        module_version_attr='__version__', module_version_attr_call_args=None,
        __import__kwargs={}, catch=()):
    """
    Import and return a module if it is installed.

    If the module is not installed, it returns None.

    A minimum version for the module can be given as the keyword argument
    min_module_version.  This should be comparable against the module version.
    By default, module.__version__ is used to get the module version.  To
    override this, set the module_version_attr keyword argument.  If the
    attribute of the module to get the version should be called (e.g.,
    module.version()), then set module_version_attr_call_args to the args such
    that module.module_version_attr(*module_version_attr_call_args) returns the
    module's version.

    If the module version is less than min_module_version using the Python <
    comparison, None will be returned, even if the module is installed. You can
    use this to keep from importing an incompatible older version of a module.

    You can also specify a minimum Python version by using the
    min_python_version keyword argument.  This should be comparable against
    sys.version_info.

    If the keyword argument warn_not_installed is set to True, the function will
    emit a UserWarning when the module is not installed.

    If the keyword argument warn_old_version is set to True, the function will
    emit a UserWarning when the library is installed, but cannot be imported
    because of the min_module_version or min_python_version options.

    Note that because of the way warnings are handled, a warning will be
    emitted for each module only once.  You can change the default warning
    behavior by overriding the values of WARN_NOT_INSTALLED and WARN_OLD_VERSION
    in sympy.external.importtools.  By default, WARN_NOT_INSTALLED is False and
    WARN_OLD_VERSION is True.

    This function uses __import__() to import the module.  To pass additional
    options to __import__(), use the __import__kwargs keyword argument.  For
    example, to import a submodule A.B, you must pass a nonempty fromlist option
    to __import__.  See the docstring of __import__().

    This catches ImportError to determine if the module is not installed.  To
    catch additional errors, pass them as a tuple to the catch keyword
    argument.

    Examples
    ========

    >>> from sympy.external import import_module

    >>> numpy = import_module('numpy')

    >>> numpy = import_module('numpy', min_python_version=(2, 7),
    ... warn_old_version=False)

    >>> numpy = import_module('numpy', min_module_version='1.5',
    ... warn_old_version=False) # numpy.__version__ is a string

    >>> # gmpy does not have __version__, but it does have gmpy.version()

    >>> gmpy = import_module('gmpy', min_module_version='1.14',
    ... module_version_attr='version', module_version_attr_call_args=(),
    ... warn_old_version=False)

    >>> # To import a submodule, you must pass a nonempty fromlist to
    >>> # __import__().  The values do not matter.
    >>> p3 = import_module('mpl_toolkits.mplot3d',
    ... __import__kwargs={'fromlist':['something']})

    >>> # matplotlib.pyplot can raise RuntimeError when the display cannot be opened
    >>> matplotlib = import_module('matplotlib',
    ... __import__kwargs={'fromlist':['pyplot']}, catch=(RuntimeError,))

    """
    # keyword argument overrides default, and global variable overrides
    # keyword argument.
    warn_old_version = (WARN_OLD_VERSION if WARN_OLD_VERSION is not None
        else warn_old_version or True)
    warn_not_installed = (WARN_NOT_INSTALLED if WARN_NOT_INSTALLED is not None
        else warn_not_installed or False)

    import warnings

    # Check Python first so we don't waste time importing a module we can't use
    if min_python_version:
        if sys.version_info < min_python_version:
            if warn_old_version:
                warnings.warn("Python version is too old to use %s "
                    "(%s or newer required)" % (
                        module, '.'.join(map(str, min_python_version))),
                    UserWarning)
            return

    # PyPy 1.6 has rudimentary NumPy support and importing it produces errors, so skip it
    if module == 'numpy' and '__pypy__' in sys.builtin_module_names:
        return

    try:
        mod = __import__(module, **__import__kwargs)

        ## there's something funny about imports with matplotlib and py3k. doing
        ##    from matplotlib import collections
        ## gives python's stdlib collections module. explicitly re-importing
        ## the module fixes this.
        from_list = __import__kwargs.get('fromlist', tuple())
        for submod in from_list:
            if submod == 'collections' and mod.__name__ == 'matplotlib':
                __import__(module + '.' + submod)
    except ImportError:
        if warn_not_installed:
            warnings.warn("%s module is not installed" % module, UserWarning)
        return
    except catch as e:
        if warn_not_installed:
            warnings.warn(
                "%s module could not be used (%s)" % (module, repr(e)))
        return

    if min_module_version:
        modversion = getattr(mod, module_version_attr)
        if module_version_attr_call_args is not None:
            modversion = modversion(*module_version_attr_call_args)
        if modversion < min_module_version:
            if warn_old_version:
                # Attempt to create a pretty string version of the version
                if isinstance(min_module_version, basestring):
                    verstr = min_module_version
                elif isinstance(min_module_version, (tuple, list)):
                    verstr = '.'.join(map(str, min_module_version))
                else:
                    # Otherwise we don't know what this is.  Hopefully
                    # it's something that has a nice str version, like an int.
                    verstr = str(min_module_version)
                warnings.warn("%s version is too old to use "
                    "(%s or newer required)" % (module, verstr),
                    UserWarning)
            return

    return mod
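
min_python_version works because sys.version_info compares element-wise
against plain tuples, so a prefix such as (2, 7) is enough; this also
sidesteps the classic string pitfall where "3.10" sorts before "3.9".
A sketch of the bare comparison (python_at_least is an illustrative
helper name):

import sys

def python_at_least(minimum):
    # Element-wise tuple comparison: (3, 9) accepts any 3.9.x or later.
    return sys.version_info >= minimum

print(python_at_least((3, 9)))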

Example 21

Project: GAE-Bulk-Mailer Source File: dictconfig.py
Function: configure
    def configure(self):
        """Do the configuration."""

        config = self.config
        if 'version' not in config:
            raise ValueError("dictionary doesn't specify a version")
        if config['version'] != 1:
            raise ValueError("Unsupported version: %s" % config['version'])
        incremental = config.pop('incremental', False)
        EMPTY_DICT = {}
        logging._acquireLock()
        try:
            if incremental:
                handlers = config.get('handlers', EMPTY_DICT)
                # incremental handler config only if handler name
                # ties in to logging._handlers (Python 2.7)
                if sys.version_info[:2] == (2, 7):
                    for name in handlers:
                        if name not in logging._handlers:
                            raise ValueError('No handler found with '
                                             'name %r'  % name)
                        else:
                            try:
                                handler = logging._handlers[name]
                                handler_config = handlers[name]
                                level = handler_config.get('level', None)
                                if level:
                                    handler.setLevel(_checkLevel(level))
                            except StandardError as e:
                                raise ValueError('Unable to configure handler '
                                                 '%r: %s' % (name, e))
                loggers = config.get('loggers', EMPTY_DICT)
                for name in loggers:
                    try:
                        self.configure_logger(name, loggers[name], True)
                    except StandardError as e:
                        raise ValueError('Unable to configure logger '
                                         '%r: %s' % (name, e))
                root = config.get('root', None)
                if root:
                    try:
                        self.configure_root(root, True)
                    except StandardError as e:
                        raise ValueError('Unable to configure root '
                                         'logger: %s' % e)
            else:
                disable_existing = config.pop('disable_existing_loggers', True)

                logging._handlers.clear()
                del logging._handlerList[:]

                # Do formatters first - they don't refer to anything else
                formatters = config.get('formatters', EMPTY_DICT)
                for name in formatters:
                    try:
                        formatters[name] = self.configure_formatter(
                                                            formatters[name])
                    except StandardError as e:
                        raise ValueError('Unable to configure '
                                         'formatter %r: %s' % (name, e))
                # Next, do filters - they don't refer to anything else, either
                filters = config.get('filters', EMPTY_DICT)
                for name in filters:
                    try:
                        filters[name] = self.configure_filter(filters[name])
                    except StandardError as e:
                        raise ValueError('Unable to configure '
                                         'filter %r: %s' % (name, e))

                # Next, do handlers - they refer to formatters and filters
                # As handlers can refer to other handlers, sort the keys
                # to allow a deterministic order of configuration
                handlers = config.get('handlers', EMPTY_DICT)
                for name in sorted(handlers):
                    try:
                        handler = self.configure_handler(handlers[name])
                        handler.name = name
                        handlers[name] = handler
                    except StandardError as e:
                        raise ValueError('Unable to configure handler '
                                         '%r: %s' % (name, e))
                # Next, do loggers - they refer to handlers and filters

                #we don't want to lose the existing loggers,
                #since other threads may have pointers to them.
                #existing is set to contain all existing loggers,
                #and as we go through the new configuration we
                #remove any which are configured. At the end,
                #what's left in existing is the set of loggers
                #which were in the previous configuration but
                #which are not in the new configuration.
                root = logging.root
                existing = list(root.manager.loggerDict)
                #The list needs to be sorted so that we can
                #avoid disabling child loggers of explicitly
                #named loggers. With a sorted list it is easier
                #to find the child loggers.
                existing.sort()
                #We'll keep the list of existing loggers
                #which are children of named loggers here...
                child_loggers = []
                #now set up the new ones...
                loggers = config.get('loggers', EMPTY_DICT)
                for name in loggers:
                    if name in existing:
                        i = existing.index(name)
                        prefixed = name + "."
                        pflen = len(prefixed)
                        num_existing = len(existing)
                        i = i + 1 # look at the entry after name
                        while (i < num_existing) and\
                              (existing[i][:pflen] == prefixed):
                            child_loggers.append(existing[i])
                            i = i + 1
                        existing.remove(name)
                    try:
                        self.configure_logger(name, loggers[name])
                    except StandardError as e:
                        raise ValueError('Unable to configure logger '
                                         '%r: %s' % (name, e))

                #Disable any old loggers. There's no point deleting
                #them as other threads may continue to hold references
                #and by disabling them, you stop them doing any logging.
                #However, don't disable children of named loggers, as that's
                #probably not what was intended by the user.
                for log in existing:
                    logger = root.manager.loggerDict[log]
                    if log in child_loggers:
                        logger.level = logging.NOTSET
                        logger.handlers = []
                        logger.propagate = True
                    elif disable_existing:
                        logger.disabled = True

                # And finally, do the root logger
                root = config.get('root', None)
                if root:
                    try:
                        self.configure_root(root)
                    except StandardError as e:
                        raise ValueError('Unable to configure root '
                                         'logger: %s' % e)
        finally:
            logging._releaseLock()
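
The slice in sys.version_info[:2] == (2, 7) turns the check into an
exact major.minor match: true for every 2.7.x micro release, false for
2.6 and for all of 3.x, which a single ordered comparison cannot
express. The idiom in isolation (the 3.12 target is illustrative):

import sys

# Slicing drops micro, releaselevel and serial, leaving (major, minor).
if sys.version_info[:2] == (3, 12):
    print("running on some CPython 3.12.x")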

Example 22

Project: auto-sklearn Source File: ensemble_builder.py
    def main(self):

        watch = StopWatch()
        watch.start_task('ensemble_builder')

        used_time = 0
        time_iter = 0
        index_run = 0
        num_iteration = 0
        current_num_models = 0
        last_hash = None
        current_hash = None

        dir_ensemble = os.path.join(self.backend.temporary_directory,
                                    '.auto-sklearn',
                                    'predictions_ensemble')
        dir_valid = os.path.join(self.backend.temporary_directory,
                                 '.auto-sklearn',
                                 'predictions_valid')
        dir_test = os.path.join(self.backend.temporary_directory,
                                '.auto-sklearn',
                                'predictions_test')
        paths_ = [dir_ensemble, dir_valid, dir_test]

        dir_ensemble_list_mtimes = []

        self.logger.debug('Starting main loop with %f seconds and %d iterations '
                          'left.' % (self.limit - used_time, num_iteration))
        while used_time < self.limit or (self.max_iterations > 0 and
                                         self.max_iterations >= num_iteration):
            num_iteration += 1
            self.logger.debug('Time left: %f', self.limit - used_time)
            self.logger.debug('Time last ensemble building: %f', time_iter)

            # Reload the ensemble targets every iteration, important, because cv may
            # update the ensemble targets in the course of running auto-sklearn
            # TODO update cv in order to not need this any more!
            targets_ensemble = self.backend.load_targets_ensemble()

            # Load the predictions from the models
            exists = [os.path.isdir(dir_) for dir_ in paths_]
            if not exists[0]:  # all(exists):
                self.logger.debug('Prediction directory %s does not exist!' %
                              dir_ensemble)
                time.sleep(2)
                used_time = watch.wall_elapsed('ensemble_builder')
                continue

            if self.shared_mode is False:
                dir_ensemble_list = sorted(glob.glob(os.path.join(
                    dir_ensemble, 'predictions_ensemble_%s_*.npy' % self.seed)))
                if exists[1]:
                    dir_valid_list = sorted(glob.glob(os.path.join(
                        dir_valid, 'predictions_valid_%s_*.npy' % self.seed)))
                else:
                    dir_valid_list = []
                if exists[2]:
                    dir_test_list = sorted(glob.glob(os.path.join(
                        dir_test, 'predictions_test_%s_*.npy' % self.seed)))
                else:
                    dir_test_list = []
            else:
                dir_ensemble_list = sorted(os.listdir(dir_ensemble))
                dir_valid_list = sorted(os.listdir(dir_valid)) if exists[1] else []
                dir_test_list = sorted(os.listdir(dir_test)) if exists[2] else []

            # Check the modification times because predictions can be updated
            # over time!
            old_dir_ensemble_list_mtimes = dir_ensemble_list_mtimes
            dir_ensemble_list_mtimes = []
            # The ensemble dir can contain non-model files. We filter them and
            # use the following list instead
            dir_ensemble_model_files = []

            for dir_ensemble_file in dir_ensemble_list:
                if dir_ensemble_file.endswith("/"):
                    dir_ensemble_file = dir_ensemble_file[:-1]
                if not dir_ensemble_file.endswith(".npy"):
                    self.logger.warning('Error loading file (not .npy): %s', dir_ensemble_file)
                    continue

                dir_ensemble_model_files.append(dir_ensemble_file)
                basename = os.path.basename(dir_ensemble_file)
                dir_ensemble_file = os.path.join(dir_ensemble, basename)
                mtime = os.path.getmtime(dir_ensemble_file)
                dir_ensemble_list_mtimes.append(mtime)

            if len(dir_ensemble_model_files) == 0:
                self.logger.debug('Directories are empty')
                time.sleep(2)
                used_time = watch.wall_elapsed('ensemble_builder')
                continue

            if len(dir_ensemble_model_files) <= current_num_models and \
                    old_dir_ensemble_list_mtimes == dir_ensemble_list_mtimes:
                self.logger.debug('Nothing has changed since the last time')
                time.sleep(2)
                used_time = watch.wall_elapsed('ensemble_builder')
                continue

            with warnings.catch_warnings():
                warnings.simplefilter('ignore')
                # TODO restructure time management in the ensemble builder,
                # what is the time of index_run actually needed for?
                watch.start_task('index_run' + str(index_run))
            watch.start_task('ensemble_iter_' + str(num_iteration))

            # List of num_runs (which are in the filename) which will be included
            #  later
            include_num_runs = []
            backup_num_runs = []
            model_and_automl_re = re.compile(r'_([0-9]*)_([0-9]*)\.npy$')
            if self.ensemble_nbest is not None:
                # Keeps track of the single scores of each model in our ensemble
                scores_nbest = []
                # The indices of the model that are currently in our ensemble
                indices_nbest = []
                # The names of the models
                model_names = []

            model_names_to_scores = dict()

            model_idx = 0
            for model_name in dir_ensemble_model_files:
                if model_name.endswith("/"):
                    model_name = model_name[:-1]
                basename = os.path.basename(model_name)

                try:
                    if self.precision is "16":
                        predictions = np.load(os.path.join(dir_ensemble, basename)).astype(dtype=np.float16)
                    elif self.precision is "32":
                        predictions = np.load(os.path.join(dir_ensemble, basename)).astype(dtype=np.float32)
                    elif self.precision is "64":
                        predictions = np.load(os.path.join(dir_ensemble, basename)).astype(dtype=np.float64)
                    else:
                        predictions = np.load(os.path.join(dir_ensemble, basename))

                    score = calculate_score(targets_ensemble, predictions,
                                            self.task_type, self.metric,
                                            predictions.shape[1])

                except Exception as e:
                    self.logger.warning('Error loading %s: %s - %s',
                                        basename, type(e), e)
                    score = -1

                model_names_to_scores[model_name] = score
                match = model_and_automl_re.search(model_name)
                automl_seed = int(match.group(1))
                num_run = int(match.group(2))

                if self.ensemble_nbest is not None:
                    if score <= 0.001:
                        self.logger.info('Model only predicts at random: ' +
                                         model_name + ' has score: ' + str(score))
                        backup_num_runs.append((automl_seed, num_run))
                    # If we have less models in our ensemble than ensemble_nbest add
                    # the current model if it is better than random
                    elif len(scores_nbest) < self.ensemble_nbest:
                        scores_nbest.append(score)
                        indices_nbest.append(model_idx)
                        include_num_runs.append((automl_seed, num_run))
                        model_names.append(model_name)
                    else:
                        # Take the worst performing model in our ensemble so far
                        idx = np.argmin(np.array(scores_nbest))

                        # If the current model is better than the worst model in
                        # our ensemble replace it by the current model
                        if scores_nbest[idx] < score:
                            self.logger.info(
                                'Worst model in our ensemble: %s with score %f '
                                'will be replaced by model %s with score %f',
                                model_names[idx], scores_nbest[idx], model_name,
                                score)
                            # Exclude the old model
                            del scores_nbest[idx]
                            scores_nbest.append(score)
                            del include_num_runs[idx]
                            del indices_nbest[idx]
                            indices_nbest.append(model_idx)
                            include_num_runs.append((automl_seed, num_run))
                            del model_names[idx]
                            model_names.append(model_name)

                        # Otherwise exclude the current model from the ensemble
                        else:
                            # include_num_runs.append(True)
                            pass

                else:
                    # Load all predictions that are better than random
                    if score <= 0.001:
                        # include_num_runs.append(True)
                        self.logger.info('Model only predicts at random: ' +
                                         model_name + ' has score: ' +
                                         str(score))
                        backup_num_runs.append((automl_seed, num_run))
                    else:
                        include_num_runs.append((automl_seed, num_run))

                model_idx += 1

            # If there is no model better than random guessing, we have to use
            # all models which do random guessing
            if len(include_num_runs) == 0:
                include_num_runs = backup_num_runs

            indices_to_model_names = dict()
            indices_to_run_num = dict()
            for i, model_name in enumerate(dir_ensemble_model_files):
                match = model_and_automl_re.search(model_name)
                automl_seed = int(match.group(1))
                num_run = int(match.group(2))
                if (automl_seed, num_run) in include_num_runs:
                    num_indices = len(indices_to_model_names)
                    indices_to_model_names[num_indices] = model_name
                    indices_to_run_num[num_indices] = (automl_seed, num_run)

            try:
                all_predictions_train, all_predictions_valid, all_predictions_test =\
                    self.get_all_predictions(dir_ensemble,
                                             dir_ensemble_model_files,
                                             dir_valid, dir_valid_list,
                                             dir_test, dir_test_list,
                                             include_num_runs,
                                             model_and_automl_re,
                                             self.precision)
            except IOError:
                self.logger.error('Could not load the predictions.')
                continue

            if len(include_num_runs) == 0:
                self.logger.error('All models do just random guessing')
                time.sleep(2)
                continue

            else:
                ensemble = EnsembleSelection(ensemble_size=self.ensemble_size,
                                             task_type=self.task_type,
                                             metric=self.metric)

                try:
                    ensemble.fit(all_predictions_train, targets_ensemble,
                                 include_num_runs)
                    self.logger.info(ensemble)

                except ValueError as e:
                    self.logger.error('Caught ValueError: ' + str(e))
                    used_time = watch.wall_elapsed('ensemble_builder')
                    time.sleep(2)
                    continue
                except IndexError as e:
                    self.logger.error('Caught IndexError: ' + str(e))
                    used_time = watch.wall_elapsed('ensemble_builder')
                    time.sleep(2)
                    continue
                except Exception as e:
                    self.logger.error('Caught error! %s', str(e))
                    used_time = watch.wall_elapsed('ensemble_builder')
                    time.sleep(2)
                    continue

                # Output the score
                self.logger.info('Training performance: %f' % ensemble.train_score_)

                self.logger.info('Building the ensemble took %f seconds' %
                                 watch.wall_elapsed('ensemble_iter_' + str(num_iteration)))

            # Set this variable here to avoid re-running the ensemble builder
            # every two seconds in case the ensemble did not change
            current_num_models = len(dir_ensemble_model_files)

            ensemble_predictions = ensemble.predict(all_predictions_train)
            if sys.version_info[0] == 2:
                ensemble_predictions.flags.writeable = False
                current_hash = hash(ensemble_predictions.data)
            else:
                current_hash = hash(ensemble_predictions.data.tobytes())

            # Only output a new ensemble and new predictions if the output of the
            # ensemble would actually change!
            # TODO this is not safe (hash collisions are possible, and the
            # check only covers the ensemble predictions, not the ensemble
            # itself); implement a proper hash function for each possible
            # ensemble builder.
            if last_hash is not None:
                if current_hash == last_hash:
                    self.logger.info('Ensemble output did not change.')
                    time.sleep(2)
                    continue
                else:
                    last_hash = current_hash
            else:
                last_hash = current_hash

            # Save the ensemble for later use in the main auto-sklearn module!
            self.backend.save_ensemble(ensemble, index_run, self.seed)

            # Save predictions for valid and test data set
            if len(dir_valid_list) == len(dir_ensemble_model_files):
                all_predictions_valid = np.array(all_predictions_valid)
                ensemble_predictions_valid = ensemble.predict(all_predictions_valid)
                if self.task_type == BINARY_CLASSIFICATION:
                    ensemble_predictions_valid = ensemble_predictions_valid[:, 1]
                if self.low_precision:
                    if self.task_type in [BINARY_CLASSIFICATION, MULTICLASS_CLASSIFICATION, MULTILABEL_CLASSIFICATION]:
                        ensemble_predictions_valid[ensemble_predictions_valid < 1e-4] = 0.
                    if self.metric in [BAC_METRIC, F1_METRIC]:
                        bin_array = np.zeros(ensemble_predictions_valid.shape, dtype=np.int32)
                        if (self.task_type != MULTICLASS_CLASSIFICATION) or (
                            ensemble_predictions_valid.shape[1] == 1):
                            bin_array[ensemble_predictions_valid >= 0.5] = 1
                        else:
                            sample_num = ensemble_predictions_valid.shape[0]
                            for i in range(sample_num):
                                j = np.argmax(ensemble_predictions_valid[i, :])
                                bin_array[i, j] = 1
                        ensemble_predictions_valid = bin_array
                    if self.task_type in CLASSIFICATION_TASKS:
                        if ensemble_predictions_valid.size < (20000 * 20):
                            precision = 3
                        else:
                            precision = 2
                    else:
                        if ensemble_predictions_valid.size > 1000000:
                            precision = 4
                        else:
                            # File size maximally 2.1MB
                            precision = 6

                self.backend.save_predictions_as_txt(ensemble_predictions_valid,
                                                     'valid', index_run, prefix=self.dataset_name,
                                                     precision=precision)
            else:
                self.logger.info('Could not find as many validation set predictions (%d) '
                                 'as ensemble predictions (%d)!',
                                 len(dir_valid_list), len(dir_ensemble_model_files))

            del all_predictions_valid

            if len(dir_test_list) == len(dir_ensemble_model_files):
                all_predictions_test = np.array(all_predictions_test)
                ensemble_predictions_test = ensemble.predict(all_predictions_test)
                if self.task_type == BINARY_CLASSIFICATION:
                    ensemble_predictions_test = ensemble_predictions_test[:, 1]
                if self.low_precision:
                    if self.task_type in [BINARY_CLASSIFICATION, MULTICLASS_CLASSIFICATION, MULTILABEL_CLASSIFICATION]:
                        ensemble_predictions_test[ensemble_predictions_test < 1e-4] = 0.
                    if self.metric in [BAC_METRIC, F1_METRIC]:
                        bin_array = np.zeros(ensemble_predictions_test.shape,
                                             dtype=np.int32)
                        if (self.task_type != MULTICLASS_CLASSIFICATION) or (
                                    ensemble_predictions_test.shape[1] == 1):
                            bin_array[ensemble_predictions_test >= 0.5] = 1
                        else:
                            sample_num = ensemble_predictions_test.shape[0]
                            for i in range(sample_num):
                                j = np.argmax(ensemble_predictions_test[i, :])
                                bin_array[i, j] = 1
                        ensemble_predictions_test = bin_array
                    if self.task_type in CLASSIFICATION_TASKS:
                        if ensemble_predictions_test.size < (20000 * 20):
                            precision = 3
                        else:
                            precision = 2
                    else:
                        if ensemble_predictions_test.size > 1000000:
                            precision = 4
                        else:
                            precision = 6

                self.backend.save_predictions_as_txt(ensemble_predictions_test,
                                                     'test', index_run, prefix=self.dataset_name,
                                                     precision=precision)
            else:
                self.logger.info('Could not find as many test set predictions (%d) as '
                                 'ensemble predictions (%d)!',
                                 len(dir_test_list), len(dir_ensemble_model_files))

            del all_predictions_test

            current_num_models = len(dir_ensemble_model_files)
            watch.stop_task('index_run' + str(index_run))
            time_iter = watch.get_wall_dur('index_run' + str(index_run))
            used_time = watch.wall_elapsed('ensemble_builder')
            index_run += 1
        return
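
The version switch around the prediction hash above is a reusable idiom: on Python 2 a NumPy array's buffer is hashable once the array is made read-only, while on Python 3 the memoryview is not hashable, so the raw bytes are hashed instead. A minimal, self-contained sketch (the helper name is ours):

import sys

import numpy as np

def hash_array(arr):
    """Hash the raw contents of a NumPy array on Python 2 and 3."""
    if sys.version_info[0] == 2:
        arr.flags.writeable = False  # buffers must be read-only to hash
        return hash(arr.data)
    return hash(arr.data.tobytes())

print(hash_array(np.arange(10)))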

Example 23

Project: CythonCTypesBackend Source File: setup.py
Function: compile_cython_modules
def compile_cython_modules(profile=False, compile_more=False, cython_with_refnanny=False):
    source_root = os.path.abspath(os.path.dirname(__file__))
    compiled_modules = ["Cython.Plex.Scanners",
                        "Cython.Plex.Actions",
                        "Cython.Compiler.Lexicon",
                        "Cython.Compiler.Scanning",
                        "Cython.Compiler.Parsing",
                        "Cython.Compiler.Visitor",
                        "Cython.Compiler.FlowControl",
                        "Cython.Compiler.Code",
                        "Cython.Runtime.refnanny",
                        ]
    if compile_more:
        compiled_modules.extend([
            "Cython.Compiler.ParseTreeTransforms",
            "Cython.Compiler.Nodes",
            "Cython.Compiler.ExprNodes",
            "Cython.Compiler.ModuleNode",
            "Cython.Compiler.Optimize",
            ])

    defines = []
    if cython_with_refnanny:
        defines.append(('CYTHON_REFNANNY', '1'))

    extensions = []
    if sys.version_info[0] >= 3:
        from Cython.Distutils import build_ext as build_ext_orig
        for module in compiled_modules:
            source_file = os.path.join(source_root, *module.split('.'))
            if os.path.exists(source_file + ".py"):
                pyx_source_file = source_file + ".py"
            else:
                pyx_source_file = source_file + ".pyx"
            dep_files = []
            if os.path.exists(source_file + '.pxd'):
                dep_files.append(source_file + '.pxd')
            if '.refnanny' in module:
                defines_for_module = []
            else:
                defines_for_module = defines
            extensions.append(
                Extension(module, sources = [pyx_source_file],
                          define_macros = defines_for_module,
                          depends = dep_files)
                )

        class build_ext(build_ext_orig):
            # we must keep the original modules alive to make sure
            # their code keeps working when we remove them from
            # sys.modules
            dead_modules = []

            def build_extensions(self):
                # add path where 2to3 installed the transformed sources
                # and make sure Python (re-)imports them from there
                already_imported = [ module for module in sys.modules
                                     if module == 'Cython' or module.startswith('Cython.') ]
                keep_alive = self.dead_modules.append
                for module in already_imported:
                    keep_alive(sys.modules[module])
                    del sys.modules[module]
                sys.path.insert(0, os.path.join(source_root, self.build_lib))

                if profile:
                    from Cython.Compiler.Options import directive_defaults
                    directive_defaults['profile'] = True
                    print("Enabled profiling for the Cython binary modules")
                build_ext_orig.build_extensions(self)

        setup_args['ext_modules'] = extensions
        add_command_class("build_ext", build_ext)

    else: # Python 2.x
        from distutils.command.build_ext import build_ext as build_ext_orig
        try:
            class build_ext(build_ext_orig):
                def build_extension(self, ext, *args, **kargs):
                    try:
                        build_ext_orig.build_extension(self, ext, *args, **kargs)
                    except StandardError:
                        print("Compilation of '%s' failed" % ext.sources[0])
            from Cython.Compiler.Main import compile
            from Cython import Utils
            if profile:
                from Cython.Compiler.Options import directive_defaults
                directive_defaults['profile'] = True
                print("Enabled profiling for the Cython binary modules")
            source_root = os.path.dirname(__file__)
            for module in compiled_modules:
                source_file = os.path.join(source_root, *module.split('.'))
                if os.path.exists(source_file + ".py"):
                    pyx_source_file = source_file + ".py"
                else:
                    pyx_source_file = source_file + ".pyx"
                c_source_file = source_file + ".c"
                source_is_newer = False
                if not os.path.exists(c_source_file):
                    source_is_newer = True
                else:
                    c_last_modified = Utils.modification_time(c_source_file)
                    if Utils.file_newer_than(pyx_source_file, c_last_modified):
                        source_is_newer = True
                    else:
                        pxd_source_file = source_file + ".pxd"
                        if os.path.exists(pxd_source_file) and Utils.file_newer_than(pxd_source_file, c_last_modified):
                            source_is_newer = True
                if source_is_newer:
                    print("Compiling module %s ..." % module)
                    result = compile(pyx_source_file)
                    c_source_file = result.c_file
                if c_source_file:
                    # Py2 distutils can't handle unicode file paths
                    if isinstance(c_source_file, unicode):
                        filename_encoding = sys.getfilesystemencoding()
                        if filename_encoding is None:
                            filename_encoding = sys.getdefaultencoding()
                        c_source_file = c_source_file.encode(filename_encoding)
                    if '.refnanny' in module:
                        defines_for_module = []
                    else:
                        defines_for_module = defines
                    extensions.append(
                        Extension(module, sources = [c_source_file],
                                  define_macros = defines_for_module)
                        )
                else:
                    print("Compilation failed")
            if extensions:
                setup_args['ext_modules'] = extensions
                add_command_class("build_ext", build_ext)
        except Exception:
            print('''
ERROR: %s

Extension module compilation failed, looks like Cython cannot run
properly on this system.  To work around this, pass the option
"--no-cython-compile".  This will install a pure Python version of
Cython without compiling its own sources.
''' % sys.exc_info()[1])
            raise
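
Note that the Python 2 branch above catches StandardError, which was removed in Python 3. When a module must at least import cleanly on both lines, a common shim looks like this (a sketch, not part of the setup.py above):

import sys

if sys.version_info[0] >= 3:
    # StandardError is gone in Python 3; Exception is the closest base class.
    StandardError = Exception

try:
    raise ValueError("demo")
except StandardError as e:
    print("caught: %s" % e)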

Example 24

Project: python-uncompyle6 Source File: uncompile.py
def main_bin():
    if not (sys.version_info[0:2] in ((2, 6), (2, 7), (3, 2), (3, 3), (3, 4), (3, 5), (3, 6))):
        print('Error: %s requires Python 2.6, 2.7, 3.2, 3.3, 3.4, 3.5, or 3.6' % program,
              file=sys.stderr)
        sys.exit(-1)

    do_verify = recurse_dirs = False
    numproc = 0
    outfile = '-'
    out_base = None
    codes = []
    timestamp = False
    timestampfmt = "# %Y.%m.%d %H:%M:%S %Z"

    try:
        opts, files = getopt.getopt(sys.argv[1:], 'hagtdrVo:c:p:',
                                    'help asm grammar recurse timestamp tree verify version '
                                    'showgrammar'.split(' '))
    except getopt.GetoptError as e:
        print('%s: %s' % (os.path.basename(sys.argv[0]), e),  file=sys.stderr)
        sys.exit(-1)

    options = {}
    for opt, val in opts:
        if opt in ('-h', '--help'):
            print(__doc__)
            sys.exit(0)
        elif opt in ('-V', '--version'):
            print("%s %s" % (program, VERSION))
            sys.exit(0)
        elif opt == '--verify':
            options['do_verify'] = True
        elif opt in ('--asm', '-a'):
            options['showasm'] = 'after'
            options['do_verify'] = False
        elif opt in ('--tree', '-t'):
            options['showast'] = True
            options['do_verify'] = False
        elif opt in ('--grammar', '-g'):
            options['showgrammar'] = True
        elif opt == '-o':
            outfile = val
        elif opt in ('--timestamp', '-d'):
            timestamp = True
        elif opt == '-c':
            codes.append(val)
        elif opt == '-p':
            numproc = int(val)
        elif opt in ('--recurse', '-r'):
            recurse_dirs = True
        else:
            print(opt, file=sys.stderr)
            usage()

    # expand directory if specified
    if recurse_dirs:
        expanded_files = []
        for f in files:
            if os.path.isdir(f):
                for root, _, dir_files in os.walk(f):
                    for df in dir_files:
                        if df.endswith('.pyc') or df.endswith('.pyo'):
                            expanded_files.append(os.path.join(root, df))
        files = expanded_files

    # argl, commonprefix works on strings, not on path parts,
    # thus we must handle the case with files in 'some/classes'
    # and 'some/cmds'
    src_base = os.path.commonprefix(files)
    if src_base[-1:] != os.sep:
        src_base = os.path.dirname(src_base)
    if src_base:
        sb_len = len( os.path.join(src_base, '') )
        files = [f[sb_len:] for f in files]
        del sb_len

    if not files:
        print("No files given", file=sys.stderr)
        usage()

    if outfile == '-':
        if 'do_verify' in options and options['do_verify'] and len(files) == 1:
            junk, outfile = tempfile.mkstemp(suffix=".pyc",
                                             prefix=files[0][0:-4]+'-')
        else:
            outfile = None # use stdout
    elif outfile and os.path.isdir(outfile):
        out_base = outfile; outfile = None
    elif outfile and len(files) > 1:
        out_base = outfile; outfile = None

    if timestamp:
        print(time.strftime(timestampfmt))

    if numproc <= 1:
        try:
            result = main(src_base, out_base, files, codes, outfile,
                          **options)
            if len(files) > 1:
                mess = status_msg(do_verify, *result)
                print('# ' + mess)
                pass
        except (KeyboardInterrupt):
            pass
        except verify.VerifyCmpError:
            raise
    else:
        from multiprocessing import Process, Queue

        try:
            from Queue import Empty   # Python 2
        except ImportError:
            from queue import Empty   # Python 3

        fqueue = Queue(len(files)+numproc)
        for f in files:
            fqueue.put(f)
        for i in range(numproc):
            fqueue.put(None)

        rqueue = Queue(numproc)

        def process_func():
            try:
                (tot_files, okay_files, failed_files, verify_failed_files) = (0, 0, 0, 0)
                while 1:
                    f = fqueue.get()
                    if f is None:
                        break
                    (t, o, f, v) = \
                      main(src_base, out_base, [f], codes, outfile, **options)
                    tot_files += t
                    okay_files += o
                    failed_files += f
                    verify_failed_files += v
            except (Empty, KeyboardInterrupt):
                pass
            rqueue.put((tot_files, okay_files, failed_files, verify_failed_files))
            rqueue.close()

        try:
            procs = [Process(target=process_func) for i in range(numproc)]
            for p in procs:
                p.start()
            for p in procs:
                p.join()
            try:
                (tot_files, okay_files, failed_files, verify_failed_files) = (0, 0, 0, 0)
                while True:
                    (t, o, f, v) = rqueue.get(False)
                    tot_files += t
                    okay_files += o
                    failed_files += f
                    verify_failed_files += v
            except Empty:
                pass
            print('# decompiled %i files: %i okay, %i failed, %i verify failed' %
                  (tot_files, okay_files, failed_files, verify_failed_files))
        except (KeyboardInterrupt, OSError):
            pass


    if timestamp:
        print(time.strftime(timestampfmt))

    return
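
The interpreter gate at the top of main_bin() works by slicing sys.version_info down to (major, minor) and testing membership in a tuple of supported pairs. The same check, stripped to its essentials (the message text is ours):

import sys

SUPPORTED = ((2, 6), (2, 7), (3, 2), (3, 3), (3, 4), (3, 5), (3, 6))

if sys.version_info[0:2] not in SUPPORTED:
    sys.stderr.write("unsupported Python %d.%d\n" % sys.version_info[:2])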

Example 25

Project: PyClassLessons Source File: dictconfig.py
Function: configure
    def configure(self):
        """Do the configuration."""

        config = self.config
        if 'version' not in config:
            raise ValueError("dictionary doesn't specify a version")
        if config['version'] != 1:
            raise ValueError("Unsupported version: %s" % config['version'])
        incremental = config.pop('incremental', False)
        EMPTY_DICT = {}
        logging._acquireLock()
        try:
            if incremental:
                handlers = config.get('handlers', EMPTY_DICT)
                # incremental handler config only if handler name
                # ties in to logging._handlers (Python 2.7)
                if sys.version_info[:2] == (2, 7):
                    for name in handlers:
                        if name not in logging._handlers:
                            raise ValueError('No handler found with '
                                             'name %r'  % name)
                        else:
                            try:
                                handler = logging._handlers[name]
                                handler_config = handlers[name]
                                level = handler_config.get('level', None)
                                if level:
                                    handler.setLevel(_checkLevel(level))
                            except StandardError as e:
                                raise ValueError('Unable to configure handler '
                                                 '%r: %s' % (name, e))
                loggers = config.get('loggers', EMPTY_DICT)
                for name in loggers:
                    try:
                        self.configure_logger(name, loggers[name], True)
                    except StandardError as e:
                        raise ValueError('Unable to configure logger '
                                         '%r: %s' % (name, e))
                root = config.get('root', None)
                if root:
                    try:
                        self.configure_root(root, True)
                    except StandardError as e:
                        raise ValueError('Unable to configure root '
                                         'logger: %s' % e)
            else:
                disable_existing = config.pop('disable_existing_loggers', True)

                logging._handlers.clear()
                del logging._handlerList[:]

                # Do formatters first - they don't refer to anything else
                formatters = config.get('formatters', EMPTY_DICT)
                for name in formatters:
                    try:
                        formatters[name] = self.configure_formatter(
                                                            formatters[name])
                    except StandardError as e:
                        raise ValueError('Unable to configure '
                                         'formatter %r: %s' % (name, e))
                # Next, do filters - they don't refer to anything else, either
                filters = config.get('filters', EMPTY_DICT)
                for name in filters:
                    try:
                        filters[name] = self.configure_filter(filters[name])
                    except StandardError as e:
                        raise ValueError('Unable to configure '
                                         'filter %r: %s' % (name, e))

                # Next, do handlers - they refer to formatters and filters
                # As handlers can refer to other handlers, sort the keys
                # to allow a deterministic order of configuration
                handlers = config.get('handlers', EMPTY_DICT)
                for name in sorted(handlers):
                    try:
                        handler = self.configure_handler(handlers[name])
                        handler.name = name
                        handlers[name] = handler
                    except StandardError as e:
                        raise ValueError('Unable to configure handler '
                                         '%r: %s' % (name, e))
                # Next, do loggers - they refer to handlers and filters

                # we don't want to lose the existing loggers,
                # since other threads may have pointers to them.
                # existing is set to contain all existing loggers,
                # and as we go through the new configuration we
                # remove any which are configured. At the end,
                # what's left in existing is the set of loggers
                # which were in the previous configuration but
                # which are not in the new configuration.
                root = logging.root
                existing = list(root.manager.loggerDict)
                # The list needs to be sorted so that we can
                # avoid disabling child loggers of explicitly
                # named loggers. With a sorted list it is easier
                # to find the child loggers.
                existing.sort()
                # We'll keep the list of existing loggers
                # which are children of named loggers here...
                child_loggers = []
                # now set up the new ones...
                loggers = config.get('loggers', EMPTY_DICT)
                for name in loggers:
                    if name in existing:
                        i = existing.index(name)
                        prefixed = name + "."
                        pflen = len(prefixed)
                        num_existing = len(existing)
                        i = i + 1  # look at the entry after name
                        while (i < num_existing) and\
                              (existing[i][:pflen] == prefixed):
                            child_loggers.append(existing[i])
                            i = i + 1
                        existing.remove(name)
                    try:
                        self.configure_logger(name, loggers[name])
                    except StandardError as e:
                        raise ValueError('Unable to configure logger '
                                         '%r: %s' % (name, e))

                # Disable any old loggers. There's no point deleting
                # them as other threads may continue to hold references
                # and by disabling them, you stop them doing any logging.
                # However, don't disable children of named loggers, as that's
                # probably not what was intended by the user.
                for log in existing:
                    logger = root.manager.loggerDict[log]
                    if log in child_loggers:
                        logger.level = logging.NOTSET
                        logger.handlers = []
                        logger.propagate = True
                    elif disable_existing:
                        logger.disabled = True

                # And finally, do the root logger
                root = config.get('root', None)
                if root:
                    try:
                        self.configure_root(root)
                    except StandardError as e:
                        raise ValueError('Unable to configure root '
                                         'logger: %s' % e)
        finally:
            logging._releaseLock()
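
The sys.version_info[:2] == (2, 7) test above works because version_info compares element-wise like any tuple: a prefix slice gives an exact check on one release line, while a full comparison gives an ordered check. A brief illustration (general Python behaviour, not from dictconfig):

import sys

exactly_27 = sys.version_info[:2] == (2, 7)  # one specific release line
at_least_27 = sys.version_info >= (2, 7)     # 2.7 and everything newer
print(exactly_27, at_least_27)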

Example 26

Project: shellsploit-framework Source File: compression.py
def zip_pack(filepath, options):
    """
    Creates a zip archive containing the script at *filepath* along with all
    imported modules that are local to *filepath* as a self-extracting python
    script.  A shebang will be appended to the beginning of the resulting
    zip archive which will allow it to be executed like a shell script.

    If being run inside Python 3 and the `lzma` module is available the
    resulting 'pyz' file will use ZIP_LZMA compression to maximize compression.

    *options* is expected to be the same options parsed from pyminifier.py
    on the command line.

    .. note::

        * The file resulting from this method cannot be imported as a module into another python program (command line execution only).
        * Any required local (implied path) modules will be automatically included (well, it does its best).
        * The result will be saved as a .pyz file (which is an extension I invented for this format).
    """
    import zipfile
    # Hopefully some day we'll be able to use ZIP_LZMA too as the compression
    # format to save even more space...
    compression_format = zipfile.ZIP_DEFLATED
    cumulative_size = 0  # For tracking size reduction stats
    # Record the filesize for later comparison
    cumulative_size += os.path.getsize(filepath)
    dest = options.pyz
    z = zipfile.ZipFile(dest, "w", compression_format)
    # Take care of minifying our primary script first:
    source = open(filepath).read()
    primary_tokens = token_utils.listified_tokenizer(source)
    # Preserve shebangs (don't care about encodings for this)
    shebang = analyze.get_shebang(primary_tokens)
    if not shebang:
        # We *must* have a shebang for this to work so make a conservative default:
        shebang = "#!/usr/bin/env python"
    if py3:
        if shebang.rstrip().endswith('python'):  # Make it python3 (to be safe)
            shebang = shebang.rstrip()
            shebang += '3\n'  # !/usr/bin/env python3
    if not options.nominify:  # Minify as long as we don't have this option set
        source = minification.minify(primary_tokens, options)
    # Write out to a temporary file to add to our zip
    temp = tempfile.NamedTemporaryFile(mode='w')
    temp.write(source)
    temp.flush()
    # Need the path where the script lives for the next steps:
    path = os.path.split(filepath)[0]
    if not path:
        path = os.getcwd()
    main_py = path + '/__main__.py'
    if os.path.exists(main_py):
        # There's an existing __main__.py, use it
        z.write(main_py, '__main__.py')
        z.write(temp.name, os.path.split(filepath)[1])
    else:
        # No __main__.py so we rename our (minified) main script to be the
        # __main__.py.  This is so it will still execute as a zip.
        z.write(temp.name, '__main__.py')
    temp.close()
    # Now write any required modules into the zip as well
    local_modules = analyze.enumerate_local_modules(primary_tokens, path)
    name_generator = None  # So we can tell if we need to obfuscate
    if options.obfuscate or options.obf_classes \
            or options.obf_functions or options.obf_variables \
            or options.obf_builtins or options.obf_import_methods:
        # Put together the name generator that will be used for all obfuscation functions:
        identifier_length = int(options.replacement_length)
        if options.use_nonlatin:
            if sys.version_info[0] == 3:
                name_generator = obfuscate.obfuscation_machine(
                    use_unicode=True, identifier_length=identifier_length
                )
            else:
                print(
                    "ERROR: You can't use nonlatin characters without Python 3")
                sys.exit(2)
        else:
            name_generator = obfuscate.obfuscation_machine(
                identifier_length=identifier_length)
        table = [{}]
    included_modules = []
    for module in local_modules:
        module = module.replace('.', '/')
        module = "%s.py" % module
        # Add the filesize to our total
        cumulative_size += os.path.getsize(module)
        # Also record that we've added it to the archive
        included_modules.append(module)
        # Minify these files too
        source = open(os.path.join(path, module)).read()
        tokens = token_utils.listified_tokenizer(source)
        maybe_more_modules = analyze.enumerate_local_modules(tokens, path)
        for mod in maybe_more_modules:
            if mod not in local_modules:
                local_modules.append(mod)  # Extend the current loop, love it =)
        if not options.nominify:
            # Perform minification (this also handles obfuscation)
            source = minification.minify(tokens, options)
        # Have to re-tokenize for obfuscation (it's quick):
        tokens = token_utils.listified_tokenizer(source)
        # Perform obfuscation if any of the related options were set
        if name_generator:
            obfuscate.obfuscate(
                module,
                tokens,
                options,
                name_generator=name_generator,
                table=table
            )
        # Convert back to text
        result = token_utils.untokenize(tokens)
        # Write out to a temporary file to add to our zip
        temp = tempfile.NamedTemporaryFile(mode='w')
        temp.write(result)
        temp.flush()
        z.write(temp.name, module)
        temp.close()
    z.close()
    # Finish up by writing the shebang to the beginning of the zip
    prepend(shebang, dest)
    os.chmod(dest, 0o755)  # Make it executable (since we added the shebang)
    pyz_filesize = os.path.getsize(dest)
    percent_saved = round(float(pyz_filesize) / float(cumulative_size) * 100, 2)
    print('%s saved as compressed executable zip: %s' % (filepath, dest))
    print('The following modules were automatically included (as automagic '
          'dependencies):\n')
    for module in included_modules:
        print('\t%s' % module)
    print('\nOverall size reduction: %s%% of original size' % percent_saved)
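
The snippet above reads a module-level py3 flag that is defined elsewhere in the package; such flags are conventionally derived from sys.version_info. A sketch of the flag together with the shebang rewrite it gates (our reconstruction of the surrounding module, not a verbatim quote):

import sys

py3 = sys.version_info[0] == 3

shebang = "#!/usr/bin/env python"
if py3 and shebang.rstrip().endswith('python'):
    # Pin the interpreter to python3 when packing under Python 3.
    shebang = shebang.rstrip() + '3\n'
print(repr(shebang))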

Example 27

Project: TACTIC Source File: test_core.py
def setup_server():
    class Root:
        
        def index(self):
            return "hello"
        index.exposed = True
        
        favicon_ico = tools.staticfile.handler(filename=favicon_path)
        
        def defct(self, newct):
            newct = "text/%s" % newct
            cherrypy.config.update({'tools.response_headers.on': True,
                                    'tools.response_headers.headers':
                                    [('Content-Type', newct)]})
        defct.exposed = True
        
        def baseurl(self, path_info, relative=None):
            return cherrypy.url(path_info, relative=bool(relative))
        baseurl.exposed = True
    
    root = Root()
    
    
    class TestType(type):
        """Metaclass which automatically exposes all functions in each subclass,
        and adds an instance of the subclass as an attribute of root.
        """
        def __init__(cls, name, bases, dct):
            type.__init__(cls, name, bases, dct)
            for value in dct.itervalues():
                if isinstance(value, types.FunctionType):
                    value.exposed = True
            setattr(root, name.lower(), cls())
    class Test(object):
        __metaclass__ = TestType
    
    
    class URL(Test):
        
        _cp_config = {'tools.trailing_slash.on': False}
        
        def index(self, path_info, relative=None):
            if relative != 'server':
                relative = bool(relative)
            return cherrypy.url(path_info, relative=relative)
        
        def leaf(self, path_info, relative=None):
            if relative != 'server':
                relative = bool(relative)
            return cherrypy.url(path_info, relative=relative)


    class Status(Test):
        
        def index(self):
            return "normal"
        
        def blank(self):
            cherrypy.response.status = ""
        
        # According to RFC 2616, new status codes are OK as long as they
        # are between 100 and 599.
        
        # Here is an illegal code...
        def illegal(self):
            cherrypy.response.status = 781
            return "oops"
        
        # ...and here is an unknown but legal code.
        def unknown(self):
            cherrypy.response.status = "431 My custom error"
            return "funky"
        
        # Non-numeric code
        def bad(self):
            cherrypy.response.status = "error"
            return "bad news"


    class Redirect(Test):
        
        class Error:
            _cp_config = {"tools.err_redirect.on": True,
                          "tools.err_redirect.url": "/errpage",
                          "tools.err_redirect.internal": False,
                          }
            
            def index(self):
                raise NameError("redirect_test")
            index.exposed = True
        error = Error()
        
        def index(self):
            return "child"
        
        def by_code(self, code):
            raise cherrypy.HTTPRedirect("somewhere else", code)
        by_code._cp_config = {'tools.trailing_slash.extra': True}
        
        def nomodify(self):
            raise cherrypy.HTTPRedirect("", 304)
        
        def proxy(self):
            raise cherrypy.HTTPRedirect("proxy", 305)
        
        def stringify(self):
            return str(cherrypy.HTTPRedirect("/"))
        
        def fragment(self, frag):
            raise cherrypy.HTTPRedirect("/some/url#%s" % frag)
    
    def login_redir():
        if not getattr(cherrypy.request, "login", None):
            raise cherrypy.InternalRedirect("/internalredirect/login")
    tools.login_redir = _cptools.Tool('before_handler', login_redir)
    
    def redir_custom():
        raise cherrypy.InternalRedirect("/internalredirect/custom_err")
    
    class InternalRedirect(Test):
        
        def index(self):
            raise cherrypy.InternalRedirect("/")
        
        def choke(self):
            return 3 / 0
        choke.exposed = True
        choke._cp_config = {'hooks.before_error_response': redir_custom}
        
        def relative(self, a, b):
            raise cherrypy.InternalRedirect("cousin?t=6")
        
        def cousin(self, t):
            assert cherrypy.request.prev.closed
            return cherrypy.request.prev.query_string
        
        def petshop(self, user_id):
            if user_id == "parrot":
                # Trade it for a slug when redirecting
                raise cherrypy.InternalRedirect('/image/getImagesByUser?user_id=slug')
            elif user_id == "terrier":
                # Trade it for a fish when redirecting
                raise cherrypy.InternalRedirect('/image/getImagesByUser?user_id=fish')
            else:
                # This should pass the user_id through to getImagesByUser
                raise cherrypy.InternalRedirect(
                    '/image/getImagesByUser?user_id=%s' % str(user_id))
        
        # We support Python 2.3, but the @-deco syntax would look like this:
        # @tools.login_redir()
        def secure(self):
            return "Welcome!"
        secure = tools.login_redir()(secure)
        # Since calling the tool returns the same function you pass in,
        # you could skip binding the return value, and just write:
        # tools.login_redir()(secure)
        
        def login(self):
            return "Please log in"
        
        def custom_err(self):
            return "Something went horribly wrong."
        
        def early_ir(self, arg):
            return "whatever"
        early_ir._cp_config = {'hooks.before_request_body': redir_custom}
    
    
    class Image(Test):
        
        def getImagesByUser(self, user_id):
            return "0 images for %s" % user_id


    class Flatten(Test):
        
        def as_string(self):
            return "content"
        
        def as_list(self):
            return ["con", "tent"]
        
        def as_yield(self):
            yield "content"
        
        def as_dblyield(self):
            yield self.as_yield()
        as_dblyield._cp_config = {'tools.flatten.on': True}
        
        def as_refyield(self):
            for chunk in self.as_yield():
                yield chunk
    
    
    class Ranges(Test):
        
        def get_ranges(self, bytes):
            return repr(httputil.get_ranges('bytes=%s' % bytes, 8))
        
        def slice_file(self):
            path = os.path.join(os.getcwd(), os.path.dirname(__file__))
            return static.serve_file(os.path.join(path, "static/index.html"))


    class Cookies(Test):
        
        def single(self, name):
            cookie = cherrypy.request.cookie[name]
            # Python2's SimpleCookie.__setitem__ won't take unicode keys.
            cherrypy.response.cookie[str(name)] = cookie.value
        
        def multiple(self, names):
            for name in names:
                cookie = cherrypy.request.cookie[name]
                # Python2's SimpleCookie.__setitem__ won't take unicode keys.
                cherrypy.response.cookie[str(name)] = cookie.value


    if sys.version_info >= (2, 5):
        from cherrypy.test import py25
        Root.expose_dec = py25.ExposeExamples()
    
    cherrypy.tree.mount(root)
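
The TestType metaclass above is attached through Python 2's __metaclass__ hook, which Python 3 silently ignores. The three-argument type(...) call spells the same construction on both lines; here is a portable sketch of the exposure idea (class and method names are ours):

class ExposeAll(type):
    # Mark every plain callable in the class body as exposed.
    def __init__(cls, name, bases, dct):
        super(ExposeAll, cls).__init__(name, bases, dct)
        for value in dct.values():
            if callable(value):
                value.exposed = True

# Works on Python 2 and 3 alike; Python 3 alone could instead write
# `class Demo(metaclass=ExposeAll): ...`
Demo = ExposeAll('Demo', (object,), {'index': lambda self: 'hello'})
print("%s %s" % (Demo().index(), Demo.index.exposed))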

Example 28

Project: pip Source File: req_file.py
Function: process_line
def process_line(line, filename, line_number, finder=None, comes_from=None,
                 options=None, session=None, wheel_cache=None,
                 constraint=False):
    """Process a single requirements line; This can result in creating/yielding
    requirements, or updating the finder.

    For lines that contain requirements, the only options that have an effect
    are from SUPPORTED_OPTIONS_REQ, and they are scoped to the
    requirement. Other options from SUPPORTED_OPTIONS may be present, but are
    ignored.

    For lines that do not contain requirements, the only options that have an
    effect are from SUPPORTED_OPTIONS. Options from SUPPORTED_OPTIONS_REQ may
    be present, but are ignored. These lines may contain multiple options
    (although our docs imply only one is supported), and all are parsed and
    affect the finder.

    :param constraint: If True, parsing a constraints file.
    :param options: OptionParser options that we may update
    """
    parser = build_parser()
    defaults = parser.get_default_values()
    defaults.index_url = None
    if finder:
        # `finder.format_control` will be updated during parsing
        defaults.format_control = finder.format_control
    args_str, options_str = break_args_options(line)
    if sys.version_info < (2, 7, 3):
        # Prior to 2.7.3, shlex cannot deal with unicode entries
        options_str = options_str.encode('utf8')
    opts, _ = parser.parse_args(shlex.split(options_str), defaults)

    # preserve for the nested code path
    line_comes_from = '%s %s (line %s)' % (
        '-c' if constraint else '-r', filename, line_number)

    # yield a line requirement
    if args_str:
        isolated = options.isolated_mode if options else False
        if options:
            cmdoptions.check_install_build_global(options, opts)
        # get the options that apply to requirements
        req_options = {}
        for dest in SUPPORTED_OPTIONS_REQ_DEST:
            if dest in opts.__dict__ and opts.__dict__[dest]:
                req_options[dest] = opts.__dict__[dest]
        yield InstallRequirement.from_line(
            args_str, line_comes_from, constraint=constraint,
            isolated=isolated, options=req_options, wheel_cache=wheel_cache
        )

    # yield an editable requirement
    elif opts.editables:
        isolated = options.isolated_mode if options else False
        default_vcs = options.default_vcs if options else None
        yield InstallRequirement.from_editable(
            opts.editables[0], comes_from=line_comes_from,
            constraint=constraint, default_vcs=default_vcs, isolated=isolated,
            wheel_cache=wheel_cache
        )

    # parse a nested requirements file
    elif opts.requirements or opts.constraints:
        if opts.requirements:
            req_path = opts.requirements[0]
            nested_constraint = False
        else:
            req_path = opts.constraints[0]
            nested_constraint = True
        # original file is over http
        if SCHEME_RE.search(filename):
            # do a url join so relative paths work
            req_path = urllib_parse.urljoin(filename, req_path)
        # original file and nested file are paths
        elif not SCHEME_RE.search(req_path):
            # do a join so relative paths work
            req_path = os.path.join(os.path.dirname(filename), req_path)
        # TODO: Why not use `comes_from='-r {} (line {})'` here as well?
        parser = parse_requirements(
            req_path, finder, comes_from, options, session,
            constraint=nested_constraint, wheel_cache=wheel_cache
        )
        for req in parser:
            yield req

    # percolate hash-checking option upward
    elif opts.require_hashes:
        options.require_hashes = opts.require_hashes

    # set finder options
    elif finder:
        if opts.allow_external:
            warnings.warn(
                "--allow-external has been deprecated and will be removed in "
                "the future. Due to changes in the repository protocol, it no "
                "longer has any effect.",
                RemovedInPip10Warning,
            )

        if opts.allow_all_external:
            warnings.warn(
                "--allow-all-external has been deprecated and will be removed "
                "in the future. Due to changes in the repository protocol, it "
                "no longer has any effect.",
                RemovedInPip10Warning,
            )

        if opts.allow_unverified:
            warnings.warn(
                "--allow-unverified has been deprecated and will be removed "
                "in the future. Due to changes in the repository protocol, it "
                "no longer has any effect.",
                RemovedInPip10Warning,
            )

        if opts.index_url:
            finder.index_urls = [opts.index_url]
        if opts.use_wheel is False:
            finder.use_wheel = False
            pip.index.fmt_ctl_no_use_wheel(finder.format_control)
        if opts.no_index is True:
            finder.index_urls = []
        if opts.extra_index_urls:
            finder.index_urls.extend(opts.extra_index_urls)
        if opts.find_links:
            # FIXME: it would be nice to keep track of the source
            # of the find_links: support a find-links local path
            # relative to a requirements file.
            value = opts.find_links[0]
            req_dir = os.path.dirname(os.path.abspath(filename))
            relative_to_reqs_file = os.path.join(req_dir, value)
            if os.path.exists(relative_to_reqs_file):
                value = relative_to_reqs_file
            finder.find_links.append(value)
        if opts.pre:
            finder.allow_all_prereleases = True
        if opts.process_dependency_links:
            finder.process_dependency_links = True
        if opts.trusted_hosts:
            finder.secure_origins.extend(
                ("*", host, "*") for host in opts.trusted_hosts)

Example 29

Project: statsmodels Source File: print_version.py
def show_versions(show_dirs=True):
    if not show_dirs:
        return _show_versions_only()
    print("\nINSTALLED VERSIONS")
    print("------------------")
    print("Python: %d.%d.%d.%s.%s" % sys.version_info[:])
    try:
        import os
        (sysname, nodename, release, version, machine) = os.uname()
        print("OS: %s %s %s %s" % (sysname, release, version, machine))
        print("byteorder: %s" % sys.byteorder)
        print("LC_ALL: %s" % os.environ.get('LC_ALL', "None"))
        print("LANG: %s" % os.environ.get('LANG', "None"))
    except Exception:
        pass

    try:
        import statsmodels
        from statsmodels import version
        has_sm = True
    except ImportError:
        has_sm = False

    print('\nStatsmodels\n===========\n')
    if has_sm:
        print('Installed: %s (%s)' % (safe_version(version, 'full_version'),
                                      dirname(statsmodels.__file__)))
    else:
        print('Not installed')

    print("\nRequired Dependencies\n=====================\n")
    try:
        import Cython
        print("cython: %s (%s)" % (safe_version(Cython),
                                   dirname(Cython.__file__)))
    except ImportError:
        print("cython: Not installed")

    try:
        import numpy
        print("numpy: %s (%s)" % (safe_version(numpy, ['version', 'version']),
                                  dirname(numpy.__file__)))
    except ImportError:
        print("numpy: Not installed")

    try:
        import scipy
        print("scipy: %s (%s)" % (safe_version(scipy, ['version', 'version']),
                                  dirname(scipy.__file__)))
    except ImportError:
        print("scipy: Not installed")

    try:
        import pandas
        print("pandas: %s (%s)" % (safe_version(pandas, ['version', 'version'],
                                                '__version__'),
                                   dirname(pandas.__file__)))
    except ImportError:
        print("pandas: Not installed")

    try:
        import dateutil
        print("    dateutil: %s (%s)" % (safe_version(dateutil),
                                         dirname(dateutil.__file__)))
    except ImportError:
        print("    dateutil: not installed")

    try:
        import patsy
        print("patsy: %s (%s)" % (safe_version(patsy),
                                  dirname(patsy.__file__)))
    except ImportError:
        print("patsy: Not installed")

    print("\nOptional Dependencies\n=====================\n")

    try:
        import matplotlib as mpl
        print("matplotlib: %s (%s)" % (safe_version(mpl),
                                       dirname(mpl.__file__)))
    except ImportError:
        print("matplotlib: Not installed")

    try:
        from cvxopt import info
        print("cvxopt: %s (%s)" % (safe_version(info, 'version'),
                                   dirname(info.__file__)))
    except ImportError:
        print("cvxopt: Not installed")

    print("\nDeveloper Tools\n================\n")

    try:
        import IPython
        print("IPython: %s (%s)" % (safe_version(IPython),
                                    dirname(IPython.__file__)))
    except ImportError:
        print("IPython: Not installed")
    try:
        import jinja2
        print("    jinja2: %s (%s)" % (safe_version(jinja2),
                                       dirname(jinja2.__file__)))
    except ImportError:
        print("    jinja2: Not installed")

    try:
        import sphinx
        print("sphinx: %s (%s)" % (safe_version(sphinx),
                                   dirname(sphinx.__file__)))
    except ImportError:
        print("sphinx: Not installed")

    try:
        import pygments
        print("    pygments: %s (%s)" % (safe_version(pygments),
                                         dirname(pygments.__file__)))
    except ImportError:
        print("    pygments: Not installed")

    try:
        import nose
        print("nose: %s (%s)" % (safe_version(nose), dirname(nose.__file__)))
    except ImportError:
        print("nose: Not installed")

    try:
        import virtualenv
        print("virtualenv: %s (%s)" % (safe_version(virtualenv),
                                       dirname(virtualenv.__file__)))
    except ImportError:
        print("virtualenv: Not installed")

    print("\n")

Example 30

Project: list Source File: pslint.py
def lint_psl(infile):
	"""Parses PSL file and performs syntax checking"""
	global orig_line, nline

	PSL_FLAG_EXCEPTION = (1<<0)
	PSL_FLAG_WILDCARD = (1<<1)
	PSL_FLAG_ICANN = (1<<2) # entry of ICANN section
	PSL_FLAG_PRIVATE = (1<<3) # entry of PRIVATE section
	PSL_FLAG_PLAIN = (1<<4) # just used for PSL syntax checking

	line2number = {}
	line2flag = {}
	group = []
	section = 0
	icann_sections = 0
	private_sections = 0

	lines = [line.strip('\n') for line in infile]

	for line in lines:
		nline += 1

		# check for leading/trailing whitespace
		stripped = line.strip()
		if stripped != line:
			line = line.replace('\t','\\t')
			line = line.replace('\r','^M')
			orig_line = line
			warning('Leading/Trailing whitespace')
		orig_line = line
		line = stripped

		# empty line (end of sorted domain group)
		if not line:
			# check_order(group)
			continue

		# check for section begin/end
		if line[0:2] == "//":
			# check_order(group)

			if section == 0:
				if line == "// ===BEGIN ICANN DOMAINS===":
					section = PSL_FLAG_ICANN
					icann_sections += 1
				elif line == "// ===BEGIN PRIVATE DOMAINS===":
					section = PSL_FLAG_PRIVATE
					private_sections += 1
				elif line[3:11] == "===BEGIN":
					error('Unexpected begin of unknown section')
				elif line[3:9] == "===END":
					error('End of section without previous begin')
			elif section == PSL_FLAG_ICANN:
				if line == "// ===END ICANN DOMAINS===":
					section = 0
				elif line[3:11] == "===BEGIN":
					error('Unexpected begin of section')
				elif line[3:9] == "===END":
					error('Unexpected end of section')
			elif section == PSL_FLAG_PRIVATE:
				if line == "// ===END PRIVATE DOMAINS===":
					section = 0
				elif line[3:11] == "===BEGIN":
					error('Unexpected begin of section')
				elif line[3:9] == "===END":
					error('Unexpected end of section')

			continue # processing of comments ends here

		# No rule may appear outside of a section
		if section == 0:
			error('Rule outside of section')

		group.append(list(reversed(line.split('.'))))

		# decode UTF-8 input into unicode, needed only for python 2.x
		try:
			if sys.version_info[0] < 3:
				line = line.decode('utf-8')
			else:
				line.encode('utf-8')
		except (UnicodeDecodeError, UnicodeEncodeError):
			orig_line = None
			error('Invalid UTF-8 character')
			continue

		# each rule must be lowercase (or more exactly: not uppercase and not titlecase)
		if line != line.lower():
			error('Rule must be lowercase')

		# strip leading wildcards
		flags = section
		# while line[0:2] == '*.':
		if line[0:2] == '*.':
			flags |= PSL_FLAG_WILDCARD
			line = line[2:]

		if line[0] == '!':
			flags |= PSL_FLAG_EXCEPTION
			line = line[1:]
		else:
			flags |= PSL_FLAG_PLAIN

		# wildcard and exception must not be combined
		if flags & PSL_FLAG_WILDCARD and flags & PSL_FLAG_EXCEPTION:
			error('Combination of wildcard and exception')
			continue

		labels = line.split('.')

		if flags & PSL_FLAG_EXCEPTION and len(labels) > 1:
			domain = ".".join(str(label) for label in labels[1:])
			if domain not in line2flag:
				error('Exception without previous wildcard')
			elif not line2flag[domain] & PSL_FLAG_WILDCARD:
				error('Exception without previous wildcard')

		for label in labels:
			if not label:
				error('Leading/trailing or multiple dot')
				continue

			if label[0:4] == 'xn--':
				error('Punycode found')
				continue

			if '--' in label:
				error('Double minus found')
				continue

			# allowed characters are a-z, 0-9, '-' and Unicode >= 128 (this could perhaps be fine-tuned)
			for c in label:
				if not c.isalnum() and c != '-' and ord(c) < 128:
					error('Illegal character')
					break

		if line in line2flag:
			'''Found existing entry:
			   Combining an exception with a plain rule is contradictory:
			     !foo.bar + foo.bar
			   Duplicate, since *.foo.bar implies foo.bar:
			      foo.bar + *.foo.bar
			   Allowed:
			     !foo.bar + *.foo.bar
			'''
			error('Found duplicate/ambiguity (previous line was %d)' % line2number[line])

		line2number[line] = nline
		line2flag[line] = flags

	orig_line = None

	if section == PSL_FLAG_ICANN:
		error('ICANN section not closed')
	elif section == PSL_FLAG_PRIVATE:
		error('PRIVATE section not closed')

	if icann_sections < 1:
		warning('No ICANN section found')
	elif icann_sections > 1:
		warning('%d ICANN sections found' % icann_sections)

	if private_sections < 1:
		warning('No PRIVATE section found')
	elif private_sections > 1:
		warning('%d PRIVATE sections found' % private_sections)
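
The try/except around the decode/encode pair above is a stock 2/3 compatibility idiom: on Python 2 a str holds bytes, so decoding validates the UTF-8 input, while on Python 3 the str is already text and encoding proves it is representable. A standalone sketch of the idiom (the helper name is illustrative):

import sys

def is_valid_utf8(line):
    # Python 2: str is bytes, so decoding validates the UTF-8 input.
    # Python 3: str is text, so encoding proves it can round-trip.
    try:
        if sys.version_info[0] < 3:
            line.decode('utf-8')
        else:
            line.encode('utf-8')
        return True
    except (UnicodeDecodeError, UnicodeEncodeError):
        return False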

Example 31

Project: HTPC-Manager Source File: test_tools.py
    def setup_server():
        
        # Put check_access in a custom toolbox with its own namespace
        myauthtools = cherrypy._cptools.Toolbox("myauth")
        
        def check_access(default=False):
            if not getattr(cherrypy.request, "userid", default):
                raise cherrypy.HTTPError(401)
        myauthtools.check_access = cherrypy.Tool('before_request_body', check_access)
        
        def numerify():
            def number_it(body):
                for chunk in body:
                    for k, v in cherrypy.request.numerify_map:
                        chunk = chunk.replace(k, v)
                    yield chunk
            cherrypy.response.body = number_it(cherrypy.response.body)
        
        class NumTool(cherrypy.Tool):
            def _setup(self):
                def makemap():
                    m = self._merged_args().get("map", {})
                    cherrypy.request.numerify_map = copyitems(m)
                cherrypy.request.hooks.attach('on_start_resource', makemap)
                
                def critical():
                    cherrypy.request.error_response = cherrypy.HTTPError(502).set_response
                critical.failsafe = True
                
                cherrypy.request.hooks.attach('on_start_resource', critical)
                cherrypy.request.hooks.attach(self._point, self.callable)
        
        tools.numerify = NumTool('before_finalize', numerify)
        
        # It's not mandatory to inherit from cherrypy.Tool.
        class NadsatTool:
            
            def __init__(self):
                self.ended = {}
                self._name = "nadsat"
            
            def nadsat(self):
                def nadsat_it_up(body):
                    for chunk in body:
                        chunk = chunk.replace(ntob("good"), ntob("horrorshow"))
                        chunk = chunk.replace(ntob("piece"), ntob("lomtick"))
                        yield chunk
                cherrypy.response.body = nadsat_it_up(cherrypy.response.body)
            nadsat.priority = 0
            
            def cleanup(self):
                # This runs after the request has been completely written out.
                cherrypy.response.body = [ntob("razdrez")]
                id = cherrypy.request.params.get("id")
                if id:
                    self.ended[id] = True
            cleanup.failsafe = True
            
            def _setup(self):
                cherrypy.request.hooks.attach('before_finalize', self.nadsat)
                cherrypy.request.hooks.attach('on_end_request', self.cleanup)
        tools.nadsat = NadsatTool()
        
        def pipe_body():
            cherrypy.request.process_request_body = False
            clen = int(cherrypy.request.headers['Content-Length'])
            cherrypy.request.body = cherrypy.request.rfile.read(clen)
        
        # Assert that we can use a callable object instead of a function.
        class Rotator(object):
            def __call__(self, scale):
                r = cherrypy.response
                r.collapse_body()
                if py3k:
                    r.body = [bytes([(x + scale) % 256 for x in r.body[0]])]
                else:
                    r.body = [chr((ord(x) + scale) % 256) for x in r.body[0]]
        cherrypy.tools.rotator = cherrypy.Tool('before_finalize', Rotator())
        
        def stream_handler(next_handler, *args, **kwargs):
            cherrypy.response.output = o = BytesIO()
            try:
                response = next_handler(*args, **kwargs)
                # Ignore the response and return our accumulated output instead.
                return o.getvalue()
            finally:
                o.close()
        cherrypy.tools.streamer = cherrypy._cptools.HandlerWrapperTool(stream_handler)
        
        class Root:
            def index(self):
                return "Howdy earth!"
            index.exposed = True
            
            def tarfile(self):
                cherrypy.response.output.write(ntob('I am '))
                cherrypy.response.output.write(ntob('a tarfile'))
            tarfile.exposed = True
            tarfile._cp_config = {'tools.streamer.on': True}
            
            def euro(self):
                hooks = list(cherrypy.request.hooks['before_finalize'])
                hooks.sort()
                cbnames = [x.callback.__name__ for x in hooks]
                assert cbnames == ['gzip'], cbnames
                priorities = [x.priority for x in hooks]
                assert priorities == [80], priorities
                yield ntou("Hello,")
                yield ntou("world")
                yield europoundUnicode
            euro.exposed = True
            
            # Bare hooks
            def pipe(self):
                return cherrypy.request.body
            pipe.exposed = True
            pipe._cp_config = {'hooks.before_request_body': pipe_body}
            
            # Multiple decorators; include kwargs just for fun.
            # Note that rotator must run before gzip.
            def decorated_euro(self, *vpath):
                yield ntou("Hello,")
                yield ntou("world")
                yield europoundUnicode
            decorated_euro.exposed = True
            decorated_euro = tools.gzip(compress_level=6)(decorated_euro)
            decorated_euro = tools.rotator(scale=3)(decorated_euro)
        
        root = Root()
        
        
        class TestType(type):
            """Metaclass which automatically exposes all functions in each subclass,
            and adds an instance of the subclass as an attribute of root.
            """
            def __init__(cls, name, bases, dct):
                type.__init__(cls, name, bases, dct)
                for value in itervalues(dct):
                    if isinstance(value, types.FunctionType):
                        value.exposed = True
                setattr(root, name.lower(), cls())
        Test = TestType('Test', (object,), {})
        
        
        # METHOD ONE:
        # Declare Tools in _cp_config
        class Demo(Test):
            
            _cp_config = {"tools.nadsat.on": True}
            
            def index(self, id=None):
                return "A good piece of cherry pie"
            
            def ended(self, id):
                return repr(tools.nadsat.ended[id])
            
            def err(self, id=None):
                raise ValueError()
            
            def errinstream(self, id=None):
                yield "nonconfidential"
                raise ValueError()
                yield "confidential"
            
            # METHOD TWO: decorator using Tool()
            # We support Python 2.3, but the @-deco syntax would look like this:
            # @tools.check_access()
            def restricted(self):
                return "Welcome!"
            restricted = myauthtools.check_access()(restricted)
            userid = restricted
            
            def err_in_onstart(self):
                return "success!"
            
            def stream(self, id=None):
                for x in xrange(100000000):
                    yield str(x)
            stream._cp_config = {'response.stream': True}
        
        
        conf = {
            # METHOD THREE:
            # Declare Tools in detached config
            '/demo': {
                'tools.numerify.on': True,
                'tools.numerify.map': {ntob("pie"): ntob("3.14159")},
            },
            '/demo/restricted': {
                'request.show_tracebacks': False,
            },
            '/demo/userid': {
                'request.show_tracebacks': False,
                'myauth.check_access.default': True,
            },
            '/demo/errinstream': {
                'response.stream': True,
            },
            '/demo/err_in_onstart': {
                # Because this isn't a dict, on_start_resource will error.
                'tools.numerify.map': "pie->3.14159"
            },
            # Combined tools
            '/euro': {
                'tools.gzip.on': True,
                'tools.encode.on': True,
            },
            # Priority specified in config
            '/decorated_euro/subpath': {
                'tools.gzip.priority': 10,
            },
            # Handler wrappers
            '/tarfile': {'tools.streamer.on': True}
        }
        app = cherrypy.tree.mount(root, config=conf)
        app.request_class.namespaces['myauth'] = myauthtools
        
        if sys.version_info >= (2, 5):
            from cherrypy.test import _test_decorators
            root.tooldecs = _test_decorators.ToolExamples()
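
The final guard works because sys.version_info compares element-wise against a plain tuple. A minimal sketch of the same version-gating pattern, using functools (which itself first appeared in Python 2.5) as the gated import:

import sys

if sys.version_info >= (2, 5):
    # The module is only importable on interpreters new enough to ship it.
    from functools import wraps
else:
    # Minimal fallback that only preserves the wrapped function's name.
    def wraps(wrapped):
        def decorator(f):
            f.__name__ = wrapped.__name__
            return f
        return decorator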

Example 32

Project: youtube-dl Source File: options.py
def parseOpts(overrideArguments=None):
    def _readOptions(filename_bytes, default=[]):
        try:
            optionf = open(filename_bytes)
        except IOError:
            return default  # silently skip if file is not present
        try:
            # FIXME: https://github.com/rg3/youtube-dl/commit/dfe5fa49aed02cf36ba9f743b11b0903554b5e56
            contents = optionf.read()
            if sys.version_info < (3,):
                contents = contents.decode(preferredencoding())
            res = compat_shlex_split(contents, comments=True)
        finally:
            optionf.close()
        return res

    def _readUserConf():
        xdg_config_home = compat_getenv('XDG_CONFIG_HOME')
        if xdg_config_home:
            userConfFile = os.path.join(xdg_config_home, 'youtube-dl', 'config')
            if not os.path.isfile(userConfFile):
                userConfFile = os.path.join(xdg_config_home, 'youtube-dl.conf')
        else:
            userConfFile = os.path.join(compat_expanduser('~'), '.config', 'youtube-dl', 'config')
            if not os.path.isfile(userConfFile):
                userConfFile = os.path.join(compat_expanduser('~'), '.config', 'youtube-dl.conf')
        userConf = _readOptions(userConfFile, None)

        if userConf is None:
            appdata_dir = compat_getenv('appdata')
            if appdata_dir:
                userConf = _readOptions(
                    os.path.join(appdata_dir, 'youtube-dl', 'config'),
                    default=None)
                if userConf is None:
                    userConf = _readOptions(
                        os.path.join(appdata_dir, 'youtube-dl', 'config.txt'),
                        default=None)

        if userConf is None:
            userConf = _readOptions(
                os.path.join(compat_expanduser('~'), 'youtube-dl.conf'),
                default=None)
        if userConf is None:
            userConf = _readOptions(
                os.path.join(compat_expanduser('~'), 'youtube-dl.conf.txt'),
                default=None)

        if userConf is None:
            userConf = []

        return userConf

    def _format_option_string(option):
        ''' ('-o', '--option') -> -o, --option METAVAR'''

        opts = []

        if option._short_opts:
            opts.append(option._short_opts[0])
        if option._long_opts:
            opts.append(option._long_opts[0])
        if len(opts) > 1:
            opts.insert(1, ', ')

        if option.takes_value():
            opts.append(' %s' % option.metavar)

        return ''.join(opts)

    def _comma_separated_values_options_callback(option, opt_str, value, parser):
        setattr(parser.values, option.dest, value.split(','))

    def _hide_login_info(opts):
        PRIVATE_OPTS = ['-p', '--password', '-u', '--username', '--video-password', '--ap-password', '--ap-username']
        eqre = re.compile('^(?P<key>' + ('|'.join(re.escape(po) for po in PRIVATE_OPTS)) + ')=.+$')

        def _scrub_eq(o):
            m = eqre.match(o)
            if m:
                return m.group('key') + '=PRIVATE'
            else:
                return o

        opts = list(map(_scrub_eq, opts))
        for private_opt in PRIVATE_OPTS:
            try:
                i = opts.index(private_opt)
                opts[i + 1] = 'PRIVATE'
            except ValueError:
                pass
        return opts

    # No need to wrap help messages if we're on a wide console
    columns = compat_get_terminal_size().columns
    max_width = columns if columns else 80
    max_help_position = 80

    fmt = optparse.IndentedHelpFormatter(width=max_width, max_help_position=max_help_position)
    fmt.format_option_strings = _format_option_string

    kw = {
        'version': __version__,
        'formatter': fmt,
        'usage': '%prog [OPTIONS] URL [URL...]',
        'conflict_handler': 'resolve',
    }

    parser = optparse.OptionParser(**compat_kwargs(kw))

    general = optparse.OptionGroup(parser, 'General Options')
    general.add_option(
        '-h', '--help',
        action='help',
        help='Print this help text and exit')
    general.add_option(
        '-v', '--version',
        action='version',
        help='Print program version and exit')
    general.add_option(
        '-U', '--update',
        action='store_true', dest='update_self',
        help='Update this program to latest version. Make sure that you have sufficient permissions (run with sudo if needed)')
    general.add_option(
        '-i', '--ignore-errors',
        action='store_true', dest='ignoreerrors', default=False,
        help='Continue on download errors, for example to skip unavailable videos in a playlist')
    general.add_option(
        '--abort-on-error',
        action='store_false', dest='ignoreerrors',
        help='Abort downloading of further videos (in the playlist or the command line) if an error occurs')
    general.add_option(
        '--dump-user-agent',
        action='store_true', dest='dump_user_agent', default=False,
        help='Display the current browser identification')
    general.add_option(
        '--list-extractors',
        action='store_true', dest='list_extractors', default=False,
        help='List all supported extractors')
    general.add_option(
        '--extractor-descriptions',
        action='store_true', dest='list_extractor_descriptions', default=False,
        help='Output descriptions of all supported extractors')
    general.add_option(
        '--force-generic-extractor',
        action='store_true', dest='force_generic_extractor', default=False,
        help='Force extraction to use the generic extractor')
    general.add_option(
        '--default-search',
        dest='default_search', metavar='PREFIX',
        help='Use this prefix for unqualified URLs. For example "gvsearch2:" downloads two videos from google videos for youtube-dl "large apple". Use the value "auto" to let youtube-dl guess ("auto_warning" to emit a warning when guessing). "error" just throws an error. The default value "fixup_error" repairs broken URLs, but emits an error if this is not possible instead of searching.')
    general.add_option(
        '--ignore-config',
        action='store_true',
        help='Do not read configuration files. '
        'When given in the global configuration file /etc/youtube-dl.conf: '
        'Do not read the user configuration in ~/.config/youtube-dl/config '
        '(%APPDATA%/youtube-dl/config.txt on Windows)')
    general.add_option(
        '--flat-playlist',
        action='store_const', dest='extract_flat', const='in_playlist',
        default=False,
        help='Do not extract the videos of a playlist, only list them.')
    general.add_option(
        '--mark-watched',
        action='store_true', dest='mark_watched', default=False,
        help='Mark videos watched (YouTube only)')
    general.add_option(
        '--no-mark-watched',
        action='store_false', dest='mark_watched', default=False,
        help='Do not mark videos watched (YouTube only)')
    general.add_option(
        '--no-color', '--no-colors',
        action='store_true', dest='no_color',
        default=False,
        help='Do not emit color codes in output')

    network = optparse.OptionGroup(parser, 'Network Options')
    network.add_option(
        '--proxy', dest='proxy',
        default=None, metavar='URL',
        help='Use the specified HTTP/HTTPS/SOCKS proxy. To enable experimental '
             'SOCKS proxy, specify a proper scheme. For example '
             'socks5://127.0.0.1:1080/. Pass in an empty string (--proxy "") '
             'for direct connection')
    network.add_option(
        '--socket-timeout',
        dest='socket_timeout', type=float, default=None, metavar='SECONDS',
        help='Time to wait before giving up, in seconds')
    network.add_option(
        '--source-address',
        metavar='IP', dest='source_address', default=None,
        help='Client-side IP address to bind to (experimental)',
    )
    network.add_option(
        '-4', '--force-ipv4',
        action='store_const', const='0.0.0.0', dest='source_address',
        help='Make all connections via IPv4 (experimental)',
    )
    network.add_option(
        '-6', '--force-ipv6',
        action='store_const', const='::', dest='source_address',
        help='Make all connections via IPv6 (experimental)',
    )
    network.add_option(
        '--geo-verification-proxy',
        dest='geo_verification_proxy', default=None, metavar='URL',
        help='Use this proxy to verify the IP address for some geo-restricted sites. '
        'The default proxy specified by --proxy (or none, if the option is not present) is used for the actual downloading. (experimental)'
    )
    network.add_option(
        '--cn-verification-proxy',
        dest='cn_verification_proxy', default=None, metavar='URL',
        help=optparse.SUPPRESS_HELP,
    )

    selection = optparse.OptionGroup(parser, 'Video Selection')
    selection.add_option(
        '--playlist-start',
        dest='playliststart', metavar='NUMBER', default=1, type=int,
        help='Playlist video to start at (default is %default)')
    selection.add_option(
        '--playlist-end',
        dest='playlistend', metavar='NUMBER', default=None, type=int,
        help='Playlist video to end at (default is last)')
    selection.add_option(
        '--playlist-items',
        dest='playlist_items', metavar='ITEM_SPEC', default=None,
        help='Playlist video items to download. Specify indices of the videos in the playlist separated by commas like: "--playlist-items 1,2,5,8" if you want to download videos indexed 1, 2, 5, 8 in the playlist. You can specify range: "--playlist-items 1-3,7,10-13", it will download the videos at index 1, 2, 3, 7, 10, 11, 12 and 13.')
    selection.add_option(
        '--match-title',
        dest='matchtitle', metavar='REGEX',
        help='Download only matching titles (regex or caseless sub-string)')
    selection.add_option(
        '--reject-title',
        dest='rejecttitle', metavar='REGEX',
        help='Skip download for matching titles (regex or caseless sub-string)')
    selection.add_option(
        '--max-downloads',
        dest='max_downloads', metavar='NUMBER', type=int, default=None,
        help='Abort after downloading NUMBER files')
    selection.add_option(
        '--min-filesize',
        metavar='SIZE', dest='min_filesize', default=None,
        help='Do not download any videos smaller than SIZE (e.g. 50k or 44.6m)')
    selection.add_option(
        '--max-filesize',
        metavar='SIZE', dest='max_filesize', default=None,
        help='Do not download any videos larger than SIZE (e.g. 50k or 44.6m)')
    selection.add_option(
        '--date',
        metavar='DATE', dest='date', default=None,
        help='Download only videos uploaded in this date')
    selection.add_option(
        '--datebefore',
        metavar='DATE', dest='datebefore', default=None,
        help='Download only videos uploaded on or before this date (i.e. inclusive)')
    selection.add_option(
        '--dateafter',
        metavar='DATE', dest='dateafter', default=None,
        help='Download only videos uploaded on or after this date (i.e. inclusive)')
    selection.add_option(
        '--min-views',
        metavar='COUNT', dest='min_views', default=None, type=int,
        help='Do not download any videos with less than COUNT views')
    selection.add_option(
        '--max-views',
        metavar='COUNT', dest='max_views', default=None, type=int,
        help='Do not download any videos with more than COUNT views')
    selection.add_option(
        '--match-filter',
        metavar='FILTER', dest='match_filter', default=None,
        help=(
            'Generic video filter (experimental). '
            'Specify any key (see help for -o for a list of available keys) to'
            ' match if the key is present, '
            '!key to check if the key is not present, '
            'key > NUMBER (like "comment_count > 12", also works with '
            '>=, <, <=, !=, =) to compare against a number, and '
            '& to require multiple matches. '
            'Values which are not known are excluded unless you'
            ' put a question mark (?) after the operator. '
            'For example, to only match videos that have been liked more than '
            '100 times and disliked less than 50 times (or the dislike '
            'functionality is not available at the given service), but which '
            'also have a description, use --match-filter '
            '"like_count > 100 & dislike_count <? 50 & description" .'
        ))
    selection.add_option(
        '--no-playlist',
        action='store_true', dest='noplaylist', default=False,
        help='Download only the video, if the URL refers to a video and a playlist.')
    selection.add_option(
        '--yes-playlist',
        action='store_false', dest='noplaylist', default=False,
        help='Download the playlist, if the URL refers to a video and a playlist.')
    selection.add_option(
        '--age-limit',
        metavar='YEARS', dest='age_limit', default=None, type=int,
        help='Download only videos suitable for the given age')
    selection.add_option(
        '--download-archive', metavar='FILE',
        dest='download_archive',
        help='Download only videos not listed in the archive file. Record the IDs of all downloaded videos in it.')
    selection.add_option(
        '--include-ads',
        dest='include_ads', action='store_true',
        help='Download advertisements as well (experimental)')

    authentication = optparse.OptionGroup(parser, 'Authentication Options')
    authentication.add_option(
        '-u', '--username',
        dest='username', metavar='USERNAME',
        help='Login with this account ID')
    authentication.add_option(
        '-p', '--password',
        dest='password', metavar='PASSWORD',
        help='Account password. If this option is left out, youtube-dl will ask interactively.')
    authentication.add_option(
        '-2', '--twofactor',
        dest='twofactor', metavar='TWOFACTOR',
        help='Two-factor auth code')
    authentication.add_option(
        '-n', '--netrc',
        action='store_true', dest='usenetrc', default=False,
        help='Use .netrc authentication data')
    authentication.add_option(
        '--video-password',
        dest='videopassword', metavar='PASSWORD',
        help='Video password (vimeo, smotri, youku)')

    adobe_pass = optparse.OptionGroup(parser, 'Adobe Pass Options')
    adobe_pass.add_option(
        '--ap-mso',
        dest='ap_mso', metavar='MSO',
        help='Adobe Pass multiple-system operator (TV provider) identifier, use --ap-list-mso for a list of available MSOs')
    adobe_pass.add_option(
        '--ap-username',
        dest='ap_username', metavar='USERNAME',
        help='Multiple-system operator account login')
    adobe_pass.add_option(
        '--ap-password',
        dest='ap_password', metavar='PASSWORD',
        help='Multiple-system operator account password. If this option is left out, youtube-dl will ask interactively.')
    adobe_pass.add_option(
        '--ap-list-mso',
        action='store_true', dest='ap_list_mso', default=False,
        help='List all supported multiple-system operators')

    video_format = optparse.OptionGroup(parser, 'Video Format Options')
    video_format.add_option(
        '-f', '--format',
        action='store', dest='format', metavar='FORMAT', default=None,
        help='Video format code, see the "FORMAT SELECTION" for all the info')
    video_format.add_option(
        '--all-formats',
        action='store_const', dest='format', const='all',
        help='Download all available video formats')
    video_format.add_option(
        '--prefer-free-formats',
        action='store_true', dest='prefer_free_formats', default=False,
        help='Prefer free video formats unless a specific one is requested')
    video_format.add_option(
        '-F', '--list-formats',
        action='store_true', dest='listformats',
        help='List all available formats of requested videos')
    video_format.add_option(
        '--youtube-include-dash-manifest',
        action='store_true', dest='youtube_include_dash_manifest', default=True,
        help=optparse.SUPPRESS_HELP)
    video_format.add_option(
        '--youtube-skip-dash-manifest',
        action='store_false', dest='youtube_include_dash_manifest',
        help='Do not download the DASH manifests and related data on YouTube videos')
    video_format.add_option(
        '--merge-output-format',
        action='store', dest='merge_output_format', metavar='FORMAT', default=None,
        help=(
            'If a merge is required (e.g. bestvideo+bestaudio), '
            'output to given container format. One of mkv, mp4, ogg, webm, flv. '
            'Ignored if no merge is required'))

    subtitles = optparse.OptionGroup(parser, 'Subtitle Options')
    subtitles.add_option(
        '--write-sub', '--write-srt',
        action='store_true', dest='writesubtitles', default=False,
        help='Write subtitle file')
    subtitles.add_option(
        '--write-auto-sub', '--write-automatic-sub',
        action='store_true', dest='writeautomaticsub', default=False,
        help='Write automatically generated subtitle file (YouTube only)')
    subtitles.add_option(
        '--all-subs',
        action='store_true', dest='allsubtitles', default=False,
        help='Download all the available subtitles of the video')
    subtitles.add_option(
        '--list-subs',
        action='store_true', dest='listsubtitles', default=False,
        help='List all available subtitles for the video')
    subtitles.add_option(
        '--sub-format',
        action='store', dest='subtitlesformat', metavar='FORMAT', default='best',
        help='Subtitle format, accepts formats preference, for example: "srt" or "ass/srt/best"')
    subtitles.add_option(
        '--sub-lang', '--sub-langs', '--srt-lang',
        action='callback', dest='subtitleslangs', metavar='LANGS', type='str',
        default=[], callback=_comma_separated_values_options_callback,
        help='Languages of the subtitles to download (optional) separated by commas, use --list-subs for available language tags')

    downloader = optparse.OptionGroup(parser, 'Download Options')
    downloader.add_option(
        '-r', '--limit-rate', '--rate-limit',
        dest='ratelimit', metavar='RATE',
        help='Maximum download rate in bytes per second (e.g. 50K or 4.2M)')
    downloader.add_option(
        '-R', '--retries',
        dest='retries', metavar='RETRIES', default=10,
        help='Number of retries (default is %default), or "infinite".')
    downloader.add_option(
        '--fragment-retries',
        dest='fragment_retries', metavar='RETRIES', default=10,
        help='Number of retries for a fragment (default is %default), or "infinite" (DASH and hlsnative only)')
    downloader.add_option(
        '--skip-unavailable-fragments',
        action='store_true', dest='skip_unavailable_fragments', default=True,
        help='Skip unavailable fragments (DASH and hlsnative only)')
    downloader.add_option(
        '--abort-on-unavailable-fragment',
        action='store_false', dest='skip_unavailable_fragments',
        help='Abort downloading when some fragment is not available')
    downloader.add_option(
        '--buffer-size',
        dest='buffersize', metavar='SIZE', default='1024',
        help='Size of download buffer (e.g. 1024 or 16K) (default is %default)')
    downloader.add_option(
        '--no-resize-buffer',
        action='store_true', dest='noresizebuffer', default=False,
        help='Do not automatically adjust the buffer size. By default, the buffer size is automatically resized from an initial value of SIZE.')
    downloader.add_option(
        '--test',
        action='store_true', dest='test', default=False,
        help=optparse.SUPPRESS_HELP)
    downloader.add_option(
        '--playlist-reverse',
        action='store_true',
        help='Download playlist videos in reverse order')
    downloader.add_option(
        '--xattr-set-filesize',
        dest='xattr_set_filesize', action='store_true',
        help='Set file xattribute ytdl.filesize with expected filesize (experimental)')
    downloader.add_option(
        '--hls-prefer-native',
        dest='hls_prefer_native', action='store_true', default=None,
        help='Use the native HLS downloader instead of ffmpeg')
    downloader.add_option(
        '--hls-prefer-ffmpeg',
        dest='hls_prefer_native', action='store_false', default=None,
        help='Use ffmpeg instead of the native HLS downloader')
    downloader.add_option(
        '--hls-use-mpegts',
        dest='hls_use_mpegts', action='store_true',
        help='Use the mpegts container for HLS videos, allowing the video '
             'to be played while downloading (some players may not be able to play it)')
    downloader.add_option(
        '--external-downloader',
        dest='external_downloader', metavar='COMMAND',
        help='Use the specified external downloader. '
             'Currently supports %s' % ','.join(list_external_downloaders()))
    downloader.add_option(
        '--external-downloader-args',
        dest='external_downloader_args', metavar='ARGS',
        help='Give these arguments to the external downloader')

    workarounds = optparse.OptionGroup(parser, 'Workarounds')
    workarounds.add_option(
        '--encoding',
        dest='encoding', metavar='ENCODING',
        help='Force the specified encoding (experimental)')
    workarounds.add_option(
        '--no-check-certificate',
        action='store_true', dest='no_check_certificate', default=False,
        help='Suppress HTTPS certificate validation')
    workarounds.add_option(
        '--prefer-insecure',
        '--prefer-unsecure', action='store_true', dest='prefer_insecure',
        help='Use an unencrypted connection to retrieve information about the video. (Currently supported only for YouTube)')
    workarounds.add_option(
        '--user-agent',
        metavar='UA', dest='user_agent',
        help='Specify a custom user agent')
    workarounds.add_option(
        '--referer',
        metavar='URL', dest='referer', default=None,
        help='Specify a custom referer, use if the video access is restricted to one domain',
    )
    workarounds.add_option(
        '--add-header',
        metavar='FIELD:VALUE', dest='headers', action='append',
        help='Specify a custom HTTP header and its value, separated by a colon \':\'. You can use this option multiple times',
    )
    workarounds.add_option(
        '--bidi-workaround',
        dest='bidi_workaround', action='store_true',
        help='Work around terminals that lack bidirectional text support. Requires bidiv or fribidi executable in PATH')
    workarounds.add_option(
        '--sleep-interval', '--min-sleep-interval', metavar='SECONDS',
        dest='sleep_interval', type=float,
        help=(
            'Number of seconds to sleep before each download when used alone '
            'or a lower bound of a range for randomized sleep before each download '
            '(minimum possible number of seconds to sleep) when used along with '
            '--max-sleep-interval.'))
    workarounds.add_option(
        '--max-sleep-interval', metavar='SECONDS',
        dest='max_sleep_interval', type=float,
        help=(
            'Upper bound of a range for randomized sleep before each download '
            '(maximum possible number of seconds to sleep). Must only be used '
            'along with --min-sleep-interval.'))

    verbosity = optparse.OptionGroup(parser, 'Verbosity / Simulation Options')
    verbosity.add_option(
        '-q', '--quiet',
        action='store_true', dest='quiet', default=False,
        help='Activate quiet mode')
    verbosity.add_option(
        '--no-warnings',
        dest='no_warnings', action='store_true', default=False,
        help='Ignore warnings')
    verbosity.add_option(
        '-s', '--simulate',
        action='store_true', dest='simulate', default=False,
        help='Do not download the video and do not write anything to disk')
    verbosity.add_option(
        '--skip-download',
        action='store_true', dest='skip_download', default=False,
        help='Do not download the video')
    verbosity.add_option(
        '-g', '--get-url',
        action='store_true', dest='geturl', default=False,
        help='Simulate, quiet but print URL')
    verbosity.add_option(
        '-e', '--get-title',
        action='store_true', dest='gettitle', default=False,
        help='Simulate, quiet but print title')
    verbosity.add_option(
        '--get-id',
        action='store_true', dest='getid', default=False,
        help='Simulate, quiet but print id')
    verbosity.add_option(
        '--get-thumbnail',
        action='store_true', dest='getthumbnail', default=False,
        help='Simulate, quiet but print thumbnail URL')
    verbosity.add_option(
        '--get-description',
        action='store_true', dest='getdescription', default=False,
        help='Simulate, quiet but print video description')
    verbosity.add_option(
        '--get-duration',
        action='store_true', dest='getduration', default=False,
        help='Simulate, quiet but print video length')
    verbosity.add_option(
        '--get-filename',
        action='store_true', dest='getfilename', default=False,
        help='Simulate, quiet but print output filename')
    verbosity.add_option(
        '--get-format',
        action='store_true', dest='getformat', default=False,
        help='Simulate, quiet but print output format')
    verbosity.add_option(
        '-j', '--dump-json',
        action='store_true', dest='dumpjson', default=False,
        help='Simulate, quiet but print JSON information. See --output for a description of available keys.')
    verbosity.add_option(
        '-J', '--dump-single-json',
        action='store_true', dest='dump_single_json', default=False,
        help='Simulate, quiet but print JSON information for each command-line argument. If the URL refers to a playlist, dump the whole playlist information in a single line.')
    verbosity.add_option(
        '--print-json',
        action='store_true', dest='print_json', default=False,
        help='Be quiet and print the video information as JSON (video is still being downloaded).',
    )
    verbosity.add_option(
        '--newline',
        action='store_true', dest='progress_with_newline', default=False,
        help='Output progress bar as new lines')
    verbosity.add_option(
        '--no-progress',
        action='store_true', dest='noprogress', default=False,
        help='Do not print progress bar')
    verbosity.add_option(
        '--console-title',
        action='store_true', dest='consoletitle', default=False,
        help='Display progress in console titlebar')
    verbosity.add_option(
        '-v', '--verbose',
        action='store_true', dest='verbose', default=False,
        help='Print various debugging information')
    verbosity.add_option(
        '--dump-pages', '--dump-intermediate-pages',
        action='store_true', dest='dump_intermediate_pages', default=False,
        help='Print downloaded pages encoded using base64 to debug problems (very verbose)')
    verbosity.add_option(
        '--write-pages',
        action='store_true', dest='write_pages', default=False,
        help='Write downloaded intermediary pages to files in the current directory to debug problems')
    verbosity.add_option(
        '--youtube-print-sig-code',
        action='store_true', dest='youtube_print_sig_code', default=False,
        help=optparse.SUPPRESS_HELP)
    verbosity.add_option(
        '--print-traffic', '--dump-headers',
        dest='debug_printtraffic', action='store_true', default=False,
        help='Display sent and read HTTP traffic')
    verbosity.add_option(
        '-C', '--call-home',
        dest='call_home', action='store_true', default=False,
        help='Contact the youtube-dl server for debugging')
    verbosity.add_option(
        '--no-call-home',
        dest='call_home', action='store_false', default=False,
        help='Do NOT contact the youtube-dl server for debugging')

    filesystem = optparse.OptionGroup(parser, 'Filesystem Options')
    filesystem.add_option(
        '-a', '--batch-file',
        dest='batchfile', metavar='FILE',
        help='File containing URLs to download (\'-\' for stdin)')
    filesystem.add_option(
        '--id', default=False,
        action='store_true', dest='useid', help='Use only video ID in file name')
    filesystem.add_option(
        '-o', '--output',
        dest='outtmpl', metavar='TEMPLATE',
        help=('Output filename template, see the "OUTPUT TEMPLATE" for all the info'))
    filesystem.add_option(
        '--autonumber-size',
        dest='autonumber_size', metavar='NUMBER',
        help='Specify the number of digits in %(autonumber)s when it is present in output filename template or --auto-number option is given')
    filesystem.add_option(
        '--restrict-filenames',
        action='store_true', dest='restrictfilenames', default=False,
        help='Restrict filenames to only ASCII characters, and avoid "&" and spaces in filenames')
    filesystem.add_option(
        '-A', '--auto-number',
        action='store_true', dest='autonumber', default=False,
        help='[deprecated; use -o "%(autonumber)s-%(title)s.%(ext)s" ] Number downloaded files starting from 00000')
    filesystem.add_option(
        '-t', '--title',
        action='store_true', dest='usetitle', default=False,
        help='[deprecated] Use title in file name (default)')
    filesystem.add_option(
        '-l', '--literal', default=False,
        action='store_true', dest='usetitle',
        help='[deprecated] Alias of --title')
    filesystem.add_option(
        '-w', '--no-overwrites',
        action='store_true', dest='nooverwrites', default=False,
        help='Do not overwrite files')
    filesystem.add_option(
        '-c', '--continue',
        action='store_true', dest='continue_dl', default=True,
        help='Force resume of partially downloaded files. By default, youtube-dl will resume downloads if possible.')
    filesystem.add_option(
        '--no-continue',
        action='store_false', dest='continue_dl',
        help='Do not resume partially downloaded files (restart from beginning)')
    filesystem.add_option(
        '--no-part',
        action='store_true', dest='nopart', default=False,
        help='Do not use .part files - write directly into output file')
    filesystem.add_option(
        '--no-mtime',
        action='store_false', dest='updatetime', default=True,
        help='Do not use the Last-modified header to set the file modification time')
    filesystem.add_option(
        '--write-description',
        action='store_true', dest='writedescription', default=False,
        help='Write video description to a .description file')
    filesystem.add_option(
        '--write-info-json',
        action='store_true', dest='writeinfojson', default=False,
        help='Write video metadata to a .info.json file')
    filesystem.add_option(
        '--write-annotations',
        action='store_true', dest='writeannotations', default=False,
        help='Write video annotations to a .annotations.xml file')
    filesystem.add_option(
        '--load-info-json', '--load-info',
        dest='load_info_filename', metavar='FILE',
        help='JSON file containing the video information (created with the "--write-info-json" option)')
    filesystem.add_option(
        '--cookies',
        dest='cookiefile', metavar='FILE',
        help='File to read cookies from and dump cookie jar in')
    filesystem.add_option(
        '--cache-dir', dest='cachedir', default=None, metavar='DIR',
        help='Location in the filesystem where youtube-dl can store some downloaded information permanently. By default $XDG_CACHE_HOME/youtube-dl or ~/.cache/youtube-dl . At the moment, only YouTube player files (for videos with obfuscated signatures) are cached, but that may change.')
    filesystem.add_option(
        '--no-cache-dir', action='store_const', const=False, dest='cachedir',
        help='Disable filesystem caching')
    filesystem.add_option(
        '--rm-cache-dir',
        action='store_true', dest='rm_cachedir',
        help='Delete all filesystem cache files')

    thumbnail = optparse.OptionGroup(parser, 'Thumbnail images')
    thumbnail.add_option(
        '--write-thumbnail',
        action='store_true', dest='writethumbnail', default=False,
        help='Write thumbnail image to disk')
    thumbnail.add_option(
        '--write-all-thumbnails',
        action='store_true', dest='write_all_thumbnails', default=False,
        help='Write all thumbnail image formats to disk')
    thumbnail.add_option(
        '--list-thumbnails',
        action='store_true', dest='list_thumbnails', default=False,
        help='Simulate and list all available thumbnail formats')

    postproc = optparse.OptionGroup(parser, 'Post-processing Options')
    postproc.add_option(
        '-x', '--extract-audio',
        action='store_true', dest='extractaudio', default=False,
        help='Convert video files to audio-only files (requires ffmpeg or avconv and ffprobe or avprobe)')
    postproc.add_option(
        '--audio-format', metavar='FORMAT', dest='audioformat', default='best',
        help='Specify audio format: "best", "aac", "vorbis", "mp3", "m4a", "opus", or "wav"; "%default" by default')
    postproc.add_option(
        '--audio-quality', metavar='QUALITY',
        dest='audioquality', default='5',
        help='Specify ffmpeg/avconv audio quality, insert a value between 0 (better) and 9 (worse) for VBR or a specific bitrate like 128K (default %default)')
    postproc.add_option(
        '--recode-video',
        metavar='FORMAT', dest='recodevideo', default=None,
        help='Encode the video to another format if necessary (currently supported: mp4|flv|ogg|webm|mkv|avi)')
    postproc.add_option(
        '--postprocessor-args',
        dest='postprocessor_args', metavar='ARGS',
        help='Give these arguments to the postprocessor')
    postproc.add_option(
        '-k', '--keep-video',
        action='store_true', dest='keepvideo', default=False,
        help='Keep the video file on disk after the post-processing; the video is erased by default')
    postproc.add_option(
        '--no-post-overwrites',
        action='store_true', dest='nopostoverwrites', default=False,
        help='Do not overwrite post-processed files; the post-processed files are overwritten by default')
    postproc.add_option(
        '--embed-subs',
        action='store_true', dest='embedsubtitles', default=False,
        help='Embed subtitles in the video (only for mp4, webm and mkv videos)')
    postproc.add_option(
        '--embed-thumbnail',
        action='store_true', dest='embedthumbnail', default=False,
        help='Embed thumbnail in the audio as cover art')
    postproc.add_option(
        '--add-metadata',
        action='store_true', dest='addmetadata', default=False,
        help='Write metadata to the video file')
    postproc.add_option(
        '--metadata-from-title',
        metavar='FORMAT', dest='metafromtitle',
        help='Parse additional metadata like song title / artist from the video title. '
             'The format syntax is the same as --output, '
             'the parsed parameters replace existing values. '
             'Additional templates: %(album)s, %(artist)s. '
             'Example: --metadata-from-title "%(artist)s - %(title)s" matches a title like '
             '"Coldplay - Paradise"')
    postproc.add_option(
        '--xattrs',
        action='store_true', dest='xattrs', default=False,
        help='Write metadata to the video file\'s xattrs (using dublin core and xdg standards)')
    postproc.add_option(
        '--fixup',
        metavar='POLICY', dest='fixup', default='detect_or_warn',
        help='Automatically correct known faults of the file. '
             'One of never (do nothing), warn (only emit a warning), '
             'detect_or_warn (the default; fix file if we can, warn otherwise)')
    postproc.add_option(
        '--prefer-avconv',
        action='store_false', dest='prefer_ffmpeg',
        help='Prefer avconv over ffmpeg for running the postprocessors (default)')
    postproc.add_option(
        '--prefer-ffmpeg',
        action='store_true', dest='prefer_ffmpeg',
        help='Prefer ffmpeg over avconv for running the postprocessors')
    postproc.add_option(
        '--ffmpeg-location', '--avconv-location', metavar='PATH',
        dest='ffmpeg_location',
        help='Location of the ffmpeg/avconv binary; either the path to the binary or its containing directory.')
    postproc.add_option(
        '--exec',
        metavar='CMD', dest='exec_cmd',
        help='Execute a command on the file after downloading, similar to find\'s -exec syntax. Example: --exec \'adb push {} /sdcard/Music/ && rm {}\'')
    postproc.add_option(
        '--convert-subs', '--convert-subtitles',
        metavar='FORMAT', dest='convertsubtitles', default=None,
        help='Convert the subtitles to other format (currently supported: srt|ass|vtt)')

    parser.add_option_group(general)
    parser.add_option_group(network)
    parser.add_option_group(selection)
    parser.add_option_group(downloader)
    parser.add_option_group(filesystem)
    parser.add_option_group(thumbnail)
    parser.add_option_group(verbosity)
    parser.add_option_group(workarounds)
    parser.add_option_group(video_format)
    parser.add_option_group(subtitles)
    parser.add_option_group(authentication)
    parser.add_option_group(adobe_pass)
    parser.add_option_group(postproc)

    if overrideArguments is not None:
        opts, args = parser.parse_args(overrideArguments)
        if opts.verbose:
            write_string('[debug] Override config: ' + repr(overrideArguments) + '\n')
    else:
        def compat_conf(conf):
            if sys.version_info < (3,):
                return [a.decode(preferredencoding(), 'replace') for a in conf]
            return conf

        command_line_conf = compat_conf(sys.argv[1:])

        if '--ignore-config' in command_line_conf:
            system_conf = []
            user_conf = []
        else:
            system_conf = _readOptions('/etc/youtube-dl.conf')
            if '--ignore-config' in system_conf:
                user_conf = []
            else:
                user_conf = _readUserConf()
        argv = system_conf + user_conf + command_line_conf

        opts, args = parser.parse_args(argv)
        if opts.verbose:
            write_string('[debug] System config: ' + repr(_hide_login_info(system_conf)) + '\n')
            write_string('[debug] User config: ' + repr(_hide_login_info(user_conf)) + '\n')
            write_string('[debug] Command-line args: ' + repr(_hide_login_info(command_line_conf)) + '\n')

    return parser, opts, args
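
Both _readOptions and compat_conf above decode byte strings only on Python 2, where text-mode file contents and sys.argv arrive as bytes. A sketch of that shared pattern, with a hypothetical helper name and locale.getpreferredencoding standing in for youtube-dl's preferredencoding():

import locale
import sys

def to_text(value, encoding=None):
    # Python 2 delivers sys.argv entries and text-mode file contents as
    # byte strings; decode them with the preferred locale encoding.
    # Python 3 already gives str, so pass the value through unchanged.
    if sys.version_info < (3,):
        return value.decode(encoding or locale.getpreferredencoding(), 'replace')
    return value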

Example 33

Project: apilogs Source File: bin.py
def main(argv=None):

    if sys.version_info < (3, 0):
        sys.stdout = codecs.getwriter(locale.getpreferredencoding())(sys.stdout)

    argv = (argv or sys.argv)[1:]

    parser = argparse.ArgumentParser(usage=("%(prog)s [ get | groups | streams ]"))
    parser.add_argument("--version", action="version",
                        version="%(prog)s " + __version__)

    def add_common_arguments(parser):
        parser.add_argument("--aws-access-key-id",
                            dest="aws_access_key_id",
                            type=str,
                            default=None,
                            help="aws access key id")

        parser.add_argument("--aws-secret-access-key",
                            dest="aws_secret_access_key",
                            type=str,
                            default=None,
                            help="aws secret access key")

        parser.add_argument("--aws-session-token",
                            dest="aws_session_token",
                            type=str,
                            default=None,
                            help="aws session token")

        parser.add_argument("--profile",
                            dest="aws_profile",
                            type=str,
                            default=None,
                            help="aws profile")

        parser.add_argument("--aws-region",
                            dest="aws_region",
                            type=str,
                            default=os.environ.get('AWS_REGION', None),
                            help="aws region")

    def add_date_range_arguments(parser):
        parser.add_argument("-s", "--start",
                            type=str,
                            dest='start',
                            default='5m',
                            help="Start time")

        parser.add_argument("-e", "--end",
                            type=str,
                            dest='end',
                            help="End time")

    subparsers = parser.add_subparsers()

    # get
    get_parser = subparsers.add_parser('get', description='Get logs')
    get_parser.set_defaults(func="list_logs")
    add_common_arguments(get_parser)


    get_parser.add_argument("-a",
                            "--api-id",
                            dest='api_id',
                            help=("An API Gateway REST API ID"))

    get_parser.add_argument("-t",
                            "--stage",
                            dest='stage',
                            help=("An API Gateway stage name for the deployed API"))

    get_parser.add_argument("-f",
                            "--filter-pattern",
                            dest='filter_pattern',
                            help=("A valid CloudWatch Logs filter pattern to "
                                  "use for filtering the response. If not "
                                  "provided, all the events are matched."))

    get_parser.add_argument("-w",
                            "--watch",
                            action='store_true',
                            dest='watch',
                            help="Query for new log lines constantly")

    get_parser.add_argument("-G",
                            "--no-group",
                            action='store_false',
                            dest='output_group_enabled',
                            help="Do not display group name")

    get_parser.add_argument("-S",
                            "--no-stream",
                            action='store_false',
                            dest='output_stream_enabled',
                            help="Do not display stream name")

    get_parser.add_argument("--timestamp",
                            action='store_true',
                            dest='output_timestamp_enabled',
                            help="Add creation timestamp to the output")

    get_parser.add_argument("--ingestion-time",
                            action='store_true',
                            dest='output_ingestion_time_enabled',
                            help="Add ingestion time to the output")

    add_date_range_arguments(get_parser)

    get_parser.add_argument("--no-color",
                            action='store_false',
                            dest='color_enabled',
                            help="Do not color output")

    # groups
    groups_parser = subparsers.add_parser('groups', description='List groups')
    groups_parser.set_defaults(func="list_groups")
    add_common_arguments(groups_parser)

    # streams
    streams_parser = subparsers.add_parser('streams', description='List streams')
    streams_parser.set_defaults(func="list_streams")
    add_common_arguments(streams_parser)
    add_date_range_arguments(streams_parser)

    streams_parser.add_argument("log_group_name",
                                type=str,
                                help="log group name")

    # Parse input
    options, args = parser.parse_known_args(argv)

    if hasattr(options, 'api_id'):
        # build API Gateway log group name
        options.log_group_name = "API-Gateway-Execution-Logs_" + options.api_id + "/" + options.stage
        options.log_stream_name = "ALL"


    # print options
    # print args

    # Work around the fact that boto3 doesn't let you specify a profile
    # when you instantiate a client. We need --profile because that's
    # the API people are used to from the aws-cli.
    if getattr(options, 'aws_profile', None):
        os.environ['AWS_PROFILE'] = options.aws_profile

    try:
        logs = AWSLogs(**vars(options))
        if not hasattr(options, 'func'):
            parser.print_help()
            return 1
        getattr(logs, options.func)()
    except ClientError as exc:
        code = exc.response['Error']['Code']
        if code in (u'AccessDeniedException', u'ExpiredTokenException'):
            hint = exc.response['Error'].get('Message', 'AccessDeniedException')
            sys.stderr.write(colored("{0}\n".format(hint), "yellow"))
            return 4
        raise
    except exceptions.BaseAWSLogsException as exc:
        sys.stderr.write(colored("{0}\n".format(exc.hint()), "red"))
        return exc.code
    except Exception:
        import platform
        import traceback
        options = vars(options)
        options['aws_access_key_id'] = 'SENSITIVE'
        options['aws_secret_access_key'] = 'SENSITIVE'
        options['aws_session_token'] = 'SENSITIVE'
        options['aws_profile'] = 'SENSITIVE'
        sys.stderr.write("\n")
        sys.stderr.write("=" * 80)
        sys.stderr.write("\nYou've found a bug! Please, raise an issue attaching the following traceback\n")
        sys.stderr.write("https://github.com/jorgebastida/awslogs/issues/new\n")
        sys.stderr.write("-" * 80)
        sys.stderr.write("\n")
        sys.stderr.write("Version: {0}\n".format(__version__))
        sys.stderr.write("Python: {0}\n".format(sys.version))
        sys.stderr.write("boto3 version: {0}\n".format(boto3.__version__))
        sys.stderr.write("Platform: {0}\n".format(platform.platform()))
        sys.stderr.write("Config: {0}\n".format(options))
        sys.stderr.write("Args: {0}\n\n".format(sys.argv))
        sys.stderr.write(traceback.format_exc())
        sys.stderr.write("=" * 80)
        sys.stderr.write("\n")
        return 1

    return 0
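
The exception handler above assembles a bug-report footer from sys.version, boto3.__version__ and platform.platform(). A minimal standalone sketch of just that environment-reporting pattern (report_environment is an illustrative name, not part of awslogs):

import platform
import sys

def report_environment(stream=sys.stderr):
    # The same interpreter/platform details the handler above writes out.
    stream.write("Python: {0}\n".format(sys.version))
    stream.write("Version tuple: {0}\n".format(tuple(sys.version_info)))
    stream.write("Platform: {0}\n".format(platform.platform()))

report_environment()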

Example 34

Project: SMRT Source File: pescanner.py
    def collect(self):
        data = self.data
        out = []
        if data is None or len(data) == 0:
            out.append("Cannot read %s (maybe empty?)" % file)
            out.append("")
            return out

        try:
            pe = pefile.PE(data=data, fast_load=True)
            pe.parse_data_directories(directories=[
                pefile.DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_IMPORT'],
                pefile.DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_EXPORT'],
                pefile.DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_TLS'],
                pefile.DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_RESOURCE']])
        except Exception:
            out.append("Cannot parse %s (maybe not PE?)" % file)
            out.append("")
            return out

        # Meta Data
        out.append(self.header("Meta-data"))
        out.append("Size:      %d bytes" % len(data))
        out.append("Date:      %s" % self.get_timestamp(pe))

        exportdll = self.check_exportdll(pe)
        if len(exportdll):
            out.append("ExportDll: %s" % exportdll)
        (ep, name) = self.check_ep_section(pe)

        s = "EP:        %s (%s)" % (hex(ep+pe.OPTIONAL_HEADER.ImageBase), name)
        if name not in good_ep_sections:
            s += " [SUSPICIOUS]"
        out.append(s)

        try:
            if sys.version_info <= (2, 6):
                out.append("Type:      %s" % self.ms.buffer(data).decode('utf-8'))
            else:
                out.append("Type:      %s" % magic.from_buffer(data).decode('utf-8'))
        except Exception:
            out.append("Type:      data")

        out.append("MD5:       %s" % hashlib.md5(data).hexdigest())
        out.append("SHA1:      %s" % hashlib.sha1(data).hexdigest())
        out.append("SHA256:    %s" % hashlib.sha256(data).hexdigest())

        packers = self.check_packers(pe)
        if len(packers):
            out.append("Packers:   %s" % ','.join(packers))

        # Version Info
        verinfo = self.check_verinfo(pe)
        if len(verinfo):
            out.append(self.header("Version info"))
            out.append(verinfo)

        # Sections
        out.append(self.header("Sections"))
        out.append("%-10s %-12s %-12s %-12s %-12s" % ("Name", "VirtAddr", "VirtSize", "RawSize", "Entropy"))
        out.append("-" * 60)

        for sec in pe.sections:
            s = "%-10s %-12s %-12s %-12s %-12f" % (
                sec.Name.decode('utf-8').replace('\x00', ''),
                hex(sec.VirtualAddress),
                hex(sec.Misc_VirtualSize),
                hex(sec.SizeOfRawData),
                sec.get_entropy())
            if sec.SizeOfRawData == 0 or \
               (sec.get_entropy() > 0 and sec.get_entropy() < 1) or \
               sec.get_entropy() > 7:
                s += "[SUSPICIOUS]"
            out.append(s)

        # Resources
        resources = self.check_rsrc(pe)
        if len(resources):
            out.append(self.header("Resource entries"))
            out.append("%-18s %-12s %-12s Type" % ("Name", "RVA", "Size"))
            out.append("-" * 60)
            for rsrc in resources.keys():
                (name, rva, size, type) = resources[rsrc]
                out.append("%-18s %-12s %-12s %s" % (name, hex(rva), hex(size), type))

        # TLS Callbacks
        callbacks = self.check_tls(pe)
        if len(callbacks):
            out.append(self.header("TLS callbacks"))
            for cb in callbacks:
                out.append("    0x%x" % cb)

        # Exports
        exports = self.check_exports(pe)
        if len(exports):
            out.append(self.header("Exported Functions"))
            out.append("%-10s %-30s%s" % ("Ordinal", "Name", "Forwarder"))
            out.append("-" * 60)
            for exp in exports:
                out.append(exp)

        # Libraries
        libs = self.check_libs(pe)
        if len(libs):
            out.append(self.header("Import Libs"))
            for lib in libs:
                out.append(lib)

        # Imports
        imports = self.check_imports(pe)
        if len(imports):
            out.append(self.header("Imported Functions"))
            for imp in imports:
                out.append(imp)

        # Strings
        # results = []
        # patterns = ["[ -~]{2,}[\\\/][ -~]{2,}", "[ -~]{2,}\.[ -~]{2,}","\\\[ -~]{5,}","^[ -~]{5,}[\\\/]$","[ -~]+\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}[ -~]+"]

        # for pattern in patterns:
        #     regex = re.compile(pattern)
        #     results += regex.findall(data)
        # if len(results):
        #     out.append(self.header("Interesting Strings"))
        #     out += list(set(results))

        out.append("")
        return out
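
Example 34 gates its file-type probe on sys.version_info <= (2, 6). Since sys.version_info compares element-wise like any tuple, a full five-field value such as (2, 6, 0, 'final', 0) compares greater than (2, 6), so this particular cutoff effectively means "older than 2.6". A self-contained sketch of the idiom (the return strings merely stand in for the two magic APIs):

import sys

def describe_buffer(data):
    # version_info is a tuple, so a release cutoff is a direct comparison;
    # note that (2, 6, 0, 'final', 0) > (2, 6).
    if sys.version_info <= (2, 6):
        return "legacy magic API"   # stand-in for self.ms.buffer(data)
    return "modern magic API"       # stand-in for magic.from_buffer(data)

print(describe_buffer(b"MZ"))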

Example 35

Project: cpppo Source File: history_test.py
@pytest.mark.skipif( not has_pytz or not got_localzone, reason="Needs pytz and localzone" )
def test_history_timestamp():
    """Test timestamp, ensuring comparison deals in UTC only.  Supports testing in local timezones:
    
        Canada/Edmonton		-- A generic, ambiguous DST/non-DST timezone
        MST			-- A DST-specific non-DST timezone
        UTC			-- UTC

    """
    trtab			= ( string 
                                    if sys.version_info[0] < 3
                                    else str ).maketrans( ":-.", "   " )

    def utc_strp( loctime ):
        if '.' in loctime:
            unaware		= datetime.datetime.strptime( loctime, timestamp._fmt + ".%f" )
        else:
            unaware		= datetime.datetime.strptime( loctime, timestamp._fmt )
        return pytz.utc.localize( unaware )

    def utc_trns( loctime ):
        terms			= loctime.translate( trtab ).split()
        if len( terms ) == 7:
            # convert .123 into 123000 microseconds
            terms[6]               += '0' * ( 6 - len( terms[6] ))
        return datetime.datetime( *map( int, terms ), tzinfo=pytz.utc )

    # Basic millisecond hygiene.  Comparisons are by standard UTC format to 3 sub-second decimal
    # places of precision.  Unfortunately, the Python 2/3 strftime microsecond formatters are
    # different, so we don't use them.  If no precision is requested, we do NOT round; we truncate,
    # to avoid the surprise where manually formatting a UNIX value using strftime produces a
    # different second than formatting it using render() with no sub-second precision.
    assert timestamp( 1399326141.999836 ) >= timestamp( 1399326141.374836 )
    assert timestamp( 1399326141.999836 ).render( ms=False ) == '2014-05-05 21:42:21'
    assert timestamp( 1399326141.999836 ).render( ms=5 ) == '2014-05-05 21:42:21.99984'
    assert timestamp( 1399326141.999836 ).render() == '2014-05-05 21:42:22.000'

    # Type cast support
    assert abs( float( timestamp( 1399326141.999836 )) - 1399326141.999836 ) < 1e-6
    assert int( timestamp( 1399326141.999836 )) == 1399326141

    # Adjust timestamp default precision and comparison epsilon.
    save			= timestamp._precision,timestamp._epsilon
    try:
        ts			= timestamp( 1399326141.999836 )
        for p in range( 0, 7 ):
            timestamp._precision= p
            timestamp._epsilon	= 10**-p if p else 0

            assert ts.render( ms=True ) == {
                0: '2014-05-05 21:42:21', # Truncates at 0 digits of sub-second precision
                1: '2014-05-05 21:42:22.0',
                2: '2014-05-05 21:42:22.00',
                3: '2014-05-05 21:42:22.000',
                4: '2014-05-05 21:42:21.9998',
                5: '2014-05-05 21:42:21.99984',
                6: '2014-05-05 21:42:21.999836',
            }[timestamp._precision]
            # For p == 0, try exact precision.  1e-6 is the smallest delta that can be reliably
            # added to a typical UNIX timestamp (eg.  1399326141.999836) in a double and still
            # expect it to affect the value (can store 15-17 decimal digits of precision).
            s,l			= (timestamp._epsilon*f for f in (0.9,1.1)) if p else (0,10**-6)
            assert     ts == ts + s
            assert     ts == ts - s
            assert not(ts == ts + l)
            assert not(ts == ts - l)
            assert     ts != ts + l
            assert     ts != ts - l
            assert not(ts <  ts + s)
            assert not(ts <  ts - s)
            assert     ts <  ts + l
            assert not(ts <  ts - l)
            assert     ts <= ts + s
            assert     ts <= ts - s
            assert     ts <= ts + l
            assert not(ts <= ts - l)
            assert not(ts >  ts + s)
            assert not(ts >  ts - s)
            assert not(ts >  ts + l)
            assert     ts >  ts - l
            assert     ts >= ts + s
            assert     ts >= ts - s
            assert not(ts >= ts + l)
            assert     ts >= ts - l
    finally:
        timestamp._precision,timestamp._epsilon = save


    # Maintain DST specificity when rendering in DST-specific timezones?  Nope, only when using
    # specially constructed non-DST versions of timezones, when they are made available by pytz.
    timestamp.support_abbreviations( None, reset=True )

    assert timestamp.timezone_info('MST') == (pytz.timezone( 'MST' ),None)
    assert timestamp( 1399326141.999836 ).render(
        tzinfo='MST', ms=False )		== '2014-05-05 14:42:21 MST'

    # Get MST/MDT etc., and CET/CEST abbreviations
    timestamp.support_abbreviations( ['CA','Europe/Berlin'], reset=True )

    assert timestamp.timezone_info('MST') == (pytz.timezone( 'America/Edmonton' ),False)
    assert timestamp( 1399326141.999836 ).render(
        tzinfo='MST', ms=False )		== '2014-05-05 15:42:21 MDT'


    # $ TZ=UTC date --date=@1388559600
    # Wed Jan  1 07:00:00 UTC 2014
    # 1396531199
    # Thu Apr  3 07:19:59 MDT 2014
    assert '2014-01-02 03:04:55.123'.translate( trtab ) == '2014 01 02 03 04 55 123'

    cnt				= 10000
    beg				= timer()
    for _ in range( cnt ):
        utc1			= utc_strp( '2014-01-02 03:04:55.123' )
    dur1			= timer() - beg
    beg				= timer()
    for _ in range( cnt ):
        utc2			= utc_trns( '2014-01-02 03:04:55.123' )
    dur2			= timer() - beg
    beg				= timer()
    for _ in range( cnt ):
        utc3			= timestamp.datetime_from_string( '2014-01-02 03:04:55.123' )
    dur3			= timer() - beg
    assert utc1.strftime( timestamp._fmt ) \
        == utc2.strftime( timestamp._fmt ) \
        == utc3.strftime( timestamp._fmt ) == '2014-01-02 03:04:55'
    logging.detail( "strptime: %d/s, translate: %d/s, timestamp: %d/s", cnt/dur1, cnt/dur2, cnt/dur3 )

    now				= timer()
    assert timestamp( now ) < timestamp( now + 1 )

    # From a numeric timestamp
    ts				= timestamp( 1396531199 )
    assert ts.utc	== '2014-04-03 13:19:59.000' == str( ts )

    assert ts.local	in ( '2014-04-03 07:19:59 MDT',
                             '2014-04-03 06:19:59 MST',
                             '2014-04-03 13:19:59 UTC' )

    # From a string UTC time
    dt				= timestamp.datetime_from_string( '2014-01-01 07:00:00.0' )
    assert str( dt )	== '2014-01-01 07:00:00+00:00'
    assert repr( dt )	== 'datetime.datetime(2014, 1, 1, 7, 0, tzinfo=<UTC>)'
    #assert dt.strftime( '%s' ) != '1388559600' # !? (will fail if machine is in UTC timezone )
    #assert pytz.utc.normalize( dt ).strftime( '%s' ) != '1388559600' # !?
    assert 1388559559.999999 < timestamp.number_from_datetime( dt ) < 1388559600.000001 # ok
    ts				= timestamp( '2014-01-01 07:00:00.0' )
    assert  1388559559.999999 < ts.value < 1388559600.000001
    assert ts.utc	== '2014-01-01 07:00:00.000' == str( ts )
    assert ts.local	in ( '2014-01-01 00:00:00 MST',
                             '2014-01-01 07:00:00 UTC' )

    # OK, now try a UTC time where the local timezone is in MDT
    ts.utc			= '2014-04-01 07:00:00.000'
    assert ts.local	in ( '2014-04-01 01:00:00 MDT',
                             '2014-04-01 00:00:00 MST',
                             '2014-04-01 07:00:00 UTC' )

    # Make sure that local times are unambiguous over daylight savings time
    # Mar 9 02:00 -> 03:00    1394355540 == Mar 9 2014 01:59
    # Nov 2 02:00 -> 01:00    1414915140 == Nov 2 2014 01:59
    ts				= timestamp( 1394355540 )
    assert ts.local	in ( '2014-03-09 01:59:00 MST',
                             '2014-03-09 08:59:00 UTC' )
    ts			       += 61
    assert ts.local	in ( '2014-03-09 03:00:01 MDT',
                             '2014-03-09 02:00:01 MST',
                             '2014-03-09 09:00:01 UTC' )

    ts				= timestamp( 1414915140 )
    assert ts.local	in ( '2014-11-02 01:59:00 MDT',
                             '2014-11-02 00:59:00 MST',
                             '2014-11-02 07:59:00 UTC' )
    ts			       += 61
    assert ts.local	in ( '2014-11-02 01:00:01 MST',
                             '2014-03-09 02:00:01 MST',
                             '2014-11-02 08:00:01 UTC' )

    # Now try converting a few strings that have a specific timezone.  We can use either .utc =
    # ... or .local = ...; they just default to the UTC or (local) timezone, respectively.  Using a
    # DST-specific timezone such as MST/MDT, we can unambiguously specify whether a time is inside
    # or outside DST.
    try:
        ts.local		= '2014-03-09 02:00:01 America/Edmonton' # Just inside MDT 2014
        assert False, """Should have failed -- time doesn't exist during "spring ahead" """
    except Exception as exc:
        assert "NonExistentTimeError" in str( exc )
    ts.local			= '2014-03-09 03:00:01 MDT' # Just inside MDT 2014
    assert 1394355600.999999 < ts.value < 1394355601.000001
    assert ts.utc 	==   '2014-03-09 09:00:01.000' # MDT == UCT-6:00
    assert ts.local	in ( '2014-03-09 03:00:01 MDT',
                             '2014-03-09 02:00:01 MST',
                             '2014-03-09 09:00:01 UTC' )
    # However, we CAN use a specifically non-DST timezone to specify times non-existent in DST
    ts.local			= '2014-03-09 02:00:01 MST' # No such time in MDT!!
    assert 1394355600.999999 < ts.value < 1394355601.000001
    assert ts.utc	==   '2014-03-09 09:00:01.000'
    assert ts.local	in ( '2014-03-09 03:00:01 MDT',
                             '2014-03-09 02:00:01 MST',
                             '2014-03-09 09:00:01 UTC' )

    ts.local			= '2014-11-02 01:00:01 MST' # 1 second after the end of DST
    assert 1414915200.999999 < ts.value < 1414915201.000001
    assert ts.utc	==   '2014-11-02 08:00:01.000'
    assert ts.local	in ( '2014-11-02 01:00:01 MST',
                             '2014-11-02 00:59:59 MST',
                             '2014-11-02 08:00:01 UTC' )

    ts			       -= 2 # Go back 2 seconds, into DST
    assert ts.utc	==   '2014-11-02 07:59:59.000'
    assert ts.local	in ( '2014-11-02 01:59:59 MDT',
                             '2014-11-02 00:59:59 MST',
                             '2014-11-02 07:59:59 UTC' )

    ts.local			= '2014-11-02 01:59:58 MDT' # 2 seconds before end of DST
    assert 1414915197.999999 < ts.value < 1414915198.000001
    assert ts.utc	==   '2014-11-02 07:59:58.000'
    assert ts.local	in ( '2014-11-02 01:59:58 MDT',
                             '2014-11-02 00:59:58 MST',
                             '2014-11-02 07:59:58 UTC' )

    # Using a canonical timezone such as 'America/Edmonton', an "ambiguous" time (eg. during the
    # overlap in the fall) cannot be specified.  Using a DST-specific timezone, we can.
    try:
        ts.local		= '2014-11-02 01:00:01 America/Edmonton' # Inside DST?
    except Exception as exc:
        assert "AmbiguousTimeError" in str( exc )

    ts.local			= '2014-11-02 00:59:59 America/Edmonton' # 2 seconds before end of DST
    assert 1414911598.999999 < ts.value < 1414911599.000001
    assert ts.utc	==   '2014-11-02 06:59:59.000'
    assert ts.local	in ( '2014-11-02 00:59:59 MDT',
                             '2014-11-01 23:59:59 MST',
                             '2014-11-02 06:59:59 UTC' )

    after			= timestamp( '2014-11-02 01:02:03.123 MST' ) # (Nov 2 2014 -- 1:02 *after* DST ended)
    before			= timestamp( '2014-11-02 01:02:03.456 MDT' ) # (Nov 2 2014 --  :58 *before* DST ends)
    assert before < after
    assert before.utc	==   '2014-11-02 07:02:03.456'
    assert before.local	in ( '2014-11-02 01:02:03 MDT',
                             '2014-11-02 00:02:03 MST',
                             '2014-11-02 07:02:03 UTC' )
    assert after.utc	==   '2014-11-02 08:02:03.123'
    assert after.local	in ( '2014-11-02 01:02:03 MST',
                             '2014-11-02 08:02:03 UTC' )

    after			= timestamp( '2014-10-26 02:01:00.123 CET' )  # (Oct 26 2014 -- 0:01 *after* DST ended)
    before			= timestamp( '2014-10-26 02:01:00.456 CEST' ) # (Oct 26 2014 --  :59 *before* DST ends)
    assert before < after
    assert before.utc	==   '2014-10-26 00:01:00.456'
    assert before.local	in ( '2014-10-25 18:01:00 MDT',
                             '2014-10-25 17:01:00 MST',
                             '2014-10-26 00:01:00 UTC' )
    assert after.utc	==   '2014-10-26 01:01:00.123'
    assert after.local	in ( '2014-10-25 19:01:00 MDT',
                             '2014-10-25 18:01:00 MST',
                             '2014-10-26 01:01:00 UTC' )
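
The trtab construction above is the classic Python 2/3 maketrans shim: string.maketrans on Python 2, str.maketrans on Python 3, selected via sys.version_info[0]. The shim in isolation, assuming nothing about cpppo itself:

import sys

if sys.version_info[0] < 3:
    import string
    _maketrans = string.maketrans
else:
    _maketrans = str.maketrans

# Map the timestamp separators to spaces so the fields can be split().
trtab = _maketrans(":-.", "   ")
print('2014-01-02 03:04:55.123'.translate(trtab).split())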

Example 36

Project: invocations Source File: release.py
@task(aliases=['upload'])
def publish(c, sdist=True, wheel=False, index=None, sign=False, dry_run=False,
    directory=None, dual_wheels=False, alt_python=None):
    """
    Publish code to PyPI or index of choice.

    All parameters save ``dry_run`` and ``directory`` honor config settings of
    the same name, under the ``packaging`` tree. E.g. say
    ``.configure({'packaging': {'wheel': True}})`` to force building wheel
    archives by default.

    :param bool sdist:
        Whether to upload sdists/tgzs.

    :param bool wheel:
        Whether to upload wheels (requires the ``wheel`` package from PyPI).

    :param str index:
        Custom upload index URL.

        By default, uses whatever the invoked ``pip`` is configured to use.

    :param bool sign:
        Whether to sign the built archive(s) via GPG.

    :param bool dry_run:
        Skip actual publication step if ``True``.

        This also prevents cleanup of the temporary build/dist directories, so
        you can examine the build artifacts.

    :param str directory:
        Base directory within which will live the ``dist/`` and ``build/``
        directories.

        Defaults to a temporary directory which is cleaned up after the run
        finishes.

    :param bool dual_wheels:
        When ``True``, builds individual wheels for Python 2 and Python 3.

        Useful for situations where you can't build universal wheels, but still
        want to distribute for both interpreter versions.

        Requires that you have a useful ``python3`` (or ``python2``, if you're
        on Python 3 already) binary in your ``$PATH``. Also requires that this
        other python have the ``wheel`` package installed in its
        ``site-packages``; usually this will mean the global site-packages for
        that interpreter.

        See also the ``alt_python`` argument.

    :param str alt_python:
        Path to the 'alternate' Python interpreter to use when
        ``dual_wheels=True``.

        When ``None`` (the default) will be ``python3`` or ``python2``,
        depending on the currently active interpreter.
    """
    # Config hooks
    config = c.config.get('packaging', {})
    index = config.get('index', index)
    sign = config.get('sign', sign)
    dual_wheels = config.get('dual_wheels', dual_wheels)
    # Build, into controlled temp dir (avoids attempting to re-upload old
    # files)
    with tmpdir(skip_cleanup=dry_run, explicit=directory) as tmp:
        # Build default archives
        build(c, sdist=sdist, wheel=wheel, directory=tmp)
        # Build opposing interpreter archive, if necessary
        if dual_wheels:
            if not alt_python:
                alt_python = 'python3' if sys.version_info[0] == 2 else 'python2'
            build(c, sdist=False, wheel=True, directory=tmp, python=alt_python)
        # Obtain list of archive filenames, then ensure any wheels come first
        # so their improved metadata is what PyPI sees initially (otherwise, it
        # only honors the sdist's lesser data).
        archives = list(itertools.chain.from_iterable(
            glob(os.path.join(tmp, 'dist', '*.{0}'.format(extension)))
            for extension in ('whl', 'tar.gz')
        ))
        # Sign each archive in turn
        # TODO: twine has a --sign option; but the below is still nice insofar
        # as it lets us dry-run, generate for web upload when pypi's API is
        # being cranky, etc. Figure out which is better.
        if sign:
            prompt = "Please enter GPG passphrase for signing: "
            input_ = StringIO(getpass.getpass(prompt) + "\n")
            gpg_bin = find_gpg(c)
            if not gpg_bin:
                sys.exit("You need to have one of `gpg`, `gpg1` or `gpg2` installed to GPG-sign!") # noqa
            for archive in archives:
                cmd = "{0} --detach-sign -a --passphrase-fd 0 {{0}}".format(gpg_bin)
                c.run(cmd.format(archive), in_stream=input_)
                input_.seek(0) # So it can be replayed by subsequent iterations
        # Upload
        parts = ["twine", "upload"]
        if index:
            index_arg = "-r {0}".format(index)
        if index:
            parts.append(index_arg)
        paths = archives + [os.path.join(tmp, 'dist', "*.asc")]
        parts.extend(paths)
        cmd = " ".join(parts)
        if dry_run:
            print("Would publish via: {0}".format(cmd))
            print("Files that would be published:")
            c.run("ls -l {0}".format(" ".join(paths)))
        else:
            c.run(cmd)
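
For dual wheels, publish() picks the "opposite" interpreter purely from the running major version. The one-liner in isolation (the print is illustrative only):

import sys

# From Python 2, build the extra wheel with python3, and vice versa.
alt_python = 'python3' if sys.version_info[0] == 2 else 'python2'
print("second wheel would be built with:", alt_python)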

Example 37

Project: rpmlint Source File: SpecCheck.py
    def check_spec(self, pkg, spec_file):
        self._spec_file = spec_file
        spec_only = isinstance(pkg, Pkg.FakePkg)
        patches = {}
        applied_patches = []
        applied_patches_ifarch = []
        patches_auto_applied = False
        source_dir = False
        buildroot = False
        configure_linenum = None
        configure_cmdline = ""
        mklibname = False
        is_lib_pkg = False
        if_depth = 0
        ifarch_depth = -1
        current_section = 'package'
        buildroot_clean = {'clean': False, 'install': False}
        depscript_override = False
        depgen_disabled = False
        patch_fuzz_override = False
        indent_spaces = 0
        indent_tabs = 0
        section = {}
        # None == main package
        current_package = None
        package_noarch = {}

        is_utf8 = False
        if self._spec_file and use_utf8:
            if Pkg.is_utf8(self._spec_file):
                is_utf8 = True
            else:
                printError(pkg, "non-utf8-spec-file",
                           self._spec_name or self._spec_file)

        # gather info from spec lines

        pkg.current_linenum = 0

        nbsp = UNICODE_NBSP if is_utf8 else chr(0xA0)
        do_unicode = is_utf8 and sys.version_info[0] <= 2

        for line in Pkg.readlines(spec_file):

            pkg.current_linenum += 1

            if do_unicode:
                line = unicode(line, "utf-8", "replace")  # noqa false positive

            char = line.find(nbsp)
            if char != -1:
                printWarning(pkg, "non-break-space", "line %s, char %d" %
                             (pkg.current_linenum, char))

            section_marker = False
            for sec, regex in section_regexs.items():
                res = regex.search(line)
                if res:
                    current_section = sec
                    section_marker = True
                    section[sec] = section.get(sec, 0) + 1
                    if sec in ('package', 'files'):
                        rest = filelist_regex.sub('', line[res.end() - 1:])
                        res = pkgname_regex.search(rest)
                        if res:
                            current_package = res.group(1)
                        else:
                            current_package = None
                    break

            if section_marker:

                if not is_lib_pkg and lib_package_regex.search(line):
                    is_lib_pkg = True

                continue

            if current_section in ('prep', 'build') and \
                    contains_buildroot(line):
                printWarning(pkg, 'rpm-buildroot-usage', '%' + current_section,
                             line[:-1].strip())

            if make_check_regex.search(line) and current_section not in \
                    ('check', 'changelog', 'package', 'description'):
                printWarning(pkg, 'make-check-outside-check-section',
                             line[:-1])

            if current_section in buildroot_clean and \
                    not buildroot_clean[current_section] and \
                    contains_buildroot(line) and rm_regex.search(line):
                buildroot_clean[current_section] = True

            if ifarch_regex.search(line):
                if_depth = if_depth + 1
                ifarch_depth = if_depth

            if if_regex.search(line):
                if_depth = if_depth + 1

            if setup_regex.match(line):
                if not setup_q_regex.search(line):
                    # Don't warn if there's a -T without -a or -b
                    if setup_t_regex.search(line):
                        if setup_ab_regex.search(line):
                            printWarning(pkg, 'setup-not-quiet')
                    else:
                        printWarning(pkg, 'setup-not-quiet')
                if current_section != 'prep':
                    printWarning(pkg, 'setup-not-in-prep')
            elif autopatch_regex.search(line):
                patches_auto_applied = True
                if current_section != 'prep':
                    printWarning(pkg, '%autopatch-not-in-prep')
            else:
                res = autosetup_regex.search(line)
                if res:
                    if not autosetup_n_regex.search(res.group(1)):
                        patches_auto_applied = True
                    if current_section != 'prep':
                        printWarning(pkg, '%autosetup-not-in-prep')

            if endif_regex.search(line):
                if ifarch_depth == if_depth:
                    ifarch_depth = -1
                if_depth = if_depth - 1

            res = applied_patch_regex.search(line)
            if res:
                pnum = res.group(1) or 0
                for tmp in applied_patch_p_regex.findall(line) or [pnum]:
                    pnum = int(tmp)
                    applied_patches.append(pnum)
                    if ifarch_depth > 0:
                        applied_patches_ifarch.append(pnum)
            else:
                res = applied_patch_pipe_regex.search(line)
                if res:
                    pnum = int(res.group(1))
                    applied_patches.append(pnum)
                    if ifarch_depth > 0:
                        applied_patches_ifarch.append(pnum)
            if not res and not source_dir:
                res = source_dir_regex.search(line)
                if res:
                    source_dir = True
                    printError(pkg, "use-of-RPM_SOURCE_DIR")

            if configure_linenum:
                if configure_cmdline[-1] == "\\":
                    configure_cmdline = configure_cmdline[:-1] + line.strip()
                else:
                    res = configure_libdir_spec_regex.search(configure_cmdline)
                    if not res:
                        # Hack to get the correct (start of ./configure) line
                        # number displayed:
                        real_linenum = pkg.current_linenum
                        pkg.current_linenum = configure_linenum
                        printWarning(pkg, "configure-without-libdir-spec")
                        pkg.current_linenum = real_linenum
                    elif res.group(1):
                        res = re.match(hardcoded_library_paths, res.group(1))
                        if res:
                            printError(pkg, "hardcoded-library-path",
                                       res.group(1), "in configure options")
                    configure_linenum = None

            hashPos = line.find("#")

            if current_section != 'changelog':
                cfgPos = line.find('./configure')
                if cfgPos != -1 and (hashPos == -1 or hashPos > cfgPos):
                    # store line where it started
                    configure_linenum = pkg.current_linenum
                    configure_cmdline = line.strip()

            res = hardcoded_library_path_regex.search(line)
            if current_section != 'changelog' and res and not \
                    (biarch_package_regex.match(pkg.name) or
                     hardcoded_lib_path_exceptions_regex.search(
                         res.group(1).lstrip())):
                printError(pkg, "hardcoded-library-path", "in",
                           res.group(1).lstrip())

            if '%mklibname' in line:
                mklibname = True

            if current_section == 'package':

                # Would be cleaner to get sources and patches from the
                # specfile parsed in Python (see below), but we want to
                # catch %ifarch'd etc ones as well, and also catch these when
                # the specfile is not parseable.

                res = patch_regex.search(line)
                if res:
                    pnum = int(res.group(1) or 0)
                    patches[pnum] = res.group(2)

                res = obsolete_tags_regex.search(line)
                if res:
                    printWarning(pkg, "obsolete-tag", res.group(1))

                res = buildroot_regex.search(line)
                if res:
                    buildroot = True
                    if res.group(1).startswith('/'):
                        printWarning(pkg, 'hardcoded-path-in-buildroot-tag',
                                     res.group(1))

                res = buildarch_regex.search(line)
                if res:
                    if res.group(1) != "noarch":
                        printError(pkg,
                                   'buildarch-instead-of-exclusivearch-tag',
                                   res.group(1))
                    else:
                        package_noarch[current_package] = True

                res = packager_regex.search(line)
                if res:
                    printWarning(pkg, 'hardcoded-packager-tag', res.group(1))

                res = prefix_regex.search(line)
                if res:
                    if not res.group(1).startswith('%'):
                        printWarning(pkg, 'hardcoded-prefix-tag', res.group(1))

                res = prereq_regex.search(line)
                if res:
                    printError(pkg, 'prereq-use', res.group(2))

                res = buildprereq_regex.search(line)
                if res:
                    printError(pkg, 'buildprereq-use', res.group(1))

                if scriptlet_requires_regex.search(line):
                    printError(pkg, 'broken-syntax-in-scriptlet-requires',
                               line.strip())

                res = requires_regex.search(line)
                if res:
                    reqs = Pkg.parse_deps(res.group(1))
                    for req in unversioned(reqs):
                        if compop_regex.search(req):
                            printWarning(pkg,
                                         'comparison-operator-in-deptoken',
                                         req)

                res = provides_regex.search(line)
                if res:
                    provs = Pkg.parse_deps(res.group(1))
                    for prov in unversioned(provs):
                        printWarning(pkg, 'unversioned-explicit-provides',
                                     prov)
                        if compop_regex.search(prov):
                            printWarning(pkg,
                                         'comparison-operator-in-deptoken',
                                         prov)

                res = obsoletes_regex.search(line)
                if res:
                    obses = Pkg.parse_deps(res.group(1))
                    for obs in unversioned(obses):
                        printWarning(pkg, 'unversioned-explicit-obsoletes',
                                     obs)
                        if compop_regex.search(obs):
                            printWarning(pkg,
                                         'comparison-operator-in-deptoken',
                                         obs)

                res = conflicts_regex.search(line)
                if res:
                    confs = Pkg.parse_deps(res.group(1))
                    for conf in unversioned(confs):
                        if compop_regex.search(conf):
                            printWarning(pkg,
                                         'comparison-operator-in-deptoken',
                                         conf)

            if current_section == 'changelog':
                for match in AbstractCheck.macro_regex.findall(line):
                    res = re.match('%+', match)
                    if len(res.group(0)) % 2:
                        printWarning(pkg, 'macro-in-%changelog', match)
            else:
                if not depscript_override:
                    depscript_override = \
                        depscript_override_regex.search(line) is not None
                if not depgen_disabled:
                    depgen_disabled = \
                        depgen_disable_regex.search(line) is not None
                if not patch_fuzz_override:
                    patch_fuzz_override = \
                        patch_fuzz_override_regex.search(line) is not None

            if current_section == 'files':
                # TODO: check scriptlets for these too?
                if package_noarch.get(current_package) or \
                        (current_package not in package_noarch and
                         package_noarch.get(None)):
                    res = libdir_regex.search(line)
                    if res:
                        pkgname = current_package
                        if pkgname is None:
                            pkgname = '(main package)'
                        printWarning(pkg, 'libdir-macro-in-noarch-package',
                                     pkgname, line.rstrip())

            if not indent_tabs and '\t' in line:
                indent_tabs = pkg.current_linenum
            if not indent_spaces and indent_spaces_regex.search(line):
                indent_spaces = pkg.current_linenum

            # Check if egrep or fgrep is used
            if current_section not in \
                    ('package', 'changelog', 'description', 'files'):
                greps = deprecated_grep_regex.findall(line)
                if greps:
                    printWarning(pkg, "deprecated-grep", greps)

            # If not checking spec file only, we're checking one inside a
            # SRPM -> skip this check to avoid duplicate warnings (#167)
            if spec_only and VALID_GROUPS and \
               line.lower().startswith("group:"):
                group = line[6:].strip()
                if group not in VALID_GROUPS:
                    printWarning(pkg, 'non-standard-group', group)

            # Test if there are macros in comments
            if hashPos != -1 and \
                    (hashPos == 0 or line[hashPos - 1] in (" ", "\t")):
                for match in AbstractCheck.macro_regex.findall(
                        line[hashPos + 1:]):
                    res = re.match('%+', match)
                    if len(res.group(0)) % 2:
                        printWarning(pkg, 'macro-in-comment', match)

        # Last line read is not useful after this point
        pkg.current_linenum = None

        for sect in (x for x in buildroot_clean if not buildroot_clean[x]):
            printWarning(pkg, 'no-cleaning-of-buildroot', '%' + sect)

        if not buildroot:
            printWarning(pkg, 'no-buildroot-tag')

        for sec in ('prep', 'build', 'install', 'clean'):
            if not section.get(sec):
                printWarning(pkg, 'no-%%%s-section' % sec)
        for sec in ('changelog',):
            # prep, build, install, clean, check prevented by rpmbuild 4.4
            if section.get(sec, 0) > 1:
                printWarning(pkg, 'more-than-one-%%%s-section' % sec)

        if is_lib_pkg and not mklibname:
            printError(pkg, 'lib-package-without-%mklibname')

        if depscript_override and not depgen_disabled:
            printWarning(pkg, 'depscript-without-disabling-depgen')

        if patch_fuzz_override:
            printWarning(pkg, 'patch-fuzz-is-changed')

        if indent_spaces and indent_tabs:
            pkg.current_linenum = max(indent_spaces, indent_tabs)
            printWarning(pkg, 'mixed-use-of-spaces-and-tabs',
                         '(spaces: line %d, tab: line %d)' %
                         (indent_spaces, indent_tabs))
            pkg.current_linenum = None

        # process gathered info
        if not patches_auto_applied:
            for pnum, pfile in patches.items():
                if pnum in applied_patches_ifarch:
                    printWarning(pkg, "%ifarch-applied-patch",
                                 "Patch%d:" % pnum, pfile)
                if pnum not in applied_patches:
                    printWarning(pkg, "patch-not-applied",
                                 "Patch%d:" % pnum, pfile)

        # Rest of the checks require a real spec file
        if not self._spec_file:
            return

        # We'd like to parse the specfile only once using python bindings,
        # but it seems errors from rpmlib get logged to stderr and we can't
        # capture and print them nicely, so we do it once each way :P

        out = Pkg.getstatusoutput(('env', 'LC_ALL=C', 'rpm', '-q',
                                   '--qf=', '--specfile', self._spec_file))
        parse_error = False
        for line in out[1].splitlines():
            # No such file or dir hack: https://bugzilla.redhat.com/487855
            if 'No such file or directory' not in line:
                parse_error = True
                printError(pkg, 'specfile-error', line)

        if not parse_error:
            # grab sources and patches from parsed spec object to get
            # them with macros expanded for URL checking

            spec_obj = None
            try:
                ts = rpm.TransactionSet()
                spec_obj = ts.parseSpec(self._spec_file)
            except Exception:
                # errors logged above already
                pass
            if spec_obj:
                try:
                    # rpm < 4.8.0
                    sources = spec_obj.sources()
                except TypeError:
                    # rpm >= 4.8.0
                    sources = spec_obj.sources
                for src in sources:
                    (url, num, flags) = src
                    (scheme, netloc) = urlparse(url)[0:2]
                    if flags & 1:  # rpmspec.h, rpm.org ticket #123
                        srctype = "Source"
                    else:
                        srctype = "Patch"
                    tag = '%s%s' % (srctype, num)
                    if scheme and netloc:
                        info = self.check_url(pkg, tag, url)
                        if not info or not hasattr(pkg, 'files'):
                            continue
                        clen = info.get("Content-Length")
                        if clen is not None:
                            clen = int(clen)
                        cmd5 = info.get("Content-MD5")
                        if cmd5 is not None:
                            cmd5 = cmd5.lower()
                        if clen is not None or cmd5 is not None:
                            # Not using path from urlparse results to match how
                            # rpm itself parses the basename.
                            pkgfile = pkg.files().get(url.split("/")[-1])
                            if pkgfile:
                                if clen is not None and pkgfile.size != clen:
                                    printWarning(pkg, 'file-size-mismatch',
                                                 '%s = %s, %s = %s' %
                                                 (pkgfile.name, pkgfile.size,
                                                  url, clen))
                                # pkgfile.md5 could be some other digest than
                                # MD5, treat as MD5 only if it's 32 chars long
                                if cmd5 and len(pkgfile.md5) == 32 \
                                        and pkgfile.md5 != cmd5:
                                    printWarning(pkg, 'file-md5-mismatch',
                                                 '%s = %s, %s = %s' %
                                                 (pkgfile.name, pkgfile.md5,
                                                  url, cmd5))
                    elif srctype == "Source" and tarball_regex.search(url):
                        printWarning(pkg, 'invalid-url', '%s:' % tag, url)
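
check_spec() decodes each spec line to unicode only when sys.version_info[0] <= 2, since on Python 3 lines read in text mode are already str. A reduced sketch of that guard, independent of rpmlint (ensure_text is a made-up helper name):

import sys

def ensure_text(line):
    # On Python 2, lines read from a file are bytes and need an explicit
    # decode; on Python 3 they are already str.
    if sys.version_info[0] <= 2:
        return unicode(line, "utf-8", "replace")  # noqa: F821 (Python 2 only)
    return line

print(ensure_text("spec line"))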

Example 38

Project: scalyr-agent-2 Source File: connections.py
    def __init__(self, host="localhost", user=None, passwd="",
                 database=None, port=3306, unix_socket=None,
                 charset='', sql_mode=None,
                 read_default_file=None, conv=decoders, use_unicode=None,
                 client_flag=0, cursorclass=Cursor, init_command=None,
                 connect_timeout=None, ssl=None, read_default_group=None,
                 compress=None, named_pipe=None, no_delay=False,
                 autocommit=False, db=None):
        """
        Establish a connection to the MySQL database. Accepts several
        arguments:

        host: Host where the database server is located
        user: Username to log in as
        passwd: Password to use.
        database: Database to use, None to not use a particular one.
        port: MySQL port to use, default is usually OK.
        unix_socket: Optionally, you can use a unix socket rather than TCP/IP.
        charset: Charset you want to use.
        sql_mode: Default SQL_MODE to use.
        read_default_file: Specifies the my.cnf file to read these parameters from under the [client] section.
        conv: Decoders dictionary to use instead of the default one. This is used to provide custom marshalling of types. See converters.
        use_unicode: Whether or not to default to unicode strings. This option defaults to true for Py3k.
        client_flag: Custom flags to send to MySQL. Find potential values in constants.CLIENT.
        cursorclass: Custom cursor class to use.
        init_command: Initial SQL statement to run when connection is established.
        connect_timeout: Timeout before throwing an exception when connecting.
        ssl: A dict of arguments similar to mysql_ssl_set()'s parameters. For now the capath and cipher arguments are not supported.
        read_default_group: Group to read from in the configuration file.
        compress: Not supported
        named_pipe: Not supported
        no_delay: Disable Nagle's algorithm on the socket
        autocommit: Autocommit mode. None means use server default. (default: False)
        db: Alias for database. (for compatibility to MySQLdb)
        """

        if use_unicode is None and sys.version_info[0] > 2:
            use_unicode = True

        if db is not None and database is None:
            database = db

        if compress or named_pipe:
            raise NotImplementedError("compress and named_pipe arguments are not supported")

        if ssl and ('capath' in ssl or 'cipher' in ssl):
            raise NotImplementedError('ssl options capath and cipher are not supported')

        self.ssl = False
        if ssl:
            if not SSL_ENABLED:
                raise NotImplementedError("ssl module not found")
            self.ssl = True
            client_flag |= SSL
            for k in ('key', 'cert', 'ca'):
                v = None
                if k in ssl:
                    v = ssl[k]
                setattr(self, k, v)

        if read_default_group and not read_default_file:
            if sys.platform.startswith("win"):
                read_default_file = "c:\\my.ini"
            else:
                read_default_file = "/etc/my.cnf"

        if read_default_file:
            if not read_default_group:
                read_default_group = "client"

            cfg = configparser.RawConfigParser()
            cfg.read(os.path.expanduser(read_default_file))

            def _config(key, default):
                try:
                    return cfg.get(read_default_group, key)
                except Exception:
                    return default

            user = _config("user", user)
            passwd = _config("password", passwd)
            host = _config("host", host)
            database = _config("database", database)
            unix_socket = _config("socket", unix_socket)
            port = int(_config("port", port))
            charset = _config("default-character-set", charset)

        self.host = host
        self.port = port
        self.user = user or DEFAULT_USER
        self.password = passwd or ""
        self.db = database
        self.no_delay = no_delay
        self.unix_socket = unix_socket
        if charset:
            self.charset = charset
            self.use_unicode = True
        else:
            self.charset = DEFAULT_CHARSET
            self.use_unicode = False

        if use_unicode is not None:
            self.use_unicode = use_unicode

        self.encoding = charset_by_name(self.charset).encoding

        client_flag |= CAPABILITIES
        client_flag |= MULTI_STATEMENTS
        if self.db:
            client_flag |= CONNECT_WITH_DB
        self.client_flag = client_flag

        self.cursorclass = cursorclass
        self.connect_timeout = connect_timeout

        self._result = None
        self._affected_rows = 0
        self.host_info = "Not connected"

        #: specified autocommit mode. None means use server default.
        self.autocommit_mode = autocommit

        self.encoders = encoders  # Need for MySQLdb compatibility.
        self.decoders = conv
        self.sql_mode = sql_mode
        self.init_command = init_command
        self._connect()
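
The constructor defaults use_unicode by interpreter: an explicit caller value wins, otherwise it becomes True on Python 3 only. The idiom on its own (resolve_use_unicode is a placeholder, not part of the driver):

import sys

def resolve_use_unicode(use_unicode=None):
    # Respect an explicit choice; otherwise default to unicode strings on
    # Python 3, where str is unicode anyway.
    if use_unicode is None and sys.version_info[0] > 2:
        use_unicode = True
    return bool(use_unicode)

print(resolve_use_unicode())       # True under Python 3
print(resolve_use_unicode(False))  # explicit False is preserved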

Example 39

Project: scikit-learn Source File: kddcup99.py
def _fetch_brute_kddcup99(subset=None, data_home=None,
                          download_if_missing=True, random_state=None,
                          shuffle=False, percent10=False):

    """Load the kddcup99 dataset, downloading it if necessary.

    Parameters
    ----------
    subset : None, 'SA', 'SF', 'http', 'smtp'
        To return the corresponding classical subsets of kddcup 99.
        If None, return the entire kddcup 99 dataset.

    data_home : string, optional
        Specify another download and cache folder for the datasets. By default
        all scikit-learn data is stored in '~/scikit_learn_data' subfolders.

    download_if_missing : boolean, default=True
        If False, raise an IOError if the data is not locally available
        instead of trying to download the data from the source site.

    random_state : int, RandomState instance or None, optional (default=None)
        Random state for shuffling the dataset.
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    shuffle : bool, default=False
        Whether to shuffle dataset.

    percent10 : bool, default=False
        Whether to load only 10 percent of the data.

    Returns
    -------
    dataset : dict-like object with the following attributes:
        dataset.data : numpy array of shape (494021, 41)
            Each row corresponds to the 41 features in the dataset.
        dataset.target : numpy array of shape (494021,)
            Each value corresponds to one of the 21 attack types or to the
            label 'normal.'.
        dataset.DESCR : string
            Description of the kddcup99 dataset.

    """

    data_home = get_data_home(data_home=data_home)
    if sys.version_info[0] == 3:
        # The zlib compression format used by joblib is not compatible when
        # switching from Python 2 to Python 3, so use a separate folder
        # under Python 3:
        dir_suffix = "-py3"
    else:
        # Backward compat for Python 2 users
        dir_suffix = ""
    if percent10:
        kddcup_dir = join(data_home, "kddcup99_10" + dir_suffix)
    else:
        kddcup_dir = join(data_home, "kddcup99" + dir_suffix)
    samples_path = join(kddcup_dir, "samples")
    targets_path = join(kddcup_dir, "targets")
    available = exists(samples_path)

    if download_if_missing and not available:
        _mkdirp(kddcup_dir)
        URL_ = URL10 if percent10 else URL
        logger.warning("Downloading %s" % URL_)
        f = BytesIO(urlopen(URL_).read())

        dt = [('duration', int),
              ('protocol_type', 'S4'),
              ('service', 'S11'),
              ('flag', 'S6'),
              ('src_bytes', int),
              ('dst_bytes', int),
              ('land', int),
              ('wrong_fragment', int),
              ('urgent', int),
              ('hot', int),
              ('num_failed_logins', int),
              ('logged_in', int),
              ('num_compromised', int),
              ('root_shell', int),
              ('su_attempted', int),
              ('num_root', int),
              ('num_file_creations', int),
              ('num_shells', int),
              ('num_access_files', int),
              ('num_outbound_cmds', int),
              ('is_host_login', int),
              ('is_guest_login', int),
              ('count', int),
              ('srv_count', int),
              ('serror_rate', float),
              ('srv_serror_rate', float),
              ('rerror_rate', float),
              ('srv_rerror_rate', float),
              ('same_srv_rate', float),
              ('diff_srv_rate', float),
              ('srv_diff_host_rate', float),
              ('dst_host_count', int),
              ('dst_host_srv_count', int),
              ('dst_host_same_srv_rate', float),
              ('dst_host_diff_srv_rate', float),
              ('dst_host_same_src_port_rate', float),
              ('dst_host_srv_diff_host_rate', float),
              ('dst_host_serror_rate', float),
              ('dst_host_srv_serror_rate', float),
              ('dst_host_rerror_rate', float),
              ('dst_host_srv_rerror_rate', float),
              ('labels', 'S16')]
        DT = np.dtype(dt)

        file_ = GzipFile(fileobj=f, mode='r')
        Xy = []
        for line in file_.readlines():
            if six.PY3:
                line = line.decode()
            Xy.append(line.replace('\n', '').split(','))
        file_.close()
        print('extraction done')
        Xy = np.asarray(Xy, dtype=object)
        for j in range(42):
            Xy[:, j] = Xy[:, j].astype(DT[j])

        X = Xy[:, :-1]
        y = Xy[:, -1]
        # XXX bug when compress!=0:
        # (error: 'Incorrect data length while decompressing[...] the file
        #  could be corrupted.')

        joblib.dump(X, samples_path, compress=0)
        joblib.dump(y, targets_path, compress=0)

    try:
        X, y
    except NameError:
        X = joblib.load(samples_path)
        y = joblib.load(targets_path)

    if shuffle:
        X, y = shuffle_method(X, y, random_state=random_state)

    return Bunch(data=X, target=y, DESCR=__doc__)
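
Because joblib pickles written under Python 2 cannot be read back under Python 3, the loader keys its cache directory on the major version. A minimal sketch of the same layout decision (cache_dir_for is a hypothetical helper):

import os
import sys

def cache_dir_for(base, name):
    # Keep Python 2 and Python 3 caches apart; their on-disk pickle
    # formats are mutually incompatible here.
    suffix = "-py3" if sys.version_info[0] == 3 else ""
    return os.path.join(base, name + suffix)

print(cache_dir_for("scikit_learn_data", "kddcup99"))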

Example 40

Project: scikit-learn Source File: numpy_pickle.py
Function: dump
def dump(value, filename, compress=0, protocol=None, cache_size=None):
    """Persist an arbitrary Python object into one file.

    Parameters
    -----------
    value: any Python object
        The object to store to disk.
    filename: str or pathlib.Path
        The path of the file in which it is to be stored. The compression
        method corresponding to one of the supported filename extensions ('.z',
        '.gz', '.bz2', '.xz' or '.lzma') will be used automatically.
    compress: int from 0 to 9 or bool or 2-tuple, optional
        Optional compression level for the data. 0 or False is no compression.
        Higher value means more compression, but also slower read and
        write times. Using a value of 3 is often a good compromise.
        See the notes for more details.
        If compress is True, the compression level used is 3.
        If compress is a 2-tuple, the first element must be a string naming
        one of the supported compressors (e.g. 'zlib', 'gzip', 'bz2', 'lzma',
        'xz'), and the second element must be an integer from 0 to 9,
        corresponding to the compression level.
    protocol: positive int
        Pickle protocol, see pickle.dump documentation for more details.
    cache_size: positive int, optional
        This option is deprecated in 0.10 and has no effect.

    Returns
    -------
    filenames: list of strings
        The list of file names in which the data is stored. If
        compress is false, each array is stored in a different file.

    See Also
    --------
    joblib.load : corresponding loader

    Notes
    -----
    Memmapping on load cannot be used for compressed files. Thus
    using compression can significantly slow down loading. In
    addition, compressed files take extra memory during
    dump and load.

    """

    if Path is not None and isinstance(filename, Path):
        filename = str(filename)

    is_filename = isinstance(filename, _basestring)
    is_fileobj = hasattr(filename, "write")

    compress_method = 'zlib'  # zlib is the default compression method.
    if compress is True:
        # If compress is simply True, use compression level 3 by default.
        compress_level = 3
    elif isinstance(compress, tuple):
        # a 2-tuple was set in compress
        if len(compress) != 2:
            raise ValueError(
                'Compress argument tuple should contain exactly 2 elements: '
                '(compress method, compress level), you passed {0}'
                .format(compress))
        compress_method, compress_level = compress
    else:
        compress_level = compress

    if compress_level is not False and compress_level not in range(10):
        # Raising an error if an invalid compress level is given.
        raise ValueError(
            'Non valid compress level given: "{0}". Possible values are '
            '{1}.'.format(compress_level, list(range(10))))

    if compress_method not in _COMPRESSORS:
        # Raising an error if an unsupported compression method is given.
        raise ValueError(
            'Non valid compression method given: "{0}". Possible values are '
            '{1}.'.format(compress_method, _COMPRESSORS))

    if not is_filename and not is_fileobj:
        # People keep inverting arguments, and the resulting error is
        # incomprehensible
        raise ValueError(
            'Second argument should be a filename or a file-like object, '
            '%s (type %s) was given.'
            % (filename, type(filename))
        )

    if is_filename and not isinstance(compress, tuple):
        # If no explicit (method, level) tuple was passed and the filename
        # has a recognized extension, select the corresponding compressor.
        if filename.endswith('.z'):
            compress_method = 'zlib'
        elif filename.endswith('.gz'):
            compress_method = 'gzip'
        elif filename.endswith('.bz2'):
            compress_method = 'bz2'
        elif filename.endswith('.lzma'):
            compress_method = 'lzma'
        elif filename.endswith('.xz'):
            compress_method = 'xz'
        else:
            # no matching compression method found, we unset the variable to
            # be sure no compression level is set afterwards.
            compress_method = None

        if compress_method in _COMPRESSORS and compress_level == 0:
            # we choose a default compress_level of 3 in case it was not given
            # as an argument (using compress).
            compress_level = 3

    if not PY3_OR_LATER and compress_method in ('lzma', 'xz'):
        raise NotImplementedError("{0} compression is only available for "
                                  "python version >= 3.3. You are using "
                                  "{1}.{2}".format(compress_method,
                                                   sys.version_info[0],
                                                   sys.version_info[1]))

    if cache_size is not None:
        # Cache size is deprecated starting from version 0.10
        warnings.warn("Please do not set 'cache_size' in joblib.dump, "
                      "this parameter has no effect and will be removed. "
                      "You used 'cache_size={0}'".format(cache_size),
                      DeprecationWarning, stacklevel=2)

    if compress_level != 0:
        with _write_fileobject(filename, compress=(compress_method,
                                                   compress_level)) as f:
            NumpyPickler(f, protocol=protocol).dump(value)
    elif is_filename:
        with open(filename, 'wb') as f:
            NumpyPickler(f, protocol=protocol).dump(value)
    else:
        NumpyPickler(filename, protocol=protocol).dump(value)

    # If the target container is a file object, nothing is returned.
    if is_fileobj:
        return

    # For compatibility, the list of created filenames (e.g. with one element
    # after 0.10.0) is returned by default.
    return [filename]
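
A short usage sketch of the API documented above; the file names are placeholders, and the comments follow the code shown here (the extension selects the compressor, and a zero level is bumped to 3 when an extension is recognized):

import joblib

obj = {'weights': [0.1, 0.2, 0.3]}
joblib.dump(obj, 'model.pkl')                        # no compression
joblib.dump(obj, 'model.pkl.gz')                     # gzip from the extension, level 3
joblib.dump(obj, 'model.pkl.z', compress=True)       # zlib, level 3
joblib.dump(obj, 'model.pkl', compress=('zlib', 6))  # explicit method and level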

Example 41

Project: PokemonGo-Bot-Desktop Source File: dbtables.py
Function: select
    def __Select(self, table, columns, conditions):
        """__Select() - Used to implement Select and Delete (above)
        Returns a dictionary keyed on rowids containing dicts
        holding the row data for columns listed in the columns param
        that match the given conditions.
        * conditions is a dictionary keyed on column names
        containing callable conditions expecting the data string as an
        argument and returning a boolean.
        """
        # check the validity of each column name
        if not table in self.__tablecolumns:
            self.__load_column_info(table)
        if columns is None:
            columns = self.__tablecolumns[table]
        for column in (columns + conditions.keys()):
            if not self.__tablecolumns[table].count(column):
                raise TableDBError, "unknown column: %r" % (column,)

        # keyed on rows that match so far, containing dicts keyed on
        # column names containing the data for that row and column.
        matching_rowids = {}
        # keys are rowids that do not match
        rejected_rowids = {}

        # attempt to sort the conditions in such a way as to minimize full
        # column lookups
        def cmp_conditions(atuple, btuple):
            a = atuple[1]
            b = btuple[1]
            if type(a) is type(b):

                # Needed for python 3. "cmp" vanished in 3.0.1
                def cmp(a, b) :
                    if a==b : return 0
                    if a<b : return -1
                    return 1

                if isinstance(a, PrefixCond) and isinstance(b, PrefixCond):
                    # longest prefix first
                    return cmp(len(b.prefix), len(a.prefix))
                if isinstance(a, LikeCond) and isinstance(b, LikeCond):
                    # longest likestr first
                    return cmp(len(b.likestr), len(a.likestr))
                return 0
            if isinstance(a, ExactCond):
                return -1
            if isinstance(b, ExactCond):
                return 1
            if isinstance(a, PrefixCond):
                return -1
            if isinstance(b, PrefixCond):
                return 1
            # leave all unknown condition callables alone as equals
            return 0

        if sys.version_info < (2, 6) :
            conditionlist = conditions.items()
            conditionlist.sort(cmp_conditions)
        else :  # Insertion Sort. Please, improve
            conditionlist = []
            for i in conditions.items() :
                for j, k in enumerate(conditionlist) :
                    r = cmp_conditions(k, i)
                    if r == 1 :
                        conditionlist.insert(j, i)
                        break
                else :
                    conditionlist.append(i)

        # Apply conditions to column data to find what we want
        cur = self.db.cursor()
        column_num = -1
        for column, condition in conditionlist:
            column_num = column_num + 1
            searchkey = _search_col_data_key(table, column)
            # speedup: don't linearly search columns within the loop
            if column in columns:
                savethiscolumndata = 1  # save the data for return
            else:
                savethiscolumndata = 0  # data only used for selection

            try:
                key, data = cur.set_range(searchkey)
                while key[:len(searchkey)] == searchkey:
                    # extract the rowid from the key
                    rowid = key[-_rowid_str_len:]

                    if not rowid in rejected_rowids:
                        # if no condition was specified or the condition
                        # succeeds, add row to our match list.
                        if not condition or condition(data):
                            if not rowid in matching_rowids:
                                matching_rowids[rowid] = {}
                            if savethiscolumndata:
                                matching_rowids[rowid][column] = data
                        else:
                            if rowid in matching_rowids:
                                del matching_rowids[rowid]
                            rejected_rowids[rowid] = rowid

                    key, data = cur.next()

            except db.DBError, dberror:
                if dberror.args[0] != db.DB_NOTFOUND:
                    raise
                continue

        cur.close()

        # we're done selecting rows, garbage collect the reject list
        del rejected_rowids

        # extract any remaining desired column data from the
        # database for the matching rows.
        if len(columns) > 0:
            for rowid, rowdata in matching_rowids.items():
                for column in columns:
                    if column in rowdata:
                        continue
                    try:
                        rowdata[column] = self.db.get(
                            _data_key(table, column, rowid))
                    except db.DBError, dberror:
                        if sys.version_info < (2, 6) :
                            if dberror[0] != db.DB_NOTFOUND:
                                raise
                        else :
                            if dberror.args[0] != db.DB_NOTFOUND:
                                raise
                        rowdata[column] = None

        # return the matches
        return matching_rowids
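
A hedged sketch of the conditions contract described in the docstring, assuming the module's public Select wrapper and a bsdTableDB instance named tabledb (both hypothetical here); any callable taking the stored string and returning a boolean qualifies:

results = tabledb.Select(
    'people',                      # hypothetical table name
    ['name', 'age'],               # columns to return
    conditions={
        'name': PrefixCond('Jo'),            # module-provided condition class
        'age': lambda data: int(data) >= 21  # a plain callable also works
    })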

Example 42

Project: pymo Source File: test_recno.py
    def test01_basic(self):
        d = db.DB()

        get_returns_none = d.set_get_returns_none(2)
        d.set_get_returns_none(get_returns_none)

        d.open(self.filename, db.DB_RECNO, db.DB_CREATE)

        for x in letters:
            recno = d.append(x * 60)
            self.assertIsInstance(recno, int)
            self.assertGreaterEqual(recno, 1)
            if verbose:
                print recno,

        if verbose: print

        stat = d.stat()
        if verbose:
            pprint(stat)

        for recno in range(1, len(d)+1):
            data = d[recno]
            if verbose:
                print data

            self.assertIsInstance(data, str)
            self.assertEqual(data, d.get(recno))

        try:
            data = d[0]  # This should raise a KeyError!?!?!
        except db.DBInvalidArgError, val:
            if sys.version_info < (2, 6) :
                self.assertEqual(val[0], db.EINVAL)
            else :
                self.assertEqual(val.args[0], db.EINVAL)
            if verbose: print val
        else:
            self.fail("expected exception")

        # test that has_key raises DB exceptions (fixed in pybsddb 4.3.2)
        try:
            d.has_key(0)
        except db.DBError, val:
            pass
        else:
            self.fail("has_key did not raise a proper exception")

        try:
            data = d[100]
        except KeyError:
            pass
        else:
            self.fail("expected exception")

        try:
            data = d.get(100)
        except db.DBNotFoundError, val:
            if get_returns_none:
                self.fail("unexpected exception")
        else:
            self.assertEqual(data, None)

        keys = d.keys()
        if verbose:
            print keys
        self.assertIsInstance(keys, list)
        self.assertIsInstance(keys[0], int)
        self.assertEqual(len(keys), len(d))

        items = d.items()
        if verbose:
            pprint(items)
        self.assertIsInstance(items, list)
        self.assertIsInstance(items[0], tuple)
        self.assertEqual(len(items[0]), 2)
        self.assertIsInstance(items[0][0], int)
        self.assertIsInstance(items[0][1], str)
        self.assertEqual(len(items), len(d))

        self.assertTrue(d.has_key(25))

        del d[25]
        self.assertFalse(d.has_key(25))

        d.delete(13)
        self.assertFalse(d.has_key(13))

        data = d.get_both(26, "z" * 60)
        self.assertEqual(data, "z" * 60, 'was %r' % data)
        if verbose:
            print data

        fd = d.fd()
        if verbose:
            print fd

        c = d.cursor()
        rec = c.first()
        while rec:
            if verbose:
                print rec
            rec = c.next()

        c.set(50)
        rec = c.current()
        if verbose:
            print rec

        c.put(-1, "a replacement record", db.DB_CURRENT)

        c.set(50)
        rec = c.current()
        self.assertEqual(rec, (50, "a replacement record"))
        if verbose:
            print rec

        rec = c.set_range(30)
        if verbose:
            print rec

        # test that non-existent key lookups work (and that
        # DBC_set_range doesn't have a memleak under valgrind)
        rec = c.set_range(999999)
        self.assertEqual(rec, None)
        if verbose:
            print rec

        c.close()
        d.close()

        d = db.DB()
        d.open(self.filename)
        c = d.cursor()

        # put a record beyond the consecutive end of the recno's
        d[100] = "way out there"
        self.assertEqual(d[100], "way out there")

        try:
            data = d[99]
        except KeyError:
            pass
        else:
            self.fail("expected exception")

        try:
            d.get(99)
        except db.DBKeyEmptyError, val:
            if get_returns_none:
                self.fail("unexpected DBKeyEmptyError exception")
            else:
                if sys.version_info < (2, 6) :
                    self.assertEqual(val[0], db.DB_KEYEMPTY)
                else :
                    self.assertEqual(val.args[0], db.DB_KEYEMPTY)
                if verbose: print val
        else:
            if not get_returns_none:
                self.fail("expected exception")

        rec = c.set(40)
        while rec:
            if verbose:
                print rec
            rec = c.next()

        c.close()
        d.close()
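
The tests above repeatedly gate on sys.version_info < (2, 6) to decide between val[0] and val.args[0]; on any interpreter from 2.6 onward the gate is unnecessary, as this small sketch shows:

try:
    raise KeyError('EINVAL')
except KeyError as err:
    # .args holds the constructor arguments on Python >= 2.6 and all of
    # Python 3; indexing the exception itself (err[0]) only worked on
    # older 2.x releases.
    code = err.args[0]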

Example 43

Project: ipwhois Source File: elastic_search.py
def insert(input_ip='', update=True, expires=7, depth=1):

    if update:

        try:
            # Only update if older than x days.
            tmp = es.search(
                index='ipwhois',
                doc_type='base',
                body={
                    'query': {
                        'bool': {
                            'must': [{
                                'range': {
                                    'updated': {
                                        'gt': 'now-{0}d'.format(expires)
                                    }
                                }
                            }, {
                                'term': {
                                    'query': str(input_ip)
                                }
                            }]
                        }
                    }
                }
            )

            if len(tmp['hits']['hits']) > 0:

                return

        # A generic exception is raised, unfortunately.
        except Exception as e:
            print(e)
            pass

    # Perform the RDAP lookup for the input IP address, retrieving all
    # entities up to depth.
    result = IPWhois(input_ip)
    ret = result.lookup_rdap(depth=depth)

    tmp_objects = ret['objects'].items()

    for ent_k, ent_v in tmp_objects:

        if update:

            try:

                # Only update if older than 7 days.
                es_tmp = es.search(
                    index='ipwhois',
                    doc_type='entity',
                    body={
                        'query': {
                            'bool': {
                                'must': [
                                    {
                                        'range': {
                                            'updated': {
                                                'gt': 'now-{0}d'.format(expires)
                                            }
                                        }
                                    },
                                    {
                                        'term': {
                                            'handle': str(ent_k)
                                        }
                                    }
                                ]
                            }
                        }
                    }
                )

                if len(es_tmp['hits']['hits']) > 0:

                    continue

            # A generic exception is raised, unfortunately.
            except Exception as e:
                print(e)
                pass

        ent = ent_v

        if sys.version_info >= (2, 7):

            # Iterate the contact addresses.
            for addr_k, addr_v in enumerate(ent_v['contact']['address']):

                try:

                    # Attempt to translate the contact address to geo
                    # coordinates via geopy.
                    location = GEOLOCATOR.geocode(addr_v['value'].replace(
                        '\n', ' '))

                    # Add the geo coordinates for the contact address.
                    ent['contact']['address'][addr_k]['geo'] = {
                        'lat': location.latitude,
                        'lon': location.longitude
                    }

                except (AttributeError, KeyError, GeocoderQueryError,
                        GeocoderTimedOut):

                    pass

        # Set the entity updated timestamp.
        ent['updated'] = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')

        if update:

            try:

                ent_search = es.search(
                    index='ipwhois',
                    doc_type='entity',
                    body={
                        'query': {
                            'match': {
                                'handle': ent['handle']
                            }
                        }
                    }
                )

                for hit in ent_search['hits']['hits']:

                    es.delete(index='ipwhois', doc_type='entity',
                              id=hit['_id'])

            except KeyError:

                pass

        # Index the entity in elasticsearch.
        es.index(index='ipwhois', doc_type='entity', body=ent)

        # Refresh the index for searching duplicates.
        es.indices.refresh(index='ipwhois')

    # Don't need the objects key since that data has been entered as the
    # entities doc_type.
    del ret['objects']

    try:

        # Get the network ISO country code
        cc = ret['network']['country']

        # Add the geo coordinates for the country, defined in GEO_COORD.json.
        ret['network']['country_geo'] = {
            'lat': GEO_COORD[cc]['latitude'],
            'lon': GEO_COORD[cc]['longitude']
        }

        # Set the network country name.
        ret['network']['country_name'] = COUNTRIES[cc]

    except KeyError:

        pass

    try:

        # Get the MaxMind geo data for the query.
        # I do not redistribute the GeoLite2 database, download
        # GeoLite2-City.mmdb from:
        # https://dev.maxmind.com/geoip/geoip2/geolite2/
        mm_reader = geoip2.database.Reader(str(CUR_DIR) +
                                           '/data/GeoLite2-City.mmdb')

        # Query the database.
        mm_response = mm_reader.city(ret['query'])

        # Set the JSON geo data.
        ret['query_geo'] = {
            'lat': mm_response.location.latitude,
            'lon': mm_response.location.longitude
        }
        ret['query_country_name'] = COUNTRIES[mm_response.country.iso_code]

    # Generic exception. Need to determine all raised and update handling.
    # geoip2.errors.AddressNotFoundError, TypeError, etc.
    except Exception as e:

        print(e)
        pass

    # Set the base updated timestamp.
    ret['updated'] = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')

    if update:

        try:

            ip_search = es.search(
                index='ipwhois',
                doc_type='base',
                body={
                    'query': {
                        'match': {
                            'query': ret['query']
                        }
                    }
                }
            )

            for hit in ip_search['hits']['hits']:

                es.delete(index='ipwhois', doc_type='base', id=hit['_id'])

        except KeyError:

            pass

    # Index the base in elasticsearch.
    es.index(index='ipwhois', doc_type='base', body=ret)

    # Refresh the index for searching duplicates.
    es.indices.refresh(index='ipwhois')
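
A hedged sketch of the core lookup this function wraps (the IP address is a placeholder); the surrounding Elasticsearch indexing additionally needs the module-level es client configured:

from ipwhois import IPWhois

result = IPWhois('74.125.225.229')
ret = result.lookup_rdap(depth=1)
# 'network' carries the registry data used above for geo enrichment.
print(ret['network']['country'])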

Example 44

Project: ipwhois Source File: test_utils.py
    def test_unique_addresses(self):

        self.assertRaises(ValueError, unique_addresses)

        input_data = (
            'You can have IPs like 74.125.225.229, or 2001:4860:4860::8888'
            'Put a port on the end 74.125.225.229:80 or for IPv6: '
            '[2001:4860:4860::8888]:443 or even networks like '
            '74.125.0.0/16 and 2001:4860::/32.'
        )

        expected_result = {
            '74.125.225.229': {'count': 2, 'ports': {'80': 1}},
            '2001:4860::/32': {'count': 1, 'ports': {}},
            '74.125.0.0/16': {'count': 1, 'ports': {}},
            '2001:4860:4860::8888': {'count': 2, 'ports': {'443': 1}}
        }

        self.assertEquals(unique_addresses(input_data), expected_result)

        data_dir = path.dirname(__file__)
        fp = str(data_dir) + '/rdap.json'

        # Expected result is different on 2.x vs 3.x, possible issues with
        # ipaddr vs ipaddress output. Investigation pending...
        if sys.version_info >= (3, 3):

            fp_expected_result = {
                '74.125.225.0/24': {'count': 1, 'ports': {}},
                '62.239.0.0/16': {'count': 1, 'ports': {}},
                '2001:43f8:7b0:ffff:ffff:ffff:ffff:ffff':
                    {'count': 1, 'ports': {}},
                '210.0.0.0': {'count': 1, 'ports': {}},
                '196.11.240.0/23': {'count': 1, 'ports': {}},
                '2001:240:10c:1::ca20:9d1d': {'count': 2, 'ports': {}},
                '196.11.240.215': {'count': 2, 'ports': {}},
                '62.239.237.0/32': {'count': 1, 'ports': {}},
                '210.107.0.0/17': {'count': 6, 'ports': {}},
                '2001:4860::/32': {'count': 1, 'ports': {}},
                '210.107.73.73': {'count': 2, 'ports': {}},
                '210.107.0.0': {'count': 2, 'ports': {}},
                '2001:200::/23': {'count': 2, 'ports': {}},
                '2001:240:ffff:ffff:ffff:ffff:ffff:ffff':
                    {'count': 1, 'ports': {}},
                '210.255.255.255': {'count': 1, 'ports': {}},
                '2001:43f8:7b0::': {'count': 3, 'ports': {}},
                '196.255.255.255': {'count': 1, 'ports': {}},
                '2001:240::/32': {'count': 6, 'ports': {}},
                '196.0.0.0': {'count': 1, 'ports': {}},
                '2001:240::': {'count': 1, 'ports': {}},
                '196.11.246.255': {'count': 2, 'ports': {}},
                '196.11.239.0': {'count': 2, 'ports': {}},
                '2001:4200::/23': {'count': 1, 'ports': {}},
                '2a00:2380::/25': {'count': 1, 'ports': {}},
                '200.57.128.0/20': {'count': 1, 'ports': {}},
                '62.239.237.255': {'count': 1, 'ports': {}},
                '2001:4860:4860::8888': {'count': 10, 'ports': {}},
                '2001:4860::': {'count': 2, 'ports': {}},
                '2001:4860:ffff:ffff:ffff:ffff:ffff:ffff':
                    {'count': 1, 'ports': {}},
                '74.125.225.229': {'count': 8, 'ports': {}},
                '210.107.127.255': {'count': 2, 'ports': {}},
                '200.57.141.161': {'count': 7, 'ports': {}},
                '62.239.237.255/32': {'count': 1, 'ports': {}},
                '2801:10:c000::': {'count': 7, 'ports': {}},
                '2a00:2381:ffff::1': {'count': 4, 'ports': {}},
                '62.239.237.0': {'count': 1, 'ports': {}},
                '62.239.237.1': {'count': 4, 'ports': {}},
                '210.0.0.0/8': {'count': 1, 'ports': {}}
            }

            self.assertEquals(unique_addresses(file_path=fp),
                              fp_expected_result)

        else:

            fp_expected_result = {
                '196.11.239.0': {'count': 2, 'ports': {}},
                '2a00:2380::/25': {'count': 1, 'ports': {}},
                '2a00:2381:ffff::/6': {'count': 1, 'ports': {}},
                '2001:4860:4860::8888': {'count': 10, 'ports': {}},
                '200.57.128.0/20': {'count': 1, 'ports': {}},
                '2001:4860::/32': {'count': 1, 'ports': {}},
                '210.107.0.0': {'count': 2, 'ports': {}},
                '2001:4200::/23': {'count': 1, 'ports': {}},
                '2001:43f8:7b0::/4': {'count': 2, 'ports': {}},
                '196.11.240.0/23': {'count': 1, 'ports': {}},
                '210.107.73.73': {'count': 2, 'ports': {}},
                '2001:4860:ffff:ffff:ffff:ffff:ffff:ffff': {
                    'count': 1, 'ports': {}},
                '210.0.0.0/8': {'count': 1, 'ports': {}},
                '2a00:2381:ffff:0:ffff:ffff:ffff:ffff/12': {
                    'count': 1, 'ports': {}},
                '210.107.127.255': {'count': 2, 'ports': {}},
                '2a00:2381:ffff::1': {'count': 4, 'ports': {}},
                '210.107.0.0/17': {'count': 6, 'ports': {}},
                '2a00:2381:ffff::/12': {'count': 1, 'ports': {}},
                '2001:240::/32': {'count': 6, 'ports': {}},
                '62.239.0.0/16': {'count': 1, 'ports': {}},
                '2801:10:c000::': {'count': 7, 'ports': {}},
                '2001:43f8:7b0::': {'count': 3, 'ports': {}},
                '62.239.237.0': {'count': 1, 'ports': {}},
                '62.239.237.1': {'count': 4, 'ports': {}},
                '196.11.246.255': {'count': 2, 'ports': {}},
                '74.125.225.229': {'count': 8, 'ports': {}},
                '196.255.255.255': {'count': 1, 'ports': {}},
                '210.0.0.0': {'count': 1, 'ports': {}},
                '200.57.141.161': {'count': 7, 'ports': {}},
                '210.255.255.255': {'count': 1, 'ports': {}},
                '2001:4860::': {'count': 2, 'ports': {}},
                '62.239.237.255/32': {'count': 1, 'ports': {}},
                '196.0.0.0': {'count': 1, 'ports': {}},
                '2001:240:10c:1::ca20:9d1d': {'count': 2, 'ports': {}},
                '2001:240::': {'count': 1, 'ports': {}},
                '74.125.225.0/24': {'count': 1, 'ports': {}},
                '196.11.240.215': {'count': 2, 'ports': {}},
                '62.239.237.255': {'count': 1, 'ports': {}},
                '2001:200::/23': {'count': 2, 'ports': {}},
                '62.239.237.0/32': {'count': 1, 'ports': {}},
                '2001:240:ffff:ffff:ffff:ffff:ffff:ffff': {
                    'count': 1, 'ports': {}},
                '2001:43f8:7b0:ffff:ffff:ffff:ffff:ffff': {
                    'count': 1, 'ports': {}}}

            self.assertEqual(unique_addresses(file_path=fp),
                             fp_expected_result)
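
A small usage sketch of the function under test, assuming it is importable from ipwhois.utils as the suite suggests; the sample text is made up:

from ipwhois.utils import unique_addresses

text = 'Connections from 74.125.225.229:80 and 2001:4860:4860::8888'
stats = unique_addresses(text)
# Expected shape, per the assertions above: counts plus per-port tallies.
print(stats['74.125.225.229']['ports'])  # {'80': 1}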

Example 45

Project: Tornado-MySQL Source File: connections.py
Function: init
    def __init__(self, host="localhost", user=None, password="",
                 database=None, port=3306, unix_socket=None,
                 charset='', sql_mode=None,
                 read_default_file=None, conv=decoders, use_unicode=None,
                 client_flag=0, cursorclass=Cursor, init_command=None,
                 connect_timeout=None, ssl=None, read_default_group=None,
                 compress=None, named_pipe=None, no_delay=False,
                 autocommit=False, db=None, passwd=None, io_loop=None):
        """
        Establish a connection to the MySQL database. Accepts several
        arguments:

        host: Host where the database server is located
        user: Username to log in as
        password: Password to use.
        database: Database to use, None to not use a particular one.
        port: MySQL port to use, default is usually OK.
        unix_socket: Optionally, you can use a unix socket rather than TCP/IP.
        charset: Charset you want to use.
        sql_mode: Default SQL_MODE to use.
        read_default_file:
            Specifies the my.cnf file to read these parameters from, under the [client] section.
        conv:
            Decoders dictionary to use instead of the default one.
            This is used to provide custom marshalling of types. See converters.
        use_unicode:
            Whether or not to default to unicode strings.
            This option defaults to true for Py3k.
        client_flag: Custom flags to send to MySQL. Find potential values in constants.CLIENT.
        cursorclass: Custom cursor class to use.
        init_command: Initial SQL statement to run when connection is established.
        connect_timeout: Timeout before throwing an exception when connecting.
        ssl:
            A dict of arguments similar to mysql_ssl_set()'s parameters.
            For now the capath and cipher arguments are not supported.
        read_default_group: Group to read from in the configuration file.
        compress: Not supported
        named_pipe: Not supported
        no_delay: Disable Nagle's algorithm on the socket
        autocommit: Autocommit mode. None means use server default. (default: False)
        io_loop: Tornado IOLoop

        db: Alias for database. (for compatibility to MySQLdb)
        passwd: Alias for password. (for compatibility to MySQLdb)
        """
        self.io_loop = io_loop or ioloop.IOLoop.current()

        if use_unicode is None and sys.version_info[0] > 2:
            use_unicode = True

        if db is not None and database is None:
            database = db
        if passwd is not None and not password:
            password = passwd

        if compress or named_pipe:
            raise NotImplementedError("compress and named_pipe arguments are not supported")

        if ssl and ('capath' in ssl or 'cipher' in ssl):
            raise NotImplementedError('ssl options capath and cipher are not supported')

        self.ssl = False
        if ssl:
            if not SSL_ENABLED:
                raise NotImplementedError("ssl module not found")
            self.ssl = True
            client_flag |= CLIENT.SSL
            for k in ('key', 'cert', 'ca'):
                v = None
                if k in ssl:
                    v = ssl[k]
                setattr(self, k, v)

        if read_default_group and not read_default_file:
            if sys.platform.startswith("win"):
                read_default_file = "c:\\my.ini"
            else:
                read_default_file = "/etc/my.cnf"

        if read_default_file:
            if not read_default_group:
                read_default_group = "client"

            cfg = configparser.RawConfigParser()
            cfg.read(os.path.expanduser(read_default_file))

            def _config(key, default):
                try:
                    return cfg.get(read_default_group, key)
                except Exception:
                    return default

            user = _config("user", user)
            password = _config("password", password)
            host = _config("host", host)
            database = _config("database", database)
            unix_socket = _config("socket", unix_socket)
            port = int(_config("port", port))
            charset = _config("default-character-set", charset)

        self.host = host
        self.port = port
        self.user = user or DEFAULT_USER
        self.password = password or ""
        self.db = database
        self.no_delay = no_delay
        self.unix_socket = unix_socket
        if charset:
            self.charset = charset
            self.use_unicode = True
        else:
            self.charset = DEFAULT_CHARSET
            self.use_unicode = False

        if use_unicode is not None:
            self.use_unicode = use_unicode

        self.encoding = charset_by_name(self.charset).encoding

        client_flag |= CLIENT.CAPABILITIES | CLIENT.MULTI_STATEMENTS
        if self.db:
            client_flag |= CLIENT.CONNECT_WITH_DB
        self.client_flag = client_flag

        self.cursorclass = cursorclass
        self.connect_timeout = connect_timeout

        self._result = None
        self._affected_rows = 0
        self.host_info = "Not connected"

        #: specified autocommit mode. None means use server default.
        self.autocommit_mode = autocommit

        self.encoders = encoders  # Need for MySQLdb compatibility.
        self.decoders = conv
        self.sql_mode = sql_mode
        self.init_command = init_command

Example 46

Project: sfepy Source File: test_install.py
Function: main
def main():
    parser = ArgumentParser(description=__doc__,
                            formatter_class=RawDescriptionHelpFormatter)
    parser.add_argument('--version', action='version', version='%(prog)s')
    options = parser.parse_args()

    fd = open('test_install.log', 'w')
    fd.close()

    if sys.version_info[0] < 3:
        cmd = 'python'

    else:
        cmd = 'python3'

    eok = 0

    t0 = time.time()

    out, err = check_output('%s ./script/blockgen.py' % cmd)
    eok += report(out, '...', -2, 1, '...done')

    out, err = check_output('%s ./script/cylindergen.py' % cmd)
    eok += report(out, '...', -2, 1, '...done')

    out, err = check_output('%s ./script/convert_mesh.py meshes/3d/cylinder.vtk out.mesh' % cmd)
    eok += report(out, '...', -2, 1, '...done')

    out, err = check_output('%s ./script/tile_periodic_mesh.py -r 2,2 meshes/elements/2_4_2.mesh out-per.mesh' % cmd)
    eok += report(out, '...', -2, 1, 'done.')

    out, err = check_output('%s ./script/extract_surface.py meshes/various_formats/octahedron.node -' % cmd)
    eok += report(out, '...', -2, 0, '1185')

    out, err = check_output('%s ./simple.py examples/diffusion/poisson.py' % cmd)
    eok += report(out, '...', -2, 5, '1.173819e-16', eps=1e-15)

    out, err = check_output("""%s ./simple.py -c "ebc_2 : {'name' : 't2', 'region' : 'Gamma_Right', 'dofs' : {'t.0' : -5.0}}" examples/diffusion/poisson.py""" %cmd)
    eok += report(out, '...', -2, 5, '2.308051e-16', eps=1e-15)

    out, err = check_output('%s ./simple.py examples/diffusion/poisson_iga.py' % cmd)
    eok += report(out, '...', -2, 5, '3.373487e-15', eps=1e-14)

    out, err = check_output('%s ./simple.py examples/navier_stokes/stokes.py' % cmd)
    eok += report(out, '...', -2, 5, '1.210678e-13', eps=1e-11)

    out, err = check_output('%s ./simple.py examples/diffusion/poisson_parametric_study.py' % cmd)
    eok += report(out, '...', -2, 5, '1.606408e-14', eps=1e-13)

    out, err = check_output('%s ./simple.py examples/linear_elasticity/its2D_3.py' % cmd)
    eok += report(out, '...', -23, 5, '3.964886e-12', eps=1e-11)
    eok += report(out, '...', -3, 4, '2.58660e+01', eps=1e-5)

    out, err = check_output('%s ./simple.py examples/linear_elasticity/linear_elastic.py --format h5' % cmd)
    eok += report(out, '...', -2, 5, '4.638192e-18', eps=1e-15)

    out, err = check_output('%s ./extractor.py -d cylinder.h5' % cmd)
    eok += report(out, '...', -2, 1, '...done')

    out, err = check_output('%s ./postproc.py -n --no-offscreen -o cylinder.png cylinder.h5' % cmd)
    eok += report(out, '...', -3, 2, 'cylinder.png...')

    out, err = check_output('%s ./phonon.py examples/phononic/band_gaps.py' % cmd)
    eok += report(out, '...', -7, 2, '208.54511594')
    eok += report(out, '...', -6, 1, '116309.22337295]')

    out, err = check_output('%s ./phonon.py examples/phononic/band_gaps.py --phase-velocity' % cmd)
    eok += report(out, '...', -2, 3, '4.1894123')
    eok += report(out, '...', -2, 4, '2.62055608]')

    out, err = check_output('%s ./phonon.py examples/phononic/band_gaps.py -d' % cmd)
    eok += report(out, '...', -6, 1, '[0,')

    out, err = check_output('%s ./phonon.py examples/phononic/band_gaps_rigid.py' % cmd)
    eok += report(out, '...', -7, 2, '4.58709531e+01')
    eok += report(out, '...', -6, 1, '1.13929200e+05]')

    out, err = check_output('%s ./schroedinger.py --hydrogen' % cmd)
    eok += report(out, '...', -4, -2, '-0.01913506', eps=1e-4)

    out, err = check_output('%s ./homogen.py examples/homogenization/perfusion_micro.py' % cmd)
    eok += report2(out, '...', ['computing EpA', 'computing PA_3',
                                'computing GA', 'computing EmA',
                                'computing KA'])

    out, err = check_output('%s examples/homogenization/rs_correctors.py -n' % cmd)
    eok += report(out, '...', -2, -1, '1.644e-01]]')

    out, err = check_output('%s examples/large_deformation/compare_elastic_materials.py -n' % cmd)
    eok += report(out, '...', -2, 5, '1.068759e-14', eps=1e-13)

    out, err = check_output('%s examples/linear_elasticity/linear_elastic_interactive.py' % cmd)
    eok += report(out, '...', -8, 0, '1.62128841139e-14', eps=1e-13)

    out, err = check_output('%s examples/linear_elasticity/modal_analysis.py' % cmd)
    eok += report(out, '...', -12, 5, '12142.11470773', eps=1e-13)

    out, err = check_output('%s examples/multi_physics/thermal_electric.py' % cmd)
    eok += report(out, '...', -3, 5, '2.612933e-14', eps=1e-13)

    out, err = check_output('mpiexec -n 2 %s examples/diffusion/poisson_parallel_interactive.py output-parallel -2 --silent -ksp_monitor' % cmd)
    eok += report(out, '...', -2, 4, '8.021313824020e-07', eps=1e-6)

    out, err = check_output('mpiexec -n 2 %s examples/multi_physics/biot_parallel_interactive.py output-parallel -2 --silent -ksp_monitor' % cmd)
    eok += report(out, '...', -2, 4, '3.787214380277e-09', eps=1e-8)

    t1 = time.time()

    out, err = check_output('%s ./run_tests.py' % cmd)
    tok, failed = report_tests(out, return_item=True)
    tok = {True : 'ok', False : 'fail'}[tok]

    t2 = time.time()

    fd = open('test_install_times.log', 'a+')
    fd.write('%s: examples: %.2f [s] (%d), tests: %.2f [s] (%s: %s)\n'
             % (time.ctime(t0), t1 - t0, eok, t2 - t1, tok, failed))
    fd.close()
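
The version gate at the top of main() guesses a binary name from the major version; a common alternative that avoids the guess is to reuse the interpreter that is already running:

import sys

cmd = sys.executable  # e.g. '/usr/bin/python3', whatever launched this script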

Example 47

Project: astral Source File: dictconfig.py
Function: configure
    def configure(self):
        """Do the configuration."""

        config = self.config
        if 'version' not in config:
            raise ValueError("dictionary doesn't specify a version")
        if config['version'] != 1:
            raise ValueError("Unsupported version: %s" % config['version'])
        incremental = config.pop('incremental', False)
        EMPTY_DICT = {}
        logging._acquireLock()
        try:
            if incremental:
                handlers = config.get('handlers', EMPTY_DICT)
                # incremental handler config only if handler name
                # ties in to logging._handlers (Python 2.7)
                if sys.version_info[:2] == (2, 7):
                    for name in handlers:
                        if name not in logging._handlers:
                            raise ValueError('No handler found with '
                                             'name %r'  % name)
                        else:
                            try:
                                handler = logging._handlers[name]
                                handler_config = handlers[name]
                                level = handler_config.get('level', None)
                                if level:
                                    handler.setLevel(_checkLevel(level))
                            except StandardError, e:
                                raise ValueError('Unable to configure handler '
                                                 '%r: %s' % (name, e))
                loggers = config.get('loggers', EMPTY_DICT)
                for name in loggers:
                    try:
                        self.configure_logger(name, loggers[name], True)
                    except StandardError, e:
                        raise ValueError('Unable to configure logger '
                                         '%r: %s' % (name, e))
                root = config.get('root', None)
                if root:
                    try:
                        self.configure_root(root, True)
                    except StandardError, e:
                        raise ValueError('Unable to configure root '
                                         'logger: %s' % e)
            else:
                disable_existing = config.pop('disable_existing_loggers', True)
                
                logging._handlers.clear()
                del logging._handlerList[:]
                    
                # Do formatters first - they don't refer to anything else
                formatters = config.get('formatters', EMPTY_DICT)
                for name in formatters:
                    try:
                        formatters[name] = self.configure_formatter(
                                                            formatters[name])
                    except StandardError, e:
                        raise ValueError('Unable to configure '
                                         'formatter %r: %s' % (name, e))
                # Next, do filters - they don't refer to anything else, either
                filters = config.get('filters', EMPTY_DICT)
                for name in filters:
                    try:
                        filters[name] = self.configure_filter(filters[name])
                    except StandardError, e:
                        raise ValueError('Unable to configure '
                                         'filter %r: %s' % (name, e))

                # Next, do handlers - they refer to formatters and filters
                # As handlers can refer to other handlers, sort the keys
                # to allow a deterministic order of configuration
                handlers = config.get('handlers', EMPTY_DICT)
                for name in sorted(handlers):
                    try:
                        handler = self.configure_handler(handlers[name])
                        handler.name = name
                        handlers[name] = handler
                    except StandardError, e:
                        raise ValueError('Unable to configure handler '
                                         '%r: %s' % (name, e))
                # Next, do loggers - they refer to handlers and filters
                
                #we don't want to lose the existing loggers,
                #since other threads may have pointers to them.
                #existing is set to contain all existing loggers,
                #and as we go through the new configuration we
                #remove any which are configured. At the end,
                #what's left in existing is the set of loggers
                #which were in the previous configuration but
                #which are not in the new configuration.
                root = logging.root
                existing = root.manager.loggerDict.keys()
                #The list needs to be sorted so that we can
                #avoid disabling child loggers of explicitly
                #named loggers. With a sorted list it is easier
                #to find the child loggers.
                existing.sort()
                #We'll keep the list of existing loggers
                #which are children of named loggers here...
                child_loggers = []
                #now set up the new ones...
                loggers = config.get('loggers', EMPTY_DICT)
                for name in loggers:
                    if name in existing:
                        i = existing.index(name)
                        prefixed = name + "."
                        pflen = len(prefixed)
                        num_existing = len(existing)
                        i = i + 1 # look at the entry after name
                        while (i < num_existing) and\
                              (existing[i][:pflen] == prefixed):
                            child_loggers.append(existing[i])
                            i = i + 1
                        existing.remove(name)
                    try:
                        self.configure_logger(name, loggers[name])
                    except StandardError, e:
                        raise ValueError('Unable to configure logger '
                                         '%r: %s' % (name, e))
                    
                #Disable any old loggers. There's no point deleting
                #them as other threads may continue to hold references
                #and by disabling them, you stop them doing any logging.
                #However, don't disable children of named loggers, as that's
                #probably not what was intended by the user.
                for log in existing:
                    logger = root.manager.loggerDict[log]
                    if log in child_loggers:
                        logger.level = logging.NOTSET
                        logger.handlers = []
                        logger.propagate = True
                    elif disable_existing:
                        logger.disabled = True
    
                # And finally, do the root logger
                root = config.get('root', None)
                if root:
                    try:
                        self.configure_root(root)                        
                    except StandardError, e:
                        raise ValueError('Unable to configure root '
                                         'logger: %s' % e)
        finally:
            logging._releaseLock()
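
This vendored configurator mirrors the standard library's logging.config.dictConfig (available since Python 2.7 and 3.2); a minimal configuration it accepts looks like this:

import logging
import logging.config

logging.config.dictConfig({
    'version': 1,  # mandatory, checked first by configure()
    'formatters': {'plain': {'format': '%(levelname)s %(message)s'}},
    'handlers': {'console': {'class': 'logging.StreamHandler',
                             'formatter': 'plain'}},
    'root': {'level': 'INFO', 'handlers': ['console']},
})
logging.getLogger(__name__).info('configured')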

Example 48

Project: pinguino-ide Source File: intel_hex.py
    def write_hex_file(self, f, write_start_addr=True):
        """Write data to file f in HEX format.

        @param  f                   filename or file-like object for writing
        @param  write_start_addr    enable or disable writing start address
                                    record to file (enabled by default).
                                    If there is no start address in obj, nothing
                                    will be written regardless of this setting.
        """
        fwrite = getattr(f, "write", None)
        if fwrite:
            fobj = f
            fclose = None
        else:
            fobj = open(f, 'w')
            fwrite = fobj.write
            fclose = fobj.close

        # Translation table for uppercasing hex ascii string.
        # timeit shows that using hexstr.translate(table)
        # is faster than hexstr.upper():
        # 0.452ms vs. 0.652ms (translate vs. upper)
        if sys.version_info[0] >= 3:
            table = bytes(range(256)).upper()
        else:
            table = ''.join(chr(i).upper() for i in range(256))

        # start address record if any
        if self.start_addr and write_start_addr:
            keys = self.start_addr.keys()
            keys.sort()
            bin = array('B', asbytes('\0'*9))
            if keys == ['CS','IP']:
                # Start Segment Address Record
                bin[0] = 4      # reclen
                bin[1] = 0      # offset msb
                bin[2] = 0      # offset lsb
                bin[3] = 3      # rectyp
                cs = self.start_addr['CS']
                bin[4] = (cs >> 8) & 0x0FF
                bin[5] = cs & 0x0FF
                ip = self.start_addr['IP']
                bin[6] = (ip >> 8) & 0x0FF
                bin[7] = ip & 0x0FF
                bin[8] = (-sum(bin)) & 0x0FF    # chksum
                fwrite(':' +
                       asstr(hexlify(bin.tostring()).translate(table)) +
                       '\n')
            elif keys == ['EIP']:
                # Start Linear Address Record
                bin[0] = 4      # reclen
                bin[1] = 0      # offset msb
                bin[2] = 0      # offset lsb
                bin[3] = 5      # rectyp
                eip = self.start_addr['EIP']
                bin[4] = (eip >> 24) & 0x0FF
                bin[5] = (eip >> 16) & 0x0FF
                bin[6] = (eip >> 8) & 0x0FF
                bin[7] = eip & 0x0FF
                bin[8] = (-sum(bin)) & 0x0FF    # chksum
                fwrite(':' +
                       asstr(hexlify(bin.tostring()).translate(table)) +
                       '\n')
            else:
                if fclose:
                    fclose()
                raise InvalidStartAddressValueError(start_addr=self.start_addr)

        # data
        addresses = self._buf.keys()
        addresses.sort()
        addr_len = len(addresses)
        if addr_len:
            minaddr = addresses[0]
            maxaddr = addresses[-1]

            if maxaddr > 65535:
                need_offset_record = True
            else:
                need_offset_record = False
            high_ofs = 0

            cur_addr = minaddr
            cur_ix = 0

            while cur_addr <= maxaddr:
                if need_offset_record:
                    bin = array('B', asbytes('\0'*7))
                    bin[0] = 2      # reclen
                    bin[1] = 0      # offset msb
                    bin[2] = 0      # offset lsb
                    bin[3] = 4      # rectyp
                    high_ofs = int(cur_addr>>16)
                    b = divmod(high_ofs, 256)
                    bin[4] = b[0]   # msb of high_ofs
                    bin[5] = b[1]   # lsb of high_ofs
                    bin[6] = (-sum(bin)) & 0x0FF    # chksum
                    fwrite(':' +
                           asstr(hexlify(bin.tostring()).translate(table)) +
                           '\n')

                while True:
                    # produce one record
                    low_addr = cur_addr & 0x0FFFF
                    # chain_len off by 1
                    chain_len = min(15, 65535-low_addr, maxaddr-cur_addr)

                    # search continuous chain
                    stop_addr = cur_addr + chain_len
                    if chain_len:
                        ix = bisect_right(addresses, stop_addr,
                                          cur_ix,
                                          min(cur_ix+chain_len+1, addr_len))
                        chain_len = ix - cur_ix     # real chain_len
                        # there could be small holes in the chain
                        # but we will catch them by try-except later
                        # so for big continuous files we will work
                        # at maximum possible speed
                    else:
                        chain_len = 1               # real chain_len

                    bin = array('B', asbytes('\0'*(5+chain_len)))
                    b = divmod(low_addr, 256)
                    bin[1] = b[0]   # msb of low_addr
                    bin[2] = b[1]   # lsb of low_addr
                    bin[3] = 0          # rectype
                    try:    # if there is small holes we'll catch them
                        for i in range(chain_len):
                            bin[4+i] = self._buf[cur_addr+i]
                    except KeyError:
                        # we catch a hole so we should shrink the chain
                        chain_len = i
                        bin = bin[:5+i]
                    bin[0] = chain_len
                    bin[4+chain_len] = (-sum(bin)) & 0x0FF    # chksum
                    fwrite(':' +
                           asstr(hexlify(bin.tostring()).translate(table)) +
                           '\n')

                    # adjust cur_addr/cur_ix
                    cur_ix += chain_len
                    if cur_ix < addr_len:
                        cur_addr = addresses[cur_ix]
                    else:
                        cur_addr = maxaddr + 1
                        break
                    high_addr = int(cur_addr>>16)
                    if high_addr > high_ofs:
                        break

        # end-of-file record
        fwrite(":00000001FF\n")
        if fclose:
            fclose()
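
The uppercasing translation table built near the top of write_hex_file can be exercised on its own; this standalone sketch shows the Python 3 branch:

import sys
from binascii import hexlify

if sys.version_info[0] >= 3:
    table = bytes(range(256)).upper()  # identity map, ASCII letters uppercased
    print(hexlify(b'\xde\xad\xbe\xef').translate(table))  # b'DEADBEEF'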

Example 49

Project: commissaire-mvp Source File: script.py
def main():  # pragma: no cover
    """
    Main script entry point.
    """
    from commissaire.cherrypy_plugins.store import StorePlugin
    from commissaire.cherrypy_plugins.investigator import InvestigatorPlugin
    from commissaire.cherrypy_plugins.watcher import WatcherPlugin

    epilog = ('Example: ./commissaire -e http://127.0.0.1:2379'
              ' -k http://127.0.0.1:8080')

    parser = argparse.ArgumentParser(epilog=epilog)

    try:
        args = parse_args(parser)
    except Exception:
        _, ex, _ = exception.raise_if_not(Exception)
        parser.error(ex)

    found_logger_config = False
    for logger_path in (
            '/etc/commissaire/logger.json', './conf/logger.json'):
        if os.path.isfile(logger_path):
            with open(logger_path, 'r') as logging_default_cfg:
                logging.config.dictConfig(
                    json.loads(logging_default_cfg.read()))
                found_logger_config = True
            logging.warn('No logger configuration in Etcd. Using defaults '
                         'at {0}'.format(logger_path))
    if not found_logger_config:
        parser.error(
            'Unable to find any logging configuration. Exiting ...')

    cherrypy.server.unsubscribe()
    # Disable autoreloading and use our logger
    cherrypy.config.update({'log.screen': False,
                            'log.access_file': '',
                            'log.error_file': '',
                            'engine.autoreload.on': False})

    new_ssl_adapter_cls = type(
        "CustomClientCertBuiltinSSLAdapter",
        (ClientCertBuiltinSSLAdapter,),
        {"verify_location": args.tls_clientverifyfile}
    )

    if sys.version_info < (3, 0):
        from cherrypy.wsgiserver.wsgiserver2 import ssl_adapters
    else:
        from cherrypy.wsgiserver.wsgiserver3 import ssl_adapters
    ssl_adapters['builtin_client'] = new_ssl_adapter_cls

    server = cherrypy._cpserver.Server()
    server.socket_host = args.listen_interface
    server.socket_port = args.listen_port
    server.thread_pool = 10

    if bool(args.tls_keyfile) ^ bool(args.tls_certfile):
        parser.error(
            'Both a keyfile and certfile must be '
            'given for commissaire server TLS. Exiting ...')
    elif bool(args.tls_clientverifyfile) and not bool(args.tls_certfile):
        parser.error(
            'If a client verify file is given a TLS keyfile and '
            'certfile must be given as well. Exiting ...')

    if args.tls_keyfile and args.tls_certfile:
        server.ssl_module = 'builtin_client'
        server.ssl_certificate = args.tls_certfile
        server.ssl_private_key = args.tls_keyfile
        logging.info('Commissaire server TLS will be enabled.')
    server.subscribe()

    # Handle UNIX signals (SIGTERM, SIGHUP, SIGUSR1)
    if hasattr(cherrypy.engine, 'signal_handler'):
        cherrypy.engine.signal_handler.subscribe()

    # Configure the store plugin before starting it.
    store_plugin = StorePlugin(cherrypy.engine)
    store_manager = store_plugin.get_store_manager()

    # Configure store handlers from user data.
    #
    # FIXME The configuration format got too complicated to easily parse
    #       comma-separated key-value pairs so we punted and switched to
    #       JSON format. The authentication CLI options need reworked to
    #       keep the input formats consistent.
    if len(args.register_store_handler) == 0:
        # Order is significant; Kubernetes must be first.
        args.register_store_handler = [
            C.DEFAULT_KUBERNETES_STORE_HANDLER,
            C.DEFAULT_ETCD_STORE_HANDLER
        ]
    for config in args.register_store_handler:
        if type(config) is str:
            config = json.loads(config)
        if type(config) is dict:
            register_store_handler(parser, store_manager, config)
        else:
            parser.error(
                'Store handler format must be a JSON object, got a '
                '{} instead: {}'.format(type(config).__name__, config))

    # Add our plugins
    InvestigatorPlugin(cherrypy.engine).subscribe()
    WatcherPlugin(cherrypy.engine, store_manager.clone()).subscribe()

    store_plugin.subscribe()

    # NOTE: Anything that requires etcd should start AFTER
    # the engine is started
    cherrypy.engine.start()

    try:
        # Make and mount the app
        authentication_kwargs = {}
        if type(args.authentication_plugin_kwargs) is str:
            if '=' in args.authentication_plugin_kwargs:
                for item in args.authentication_plugin_kwargs.split(','):
                    key, value = item.split('=')
                    authentication_kwargs[key.strip()] = value.strip()
        elif type(args.authentication_plugin_kwargs) is dict:
            # _read_config_file() sets this up.
            authentication_kwargs = args.authentication_plugin_kwargs

        app = create_app(
            args.authentication_plugin,
            authentication_kwargs)
        cherrypy.tree.graft(app, "/")

        # Serve forever
        cherrypy.engine.block()
    except Exception:
        _, ex, _ = exception.raise_if_not(Exception)
        logging.fatal('Unable to start server: {0}'.format(ex))
        cherrypy.engine.stop()
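
A side note on the version gate above: comparing sys.version_info against a
tuple works because Python compares tuples element-wise, so any 2.x release
sorts below (3, 0). A minimal, self-contained sketch of the same idiom
(the module choices here are illustrative, not taken from commissaire):

import sys

# Tuple comparison is element-wise: (2, 7, 12) < (3, 0) is True on any
# Python 2 interpreter and False on Python 3.
if sys.version_info < (3, 0):
    from StringIO import StringIO   # Python 2 module location
else:
    from io import StringIO         # Python 3 module location

buf = StringIO()
buf.write(u'runs unchanged on either interpreter')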

Example 50

Project: pip Source File: basecommand.py
Function: main
    def main(self, args):
        options, args = self.parse_args(args)

        if options.quiet:
            if options.quiet == 1:
                level = "WARNING"
            elif options.quiet == 2:
                level = "ERROR"
            else:
                level = "CRITICAL"
        elif options.verbose:
            level = "DEBUG"
        else:
            level = "INFO"

        # The root logger should match the "console" level *unless* we
        # specified "--log" to send debug logs to a file.
        root_level = level
        if options.log:
            root_level = "DEBUG"

        logging_dictConfig({
            "version": 1,
            "disable_existing_loggers": False,
            "filters": {
                "exclude_warnings": {
                    "()": "pip.utils.logging.MaxLevelFilter",
                    "level": logging.WARNING,
                },
            },
            "formatters": {
                "indent": {
                    "()": IndentingFormatter,
                    "format": "%(message)s",
                },
            },
            "handlers": {
                "console": {
                    "level": level,
                    "class": "pip.utils.logging.ColorizedStreamHandler",
                    "stream": self.log_streams[0],
                    "filters": ["exclude_warnings"],
                    "formatter": "indent",
                },
                "console_errors": {
                    "level": "WARNING",
                    "class": "pip.utils.logging.ColorizedStreamHandler",
                    "stream": self.log_streams[1],
                    "formatter": "indent",
                },
                "user_log": {
                    "level": "DEBUG",
                    "class": "pip.utils.logging.BetterRotatingFileHandler",
                    "filename": options.log or "/dev/null",
                    "delay": True,
                    "formatter": "indent",
                },
            },
            "root": {
                "level": root_level,
                "handlers": list(filter(None, [
                    "console",
                    "console_errors",
                    "user_log" if options.log else None,
                ])),
            },
            # Disable any logging besides WARNING unless we have DEBUG level
            # logging enabled. These use both pip._vendor and the bare names
            # for the case where someone unbundles our libraries.
            "loggers": dict(
                (
                    name,
                    {
                        "level": (
                            "WARNING"
                            if level in ["INFO", "ERROR"]
                            else "DEBUG"
                        ),
                    },
                )
                for name in ["pip._vendor", "distlib", "requests", "urllib3"]
            ),
        })

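        # sys.version_info[:2] keeps only (major, minor), so this check
        # matches every 2.6.x micro release.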
        if sys.version_info[:2] == (2, 6):
            warnings.warn(
                "Python 2.6 is no longer supported by the Python core team, "
                "please upgrade your Python. A future version of pip will "
                "drop support for Python 2.6",
                deprecation.Python26DeprecationWarning
            )

        # TODO: try to get these passing down from the command?
        #      without resorting to os.environ to hold these.

        if options.no_input:
            os.environ['PIP_NO_INPUT'] = '1'

        if options.exists_action:
            os.environ['PIP_EXISTS_ACTION'] = ' '.join(options.exists_action)

        if options.require_venv:
            # If a venv is required check if it can really be found
            if not running_under_virtualenv():
                logger.critical(
                    'Could not find an activated virtualenv (required).'
                )
                sys.exit(VIRTUALENV_NOT_FOUND)

        try:
            status = self.run(options, args)
            # FIXME: all commands should return an exit status
            # and when it is done, isinstance is not needed anymore
            if isinstance(status, int):
                return status
        except PreviousBuildDirError as exc:
            logger.critical(str(exc))
            logger.debug('Exception information:', exc_info=True)

            return PREVIOUS_BUILD_DIR_ERROR
        except (InstallationError, UninstallationError, BadCommand) as exc:
            logger.critical(str(exc))
            logger.debug('Exception information:', exc_info=True)

            return ERROR
        except CommandError as exc:
            logger.critical('ERROR: %s', exc)
            logger.debug('Exception information:', exc_info=True)

            return ERROR
        except KeyboardInterrupt:
            logger.critical('Operation cancelled by user')
            logger.debug('Exception information:', exc_info=True)

            return ERROR
        except:
            logger.critical('Exception:', exc_info=True)

            return UNKNOWN_ERROR
        finally:
            # Check if we're using the latest version of pip available
            if (not options.disable_pip_version_check and not
                    getattr(options, "no_index", False)):
                with self._build_session(
                        options,
                        retries=0,
                        timeout=min(5, options.timeout)) as session:
                    pip_version_check(session)

        return SUCCESS
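
Both examples above ultimately hand logging.config.dictConfig() a
schema-version-1 dictionary; pip builds it inline, while the commissaire
server loads it from JSON. A minimal standalone configuration, with
illustrative handler and format values rather than pip's, looks like this:

import logging
import logging.config

# "version": 1 is the only schema version dictConfig() accepts.
logging.config.dictConfig({
    "version": 1,
    "disable_existing_loggers": False,
    "formatters": {
        "plain": {"format": "%(levelname)s %(name)s: %(message)s"},
    },
    "handlers": {
        "console": {
            "class": "logging.StreamHandler",
            "level": "INFO",
            "formatter": "plain",
        },
    },
    "root": {"level": "INFO", "handlers": ["console"]},
})

logging.getLogger(__name__).info("configured via dictConfig")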