sys.stderr.write

Here are examples of the Python API sys.stderr.write taken from open source projects. They show how real projects write error and status messages to standard error.
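
Before the project excerpts, a minimal usage sketch may help: sys.stderr.write takes a single string, appends no newline, and is often paired with an explicit flush for progress-style output. The report_progress helper below is illustrative only and is not taken from any of the listed projects.

import sys
import time

def report_progress(done, total):
    # sys.stderr.write expects one already-formatted string and adds no newline,
    # so the message is formatted here and flushed explicitly.
    sys.stderr.write("\rProcessed %d/%d items" % (done, total))
    sys.stderr.flush()

if __name__ == "__main__":
    for i in range(1, 6):
        report_progress(i, 5)
        time.sleep(0.1)
    sys.stderr.write("\nDone\n")

Writing to sys.stderr keeps diagnostics separate from sys.stdout, so normal output can still be piped or redirected cleanly.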

200 Examples

Example 1

Project: autotest
Source File: fence_apc_snmp.py
def main():
    apc_base = "enterprises.apc.products.hardware."
    apc_outletctl = "masterswitch.sPDUOutletControl.sPDUOutletControlTable.sPDUOutletControlEntry.sPDUOutletCtl."
    apc_outletstatus = "masterswitch.sPDUOutletStatus.sPDUOutletStatusMSPTable.sPDUOutletStatusMSPEntry.sPDUOutletStatusMSP."

    address = ""
    output = ""
    port = ""
    action = "outletReboot"
    status_check = False
    verbose = False

    if not glob('/usr/share/snmp/mibs/powernet*.mib'):
        sys.stderr.write('This APC Fence script uses snmp to control the APC power switch. This script requires that net-snmp-utils be installed on all nodes in the cluster, and that the powernet369.mib file be located in /usr/share/snmp/mibs/\n')
        sys.exit(1)

    if len(sys.argv) > 1:
        try:
            opts, args = getopt.getopt(sys.argv[1:], "a:hl:p:n:o:vV", ["help", "output="])
        except getopt.GetoptError:
            # print help info and quit
            usage()
            sys.exit(2)

        for o, a in opts:
            if o == "-v":
                verbose = True
            if o == "-V":
                print "%s\n" % FENCE_RELEASE_NAME
                print "%s\n" % REDHAT_COPYRIGHT
                print "%s\n" % BUILD_DATE
                sys.exit(0)
            if o in ("-h", "--help"):
                usage()
                sys.exit(0)
            if o == "-n":
                port = a
            if o == "-o":
                lcase = a.lower()  # Lower case string
                if lcase == "off":
                    action = "outletOff"
                elif lcase == "on":
                    action = "outletOn"
                elif lcase == "reboot":
                    action = "outletReboot"
                elif lcase == "status":
                    #action = "sPDUOutletStatusMSPOutletState"
                    action = ""
                    status_check = True
                else:
                    usage()
                    sys.exit()
            if o == "-a":
                address = a

        if address == "":
            usage()
            sys.exit(1)

        if port == "":
            usage()
            sys.exit(1)

    else:  # Get opts from stdin
        params = {}
        # place params in dict
        for line in sys.stdin:
            val = line.split("=")
            if len(val) == 2:
                params[val[0].strip()] = val[1].strip()

        try:
            address = params["ipaddr"]
        except KeyError, e:
            sys.stderr.write("FENCE: Missing ipaddr param for fence_apc...exiting")
            sys.exit(1)
        try:
            login = params["login"]
        except KeyError, e:
            sys.stderr.write("FENCE: Missing login param for fence_apc...exiting")
            sys.exit(1)

        try:
            passwd = params["passwd"]
        except KeyError, e:
            sys.stderr.write("FENCE: Missing passwd param for fence_apc...exiting")
            sys.exit(1)

        try:
            port = params["port"]
        except KeyError, e:
            sys.stderr.write("FENCE: Missing port param for fence_apc...exiting")
            sys.exit(1)

        try:
            a = params["option"]
            if a == "Off" or a == "OFF" or a == "off":
                action = POWER_OFF
            elif a == "On" or a == "ON" or a == "on":
                action = POWER_ON
            elif a == "Reboot" or a == "REBOOT" or a == "reboot":
                action = POWER_REBOOT
        except KeyError, e:
            action = POWER_REBOOT

        # End of stdin section

    apc_command = apc_base + apc_outletctl + port

    args_status = list()
    args_off = list()
    args_on = list()

    args_status.append("/usr/bin/snmpget")
    args_status.append("-Oqu")  # sets printing options
    args_status.append("-v")
    args_status.append("1")
    args_status.append("-c")
    args_status.append("private")
    args_status.append("-m")
    args_status.append("ALL")
    args_status.append(address)
    args_status.append(apc_command)

    args_off.append("/usr/bin/snmpset")
    args_off.append("-Oqu")  # sets printing options
    args_off.append("-v")
    args_off.append("1")
    args_off.append("-c")
    args_off.append("private")
    args_off.append("-m")
    args_off.append("ALL")
    args_off.append(address)
    args_off.append(apc_command)
    args_off.append("i")
    args_off.append("outletOff")

    args_on.append("/usr/bin/snmpset")
    args_on.append("-Oqu")  # sets printing options
    args_on.append("-v")
    args_on.append("1")
    args_on.append("-c")
    args_on.append("private")
    args_on.append("-m")
    args_on.append("ALL")
    args_on.append(address)
    args_on.append(apc_command)
    args_on.append("i")
    args_on.append("outletOn")

    cmdstr_status = ' '.join(args_status)
    cmdstr_off = ' '.join(args_off)
    cmdstr_on = ' '.join(args_on)

# This section issues the actual commands. Reboot is split into
# Off, then On to make certain both actions work as planned.
#
# The status command just dumps the outlet status to stdout.
# The status checks that are made when turning an outlet on or off, though,
# use the execWithCaptureStatus so that the stdout from snmpget can be
# examined and the desired operation confirmed.

    if status_check:
        if verbose:
            fd = open("/tmp/apclog", "w")
            fd.write("Attempting the following command: %s\n" % cmdstr_status)
        strr = os.system(cmdstr_status)
        print strr
        if verbose:
            fd.write("Result: %s\n" % strr)
            fd.close()

    else:
        if action == POWER_OFF:
            if verbose:
                fd = open("/tmp/apclog", "w")
                fd.write("Attempting the following command: %s\n" % cmdstr_off)
            strr = os.system(cmdstr_off)
            time.sleep(1)
            strr, code = execWithCaptureStatus("/usr/bin/snmpget", args_status)
            if verbose:
                fd.write("Result: %s\n" % strr)
                fd.close()
            if strr.find(POWER_OFF) >= 0:
                print "Success. Outlet off"
                sys.exit(0)
            else:
                if verbose:
                    fd.write("Unable to power off apc outlet")
                    fd.close()
                sys.exit(1)

        elif action == POWER_ON:
            if verbose:
                fd = open("/tmp/apclog", "w")
                fd.write("Attempting the following command: %s\n" % cmdstr_on)
            strr = os.system(cmdstr_on)
            time.sleep(1)
            strr, code = execWithCaptureStatus("/usr/bin/snmpget", args_status)
            #strr = os.system(cmdstr_status)
            if verbose:
                fd.write("Result: %s\n" % strr)
            if strr.find(POWER_ON) >= 0:
                if verbose:
                    fd.close()
                print "Success. Outlet On."
                sys.exit(0)
            else:
                print "Unable to power on apc outlet"
                if verbose:
                    fd.write("Unable to power on apc outlet")
                    fd.close()
                sys.exit(1)

        elif action == POWER_REBOOT:
            if verbose:
                fd = open("/tmp/apclog", "w")
                fd.write("Attempting the following command: %s\n" % cmdstr_off)
            strr = os.system(cmdstr_off)
            time.sleep(1)
            strr, code = execWithCaptureStatus("/usr/bin/snmpget", args_status)
            #strr = os.system(cmdstr_status)
            if verbose:
                fd.write("Result: %s\n" % strr)
            if strr.find(POWER_OFF) < 0:
                print "Unable to power off apc outlet"
                if verbose:
                    fd.write("Unable to power off apc outlet")
                    fd.close()
                sys.exit(1)

            if verbose:
                fd.write("Attempting the following command: %s\n" % cmdstr_on)
            strr = os.system(cmdstr_on)
            time.sleep(1)
            strr, code = execWithCaptureStatus("/usr/bin/snmpget", args_status)
            #strr = os.system(cmdstr_status)
            if verbose:
                fd.write("Result: %s\n" % strr)
            if strr.find(POWER_ON) >= 0:
                if verbose:
                    fd.close()
                print "Success: Outlet Rebooted."
                sys.exit(0)
            else:
                print "Unable to power on apc outlet"
                if verbose:
                    fd.write("Unable to power on apc outlet")
                    fd.close()
                sys.exit(1)

Example 2

Project: meshtool
Source File: load_obj.py
def loadOBJ(data, aux_file_loader=None, validate_output=False):
    """Loads an OBJ file
    
    :param data: A binary data string containing the OBJ file
    :param aux_file_loader: Should be a callable function that takes one parameter.
                            The parameter will be a string containing an auxiliary
                            file that needs to be found, in this case usually a .mtl
                            file or a texture file.
    
    :returns: An instance of :class:`collada.Collada` or None if could not be loaded
    """
    
    mesh = collada.Collada(validate_output=validate_output)
    namer = NameUniqifier()
    material_map = {}
    cimages = []
    materialNamer = NameUniqifier()
    
    vertices = []
    normals = []
    texcoords = []
    
    groups = []
    group = ObjGroup(namer.name("default"))
    geometry_name = namer.name("convertedobjgeometry")
    
    file_like = StringIO(to_unicode(data))
    for line in file_like:
        line = line.strip()
        
        # ignore blank lines and comments
        if len(line) == 0 or line.startswith('#'):
            continue
        
        # split off the first non-whitespace token and ignore the line if there isn't > 1 token
        splitup = line.split(None, 1)
        if len(splitup) != 2:
            continue
        command, line = splitup
        
        if command == 'v':
            line_tokens = line.split()
            vertices.extend(line_tokens[:3])
            
        elif command == 'vn':
            line_tokens = line.split()
            normals.extend(line_tokens[:3])
           
        elif command == 'vt':
            line_tokens = line.split()
            texcoords.extend(line_tokens[:2])
            
        # TODO: other vertex data statements
        # vp
        # cstype
        # deg
        # bmat
        # step
            
        elif command == 'f':
            faces = line.split()
            
            if group.face_mode == FACEMODE.UNKNOWN:
                group.face_mode = detectFaceStyle(faces[0])
                if group.face_mode is None:
                    sys.stderr.write("Error: could not detect face type for line '%s'" % line)
                    return
            
            group.face_lengths.append(len(faces))
            
            # Don't decode the faces here because the / separators have to be parsed out
            # and this is very slow to do one at a time. Instead, just append to a list
            # which is much faster than appending to a string, and it will get joined and
            # parsed later
            group.face_indices.append(line)
        
        elif command == 'l':
            faces = line.split()
            
            if group.face_mode == FACEMODE.UNKNOWN:
                group.face_mode = detectFaceStyle(faces[0])
                if group.face_mode is None:
                    sys.stderr.write("Error: could not detect face type for line '%s'" % line)
                    return
            
            # COLLADA defines lines as a pair of points, so the index values "1 2 3 4" would
            # refer to *two* lines, one between 1 and 2 and one between 3 and 4. OBJ defines
            # lines as continuous, so it would be three lines: 1-2, 2-3, 3-4. This duplicates
            # the points to get pairs for COLLADA. This is not very efficient, but not sure
            # of a faster way to do this and I've never seen any files with a huge number of
            # lines in it anyway.
            line = faces[0] + " " + faces[1]
            prev = faces[1]
            for cur in faces[2:]:
                line += " " + prev + " " + cur
                prev = cur
            group.line_indices.append(line)
        
        elif command == 'p':
            faces = line.split()
            
            if group.face_mode == FACEMODE.UNKNOWN:
                group.face_mode = detectFaceStyle(faces[0])
                if group.face_mode is None:
                    sys.stderr.write("Error: could not detect face type for line '%s'" % line)
                    return
                
            # COLLADA does not have points, so this converts a point to a line with two
            # identical endpoints
            line = " ".join(f + " " + f for f in faces)
            group.line_indices.append(line)
        
        # TODO: other elements
        # curv
        # curv2
        # surf
        
        elif command == 'g':
            if group.empty():
                # first group without any previous data, so just set name
                group.name = namer.name(line)
                continue
            
            # end of previous group and start of new group
            groups.append(group)
            group = ObjGroup(namer.name(line))
        
        elif command == 's':
            # there is no way to map shading groups into collada
            continue
        
        elif command == 'o':
            geometry_name = namer.name(line)
        
        # TODO: grouping info
        # mg
        
        # TODO: Free-form curve/surface body statements
        # parm
        # trim
        # hole
        # scrv
        # sp
        # end
        # con
        
        elif command == 'mtllib':
            mtl_file = None
            if aux_file_loader is not None:
                mtl_file = aux_file_loader(line)
            if mtl_file is not None:
                material_data = loadMaterialLib(mtl_file, namer=materialNamer, aux_file_loader=aux_file_loader)
                material_map.update(material_data['material_map'])
                cimages.extend(material_data['images'])
            
        elif command == 'usemtl':
            group.material = slugify(line)
        
        # TODO: display and render attributes
        # bevel
        # c_interp
        # d_interp
        # lod
        # shadow_obj
        # trace_obj
        # ctech
        # stech
        
        else:
            print '  MISSING LINE: %s %s' % (command, line)
    
    # done, append last group
    if not group.empty():
        groups.append(group)
    
    for material in material_map.values():
        mesh.effects.append(material.effect)
        mesh.materials.append(material)
    for cimg in cimages:
        mesh.images.append(cimg)
    
    vertices = numpy.array(vertices, dtype=numpy.float32).reshape(-1, 3)
    normals = numpy.array(normals, dtype=numpy.float32).reshape(-1, 3)
    texcoords = numpy.array(texcoords, dtype=numpy.float32).reshape(-1, 2)
    
    sources = []
    # all modes have vertex source
    sources.append(collada.source.FloatSource("obj-vertex-source", vertices, ('X', 'Y', 'Z')))
    if len(normals) > 0:
        sources.append(collada.source.FloatSource("obj-normal-source", normals, ('X', 'Y', 'Z')))
    if len(texcoords) > 0:
        sources.append(collada.source.FloatSource("obj-uv-source", texcoords, ('S', 'T')))
    
    geom = collada.geometry.Geometry(mesh, geometry_name, geometry_name, sources)
    
    materials_mapped = set()
    for group in groups:
        input_list = collada.source.InputList()
        input_list.addInput(0, 'VERTEX', "#obj-vertex-source")
        if group.face_mode == FACEMODE.VN:
            input_list.addInput(1, 'NORMAL', '#obj-normal-source')
        elif group.face_mode == FACEMODE.VT:
            input_list.addInput(1, 'TEXCOORD', '#obj-uv-source')
        elif group.face_mode == FACEMODE.VTN:
            input_list.addInput(1, 'TEXCOORD', '#obj-uv-source')
            input_list.addInput(2, 'NORMAL', '#obj-normal-source')
        
        if len(group.face_lengths) > 0:
            face_lengths = numpy.array(group.face_lengths, dtype=numpy.int32)
    
            # First, join the individual face lines together, separated by spaces. Then,        
            # just replace 1/2/3 and 1//3 with "1 2 3" and "1  3", as numpy.fromstring can
            # handle any whitespace it's given, similar to python's split(). Concatenating
            # together this way is much faster than parsing the numbers in python - let
            # numpy do it. Note that sep=" " is actually misleading - it handles tabs and
            # other whitespace also
            group.face_indices = (" ".join(group.face_indices)).replace("/", " ")
            face_indices = numpy.fromstring(group.face_indices, dtype=numpy.int32, sep=" ")
            
            # obj indices start at 1, while collada start at 0
            face_indices -= 1
            
            polylist = geom.createPolylist(face_indices, face_lengths, input_list, group.material or namer.name("nullmaterial"))
            geom.primitives.append(polylist)
            
        if len(group.line_indices) > 0:
            group.line_indices = (" ".join(group.line_indices)).replace("/", " ")
            line_indices = numpy.fromstring(group.line_indices, dtype=numpy.int32, sep=" ")
            line_indices -= 1
            lineset = geom.createLineSet(line_indices, input_list, group.material or namer.name("nullmaterial"))
            geom.primitives.append(lineset)
        
        if group.material in material_map:
            materials_mapped.add(group.material)
    
    mesh.geometries.append(geom)
    
    matnodes = []
    for matref in materials_mapped:
        matnode = collada.scene.MaterialNode(matref, material_map[matref], inputs=[('TEX0', 'TEXCOORD', '0')])
        matnodes.append(matnode)
    geomnode = collada.scene.GeometryNode(geom, matnodes)
    node = collada.scene.Node(namer.name("node"), children=[geomnode])
    myscene = collada.scene.Scene(namer.name("scene"), [node])
    mesh.scenes.append(myscene)
    mesh.scene = myscene
    
    return mesh

Example 3

Project: python-xlib
Source File: genprottest.py
def build_request(endian):
    fc = open('genrequest.c', 'w')

    fc.write(C_HEADER)

    reqlist = list(request.major_codes.items())
    reqlist.sort(key=lambda x: x[0])

    genfuncs = []
    req_args = {}
    reply_args = {}

    for code, req in reqlist:
        name = req.__name__
        creqname = name

        cdefs = request_defs.get(name)
        if cdefs is None:
            cdefs = mini_request_defs.get(name)
            creqname = ''
        if cdefs is None:
            cdefs = resource_request_defs.get(name)
            creqname = 'Resource'

        creqname = 'x%sReq' % creqname

        if cdefs is None:
            sys.stderr.write('missing def for request: %s\n' % name)
        else:
            vardefs = request_var_defs.get(name, [()])
            if type(vardefs) is not list:
                vardefs = [vardefs]

            i = 0
            for v in vardefs:
                if i > 0:
                    uname = name + str(i)
                else:
                    uname = name

                try:
                    req_args[uname] = gen_func(fc,
                                              'genrequest_' + uname,
                                              creqname,
                                              'REQUEST ' + uname,
                                              req._request,
                                              cdefs,
                                              v)
                except:
                    sys.stderr.write('Error in %s request\n' % uname)
                    raise

                genfuncs.append('genrequest_' + uname)
                i = i + 1

        if issubclass(req, rq.ReplyRequest):
            cdefs = reply_defs.get(name)

            if cdefs is None:
                sys.stderr.write('missing def for reply: %s\n' % name)
            else:
                vardefs = reply_var_defs.get(name, ())
                if type(vardefs) is not list:
                    vardefs = [vardefs]

                i = 0
                for v in vardefs:
                    if i > 0:
                        uname = name + str(i)
                    else:
                        uname = name

                    try:
                        reply_args[uname] = gen_func(fc,
                                                     'genreply_' + uname,
                                                     'x%sReply' % name,
                                                     'REPLY ' + uname,
                                                     req._reply,
                                                     cdefs,
                                                     v)
                    except:
                        sys.stderr.write('Error in %s reply\n' % uname)
                        raise

                    genfuncs.append('genreply_' + uname)
                    i = i + 1


    fc.write('''

    int main(void)
    {
    ''')

    for gf in genfuncs:
        fc.write('      %s();\n' % gf)

    fc.write('''
      return 0;
    }
    ''')

    fc.close()
    os.system('gcc -Wall -g genrequest.c -o genrequest')

    req_bins = {}
    reply_bins = {}
    pc = os.popen('./genrequest', 'r')
    for line in pc.readlines():
        parts = line.strip().split()
        if parts[0] == 'REQUEST':
            req_bins[parts[1]] = parts[2]
        elif parts[0] == 'REPLY':
            reply_bins[parts[1]] = parts[2]

    fpy = open('../test_requests_%s.py' % endian, 'w')
    os.chmod('../test_requests_%s.py' % endian, 0o755)

    if endian == 'be':
        e = 'BigEndian'
        v = 1
    else:
        e = 'LittleEndian'
        v = 0

    fpy.write(PY_HEADER % { 'endname': e, 'endvalue': v })

    for code, req in reqlist:
        name = req.__name__

        fpy.write('\n\nclass Test%s(EndianTest):\n' % name)
        fpy.write('    def setUp(self):\n')

        i = 0
        reqs = -1
        replies = -1
        while 1:
            if i > 0:
                uname = name + str(i)
            else:
                uname = name

            reqbin = req_bins.get(uname)
            replybin = reply_bins.get(uname)

            if reqbin is None and replybin is None:
                break

            if reqbin:
                reqs = i
                fpy.write('        self.req_args_%d = %s\n'
                          % (i, build_args(req_args[uname])))
                fpy.write('        self.req_bin_%d = %s\n\n'
                          % (i, build_bin(reqbin)))
            if replybin:
                replies = i
                fpy.write('        self.reply_args_%d = %s\n'
                          % (i, build_args(reply_args[uname])))
                fpy.write('        self.reply_bin_%d = %s\n\n'
                          % (i, build_bin(replybin)))

            i = i + 1

        for i in range(0, reqs + 1):
            fpy.write('''
    def testPackRequest%(n)d(self):
        bin = request.%(req)s._request.to_binary(*(), **self.req_args_%(n)d)
        self.assertBinaryEqual(bin, self.req_bin_%(n)d)

    def testUnpackRequest%(n)d(self):
        args, remain = request.%(req)s._request.parse_binary(self.req_bin_%(n)d, dummy_display, 1)
        self.assertBinaryEmpty(remain)
        self.assertEqual(args, self.req_args_%(n)d)
''' % { 'req': req.__name__, 'n': i })

        for i in range(0, replies + 1):
            fpy.write('''
    def testPackReply%(n)d(self):
        bin = request.%(req)s._reply.to_binary(*(), **self.reply_args_%(n)d)
        self.assertBinaryEqual(bin, self.reply_bin_%(n)d)

    def testUnpackReply%(n)d(self):
        args, remain = request.%(req)s._reply.parse_binary(self.reply_bin_%(n)d, dummy_display, 1)
        self.assertBinaryEmpty(remain)
        self.assertEqual(args, self.reply_args_%(n)d)
''' % { 'req': req.__name__, 'n': i })

    fpy.write('''

if __name__ == "__main__":
    unittest.main()
''')

Example 4

Project: django-pyodbc
Source File: ss_loaddata.py
    def handle(self, *fixture_labels, **options):
        from django.db.models import get_apps
        from django.core import serializers
        from django.db import connection, transaction
        from django.conf import settings

        self.style = no_style()

        verbosity = int(options.get('verbosity', 1))
        show_traceback = options.get('traceback', False)

        # commit is a stealth option - it isn't really useful as
        # a command line option, but it can be useful when invoking
        # loaddata from within another script.
        # If commit=True, loaddata will use its own transaction;
        # if commit=False, the data load SQL will become part of
        # the transaction in place when loaddata was invoked.
        commit = options.get('commit', True)

        # Keep a count of the installed objects and fixtures
        fixture_count = 0
        object_count = 0
        models = set()

        humanize = lambda dirname: dirname and "'%s'" % dirname or 'absolute path'

        # Get a cursor (even though we don't need one yet). This has
        # the side effect of initializing the test database (if
        # it isn't already initialized).
        cursor = connection.cursor()

        # Start transaction management. All fixtures are installed in a
        # single transaction to ensure that all references are resolved.
        if commit:
            transaction.commit_unless_managed()
            transaction.enter_transaction_management()
            transaction.managed(True)

        self.disable_forward_ref_checks()

        class SingleZipReader(zipfile.ZipFile):
            def __init__(self, *args, **kwargs):
                zipfile.ZipFile.__init__(self, *args, **kwargs)
                if settings.DEBUG:
                    assert len(self.namelist()) == 1, "Zip-compressed fixtures must contain only one file."
            def read(self):
                return zipfile.ZipFile.read(self, self.namelist()[0])

        compression_types = {
            None:   file,
            'gz':   gzip.GzipFile,
            'zip':  SingleZipReader
        }
        if has_bz2:
            compression_types['bz2'] = bz2.BZ2File

        app_fixtures = [os.path.join(os.path.dirname(app.__file__), 'fixtures') for app in get_apps()]
        for fixture_label in fixture_labels:
            parts = fixture_label.split('.')

            if len(parts) > 1 and parts[-1] in compression_types:
                compression_formats = [parts[-1]]
                parts = parts[:-1]
            else:
                compression_formats = compression_types.keys()

            if len(parts) == 1:
                fixture_name = parts[0]
                formats = serializers.get_public_serializer_formats()
            else:
                fixture_name, format = '.'.join(parts[:-1]), parts[-1]
                if format in serializers.get_public_serializer_formats():
                    formats = [format]
                else:
                    formats = []

            if formats:
                if verbosity > 1:
                    print "Loading '%s' fixtures..." % fixture_name
            else:
                self.enable_forward_ref_checks(cursor)
                sys.stderr.write(
                    self.style.ERROR("Problem installing fixture '%s': %s is not a known serialization format." %
                        (fixture_name, format)))
                transaction.rollback()
                transaction.leave_transaction_management()
                return

            if os.path.isabs(fixture_name):
                fixture_dirs = [fixture_name]
            else:
                fixture_dirs = app_fixtures + list(settings.FIXTURE_DIRS) + ['']

            for fixture_dir in fixture_dirs:
                if verbosity > 1:
                    print "Checking %s for fixtures..." % humanize(fixture_dir)

                label_found = False
                for format in formats:
                    for compression_format in compression_formats:
                        if compression_format:
                            file_name = '.'.join([fixture_name, format,
                                                  compression_format])
                        else:
                            file_name = '.'.join([fixture_name, format])

                        if verbosity > 1:
                            print "Trying %s for %s fixture '%s'..." % \
                                (humanize(fixture_dir), file_name, fixture_name)
                        full_path = os.path.join(fixture_dir, file_name)
                        open_method = compression_types[compression_format]
                        try:
                            fixture = open_method(full_path, 'r')
                            if label_found:
                                fixture.close()
                                self.enable_forward_ref_checks(cursor)
                                print self.style.ERROR("Multiple fixtures named '%s' in %s. Aborting." %
                                    (fixture_name, humanize(fixture_dir)))
                                transaction.rollback()
                                transaction.leave_transaction_management()
                                return
                            else:
                                fixture_count += 1
                                objects_in_fixture = 0
                                if verbosity > 0:
                                    print "Installing %s fixture '%s' from %s." % \
                                        (format, fixture_name, humanize(fixture_dir))
                                try:
                                    objects = serializers.deserialize(format, fixture)
                                    for obj in objects:
                                        objects_in_fixture += 1
                                        self.handle_ref_checks(cursor, obj)
                                        models.add(obj.object.__class__)
                                        obj.save()
                                    object_count += objects_in_fixture
                                    label_found = True
                                except (SystemExit, KeyboardInterrupt):
                                    self.enable_forward_ref_checks(cursor)
                                    raise
                                except Exception:
                                    import traceback
                                    fixture.close()
                                    self.enable_forward_ref_checks(cursor)
                                    transaction.rollback()
                                    transaction.leave_transaction_management()
                                    if show_traceback:
                                        traceback.print_exc()
                                    else:
                                        sys.stderr.write(
                                            self.style.ERROR("Problem installing fixture '%s': %s\n" %
                                                 (full_path, ''.join(traceback.format_exception(sys.exc_type,
                                                     sys.exc_value, sys.exc_traceback)))))
                                    return
                                fixture.close()

                                # If the fixture we loaded contains 0 objects, assume that an
                                # error was encountered during fixture loading.
                                if objects_in_fixture == 0:
                                    self.enable_forward_ref_checks(cursor)
                                    sys.stderr.write(
                                        self.style.ERROR("No fixture data found for '%s'. (File format may be invalid.)" %
                                            (fixture_name)))
                                    transaction.rollback()
                                    transaction.leave_transaction_management()
                                    return

                        except Exception, e:
                            if verbosity > 1:
                                print "No %s fixture '%s' in %s." % \
                                    (format, fixture_name, humanize(fixture_dir))

        self.enable_forward_ref_checks(cursor)

        # If we found even one object in a fixture, we need to reset the
        # database sequences.
        if object_count > 0:
            sequence_sql = connection.ops.sequence_reset_sql(self.style, models)
            if sequence_sql:
                if verbosity > 1:
                    print "Resetting sequences"
                for line in sequence_sql:
                    cursor.execute(line)

        if commit:
            transaction.commit()
            transaction.leave_transaction_management()

        if object_count == 0:
            if verbosity > 1:
                print "No fixtures found."
        else:
            if verbosity > 0:
                print "Installed %d object(s) from %d fixture(s)" % (object_count, fixture_count)

        # Close the DB connection. This is required as a workaround for an
        # edge case in MySQL: if the same connection is used to
        # create tables, load data, and query, the query can return
        # incorrect results. See Django #7572, MySQL #37735.
        if commit:
            connection.close()

Example 5

Project: IMAPdedup
Source File: imapdedup.py
def process(options, mboxes):
    if options.process:
        serverclass = imaplib.IMAP4_stream
    elif options.ssl:
        serverclass = imaplib.IMAP4_SSL
    else:
        serverclass = imaplib.IMAP4

    try:
        if options.process:
            server = serverclass(options.process)
        elif options.port:
            server = serverclass(options.server, options.port)
        else:
            # Use the default, which will be different depending on SSL choice
            server = serverclass(options.server)
    except socket.error as e:
        sys.stderr.write("\nFailed to connect to server. Might be host, port or SSL settings?\n")
        sys.stderr.write("%s\n\n" % e)
        sys.exit(1)

    if ('STARTTLS' in server.capabilities) and hasattr(server, 'starttls'):
        server.starttls()
    elif not options.ssl:
        sys.stderr.write('\nWarning: Unencrypted connection\n')

    try:
        if not options.process:
            server.login(options.user, options.password)
    except:
        sys.stderr.write("\nError: Login failed\n")
        sys.exit(1)

    # List mailboxes option
    if options.just_list:
        for mb in check_response(server.list()):
            mb = mb.decode('utf-7')
            bits = parse_list_response(mb)
            if r'\\Noselect' not in bits[0]:
                print(bits[2])
        sys.exit()

    if len(mboxes) == 0:
        sys.stderr.write("\nError: Must specify mailbox\n")
        sys.exit(1)

    # OK - let's get started.
    # Iterate through a set of named mailboxes and delete the later messages discovered.
    try:
        p = email.parser.Parser() # can be the same for all mailboxes
        # Create a list of previously seen message IDs, in any mailbox
        msg_ids = {}
        for mbox in mboxes:
            msgs_to_delete = [] # should be reset for each mbox
            msg_map = {} # should be reset for each mbox

            # Select the mailbox
            msgs = check_response(server.select(mbox, options.dry_run))[0]
            print("There are %d messages in %s." % (int(msgs), mbox))

            # Check how many messages are already marked 'deleted'...
            deleted = check_response(server.search(None, 'DELETED'))[0].split()
            numdeleted = len(deleted)
            print("%s message(s) currently marked as deleted in %s" % (numdeleted or "No", mbox))

            # ...and get a list of the ones that aren't deleted. That's what we'll use.
            msgnums = check_response(server.search(None, 'UNDELETED'))[0].split()
            print("%s others in %s" % (len(msgnums), mbox))

            chunkSize = 100
            if options.verbose: print ("Reading the others... (in batches of %d)" % chunkSize)

            for i in range(0, len(msgnums), chunkSize):
                msgnums_in_chunk = msgnums[i:i + chunkSize]
                message_ids = ','.join(msgnums_in_chunk)
                # Get the header of each message
                ms = check_response(server.fetch(message_ids, '(RFC822.HEADER)'))
                if options.verbose:
                    print ("Batch starting at item %d" % i)

                # and parse them.
                for ci in range(0, len(msgnums_in_chunk)):
                    mnum = msgnums_in_chunk[ci]
                    mp = p.parsestr(ms[ci * 2][1])
                    if options.verbose:
                        print("Checking %s message %s" % (mbox, mnum))

                    # Record the message-ID header (or generate one from other headers)
                    msg_id = get_message_id(mp, options.use_checksum, options.use_id_in_checksum)

                    # Store message only when verbose is enabled (to print it later on)
                    if options.verbose:
                        msg_map[mnum] = mp

                    if msg_id:
                        # If we've seen this message before, record it as one to be
                        # deleted in this mailbox.
                        if msg_id in msg_ids:
                            print ("Message %s_%s is a duplicate of %s and %s be marked as deleted" % (
                                           mbox, mnum, msg_ids[msg_id], options.dry_run and "would" or "will"))
                            if options.verbose:
                                print ("Subject: %s\nFrom: %s\nDate: %s\n" % (mp['Subject'], mp['From'], mp['Date']))
                            msgs_to_delete.append(mnum)
                        # Otherwise record the fact that we've seen it
                        else:
                            msg_ids[msg_id] = mbox + '_' + mnum

                print ("%s message(s) in %s processed" % (min(len(msgnums), i + chunkSize), mbox))

            # OK - we've been through this mailbox, and msgs_to_delete holds
            # a list of the duplicates we've found.

            if len(msgs_to_delete) == 0:
                print("No duplicates were found in %s" % mbox)

            else:
                if options.verbose:
                    print("These are the duplicate messages: ")
                    for mnum in msgs_to_delete:
                        print_message_info(msg_map[mnum])

                if options.dry_run:
                    print("If you had not selected the 'dry-run' option,\n%i messages would now be marked as 'deleted'." % (len(msgs_to_delete)))

                else:
                    print("Marking %i messages as deleted..." % (len(msgs_to_delete)))
                    # Deleting messages one at a time can be slow if there are many, so we batch them up
                    chunkSize = 30
                    if options.verbose: print("(in batches of %d)" % chunkSize)
                    for i in range(0, len(msgs_to_delete), chunkSize):
                        message_ids = ','.join(msgs_to_delete[i:i + chunkSize])
                        check_response(server.store(message_ids, '+FLAGS', r'(\Deleted)'))
                        if options.verbose:
                            print("Batch starting at item %d marked." % i)
                    print("Confirming new numbers...")
                    deleted = check_response(server.search(None, 'DELETED'))[0].split()
                    numdeleted = len(deleted)
                    undeleted = check_response(server.search(None, 'UNDELETED'))[0].split()
                    numundel = len(undeleted)
                    print("There are now %s messages marked as deleted and %s others in %s." % (numdeleted, numundel, mbox))
        if not options.no_close:
            server.close()
    except ImapDedupException as e:
        print >> sys.stderr, "Error:", e
    finally:
        server.logout()

Example 6

    def fit(self, X, y):
        """Perform feature selection and learn model from training data.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vectors, where n_samples is the number of samples and
            n_features is the number of features.
        y : array-like, shape = [n_samples]
            Target values.

        Returns
        -------
        self : object

        """

        if not isinstance(self.k_features, int) and\
                not isinstance(self.k_features, tuple):
            raise AttributeError('k_features must be a positive integer'
                                 ' or tuple')

        if isinstance(self.k_features, int) and (self.k_features < 1 or
                                                 self.k_features > X.shape[1]):
            raise AttributeError('k_features must be a positive integer'
                                 ' between 1 and X.shape[1], got %s'
                                 % (self.k_features, ))

        if isinstance(self.k_features, tuple):
            if len(self.k_features) != 2:
                raise AttributeError('k_features tuple must consist of 2'
                                     ' elements a min and a max value.')

            if self.k_features[0] not in range(1, X.shape[1] + 1):
                raise AttributeError('k_features tuple min value must be in'
                                     ' range(1, X.shape[1]+1).')

            if self.k_features[1] not in range(1, X.shape[1] + 1):
                raise AttributeError('k_features tuple max value must be in'
                                     ' range(1, X.shape[1]+1).')

            if self.k_features[0] > self.k_features[1]:
                raise AttributeError('The min k_features value must be larger'
                                     ' than the max k_features value.')

        if self.skip_if_stuck:
            sdq = deque(maxlen=4)
        else:
            sdq = deque(maxlen=0)

        if isinstance(self.k_features, tuple):
            select_in_range = True
        else:
            select_in_range = False
            k_to_select = self.k_features

        self.subsets_ = {}
        orig_set = set(range(X.shape[1]))
        if self.forward:
            if select_in_range:
                k_to_select = self.k_features[1]
            k_idx = ()
            k = 0
        else:
            if select_in_range:
                k_to_select = self.k_features[0]
            k_idx = tuple(range(X.shape[1]))
            k = len(k_idx)
            k_score = self._calc_score(X, y, k_idx)
            self.subsets_[k] = {
                'feature_idx': k_idx,
                'cv_scores': k_score,
                'avg_score': k_score.mean()
            }

        best_subset = None
        k_score = 0
        try:
            while k != k_to_select:
                prev_subset = set(k_idx)
                if self.forward:
                    k_idx, k_score, cv_scores = self._inclusion(
                        orig_set=orig_set,
                        subset=prev_subset,
                        X=X,
                        y=y
                    )
                else:
                    k_idx, k_score, cv_scores = self._exclusion(
                        feature_set=prev_subset,
                        X=X,
                        y=y
                    )

                if self.floating and not self._is_stuck(sdq):
                    (new_feature,) = set(k_idx) ^ prev_subset
                    if self.forward:
                        k_idx_c, k_score_c, cv_scores_c = self._exclusion(
                            feature_set=k_idx,
                            fixed_feature=new_feature,
                            X=X,
                            y=y
                        )
                    else:
                        k_idx_c, k_score_c, cv_scores_c = self._inclusion(
                            orig_set=orig_set - {new_feature},
                            subset=set(k_idx),
                            X=X,
                            y=y
                        )

                    if k_score_c and k_score_c > k_score:
                        k_idx, k_score, cv_scores = \
                            k_idx_c, k_score_c, cv_scores_c

                k = len(k_idx)
                # floating can lead to multiple same-sized subsets
                if k not in self.subsets_ or (self.subsets_[k]['avg_score'] >
                                              k_score):
                    self.subsets_[k] = {
                        'feature_idx': k_idx,
                        'cv_scores': cv_scores,
                        'avg_score': k_score
                    }
                sdq.append(k_idx)

                if self.verbose == 1:
                    sys.stderr.write('\rFeatures: %d/%s' % (
                        len(k_idx),
                        k_to_select
                    ))
                    sys.stderr.flush()
                elif self.verbose > 1:
                    sys.stderr.write('\n[%s] Features: %d/%s -- score: %s' % (
                        datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
                        len(k_idx),
                        k_to_select,
                        k_score
                    ))

                if self._TESTING_INTERRUPT_MODE:
                    raise KeyboardInterrupt

        except KeyboardInterrupt as e:
            self.interrupted_ = True
            sys.stderr.write('\nSTOPPING EARLY DUE TO KEYBOARD INTERRUPT...')

        if select_in_range:
            max_score = float('-inf')
            for k in self.subsets_:
                if self.subsets_[k]['avg_score'] > max_score:
                    max_score = self.subsets_[k]['avg_score']
                    best_subset = k
            k_score = max_score
            k_idx = self.subsets_[best_subset]['feature_idx']

        self.k_feature_idx_ = k_idx
        self.k_score_ = k_score
        self.subsets_plus_ = dict()
        self.fitted = True
        return self

Example 7

Project: dnspython
Source File: zonediff.py
def main():
    import argparse
    import subprocess
    import sys
    import traceback

    usage = """%prog zonefile1 zonefile2 - Show differences between zones in a diff-like format
%prog [--git|--bzr|--rcs] zonefile rev1 [rev2] - Show differences between two revisions of a zonefile

The differences shown will be logical differences, not textual differences.
"""
    p = argparse.ArgumentParser(usage=usage)
    p.add_argument('-s', '--ignore-soa', action="store_true", default=False, dest="ignore_soa",
                 help="Ignore SOA-only changes to records")
    p.add_argument('-t', '--ignore-ttl', action="store_true", default=False, dest="ignore_ttl",
                 help="Ignore TTL-only changes to Rdata")
    p.add_argument('-T', '--traceback', action="store_true", default=False, dest="tracebacks",
                 help="Show python tracebacks when errors occur")
    p.add_argument('-H', '--html', action="store_true", default=False, dest="html",
                 help="Print HTML output")
    p.add_argument('-g', '--git', action="store_true", default=False, dest="use_git",
                 help="Use git revisions instead of real files")
    p.add_argument('-b', '--bzr', action="store_true", default=False, dest="use_bzr",
                 help="Use bzr revisions instead of real files")
    p.add_argument('-r', '--rcs', action="store_true", default=False, dest="use_rcs",
                 help="Use rcs revisions instead of real files")
    opts, args = p.parse_args()
    opts.use_vc = opts.use_git or opts.use_bzr or opts.use_rcs

    def _open(what, err):
        if isinstance(what, list):
            # Must be a list, open subprocess
            try:
                proc = subprocess.Popen(what, stdout=subprocess.PIPE)
                proc.wait()
                if proc.returncode == 0:
                    return proc.stdout
                sys.stderr.write(err + "\n")
            except Exception:
                sys.stderr.write(err + "\n")
                if opts.tracebacks:
                    traceback.print_exc()
        else:
            # Open as normal file
            try:
                return open(what, 'rb')
            except IOError:
                sys.stderr.write(err + "\n")
                if opts.tracebacks:
                    traceback.print_exc()

    if not opts.use_vc and len(args) != 2:
        p.print_help()
        sys.exit(64)
    if opts.use_vc and len(args) not in (2, 3):
        p.print_help()
        sys.exit(64)

    # Open file descriptors
    if not opts.use_vc:
        oldn, newn = args
    else:
        if len(args) == 3:
            filename, oldr, newr = args
            oldn = "%s:%s" % (oldr, filename)
            newn = "%s:%s" % (newr, filename)
        else:
            filename, oldr = args
            newr = None
            oldn = "%s:%s" % (oldr, filename)
            newn = filename

    old, new = None, None
    oldz, newz = None, None
    if opts.use_bzr:
        old = _open(["bzr", "cat", "-r" + oldr, filename],
                    "Unable to retrieve revision %s of %s" % (oldr, filename))
        if newr != None:
            new = _open(["bzr", "cat", "-r" + newr, filename],
                        "Unable to retrieve revision %s of %s" % (newr, filename))
    elif opts.use_git:
        old = _open(["git", "show", oldn],
                    "Unable to retrieve revision %s of %s" % (oldr, filename))
        if newr != None:
            new = _open(["git", "show", newn],
                        "Unable to retrieve revision %s of %s" % (newr, filename))
    elif opts.use_rcs:
        old = _open(["co", "-q", "-p", "-r" + oldr, filename],
                    "Unable to retrieve revision %s of %s" % (oldr, filename))
        if newr != None:
            new = _open(["co", "-q", "-p", "-r" + newr, filename],
                        "Unable to retrieve revision %s of %s" % (newr, filename))
    if not opts.use_vc:
        old = _open(oldn, "Unable to open %s" % oldn)
    if not opts.use_vc or newr is None:
        new = _open(newn, "Unable to open %s" % newn)

    if not old or not new:
        sys.exit(65)

    # Parse the zones
    try:
        oldz = dns.zone.from_file(old, origin='.', check_origin=False)
    except dns.exception.DNSException:
        sys.stderr.write("Incorrect zonefile: %s\n", old)
        if opts.tracebacks:
            traceback.print_exc()
    try:
        newz = dns.zone.from_file(new, origin='.', check_origin=False)
    except dns.exception.DNSException:
        sys.stderr.write("Incorrect zonefile: %s\n" % new)
        if opts.tracebacks:
            traceback.print_exc()
    if not oldz or not newz:
        sys.exit(65)

    changes = diff_zones(oldz, newz, opts.ignore_ttl, opts.ignore_soa)
    changes.sort()

    if not changes:
        sys.exit(0)
    if opts.html:
        print(format_changes_html(oldn, newn, changes, opts.ignore_ttl))
    else:
        print(format_changes_plain(oldn, newn, changes, opts.ignore_ttl))
    sys.exit(1)

Example 8

Project: synapsePythonClient
Source File: test_tables.py
def test_rowset_tables():

    # print("Project ID:", project.id)
    # del integration._to_cleanup[:]

    cols = []
    cols.append(Column(name='name', columnType='STRING', maximumSize=1000))
    cols.append(Column(name='foo', columnType='STRING', enumValues=['foo', 'bar', 'bat']))
    cols.append(Column(name='x', columnType='DOUBLE'))
    cols.append(Column(name='age', columnType='INTEGER'))
    cols.append(Column(name='cartoon', columnType='BOOLEAN'))

    schema1 = syn.store(Schema(name='Foo Table', columns=cols, parent=project))

    print("Table Schema:", schema1.id)

    ## Get columns associated with the given table
    retrieved_cols = list(syn.getTableColumns(schema1))

    ## Test that the columns we get are the same as the ones we stored
    assert len(retrieved_cols) == len(cols)
    for retrieved_col, col in zip(retrieved_cols, cols):
        assert retrieved_col.name == col.name
        assert retrieved_col.columnType == col.columnType

    data1 =[['Chris',  'bar', 11.23, 45, False],
            ['Jen',    'bat', 14.56, 40, False],
            ['Jane',   'bat', 17.89,  6, False],
            ['Henry',  'bar', 10.12,  1, False]]
    row_reference_set1 = syn.store(
        RowSet(columns=cols, schema=schema1, rows=[Row(r) for r in data1]))

    assert len(row_reference_set1['rows']) == 4

    ## add more new rows
    ## TODO: use 'NaN', '+Infinity', '-Infinity' when supported by server
    data2 =[['Fred',   'bat', 21.45, 20, True],
            ['Daphne', 'foo', 27.89, 20, True],
            ['Shaggy', 'foo', 23.45, 20, True],
            ['Velma',  'bar', 25.67, 20, True]]
    syn.store(
        RowSet(columns=cols, schema=schema1, rows=[Row(r) for r in data2]))

    results = syn.tableQuery("select * from %s order by name" % schema1.id, resultsAs="rowset")

    assert results.count==8
    assert results.tableId==schema1.id

    ## test that the values made the round trip
    expected = sorted(data1 + data2)
    for expected_values, row in zip(expected, results):
        assert expected_values == row['values'], 'got %s but expected %s' % (row['values'], expected_values)

    ## To modify rows, we have to select then first.
    result2 = syn.tableQuery('select * from %s where age>18 and age<30'%schema1.id, resultsAs="rowset")

    ## make a change
    rs = result2.asRowSet()
    for row in rs['rows']:
        row['values'][2] = 88.888

    ## store it
    row_reference_set = syn.store(rs)

    ## check if the change sticks
    result3 = syn.tableQuery('select name, x, age from %s'%schema1.id, resultsAs="rowset")
    for row in result3:
        if int(row['values'][2]) == 20:
            assert row['values'][1] == 88.888

    ## Add a column
    bday_column = syn.store(Column(name='birthday', columnType='DATE'))

    column = syn.getColumn(bday_column.id)
    assert column.name=="birthday"
    assert column.columnType=="DATE"

    schema1.addColumn(bday_column)
    schema1 = syn.store(schema1)

    results = syn.tableQuery('select * from %s where cartoon=false order by age'%schema1.id, resultsAs="rowset")
    rs = results.asRowSet()

    ## put data in new column
    bdays = ('2013-3-15', '2008-1-3', '1973-12-8', '1969-4-28')
    for bday, row in zip(bdays, rs.rows):
        row['values'][5] = bday
    row_reference_set = syn.store(rs)

    ## query by date and check that we get back two kids
    date_2008_jan_1 = utils.to_unix_epoch_time(datetime(2008,1,1))
    results = syn.tableQuery('select name from %s where birthday > %d order by birthday' % (schema1.id, date_2008_jan_1), resultsAs="rowset")
    assert ["Jane", "Henry"] == [row['values'][0] for row in results]

    try:
        import pandas as pd
        df = results.asDataFrame()
        assert all(df.ix[:,"name"] == ["Jane", "Henry"])
    except ImportError as e1:
        sys.stderr.write('Pandas is apparently not installed, skipping part of test_rowset_tables.\n\n')

    results = syn.tableQuery('select birthday from %s where cartoon=false order by age' % schema1.id, resultsAs="rowset")
    for bday, row in zip(bdays, results):
        assert row['values'][0] == datetime.strptime(bday, "%Y-%m-%d"), "got %s but expected %s" % (row['values'][0], bday)

    try:
        import pandas as pd
        results = syn.tableQuery("select foo, MAX(x), COUNT(foo), MIN(age) from %s group by foo order by foo" % schema1.id, resultsAs="rowset")
        df = results.asDataFrame()
        print(df)
        assert df.shape == (3,4)
        assert all(df.iloc[:,0] == ["bar", "bat", "foo"])
        assert all(df.iloc[:,1] == [88.888, 88.888, 88.888])
        assert all(df.iloc[:,2] == [3, 3, 2])
    except ImportError as e1:
        sys.stderr.write('Pandas is apparently not installed, skipping part of test_rowset_tables.\n\n')

    ## test delete rows by deleting cartoon characters
    syn.delete(syn.tableQuery('select name from %s where cartoon = true'%schema1.id, resultsAs="rowset"))

    results = syn.tableQuery('select name from %s order by birthday' % schema1.id, resultsAs="rowset")
    assert ["Chris", "Jen", "Jane", "Henry"] == [row['values'][0] for row in results]

    ## check what happens when query result is empty
    results = syn.tableQuery('select * from %s where age > 1000' % schema1.id, resultsAs="rowset")
    assert len(list(results)) == 0

    try:
        import pandas as pd
        results = syn.tableQuery('select * from %s where age > 1000' % schema1.id, resultsAs="rowset")
        df = results.asDataFrame()
        assert df.shape[0] == 0
    except ImportError as e1:
        sys.stderr.write('Pandas is apparently not installed, skipping part of test_rowset_tables.\n\n')
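
Each of the pandas-dependent checks above follows the same pattern: attempt the optional import inside a try block and, if it fails, write a skip notice to stderr and carry on with the rest of the test. A minimal sketch of that pattern (the helper name is illustrative, not part of the project):

import sys

def check_dataframe_if_pandas(results):
    # Run the pandas-only assertions when pandas is available; otherwise report
    # the skip on stderr and keep going, as the test above does.
    try:
        import pandas as pd  # optional dependency
        df = results.asDataFrame()
        return df.shape
    except ImportError:
        sys.stderr.write('Pandas is apparently not installed, '
                         'skipping part of test_rowset_tables.\n\n')
        return None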

Example 9

Project: sd-agent
Source File: config.py
View license
def get_config(parse_args=True, cfg_path=None, options=None):
    if parse_args:
        options, _ = get_parsed_args()

    # General config
    agentConfig = {
        'check_freq': DEFAULT_CHECK_FREQUENCY,
        'dogstatsd_port': 8125,
        'dogstatsd_target': 'http://localhost:17123',
        'graphite_listen_port': None,
        'hostname': None,
        'listen_port': None,
        'tags': None,
        'use_ec2_instance_id': False,  # DEPRECATED
        'version': get_version(),
        'watchdog': True,
        'additional_checksd': '/etc/sd-agent/checks.d/',
        'bind_host': get_default_bind_host(),
        'statsd_metric_namespace': None,
        'utf8_decoding': False
    }

    if Platform.is_mac():
        agentConfig['additional_checksd'] = '/usr/local/etc/sd-agent/checks.d/'

    # Config handling
    try:
        # Find the right config file
        path = os.path.realpath(__file__)
        path = os.path.dirname(path)

        config_path = get_config_path(cfg_path, os_name=get_os())
        config = ConfigParser.ConfigParser()
        config.readfp(skip_leading_wsp(open(config_path)))

        # bulk import
        for option in config.options('Main'):
            agentConfig[option] = config.get('Main', option)

        # Store developer mode setting in the agentConfig
        if config.has_option('Main', 'developer_mode'):
            agentConfig['developer_mode'] = _is_affirmative(config.get('Main', 'developer_mode'))

        # Allow an override with the --profile option
        if options is not None and options.profile:
            agentConfig['developer_mode'] = True

        #
        # Core config
        #

        # FIXME unnecessarily complex
        if config.has_option('Main', 'sd_account'):
            agentConfig['sd_account'] = config.get('Main', 'sd_account')
        agentConfig['use_forwarder'] = False
        if options is not None and options.use_forwarder:
            listen_port = 17124
            if config.has_option('Main', 'listen_port'):
                listen_port = int(config.get('Main', 'listen_port'))
            agentConfig['sd_url'] = "http://" + agentConfig['bind_host'] + ":" + str(listen_port)
            agentConfig['use_forwarder'] = True
        elif options is not None and not options.disable_sd and options.sd_url:
            agentConfig['sd_url'] = options.sd_url
        elif config.has_option('Main', 'sd_url'):
            agentConfig['sd_url'] = config.get('Main', 'sd_url')
        else:
            # Default agent URL
            agentConfig['sd_url'] = "https://" + agentConfig['sd_account'] + ".agent.serverdensity.io"
        if agentConfig['sd_url'].endswith('/'):
            agentConfig['sd_url'] = agentConfig['sd_url'][:-1]

        # Extra checks.d path
        # the linux directory is set by default
        if config.has_option('Main', 'additional_checksd'):
            agentConfig['additional_checksd'] = config.get('Main', 'additional_checksd')
        elif get_os() == 'windows':
            # default windows location
            common_path = _windows_commondata_path()
            agentConfig['additional_checksd'] = os.path.join(common_path, 'ServerDensity', 'checks.d')

        if config.has_option('Main', 'use_dogstatsd'):
            agentConfig['use_dogstatsd'] = config.get('Main', 'use_dogstatsd').lower() in ("yes", "true")
        else:
            agentConfig['use_dogstatsd'] = True

        # Service discovery
        if config.has_option('Main', 'service_discovery_backend'):
            try:
                additional_config = extract_agent_config(config)
                agentConfig.update(additional_config)
            except:
                log.error('Failed to load the agent configuration related to '
                          'service discovery. It will not be used.')

        # Concerns only Windows
        if config.has_option('Main', 'use_web_info_page'):
            agentConfig['use_web_info_page'] = config.get('Main', 'use_web_info_page').lower() in ("yes", "true")
        else:
            agentConfig['use_web_info_page'] = True

        # Which agent key to use
        agentConfig['agent_key'] = config.get('Main', 'agent_key')

        # local traffic only? Default to no
        agentConfig['non_local_traffic'] = False
        if config.has_option('Main', 'non_local_traffic'):
            agentConfig['non_local_traffic'] = config.get('Main', 'non_local_traffic').lower() in ("yes", "true")

        # DEPRECATED
        if config.has_option('Main', 'use_ec2_instance_id'):
            use_ec2_instance_id = config.get('Main', 'use_ec2_instance_id')
            # translate yes into True, the rest into False
            agentConfig['use_ec2_instance_id'] = (use_ec2_instance_id.lower() == 'yes')

        if config.has_option('Main', 'check_freq'):
            try:
                agentConfig['check_freq'] = int(config.get('Main', 'check_freq'))
            except Exception:
                pass

        # Custom histogram aggregate/percentile metrics
        if config.has_option('Main', 'histogram_aggregates'):
            agentConfig['histogram_aggregates'] = get_histogram_aggregates(config.get('Main', 'histogram_aggregates'))

        if config.has_option('Main', 'histogram_percentiles'):
            agentConfig['histogram_percentiles'] = get_histogram_percentiles(config.get('Main', 'histogram_percentiles'))

        # Disable Watchdog (optionally)
        if config.has_option('Main', 'watchdog'):
            if config.get('Main', 'watchdog').lower() in ('no', 'false'):
                agentConfig['watchdog'] = False

        # Optional graphite listener
        if config.has_option('Main', 'graphite_listen_port'):
            agentConfig['graphite_listen_port'] = \
                int(config.get('Main', 'graphite_listen_port'))
        else:
            agentConfig['graphite_listen_port'] = None

        # Dogstatsd config
        dogstatsd_defaults = {
            'dogstatsd_port': 8125,
            'dogstatsd_target': 'http://' + agentConfig['bind_host'] + ':17123',
        }
        for key, value in dogstatsd_defaults.iteritems():
            if config.has_option('Main', key):
                agentConfig[key] = config.get('Main', key)
            else:
                agentConfig[key] = value

        # Create app:xxx tags based on monitored apps
        agentConfig['create_dd_check_tags'] = config.has_option('Main', 'create_dd_check_tags') and \
            _is_affirmative(config.get('Main', 'create_dd_check_tags'))

        # Forwarding to external statsd server
        if config.has_option('Main', 'statsd_forward_host'):
            agentConfig['statsd_forward_host'] = config.get('Main', 'statsd_forward_host')
            if config.has_option('Main', 'statsd_forward_port'):
                agentConfig['statsd_forward_port'] = int(config.get('Main', 'statsd_forward_port'))

        # optionally send dogstatsd data directly to the agent.
        if config.has_option('Main', 'dogstatsd_use_ddurl'):
            if _is_affirmative(config.get('Main', 'dogstatsd_use_ddurl')):
                agentConfig['dogstatsd_target'] = agentConfig['sd_url']

        # Optional config
        # FIXME not the prettiest code ever...
        if config.has_option('Main', 'use_mount'):
            agentConfig['use_mount'] = _is_affirmative(config.get('Main', 'use_mount'))

        if options is not None and options.autorestart:
            agentConfig['autorestart'] = True
        elif config.has_option('Main', 'autorestart'):
            agentConfig['autorestart'] = _is_affirmative(config.get('Main', 'autorestart'))

        if config.has_option('Main', 'check_timings'):
            agentConfig['check_timings'] = _is_affirmative(config.get('Main', 'check_timings'))

        if config.has_option('Main', 'exclude_process_args'):
            agentConfig['exclude_process_args'] = _is_affirmative(config.get('Main', 'exclude_process_args'))

        try:
            filter_device_re = config.get('Main', 'device_blacklist_re')
            agentConfig['device_blacklist_re'] = re.compile(filter_device_re)
        except ConfigParser.NoOptionError:
            pass

        if config.has_option("Main", "nagios_perf_cfg"):
            agentConfig["nagios_perf_cfg"] = config.get("Main", "nagios_perf_cfg")

        if config.has_option("Main", "use_curl_http_client"):
            agentConfig["use_curl_http_client"] = _is_affirmative(config.get("Main", "use_curl_http_client"))
        else:
            # Default to False as there are some issues with the curl client and ELB
            agentConfig["use_curl_http_client"] = False

        if config.has_section('WMI'):
            agentConfig['WMI'] = {}
            for key, value in config.items('WMI'):
                agentConfig['WMI'][key] = value

        if (config.has_option("Main", "limit_memory_consumption") and
                config.get("Main", "limit_memory_consumption") is not None):
            agentConfig["limit_memory_consumption"] = int(config.get("Main", "limit_memory_consumption"))
        else:
            agentConfig["limit_memory_consumption"] = None

        if config.has_option("Main", "skip_ssl_validation"):
            agentConfig["skip_ssl_validation"] = _is_affirmative(config.get("Main", "skip_ssl_validation"))

        agentConfig["collect_instance_metadata"] = True
        if config.has_option("Main", "collect_instance_metadata"):
            agentConfig["collect_instance_metadata"] = _is_affirmative(config.get("Main", "collect_instance_metadata"))

        agentConfig["proxy_forbid_method_switch"] = False
        if config.has_option("Main", "proxy_forbid_method_switch"):
            agentConfig["proxy_forbid_method_switch"] = _is_affirmative(config.get("Main", "proxy_forbid_method_switch"))

        agentConfig["collect_ec2_tags"] = False
        if config.has_option("Main", "collect_ec2_tags"):
            agentConfig["collect_ec2_tags"] = _is_affirmative(config.get("Main", "collect_ec2_tags"))

        agentConfig["utf8_decoding"] = False
        if config.has_option("Main", "utf8_decoding"):
            agentConfig["utf8_decoding"] = _is_affirmative(config.get("Main", "utf8_decoding"))

        agentConfig["gce_updated_hostname"] = False
        if config.has_option("Main", "gce_updated_hostname"):
            agentConfig["gce_updated_hostname"] = _is_affirmative(config.get("Main", "gce_updated_hostname"))

    except ConfigParser.NoSectionError, e:
        sys.stderr.write('Config file not found or incorrectly formatted.\n')
        sys.exit(2)

    except ConfigParser.ParsingError, e:
        sys.stderr.write('Config file not found or incorrectly formatted.\n')
        sys.exit(2)

    except ConfigParser.NoOptionError, e:
        sys.stderr.write('There are some items missing from your config file, but nothing fatal [%s]' % e)

    # Storing proxy settings in the agentConfig
    agentConfig['proxy_settings'] = get_proxy(agentConfig)
    if agentConfig.get('ca_certs', None) is None:
        agentConfig['ssl_certificate'] = get_ssl_certificate(get_os(), 'sd-cert.pem')
    else:
        agentConfig['ssl_certificate'] = agentConfig['ca_certs']

    return agentConfig
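
The error handling above separates fatal problems from recoverable ones: a missing or malformed config file is reported on stderr and the process exits with status 2, while a missing option only produces a warning and execution continues. A condensed sketch of that split (helper and option names are illustrative; the example itself uses the Python 2 ConfigParser module):

import sys
import configparser

def read_agent_key(config_path):
    config = configparser.ConfigParser()
    try:
        config.read(config_path)  # a missing file simply leaves the parser empty
        return config.get('Main', 'agent_key')
    except (configparser.NoSectionError, configparser.ParsingError):
        # Fatal: report on stderr and exit with a nonzero status.
        sys.stderr.write('Config file not found or incorrectly formatted.\n')
        sys.exit(2)
    except configparser.NoOptionError as e:
        # Non-fatal: warn on stderr and continue without the value.
        sys.stderr.write('There are some items missing from your config file, '
                         'but nothing fatal [%s]' % e)
        return None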

Example 10

Project: pyNastran
Source File: test_bdf_vectorized2.py
View license
def run_lots_of_files(filenames, folder='', debug=False, xref=True, check=True,
                      punch=False, cid=None, size=None, is_double=None):
    """
    Runs multiple BDFs

    Parameters
    ----------
    folder : str
        the folder where the bdf_filename is
    filenames : List[str]
        the bdf files to analyze
    debug : bool, optional
        run with debug logging (default=False)
    xref : bool / str / List[bool/str], optional
        True : cross reference the model
        False  : don't cross reference the model
        'safe' : do safe cross referencing
    check : bool / List[bool], optional
        validate cards for things like mass, area, etc. (default=True)
    punch : bool / List[bool], optional
        this is a PUNCH file (no executive/case control decks; default=False)
    cid : int / None, optional
        convert the model grids to an alternate coordinate system (default=None; no conversion)
    size : int / List[int], optional
        The field width of the model (8/16)
    is_double : bool / List[bool], optional
        Is this a double precision model?
            True : size = 16
            False : size = {8, 16}
    nastran : str, optional
        the path to nastran (default=''; no analysis)
    post : int / List[int], optional
        the PARAM,POST,value to run
    sum_load : bool; default=True
        should the loads be summed
    dev : bool; default=True
        True : crashes if an Exception occurs
        False : doesn't crash; useful for running many tests
    crash_cards : List[str, str, ...]
        list of cards that are invalid and automatically crash the run
    Usage
    -----
    All control lists must be the same length.
    You can run xref=True and xref=False with:

    .. python ::

        run_lots_of_files(filenames, xref=[True, False]) # valid
    """
    filenames = list(set(filenames))
    filenames.sort()

    if size is None:
        sizes = [8]
    elif isinstance(size, integer_types):
        sizes = [size]
    else:
        sizes = size

    if is_double is None:
        is_doubles = [8]
    elif isinstance(is_double, bool):
        is_doubles = [is_double]
    else:
        is_doubles = is_double

    #debug = True
    filenames2 = []
    diff_cards = []
    for filename in filenames:
        if(filename.endswith('.bdf') or filename.endswith('.dat') or
           filename.endswith('.nas')):
            filenames2.append(filename)

    failed_files = []
    n = 1
    for filename in filenames2:
        abs_filename = os.path.abspath(os.path.join(folder, filename))
        if folder != '':
            print("filename = %s" % abs_filename)
        is_passed = False
        #try:
        (fem1, fem2, diff_cards) = run_bdf(folder, filename, debug=debug,
                                           xref=xref, check=check, punch=punch,
                                           cid=cid, isFolder=True, dynamic_vars={})
        del fem1
        del fem2
        diff_cards += diff_cards
        is_passed = True
        #except KeyboardInterrupt:
            #sys.exit('KeyboardInterrupt...sys.exit()')
        #except IOError:
            #pass
        #except RuntimeError:  # only temporarily uncomment this when running lots of tests
            #pass
        #except AttributeError:  # only temporarily uncomment this when running lots of tests
            #pass
        #except SyntaxError:  # only temporarily uncomment this when running lots of tests
            #pass
        #except SystemExit:
            #sys.exit('sys.exit...')
        #except:
            #traceback.print_exc(file=sys.stdout)
            ##raise
        print('-' * 80)

        if is_passed:
            sys.stderr.write('%i %s' % (n, abs_filename))
            n += 1
        else:
            sys.stderr.write('*' + abs_filename)
            failed_files.append(abs_filename)
        sys.stderr.write('\n')

    print('*' * 80)
    try:
        print("diff_cards1 = %s" % list(set(diff_cards)))
    except TypeError:
        print("diff_cards2 = %s" % diff_cards)
    return failed_files
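
Here stderr serves as a running progress channel: every file that passes gets a numbered line, every failure is prefixed with '*' and remembered for the summary, and stdout stays free for the per-file output. The reporting can be isolated into a small helper like this (name and signature are illustrative):

import sys

def report_file_result(n, abs_filename, is_passed, failed_files):
    # Mirror of the stderr reporting above: count passed files,
    # mark failures with '*' and collect them for the summary.
    if is_passed:
        sys.stderr.write('%i %s' % (n, abs_filename))
        n += 1
    else:
        sys.stderr.write('*' + abs_filename)
        failed_files.append(abs_filename)
    sys.stderr.write('\n')
    return n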

Example 11

Project: models
Source File: dp_mnist.py
View license
def Train(mnist_train_file, mnist_test_file, network_parameters, num_steps,
          save_path, eval_steps=0):
  """Train MNIST for a number of steps.

  Args:
    mnist_train_file: path of MNIST train data file.
    mnist_test_file: path of MNIST test data file.
    network_parameters: parameters for defining and training the network.
    num_steps: number of steps to run. Here steps = lots
    save_path: path where to save trained parameters.
    eval_steps: evaluate the model every eval_steps.

  Returns:
    the result after the final training step.

  Raises:
    ValueError: if the accountant_type is not supported.
  """
  batch_size = FLAGS.batch_size

  params = {"accountant_type": FLAGS.accountant_type,
            "task_id": 0,
            "batch_size": FLAGS.batch_size,
            "projection_dimensions": FLAGS.projection_dimensions,
            "default_gradient_l2norm_bound":
            network_parameters.default_gradient_l2norm_bound,
            "num_hidden_layers": FLAGS.num_hidden_layers,
            "hidden_layer_num_units": FLAGS.hidden_layer_num_units,
            "num_examples": NUM_TRAINING_IMAGES,
            "learning_rate": FLAGS.lr,
            "end_learning_rate": FLAGS.end_lr,
            "learning_rate_saturate_epochs": FLAGS.lr_saturate_epochs
           }
  # Log different privacy parameters dependent on the accountant type.
  if FLAGS.accountant_type == "Amortized":
    params.update({"flag_eps": FLAGS.eps,
                   "flag_delta": FLAGS.delta,
                   "flag_pca_eps": FLAGS.pca_eps,
                   "flag_pca_delta": FLAGS.pca_delta,
                  })
  elif FLAGS.accountant_type == "Moments":
    params.update({"sigma": FLAGS.sigma,
                   "pca_sigma": FLAGS.pca_sigma,
                  })

  with tf.Graph().as_default(), tf.Session() as sess, tf.device('/cpu:0'):
    # Create the basic Mnist model.
    images, labels = MnistInput(mnist_train_file, batch_size, FLAGS.randomize)

    logits, projection, training_params = utils.BuildNetwork(
        images, network_parameters)

    cost = tf.nn.softmax_cross_entropy_with_logits(
        logits, tf.one_hot(labels, 10))

    # The actual cost is the average across the examples.
    cost = tf.reduce_sum(cost, [0]) / batch_size

    if FLAGS.accountant_type == "Amortized":
      priv_accountant = accountant.AmortizedAccountant(NUM_TRAINING_IMAGES)
      sigma = None
      pca_sigma = None
      with_privacy = FLAGS.eps > 0
    elif FLAGS.accountant_type == "Moments":
      priv_accountant = accountant.GaussianMomentsAccountant(
          NUM_TRAINING_IMAGES)
      sigma = FLAGS.sigma
      pca_sigma = FLAGS.pca_sigma
      with_privacy = FLAGS.sigma > 0
    else:
      raise ValueError("Undefined accountant type, needs to be "
                       "Amortized or Moments, but got %s" % FLAGS.accountant)
    # Note: Here and below, we scale down the l2norm_bound by
    # batch_size. This is because per_example_gradients computes the
    # gradient of the minibatch loss with respect to each individual
    # example, and the minibatch loss (for our model) is the *average*
    # loss over examples in the minibatch. Hence, the scale of the
    # per-example gradients goes like 1 / batch_size.
    gaussian_sanitizer = sanitizer.AmortizedGaussianSanitizer(
        priv_accountant,
        [network_parameters.default_gradient_l2norm_bound / batch_size, True])

    for var in training_params:
      if "gradient_l2norm_bound" in training_params[var]:
        l2bound = training_params[var]["gradient_l2norm_bound"] / batch_size
        gaussian_sanitizer.set_option(var,
                                      sanitizer.ClipOption(l2bound, True))
    lr = tf.placeholder(tf.float32)
    eps = tf.placeholder(tf.float32)
    delta = tf.placeholder(tf.float32)

    init_ops = []
    if network_parameters.projection_type == "PCA":
      with tf.variable_scope("pca"):
        # Compute differentially private PCA.
        all_data, _ = MnistInput(mnist_train_file, NUM_TRAINING_IMAGES, False)
        pca_projection = dp_pca.ComputeDPPrincipalProjection(
            all_data, network_parameters.projection_dimensions,
            gaussian_sanitizer, [FLAGS.pca_eps, FLAGS.pca_delta], pca_sigma)
        assign_pca_proj = tf.assign(projection, pca_projection)
        init_ops.append(assign_pca_proj)

    # Add global_step
    global_step = tf.Variable(0, dtype=tf.int32, trainable=False,
                              name="global_step")

    if with_privacy:
      gd_op = dp_optimizer.DPGradientDescentOptimizer(
          lr,
          [eps, delta],
          gaussian_sanitizer,
          sigma=sigma,
          batches_per_lot=FLAGS.batches_per_lot).minimize(
              cost, global_step=global_step)
    else:
      gd_op = tf.train.GradientDescentOptimizer(lr).minimize(cost)

    saver = tf.train.Saver()
    coord = tf.train.Coordinator()
    _ = tf.train.start_queue_runners(sess=sess, coord=coord)

    # We need to maintain the initialization sequence.
    for v in tf.trainable_variables():
      sess.run(tf.initialize_variables([v]))
    sess.run(tf.initialize_all_variables())
    sess.run(init_ops)

    results = []
    start_time = time.time()
    prev_time = start_time
    filename = "results-0.json"
    log_path = os.path.join(save_path, filename)

    target_eps = [float(s) for s in FLAGS.target_eps.split(",")]
    if FLAGS.accountant_type == "Amortized":
      # Only matters if --terminate_based_on_privacy is true.
      target_eps = [max(target_eps)]
    max_target_eps = max(target_eps)

    lot_size = FLAGS.batches_per_lot * FLAGS.batch_size
    lots_per_epoch = NUM_TRAINING_IMAGES / lot_size
    for step in xrange(num_steps):
      epoch = step / lots_per_epoch
      curr_lr = utils.VaryRate(FLAGS.lr, FLAGS.end_lr,
                               FLAGS.lr_saturate_epochs, epoch)
      curr_eps = utils.VaryRate(FLAGS.eps, FLAGS.end_eps,
                                FLAGS.eps_saturate_epochs, epoch)
      for _ in xrange(FLAGS.batches_per_lot):
        _ = sess.run(
            [gd_op], feed_dict={lr: curr_lr, eps: curr_eps, delta: FLAGS.delta})
      sys.stderr.write("step: %d\n" % step)

      # See if we should stop training due to exceeded privacy budget:
      should_terminate = False
      terminate_spent_eps_delta = None
      if with_privacy and FLAGS.terminate_based_on_privacy:
        terminate_spent_eps_delta = priv_accountant.get_privacy_spent(
            sess, target_eps=[max_target_eps])[0]
        # For the Moments accountant, we should always have
        # spent_eps == max_target_eps.
        if (terminate_spent_eps_delta.spent_delta > FLAGS.target_delta or
            terminate_spent_eps_delta.spent_eps > max_target_eps):
          should_terminate = True

      if (eval_steps > 0 and (step + 1) % eval_steps == 0) or should_terminate:
        if with_privacy:
          spent_eps_deltas = priv_accountant.get_privacy_spent(
              sess, target_eps=target_eps)
        else:
          spent_eps_deltas = [accountant.EpsDelta(0, 0)]
        for spent_eps, spent_delta in spent_eps_deltas:
          sys.stderr.write("spent privacy: eps %.4f delta %.5g\n" % (
              spent_eps, spent_delta))

        saver.save(sess, save_path=save_path + "/ckpt")
        train_accuracy, _ = Eval(mnist_train_file, network_parameters,
                                 num_testing_images=NUM_TESTING_IMAGES,
                                 randomize=True, load_path=save_path)
        sys.stderr.write("train_accuracy: %.2f\n" % train_accuracy)
        test_accuracy, mistakes = Eval(mnist_test_file, network_parameters,
                                       num_testing_images=NUM_TESTING_IMAGES,
                                       randomize=False, load_path=save_path,
                                       save_mistakes=FLAGS.save_mistakes)
        sys.stderr.write("eval_accuracy: %.2f\n" % test_accuracy)

        curr_time = time.time()
        elapsed_time = curr_time - prev_time
        prev_time = curr_time

        results.append({"step": step+1,  # Number of lots trained so far.
                        "elapsed_secs": elapsed_time,
                        "spent_eps_deltas": spent_eps_deltas,
                        "train_accuracy": train_accuracy,
                        "test_accuracy": test_accuracy,
                        "mistakes": mistakes})
        loginfo = {"elapsed_secs": curr_time-start_time,
                   "spent_eps_deltas": spent_eps_deltas,
                   "train_accuracy": train_accuracy,
                   "test_accuracy": test_accuracy,
                   "num_training_steps": step+1,  # Steps so far.
                   "mistakes": mistakes,
                   "result_series": results}
        loginfo.update(params)
        if log_path:
          with tf.gfile.Open(log_path, "w") as f:
            json.dump(loginfo, f, indent=2)
            f.write("\n")
            f.close()

      if should_terminate:
        break
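
Throughout the training loop, stderr carries the human-readable progress (step counter, spent privacy budget, train and eval accuracy) while the structured results go to a JSON log file. A stripped-down sketch of that reporting (the function name is illustrative):

import sys

def log_progress(step, spent_eps_deltas, train_accuracy, test_accuracy):
    # Human-readable progress goes to stderr, as in the loop above, so it does
    # not mix with the structured results written elsewhere.
    sys.stderr.write("step: %d\n" % step)
    for spent_eps, spent_delta in spent_eps_deltas:
        sys.stderr.write("spent privacy: eps %.4f delta %.5g\n" % (spent_eps, spent_delta))
    sys.stderr.write("train_accuracy: %.2f\n" % train_accuracy)
    sys.stderr.write("eval_accuracy: %.2f\n" % test_accuracy)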

Example 12

Project: flumotion
Source File: main.py
View license
def main(args):
    parser = _createParser()

    log.debug('manager', 'Parsing arguments (%r)' % ', '.join(args))
    options, args = parser.parse_args(args)

    # Force options down configure's throat
    for d in ['logdir', 'rundir']:
        o = getattr(options, d, None)
        if o:
            log.debug('manager', 'Setting configure.%s to %s' % (d, o))
            setattr(configure, d, o)

    # parse planet config file
    if len(args) <= 1:
        log.warning('manager', 'Please specify a planet configuration file')
        sys.stderr.write("Please specify a planet configuration file.\n")
        return 1

    planetFile = args[1]
    try:
        cfg = config.ManagerConfigParser(planetFile)
    except IOError, e:
        sys.stderr.write("ERROR: Could not read configuration from '%s':\n" %
            planetFile)
        sys.stderr.write("ERROR: %s\n" % e.strerror)
        return 1
    except errors.ConfigError, e:
        sys.stderr.write("ERROR: Could not read configuration from '%s':\n" %
            planetFile)
        sys.stderr.write("ERROR: %s\n" % e.args[0])
        return 1

    managerConfigDir = os.path.abspath(os.path.dirname(planetFile))

    # now copy over stuff from config that is not set yet
    if cfg.manager:
        if not options.host and cfg.manager.host:
            options.host = cfg.manager.host
            log.debug('manager', 'Setting manager host to %s' % options.host)
        if not options.port and cfg.manager.port:
            options.port = cfg.manager.port
            log.debug('manager', 'Setting manager port to %s' % options.port)
        if not options.transport and cfg.manager.transport:
            options.transport = cfg.manager.transport
            log.debug('manager', 'Setting manager transport to %s' %
                options.transport)
        if not options.certificate and cfg.manager.certificate:
            options.certificate = cfg.manager.certificate
            log.debug('manager', 'Using certificate %s' %
                options.certificate)
        if not options.name and cfg.manager.name:
            options.name = cfg.manager.name
            log.debug('manager', 'Setting manager name to %s' % options.name)
        # environment debug > command-line debug > config file debug
        if not options.debug and cfg.manager.fludebug \
            and not 'FLU_DEBUG' in os.environ:
            options.debug = cfg.manager.fludebug
            log.debug('manager',
                      'Setting debug level to config file value %s' %
                options.debug)

    # set debug level as soon as we can after deciding
    if options.debug:
        log.setFluDebug(options.debug)

    # set default values for all unset options
    if not options.host:
        options.host = "" # needed for bind to work
    if not options.transport:
        options.transport = 'ssl'
    if not options.port:
        if options.transport == "tcp":
            options.port = defaultTCPPort
        elif options.transport == "ssl":
            options.port = defaultSSLPort
    if not options.certificate and options.transport == 'ssl':
        options.certificate = 'default.pem'
    if not options.name:
        # if the file is in a directory under a 'managers' directory,
        # use the parent directory name
        head, filename = os.path.split(os.path.abspath(planetFile))
        head, name = os.path.split(head)
        head, managers = os.path.split(head)
        if managers != 'managers':
            options.name = 'unnamed'
            log.debug('manager', 'Setting name to unnamed')
        else:
            options.name = name
            log.debug('manager', 'Setting name to %s based on path' % name)

    # check for wrong options/arguments
    if not options.transport in ['ssl', 'tcp']:
        sys.stderr.write('ERROR: wrong transport %s, must be ssl or tcp\n' %
            options.transport)
        return 1

    # register package path
    setup.setupPackagePath()

    # log our standardized starting marker
    log.info('manager', "Starting manager '%s'" % options.name)

    log.debug('manager', 'Running Flumotion version %s' %
        configure.version)
    import twisted.copyright
    log.debug('manager', 'Running against Twisted version %s' %
        twisted.copyright.version)
    from flumotion.project import project
    for p in project.list():
        log.debug('manager', 'Registered project %s version %s' % (
            p, project.get(p, 'version')))

    vishnu = manager.Vishnu(options.name, configDir=managerConfigDir)
    for managerConfigFile in args[1:]:
        vishnu.loadManagerConfigurationXML(managerConfigFile)

    paths = [os.path.abspath(filename) for filename in args[1:]]
    reactor.callLater(0, _initialLoadConfig, vishnu, paths)
    reactor.callLater(0, vishnu.startManagerPlugs)

    # set up server based on transport
    myServer = server.Server(vishnu)
    try:
        if options.transport == "ssl":
            myServer.startSSL(options.host, options.port, options.certificate,
                configure.configdir)
        elif options.transport == "tcp":
            myServer.startTCP(options.host, options.port)
    except error.CannotListenError, e:
        # e is a socket.error()
        message = "Could not listen on port %d: %s" % (
            e.port, e.socketError.args[1])
        raise errors.FatalError, message

    if options.daemonizeTo and not options.daemonize:
        sys.stderr.write(
            'ERROR: --daemonize-to can only be used with -D/--daemonize.\n')
        return 1

    if options.serviceName and not options.daemonize:
        sys.stderr.write(
            'ERROR: --service-name can only be used with -D/--daemonize.\n')
        return 1

    name = options.name

    if options.daemonize:
        if options.serviceName:
            name = options.serviceName
        if not options.daemonizeTo:
            options.daemonizeTo = "/"

    startup("manager", name, options.daemonize, options.daemonizeTo)

    reactor.run()

    return 0
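
The manager's main() treats stderr as the channel for user-facing errors: each misconfiguration writes an 'ERROR:' line and returns 1 so the caller can turn it into a process exit code. A condensed sketch of the option checks, keeping only two of them (option names are taken from the code above, the helper name is illustrative):

import sys

def check_daemon_options(options):
    # Usage errors are written to stderr and signalled with a nonzero return,
    # mirroring the checks in main() above.
    if options.daemonizeTo and not options.daemonize:
        sys.stderr.write(
            'ERROR: --daemonize-to can only be used with -D/--daemonize.\n')
        return 1
    if options.serviceName and not options.daemonize:
        sys.stderr.write(
            'ERROR: --service-name can only be used with -D/--daemonize.\n')
        return 1
    return 0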

Example 13

Project: WeatherDesk
Source File: Desktop.py
View license
def set_wallpaper(image):

    desktop_env = get_desktop_environment()

    try:

        if desktop_env in ['gnome', 'unity', 'cinnamon', 'pantheon']:

            uri = 'file://%s' % image

            try:

                SCHEMA = 'org.gnome.desktop.background'
                KEY = 'picture-uri'
                gsettings = Gio.Settings.new(SCHEMA)

                gsettings.set_string(KEY, uri)

            except:

                args = ['gsettings', 'set', 'org.gnome.desktop.background', 'picture-uri', uri]
                subprocess.Popen(args)

        elif desktop_env == 'mate':

            try: # MATE >= 1.6

                args = ['gsettings', 'set', 'org.mate.background', 'picture-filename', '%s' % image]
                subprocess.Popen(args)

            except: # MATE < 1.6

                args = ['mateconftool-2','-t','string','--set','/desktop/mate/background/picture_filename','%s' % image]
                subprocess.Popen(args)

        elif desktop_env == 'gnome2':

            args = ['gconftool-2','-t','string','--set','/desktop/gnome/background/picture_filename', '%s' % image]
            subprocess.Popen(args)

        elif desktop_env == 'kde':

            # The KDE 4+ method of changing *anything* in the CLI is either
            # non-existent or deprecated or horribly convoluted.
            # There have been long-open bugs (5 yrs and counting) but no fix.

            # There was *one* way (in KDE 4) - the file
            # ~/.kde/share/config/plasma-desktop-appletsrc
            # That too, is gone in KDE 5.

            # The only way seems to be to *make* the user set a file as
            # wallpaper and keep overwriting  that file. KDE will, apparently,
            # notice the change and update automatically.

            # Update: That, too, is gone. KDE users will have to set a
            # periodically updating slideshow in a folder.

            # Update: Even *that* does not work. Sorry, KDE.

            # Update: Periodically updating slideshows with random filenames?

            old_working_dir = os.getcwd()

            if not os.path.isdir(os.path.join(os.path.expanduser('~'), '.wall_slide_kde')):

                os.mkdir(os.path.join(os.path.expanduser('~'), '.wall_slide_kde'))

            os.chdir(os.path.join(os.path.expanduser('~'), '.wall_slide_kde'))

            for dirpath, dirnames, files in os.walk('.'):

                if files:

                    for file in os.listdir('.'):

                        os.remove(file)

            kde_random_image = tempfile.NamedTemporaryFile(delete=False)

            shutil.copyfile(image, kde_random_image.name)

            os.chdir(old_working_dir)

        elif desktop_env in ['kde3', 'trinity']:

            args = 'dcop kdesktop KBackgroundIface setWallpaper 0 "%s" 6' % image
            subprocess.Popen(args,shell=True)

        elif desktop_env=='xfce4':

            # XFCE4's image property is not image-path but last-image (What?)
            # Only GNOME seems to have a sane wallpaper interface

            # Update: the monitor id thing seems to be changed in
            # XFCE 4.12 to just monitor0 instead of monitorVGA1 or something
            # So now we need to do both.

            list_of_properties_cmd = subprocess.Popen(['bash -c "xfconf-query -R -l -c xfce4-desktop -p /backdrop"'], shell=True, stdout=subprocess.PIPE)

            list_of_properties, list_of_properties_err = list_of_properties_cmd.communicate()

            list_of_properties = list_of_properties.decode('utf-8')

            for i in list_of_properties.split('\n'):

                if i.endswith('last-image'):

                    # The property given is a background property
                    subprocess.Popen(
                        ['xfconf-query -c xfce4-desktop -p %s -s "%s"' % (i, image)],
                        shell=True)

                    subprocess.Popen(['xfdesktop --reload'], shell=True)

        elif desktop_env=='razor-qt':

            desktop_conf = configparser.ConfigParser()
            # Development version

            desktop_conf_file = os.path.join(get_config_dir('razor'),'desktop.conf')

            if os.path.isfile(desktop_conf_file):

                config_option = r'screens\1\desktops\1\wallpaper'

            else:

                desktop_conf_file = os.path.join(os.path.expanduser('~'),'.razor/desktop.conf')
                config_option = r'desktops\1\wallpaper'

            desktop_conf.read(os.path.join(desktop_conf_file))

            try:

                if desktop_conf.has_option('razor',config_option):  # only replacing a value

                    desktop_conf.set('razor',config_option,image)

                    with codecs.open(desktop_conf_file, 'w', encoding='utf-8', errors='replace') as f:

                        desktop_conf.write(f)

            except: pass


        elif desktop_env in ['fluxbox','jwm','openbox','afterstep', 'i3']:

            try:

                args = ['feh','--bg-scale', image]
                subprocess.Popen(args)

            except:

                sys.stderr.write('Error: Failed to set wallpaper with feh!\n')
                sys.stderr.write('Please make sure that you have feh installed.\n')

        elif desktop_env == 'icewm':

            args = ['icewmbg', image]
            subprocess.Popen(args)

        elif desktop_env == 'blackbox':

            args = ['bsetbg', '-full', image]
            subprocess.Popen(args)

        elif desktop_env == 'lxde':

            args = 'pcmanfm --set-wallpaper %s --wallpaper-mode=scaled' % image
            subprocess.Popen(args, shell=True)

        elif desktop_env == 'lxqt':

            args = 'pcmanfm-qt --set-wallpaper %s --wallpaper-mode=scaled' % image
            subprocess.Popen(args, shell=True)

        elif desktop_env == 'windowmaker':

            args = 'wmsetbg -s -u %s' % image
            subprocess.Popen(args, shell=True)

        elif desktop_env == 'enlightenment':

           args = 'enlightenment_remote -desktop-bg-add 0 0 0 0 %s' % image
           subprocess.Popen(args, shell=True)

        elif desktop_env == 'awesome':

            with subprocess.Popen("awesome-client", stdin=subprocess.PIPE) as awesome_client:

                command = 'local gears = require("gears"); for s = 1, screen.count() do gears.wallpaper.maximized("%s", s, true); end;' % image
                awesome_client.communicate(input=bytes(command, 'UTF-8'));

        elif desktop_env == 'windows':

               WIN_SCRIPT = '''reg add "HKEY_CURRENT_USER\Control Panel\Desktop" /v Wallpaper /t REG_SZ /d  %s /f

rundll32.exe user32.dll,UpdatePerUserSystemParameters
''' % image

               win_script_file = open(os.path.abspath(os.path.expanduser('~/.weatherdesk_script.bat')), 'w')

               win_script_file.write(WIN_SCRIPT)

               win_script_file.close()

               subprocess.Popen([os.path.abspath(os.path.expanduser('~/.weatherdesk_script.bat'))], shell=True)

        elif desktop_env == 'mac':

            try:

               from appscript import app, mactypes

               app('Finder').desktop_picture.set(mactypes.File(image))

            except ImportError:

                OSX_SCRIPT = '''tell application "System Events"
                                  set desktopCount to count of desktops
                                    repeat with desktopNumber from 1 to desktopCount
                                      tell desktop desktopNumber
                                        set picture to POSIX file "%s"
                                      end tell
                                    end repeat
                                end tell
                ''' % image

                osx_script_file = open(os.path.expanduser('~/.weatherdesk_script.AppleScript'), 'w')

                osx_script_file.truncate()

                osx_script_file.write(OSX_SCRIPT)

                osx_script_file.close()

                subprocess.Popen(['/usr/bin/osascript', os.path.abspath(os.path.expanduser('~/.weatherdesk_script.AppleScript'))])
        else:

            sys.stderr.write('Error: Failed to set wallpaper. (Desktop not supported)')

            return False

        return True

    except:

        print(traceback.format_exc())

        return False
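
When no desktop-specific branch applies, the function reports the failure on stderr and returns False rather than raising. A minimal sketch of that fallback (the trailing newline and the %s placeholder are additions here; the original writes a fixed message without a newline):

import sys

def report_unsupported_desktop(desktop_env):
    # Fallback used when no wallpaper-setting branch matches.
    sys.stderr.write('Error: Failed to set wallpaper. (Desktop not supported: %s)\n'
                     % desktop_env)
    return False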

Example 14

Project: foodnetwork
Source File: syncdata.py
View license
    def handle(self, *fixture_labels, **options):
        """ Main method of a Django command """
        from django.db.models import get_apps
        from django.core import serializers
        from django.db import connection, transaction
        from django.conf import settings

        self.style = no_style()

        verbosity = int(options.get('verbosity', 1))
        show_traceback = options.get('traceback', False)
        
        # Keep a count of the installed objects and fixtures
        fixture_count = 0
        object_count = 0
        objects_per_fixture = []
        models = set()

        humanize = lambda dirname: dirname and "'%s'" % dirname or 'absolute path'

        # Get a cursor (even though we don't need one yet). This has
        # the side effect of initializing the test database (if
        # it isn't already initialized).
        cursor = connection.cursor()

        # Start transaction management. All fixtures are installed in a
        # single transaction to ensure that all references are resolved.
        transaction.commit_unless_managed()
        transaction.enter_transaction_management()
        transaction.managed(True)

        app_fixtures = [os.path.join(os.path.dirname(app.__file__), 'fixtures') \
                        for app in get_apps()]
        for fixture_label in fixture_labels:
            parts = fixture_label.split('.')
            if len(parts) == 1:
                fixture_name = fixture_label
                formats = serializers.get_public_serializer_formats()
            else:
                fixture_name, format = '.'.join(parts[:-1]), parts[-1]
                if format in serializers.get_public_serializer_formats():
                    formats = [format]
                else:
                    formats = []

            if formats:
                if verbosity > 1:
                    print "Loading '%s' fixtures..." % fixture_name
            else:
                sys.stderr.write(
                    self.style.ERROR("Problem installing fixture '%s': %s is not a known "+ \
                                     "serialization format." % (fixture_name, format))
                    )
                transaction.rollback()
                transaction.leave_transaction_management()
                return

            if os.path.isabs(fixture_name):
                fixture_dirs = [fixture_name]
            else:
                fixture_dirs = app_fixtures + list(settings.FIXTURE_DIRS) + ['']

            for fixture_dir in fixture_dirs:
                if verbosity > 1:
                    print "Checking %s for fixtures..." % humanize(fixture_dir)

                label_found = False
                for format in formats:
                    serializer = serializers.get_serializer(format)
                    if verbosity > 1:
                        print "Trying %s for %s fixture '%s'..." % \
                            (humanize(fixture_dir), format, fixture_name)
                    try:
                        full_path = os.path.join(fixture_dir, '.'.join([fixture_name, format]))
                        fixture = open(full_path, 'r')
                        if label_found:
                            fixture.close()
                            print self.style.ERROR("Multiple fixtures named '%s' in %s. Aborting." %
                                (fixture_name, humanize(fixture_dir)))
                            transaction.rollback()
                            transaction.leave_transaction_management()
                            return
                        else:
                            fixture_count += 1
                            objects_per_fixture.append(0)
                            if verbosity > 0:
                                print "Installing %s fixture '%s' from %s." % \
                                    (format, fixture_name, humanize(fixture_dir))
                            try:
                                objects_to_keep = {}
                                objects = serializers.deserialize(format, fixture)
                                for obj in objects:
                                    object_count += 1
                                    objects_per_fixture[-1] += 1

                                    class_ = obj.object.__class__
                                    if not class_ in objects_to_keep:
                                        objects_to_keep[class_] = set()
                                    objects_to_keep[class_].add(obj.object)
                                    
                                    models.add(class_)
                                    obj.save()

                                self.remove_objects_not_in(objects_to_keep, verbosity)

                                label_found = True
                            except (SystemExit, KeyboardInterrupt):
                                raise
                            except Exception:
                                import traceback
                                fixture.close()
                                transaction.rollback()
                                transaction.leave_transaction_management()
                                if show_traceback:
                                    traceback.print_exc()
                                else:
                                    sys.stderr.write(
                                        self.style.ERROR("Problem installing fixture '%s': %s\n" %
                                             (full_path, traceback.format_exc())))
                                return
                            fixture.close()
                    except:
                        if verbosity > 1:
                            print "No %s fixture '%s' in %s." % \
                                (format, fixture_name, humanize(fixture_dir))

        # If any of the fixtures we loaded contain 0 objects, assume that an 
        # error was encountered during fixture loading.
        if 0 in objects_per_fixture:
            sys.stderr.write(
                self.style.ERROR("No fixture data found for '%s'. (File format may be invalid.)" %
                    (fixture_name)))
            transaction.rollback()
            transaction.leave_transaction_management()
            return
            
        # If we found even one object in a fixture, we need to reset the 
        # database sequences.
        if object_count > 0:
            sequence_sql = connection.ops.sequence_reset_sql(self.style, models)
            if sequence_sql:
                if verbosity > 1:
                    print "Resetting sequences"
                for line in sequence_sql:
                    cursor.execute(line)
            
        transaction.commit()
        transaction.leave_transaction_management()

        if object_count == 0:
            if verbosity > 1:
                print "No fixtures found."
        else:
            if verbosity > 0:
                print "Installed %d object(s) from %d fixture(s)" % (object_count, fixture_count)
                
        # Close the DB connection. This is required as a workaround for an
        # edge case in MySQL: if the same connection is used to
        # create tables, load data, and query, the query can return
        # incorrect results. See Django #7572, MySQL #37735.
        connection.close()
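
In this management command, error reporting goes through sys.stderr.write wrapped in the command's style object, so messages are styled consistently with Django's own output. A small sketch of that pattern (the helper name is illustrative):

import sys

def write_styled_error(style, fixture_name, detail):
    # Wrap the message with the command's ERROR style before writing to stderr,
    # as the handle() method above does.
    sys.stderr.write(
        style.ERROR("Problem installing fixture '%s': %s\n" % (fixture_name, detail)))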

Example 15

Project: fixofx
Source File: csv_converter.py
View license
    def _parse_date(self, txn_date, dayfirst=False):
        # Body omitted from this excerpt.
        pass

    def _check_date_format(self, parsed_date):
        # If we *ever* find a date that parses as dayfirst, treat
        # *all* transactions in this statement as dayfirst.
        if parsed_date is not None and parsed_date != "UNKNOWN" and parsed_date.microsecond == 3:
            self.dayfirst = True

    #
    # Cleanup methods
    #

    def _clean_txn_list(self, txn_list):
        for txn_obj in txn_list:
            try:
                txn = self._clean_txn(txn_obj)
                txn_date = txn["Date"]
                txn_date_list = self.txns_by_date.get(txn_date, [])
                txn_date_list.append(txn)
                self.txns_by_date[txn_date] = txn_date_list
            except ValueError:
                # The _clean_txn method will sometimes find transactions
                # that are inherently unclean and are unable to be purified.
                # In these cases it will reject the transaction by throwing
                # a ValueError, which signals us not to store the transaction.
                if self.debug: sys.stderr.write("Skipping transaction '%s'." %
                                                str(txn_obj.asDict()))

        # Sort the dates (in YYYYMMDD format) and choose the lowest
        # date as our start date, and the highest date as our end
        # date.
        date_list = self.txns_by_date.keys()
        date_list.sort()

        self.start_date = date_list[0]
        self.end_date   = date_list[-1]

    def _clean_txn(self, txn_obj):
        # This is sort of the brute-force method of the converter.  It
        # looks at the data we get from the bank and tries as hard as
        # possible to make best-effort guesses about what the OFX 2.0
        # standard values for the transaction should be.  There's a
        # reasonable amount of guesswork in here -- some of it wise,
        # maybe some of it not.  If the cleanup method determines that
        # the txn_obj shouldn't be in the data, it will return None.
        # Otherwise, it will return a transaction cleaned to the best
        # of our abilities.
        txn = txn_obj.asDict()
        self._clean_txn_date(txn)
        self._clean_txn_amount(txn)
        self._clean_txn_number(txn)
        self._clean_txn_type(txn)
        self._clean_txn_payee(txn)
        return txn

    def _clean_txn_date(self, txn):
        txn_date    = txn.get("Date", "UNKNOWN").strip()
        if txn_date != "UNKNOWN":
            parsed_date = self._parse_date(txn_date, dayfirst=self.dayfirst)
            txn["Date"] = parsed_date.strftime("%Y%m%d")
        else:
            txn["Date"] = "UNKNOWN"

    def _clean_txn_amount(self, txn):
        txn_amount  = txn.get("Amount",  "00.00")
        txn_amount2 = txn.get("Amount2", "00.00")

        # Home Depot Credit Card seems to send two transaction records for each
        # transaction. They're out of order (that is, the second record is not
        # directly after the first, nor even necessarily after it at all), and
        # the second one *sometimes* appears to be a memo field on the first one
        # (e.g., a credit card payment will show up with an amount and date, and
        # then the next transaction will have the same date and a payee that
        # reads, "Thank you for your payment!"), and *sometimes* is the real
        # payee (e.g., the first will say "Home Depot" and the second will say
        # "Seasonal/Garden"). One of the two transaction records will have a
        # transaction amount of "-", and the other will have the real
        # transaction amount. Ideally, we would pull out the memo and attach it
        # to the right transaction, but unless the two transactions are the only
        # transactions on that date, there doesn't seem to be a good clue (order
        # in statement, amount, etc.) as to how to associate them. So, instead,
        # we're returning None, which means this transaction should be removed
        # from the statement and not displayed to the user. The result is that
        # for Home Depot cards, sometimes we lose the memo (which isn't that big
        # a deal), and sometimes we make the memo into the payee (which sucks).
        if txn_amount == "-" or txn_amount == " ":
            raise ValueError("Transaction amount is undefined.")

        # Some QIF sources put the amount in Amount2 instead, for unknown
        # reasons.  Here we ignore Amount2 unless Amount is unknown.
        if txn_amount == "00.00":
            txn_amount = txn_amount2

        # Okay, now strip out whitespace padding.
        txn_amount = txn_amount.strip()

        # Some QIF files have dollar signs in the amount.  Hey, why not?
        txn_amount = txn_amount.replace('$', '', 1)

        # Some QIF sources put three digits after the decimal, and the Ruby
        # code thinks that means we're in Europe.  So.....let's deal with
        # that now.
        try:
            txn_amount = str(Decimal(txn_amount).quantize(Decimal('.01')))
        except:
            # Just keep truckin'.
            pass

        txn["Amount"] = txn_amount

    def _clean_txn_number(self, txn):
        txn_number  = txn.get("Number", "UNKNOWN").strip()

        # Clean up bad check number behavior
        all_digits = re.compile("\d+")

        if txn_number == "N/A":
            # Get rid of brain-dead Chase check number "N/A"s
            del txn["Number"]

        elif txn_number.startswith("XXXX-XXXX-XXXX"):
            # Home Depot credit cards throw THE CREDIT CARD NUMBER
            # into the check number field.  Oy!  At least they mask
            # the first twelve digits, so we know they're insane.
            del txn["Number"]

        elif txn_number != "UNKNOWN" and self.accttype == "CREDITCARD":
            # Several other credit card companies (MBNA, CapitalOne)
            # seem to use the number field as a transaction ID.  Get
            # rid of this.
            del txn["Number"]

        elif txn_number == "0000000000" and self.accttype != "CREDITCARD":
            # There's some bank that puts "N0000000000" in every non-check
            # transaction.  (They do use normal check numbers for checks.)
            del txn["Number"]

        elif txn_number != "UNKNOWN" and all_digits.search(txn_number):
            # Washington Mutual doesn't indicate a CHECK transaction
            # when a check number is present.
            txn["Type"] = "CHECK"

    def _clean_txn_type(self, txn):
        txn_type    = "UNKNOWN"
        txn_amount  = txn.get("Amount", "UNKNOWN")
        txn_payee   = txn.get("Payee",  "UNKNOWN")
        txn_memo    = txn.get("Memo",   "UNKNOWN")
        txn_number  = txn.get("Number", "UNKNOWN")
        txn_sign    = self._txn_sign(txn_amount)

        # Try to figure out the transaction type from the Payee or
        # Memo field.
        for typestr in self.txn_types.keys():
            if txn_number == typestr:
                # US Bank sends "DEBIT" or "CREDIT" as a check number
                # on credit card transactions.
                txn["Type"] = self.txn_types[typestr]
                del txn["Number"]
                break

            elif txn_payee.startswith(typestr + "/") or \
            txn_memo.startswith(typestr + "/") or \
            txn_memo == typestr or txn_payee == typestr:
                if typestr == "ACH" and txn_sign == "credit":
                    txn["Type"] = "DIRECTDEP"

                elif typestr == "ACH" and txn_sign == "debit":
                    txn["Type"] = "DIRECTDEBIT"

                else:
                    txn["Type"] = self.txn_types[typestr]
                break

    def _clean_txn_payee(self, txn):
        txn_payee   = txn.get("Payee",  "UNKNOWN")
        txn_memo    = txn.get("Memo",   "UNKNOWN")
        txn_number  = txn.get("Number", "UNKNOWN")
        txn_type    = txn.get("Type",   "UNKNOWN")
        txn_amount  = txn.get("Amount", "UNKNOWN")
        txn_sign    = self._txn_sign(txn_amount)

        # Try to fill in the payee field with some meaningful value.
        if txn_payee == "UNKNOWN":
            if txn_number != "UNKNOWN" and (self.accttype == "CHECKING" or
            self.accttype == "SAVINGS"):
                txn["Payee"] = "Check #%s" % txn_number
                txn["Type"]  = "CHECK"

            elif txn_type == "INT" and txn_sign == "debit":
                txn["Payee"] = "Interest paid"

            elif txn_type == "INT" and txn_sign == "credit":
                txn["Payee"] = "Interest earned"

            elif txn_type == "ATM" and txn_sign == "debit":
                txn["Payee"] = "ATM Withdrawal"

            elif txn_type == "ATM" and txn_sign == "credit":
                txn["Payee"] = "ATM Deposit"

            elif txn_type == "POS" and txn_sign == "debit":
                txn["Payee"] = "Point of Sale Payment"

            elif txn_type == "POS" and txn_sign == "credit":
                txn["Payee"] = "Point of Sale Credit"

            elif txn_memo != "UNKNOWN":
                txn["Payee"] = txn_memo

            # Down here, we have no payee, no memo, no check number,
            # and no type.  Who knows what this stuff is.
            elif txn_type == "UNKNOWN" and txn_sign == "debit":
                txn["Payee"] = "Other Debit"
                txn["Type"]  = "DEBIT"

            elif txn_type == "UNKNOWN" and txn_sign == "credit":
                txn["Payee"] = "Other Credit"
                txn["Type"]  = "CREDIT"

        # Make sure the transaction type has some valid value.
        if not txn.has_key("Type") and txn_sign == "debit":
            txn["Type"] = "DEBIT"

        elif not txn.has_key("Type") and txn_sign == "credit":
            txn["Type"] = "CREDIT"

    def _txn_sign(self, txn_amount):
        # Is this a credit or a debit?
        if txn_amount.startswith("-"):
            return "debit"
        else:
            return "credit"

    #
    # Conversion methods
    #

    def to_ofx102(self):
        if self.debug: sys.stderr.write("Making OFX/1.02.\n")
        return DOCUMENT(self._ofx_header(),
                        OFX(self._ofx_signon(),
                            self._ofx_stmt()))

    def to_xml(self):
        ofx102 = self.to_ofx102()

        if self.debug:
            sys.stderr.write(ofx102 + "\n")
            sys.stderr.write("Parsing OFX/1.02.\n")
        response = ofx.Response(ofx102) #, debug=self.debug)

        if self.debug: sys.stderr.write("Making OFX/2.0.\n")
        if self.dayfirst:
            date_format = "DD/MM/YY"
        else:
            date_format = "MM/DD/YY"
        xml = response.as_xml(original_format="QIF", date_format=date_format)

        return xml
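
The to_ofx102/to_xml methods above gate their progress messages on a debug flag, so stderr only carries diagnostics when the caller asks for them and the converted document itself stays off the error stream. A minimal sketch of that pattern; the Converter class and method body here are hypothetical, not part of the project:

import sys

class Converter(object):
    # Hypothetical, cut-down converter: only the debug-gated stderr logging is shown.
    def __init__(self, debug=False):
        self.debug = debug

    def to_xml(self, data):
        if self.debug: sys.stderr.write("Converting %d records.\n" % len(data))
        # ... the real conversion work would go here ...
        return "<xml/>"

if __name__ == "__main__":
    Converter(debug=True).to_xml(["txn1", "txn2"])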

Example 16

Project: mavelous
Source File: miniterm.py
View license
def main():
    import optparse

    parser = optparse.OptionParser(
        usage = "%prog [options] [port [baudrate]]",
        description = "Miniterm - A simple terminal program for the serial port."
    )

    parser.add_option("-p", "--port",
        dest = "port",
        help = "port, a number or a device name. (deprecated option, use parameter instead)",
        default = None
    )

    parser.add_option("-b", "--baud",
        dest = "baudrate",
        action = "store",
        type = 'int',
        help = "set baud rate, default %default",
        default = 9600
    )

    parser.add_option("--parity",
        dest = "parity",
        action = "store",
        help = "set parity, one of [N, E, O, S, M], default=N",
        default = 'N'
    )

    parser.add_option("-e", "--echo",
        dest = "echo",
        action = "store_true",
        help = "enable local echo (default off)",
        default = False
    )

    parser.add_option("--rtscts",
        dest = "rtscts",
        action = "store_true",
        help = "enable RTS/CTS flow control (default off)",
        default = False
    )

    parser.add_option("--xonxoff",
        dest = "xonxoff",
        action = "store_true",
        help = "enable software flow control (default off)",
        default = False
    )

    parser.add_option("--cr",
        dest = "cr",
        action = "store_true",
        help = "do not send CR+LF, send CR only",
        default = False
    )

    parser.add_option("--lf",
        dest = "lf",
        action = "store_true",
        help = "do not send CR+LF, send LF only",
        default = False
    )

    parser.add_option("-D", "--debug",
        dest = "repr_mode",
        action = "count",
        help = """debug received data (escape non-printable chars)
--debug can be given multiple times:
0: just print what is received
1: escape non-printable characters, do newlines as usual
2: escape non-printable characters, newlines too
3: hex dump everything""",
        default = 0
    )

    parser.add_option("--rts",
        dest = "rts_state",
        action = "store",
        type = 'int',
        help = "set initial RTS line state (possible values: 0, 1)",
        default = None
    )

    parser.add_option("--dtr",
        dest = "dtr_state",
        action = "store",
        type = 'int',
        help = "set initial DTR line state (possible values: 0, 1)",
        default = None
    )

    parser.add_option("-q", "--quiet",
        dest = "quiet",
        action = "store_true",
        help = "suppress non error messages",
        default = False
    )

    parser.add_option("--exit-char",
        dest = "exit_char",
        action = "store",
        type = 'int',
        help = "ASCII code of special character that is used to exit the application",
        default = 0x1d
    )

    parser.add_option("--menu-char",
        dest = "menu_char",
        action = "store",
        type = 'int',
        help = "ASCII code of special character that is used to control miniterm (menu)",
        default = 0x14
    )

    (options, args) = parser.parse_args()

    options.parity = options.parity.upper()
    if options.parity not in 'NEOSM':
        parser.error("invalid parity")

    if options.cr and options.lf:
        parser.error("only one of --cr or --lf can be specified")

    if options.menu_char == options.exit_char:
        parser.error('--exit-char can not be the same as --menu-char')

    global EXITCHARCTER, MENUCHARACTER
    EXITCHARCTER = chr(options.exit_char)
    MENUCHARACTER = chr(options.menu_char)

    port = options.port
    baudrate = options.baudrate
    if args:
        if options.port is not None:
            parser.error("no arguments are allowed, options only when --port is given")
        port = args.pop(0)
        if args:
            try:
                baudrate = int(args[0])
            except ValueError:
                parser.error("baud rate must be a number, not %r" % args[0])
            args.pop(0)
        if args:
            parser.error("too many arguments")
    else:
        if port is None: port = 0

    convert_outgoing = CONVERT_CRLF
    if options.cr:
        convert_outgoing = CONVERT_CR
    elif options.lf:
        convert_outgoing = CONVERT_LF

    try:
        miniterm = Miniterm(
            port,
            baudrate,
            options.parity,
            rtscts=options.rtscts,
            xonxoff=options.xonxoff,
            echo=options.echo,
            convert_outgoing=convert_outgoing,
            repr_mode=options.repr_mode,
        )
    except serial.SerialException, e:
        sys.stderr.write("could not open port %r: %s\n" % (port, e))
        sys.exit(1)

    if not options.quiet:
        sys.stderr.write('--- Miniterm on %s: %d,%s,%s,%s ---\n' % (
            miniterm.serial.portstr,
            miniterm.serial.baudrate,
            miniterm.serial.bytesize,
            miniterm.serial.parity,
            miniterm.serial.stopbits,
        ))
        sys.stderr.write('--- Quit: %s  |  Menu: %s | Help: %s followed by %s ---\n' % (
            key_description(EXITCHARCTER),
            key_description(MENUCHARACTER),
            key_description(MENUCHARACTER),
            key_description('\x08'),
        ))

    if options.dtr_state is not None:
        if not options.quiet:
            sys.stderr.write('--- forcing DTR %s\n' % (options.dtr_state and 'active' or 'inactive'))
        miniterm.serial.setDTR(options.dtr_state)
        miniterm.dtr_state = options.dtr_state
    if options.rts_state is not None:
        if not options.quiet:
            sys.stderr.write('--- forcing RTS %s\n' % (options.rts_state and 'active' or 'inactive'))
        miniterm.serial.setRTS(options.rts_state)
        miniterm.rts_state = options.rts_state

    miniterm.start()
    try:
        miniterm.join(True)
    except KeyboardInterrupt:
        pass
    if not options.quiet:
        sys.stderr.write("\n--- exit ---\n")
    miniterm.join()
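
miniterm keeps its own status output (the startup banner, DTR/RTS notices, the open-port error, the exit message) on stderr, so whatever arrives from the serial port can be piped from stdout untouched. A small sketch of the same stdout/stderr separation, using only the standard library and illustrative message text:

import sys

def report(msg, quiet=False):
    # Status banners and errors go to stderr; redirecting stdout captures only data.
    if not quiet:
        sys.stderr.write('--- %s ---\n' % msg)

report('Miniterm-style banner')
sys.stdout.write('serial payload goes to stdout\n')
report('exit')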

Example 17

Project: rPGA
Source File: mapping.py
View license
def main(args):
  ## main function for mapping reads to personal genomes
  
  helpStr = "Help!\n"
  if not args.o:
    sys.stderr.write('rPGA ERROR: must provide output directory \n\n')
    sys.exit()
  if not args.s:
    sys.stderr.write('rPGA ERROR: must provide read sequence files \n\n')
    sys.exit()
#  if not args.g:
#    sys.stderr.write('rPGA ERROR: must provide gtf file\n')
  

  command = args.command
  if args.T:
    threads = int(args.T)
  else:
    threads = 8

  if args.N:
    mismatches = int(args.N)
  else:
    mismatches = 3

  if args.M:
    multimapped = int(args.M)
  else:
    multimapped = 20

  gzipped = args.gz
  outDir = args.o
  nmask = args.nmask
  if not os.path.exists(outDir):
    os.makedirs(outDir)
  ref = args.r
  vcf = args.v
  gtf = args.g
  hap = args.hap
  if args.r1:
    hap1Ref = args.r1
  else:
    hap1Ref = os.path.join(outDir, "hap1.fa")
  if args.r2:
    hap2Ref = args.r2
  else:
    hap2Ref = os.path.join(outDir, "hap2.fa")
  nRef = os.path.join(outDir, "mask.fa")  
  if args.readlength:
    readlength = int(args.readlength)-1
  else:
    readlength = 99
  if len(command)>1:
    sys.stderr.write(helpStr + "\n\n")
    sys.exit()

  seqs = ' '.join((args.s).split(','))
  if len((args.s).split(','))==0 or len((args.s).split(','))>2:
    sys.stderr.write("ERROR: Sequence parameter -s input is  not correct\n Example: rPGA run mappng alleles -s reads_1.fq,reads_2.fq -o rPGA\n")
    sys.exit()
  
  
  if nmask:
    if not os.path.exists(os.path.join(outDir,"MASK/STARindex")):
      os.makedirs(os.path.join(outDir, "MASK/STARindex"))
    if not os.path.exists(os.path.join(outDir,"MASK/STARalign")):
      os.makedirs(os.path.join(outDir, "MASK/STARalign"))
  ##create genome index
    STAR_create_genome(outDir, nRef, "MASK",threads,gtf,readlength)
    genomeDir = outDir + '/MASK/STARindex'
  # map reads
    STAR_perform_mapping(genomeDir,outDir, "MASK", seqs,threads,mismatches,gzipped,multimapped)   
  ## sort bam file
    sam_to_sorted_bam(os.path.join(outDir,'MASK/STARalign/Aligned.out'))
  ## remove unnecessary files
    os.remove(os.path.join(outDir,'MASK/STARalign/Aligned.out.bam'))
    os.remove(os.path.join(outDir,'MASK/STARalign/SJ.out.tab'))
    shutil.rmtree(os.path.join(outDir, "MASK/STARindex"))

  elif hap: # map to hap and hap2 personal genomes
    if not os.path.exists(os.path.join(outDir, "HAP1/STARindex")):
      os.makedirs(os.path.join(outDir, "HAP1/STARindex"))
    if not os.path.exists(os.path.join(outDir, "HAP2/STARindex")):
      os.makedirs(os.path.join(outDir, "HAP2/STARindex"))
    if not os.path.exists(os.path.join(outDir, "HAP1/STARalign")):
      os.makedirs(os.path.join(outDir, "HAP1/STARalign"))
    if not os.path.exists(os.path.join(outDir, "HAP2/STARalign")):
      os.makedirs(os.path.join(outDir, "HAP2/STARalign"))
    if not args.genomedir:
      if not args.g:
        sys.stderr.write('rPGA ERROR: must provide gtf file\n') 
        sys.exit()
      STAR_create_genome(outDir, hap1Ref, "HAP1",threads,gtf,readlength)
      STAR_create_genome(outDir, hap2Ref, "HAP2",threads,gtf,readlength)
      genomeDir1 = os.path.join(outDir, 'HAP1/STARindex')
      genomeDir2 = os.path.join(outDir, 'HAP2/STARindex')
    else:
      genomeDir1, genomeDir2 = (args.genomedir).split(',')
    STAR_perform_mapping(genomeDir1, outDir, "HAP1", seqs,threads,mismatches,gzipped,multimapped)
    STAR_perform_mapping(genomeDir2, outDir, "HAP2", seqs,threads,mismatches,gzipped,multimapped)
    sam_to_sorted_bam(os.path.join(outDir,'HAP1/STARalign/Aligned.out'))
    sam_to_sorted_bam(os.path.join(outDir,'HAP2/STARalign/Aligned.out'))
    os.remove(os.path.join(outDir,'HAP1/STARalign/Aligned.out.bam'))
    os.remove(os.path.join(outDir,'HAP2/STARalign/Aligned.out.bam'))
    os.remove(os.path.join(outDir,'HAP1/STARalign/SJ.out.tab'))
    os.remove(os.path.join(outDir,'HAP2/STARalign/SJ.out.tab'))
    shutil.rmtree(os.path.join(outDir, "HAP1/STARindex"))
    shutil.rmtree(os.path.join(outDir, "HAP2/STARindex"))
  else:
    if not args.r:
      sys.stderr.write("ERROR: rPGA run mapping command requires -r parameter \nExample: rPGA run mapping -r reference.fa -s reads_1.fq,reads_.fq -o rPGA \n")
      sys.exit()
    
    if not os.path.exists(os.path.join(outDir, "HAP1/STARindex")):
      os.makedirs(os.path.join(outDir, "HAP1/STARindex"))
    if not os.path.exists(os.path.join(outDir, "HAP2/STARindex")):
      os.makedirs(os.path.join(outDir, "HAP2/STARindex"))
    if not os.path.exists(os.path.join(outDir, "REF/STARindex")):
      os.makedirs(os.path.join(outDir, "REF/STARindex"))
    if not os.path.exists(os.path.join(outDir, "HAP1/STARalign")):
      os.makedirs(os.path.join(outDir, "HAP1/STARalign"))
    if not os.path.exists(os.path.join(outDir, "HAP2/STARalign")):
      os.makedirs(os.path.join(outDir, "HAP2/STARalign"))
    if not os.path.exists(os.path.join(outDir, "REF/STARalign")):
      os.makedirs(os.path.join(outDir, "REF/STARalign"))

    print "creating STAR genome indicies"
    if not args.genomedir:
      STAR_create_genome(outDir, ref, "REF",threads,gtf,readlength)
      STAR_create_genome(outDir, hap1Ref, "HAP1",threads,gtf,readlength)
      STAR_create_genome(outDir, hap2Ref, "HAP2",threads,gtf,readlength)

    print "perform STAR mapping"
    if args.genomedir:
      genomeDir1, genomeDir2, genomeDirR = (args.genomedir).split(',')
    else:
      genomeDir1 = os.path.join(outDir, 'HAP1/STARindex')
      genomeDir2 = os.path.join(outDir, 'HAP2/STARindex')
      genomeDirR = os.path.join(outDir, 'REF/STARindex')
    if not args.g:
      sys.stderr.write('rPGA ERROR: must provide gtf file\n')
      sys.exit()
    STAR_create_genome(outDir, hap1Ref, "HAP1",threads,gtf,readlength)
    STAR_create_genome(outDir, hap2Ref, "HAP2",threads,gtf,readlength)
    STAR_create_genome(outDir, ref, "REF",threads,gtf,readlength)
    STAR_perform_mapping(genomeDir1, outDir, "HAP1", seqs,threads,mismatches,gzipped,multimapped)
    STAR_perform_mapping(genomeDir2, outDir, "HAP2", seqs,threads,mismatches,gzipped,multimapped)
    STAR_perform_mapping(genomeDirR, outDir, "REF", seqs,threads,mismatches,gzipped,multimapped)
    sam_to_sorted_bam(os.path.join(outDir,'HAP1/STARalign/Aligned.out'))
    sam_to_sorted_bam(os.path.join(outDir,'HAP2/STARalign/Aligned.out'))
    sam_to_sorted_bam(os.path.join(outDir,'REF/STARalign/Aligned.out'))
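
rPGA's main() validates each required command-line argument up front, writes a short error to stderr, and exits before any mapping work starts. The same guard pulled out into a helper; the argument names and messages are illustrative, not taken from rPGA:

import sys

def require(value, message):
    # Print the error on stderr and stop before any real work starts.
    if not value:
        sys.stderr.write(message + '\n\n')
        sys.exit(1)

def main(args):
    require(args.get('o'), 'ERROR: must provide output directory')
    require(args.get('s'), 'ERROR: must provide read sequence files')
    # ... mapping steps would follow here ...

main({'o': 'out', 's': 'reads_1.fq,reads_2.fq'})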

Example 18

Project: spitfire
Source File: crunner.py
View license
    def process_file(self, filename):
        buffer = StringIO.StringIO()
        reset_sys_modules()

        classname = util.filename2classname(filename)
        modulename = util.filename2modulename(filename)
        test_output_path = os.path.join(self.options.test_output,
                                        classname + '.txt')

        if self.options.verbose:
            sys.stderr.write(modulename + ' ... ')

        compile_failed = False
        if self.options.debug or self.options.compile:
            try:
                self.compiler.compile_file(filename)
            except Exception as e:
                compile_failed = True
                print >> buffer, '=' * 70
                print >> buffer, 'FAIL:', modulename, '(' + filename + ')'
                print >> buffer, '-' * 70
                traceback.print_exc(None, buffer)
            if self.options.debug:
                if 'parse_tree' in self.options.debug_flags:
                    print >> buffer, "parse_tree:"
                    visitor.print_tree(self.compiler._parse_tree, output=buffer)
                if 'analyzed_tree' in self.options.debug_flags:
                    print >> buffer, "analyzed_tree:"
                    visitor.print_tree(self.compiler._analyzed_tree,
                                       output=buffer)
                if 'optimized_tree' in self.options.debug_flags:
                    print >> buffer, "optimized_tree:"
                    visitor.print_tree(self.compiler._optimized_tree,
                                       output=buffer)
                if 'hoisted_tree' in self.options.debug_flags:
                    print >> buffer, "hoisted_tree:"
                    visitor.print_tree(self.compiler._hoisted_tree,
                                       output=buffer)
                if 'source_code' in self.options.debug_flags:
                    print >> buffer, "source_code:"
                    for i, line in enumerate(self.compiler._source_code.split(
                            '\n')):
                        print >> buffer, '% 3s' % (i + 1), line

        test_failed = False
        if not self.options.skip_test:
            import tests

            current_output = None
            raised_exception = False
            try:
                if self.options.debug or self.options.compile:
                    template_module = util.load_module_from_src(
                        self.compiler._source_code, filename, modulename)
                else:
                    template_module = runtime.import_module_symbol(modulename)
            except Exception as e:
                # An exception here means the template is unavailable; the test
                # fails.
                test_failed = True
                raised_exception = True
                current_output = str(e)

            if not test_failed:
                try:
                    template_class = getattr(template_module, classname)
                    template = template_class(search_list=self.search_list)
                    current_output = template.main().encode('utf8')
                except Exception as e:
                    # An exception here doesn't mean that the test fails
                    # necessarily since libraries don't have a class; as long as
                    # the expected output matches the exception, the test
                    # passes.
                    raised_exception = True
                    current_output = str(e)

            if not test_failed:
                if self.options.test_accept_result:
                    test_file = open(test_output_path, 'w')
                    test_file.write(current_output)
                    test_file.close()
                try:
                    test_file = open(test_output_path)
                except IOError as e:
                    # An exception here means that the expected output is
                    # unavailable; the test fails.
                    test_failed = True
                    raised_exception = True
                    current_output = str(e)

            if test_failed:
                test_output = None
            else:
                test_output = test_file.read()
                if current_output != test_output:
                    test_failed = True
                    if self.options.debug:
                        print >> buffer, "expected output:"
                        print >> buffer, test_output
                        print >> buffer, "actual output:"
                        print >> buffer, current_output

            if compile_failed or test_failed:
                self.num_tests_failed += 1
                if self.options.verbose:
                    sys.stderr.write('FAIL\n')
                else:
                    sys.stderr.write('F')
                current_output_path = os.path.join(self.options.test_output,
                                                   classname + '.failed')
                f = open(current_output_path, 'w')
                f.write(current_output)
                f.close()
                print >> buffer, '=' * 70
                print >> buffer, 'FAIL:', modulename, '(' + filename + ')'
                print >> buffer, '-' * 70
                print >> buffer, 'Compare expected and actual output with:'
                print >> buffer, ' '.join(['    diff -u', test_output_path,
                                           current_output_path])
                print >> buffer, 'Show debug information for the test with:'
                test_cmd = [arg for arg in sys.argv if arg not in self.files]
                if '--debug' not in test_cmd:
                    test_cmd.append('--debug')
                test_cmd = ' '.join(test_cmd)
                print >> buffer, '   ', test_cmd, filename
                if raised_exception:
                    print >> buffer, '-' * 70
                    print >> buffer, current_output
                    traceback.print_exc(None, buffer)
                print >> buffer
                self.buffer.write(buffer.getvalue())
            else:
                if self.options.verbose:
                    sys.stderr.write('ok\n')
                else:
                    sys.stderr.write('.')
            self.num_tests_run += 1
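
The runner above emits unittest-style progress on stderr: a '.' or 'F' per test when quiet, or the module name followed by 'ok'/'FAIL' when verbose. A cut-down sketch of that reporting, assuming a simple pass/fail flag rather than the real compile-and-diff machinery:

import sys

def report_result(name, failed, verbose=False):
    # Verbose mode prints one labelled line per test; quiet mode prints '.' or 'F'.
    if verbose:
        sys.stderr.write(name + ' ... ')
        sys.stderr.write('FAIL\n' if failed else 'ok\n')
    else:
        sys.stderr.write('F' if failed else '.')

report_result('tests.example_test', failed=False, verbose=True)
report_result('tests.other_test', failed=True)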

Example 19

View license
    def run(self):
        while shared.shutdown == 0:
            queueValue = shared.addressGeneratorQueue.get()
            nonceTrialsPerByte = 0
            payloadLengthExtraBytes = 0
            if queueValue[0] == 'createChan':
                command, addressVersionNumber, streamNumber, label, deterministicPassphrase = queueValue
                eighteenByteRipe = False
                numberOfAddressesToMake = 1
                numberOfNullBytesDemandedOnFrontOfRipeHash = 1
            elif queueValue[0] == 'joinChan':
                command, chanAddress, label, deterministicPassphrase = queueValue
                eighteenByteRipe = False
                addressVersionNumber = decodeAddress(chanAddress)[1]
                streamNumber = decodeAddress(chanAddress)[2]
                numberOfAddressesToMake = 1
                numberOfNullBytesDemandedOnFrontOfRipeHash = 1
            elif len(queueValue) == 7:
                command, addressVersionNumber, streamNumber, label, numberOfAddressesToMake, deterministicPassphrase, eighteenByteRipe = queueValue
                try:
                    numberOfNullBytesDemandedOnFrontOfRipeHash = shared.config.getint(
                        'bitmessagesettings', 'numberofnullbytesonaddress')
                except:
                    if eighteenByteRipe:
                        numberOfNullBytesDemandedOnFrontOfRipeHash = 2
                    else:
                        numberOfNullBytesDemandedOnFrontOfRipeHash = 1 # the default
            elif len(queueValue) == 9:
                command, addressVersionNumber, streamNumber, label, numberOfAddressesToMake, deterministicPassphrase, eighteenByteRipe, nonceTrialsPerByte, payloadLengthExtraBytes = queueValue
                try:
                    numberOfNullBytesDemandedOnFrontOfRipeHash = shared.config.getint(
                        'bitmessagesettings', 'numberofnullbytesonaddress')
                except:
                    if eighteenByteRipe:
                        numberOfNullBytesDemandedOnFrontOfRipeHash = 2
                    else:
                        numberOfNullBytesDemandedOnFrontOfRipeHash = 1 # the default
            elif queueValue[0] == 'stopThread':
                break
            else:
                sys.stderr.write(
                    'Programming error: A structure with the wrong number of values was passed into the addressGeneratorQueue. Here is the queueValue: %s\n' % repr(queueValue))
            if addressVersionNumber < 3 or addressVersionNumber > 4:
                sys.stderr.write(
                    'Program error: For some reason the address generator queue has been given a request to create at least one version %s address which it cannot do.\n' % addressVersionNumber)
            if nonceTrialsPerByte == 0:
                nonceTrialsPerByte = shared.config.getint(
                    'bitmessagesettings', 'defaultnoncetrialsperbyte')
            if nonceTrialsPerByte < shared.networkDefaultProofOfWorkNonceTrialsPerByte:
                nonceTrialsPerByte = shared.networkDefaultProofOfWorkNonceTrialsPerByte
            if payloadLengthExtraBytes == 0:
                payloadLengthExtraBytes = shared.config.getint(
                    'bitmessagesettings', 'defaultpayloadlengthextrabytes')
            if payloadLengthExtraBytes < shared.networkDefaultPayloadLengthExtraBytes:
                payloadLengthExtraBytes = shared.networkDefaultPayloadLengthExtraBytes
            if command == 'createRandomAddress':
                shared.UISignalQueue.put((
                    'updateStatusBar', tr._translate("MainWindow", "Generating one new address")))
                # This next section is a little bit strange. We're going to generate keys over and over until we
                # find one that starts with either \x00 or \x00\x00. Then when we pack them into a Bitmessage address,
                # we won't store the \x00 or \x00\x00 bytes thus making the
                # address shorter.
                startTime = time.time()
                numberOfAddressesWeHadToMakeBeforeWeFoundOneWithTheCorrectRipePrefix = 0
                potentialPrivSigningKey = OpenSSL.rand(32)
                potentialPubSigningKey = highlevelcrypto.pointMult(potentialPrivSigningKey)
                while True:
                    numberOfAddressesWeHadToMakeBeforeWeFoundOneWithTheCorrectRipePrefix += 1
                    potentialPrivEncryptionKey = OpenSSL.rand(32)
                    potentialPubEncryptionKey = highlevelcrypto.pointMult(
                        potentialPrivEncryptionKey)
                    ripe = hashlib.new('ripemd160')
                    sha = hashlib.new('sha512')
                    sha.update(
                        potentialPubSigningKey + potentialPubEncryptionKey)
                    ripe.update(sha.digest())
                    if ripe.digest()[:numberOfNullBytesDemandedOnFrontOfRipeHash] == '\x00' * numberOfNullBytesDemandedOnFrontOfRipeHash:
                        break
                logger.info('Generated address with ripe digest: %s' % hexlify(ripe.digest()))
                try:
                    logger.info('Address generator calculated %s addresses at %s addresses per second before finding one with the correct ripe-prefix.' % (numberOfAddressesWeHadToMakeBeforeWeFoundOneWithTheCorrectRipePrefix, numberOfAddressesWeHadToMakeBeforeWeFoundOneWithTheCorrectRipePrefix / (time.time() - startTime)))
                except ZeroDivisionError:
                    # The user must have a pretty fast computer. time.time() - startTime equaled zero.
                    pass
                address = encodeAddress(addressVersionNumber, streamNumber, ripe.digest())

                # An excellent way for us to store our keys is in Wallet Import Format. Let us convert now.
                # https://en.bitcoin.it/wiki/Wallet_import_format
                privSigningKey = '\x80' + potentialPrivSigningKey
                checksum = hashlib.sha256(hashlib.sha256(
                    privSigningKey).digest()).digest()[0:4]
                privSigningKeyWIF = arithmetic.changebase(
                    privSigningKey + checksum, 256, 58)

                privEncryptionKey = '\x80' + potentialPrivEncryptionKey
                checksum = hashlib.sha256(hashlib.sha256(
                    privEncryptionKey).digest()).digest()[0:4]
                privEncryptionKeyWIF = arithmetic.changebase(
                    privEncryptionKey + checksum, 256, 58)

                shared.config.add_section(address)
                shared.config.set(address, 'label', label)
                shared.config.set(address, 'enabled', 'true')
                shared.config.set(address, 'decoy', 'false')
                shared.config.set(address, 'noncetrialsperbyte', str(
                    nonceTrialsPerByte))
                shared.config.set(address, 'payloadlengthextrabytes', str(
                    payloadLengthExtraBytes))
                shared.config.set(
                    address, 'privSigningKey', privSigningKeyWIF)
                shared.config.set(
                    address, 'privEncryptionKey', privEncryptionKeyWIF)
                shared.writeKeysFile()

                # The API and the join and create Chan functionality
                # both need information back from the address generator.
                shared.apiAddressGeneratorReturnQueue.put(address)

                shared.UISignalQueue.put((
                    'updateStatusBar', tr._translate("MainWindow", "Done generating address. Doing work necessary to broadcast it...")))
                shared.UISignalQueue.put(('writeNewAddressToTable', (
                    label, address, streamNumber)))
                shared.reloadMyAddressHashes()
                if addressVersionNumber == 3:
                    shared.workerQueue.put((
                        'sendOutOrStoreMyV3Pubkey', ripe.digest()))
                elif addressVersionNumber == 4:
                    shared.workerQueue.put((
                        'sendOutOrStoreMyV4Pubkey', address))

            elif command == 'createDeterministicAddresses' or command == 'getDeterministicAddress' or command == 'createChan' or command == 'joinChan':
                if len(deterministicPassphrase) == 0:
                    sys.stderr.write(
                        'WARNING: You are creating deterministic address(es) using a blank passphrase. Bitmessage will do it but it is rather stupid.')
                if command == 'createDeterministicAddresses':
                    shared.UISignalQueue.put((
                                'updateStatusBar', tr._translate("MainWindow","Generating %1 new addresses.").arg(str(numberOfAddressesToMake))))
                signingKeyNonce = 0
                encryptionKeyNonce = 1
                listOfNewAddressesToSendOutThroughTheAPI = [
                ]  # We fill out this list no matter what although we only need it if we end up passing the info to the API.

                for i in range(numberOfAddressesToMake):
                    # This next section is a little bit strange. We're going to generate keys over and over until we
                    # find one that has a RIPEMD hash that starts with either \x00 or \x00\x00. Then when we pack them
                    # into a Bitmessage address, we won't store the \x00 or
                    # \x00\x00 bytes thus making the address shorter.
                    startTime = time.time()
                    numberOfAddressesWeHadToMakeBeforeWeFoundOneWithTheCorrectRipePrefix = 0
                    while True:
                        numberOfAddressesWeHadToMakeBeforeWeFoundOneWithTheCorrectRipePrefix += 1
                        potentialPrivSigningKey = hashlib.sha512(
                            deterministicPassphrase + encodeVarint(signingKeyNonce)).digest()[:32]
                        potentialPrivEncryptionKey = hashlib.sha512(
                            deterministicPassphrase + encodeVarint(encryptionKeyNonce)).digest()[:32]
                        potentialPubSigningKey = highlevelcrypto.pointMult(
                            potentialPrivSigningKey)
                        potentialPubEncryptionKey = highlevelcrypto.pointMult(
                            potentialPrivEncryptionKey)
                        signingKeyNonce += 2
                        encryptionKeyNonce += 2
                        ripe = hashlib.new('ripemd160')
                        sha = hashlib.new('sha512')
                        sha.update(
                            potentialPubSigningKey + potentialPubEncryptionKey)
                        ripe.update(sha.digest())
                        if ripe.digest()[:numberOfNullBytesDemandedOnFrontOfRipeHash] == '\x00' * numberOfNullBytesDemandedOnFrontOfRipeHash:
                            break

                    
                    logger.info('Generated address with ripe digest: %s' % hexlify(ripe.digest()))
                    try:
                        logger.info('Address generator calculated %s addresses at %s addresses per second before finding one with the correct ripe-prefix.' % (numberOfAddressesWeHadToMakeBeforeWeFoundOneWithTheCorrectRipePrefix, numberOfAddressesWeHadToMakeBeforeWeFoundOneWithTheCorrectRipePrefix / (time.time() - startTime)))
                    except ZeroDivisionError:
                        # The user must have a pretty fast computer. time.time() - startTime equaled zero.
                        pass
                    address = encodeAddress(addressVersionNumber, streamNumber, ripe.digest())

                    saveAddressToDisk = True
                    # If we are joining an existing chan, let us check to make sure it matches the provided Bitmessage address
                    if command == 'joinChan':
                        if address != chanAddress:
                            shared.apiAddressGeneratorReturnQueue.put('chan name does not match address')
                            saveAddressToDisk = False
                    if command == 'getDeterministicAddress':
                        saveAddressToDisk = False

                    if saveAddressToDisk:
                        # An excellent way for us to store our keys is in Wallet Import Format. Let us convert now.
                        # https://en.bitcoin.it/wiki/Wallet_import_format
                        privSigningKey = '\x80' + potentialPrivSigningKey
                        checksum = hashlib.sha256(hashlib.sha256(
                            privSigningKey).digest()).digest()[0:4]
                        privSigningKeyWIF = arithmetic.changebase(
                            privSigningKey + checksum, 256, 58)

                        privEncryptionKey = '\x80' + \
                            potentialPrivEncryptionKey
                        checksum = hashlib.sha256(hashlib.sha256(
                            privEncryptionKey).digest()).digest()[0:4]
                        privEncryptionKeyWIF = arithmetic.changebase(
                            privEncryptionKey + checksum, 256, 58)

                        
                        try:
                            shared.config.add_section(address)
                            addressAlreadyExists = False
                        except:
                            addressAlreadyExists = True
                            
                        if addressAlreadyExists:
                            logger.info('%s already exists. Not adding it again.' % address)
                            shared.UISignalQueue.put((
                                'updateStatusBar', tr._translate("MainWindow","%1 is already in 'Your Identities'. Not adding it again.").arg(address)))
                        else:
                            logger.debug('label: %s' % label)
                            shared.config.set(address, 'label', label)
                            shared.config.set(address, 'enabled', 'true')
                            shared.config.set(address, 'decoy', 'false')
                            if command == 'joinChan' or command == 'createChan':
                                shared.config.set(address, 'chan', 'true')
                            shared.config.set(address, 'noncetrialsperbyte', str(
                                nonceTrialsPerByte))
                            shared.config.set(address, 'payloadlengthextrabytes', str(
                                payloadLengthExtraBytes))
                            shared.config.set(
                                address, 'privSigningKey', privSigningKeyWIF)
                            shared.config.set(
                                address, 'privEncryptionKey', privEncryptionKeyWIF)
                            shared.writeKeysFile()

                            shared.UISignalQueue.put(('writeNewAddressToTable', (
                                label, address, str(streamNumber))))
                            listOfNewAddressesToSendOutThroughTheAPI.append(
                                address)
                            shared.myECCryptorObjects[ripe.digest()] = highlevelcrypto.makeCryptor(
                                hexlify(potentialPrivEncryptionKey))
                            shared.myAddressesByHash[ripe.digest()] = address
                            tag = hashlib.sha512(hashlib.sha512(encodeVarint(
                                addressVersionNumber) + encodeVarint(streamNumber) + ripe.digest()).digest()).digest()[32:]
                            shared.myAddressesByTag[tag] = address
                            if addressVersionNumber == 3:
                                shared.workerQueue.put((
                                    'sendOutOrStoreMyV3Pubkey', ripe.digest())) # If this is a chan address,
                                        # the worker thread won't send out the pubkey over the network.
                            elif addressVersionNumber == 4:
                                shared.workerQueue.put((
                                    'sendOutOrStoreMyV4Pubkey', address))
                            shared.UISignalQueue.put((
                                'updateStatusBar', tr._translate("MainWindow", "Done generating address")))


                # Done generating addresses.
                if command == 'createDeterministicAddresses' or command == 'joinChan' or command == 'createChan':
                    shared.apiAddressGeneratorReturnQueue.put(
                        listOfNewAddressesToSendOutThroughTheAPI)
                elif command == 'getDeterministicAddress':
                    shared.apiAddressGeneratorReturnQueue.put(address)
            else:
                raise Exception(
                    "Error in the addressGenerator thread. Thread was given a command it could not understand: " + command)
            shared.addressGeneratorQueue.task_done()
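
When the address generator is handed a queue entry it cannot interpret, it reports a 'Programming error' on stderr, including a repr() of the offending value, and carries on rather than killing the thread. A small sketch of that defensive pattern; the accepted tuple lengths and the message wording are illustrative only:

import sys

def handle(queue_value):
    # Diagnose a malformed work item on stderr instead of crashing the worker thread.
    if not isinstance(queue_value, tuple) or len(queue_value) not in (4, 5, 7, 9):
        sys.stderr.write(
            'Programming error: unexpected value passed to the queue: %s\n'
            % repr(queue_value))
        return
    # ... the normal address-generation path would go here ...

handle('not-a-tuple')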

Example 20

Project: BitXBay
Source File: class_addressGenerator.py
View license
    def run(self):
        while True:
            queueValue = shared.addressGeneratorQueue.get()
            nonceTrialsPerByte = 0
            payloadLengthExtraBytes = 0
            if queueValue[0] == 'createChan':
                command, addressVersionNumber, streamNumber, label, deterministicPassphrase = queueValue
                eighteenByteRipe = False
                numberOfAddressesToMake = 1
                numberOfNullBytesDemandedOnFrontOfRipeHash = 1
            elif queueValue[0] == 'joinChan':
                command, chanAddress, label, deterministicPassphrase = queueValue
                eighteenByteRipe = False
                addressVersionNumber = decodeAddress(chanAddress)[1]
                streamNumber = decodeAddress(chanAddress)[2]
                numberOfAddressesToMake = 1
                numberOfNullBytesDemandedOnFrontOfRipeHash = 1
            elif len(queueValue) == 7:
                command, addressVersionNumber, streamNumber, label, numberOfAddressesToMake, deterministicPassphrase, eighteenByteRipe = queueValue
                try:
                    numberOfNullBytesDemandedOnFrontOfRipeHash = shared.config.getint(
                        'bitmessagesettings', 'numberofnullbytesonaddress')
                except:
                    if eighteenByteRipe:
                        numberOfNullBytesDemandedOnFrontOfRipeHash = 2
                    else:
                        numberOfNullBytesDemandedOnFrontOfRipeHash = 1 # the default
            elif len(queueValue) == 9:
                command, addressVersionNumber, streamNumber, label, numberOfAddressesToMake, deterministicPassphrase, eighteenByteRipe, nonceTrialsPerByte, payloadLengthExtraBytes = queueValue
                try:
                    numberOfNullBytesDemandedOnFrontOfRipeHash = shared.config.getint(
                        'bitmessagesettings', 'numberofnullbytesonaddress')
                except:
                    if eighteenByteRipe:
                        numberOfNullBytesDemandedOnFrontOfRipeHash = 2
                    else:
                        numberOfNullBytesDemandedOnFrontOfRipeHash = 1 # the default
            else:
                sys.stderr.write(
                    'Programming error: A structure with the wrong number of values was passed into the addressGeneratorQueue. Here is the queueValue: %s\n' % repr(queueValue))
            if addressVersionNumber < 3 or addressVersionNumber > 4:
                sys.stderr.write(
                    'Program error: For some reason the address generator queue has been given a request to create at least one version %s address which it cannot do.\n' % addressVersionNumber)
            if nonceTrialsPerByte == 0:
                nonceTrialsPerByte = shared.config.getint(
                    'bitmessagesettings', 'defaultnoncetrialsperbyte')
            if nonceTrialsPerByte < shared.networkDefaultProofOfWorkNonceTrialsPerByte:
                nonceTrialsPerByte = shared.networkDefaultProofOfWorkNonceTrialsPerByte
            if payloadLengthExtraBytes == 0:
                payloadLengthExtraBytes = shared.config.getint(
                    'bitmessagesettings', 'defaultpayloadlengthextrabytes')
            if payloadLengthExtraBytes < shared.networkDefaultPayloadLengthExtraBytes:
                payloadLengthExtraBytes = shared.networkDefaultPayloadLengthExtraBytes
            if command == 'createRandomAddress':
                shared.UISignalQueue.put((
                    'updateStatusBar', tr.translateText("MainWindow", "Generating one new address")))
                # This next section is a little bit strange. We're going to generate keys over and over until we
                # find one that starts with either \x00 or \x00\x00. Then when we pack them into a Bitmessage address,
                # we won't store the \x00 or \x00\x00 bytes thus making the
                # address shorter.
                startTime = time.time()
                numberOfAddressesWeHadToMakeBeforeWeFoundOneWithTheCorrectRipePrefix = 0
                potentialPrivSigningKey = OpenSSL.rand(32)
                potentialPubSigningKey = pointMult(potentialPrivSigningKey)
                while True:
                    numberOfAddressesWeHadToMakeBeforeWeFoundOneWithTheCorrectRipePrefix += 1
                    potentialPrivEncryptionKey = OpenSSL.rand(32)
                    potentialPubEncryptionKey = pointMult(
                        potentialPrivEncryptionKey)
                    # print 'potentialPubSigningKey', potentialPubSigningKey.encode('hex')
                    # print 'potentialPubEncryptionKey',
                    # potentialPubEncryptionKey.encode('hex')
                    ripe = hashlib.new('ripemd160')
                    sha = hashlib.new('sha512')
                    sha.update(
                        potentialPubSigningKey + potentialPubEncryptionKey)
                    ripe.update(sha.digest())
                    # print 'potential ripe.digest',
                    # ripe.digest().encode('hex')
                    if ripe.digest()[:numberOfNullBytesDemandedOnFrontOfRipeHash] == '\x00' * numberOfNullBytesDemandedOnFrontOfRipeHash:
                        break
                print 'Generated address with ripe digest:', ripe.digest().encode('hex')
                print 'Address generator calculated', numberOfAddressesWeHadToMakeBeforeWeFoundOneWithTheCorrectRipePrefix, 'addresses at', numberOfAddressesWeHadToMakeBeforeWeFoundOneWithTheCorrectRipePrefix / (time.time() - startTime), 'addresses per second before finding one with the correct ripe-prefix.'
                address = encodeAddress(addressVersionNumber, streamNumber, ripe.digest())

                # An excellent way for us to store our keys is in Wallet Import Format. Let us convert now.
                # https://en.bitcoin.it/wiki/Wallet_import_format
                privSigningKey = '\x80' + potentialPrivSigningKey
                checksum = hashlib.sha256(hashlib.sha256(
                    privSigningKey).digest()).digest()[0:4]
                privSigningKeyWIF = arithmetic.changebase(
                    privSigningKey + checksum, 256, 58)
                # print 'privSigningKeyWIF',privSigningKeyWIF

                privEncryptionKey = '\x80' + potentialPrivEncryptionKey
                checksum = hashlib.sha256(hashlib.sha256(
                    privEncryptionKey).digest()).digest()[0:4]
                privEncryptionKeyWIF = arithmetic.changebase(
                    privEncryptionKey + checksum, 256, 58)
                # print 'privEncryptionKeyWIF',privEncryptionKeyWIF

                shared.config.add_section(address)
                shared.config.set(address, 'label', label)
                shared.config.set(address, 'enabled', 'true')
                shared.config.set(address, 'decoy', 'false')
                shared.config.set(address, 'noncetrialsperbyte', str(
                    nonceTrialsPerByte))
                shared.config.set(address, 'payloadlengthextrabytes', str(
                    payloadLengthExtraBytes))
                shared.config.set(
                    address, 'privSigningKey', privSigningKeyWIF)
                shared.config.set(
                    address, 'privEncryptionKey', privEncryptionKeyWIF)
                with open(shared.appdata + 'keys.dat', 'wb') as configfile:
                    shared.config.write(configfile)

                # The API and the join and create Chan functionality
                # both need information back from the address generator.
                shared.apiAddressGeneratorReturnQueue.put(address)

                shared.UISignalQueue.put((
                    'updateStatusBar', tr.translateText("MainWindow", "Done generating address. Doing work necessary to broadcast it...")))
                shared.UISignalQueue.put(('writeNewAddressToTable', (
                    label, address, streamNumber)))
                shared.reloadMyAddressHashes()
                if addressVersionNumber == 3:
                    shared.workerQueue.put((
                        'sendOutOrStoreMyV3Pubkey', ripe.digest()))
                elif addressVersionNumber == 4:
                    shared.workerQueue.put((
                        'sendOutOrStoreMyV4Pubkey', address))

            elif command == 'createDeterministicAddresses' or command == 'getDeterministicAddress' or command == 'createChan' or command == 'joinChan':
                if len(deterministicPassphrase) == 0:
                    sys.stderr.write(
                        'WARNING: You are creating deterministic address(es) using a blank passphrase. Bitmessage will do it but it is rather stupid.')
                if command == 'createDeterministicAddresses':
                    statusbar = 'Generating ' + str(
                        numberOfAddressesToMake) + ' new addresses.'
                    shared.UISignalQueue.put((
                        'updateStatusBar', statusbar))
                signingKeyNonce = 0
                encryptionKeyNonce = 1
                listOfNewAddressesToSendOutThroughTheAPI = [
                ]  # We fill out this list no matter what although we only need it if we end up passing the info to the API.

                for i in range(numberOfAddressesToMake):
                    # This next section is a little bit strange. We're going to generate keys over and over until we
                    # find one that has a RIPEMD hash that starts with either \x00 or \x00\x00. Then when we pack them
                    # into a Bitmessage address, we won't store the \x00 or
                    # \x00\x00 bytes thus making the address shorter.
                    startTime = time.time()
                    numberOfAddressesWeHadToMakeBeforeWeFoundOneWithTheCorrectRipePrefix = 0
                    while True:
                        numberOfAddressesWeHadToMakeBeforeWeFoundOneWithTheCorrectRipePrefix += 1
                        potentialPrivSigningKey = hashlib.sha512(
                            deterministicPassphrase + encodeVarint(signingKeyNonce)).digest()[:32]
                        potentialPrivEncryptionKey = hashlib.sha512(
                            deterministicPassphrase + encodeVarint(encryptionKeyNonce)).digest()[:32]
                        potentialPubSigningKey = pointMult(
                            potentialPrivSigningKey)
                        potentialPubEncryptionKey = pointMult(
                            potentialPrivEncryptionKey)
                        # print 'potentialPubSigningKey', potentialPubSigningKey.encode('hex')
                        # print 'potentialPubEncryptionKey',
                        # potentialPubEncryptionKey.encode('hex')
                        signingKeyNonce += 2
                        encryptionKeyNonce += 2
                        ripe = hashlib.new('ripemd160')
                        sha = hashlib.new('sha512')
                        sha.update(
                            potentialPubSigningKey + potentialPubEncryptionKey)
                        ripe.update(sha.digest())
                        # print 'potential ripe.digest',
                        # ripe.digest().encode('hex')
                        if ripe.digest()[:numberOfNullBytesDemandedOnFrontOfRipeHash] == '\x00' * numberOfNullBytesDemandedOnFrontOfRipeHash:
                            break

                    print 'ripe.digest', ripe.digest().encode('hex')
                    print 'Address generator calculated', numberOfAddressesWeHadToMakeBeforeWeFoundOneWithTheCorrectRipePrefix, 'addresses at', numberOfAddressesWeHadToMakeBeforeWeFoundOneWithTheCorrectRipePrefix / (time.time() - startTime), 'keys per second.'
                    address = encodeAddress(addressVersionNumber, streamNumber, ripe.digest())

                    saveAddressToDisk = True
                    # If we are joining an existing chan, let us check to make sure it matches the provided Bitmessage address
                    if command == 'joinChan':
                        if address != chanAddress:
                            shared.apiAddressGeneratorReturnQueue.put('chan name does not match address')
                            saveAddressToDisk = False
                    if command == 'getDeterministicAddress':
                        saveAddressToDisk = False

                    if saveAddressToDisk:
                        # An excellent way for us to store our keys is in Wallet Import Format. Let us convert now.
                        # https://en.bitcoin.it/wiki/Wallet_import_format
                        privSigningKey = '\x80' + potentialPrivSigningKey
                        checksum = hashlib.sha256(hashlib.sha256(
                            privSigningKey).digest()).digest()[0:4]
                        privSigningKeyWIF = arithmetic.changebase(
                            privSigningKey + checksum, 256, 58)

                        privEncryptionKey = '\x80' + \
                            potentialPrivEncryptionKey
                        checksum = hashlib.sha256(hashlib.sha256(
                            privEncryptionKey).digest()).digest()[0:4]
                        privEncryptionKeyWIF = arithmetic.changebase(
                            privEncryptionKey + checksum, 256, 58)

                        addressAlreadyExists = False
                        try:
                            shared.config.add_section(address)
                        except:
                            print address, 'already exists. Not adding it again.'
                            addressAlreadyExists = True
                        if not addressAlreadyExists:
                            print 'label:', label
                            shared.config.set(address, 'label', label)
                            shared.config.set(address, 'enabled', 'true')
                            shared.config.set(address, 'decoy', 'false')
                            if command == 'joinChan' or command == 'createChan':
                                shared.config.set(address, 'chan', 'true')
                            shared.config.set(address, 'noncetrialsperbyte', str(
                                nonceTrialsPerByte))
                            shared.config.set(address, 'payloadlengthextrabytes', str(
                                payloadLengthExtraBytes))
                            shared.config.set(
                                address, 'privSigningKey', privSigningKeyWIF)
                            shared.config.set(
                                address, 'privEncryptionKey', privEncryptionKeyWIF)
                            with open(shared.appdata + 'keys.dat', 'wb') as configfile:
                                shared.config.write(configfile)

                            shared.UISignalQueue.put(('writeNewAddressToTable', (
                                label, address, str(streamNumber))))
                            listOfNewAddressesToSendOutThroughTheAPI.append(
                                address)
                            shared.myECCryptorObjects[ripe.digest()] = highlevelcrypto.makeCryptor(
                                potentialPrivEncryptionKey.encode('hex'))
                            shared.myAddressesByHash[ripe.digest()] = address
                            tag = hashlib.sha512(hashlib.sha512(encodeVarint(
                                addressVersionNumber) + encodeVarint(streamNumber) + ripe.digest()).digest()).digest()[32:]
                            shared.myAddressesByTag[tag] = address
                            if addressVersionNumber == 3:
                                shared.workerQueue.put((
                                    'sendOutOrStoreMyV3Pubkey', ripe.digest())) # If this is a chan address,
                                        # the worker thread won't send out the pubkey over the network.
                            elif addressVersionNumber == 4:
                                shared.workerQueue.put((
                                    'sendOutOrStoreMyV4Pubkey', address))


                # Done generating addresses.
                if command == 'createDeterministicAddresses' or command == 'joinChan' or command == 'createChan':
                    shared.apiAddressGeneratorReturnQueue.put(
                        listOfNewAddressesToSendOutThroughTheAPI)
                    shared.UISignalQueue.put((
                        'updateStatusBar', tr.translateText("MainWindow", "Done generating address")))
                    # shared.reloadMyAddressHashes()
                elif command == 'getDeterministicAddress':
                    shared.apiAddressGeneratorReturnQueue.put(address)
                #todo: return things to the API if createChan or joinChan assuming saveAddressToDisk
            else:
                raise Exception(
                    "Error in the addressGenerator thread. Thread was given a command it could not understand: " + command)

Example 21

Project: ALF
Source File: __init__.py
View license
    def __init__(self, grammar_str, path=".", **kwds):
        self._grmr = _alf_grammar.Grammar()
        self.grammar_debug = int(os.getenv("GRAMMAR_DEBUG", "0"), 0)
        used_kwds = set()

        self.re_refs = r"""(?x)^(?P<func>%s)\((?P<func_rest>.*)$ |
                               ^(?P<quote>[\"']).*$ |
                               ^\#\s*(?P<spec>\S*).*$ |
                               ^&(?P<file>[^\s%%s]+)\s*(?P<file_rest>.*)$ |
                               ^(?P<regex>[\[\.]).*$ |
                               ^rndint\(\s*(?P<int_a>[0-9+-]+)\s*,\s*(?P<int_b>[0-9+-]+)\s*\)\s*(?P<int_rest>.*)$ |
                               ^rndflt\(\s*(?P<flt_a>[0-9.e+-]+)\s*,\s*(?P<flt_b>[0-9.e+-]+)\s*\)\s*(?P<flt_rest>.*)$ |
                               ^(?P<sym>[^\s%%s]+)\s*(?P<sym_rest>.*)$ |
                               ^(?P<bad>.*)$""" % "|".join(kwds.keys())

        current_choice_symbol = None
        line_no = 0
        allow_cfg = True
        grammar_str = grammar_str.splitlines()
        while True:
            line_no += 1
            try:
                line = grammar_str[line_no-1]
            except IndexError:
                break
            # this messes up line numbers in the error prints.. oh well
            while line.endswith("\\"):
                line_no += 1
                try:
                    line = "%s%s" % (line[:-1], grammar_str[line_no-1])
                except IndexError:
                    raise RuntimeError("Unexpected end of input on line %d" % line_no)

            match = RE_SYM_PARSE.match(line)

            if match is None or match.group("not_name") is not None:
                raise RuntimeError("Grammar parse error on line %d" % line_no)

            if match.group("comment") is not None: # comment
                if allow_cfg:
                    self._parse_cfg(line)
                    allow_cfg = False
                continue

            if match.group("whitespace") is not None: # all whitespace
                current_choice_symbol = None
                continue

            allow_cfg = False

            name = "".join([_f for _f in match.group("ref", "star", "foreign", "choice", "concat") if _f])

            if name:
                # create an empty symbol, OR get ready to fill in a symbol that other symbols already reference
                symbol = self._grmr.name_to_symbol(name, line_no)
                assert symbol.type == 0, "Defining a symbol twice (%d)" % line_no
                current_choice_symbol = None

            if match.group("ref"):
                # Tracked symbol
                tracked = self._grmr.name_to_symbol(name[1:], line_no)
                tracked.tracked = int(match.group("ref_width"))
                if tracked.tracked > 32:
                    raise RuntimeError("References longer than 32 bytes are not supported.")
                symbol.define_reference(tracked, line_no)
                scoped = self._grmr.name_to_symbol("$%s" % name[1:], line_no) # define a scoped version too, in case it gets used
                scoped.define_scoped_reference(tracked, line_no)
                # parse the comment in case a keyword is specified
                if match.group("ref_cmt"):
                    for _ in self._parse_refs(symbol, match.group("ref_cmt"), path, kwds, used_kwds, line_no):
                        raise RuntimeError("Unexpected input on line %d" % line_no)
                    for _ in self._parse_refs(scoped, match.group("ref_cmt"), path, kwds, used_kwds, line_no):
                        raise RuntimeError("Unexpected input on line %d" % line_no)
            elif match.group("star"):
                # Star
                count, rest = match.group("count", "star_rest")
                symbol.define_star(self._parse_child(symbol, rest, path, kwds, used_kwds, line_no)[0], float(count), line_no)
            elif match.group("foreign"):
                # Foreign grammar
                filename, rest = match.group("filename", "foreign_rest")
                sub_grammar = Grammar(open(os.path.join(path, filename)).read(), path)
                if rest:
                    sub_grammar._parse_cfg(rest)
                symbol.define_foreign(sub_grammar._grmr, line_no)
            elif match.group("choice"):
                # Choice
                weight, rest = match.group("choice_weight", "choice_rest")
                symbol.define_choice(line_no)
                current_choice_symbol = symbol
                self._parse_choice(symbol, weight, rest, path, kwds, used_kwds, line_no)
            elif match.group("concat"):
                # Concatenation
                rest = match.group("concat_rest")
                symbol.define_concat(line_no)
                for child in self._parse_refs(symbol, rest, path, kwds, used_kwds, line_no):
                    symbol.add_concat(child, line_no)

            elif match.group("ccs_rest"): # starts with whitespace: continuation of previous symbol, which must be a choice symbol
                assert current_choice_symbol, "This line looks like the continuation of a choice symbol," \
                                              " but there is no current choice symbol. (%d)" % line_no
                weight, line = match.group("ccs_weight", "ccs_rest")
                self._parse_choice(current_choice_symbol, weight, line, path, kwds, used_kwds, line_no)

            else:
                raise RuntimeError("Unrecognized format parsing line %d" % line_no)

        for func in kwds:
            if func not in used_kwds:
                raise RuntimeError("Unused keyword argument: %s" % func)

        self._grmr.sanity_check()

        # populate terminal value
        grmr_pos = 0
        stack = []
        chd_pos = []
        while True:
            if not stack:
                if grmr_pos >= len(self._grmr):
                    break # done
                stack.append(self._grmr[grmr_pos])
                chd_pos.append(0)
                grmr_pos += 1
            sym = stack.pop()
            chd = chd_pos.pop()
            if sym.terminal is None:
                if sym in stack:
                    if self.grammar_debug & (1<<6):
                        sys.stderr.write("%s (%d) non-terminating by recursion\n" % (sym.name, sym.line_no))
                    sym.terminal = False
                elif chd < len(sym):
                    # recurse into children
                    stack.extend([sym, sym[chd]])
                    if self.grammar_debug & (1<<6):
                        sys.stderr.write("recursing into %s (%d) %d-th child %s (%d)\n" % (sym.name, sym.line_no, chd, sym[chd].name, sym[chd].line_no))
                    chd_pos.extend([chd + 1, 0])
                else:
                    # done with children (or no children)
                    sym.terminal = True
                    for c in sym:
                        sym.terminal = sym.terminal and c.terminal
                    if self.grammar_debug & (1<<6):
                        sys.stderr.write("%s (%d) %sterminating by children\n" % (sym.name, sym.line_no, "" if sym.terminal else "non-"))
            elif self.grammar_debug & (1<<6):
               sys.stderr.write("%s (%d) already %sterminating\n" % (sym.name, sym.line_no, "" if sym.terminal else "non-"))

Example 22

Project: WASP
Source File: simulate_counts.py
View license
def main():
    options = parse_options()
    
    out_files = []
    sys.stderr.write("creating output files:\n")
    file_list = open("%s_file_list.txt" % options.prefix, "w")
    for i in range(options.num_inds):
        out_filename = "%s_%d.txt" % (options.prefix, i+1)
        sys.stderr.write("  %s\n" % out_filename)

        out_files.append(open(out_filename, "w"))
        # write_options(out_files[i], options)
        write_header(out_files[i])
        file_list.write(out_filename + "\n")
    file_list.close()
    
    
    ASseq_Y_file = open("%s_Y.txt" % options.prefix, "w")
    ASseq_Y1_file = open("%s_Y1.txt" % options.prefix, "w")
    ASseq_Y2_file = open("%s_Y2.txt" % options.prefix, "w")
    ASseq_Z_file = open("%s_Z.txt" % options.prefix, "w")

    test = 1
    while test <= options.num_tests:
        if random() > options.true_positives:
            # simulate a site with no effect, this is not a positive
            effect = 0
            alt_expr = 1
            AS_frac = 0.5
        elif random() < 0.5:
            # simulate a site with effect and beta > alpha
            effect = 0
            alt_expr = 1.0 + options.effect_size
            AS_frac = 1.0 / (2.0 + options.additivity * options.effect_size)
        else:
            # simulate a site with effect and alpha > beta
            effect = 1
            alt_expr = 1 / (1 + options.effect_size)
            AS_frac = (1 + options.additivity * options.effect_size) / \
                (2.0 + options.additivity * options.effect_size)

        snps = []
        counts = []
        num_hets = 0

        
        if options.mean_counts_distr == "POINT":
            mean_counts = options.mean_counts
        elif options.mean_counts_distr == "EXPONENTIAL":
            mean_counts = np.random.exponential(options.mean_counts)
        else:
            raise ValueError("unknown distribution %s\n" %
                             options.mean_counts_distr)

        if options.gene_disp_distr == "POINT":
            gene_disp = options.gene_disp
        elif options.gene_disp_distr == "EXPONENTIAL":
            gene_disp = np.random.exponential(options.gene_disp)
            sys.stderr.write("gene_disp: %.2f\n" % gene_disp)
        else:
            sys.stderr.write("unknown distribution: %s\n" % gene_disp)

        
        for ind in range(options.num_inds):
            # Simulate the individual's haps=[0,0]
            # prob of each minor allele is MAF (minor allele freq)
            is_het = False

            n_minor = int(random() < options.maf) + int(random() < options.maf)
            if n_minor == 0:
                # no minor alleles
                haps = [0,0]
            elif n_minor == 1:
                # heterozygous
                haps = [0,1]
                num_hets += 1
                is_het = True
            else:
                # two minor alleles
                haps = [1,1]
            
            # Expected number of reads based on genotypes
            ind_mean_counts = mean_counts * ((2 - n_minor) + (n_minor * alt_expr))
            #sys.stderr.write("n_minor: %d alt_expr: %g mean_counts: %g " %
            #                 (n_minor, alt_expr, ind_mean_counts))
            
            sim_count = simulate_BNB(ind_mean_counts, gene_disp, options.ind_disp[ind])

            if is_het:
                if random() < options.het_error_rate:
                    # simulate a homozygous site that was miscalled
                    # as a heterozygote
                    if haps[0] == 0:
                        ref, alt = simulate_BB(options.as_counts,
                                               options.read_error_rate, options.as_disp)
                    else:
                        ref, alt = simulate_BB(options.as_counts, 1-options.read_error_rate,
                                               options.as_disp)
                else:
                    ref, alt = simulate_BB(options.as_counts, AS_frac,
                                           options.as_disp)
            else:
                if options.sim_hom_as:
                    # simulate allele-specific counts even when test SNP
                    # is homozygous
                    ref, alt = simulate_BB(options.as_counts, 0.5, options.as_disp)
                else:
                    ref, alt = 0, 0
            snps.append(TestSNP(effect, test, haps, sim_count,
                                ref, alt, 1.0 - options.het_error_rate))
            counts.append(sim_count)

        mean_counts = np.mean(counts)
        Y=[]
        Y1=[]
        Y2=[]
        Z=[]
        
        if num_hets >= options.min_hets:
            for snp_indx in range(len(snps)):
                snps[snp_indx].set_total_counts(mean_counts)
                out_files[snp_indx].write(snps[snp_indx].print_snp())
                out_files[snp_indx].flush()
                
                Y.append(snps[snp_indx].count)
                Y1.append(snps[snp_indx].ref_count)
                Y2.append(snps[snp_indx].alt_count)
            
                if(snps[snp_indx].haps[0]==0 and snps[snp_indx].haps[1]==0):
                    Z.append(0)
                elif(snps[snp_indx].haps[0]==0 and snps[snp_indx].haps[1]==1):
                    Z.append(1)
                elif(snps[snp_indx].haps[0]==1 and snps[snp_indx].haps[1]==0):
                    Z.append(1)
                elif(snps[snp_indx].haps[0]==1 and snps[snp_indx].haps[1]==1):
                    Z.append(4)
                    
            ASseq_Y_file.write("\t".join(str(y) for y in Y)+"\n")
            ASseq_Y1_file.write("\t".join(str(y1) for y1 in Y1)+"\n")
            ASseq_Y2_file.write("\t".join(str(y2) for y2 in Y2)+"\n")    
            ASseq_Z_file.write("\t".join(str(z) for z in Z)+"\n")
            test+=1
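
The example above writes simulated data to files and reserves sys.stderr for progress messages. A minimal sketch of that separation; the file name and loop bounds are hypothetical, not taken from WASP:

import sys

# Hypothetical sketch: data goes to an output file, progress goes to stderr,
# so redirecting or parsing the data stream never picks up log text.
out = open("simulated_counts.txt", "w")
sys.stderr.write("creating output files:\n")
for i in range(3):
    sys.stderr.write("  writing test %d\n" % (i + 1))
    out.write("%d\t%d\n" % (i + 1, 100 * (i + 1)))
out.close()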

Example 23

Project: WASP
Source File: rmdup_pe.py
View license
def filter_reads(infile, outfile):
    read_stats = ReadStats()
    
    cur_tid = None
    seen_chrom = set([])

    # name of reads to keep
    keep_cache = {}
    # name of reads to discard
    discard_cache = {}
    cur_by_mpos = {}

    read_count = 0

    # current position on chromosome
    cur_pos = None
    # lists of reads at current position,
    # grouped by the mate pair position
    cur_by_mpos = {}
    
    for read in infile:
        read_count += 1

        if read.is_unmapped:
            read_stats.discard_unmapped += 1
            continue
        
        if (cur_tid is None) or (read.tid != cur_tid):
            # this is a new chromosome
            cur_chrom = infile.getrname(read.tid)

            if cur_pos:
                update_read_cache(cur_by_mpos, keep_cache, discard_cache,
                                  read_stats, outfile)
            
            if len(keep_cache) + len(discard_cache) != 0:
                sys.stderr.write("WARNING: failed to find pairs for %d "
                                 "reads on this chromosome\n" %
                                 (len(keep_cache) + len(discard_cache)))
                read_stats.discard_missing_pair += len(keep_cache) + \
                                                   len(discard_cache)
                
                sys.stderr.write("keep_cache:\n")
                for r in keep_cache.values():
                    sys.stderr.write("  %s\n" % r.qname)
                sys.stderr.write("discard_cache:\n")
                for r in discard_cache.values():
                    sys.stderr.write("  %s\n" % r.qname)
                                    
            keep_cache = {}
            discard_cache = {}
            cur_pos = None
            cur_by_mpos = {}
            read_count = 0
            
            if cur_chrom in seen_chrom:
                # sanity check that input bam file is sorted
                raise ValueError("expected input BAM file to be sorted "
                                 "but chromosome %s is repeated\n" % cur_chrom)
            seen_chrom.add(cur_chrom)
            cur_tid = read.tid
            sys.stderr.write("starting chromosome %s\n" % cur_chrom)
            sys.stderr.write("processing reads\n")

        if read.mate_is_unmapped:
            read_stats.discard_mate_unmapped += 1
            continue
            
        if read.is_secondary:
            # this is a secondary alignment (i.e. read was aligned more than
            # once and this has align score that <= best score)
            read_stats.discard_secondary += 1
            continue

        if (not read.is_paired) or (read.next_reference_name is None):
            read_stats.discard_single += 1
            continue

        if (read.next_reference_name != cur_chrom) and \
           (read.next_reference_name != "="):
            # other side of pair mapped to different chromosome
            read_stats.discard_different_chromosome += 1
            continue

        if not read.is_proper_pair:
            read_stats.discard_improper_pair += 1
            continue

        if (cur_pos is not None) and (read.pos < cur_pos):
            raise ValueError("expected input BAM file to be sorted "
                             "but reads are out of order")
        
        if cur_pos is None or read.pos > cur_pos:
            # we have advanced to a new start position
            # decide which of reads at last position to keep or discard
            update_read_cache(cur_by_mpos, keep_cache, discard_cache,
                              read_stats, outfile)

            # create new list of reads at current position
            cur_pos = read.pos
            cur_by_mpos = {}

        if read.qname in keep_cache:
            # we already saw prev side of pair, retrieve from cache
            read1 = keep_cache[read.qname]
            read2 = read
            del keep_cache[read.qname]

            if read2.next_reference_start != read1.reference_start:
                sys.stderr.write("WARNING: read pair positions "
                                 "do not match for pair %s\n" % read.qname)

            read_stats.keep_pair += 1
            outfile.write(read1)
            outfile.write(read2)
            
        elif read.qname in discard_cache:
            # we already saw prev side of pair, but decided to discard
            # because read duplicated
            del discard_cache[read.qname]
            read_stats.discard_dup += 1

        else:
            # we have not seen other side of this read yet
            # add read to list of those at current position
            # grouping by mate-pair position
            if read.mpos in cur_by_mpos:
                cur_by_mpos[read.mpos].append(read)
            else:
                cur_by_mpos[read.mpos] = [read]

    # final update of read cache is just to cache strange corner case
    # where final read pair on chromosome were overlapping (same start pos)
    if cur_pos:
        update_read_cache(cur_by_mpos, keep_cache, discard_cache,
                          read_stats, outfile)
                                 
    if (len(keep_cache) + len(discard_cache)) != 0:
        sys.stderr.write("WARNING: failed to find pairs for %d "
                         "keep reads and %d discard reads on this "
                         "chromosome\n" % (len(keep_cache), len(discard_cache)))
        
        read_stats.discard_missing_pair += len(keep_cache) + len(discard_cache)

    read_stats.write(sys.stderr)
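
The final line above hands sys.stderr itself to a write() method, treating it as just another file-like object. A minimal sketch of that idiom; the Stats class is a hypothetical stand-in, not the WASP ReadStats class:

import sys

class Stats(object):
    # Hypothetical stand-in: write() accepts any file-like object, so the
    # caller decides whether the report goes to stderr, a log file, etc.
    def __init__(self):
        self.kept = 0
        self.discarded = 0

    def write(self, fh):
        fh.write("kept: %d\n" % self.kept)
        fh.write("discarded: %d\n" % self.discarded)

stats = Stats()
stats.kept = 10
stats.write(sys.stderr)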

Example 24

Project: camr
Source File: AMRGraph.py
View license
    @classmethod
    def parse_string(cls,amr_string,RENAME_NODE=False):
        """
        Parse a PENMAN-style string representation of an AMR and return an AMR object.
        
        >>>x = AMR.parse_string("(a / and :op1(恶化 :ARG0(它) :ARG1(模式 :mod(开发)) :time (已 经)) :op2(堵塞 :ARG0(它) :ARG1(交通 :mod(局部)) :location(a / around :op1(出口)))))")
        >>>
        .
        """

        def make_compiled_regex(rules):
            regexstr =  '|'.join('(?P<%s>%s)' % (name, rule) for name, rule in rules)
            return re.compile(regexstr)

        def rename_node(parentnodelabel,parentconcept):
            if not isinstance(parentnodelabel,(Quantity,Polarity,Interrogative,StrLiteral)):                                       
                # graph node rebuild
                if parentconcept is not None:
                    amr.node_to_concepts[node_idx] = parentconcept
                    mapping_table[parentnodelabel] = node_idx
                    parentnodelabel = node_idx
                    node_idx += 1
                else:
                    # not revisiting and concept is None
                    if parentnodelabel not in mapping_table:
                        amr.node_to_concepts[node_idx] = parentnodelabel
                        parentnodelabel = node_idx
                        node_idx += 1
                    else: #revisiting 
                        parentnodelabel = mapping_table[parentnodelabel]


        PNODE = 1
        CNODE = 2        
        EDGE = 3
        RCNODE = 4
        
        amr = cls()
        stack = []
        state = 0
        node_idx = 0; # sequential new node index
        mapping_table = {};  # old new index mapping table

        lex_rules = [
            ("LPAR", '\('),
            ("RPAR",'\)'),
            ("COMMA",','), 
            ("SLASH",'/'),
            ("EDGELABEL",":[^\s()]+"),
            ("STRLITERAL",u'"[^"]+"|\u201c[^\u201d]+\u201d'),
            ("LITERAL","'[^\s(),]+"),
            ("INTERROGATIVE","\s(interrogative|imperative|expressive)(?=[\s\)])"),
            ("QUANTITY","[0-9][0-9Ee^+\-\.,:]*(?=[\s\)])"),
            ("IDENTIFIER","[^\s()]+"), #no blank within characters
            ("POLARITY","\s(\-|\+)(?=[\s\)])")
        ] 
        
        token_re = make_compiled_regex(lex_rules)
        #lexer = Lexer(lex_rules)
        #amr.reentrance_triples = []

        for match in token_re.finditer(amr_string):
            token = match.group()
            type = match.lastgroup
            
            #if type == "STRLITERAL":
            #    import pdb
            #    pdb.set_trace()

            #print token.strip(),type
            if state == 0:
                if type == "LPAR":
                    state = 1
                else: raise ParserError, "Unexpected token %s"%(token)

            elif state == 1:
                if type == "IDENTIFIER":
                    stack.append((PNODE,token.strip(),None))
                    state = 2
                elif type == "QUANTITY":
                    stack.append((PNODE,Quantity(token.strip()),None))
                    state = 2
                elif type == "STRLITERAL":
                    stack.append((PNODE,StrLiteral(token.strip()),None))
                    state = 2
                else: raise ParserError , "Unexpected token %s"%(token.encode('utf8'))

            elif state == 2:
                if type == "SLASH":
                    state = 3
                elif type == "EDGELABEL":
                    stack.append((EDGE,token[1:]))
                    state = 5
                elif type == "RPAR":
                    forgetme, parentnodelabel, parentconcept = stack.pop()
                    assert forgetme == PNODE
                    assert parentconcept == None
                    
                    if RENAME_NODE:
                        rename_node(parentnodelabel,parentconcept)
                    else:
                        if not parentnodelabel in amr.node_to_concepts or parentconcept is not None: 
                            amr.node_to_concepts[parentnodelabel] = parentconcept

                    foo = amr[parentnodelabel]

                    if stack:
                        stack.append((CNODE,parentnodelabel,parentconcept))
                        state = 6
                    else:
                        amr.roots.append(parentnodelabel)
                        state = 0
                    
                else: raise ParserError, "Unexpected token %s"%(token)

            elif state == 3:
                if type == "IDENTIFIER" or "QUANTITY":
                    assert stack[-1][0] == PNODE
                    nodelabel = stack.pop()[1]
                    stack.append((PNODE,nodelabel,token))
                    state = 4
                else: raise ParserError, "Unexpected token %s"%(token)
            
            elif state == 4:
                if type == "EDGELABEL":
                    stack.append((EDGE,token[1:]))
                    state = 5
                elif type == "RPAR":
                    forgetme, parentnodelabel, parentconcept = stack.pop()
                    assert forgetme == PNODE
                    foo = amr[parentnodelabel] # add only the node
                    #print state,parentnodelabel,parentconcept
                    if parentconcept is not None:
                        amr.node_to_concepts[parentnodelabel] = parentconcept
                    
                    if stack:
                        stack.append((CNODE,parentnodelabel,parentconcept))
                        state = 6
                    else:
                        amr.roots.append(parentnodelabel)
                        state = 0
                else:
                    print amr_string
                    raise ParserError, "Unexpected token %s"%(token.encode('utf8'))
                
            elif state == 5:
                if type == "LPAR":
                    state = 1
                elif type == "QUANTITY":
                    stack.append((CNODE,Quantity(token),None))
                    state = 6
                elif type == "STRLITERAL":
                    stack.append((CNODE,StrLiteral(token[1:-1]),None))
                    state = 6
                elif type == "INTERROGATIVE":
                    stack.append((CNODE,Interrogative(token[1:]),None))
                    state = 6
                elif type == "POLARITY":
                    stack.append((CNODE,Polarity(token.strip()),None))
                    state = 6
                elif type == "IDENTIFIER":
                    stack.append((RCNODE,token,None))
                    state = 6
                elif type == "EDGELABEL": #Unary edge
                    stack.append((CNODE,None,None)) 
                    stack.append((EDGE,token[1:]))
                    state = 5
                
                elif type == "RPAR":
                    stack.append((CNODE,None,None))
                    edges = []
                    
                    while stack[-1][0] != PNODE:
                        children = []
                        #one edge may have multiple children/tail nodes
                        while stack[-1][0] == CNODE:
                            forgetme, childnodelabel, childconcept = stack.pop()
                            children.append((childnodelabel,childconcept))
                        
                        assert stack[-1][0] == EDGE
                        forgetme, edgelabel = stack.pop()
                        edges.append((edgelabel,children))
                    
                    forgetme,parentnodelabel,parentconcept = stack.pop()
                    #print state,parentnodelabel,parentconcept

                    #check for annotation error
                    if parentnodelabel in amr.node_to_concepts.keys(): 
                        #concept has been defined by the children, 
                        #then they must have different concepts, otherwise the children's concepts should be None
                        #(coreference)
                        if amr.node_to_concepts[parentnodelabel] == parentconcept:
                            sys.stderr.write("Wrong annotation format: Revisited concepts %s should be ignored.\n" % parentconcept)
                        else:
                            sys.stderr.write("Wrong annotation format: Different concepts %s and %s have same node label(index)\n" % (amr.node_to_concepts[parentnodelabel],parentconcept))
                            parentnodelabel = parentnodelabel + "1"
                            
                    if RENAME_NODE:
                        rename_node(parentnodelabel,parentconcept)
                    else:
                        if not parentnodelabel in amr.node_to_concepts or parentconcept is not None: 
                            amr.node_to_concepts[parentnodelabel] = parentconcept


                    for edgelabel,children in reversed(edges):
                        hypertarget = []
                        for node, concept in children:
                            if node is not None and not isinstance(node,(Quantity,Polarity,Interrogative,StrLiteral)) and not node in amr.node_to_concepts:
                                if RENAME_NODE:
                                    rename_node(node,concept)
                                else:
                                    if concept:
                                        amr.node_to_concepts[node] = concept

                            hypertarget.append(node)
                        hyperchild = tuple(hypertarget)
                        amr._add_triple(parentnodelabel,edgelabel,hyperchild)
                    
                    if stack: #we have done with current level
                        state = 6
                        stack.append((CNODE, parentnodelabel, parentconcept))
                    else: #we have done with this subgraph
                        state = 0
                        amr.roots.append(parentnodelabel)

            elif state == 6:
                if type == "RPAR":
                    
                    edges = []
                    reedges = []
                    while stack[-1][0] != PNODE:
                        children = []
                        reentrances = []
                        #one edge may have multiple children/tail nodes
                        while stack[-1][0] == CNODE or stack[-1][0] == RCNODE:
                            CTYPE, childnodelabel, childconcept = stack.pop()
                            if CTYPE == RCNODE:
                                reentrances.append((childnodelabel,childconcept))
                            children.append((childnodelabel,childconcept))
                        
                        assert stack[-1][0] == EDGE
                        forgetme, edgelabel = stack.pop()
                        edges.append((edgelabel,children))
                        reedges.append((edgelabel,reentrances))
                    
                    forgetme,parentnodelabel,parentconcept = stack.pop()
                    #print "PNODE",state,parentnodelabel,parentconcept
                    
                    #check for annotation error
                    if parentnodelabel in amr.node_to_concepts.keys(): 
                        #concept has been defined by the children, 
                        #then they must have different concepts, otherwise the children's concepts should be None
                        #(coreference)
                        if amr.node_to_concepts[parentnodelabel] == parentconcept:
                            sys.stderr.write("Wrong annotation format: Revisited concepts %s should be ignored.\n" % parentconcept)
                        else:
                            sys.stderr.write("Wrong annotation format: Different concepts %s and %s have same node label(index)\n" % (amr.node_to_concepts[parentnodelabel],parentconcept))
                            parentnodelabel = parentnodelabel + "1"
                            
                    if RENAME_NODE:
                        rename_node(parentnodelabel,parentconcept)
                    else:
                        if not parentnodelabel in amr.node_to_concepts or parentconcept is not None: 
                            amr.node_to_concepts[parentnodelabel] = parentconcept

                    for edgelabel,children in reversed(edges):
                        hypertarget = []
                        for node, concept in children:
                            if node is not None and not isinstance(node,(Quantity,Polarity,Interrogative,StrLiteral)) and not node in amr.node_to_concepts:
                                if RENAME_NODE:
                                    rename_node(node,concept)
                                else:
                                    if concept:
                                        amr.node_to_concepts[node] = concept
                            hypertarget.append(node)
                        hyperchild = tuple(hypertarget)
                        amr._add_triple(parentnodelabel,edgelabel,hyperchild)
                    
                    for edgelabel,reentrance in reedges:
                        hreent = []
                        for node,concept in reentrance:
                            hreent.append(node)
                        amr._add_reentrance(parentnodelabel,edgelabel,hreent)

                    if stack: #we have done with current level
                        state = 6
                        stack.append((CNODE, parentnodelabel, parentconcept))
                    else: #we have done with this subgraph
                        state = 0
                        amr.roots.append(parentnodelabel)
                elif type == "COMMA": # to seperate multiple children/tails 
                    state = 7
                elif type == "EDGELABEL":
                    stack.append((EDGE,token[1:]))
                    state = 5
                else: raise ParserError, "Unexpected token %s"%(token.encode('utf8'))

            elif state == 7:
                if type == "IDENTIFIER":
                    stack.append((CNODE, token, None)) # another children
                    state = 6
                elif typpe == "LPAR":
                    state = 1
                else: raise ParserError, "Unexpected token %s"%(token)

        if state != 0 and stack: 
            raise ParserError, "mismatched parenthesis"
        return amr
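
The parser above reports annotation problems on stderr but repairs the label and keeps going rather than aborting. A minimal sketch of that warn-and-continue pattern; the node_to_concepts dict and note_label helper are hypothetical simplifications, not the camr API:

import sys

node_to_concepts = {"a": "and"}

def note_label(label, concept):
    # Hypothetical helper: warn about a conflicting node label on stderr,
    # rename it, and continue instead of raising.
    if label in node_to_concepts and node_to_concepts[label] != concept:
        sys.stderr.write("Wrong annotation format: Different concepts %s and %s "
                         "have same node label(index)\n"
                         % (node_to_concepts[label], concept))
        label = label + "1"
    node_to_concepts[label] = concept
    return label

note_label("a", "around")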

Example 25

Project: bamsurgeon
Source File: addindel.py
View license
def makemut(args, chrom, start, end, vaf, ins, avoid, alignopts):
    ''' if ins is a sequence, it will be inserted at start, otherwise delete from start to end '''

    if args.seed is not None: random.seed(int(args.seed) + int(start))

    mutid = chrom + '_' + str(start) + '_' + str(end) + '_' + str(vaf)
    if ins is None:
        mutid += ':DEL'
    else:
        mutid += ':INS:' + ins

    try:
        bamfile = pysam.Samfile(args.bamFileName, 'rb')
        bammate = pysam.Samfile(args.bamFileName, 'rb') # use for mates to avoid iterator problems
        reffile = pysam.Fastafile(args.refFasta)
        tmpbams = []

        is_insertion = ins is not None
        is_deletion  = ins is None

        snvfrac = float(args.snvfrac)

        mutstr = get_mutstr(chrom, start, end, ins, reffile)

        del_ln = 0
        if is_deletion:
            del_ln = end-start

        mutpos = start
        mutpos_list = [start]

        # optional CNV file
        cnv = None
        if (args.cnvfile):
            cnv = pysam.Tabixfile(args.cnvfile, 'r')

        log = open('addindel_logs_' + os.path.basename(args.outBamFile) + '/' + os.path.basename(args.outBamFile) + "." + "_".join((chrom,str(start),str(end))) + ".log",'w')

        tmpoutbamname = args.tmpdir + "/" + mutid + ".tmpbam." + str(uuid4()) + ".bam"
        print "INFO\t" + now() + "\t" + mutid + "\tcreating tmp bam: ",tmpoutbamname #DEBUG
        outbam_muts = pysam.Samfile(tmpoutbamname, 'wb', template=bamfile)

        mutfail, hasSNP, maxfrac, outreads, mutreads, mutmates = mutation.mutate(args, log, bamfile, bammate, chrom, mutpos, mutpos+del_ln+1, mutpos_list, avoid=avoid, mutid_list=[mutid], is_insertion=is_insertion, is_deletion=is_deletion, ins_seq=ins, reffile=reffile, indel_start=start, indel_end=end)

        if mutfail:
            outbam_muts.close()
            os.remove(tmpoutbamname)
            return None

        # pick reads to change
        readlist = []
        for extqname,read in outreads.iteritems():
            if read.seq != mutreads[extqname]:
                readlist.append(extqname)

        print "len(readlist):",str(len(readlist))
        readlist.sort()
        random.shuffle(readlist)

        if len(readlist) < int(args.mindepth):
            sys.stderr.write("WARN\t" + now() + "\t" + mutid + "\tskipped, too few reads in region: " + str(len(readlist)) + "\n")
            outbam_muts.close()
            os.remove(tmpoutbamname)
            return None

        if vaf is None:
            vaf = float(args.mutfrac) # default minor allele freq if not otherwise specified
        if cnv: # cnv file is present
            if chrom in cnv.contigs:
                for cnregion in cnv.fetch(chrom,start,end):
                    cn = float(cnregion.strip().split()[3]) # expect chrom,start,end,CN
                    sys.stdout.write("INFO\t" + now() + "\t" + mutid + "\t" + ' '.join(("copy number in snp region:",chrom,str(start),str(end),"=",str(cn))) + "\n")
                    if float(cn) > 0.0:
                        vaf = 1.0/float(cn)
                    else:
                        vaf = 0.0
                    sys.stdout.write("INFO\t" + now() + "\t" + mutid + "\tadjusted VAF: " + str(vaf) + "\n")
        else:
            sys.stdout.write("INFO\t" + now() + "\t" + mutid + "\tselected VAF: " + str(vaf) + "\n")

        lastread = int(len(readlist)*vaf)

        # pick at least args.minmutreads if possible
        if lastread < int(args.minmutreads):
            if len(readlist) > int(args.minmutreads):
                lastread = int(args.minmutreads)
                sys.stdout.write("WARN\t" + now() + "\t" + mutid + "\tforced " + str(lastread) + " reads.\n")
            else:
                print "WARN\t" + now() + "\t" + mutid + "\tdropped site with fewer reads than --minmutreads"
                os.remove(tmpoutbamname)
                return None

        readtrack = dd(list)

        for readname in readlist:
            orig_name, readpos, pairend = readname.split(',')
            readtrack[orig_name].append('%s,%s' % (readpos, pairend))

        usedreads = 0
        newreadlist = []

        for orig_name in readtrack:
            for read_instance in readtrack[orig_name]:
                newreadlist.append(orig_name + ',' + read_instance)
                usedreads += 1

            if usedreads >= lastread:
                break

        readlist = newreadlist

        print "INFO\t" + now() + "\t" + mutid + "\tpicked: " + str(len(readlist)) + " reads"

        wrote = 0
        nmut = 0
        mut_out = {}
        # change reads from .bam to mutated sequences
        for extqname,read in outreads.iteritems():
            if read.seq != mutreads[extqname]:
                if not args.nomut and extqname in readlist:
                    qual = read.qual # changing seq resets qual (see pysam API docs)
                    read.seq = mutreads[extqname] # make mutation
                    read.qual = qual
                    nmut += 1
            if not hasSNP or args.force:
                wrote += 1
                mut_out[extqname] = read

        muts_written = {}

        for extqname in mut_out:
            if extqname not in muts_written:
                outbam_muts.write(mut_out[extqname])
                muts_written[extqname] = True

                if mutmates[extqname] is not None:
                    # is mate also in mutated list?
                    mate_read = mutmates[extqname]

                    pairname = 'F' # read is first in pair
                    if mate_read.is_read2:
                        pairname = 'S' # read is second in pair
                    if not mate_read.is_paired:
                        pairname = 'U' # read is unpaired

                    mateqname = ','.join((mate_read.qname,str(mate_read.pos),pairname))

                    if mateqname in mut_out:
                        # yes: output mutated mate
                        outbam_muts.write(mut_out[mateqname])
                        muts_written[mateqname] = True

                    else:
                        # no: output original mate
                        outbam_muts.write(mate_read)

        print "INFO\t" + now() + "\t" + mutid + "\twrote: " + str(wrote) + " reads, mutated: " + str(nmut) + " reads"

        if not hasSNP or args.force:
            outbam_muts.close()
            aligners.remap_bam(args.aligner, tmpoutbamname, args.refFasta, alignopts, mutid=mutid, paired=(not args.single), picardjar=args.picardjar)

            outbam_muts = pysam.Samfile(tmpoutbamname,'rb')
            coverwindow = 1
            incover  = countReadCoverage(bamfile,chrom,mutpos-coverwindow,mutpos+del_ln+coverwindow)
            outcover = countReadCoverage(outbam_muts,chrom,mutpos-coverwindow,mutpos+del_ln+coverwindow)

            avgincover  = float(sum(incover))/float(len(incover)) 
            avgoutcover = float(sum(outcover))/float(len(outcover))
            spikein_frac = 0.0
            if wrote > 0:
                spikein_frac = float(nmut)/float(wrote)

            # qc cutoff for final snv depth 
            if (avgoutcover > 0 and avgincover > 0 and avgoutcover/avgincover >= float(args.coverdiff)) or args.force:
                tmpbams.append(tmpoutbamname)
                indelstr = ''
                if is_insertion:
                    indelstr = ':'.join(('INS', chrom, str(start), ins))
                else:
                    indelstr = ':'.join(('DEL', chrom, str(start), str(end)))

                snvstr = chrom + ":" + str(start) + "-" + str(end) + " (VAF=" + str(vaf) + ")"
                log.write("\t".join(("indel",indelstr,str(mutpos),mutstr,str(avgincover),str(avgoutcover),str(spikein_frac),str(maxfrac)))+"\n")
            else:
                outbam_muts.close()
                os.remove(tmpoutbamname)
                if os.path.exists(tmpoutbamname + '.bai'):
                    os.remove(tmpoutbamname + '.bai')
                    
                print "WARN\t" + now() + "\t" + mutid + "\tdropped for outcover/incover < " + str(args.coverdiff)
                return None

        outbam_muts.close()
        bamfile.close()
        bammate.close()
        log.close() 

        return sorted(tmpbams)
        
    except Exception, e:
        sys.stderr.write("*"*60 + "\nencountered error in mutation spikein: " + mutid + "\n")
        traceback.print_exc(file=sys.stdout)
        sys.stderr.write("*"*60 + "\n")
        if os.path.exists(tmpoutbamname):
            os.remove(tmpoutbamname)
        if os.path.exists(tmpoutbamname + '.bai'):
            os.remove(tmpoutbamname + '.bai')
        return None
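
The except block above frames the error with a banner of asterisks on stderr and prints the traceback. A minimal sketch of that pattern; the spikein function and mutation id are hypothetical placeholders. Note the original routes the traceback to stdout, whereas sending it to stderr (as below) keeps all diagnostics on one stream:

import sys
import traceback

def spikein(mutid):
    # Hypothetical failing operation, used only to trigger the handler below.
    raise RuntimeError("could not remap reads")

try:
    spikein("chr1_1000_1001_0.5")
except Exception:
    sys.stderr.write("*" * 60 + "\nencountered error in mutation spikein: chr1_1000_1001_0.5\n")
    traceback.print_exc(file=sys.stderr)
    sys.stderr.write("*" * 60 + "\n")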

Example 26

Project: bamsurgeon
Source File: addsnv.py
View license
def makemut(args, hc, avoid, alignopts):


    mutid_list = []
    for site in hc:
        mutid_list.append(site['chrom'] + '_' + str(site['start']) + '_' + str(site['end']) + '_' + str(site['vaf']) + '_' + str(site['altbase']))

    try:
        if args.seed is not None: random.seed(int(args.seed) + int(hc[0]['start']))

        bamfile = pysam.Samfile(args.bamFileName, 'rb')
        bammate = pysam.Samfile(args.bamFileName, 'rb') # use for mates to avoid iterator problems
        reffile = pysam.Fastafile(args.refFasta)
        tmpbams = []

        #snvfrac = float(args.snvfrac)

        chrom = None
        vaf   = None

        mutpos_list = []
        altbase_list = []
        
        for site in hc:
            if chrom is None:
                chrom = site['chrom']
            else:
                assert chrom == site['chrom'], "haplotype clusters cannot span multiple chromosomes!"

            if vaf is None:
                vaf = site['vaf']
                
            elif vaf != site['vaf']:
                sys.stderr.write("WARN\t" + now() + "\tmultiple VAFs for single haplotype, using first encountered VAF: " + str(vaf) + "\n")

            mutpos = int(random.uniform(site['start'],site['end']+1)) # position of mutation in genome
            mutpos_list.append(mutpos) # FIXME
            altbase_list.append(site['altbase'])

        mutbase_list = []
        refbase_list = []
        mutstr_list  = []

        for n, mutpos in enumerate(mutpos_list):
            refbase = reffile.fetch(chrom,mutpos-1,mutpos)
            altbase = altbase_list[n]
            refbase_list.append(refbase)

            if altbase == refbase.upper() and not args.ignoreref:
                sys.stderr.write("WARN\t" + now() + "\t" + mutid_list[n] + "\tspecified ALT base matches reference, skipping mutation\n")
                return None

            try:
                mutbase = mut(refbase, altbase)
                mutbase_list.append(mutbase)

            except ValueError as e:
                sys.stderr.write("WARN\t" + now() + "\t" + mutid_list[n] + "\t" + ' '.join(("skipped site:",chrom,str(hc[n]['start']),str(hc[n]['end']),"due to N base:",str(e),"\n")))
                return None

            mutstr_list.append(refbase + "-->" + str(mutbase))

        # optional CNV file
        cnv = None
        if (args.cnvfile):
            cnv = pysam.Tabixfile(args.cnvfile, 'r')

        hapstr = "_".join(('haplo',chrom,str(min(mutpos_list)),str(max(mutpos_list))))
        log = open('addsnv_logs_' + os.path.basename(args.outBamFile) + '/' + os.path.basename(args.outBamFile) + "." + hapstr + ".log",'w')

        tmpoutbamname = args.tmpdir + "/" + hapstr + ".tmpbam." + str(uuid4()) + ".bam"
        print "INFO\t" + now() + "\t" + hapstr + "\tcreating tmp bam: ",tmpoutbamname
        outbam_muts = pysam.Samfile(tmpoutbamname, 'wb', template=bamfile)

        mutfail, hasSNP, maxfrac, outreads, mutreads, mutmates = mutation.mutate(args, log, bamfile, bammate, chrom, min(mutpos_list), max(mutpos_list)+1, mutpos_list, avoid=avoid, mutid_list=mutid_list, is_snv=True, mutbase_list=mutbase_list, reffile=reffile)

        if mutfail:
            outbam_muts.close()
            os.remove(tmpoutbamname)
            return None

        # pick reads to change
        readlist = []
        for extqname,read in outreads.iteritems():
            if read.seq != mutreads[extqname]:
                readlist.append(extqname)

        print "INFO\t" + now() + "\t" + hapstr + "\tlen(readlist): " + str(len(readlist))
        readlist.sort()
        random.shuffle(readlist)

        if len(readlist) < int(args.mindepth):
            print "WARN\t" + now() + "\t" + hapstr + "\ttoo few reads in region (" + str(len(readlist)) + ") skipping..."
            outbam_muts.close()
            os.remove(tmpoutbamname)
            return None

        if vaf is None:
            vaf = float(args.mutfrac) # default minor allele freq if not otherwise specified
        if cnv: # cnv file is present
            if chrom in cnv.contigs:
                for cnregion in cnv.fetch(chrom,min(mutpos_list),max(mutpos_list)+1):
                    cn = float(cnregion.strip().split()[3]) # expect chrom,start,end,CN
                    print "INFO\t" + now() + "\t" + hapstr + "\t" + ' '.join(("copy number in snp region:",chrom,str(min(mutpos_list)),str(max(mutpos_list)),"=",str(cn))) + "\n"
                    if float(cn) > 0.0:
                        vaf = 1.0/float(cn)
                    else:
                        vaf = 0.0
                    print "adjusted VAF: " + str(vaf) + "\n"
        else:
            print "INFO\t" + now() + "\t" + hapstr + "\tselected VAF: " + str(vaf) + "\n"

        lastread = int(len(readlist)*vaf)

        # pick at least args.minmutreads if possible
        if lastread < int(args.minmutreads):
            if len(readlist) > int(args.minmutreads):
                lastread = int(args.minmutreads)
                sys.stdout.write("WARN\t" + now() + "\t" + hapstr + "\tforced " + str(lastread) + " reads.\n")
            else:
                print "WARN\t" + now() + "\t" + hapstr + "\tdropped site with fewer reads than --minmutreads"
                os.remove(tmpoutbamname)
                return None

        readtrack = dd(list)

        for readname in readlist:
            orig_name, readpos, pairend = readname.split(',')
            readtrack[orig_name].append('%s,%s' % (readpos, pairend))

        usedreads = 0
        newreadlist = []

        for orig_name in readtrack:
            for read_instance in readtrack[orig_name]:
                newreadlist.append(orig_name + ',' + read_instance)
                usedreads += 1

            if usedreads >= lastread:
                break

        readlist = newreadlist

        print "INFO\t" + now() + "\t" + hapstr + "\tpicked:",str(len(readlist))

        wrote = 0
        nmut = 0
        mut_out = {}
        # change reads from .bam to mutated sequences
        for extqname,read in outreads.iteritems():
            if read.seq != mutreads[extqname]:
                if not args.nomut and extqname in readlist:
                    qual = read.qual # changing seq resets qual (see pysam API docs)
                    read.seq = mutreads[extqname] # make mutation
                    read.qual = qual
                    nmut += 1
            if not hasSNP or args.force:
                wrote += 1
                mut_out[extqname] = read

        muts_written = {}

        for extqname in mut_out:
            if extqname not in muts_written:
                outbam_muts.write(mut_out[extqname])
                muts_written[extqname] = True

                if mutmates[extqname] is not None:
                    # is mate also in mutated list?
                    mate_read = mutmates[extqname]

                    pairname = 'F' # read is first in pair
                    if mate_read.is_read2:
                        pairname = 'S' # read is second in pair
                    if not mate_read.is_paired:
                        pairname = 'U' # read is unpaired

                    mateqname = ','.join((mate_read.qname,str(mate_read.pos),pairname))

                    if mateqname in mut_out:
                        # yes: output mutated mate
                        outbam_muts.write(mut_out[mateqname])
                        muts_written[mateqname] = True

                    else:
                        # no: output original mate
                        outbam_muts.write(mate_read)

        print "INFO\t" + now() + "\t" + hapstr + "\twrote: ",wrote,"mutated:",nmut

        if not hasSNP or args.force:
            outbam_muts.close()

            aligners.remap_bam(args.aligner, tmpoutbamname, args.refFasta, alignopts, mutid=hapstr, paired=(not args.single), picardjar=args.picardjar)

            outbam_muts = pysam.Samfile(tmpoutbamname,'rb')
            coverwindow = 1
            incover  = countReadCoverage(bamfile,chrom,min(mutpos_list)-coverwindow,max(mutpos_list)+coverwindow)
            outcover = countReadCoverage(outbam_muts,chrom,min(mutpos_list)-coverwindow,max(mutpos_list)+coverwindow)

            avgincover  = float(sum(incover))/float(len(incover)) 
            avgoutcover = float(sum(outcover))/float(len(outcover))

            print "INFO\t" + now() + "\t" + hapstr + "\tavgincover: " + str(avgincover) + " avgoutcover: " + str(avgoutcover)

            spikein_snvfrac = 0.0
            if wrote > 0:
                spikein_snvfrac = float(nmut)/float(wrote)

            # qc cutoff for final snv depth 
            if (avgoutcover > 0 and avgincover > 0 and avgoutcover/avgincover >= float(args.coverdiff)) or args.force:
                tmpbams.append(tmpoutbamname)
                for n,site in enumerate(hc):
                    snvstr = chrom + ":" + str(site['start']) + "-" + str(site['end']) + " (VAF=" + str(vaf) + ")"
                    log.write("\t".join(("snv",snvstr,str(mutpos_list[n]),mutstr_list[n],str(avgoutcover),str(avgoutcover),str(spikein_snvfrac),str(maxfrac)))+"\n")
            else:

                outbam_muts.close()
                os.remove(tmpoutbamname)
                if os.path.exists(tmpoutbamname + '.bai'):
                    os.remove(tmpoutbamname + '.bai')
                print "WARN\t" + now() + "\t" + hapstr + "\tdropped for outcover/incover < " + str(args.coverdiff)
                return None

        outbam_muts.close()
        bamfile.close()
        bammate.close()
        log.close() 

        return tmpbams

    except Exception, e:
        sys.stderr.write("*"*60 + "\nERROR\t" + now() + "\tencountered error in mutation spikein: " + str(mutid_list) + "\n")
        traceback.print_exc(file=sys.stdout)
        sys.stderr.write("*"*60 + "\n")
        if os.path.exists(tmpoutbamname):
            os.remove(tmpoutbamname)
        if os.path.exists(tmpoutbamname + '.bai'):
            os.remove(tmpoutbamname + '.bai')
        return None

Example 27

Project: cgstudiomap
Source File: miniterm.py
View license
def main():
    import optparse

    parser = optparse.OptionParser(
        usage = "%prog [options] [port [baudrate]]",
        description = "Miniterm - A simple terminal program for the serial port."
    )

    group = optparse.OptionGroup(parser, "Port settings")

    group.add_option("-p", "--port",
        dest = "port",
        help = "port, a number or a device name. (deprecated option, use parameter instead)",
        default = DEFAULT_PORT
    )

    group.add_option("-b", "--baud",
        dest = "baudrate",
        action = "store",
        type = 'int',
        help = "set baud rate, default %default",
        default = DEFAULT_BAUDRATE
    )

    group.add_option("--parity",
        dest = "parity",
        action = "store",
        help = "set parity, one of [N, E, O, S, M], default=N",
        default = 'N'
    )

    group.add_option("--rtscts",
        dest = "rtscts",
        action = "store_true",
        help = "enable RTS/CTS flow control (default off)",
        default = False
    )

    group.add_option("--xonxoff",
        dest = "xonxoff",
        action = "store_true",
        help = "enable software flow control (default off)",
        default = False
    )

    group.add_option("--rts",
        dest = "rts_state",
        action = "store",
        type = 'int',
        help = "set initial RTS line state (possible values: 0, 1)",
        default = DEFAULT_RTS
    )

    group.add_option("--dtr",
        dest = "dtr_state",
        action = "store",
        type = 'int',
        help = "set initial DTR line state (possible values: 0, 1)",
        default = DEFAULT_DTR
    )

    parser.add_option_group(group)

    group = optparse.OptionGroup(parser, "Data handling")

    group.add_option("-e", "--echo",
        dest = "echo",
        action = "store_true",
        help = "enable local echo (default off)",
        default = False
    )

    group.add_option("--cr",
        dest = "cr",
        action = "store_true",
        help = "do not send CR+LF, send CR only",
        default = False
    )

    group.add_option("--lf",
        dest = "lf",
        action = "store_true",
        help = "do not send CR+LF, send LF only",
        default = False
    )

    group.add_option("-D", "--debug",
        dest = "repr_mode",
        action = "count",
        help = """debug received data (escape non-printable chars)
--debug can be given multiple times:
0: just print what is received
1: escape non-printable characters, do newlines as usual
2: escape non-printable characters, newlines too
3: hex dump everything""",
        default = 0
    )

    parser.add_option_group(group)


    group = optparse.OptionGroup(parser, "Hotkeys")

    group.add_option("--exit-char",
        dest = "exit_char",
        action = "store",
        type = 'int',
        help = "ASCII code of special character that is used to exit the application",
        default = 0x1d
    )

    group.add_option("--menu-char",
        dest = "menu_char",
        action = "store",
        type = 'int',
        help = "ASCII code of special character that is used to control miniterm (menu)",
        default = 0x14
    )

    parser.add_option_group(group)

    group = optparse.OptionGroup(parser, "Diagnostics")

    group.add_option("-q", "--quiet",
        dest = "quiet",
        action = "store_true",
        help = "suppress non-error messages",
        default = False
    )

    parser.add_option_group(group)


    (options, args) = parser.parse_args()

    options.parity = options.parity.upper()
    if options.parity not in 'NEOSM':
        parser.error("invalid parity")

    if options.cr and options.lf:
        parser.error("only one of --cr or --lf can be specified")

    if options.menu_char == options.exit_char:
        parser.error('--exit-char can not be the same as --menu-char')

    global EXITCHARCTER, MENUCHARACTER
    EXITCHARCTER = chr(options.exit_char)
    MENUCHARACTER = chr(options.menu_char)

    port = options.port
    baudrate = options.baudrate
    if args:
        if options.port is not None:
            parser.error("no arguments are allowed, options only when --port is given")
        port = args.pop(0)
        if args:
            try:
                baudrate = int(args[0])
            except ValueError:
                parser.error("baud rate must be a number, not %r" % args[0])
            args.pop(0)
        if args:
            parser.error("too many arguments")
    else:
        # no port given on command line -> ask user now
        if port is None:
            dump_port_list()
            port = raw_input('Enter port name:')

    convert_outgoing = CONVERT_CRLF
    if options.cr:
        convert_outgoing = CONVERT_CR
    elif options.lf:
        convert_outgoing = CONVERT_LF

    try:
        miniterm = Miniterm(
            port,
            baudrate,
            options.parity,
            rtscts=options.rtscts,
            xonxoff=options.xonxoff,
            echo=options.echo,
            convert_outgoing=convert_outgoing,
            repr_mode=options.repr_mode,
        )
    except serial.SerialException, e:
        sys.stderr.write("could not open port %r: %s\n" % (port, e))
        sys.exit(1)

    if not options.quiet:
        sys.stderr.write('--- Miniterm on %s: %d,%s,%s,%s ---\n' % (
            miniterm.serial.portstr,
            miniterm.serial.baudrate,
            miniterm.serial.bytesize,
            miniterm.serial.parity,
            miniterm.serial.stopbits,
        ))
        sys.stderr.write('--- Quit: %s  |  Menu: %s | Help: %s followed by %s ---\n' % (
            key_description(EXITCHARCTER),
            key_description(MENUCHARACTER),
            key_description(MENUCHARACTER),
            key_description('\x08'),
        ))

    if options.dtr_state is not None:
        if not options.quiet:
            sys.stderr.write('--- forcing DTR %s\n' % (options.dtr_state and 'active' or 'inactive'))
        miniterm.serial.setDTR(options.dtr_state)
        miniterm.dtr_state = options.dtr_state
    if options.rts_state is not None:
        if not options.quiet:
            sys.stderr.write('--- forcing RTS %s\n' % (options.rts_state and 'active' or 'inactive'))
        miniterm.serial.setRTS(options.rts_state)
        miniterm.rts_state = options.rts_state

    console.setup()
    miniterm.start()
    try:
        miniterm.join(True)
    except KeyboardInterrupt:
        pass
    if not options.quiet:
        sys.stderr.write("\n--- exit ---\n")
    miniterm.join()
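
Distilled from the miniterm example: fatal CLI errors go to stderr followed by sys.exit(1), and non-error status lines also go to stderr (suppressible with a quiet flag) so stdout stays free for data. The port name and the use of open() in place of serial.Serial() are hypothetical:

import sys

quiet = False
port = "/dev/ttyUSB0"  # hypothetical device name

try:
    handle = open(port, "rb")  # stand-in for serial.Serial(port)
except IOError as e:
    sys.stderr.write("could not open port %r: %s\n" % (port, e))
    sys.exit(1)

if not quiet:
    # Status also goes to stderr so it can be silenced without losing data output.
    sys.stderr.write("--- opened %s ---\n" % port)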

Example 28

Project: cgstudiomap
Source File: miniterm.py
View license
def main():
    import optparse

    parser = optparse.OptionParser(
        usage = "%prog [options] [port [baudrate]]",
        description = "Miniterm - A simple terminal program for the serial port."
    )

    group = optparse.OptionGroup(parser, "Port settings")

    group.add_option("-p", "--port",
        dest = "port",
        help = "port, a number or a device name. (deprecated option, use parameter instead)",
        default = DEFAULT_PORT
    )

    group.add_option("-b", "--baud",
        dest = "baudrate",
        action = "store",
        type = 'int',
        help = "set baud rate, default %default",
        default = DEFAULT_BAUDRATE
    )

    group.add_option("--parity",
        dest = "parity",
        action = "store",
        help = "set parity, one of [N, E, O, S, M], default=N",
        default = 'N'
    )

    group.add_option("--rtscts",
        dest = "rtscts",
        action = "store_true",
        help = "enable RTS/CTS flow control (default off)",
        default = False
    )

    group.add_option("--xonxoff",
        dest = "xonxoff",
        action = "store_true",
        help = "enable software flow control (default off)",
        default = False
    )

    group.add_option("--rts",
        dest = "rts_state",
        action = "store",
        type = 'int',
        help = "set initial RTS line state (possible values: 0, 1)",
        default = DEFAULT_RTS
    )

    group.add_option("--dtr",
        dest = "dtr_state",
        action = "store",
        type = 'int',
        help = "set initial DTR line state (possible values: 0, 1)",
        default = DEFAULT_DTR
    )

    parser.add_option_group(group)

    group = optparse.OptionGroup(parser, "Data handling")

    group.add_option("-e", "--echo",
        dest = "echo",
        action = "store_true",
        help = "enable local echo (default off)",
        default = False
    )

    group.add_option("--cr",
        dest = "cr",
        action = "store_true",
        help = "do not send CR+LF, send CR only",
        default = False
    )

    group.add_option("--lf",
        dest = "lf",
        action = "store_true",
        help = "do not send CR+LF, send LF only",
        default = False
    )

    group.add_option("-D", "--debug",
        dest = "repr_mode",
        action = "count",
        help = """debug received data (escape non-printable chars)
--debug can be given multiple times:
0: just print what is received
1: escape non-printable characters, do newlines as usual
2: escape non-printable characters, newlines too
3: hex dump everything""",
        default = 0
    )

    parser.add_option_group(group)


    group = optparse.OptionGroup(parser, "Hotkeys")

    group.add_option("--exit-char",
        dest = "exit_char",
        action = "store",
        type = 'int',
        help = "ASCII code of special character that is used to exit the application",
        default = 0x1d
    )

    group.add_option("--menu-char",
        dest = "menu_char",
        action = "store",
        type = 'int',
        help = "ASCII code of special character that is used to control miniterm (menu)",
        default = 0x14
    )

    parser.add_option_group(group)

    group = optparse.OptionGroup(parser, "Diagnostics")

    group.add_option("-q", "--quiet",
        dest = "quiet",
        action = "store_true",
        help = "suppress non-error messages",
        default = False
    )

    parser.add_option_group(group)


    (options, args) = parser.parse_args()

    options.parity = options.parity.upper()
    if options.parity not in 'NEOSM':
        parser.error("invalid parity")

    if options.cr and options.lf:
        parser.error("only one of --cr or --lf can be specified")

    if options.menu_char == options.exit_char:
        parser.error('--exit-char can not be the same as --menu-char')

    global EXITCHARCTER, MENUCHARACTER
    EXITCHARCTER = chr(options.exit_char)
    MENUCHARACTER = chr(options.menu_char)

    port = options.port
    baudrate = options.baudrate
    if args:
        if options.port is not None:
            parser.error("no arguments are allowed, options only when --port is given")
        port = args.pop(0)
        if args:
            try:
                baudrate = int(args[0])
            except ValueError:
                parser.error("baud rate must be a number, not %r" % args[0])
            args.pop(0)
        if args:
            parser.error("too many arguments")
    else:
        # no port given on command line -> ask user now
        if port is None:
            dump_port_list()
            port = raw_input('Enter port name:')

    convert_outgoing = CONVERT_CRLF
    if options.cr:
        convert_outgoing = CONVERT_CR
    elif options.lf:
        convert_outgoing = CONVERT_LF

    try:
        miniterm = Miniterm(
            port,
            baudrate,
            options.parity,
            rtscts=options.rtscts,
            xonxoff=options.xonxoff,
            echo=options.echo,
            convert_outgoing=convert_outgoing,
            repr_mode=options.repr_mode,
        )
    except serial.SerialException, e:
        sys.stderr.write("could not open port %r: %s\n" % (port, e))
        sys.exit(1)

    if not options.quiet:
        sys.stderr.write('--- Miniterm on %s: %d,%s,%s,%s ---\n' % (
            miniterm.serial.portstr,
            miniterm.serial.baudrate,
            miniterm.serial.bytesize,
            miniterm.serial.parity,
            miniterm.serial.stopbits,
        ))
        sys.stderr.write('--- Quit: %s  |  Menu: %s | Help: %s followed by %s ---\n' % (
            key_description(EXITCHARCTER),
            key_description(MENUCHARACTER),
            key_description(MENUCHARACTER),
            key_description('\x08'),
        ))

    if options.dtr_state is not None:
        if not options.quiet:
            sys.stderr.write('--- forcing DTR %s\n' % (options.dtr_state and 'active' or 'inactive'))
        miniterm.serial.setDTR(options.dtr_state)
        miniterm.dtr_state = options.dtr_state
    if options.rts_state is not None:
        if not options.quiet:
            sys.stderr.write('--- forcing RTS %s\n' % (options.rts_state and 'active' or 'inactive'))
        miniterm.serial.setRTS(options.rts_state)
        miniterm.rts_state = options.rts_state

    console.setup()
    miniterm.start()
    try:
        miniterm.join(True)
    except KeyboardInterrupt:
        pass
    if not options.quiet:
        sys.stderr.write("\n--- exit ---\n")
    miniterm.join()

Example 29

Project: StrangeCase
Source File: __init__.py
View license
def strange_case(config):
    # pull out important values.
    config['site_path'] = site_path = os.path.abspath(config['site_path'])
    config['deploy_path'] = deploy_path = os.path.abspath(config['deploy_path'])

    # check for site/ folder (required)
    if not os.path.isdir(site_path):
        raise IOError('Could not find site_path folder "%s"' % site_path)

    # create the public/ folder
    if not os.path.isdir(config['deploy_path']):
        os.mkdir(config['deploy_path'])

    from strange_case.support.jinja import StrangeCaseEnvironment
    from plywood import PlywoodEnv, PlywoodFunction

    ##|
    ##|  EXTENSIONS
    ##|  these are Jinja2 extensions that get loaded into the Environment object
    ##|
    extensions = []
    if 'extensions' in config:
        for extension in config['extensions']:
            if isinstance(extension, basestring):
                try:
                    extension = fancy_import(extension)
                except ImportError:
                    sys.stderr.write('Error in processors: Could not find "%s"\n' % extension)
                    raise
            extensions.append(extension)
        del config['extensions']

    if not Registry.get('jinja_environment'):
        jinja_environment = StrangeCaseEnvironment(extensions=extensions, project_path=config['project_path'])
        Registry.set('jinja_environment', jinja_environment)
    else:
        jinja_environment = Registry.get('jinja_environment')

    if not Registry.get('plywood_environment'):
        plywood_environment = PlywoodEnv()
        Registry.set('plywood_environment', plywood_environment)
    else:
        plywood_environment = Registry.get('plywood_environment')

    ##|
    ##|  FILTERS
    ##|  Jinja2 filter functions (`{{ var|filter }}`).  These are inserted into
    ##|  the Environment object's `filter` property.
    ##|
    if 'filters' in config:
        for filter_name, method in config['filters'].iteritems():
            if isinstance(method, basestring):
                try:
                    method = fancy_import(method)
                except ImportError:
                    sys.stderr.write('Error in filters: Could not find "%s"\n' % method)
                    raise
            jinja_environment.filters[filter_name] = method
            if filter_name not in plywood_environment.scope:
                plywood_environment.scope[filter_name] = PlywoodFunction(method)
        del config['filters']

    ##|
    ##|  PROCESSORS
    ##|  A processor function registers itself using `Registry.register`, so
    ##|  all that is needed here is to load the module.
    ##|
    if 'processors' in config:
        for processor in config['processors']:
            try:
                fancy_import(processor)
            except ImportError:
                sys.stderr.write('Error in processors: Could not find "%s"\n' % processor)
                raise
        del config['processors']
    configurators = get_configurators(config)

    # register configurators - I broke this out into a separate function (below)
    Registry.reset_configurators()
    for configurator in configurators:
        Registry.add_configurator(configurator)

    # configurators can respond to the 'on_start' hook
    # skip_if_not_modified configurator uses this to read in the .timestamps
    # file, and strip_extensions makes sure that set_url is run before itself.
    for configurator in configurators:
        # configurators might be removed (?)
        if configurator in Registry.configurators:
            try:
                configurator.on_start(config)
            except AttributeError:
                pass

    # generic Registry hooks can listen for 'on_start'
    # category plugin uses this to reset when --watch is used
    Registry.trigger('on_start', config)

    # each node class should add files to these properties, so that watchdog and
    # stale-file-removal work.
    Node.files_written = []
    Node.files_tracked = []

    # create the list of existing files.  files that aren't generated will be
    # removed (unless dont_remove config is True)
    remove_stale_files = config['remove_stale_files']
    dont_remove = config['dont_remove']
    existing_files = []
    if os.path.isdir(deploy_path):
        existing_files = find_files(deploy_path)
    else:
        os.makedirs(deploy_path)

    # this is the one folder that *doesn't* get processed by
    # processors.build_page_tree - it needs special handling here.
    root_node = build_node(config, site_path, deploy_path, '')[0]
    Registry.set('root', root_node)
    root_node.generate()

    # configurators can respond to the 'on_finish' hook
    for configurator in Registry.configurators:
        try:
            configurator.on_finish(config)
        except AttributeError:
            pass

    if remove_stale_files and existing_files:
        paths = []
        for f in existing_files:
            if f not in Node.files_written:
                f = os.path.abspath(f)
                f_rel = os.path.relpath(f)
                if any(pattern for pattern in dont_remove if fnmatch(f, pattern)):
                    sys.stderr.write("\033[32mignoring\033[0m \033[1m" + f_rel + "\033[0m\n")
                    continue

                if os.path.isdir(f):
                    paths.insert(0, f)
                else:
                    sys.stderr.write("\033[31mrm\033[0m \033[1m" + f_rel + "\033[0m\n")
                    os.remove(f)
        # filter out directories that are not empty
        paths = [p for p in paths if not os.listdir(p)]
        for p in paths:
            p_rel = os.path.relpath(p)
            sys.stderr.write("\033[31mrmdir\033[0m \033[1m" + p_rel + "\033[0m\n")
            os.removedirs(p)
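
The stale-file cleanup above colours its stderr messages with ANSI escape sequences so the action stands out in a terminal: "\033[31m" (red) for rm/rmdir, "\033[32m" (green) for ignored paths, "\033[1m" (bold) for the path, and "\033[0m" to reset. A minimal sketch of the same colouring; the helper name and paths are hypothetical:

import sys

def log_action(verb, path, color='31'):
    # color '31' is red, '32' is green; '1' bolds the path and '0' resets,
    # the same escape codes used in the example above.
    sys.stderr.write("\033[%sm%s\033[0m \033[1m%s\033[0m\n" % (color, verb, path))

# log_action("rm", "public/old-page.html")          # red verb, bold path
# log_action("ignoring", "public/notes.txt", '32')  # green verb, bold path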

Example 30

Project: StrangeCase
Source File: __main__.py
View license
def run():
    import logging
    logging.basicConfig()

    import argparse
    parser = argparse.ArgumentParser(description=__doc__.splitlines()[0])
    parser.add_argument('-w', '--watch', dest='watch', action='store_const',
                       const=True, default=False,
                       help='watch the site_path for changes')
    conf_overrides = [
        'project_path',
        'site_path',
        'deploy_path',
        'remove_stale_files',
        'config_file',
        '__verbose',
    ]
    parser.add_argument('-x', '--exclude', nargs='*', dest='exclude_paths', default=None)
    parser.add_argument('-p', '--project', dest='project_path')
    parser.add_argument('-s', '--site', dest='site_path')
    parser.add_argument('-d', '--deploy', dest='deploy_path')
    parser.add_argument('-r', '--remove', dest='remove_stale_files', action='store_true', default=None)
    parser.add_argument('-n', '--no-remove', dest='remove_stale_files', action='store_false', default=None)
    parser.add_argument('-c', '--config', dest='config_file')
    parser.add_argument('-v', '--verbose', dest='__verbose', action='store_true', default=False)
    parser.add_argument('--serve', dest='port', nargs="?", type=int, default=argparse.SUPPRESS, const=8000)
    parser.add_argument('configs', nargs='*')
    args = parser.parse_args()

    if args.project_path:
        if args.project_path[0] == '~':
            project_path = os.path.expanduser(args.project_path)
        else:
            project_path = os.path.abspath(args.project_path)
    else:
        project_path = os.getcwd()

    # config section catches assertion errors and prints them as error messages
    from strange_case.strange_case_config import CONFIG
    CONFIG['project_path'] = project_path

    if 'site_path' not in CONFIG:
        CONFIG['site_path'] = os.path.join(project_path, u'site/')

    if 'deploy_path' not in CONFIG:
        CONFIG['deploy_path'] = os.path.join(project_path, u'public/')

    # normalize paths
    for conf in ['site_path', 'deploy_path']:
        if CONFIG[conf][0] == '~':
            CONFIG[conf] = os.path.expanduser(CONFIG[conf])
        elif CONFIG[conf][0] == '.':
            CONFIG[conf] = os.path.abspath(CONFIG[conf])

    # now we can look for the app config
    if os.path.isfile(os.path.join(project_path, 'config.py')):
        config_module = imp.load_source('config', os.path.join(project_path, 'config.py'))
        if hasattr(config_module, 'CONFIG'):
            CONFIG.update(config_module.CONFIG)

    config_path = os.path.join(project_path, CONFIG['config_file'])

    if os.path.isfile(config_path):
        with open(config_path, 'r') as config_file:
            yaml_config = yaml.load(config_file)
        if yaml_config:
            CONFIG.update(yaml_config)

    for conf in conf_overrides:
        if getattr(args, conf) is not None:
            CONFIG[conf] = getattr(args, conf)

    for conf in args.configs:
        if ':' not in conf:
            raise TypeError('Cannot read config "{0}". Does not contain a ":"'.format(conf))
        key, val = conf.split(':', 1)
        assign = CONFIG
        while ('.' in key) or ('[' in key and ']' in key):
            if '.' in key:
                dot = key.index('.')
                assign_key = key[:dot]
                key = key[dot + 1:]
            elif key[0] == '[':
                closing_bracket = key.index(']')
                assign_key = key[1:closing_bracket]
                key = key[closing_bracket + 1:]
            else:
                opening_bracket = key.index('[')
                assign_key = key[:opening_bracket]
                key = key[opening_bracket:]
            assign = assign.setdefault(assign_key, {})
        assign[key] = val

    if CONFIG['config_hook']:
        CONFIG['config_hook'](CONFIG)
        del CONFIG['config_hook']

    try:
        assert CONFIG['project_path'], "project_path is required"
        assert CONFIG['site_path'], "site_path is required"
        assert CONFIG['deploy_path'], "deploy_path is required"
    except AssertionError as e:
        sys.stderr.write("\033[1;31mError:\033[0m \033[1m" + str(e) + "\033[0m\n")
        return

    if args.watch:
        import time
        from watchdog.observers import Observer
        from watchdog.events import FileSystemEventHandler

        class Regenerate(FileSystemEventHandler):
            last_run = None

            def on_any_event(self, event, alert=True):
                if self.last_run and time.time() - self.last_run < .1:
                    return

                if alert:
                    sys.stderr.write("Change detected.  Running StrangeCase\n")
                try:
                    strange_case(CONFIG)
                except Exception as e:
                    exc_type, exc_value, exc_traceback = sys.exc_info()
                    traceback.print_exception(exc_type, exc_value, exc_traceback, file=sys.stderr)
                    sys.stderr.write("Error (%s): %s\n" % (type(e).__name__, str(e)))
                else:
                    sys.stderr.write("StrangeCase generated at %i\n" % int(time.time()))
                self.last_run = time.time()

        exclude_paths = [
            os.path.abspath('.git'),
            os.path.abspath('.hg'),
            os.path.abspath('.svn'),
            os.path.abspath(CONFIG['deploy_path']),
        ]
        if args.exclude_paths:
            exclude_paths.extend([os.path.abspath(path) for path in args.exclude_paths])

        observer = Observer()
        handler = Regenerate()
        for path in os.listdir(project_path):
            path = os.path.abspath(path)
            if os.path.isdir(path) and path not in exclude_paths:
                sys.stderr.write('Watching "%s" for changes\n' % path)
                observer.schedule(handler, path=path, recursive=True)
        observer.start()
        try:
            handler.on_any_event(None, False)  # run the first time, no alert
            while True:
                time.sleep(1)
        except KeyboardInterrupt:
            sys.stderr.write("Stopping\n")
            observer.stop()
        observer.join()
    else:
        strange_case(CONFIG)

    if hasattr(args, 'port'):
        import SimpleHTTPServer
        import SocketServer

        args.port = args.port

        os.chdir(CONFIG['deploy_path'])
        Handler = SimpleHTTPServer.SimpleHTTPRequestHandler

        httpd = SocketServer.TCPServer(("", args.port), Handler)

        sys.stderr.write("serving at http://localhost:{port}\n".format(port=args.port))
        httpd.serve_forever()
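
When a rebuild fails inside the --watch loop above, the handler prints the full traceback followed by a one-line summary to stderr and then keeps watching instead of crashing. A minimal sketch of that reporting step; regenerate is a placeholder for whatever work might fail:

import sys
import traceback

def run_safely(regenerate):
    try:
        regenerate()
    except Exception as e:
        # Full traceback first, then a compact one-liner, both on stderr,
        # so the surrounding loop can carry on after the failure.
        traceback.print_exc(file=sys.stderr)
        sys.stderr.write("Error (%s): %s\n" % (type(e).__name__, str(e)))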

Example 31

Project: PipelineConstructionSet
Source File: files.py
View license
def batchExport(sourcePath, destPath, toFileType='fbx', recursively=True, matchDirectorySructure=True, **keywords):
	"""
	Batch export files in one location to another location. Returns True on
	success. Only intended for use in standalone mode; otherwise FBX settings
	are not respected. Not all FBX Export settings are currently supported. Not
	all versions of Maya have been completely tested; please tell me if you
	find errors in old versions.
	@param filePatterns List corresponding to different file extensions to parse
	@param filterPrefix List corresponding to required prefixes in file names
	@param filterSuffix List corresponding to required suffixes in file names
	@param filterContains List corresponding to required string in file names
	@param addPrefix String to prefix on all exported files
	@param addSuffix String to suffix on all exported files
	@param bakeAll String specifying object types on which to bake animation
	@param bakeAllRange Tuple or list specifying a frame range
	@param maVersion Version of Maya to set for .ma exports
	@param fbxVersion FBXFileVersion string
	@param fbxAscii FBXExportInAscii
	@param fbxBake FBXExportBakeComplexAnimation boolean
	@param fbxBakeStart FBXExportBakeComplexStart float
	@param fbxBakeEnd FBXExportBakeComplexEnd
	@param fbxAnimOnly FBXExportAnimationOnly boolean
	@param fbxConstraints FBXExportConstraints boolean
	@param fbxUnit FBXConvertUnitString and FBXExportConvertUnitString
	@param fbxUpAxis FBXExportUpAxis
	@param fbxAxisConvert FBXExportAxisConversionMethod
	@param fbxScale FBXExportScaleFactor
	"""
	# make sure that the file type string comes in as expected
	try:
		toFileType = toFileType.lower()
		toFileType = toFileType[toFileType.rfind('.')+1:len(toFileType)]
	except: raise
	
	if toFileType == 'fbx':
		# early out if the plug-in has not been loaded
		try: cmds.FBXExport
		except:
			try: cmds.loadPlugin('fbxmaya')
			except:
				sys.stderr.write('ERROR: FBX Export Plug-in was not detected.\n')
				return False
		# configure FBX export settings
		try: mel.eval('FBXExportFileVersion %s'%keywords['fbxVersion']) # absent before Maya 2009, but should simply print a benign error message
		except: pass
		try: mel.eval('FBXExportAscii %s'%keywords['fbxAscii'].__str__().lower())
		except: pass
		try:
			mel.eval('FBXExportBakeComplexAnimation -v %s'%keywords['fbxBake'].__str__().lower())
			try: mel.eval('FBXExportBakeComplexStart -v %s'%keywords['fbxBakeStart'])
			except: mel.eval('FBXExportBakeComplexStart -v %i'%math.floor(cmds.playbackOptions(q=True, min=True)))
			try: mel.eval('FBXExportBakeComplexEnd -v %s'%keywords['fbxBakeEnd'])
			except: mel.eval('FBXExportBakeComplexEnd -v %i'%math.ceil(cmds.playbackOptions(q=True, max=True)))
		except: pass
		try: mel.eval('FBXExportAnimationOnly -v %s'%keywords['fbxAnimOnly'].__str__().lower())
		except: pass
		try: mel.eval('FBXExportConstraints -v %s'%keywords['fbxConstraints'].__str__().lower())
		except: pass
		try: mel.eval('FBXExportConvertUnitString -v %s'%keywords['fbxUnit'])
		except:
			try: mel.eval('FBXConvertUnitString -v %s'%keywords['fbxUnit'])
			except: pass
		try: mel.eval('FBXExportUpAxis -v %s'%keywords['fbxUpAxis'])
		except: pass
		try: mel.eval('FBXExportAxisConversionMethod -v %s'%keywords['fbxAxisConvert'])
		except: pass
		try: mel.eval('FBXExportScaleFactor -v %s'%keywords['fbxScale'])
		except: pass
	elif toFileType == 'ma':
		try:
			maVersion = keywords['maVersion']
			maVersion = convertValidTypesToStringList(maVersion, [types.StringTypes, types.IntType, types.FloatType])
			maVersion = maVersion[0]
		except: maVersion = None
	else:
		sys.stderr.write('ERROR: %s is not a supported file type.\n'%toFileType)
		return False
	
	# parse the keywords
	try: filePatterns = keywords['filePatterns']
	except: filePatterns = ['mb', 'ma']
	try: filterPrefix = keywords['filterPrefix']
	except: filterPrefix = ['']
	try: filterSuffix = keywords['filterSuffix']
	except: filterSuffix = ['']
	try: filterContains = keywords['filterContains']
	except: filterContains = ['']
	try: addPrefix = '%s'%keywords['addPrefix']
	except: addPrefix = ''
	try: addSuffix = '%s'%keywords['addSuffix']
	except: addSuffix = ''
	try:
		bakeAll = keywords['bakeAll']
		if not isinstance(bakeAll, types.StringTypes):
			sys.stderr.write("WARNING: Invalid argument %s specified for bakeAll. Type 'dag' is being used instead.\n"%bakeAll)
			bakeAll = 'dag'
	except: bakeAll = None
	try:
		bakeAllRange = keywords['bakeAllRange']
		if not isinstance(bakeAllRange, (types.ListType, types.TupleType)) or not len(bakeAllRange) == 2:
			sys.stderr.write('WARNING: Invalid argument %s specified for bakeAllRange. Timeline is being used instead.\n'%bakeAllRange)
			bakeAllRange = None
	except: bakeAllRange = None
	
	# create the destination folders
	if matchDirectorySructure:
		if copyDirectoryStructure(sourcePath, destPath, False) == False: return False
	else:
		try: os.listdir(destPath)
		except: 
			try: os.makedirs(destPath)
			except: return False
	
	# get a list of all of the source files
	files = find(sourcePath, recursively, False, paths='relative', filePatterns=filePatterns, filterPrefix=filterPrefix, filterSuffix=filterSuffix, filterContains=filterContains)
	
	# export an fbx for each file
	for file in files:
		# build the export path and filename
		relativeLocation = ''
		filename = file[0:file.rfind('.')]
		if file.rfind('/') > -1:
			if matchDirectorySructure: relativeLocation = file[0:file.rfind('/')+1]
			filename = file[file.rfind('/')+1:file.rfind('.')]
		exportAsString = '%s/%s%s%s%s.%s'%(destPath, relativeLocation, addPrefix, filename, addSuffix, toFileType)
		try:
			# open the source file
			cmds.file('%s/%s'%(sourcePath, file), o=True, force=True)
			# export to the proper location
			try:
				if bakeAll:
					try:
						if not bakeAllRange: cmds.bakeResults(cmds.ls(type=bakeAll), sm=True, time=(math.floor(cmds.playbackOptions(q=True, min=True)),math.ceil(cmds.playbackOptions(q=True, max=True))))
						else: cmds.bakeResults(cmds.ls(type=bakeAll), sm=True, time=bakeAllRange)
					except: sys.stderr.write('WARNING: Unable to bake objects of type %s in file %s.\n'%(bakeAll, file))
					try:
						for object in cmds.ls(type=bakeAll):
							amTools.utilities.animation.smoothAnimCurves(object, False)
					except: sys.stderr.write('WARNING: Unable to correct tangents on baked objects of type %s in file %s.\n'%(bakeAll, file))
				if toFileType == 'fbx': mel.eval('FBXExport -f "%s";'%exportAsString) # TODO: failing on patched .ma files
				elif toFileType == 'ma':
					cmds.file(exportAsString, ea=True, type='mayaAscii', force=True)
					patchMayaAsciiFile(exportAsString, maVersion)
			except: sys.stderr.write('WARNING: Unable to export the file %s. It is being skipped.\n'%file[file.rfind('/')+1:len(file)])
		except: sys.stderr.write('WARNING: Unable to read the file %s. It is being skipped.\n'%file[file.rfind('/')+1:len(file)])
	
	return True
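
The batch exporter above never aborts the whole run because one file is bad; it writes a WARNING line to stderr and moves on to the next file, and only returns False for setup problems such as a missing plug-in. A minimal sketch of that warn-and-continue loop; export_one is a hypothetical stand-in for the per-file work:

import sys

def export_batch(files, export_one):
    for name in files:
        try:
            export_one(name)
        except Exception:
            # Record the failure on stderr and keep going with the rest of
            # the batch instead of raising.
            sys.stderr.write('WARNING: Unable to export the file %s. It is being skipped.\n' % name)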

Example 32

Project: ntdsxtract
Source File: dsdatabase.py
View license
def dsBuildMaps(dsDatabase, workdir):
    
    global dsMapOffsetByLineId
    global dsMapLineIdByRecordId
    global dsMapRecordIdByName
    global dsMapTypeByRecordId
    global dsMapChildsByRecordId
    global dsMapRecordIdBySID
    global dsMapRecordIdByGUID
    global dsSchemaTypeId
        
    lineid = 0
    while True:
        sys.stderr.write("\r[+] Scanning database - %d%% -> %d records processed" % (
                                            dsDatabase.tell()*100/dsDatabaseSize,
                                            lineid+1
                                            ))
        sys.stderr.flush()
        try:
            dsMapOffsetByLineId[lineid] = dsDatabase.tell()
        except:
            sys.stderr.write("\n[!] Warning! Error at dsMapOffsetByLineId!\n")
            pass
        line = dsDatabase.readline()
        if line == "":
            break
        record = line.split('\t')
        if lineid != 0:
            #===================================================================
            # This record will always be the record representing the domain
            # object
            # This should be the only record containing the PEK
            #===================================================================
            if record[ntds.dsfielddictionary.dsPEKIndex] != "":
                if ntds.dsfielddictionary.dsEncryptedPEK != "":
                    sys.stderr.write("\n[!] Warning! Multiple records with PEK entry!\n")
                ntds.dsfielddictionary.dsEncryptedPEK = record[ntds.dsfielddictionary.dsPEKIndex]
                
            try:
                dsMapLineIdByRecordId[int(record[ntds.dsfielddictionary.dsRecordIdIndex])] = lineid
            except:
                sys.stderr.write("\n[!] Warning! Error at dsMapLineIdByRecordId!\n")
                pass
            
            try:
                tmp = dsMapRecordIdByName[record[ntds.dsfielddictionary.dsObjectName2Index]]
                # Also save the Schema type id for future use
                if record[ntds.dsfielddictionary.dsObjectName2Index] == "Schema":
                    if dsSchemaTypeId == -1 and record[ntds.dsfielddictionary.dsObjectTypeIdIndex] != "":
                        dsSchemaTypeId = int(record[ntds.dsfielddictionary.dsObjectTypeIdIndex])
                    else:
                        sys.stderr.write("\n[!] Warning! There is more than one Schema object! The DB is inconsistent!\n")
            except:
                dsMapRecordIdByName[record[ntds.dsfielddictionary.dsObjectName2Index]] = int(record[ntds.dsfielddictionary.dsRecordIdIndex])
                if record[ntds.dsfielddictionary.dsObjectName2Index] == "Schema":
                    if dsSchemaTypeId == -1 and record[ntds.dsfielddictionary.dsObjectTypeIdIndex] != "":
                        dsSchemaTypeId = int(record[ntds.dsfielddictionary.dsObjectTypeIdIndex])
                    else:
                        sys.stderr.write("\n[!] Warning! There is more than one Schema object! The DB is inconsistent!\n")
                pass
            
            try:
                dsMapTypeByRecordId[int(record[ntds.dsfielddictionary.dsRecordIdIndex])] = record[ntds.dsfielddictionary.dsObjectTypeIdIndex]
            except:
                sys.stderr.write("\n[!] Warning! Error at dsMapTypeByRecordId!\n")
                pass
            
            try:
                tmp = dsMapChildsByRecordId[int(record[ntds.dsfielddictionary.dsRecordIdIndex])]
            except KeyError:
                dsMapChildsByRecordId[int(record[ntds.dsfielddictionary.dsRecordIdIndex])] = []
                pass
            
            try:
                dsMapChildsByRecordId[int(record[ntds.dsfielddictionary.dsParentRecordIdIndex])].append(int(record[ntds.dsfielddictionary.dsRecordIdIndex]))
            except KeyError:
                dsMapChildsByRecordId[int(record[ntds.dsfielddictionary.dsParentRecordIdIndex])] = []
                dsMapChildsByRecordId[int(record[ntds.dsfielddictionary.dsParentRecordIdIndex])].append(int(record[ntds.dsfielddictionary.dsRecordIdIndex]))
            
            try:
                dsMapRecordIdBySID[str(SID(record[ntds.dsfielddictionary.dsSIDIndex]))]
            except KeyError:
                dsMapRecordIdBySID[str(SID(record[ntds.dsfielddictionary.dsSIDIndex]))] = int(record[ntds.dsfielddictionary.dsRecordIdIndex])

            try:
                dsMapRecordIdByGUID[str(GUID(record[ntds.dsfielddictionary.dsObjectGUIDIndex]))]
            except KeyError:
                dsMapRecordIdByGUID[str(GUID(record[ntds.dsfielddictionary.dsObjectGUIDIndex]))] = int(record[ntds.dsfielddictionary.dsRecordIdIndex])

            try:
                if record[ntds.dsfielddictionary.dsObjectTypeIdIndex] != "":
                    dsMapRecordIdByTypeId[int(record[ntds.dsfielddictionary.dsObjectTypeIdIndex])].append(int(record[ntds.dsfielddictionary.dsRecordIdIndex]))
            except KeyError:
                dsMapRecordIdByTypeId[int(record[ntds.dsfielddictionary.dsObjectTypeIdIndex])] = []
                dsMapRecordIdByTypeId[int(record[ntds.dsfielddictionary.dsObjectTypeIdIndex])].append(int(record[ntds.dsfielddictionary.dsRecordIdIndex]))
                
        lineid += 1
    sys.stderr.write("\n")
    
    offlid = open(path.join(workdir, "offlid.map"), "wb")
    pickle.dump(dsMapOffsetByLineId, offlid)
    offlid.close()
    
    lidrid = open(path.join(workdir, "lidrid.map"), "wb")
    pickle.dump(dsMapLineIdByRecordId, lidrid)
    lidrid.close()
    
    ridname = open(path.join(workdir, "ridname.map"), "wb")
    pickle.dump(dsMapRecordIdByName, ridname)
    ridname.close()
    
    typerid = open(path.join(workdir, "typerid.map"), "wb")
    pickle.dump(dsMapTypeByRecordId, typerid)
    typerid.close()
    
    childsrid = open(path.join(workdir, "childsrid.map"), "wb")
    pickle.dump(dsMapChildsByRecordId, childsrid)
    childsrid.close()
    
    pek = open(path.join(workdir, "pek.map"), "wb")
    pek.write(ntds.dsfielddictionary.dsEncryptedPEK)
    pek.close()
    
    ridsid = open(path.join(workdir, "ridsid.map"), "wb")
    pickle.dump(dsMapRecordIdBySID, ridsid)
    ridsid.close()
    
    ridguid = open(path.join(workdir, "ridguid.map"), "wb")
    pickle.dump(dsMapRecordIdByGUID, ridguid)
    ridguid.close()
    
    ridtype = open(path.join(workdir, "ridtype.map"), "wb")
    pickle.dump(dsMapRecordIdByTypeId, ridtype)
    ridtype.close()
    
    dsBuildTypeMap(dsDatabase, workdir)
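
The scan above redraws a single progress line by starting each stderr write with a carriage return and flushing straight away, then moves off the line with a plain "\n" once the loop is done. A minimal sketch of that technique with made-up counters:

import sys

def report_progress(done, total):
    # "\r" returns the cursor to the start of the line so the next write
    # overwrites it; flush() pushes the text out immediately in case stderr
    # is buffered (for example when redirected to a file).
    sys.stderr.write("\r[+] Scanning - %d%% -> %d records processed" % (done * 100 // total, done))
    sys.stderr.flush()

# for i in range(1, 1001):
#     report_progress(i, 1000)
# sys.stderr.write("\n")  # leave the progress line behind when finished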

Example 33

Project: cython
Source File: CmdLine.py
View license
def parse_command_line(args):
    from .Main import CompilationOptions, default_options

    pending_arg = []

    def pop_arg():
        if not args or pending_arg:
            bad_usage()
        if '=' in args[0] and args[0].startswith('--'):  # allow "--long-option=xyz"
            name, value = args.pop(0).split('=', 1)
            pending_arg.append(value)
            return name
        return args.pop(0)

    def pop_value(default=None):
        if pending_arg:
            return pending_arg.pop()
        elif default is not None:
            return default
        elif not args:
            bad_usage()
        return args.pop(0)

    def get_param(option):
        tail = option[2:]
        if tail:
            return tail
        else:
            return pop_arg()

    options = CompilationOptions(default_options)
    sources = []
    while args:
        if args[0].startswith("-"):
            option = pop_arg()
            if option in ("-V", "--version"):
                options.show_version = 1
            elif option in ("-l", "--create-listing"):
                options.use_listing_file = 1
            elif option in ("-+", "--cplus"):
                options.cplus = 1
            elif option == "--embed":
                Options.embed = pop_value("main")
            elif option.startswith("-I"):
                options.include_path.append(get_param(option))
            elif option == "--include-dir":
                options.include_path.append(pop_value())
            elif option in ("-w", "--working"):
                options.working_path = pop_value()
            elif option in ("-o", "--output-file"):
                options.output_file = pop_value()
            elif option in ("-t", "--timestamps"):
                options.timestamps = 1
            elif option in ("-f", "--force"):
                options.timestamps = 0
            elif option in ("-v", "--verbose"):
                options.verbose += 1
            elif option in ("-p", "--embed-positions"):
                Options.embed_pos_in_docstring = 1
            elif option in ("-z", "--pre-import"):
                Options.pre_import = pop_value()
            elif option == "--cleanup":
                Options.generate_cleanup_code = int(pop_value())
            elif option in ("-D", "--no-docstrings"):
                Options.docstrings = False
            elif option in ("-a", "--annotate"):
                Options.annotate = True
            elif option == "--annotate-coverage":
                Options.annotate = True
                Options.annotate_coverage_xml = pop_value()
            elif option == "--convert-range":
                Options.convert_range = True
            elif option == "--line-directives":
                options.emit_linenums = True
            elif option == "--no-c-in-traceback":
                options.c_line_in_traceback = False
            elif option == "--gdb":
                options.gdb_debug = True
                options.output_dir = os.curdir
            elif option == "--gdb-outdir":
                options.gdb_debug = True
                options.output_dir = pop_value()
            elif option == "--lenient":
                Options.error_on_unknown_names = False
                Options.error_on_uninitialized = False
            elif option == '-2':
                options.language_level = 2
            elif option == '-3':
                options.language_level = 3
            elif option == "--capi-reexport-cincludes":
                options.capi_reexport_cincludes = True
            elif option == "--fast-fail":
                Options.fast_fail = True
            elif option in ('-Werror', '--warning-errors'):
                Options.warning_errors = True
            elif option in ('-Wextra', '--warning-extra'):
                options.compiler_directives.update(Options.extra_warnings)
            elif option == "--old-style-globals":
                Options.old_style_globals = True
            elif option == "--directive" or option.startswith('-X'):
                if option.startswith('-X') and option[2:].strip():
                    x_args = option[2:]
                else:
                    x_args = pop_value()
                try:
                    options.compiler_directives = Options.parse_directive_list(
                        x_args, relaxed_bool=True,
                        current_settings=options.compiler_directives)
                except ValueError as e:
                    sys.stderr.write("Error in compiler directive: %s\n" % e.args[0])
                    sys.exit(1)
            elif option.startswith('--debug'):
                option = option[2:].replace('-', '_')
                from . import DebugFlags
                if option in dir(DebugFlags):
                    setattr(DebugFlags, option, True)
                else:
                    sys.stderr.write("Unknown debug flag: %s\n" % option)
                    bad_usage()
            elif option in ('-h', '--help'):
                sys.stdout.write(usage)
                sys.exit(0)
            else:
                sys.stderr.write("Unknown compiler flag: %s\n" % option)
                sys.exit(1)
        else:
            sources.append(pop_arg())

    if pending_arg:
        bad_usage()

    if options.use_listing_file and len(sources) > 1:
        sys.stderr.write(
            "cython: Only one source file allowed when using -o\n")
        sys.exit(1)
    if len(sources) == 0 and not options.show_version:
        bad_usage()
    if Options.embed and len(sources) > 1:
        sys.stderr.write(
            "cython: Only one source file allowed when using -embed\n")
        sys.exit(1)
    return options, sources
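
The command-line parser above keeps the two output streams apart: help text is written to stdout and exits 0, while unknown flags, bad directives and usage errors are written to stderr before exiting 1, so error text stays visible even when stdout is redirected. A minimal sketch of the same convention; USAGE and the flag handling are invented for illustration:

import sys

USAGE = "usage: mytool [options] source...\n"

def main(argv):
    if "-h" in argv or "--help" in argv:
        sys.stdout.write(USAGE)    # help is normal output
        sys.exit(0)
    if not argv:
        sys.stderr.write(USAGE)    # misuse is an error
        sys.exit(1)
    for arg in argv:
        if arg.startswith("-"):
            sys.stderr.write("Unknown flag: %s\n" % arg)
            sys.exit(1)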

Example 34

Project: tz.js
Source File: build-tests.py
View license
def output_tests(source_prefix, zdump_command, io):
    all_zones = list(generate_zones(source_prefix))

    io.write("""<!DOCTYPE HTML>
<title>tz.js tests (generated by """ + __file__ + """)</title>
<script src="tz.js"></script>
<pre id="output"></pre>
<script>
var output_node = document.createTextNode("");
document.getElementById("output").appendChild(output_node);
function print(s)
{
    output_node.appendData(s + "\\n");
}

var pass_count = 0, fail_count = 0;

function assert(cond, description)
{
    if (cond) {
        ++pass_count;
    } else {
        ++fail_count;
        print("FAIL: " + description);
    }
}

function is(value, expected, description)
{
    assert(value == expected,
           description + ":  " + value + " should equal " + expected);
}

function check_offset(zone, d, utcoff, abbr)
{
    var z = tz.zoneAt(zone, new Date(d * 1000));
    is(z.offset, utcoff, zone + " at " + d);
    is(z.abbr, abbr, zone + " at " + d);
}

/*
 * Check a few non-round-second values, since the tests below are largely round.
 *
 * The last two could become invalid in the future.
 */
check_offset("America/Los_Angeles", 1300010399.999, -28800, "PST");
check_offset("America/Los_Angeles", 1300010400.001, -25200, "PDT");
check_offset("America/Los_Angeles", 1308469553.734, -25200, "PDT");
check_offset("America/Los_Angeles", 2519888399.999, -25200, "PDT");
check_offset("America/Los_Angeles", 2519888400.001, -28800, "PST");

/*
 * Use the same values to test Etc/UTC, which we don't otherwise test.
 */
check_offset("Etc/UTC", 1300010399.999, 0, "UTC");
check_offset("Etc/UTC", 1300010400, 0, "UTC");
check_offset("Etc/UTC", 1300010400.001, 0, "UTC");
check_offset("Etc/UTC", 1308469553.734, 0, "UTC");
check_offset("Etc/UTC", 2519888399.999, 0, "UTC");
check_offset("Etc/UTC", 2519888400, 0, "UTC");
check_offset("Etc/UTC", 2519888400.001, 0, "UTC");
""")

    def output_check_offset(zone, d, utcoff, abbr):
        io.write("check_offset(\"{0}\", {1}, {2}, \"{3}\");\n" \
                   .format(zone, d, utcoff, abbr));

    date_zone_re = re.compile("^([^ ]*) ([+-])(\d{2}):(\d{2}):(\d{2})$")
    def write_expected(time):
        return "@" + str(math.trunc(time))
    def read_expected(dateprocess):
        (abbr, sign, hours, mins, secs) = date_zone_re.match(
            dateprocess.stdout.readline().rstrip("\n")).groups()
        utcoff = ((sign == "+") * 2 - 1) * \
                 (3600 * int(hours) + 60 * int(mins) + int(secs))
        return (utcoff, abbr)
    def expected_for(zone, time):
        date_process = subprocess.Popen(['date',
                                         '--date=' + write_expected(time),
                                         '+%Z %::z'],
                                        stdout = subprocess.PIPE,
                                        env={"TZ": os.path.join(source_prefix, zone)})
        result = read_expected(date_process)
        date_process.stdout.close()
        return result

    io.write("""
/*
 * Generate tests based on all the transitions shown by zdump for each zone.
 */
""")

    sys.stderr.write("Preparing to build transition tests.\n")

    date_process = subprocess.Popen(['date',
                                     '--date=' + str(STOP_YEAR) +
                                     '-01-01 00:00:00 UTC', '+%s'],
                                    stdout = subprocess.PIPE)
    stop_d = int(date_process.stdout.read().rstrip("\n"))
    date_process.stdout.close()
    def zdump_for(zone):
        zdump = subprocess.Popen([zdump_command,
                                  '-v',
                                  '-c', str(START_YEAR) + "," + str(STOP_YEAR),
                                  zone],
                                 stdout=subprocess.PIPE)
        zdump_re = re.compile("^" + zone + "  ([^=]+) = ([^=]+) isdst=([01]) gmtoff=(-?\d+)$")
        for line in zdump.stdout:
            line = line.rstrip("\n")
            if line.endswith(" = NULL"):
                continue
            yield zdump_re.match(line).groups()
    # FIXME: spread this across cores
    zdumps = [(zone, list(zdump_for(zone))) for zone in all_zones]
    # Write all the dates to one file and run them through a single
    # date process, for speed.
    datefile = tempfile.NamedTemporaryFile(delete=False)
    for (zone, zdump) in zdumps:
        for (date_utc, date_loc, isdst, utcoff) in zdump:
            datefile.write(date_utc + "\n")
    datefile.close()
    date_process = subprocess.Popen(['date',
                                     '--file=' + datefile.name, '+%s'],
                                    stdout = subprocess.PIPE)
    prev_zone = None
    for (zone, zdump) in zdumps:
        if zone != prev_zone:
            prev_zone = zone
            sys.stderr.write("Building transition tests for zone " + zone + "\n")
        def output_test(d, utcoff, abbr):
            output_check_offset(zone, d, utcoff, abbr)
        first = True
        first_after_1970 = True
        prev_utcoff = None
        prev_abbr = None
        for (date_utc, date_loc, isdst, utcoff) in zdump:
            isdst = bool(isdst) # not really needed
            utcoff = int(utcoff)
            d = int(date_process.stdout.readline().rstrip("\n"))
            abbr = date_loc.split(" ")[-1]
            if d >= 0:
                if first_after_1970 and d != 0 and not first:
                    output_test(0, prev_utcoff, prev_abbr)
                if first and d > 0:
                    output_test(0, utcoff, abbr)
                output_test(d, utcoff, abbr)
                first_after_1970 = False
            first = False
            prev_utcoff = utcoff
            prev_abbr = abbr
        if first:
            # This zone (Pacific/Johnston) has no transitions, but we
            # can still test it.
            (prev_utcoff, prev_abbr) = expected_for(zone, 0)
        if first_after_1970:
            output_test(0, prev_utcoff, prev_abbr)
        output_test(stop_d, prev_utcoff, prev_abbr)
    date_process.stdout.close()
    os.unlink(datefile.name)
    io.write("""

/*
 * Generate a fixed set of random tests using a linear-congruential
 * PRNG.  This does a good bit of testing of the space in a random way,
 * but uses a fixed random seed to always get the same set of tests.
 * See http://en.wikipedia.org/wiki/Linear_congruential_generator (using
 * the numbers from Numerical Recipes).
 *
 * And while we're here, toss in some tests for midnight boundaries
 * around the new year.
 */
""")
    def lc_prng(): # a generator
        # a randomly (once) generated number in [0,2^32)
        rand_state = 1938266273;
        while True:
            yield 1.0 * rand_state / 0x100000000 # value in [0,1)
            rand_state = ((rand_state * 1664525) + 1013904223) % 0x100000000

    prng = lc_prng()
    def random_time():
        # pick a random time in 1970...STOP_SECS.  Use two random
        # numbers so we use the full space, random down to the
        # millisecond.
        time = (prng.next() * STOP_SECS) + (prng.next() * 0x100000000 / 1000)
        time = time % STOP_SECS
        time = math.floor(time * 1000) / 1000
        return time
    def midnight_rule_time(i):
        # return 2049-12-31 00:30 UTC + i hours
        return 2524523400 + 3600 * i
    # For each time zone, we make 100 random tests, and some additional
    # tests.  Do each zone together so that we can easily use a single
    # date process for each zone.
    for zone in all_zones:
        sys.stderr.write("Building tests for zone " + zone + "\n")
        # 100 random tests, then specifically test 48 hours around new
        # years 2050 to test rule edge cases
        test_times = [random_time() for i in range(100)] + \
                     [midnight_rule_time(i) for i in range(48)]
        # Write all the dates to one file and run them through a single
        # date process, for speed.
        datefile = tempfile.NamedTemporaryFile(delete=False)
        for time in test_times:
            datefile.write(write_expected(time) + "\n")
        datefile.close()
        # FIXME: This is using the system's date command, which might
        # not be compatible with the timezone data it's being given.
        # (For example, if you have a system date command that doesn't
        # understand version 3 timezone file formats, you'll fail some
        # post-2038 tests for America/Godthab.)
        date_process = subprocess.Popen(['date',
                                         '--file=' + datefile.name,
                                         '+%Z %::z'],
                                        stdout = subprocess.PIPE,
                                        env={"TZ": os.path.join(source_prefix, zone)})
        for time in test_times:
            (utcoff, abbr) = read_expected(date_process)
            output_check_offset(zone, time, utcoff, abbr)
        date_process.stdout.close()
        os.unlink(datefile.name)
    io.write("""
/*
 * Some fixed tests for window.tz.datesFor
 */
var df = window.tz.datesFor("America/Los_Angeles", 2011, 1, 1, 0, 0, 0);
is(df.length, 1, "datesFor (1) length");
is(df[0].offset, -28800, "datesFor(1) [0].offset");
is(df[0].abbr, "PST", "datesFor(1) [0].abbr");
is(df[0].date.valueOf(), 1293868800000, "datesFor(1) [0].date.valueOf()");
df = window.tz.datesFor("America/Los_Angeles", 2011, 3, 13, 2, 30, 0);
is(df.length, 0, "datesFor (2) length");
df = window.tz.datesFor("America/Los_Angeles", 2011, 11, 6, 1, 30, 0);
is(df.length, 2, "datesFor (3) length");
is(df[0].offset, -25200, "datesFor(3) [0].offset");
is(df[0].abbr, "PDT", "datesFor(3) [0].abbr");
is(df[0].date.valueOf(), 1320568200000, "datesFor(3) [0].date.valueOf()");
is(df[1].offset, -28800, "datesFor(3) [1].offset");
is(df[1].abbr, "PST", "datesFor(3) [1].abbr");
is(df[1].date.valueOf(), 1320571800000, "datesFor(3) [1].date.valueOf()");
""")

    io.write("""
print("Totals:  " + pass_count + " passed, " + fail_count + " failed.");
</script>
""")

Example 35

Project: django-compositepks
Source File: loaddata.py
View license
    def handle(self, *fixture_labels, **options):
        from django.db.models import get_apps
        from django.core import serializers
        from django.db import connection, transaction
        from django.conf import settings

        self.style = no_style()

        verbosity = int(options.get('verbosity', 1))
        show_traceback = options.get('traceback', False)

        # commit is a stealth option - it isn't really useful as
        # a command line option, but it can be useful when invoking
        # loaddata from within another script.
        # If commit=True, loaddata will use its own transaction;
        # if commit=False, the data load SQL will become part of
        # the transaction in place when loaddata was invoked.
        commit = options.get('commit', True)

        # Keep a count of the installed objects and fixtures
        fixture_count = 0
        object_count = 0
        models = set()

        humanize = lambda dirname: dirname and "'%s'" % dirname or 'absolute path'

        # Get a cursor (even though we don't need one yet). This has
        # the side effect of initializing the test database (if
        # it isn't already initialized).
        cursor = connection.cursor()

        # Start transaction management. All fixtures are installed in a
        # single transaction to ensure that all references are resolved.
        if commit:
            transaction.commit_unless_managed()
            transaction.enter_transaction_management()
            transaction.managed(True)

        app_fixtures = [os.path.join(os.path.dirname(app.__file__), 'fixtures') for app in get_apps()]
        for fixture_label in fixture_labels:
            parts = fixture_label.split('.')
            if len(parts) == 1:
                fixture_name = fixture_label
                formats = serializers.get_public_serializer_formats()
            else:
                fixture_name, format = '.'.join(parts[:-1]), parts[-1]
                if format in serializers.get_public_serializer_formats():
                    formats = [format]
                else:
                    formats = []

            if formats:
                if verbosity > 1:
                    print "Loading '%s' fixtures..." % fixture_name
            else:
                sys.stderr.write(
                    self.style.ERROR("Problem installing fixture '%s': %s is not a known serialization format." %
                        (fixture_name, format)))
                transaction.rollback()
                transaction.leave_transaction_management()
                return

            if os.path.isabs(fixture_name):
                fixture_dirs = [fixture_name]
            else:
                fixture_dirs = app_fixtures + list(settings.FIXTURE_DIRS) + ['']

            for fixture_dir in fixture_dirs:
                if verbosity > 1:
                    print "Checking %s for fixtures..." % humanize(fixture_dir)

                label_found = False
                for format in formats:
                    serializer = serializers.get_serializer(format)
                    if verbosity > 1:
                        print "Trying %s for %s fixture '%s'..." % \
                            (humanize(fixture_dir), format, fixture_name)
                    try:
                        full_path = os.path.join(fixture_dir, '.'.join([fixture_name, format]))
                        fixture = open(full_path, 'r')
                        if label_found:
                            fixture.close()
                            print self.style.ERROR("Multiple fixtures named '%s' in %s. Aborting." %
                                (fixture_name, humanize(fixture_dir)))
                            transaction.rollback()
                            transaction.leave_transaction_management()
                            return
                        else:
                            fixture_count += 1
                            objects_in_fixture = 0
                            if verbosity > 0:
                                print "Installing %s fixture '%s' from %s." % \
                                    (format, fixture_name, humanize(fixture_dir))
                            try:
                                objects = serializers.deserialize(format, fixture)
                                for obj in objects:
                                    objects_in_fixture += 1
                                    models.add(obj.object.__class__)
                                    obj.save()
                                object_count += objects_in_fixture
                                label_found = True
                            except (SystemExit, KeyboardInterrupt):
                                raise
                            except Exception:
                                import traceback
                                fixture.close()
                                transaction.rollback()
                                transaction.leave_transaction_management()
                                if show_traceback:
                                    import traceback
                                    traceback.print_exc()
                                else:
                                    sys.stderr.write(
                                        self.style.ERROR("Problem installing fixture '%s': %s\n" %
                                             (full_path, traceback.format_exc())))
                                return
                            fixture.close()

                            # If the fixture we loaded contains 0 objects, assume that an
                            # error was encountered during fixture loading.
                            if objects_in_fixture == 0:
                                sys.stderr.write(
                                    self.style.ERROR("No fixture data found for '%s'. (File format may be invalid.)" %
                                        (fixture_name)))
                                transaction.rollback()
                                transaction.leave_transaction_management()
                                return
                    except:
                        if verbosity > 1:
                            print "No %s fixture '%s' in %s." % \
                                (format, fixture_name, humanize(fixture_dir))

        # If we found even one object in a fixture, we need to reset the
        # database sequences.
        if object_count > 0:
            sequence_sql = connection.ops.sequence_reset_sql(self.style, models)
            if sequence_sql:
                if verbosity > 1:
                    print "Resetting sequences"
                for line in sequence_sql:
                    cursor.execute(line)

        if commit:
            transaction.commit()
            transaction.leave_transaction_management()

        if object_count == 0:
            if verbosity > 1:
                print "No fixtures found."
        else:
            if verbosity > 0:
                print "Installed %d object(s) from %d fixture(s)" % (object_count, fixture_count)

        # Close the DB connection. This is required as a workaround for an
        # edge case in MySQL: if the same connection is used to
        # create tables, load data, and query, the query can return
        # incorrect results. See Django #7572, MySQL #37735.
        if commit:
            connection.close()

Example 36

Project: django-extensions
Source File: syncdata.py
View license
    @signalcommand
    @transaction.atomic
    def handle(self, *fixture_labels, **options):
        """ Main method of a Django command """
        from django.db.models import get_apps
        from django.core import serializers
        from django.conf import settings

        self.style = no_style()

        verbosity = int(options.get('verbosity', 1))
        show_traceback = options.get('traceback', False)

        # Keep a count of the installed objects and fixtures
        fixture_count = 0
        object_count = 0
        objects_per_fixture = []
        models = set()

        humanize = lambda dirname: dirname and "'%s'" % dirname or 'absolute path'

        # Get a cursor (even though we don't need one yet). This has
        # the side effect of initializing the test database (if
        # it isn't already initialized).
        cursor = connection.cursor()

        app_fixtures = [os.path.join(os.path.dirname(app.__file__), 'fixtures') for app in get_apps()]
        for fixture_label in fixture_labels:
            parts = fixture_label.split('.')
            if len(parts) == 1:
                fixture_name = fixture_label
                formats = serializers.get_public_serializer_formats()
            else:
                fixture_name, format = '.'.join(parts[:-1]), parts[-1]
                if format in serializers.get_public_serializer_formats():
                    formats = [format]
                else:
                    formats = []

            if formats:
                if verbosity > 1:
                    print("Loading '%s' fixtures..." % fixture_name)
            else:
                sys.stderr.write(self.style.ERROR("Problem installing fixture '%s': %s is not a known serialization format." % (fixture_name, format)))
                transaction.rollback()
                return

            if os.path.isabs(fixture_name):
                fixture_dirs = [fixture_name]
            else:
                fixture_dirs = app_fixtures + list(settings.FIXTURE_DIRS) + ['']

            for fixture_dir in fixture_dirs:
                if verbosity > 1:
                    print("Checking %s for fixtures..." % humanize(fixture_dir))

                label_found = False
                for format in formats:
                    if verbosity > 1:
                        print("Trying %s for %s fixture '%s'..." % (humanize(fixture_dir), format, fixture_name))
                    try:
                        full_path = os.path.join(fixture_dir, '.'.join([fixture_name, format]))
                        fixture = open(full_path, 'r')
                        if label_found:
                            fixture.close()
                            print(self.style.ERROR("Multiple fixtures named '%s' in %s. Aborting." % (fixture_name, humanize(fixture_dir))))
                            transaction.rollback()
                            return
                        else:
                            fixture_count += 1
                            objects_per_fixture.append(0)
                            if verbosity > 0:
                                print("Installing %s fixture '%s' from %s." % (format, fixture_name, humanize(fixture_dir)))
                            try:
                                objects_to_keep = {}
                                objects = serializers.deserialize(format, fixture)
                                for obj in objects:
                                    object_count += 1
                                    objects_per_fixture[-1] += 1

                                    class_ = obj.object.__class__
                                    if class_ not in objects_to_keep:
                                        objects_to_keep[class_] = set()
                                    objects_to_keep[class_].add(obj.object)

                                    models.add(class_)
                                    obj.save()

                                if options.get('remove'):
                                    self.remove_objects_not_in(objects_to_keep, verbosity)

                                label_found = True
                            except (SystemExit, KeyboardInterrupt):
                                raise
                            except Exception:
                                import traceback
                                fixture.close()
                                transaction.rollback()
                                if show_traceback:
                                    traceback.print_exc()
                                else:
                                    sys.stderr.write(self.style.ERROR("Problem installing fixture '%s': %s\n" % (full_path, traceback.format_exc())))
                                return
                            fixture.close()
                    except:
                        if verbosity > 1:
                            print("No %s fixture '%s' in %s." % (format, fixture_name, humanize(fixture_dir)))

        # If any of the fixtures we loaded contain 0 objects, assume that an
        # error was encountered during fixture loading.
        if 0 in objects_per_fixture:
            sys.stderr.write(
                self.style.ERROR("No fixture data found for '%s'. (File format may be invalid.)" % fixture_name))
            transaction.rollback()
            return

        # If we found even one object in a fixture, we need to reset the
        # database sequences.
        if object_count > 0:
            sequence_sql = connection.ops.sequence_reset_sql(self.style, models)
            if sequence_sql:
                if verbosity > 1:
                    print("Resetting sequences")
                for line in sequence_sql:
                    cursor.execute(line)

        transaction.commit()

        if object_count == 0:
            if verbosity > 1:
                print("No fixtures found.")
        else:
            if verbosity > 0:
                print("Installed %d object(s) from %d fixture(s)" % (object_count, fixture_count))

        # Close the DB connection. This is required as a workaround for an
        # edge case in MySQL: if the same connection is used to
        # create tables, load data, and query, the query can return
        # incorrect results. See Django #7572, MySQL #37735.
        connection.close()
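
Because these commands report problems on stderr instead of raising, a test for them usually captures sys.stderr. A small stdlib-only illustration (not taken from django-extensions) using contextlib.redirect_stderr, with install_fixture standing in for the command's error path:

import contextlib
import io
import sys

def install_fixture(name):
    # Stand-in for the error path of the syncdata command above.
    sys.stderr.write("Problem installing fixture '%s': unknown format\n" % name)

buf = io.StringIO()
with contextlib.redirect_stderr(buf):
    install_fixture("authors.yamlx")

assert "Problem installing fixture" in buf.getvalue()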

Example 37

Project: dx-toolkit
Source File: dxfile_functions.py
View license
def _download_dxfile(dxid, filename, part_retry_counter,
                     chunksize=dxfile.DEFAULT_BUFFER_SIZE, append=False, show_progress=False,
                     project=None, **kwargs):
    '''
    Core of download logic. Download file-id *dxid* and store it in
    a local file *filename*.

    The return value is as follows:
    - True means the download was successfully completed
    - False means the download was stopped because of a retryable error
    - Exception raised for other errors
    '''
    def print_progress(bytes_downloaded, file_size, action="Downloaded"):
        num_ticks = 60

        effective_file_size = file_size or 1
        if bytes_downloaded > effective_file_size:
            effective_file_size = bytes_downloaded

        ticks = int(round((bytes_downloaded / float(effective_file_size)) * num_ticks))
        percent = int(math.floor((bytes_downloaded / float(effective_file_size)) * 100))

        fmt = "[{done}{pending}] {action} {done_bytes:,}{remaining} bytes ({percent}%) {name}"
        # Erase the line and return the cursor to the start of the line.
        # The following VT100 escape sequence will erase the current line.
        sys.stderr.write("\33[2K")
        sys.stderr.write(fmt.format(action=action,
                                    done=("=" * (ticks - 1) + ">") if ticks > 0 else "",
                                    pending=" " * (num_ticks - ticks),
                                    done_bytes=bytes_downloaded,
                                    remaining=" of {size:,}".format(size=file_size) if file_size else "",
                                    percent=percent,
                                    name=filename))
        sys.stderr.flush()
        sys.stderr.write("\r")
        sys.stderr.flush()

    _bytes = 0

    if isinstance(dxid, DXFile):
        dxfile = dxid
    else:
        dxfile = DXFile(dxid, mode="r")

    dxfile_desc = dxfile.describe(fields={"parts"}, default_fields=True, **kwargs)
    parts = dxfile_desc["parts"]
    parts_to_get = sorted(parts, key=int)
    file_size = dxfile_desc.get("size")

    offset = 0
    for part_id in parts_to_get:
        parts[part_id]["start"] = offset
        offset += parts[part_id]["size"]

    if append:
        fh = open(filename, "ab")
    else:
        try:
            fh = open(filename, "rb+")
        except IOError:
            fh = open(filename, "wb")

    if show_progress:
        print_progress(0, None)

    def get_chunk(part_id_to_get, start, end):
        url, headers = dxfile.get_download_url(project=project, **kwargs)
        # If we're fetching the whole object in one shot, avoid setting the Range header to take advantage of gzip
        # transfer compression
        sub_range = False
        if len(parts) > 1 or (start > 0) or (end - start + 1 < parts[part_id_to_get]["size"]):
            sub_range = True
        data = dxpy._dxhttp_read_range(url, headers, start, end, FILE_REQUEST_TIMEOUT, sub_range)
        return part_id_to_get, data

    def chunk_requests():
        for part_id_to_chunk in parts_to_get:
            part_info = parts[part_id_to_chunk]
            for chunk_start in range(part_info["start"], part_info["start"] + part_info["size"], chunksize):
                chunk_end = min(chunk_start + chunksize, part_info["start"] + part_info["size"]) - 1
                yield get_chunk, [part_id_to_chunk, chunk_start, chunk_end], {}

    def verify_part(_part_id, got_bytes, hasher):
        if got_bytes is not None and got_bytes != parts[_part_id]["size"]:
            msg = "Unexpected part data size in {} part {} (expected {}, got {})"
            msg = msg.format(dxfile.get_id(), _part_id, parts[_part_id]["size"], got_bytes)
            raise DXPartLengthMismatchError(msg)
        if hasher is not None and "md5" not in parts[_part_id]:
            warnings.warn("Download of file {} is not being checked for integrity".format(dxfile.get_id()))
        elif hasher is not None and hasher.hexdigest() != parts[_part_id]["md5"]:
            msg = "Checksum mismatch in {} part {} (expected {}, got {})"
            msg = msg.format(dxfile.get_id(), _part_id, parts[_part_id]["md5"], hasher.hexdigest())
            raise DXChecksumMismatchError(msg)

    with fh:
        last_verified_pos = 0

        if fh.mode == "rb+":
            # We already downloaded the beginning of the file, verify that the
            # chunk checksums match the metadata.
            last_verified_part, max_verify_chunk_size = None, 1024*1024
            try:
                for part_id in parts_to_get:
                    part_info = parts[part_id]
                    if "md5" not in part_info:
                        raise DXFileError("File {} does not contain part md5 checksums".format(dxfile.get_id()))
                    bytes_to_read = part_info["size"]
                    hasher = hashlib.md5()
                    while bytes_to_read > 0:
                        chunk = fh.read(min(max_verify_chunk_size, bytes_to_read))
                        if len(chunk) < min(max_verify_chunk_size, bytes_to_read):
                            raise DXFileError("Local data for part {} is truncated".format(part_id))
                        hasher.update(chunk)
                        bytes_to_read -= max_verify_chunk_size
                    if hasher.hexdigest() != part_info["md5"]:
                        raise DXFileError("Checksum mismatch when verifying downloaded part {}".format(part_id))
                    else:
                        last_verified_part = part_id
                        last_verified_pos = fh.tell()
                        if show_progress:
                            _bytes += part_info["size"]
                            print_progress(_bytes, file_size, action="Verified")
            except (IOError, DXFileError) as e:
                logger.debug(e)
            fh.seek(last_verified_pos)
            fh.truncate()
            if last_verified_part is not None:
                del parts_to_get[:parts_to_get.index(last_verified_part)+1]
            if show_progress and len(parts_to_get) < len(parts):
                print_progress(last_verified_pos, file_size, action="Resuming at")
            logger.debug("Verified %s/%d downloaded parts", last_verified_part, len(parts_to_get))

        try:
            # Main loop. In parallel: download chunks, verify them, and write them to disk.
            get_first_chunk_sequentially = (file_size > 128 * 1024 and last_verified_pos == 0 and dxpy.JOB_ID)
            cur_part, got_bytes, hasher = None, None, None
            for chunk_part, chunk_data in response_iterator(chunk_requests(),
                                                            dxfile._http_threadpool,
                                                            do_first_task_sequentially=get_first_chunk_sequentially):
                if chunk_part != cur_part:
                    verify_part(cur_part, got_bytes, hasher)
                    cur_part, got_bytes, hasher = chunk_part, 0, hashlib.md5()
                got_bytes += len(chunk_data)
                hasher.update(chunk_data)
                fh.write(chunk_data)
                if show_progress:
                    _bytes += len(chunk_data)
                    print_progress(_bytes, file_size)
            verify_part(cur_part, got_bytes, hasher)
            if show_progress:
                print_progress(_bytes, file_size, action="Completed")
        except DXFileError:
            print(traceback.format_exc(), file=sys.stderr)
            part_retry_counter[cur_part] -= 1
            if part_retry_counter[cur_part] > 0:
                print("Retrying {} ({} tries remain for part {})".format(dxfile.get_id(), part_retry_counter[cur_part], cur_part),
                      file=sys.stderr)
                return False
            raise

        if show_progress:
            sys.stderr.write("\n")

        return True
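
The download helper above redraws a single stderr line on every update: erase the line with the VT100 sequence "\33[2K", write the bar, flush, then move the cursor back to column 0 with "\r". A toy version of that loop (not part of dx-toolkit) showing the same write/flush order:

import sys
import time

def print_progress(done, total, width=40):
    # Same idea as print_progress() above: erase the current line, redraw the
    # bar, flush so it appears immediately, then return the cursor to column 0.
    ticks = int(round(done / float(total) * width))
    bar = "=" * max(ticks - 1, 0) + (">" if ticks > 0 else "")
    sys.stderr.write("\33[2K")
    sys.stderr.write("[%-*s] %d/%d bytes" % (width, bar, done, total))
    sys.stderr.flush()
    sys.stderr.write("\r")
    sys.stderr.flush()

for done in range(0, 1001, 200):
    print_progress(done, 1000)
    time.sleep(0.1)
sys.stderr.write("\n")  # leave the finished bar on screen and move on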

Example 38

Project: dx-toolkit
Source File: dxfile_functions.py
View license
def upload_local_file(filename=None, file=None, media_type=None, keep_open=False,
                      wait_on_close=False, use_existing_dxfile=None, show_progress=False,
                      write_buffer_size=None, **kwargs):
    '''
    :param filename: Local filename
    :type filename: string
    :param file: File-like object
    :type file: File-like object
    :param media_type: Internet Media Type
    :type media_type: string
    :param keep_open: If False, closes the file after uploading
    :type keep_open: boolean
    :param write_buffer_size: Buffer size to use for upload
    :type write_buffer_size: int
    :param wait_on_close: If True, waits for the file to close
    :type wait_on_close: boolean
    :param use_existing_dxfile: Instead of creating a new file object, upload to the specified file
    :type use_existing_dxfile: :class:`~dxpy.bindings.dxfile.DXFile`
    :returns: Remote file handler
    :rtype: :class:`~dxpy.bindings.dxfile.DXFile`

    Additional optional parameters not listed: all those under
    :func:`dxpy.bindings.DXDataObject.new`.

    Exactly one of *filename* or *file* is required.

    Uploads *filename* or reads from *file* into a new file object (with
    media type *media_type* if given) and returns the associated remote
    file handler. The "name" property of the newly created remote file
    is set to the basename of *filename* or to *file.name* (if it
    exists).

    Examples::

      # Upload from a path
      dxpy.upload_local_file("/home/ubuntu/reads.fastq.gz")
      # Upload from a file-like object
      with open("reads.fastq") as fh:
          dxpy.upload_local_file(file=fh)

    '''
    fd = file if filename is None else open(filename, 'rb')

    try:
        file_size = os.fstat(fd.fileno()).st_size
    except:
        file_size = 0

    file_is_mmapd = hasattr(fd, "fileno")

    if write_buffer_size is None:
        write_buffer_size=dxfile.DEFAULT_BUFFER_SIZE

    if use_existing_dxfile:
        handler = use_existing_dxfile
    else:
        # Set a reasonable name for the file if none has been set
        # already
        creation_kwargs = kwargs.copy()
        if 'name' not in kwargs:
            if filename is not None:
                creation_kwargs['name'] = os.path.basename(filename)
            else:
                # Try to get filename from file-like object
                try:
                    local_file_name = file.name
                except AttributeError:
                    pass
                else:
                    creation_kwargs['name'] = os.path.basename(local_file_name)

        # Use 'a' mode because we will be responsible for closing the file
        # ourselves later (if requested).
        handler = new_dxfile(mode='a', media_type=media_type, write_buffer_size=write_buffer_size,
                             expected_file_size=file_size, file_is_mmapd=file_is_mmapd, **creation_kwargs)

    # For subsequent API calls, don't supply the dataobject metadata
    # parameters that are only needed at creation time.
    _, remaining_kwargs = dxpy.DXDataObject._get_creation_params(kwargs)

    num_ticks = 60
    offset = 0

    handler._ensure_write_bufsize(**remaining_kwargs)

    def can_be_mmapd(fd):
        if not hasattr(fd, "fileno"):
            return False
        mode = os.fstat(fd.fileno()).st_mode
        return not (stat.S_ISCHR(mode) or stat.S_ISFIFO(mode))

    def read(num_bytes):
        """
        Returns a string or mmap'd data containing the next num_bytes of
        the file, or up to the end if there are fewer than num_bytes
        left.
        """
        # If file cannot be mmap'd (e.g. is stdin, or a fifo), fall back
        # to doing an actual read from the file.
        if not can_be_mmapd(fd):
            return fd.read(handler._write_bufsize)

        bytes_available = max(file_size - offset, 0)
        if bytes_available == 0:
            return b""

        return mmap.mmap(fd.fileno(), min(handler._write_bufsize, bytes_available), offset=offset, access=mmap.ACCESS_READ)

    handler._num_bytes_transmitted = 0

    def report_progress(handler, num_bytes):
        handler._num_bytes_transmitted += num_bytes
        if file_size > 0:
            ticks = int(round((handler._num_bytes_transmitted / float(file_size)) * num_ticks))
            percent = int(round((handler._num_bytes_transmitted / float(file_size)) * 100))

            fmt = "[{done}{pending}] Uploaded {done_bytes:,} of {total:,} bytes ({percent}%) {name}"
            sys.stderr.write(fmt.format(done='=' * (ticks - 1) + '>' if ticks > 0 else '',
                                        pending=' ' * (num_ticks - ticks),
                                        done_bytes=handler._num_bytes_transmitted,
                                        total=file_size,
                                        percent=percent,
                                        name=filename if filename is not None else ''))
            sys.stderr.flush()
            sys.stderr.write("\r")
            sys.stderr.flush()

    if show_progress:
        report_progress(handler, 0)

    while True:
        buf = read(handler._write_bufsize)
        offset += len(buf)

        if len(buf) == 0:
            break

        handler.write(buf, report_progress_fn=report_progress if show_progress else None, **remaining_kwargs)

    if filename is not None:
        fd.close()

    handler.flush(report_progress_fn=report_progress if show_progress else None, **remaining_kwargs)

    if show_progress:
        sys.stderr.write("\n")
        sys.stderr.flush()

    if not keep_open:
        handler.close(block=wait_on_close, report_progress_fn=report_progress if show_progress else None, **remaining_kwargs)

    return handler
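
Both dx-toolkit helpers flush stderr after each partial-line write and only emit a final "\n" once the transfer is done; on a line-buffered stream the text would otherwise sit in the buffer until the next newline. A toy spinner (not from dx-toolkit) with the same write/flush/"\r" rhythm:

import sys
import time

for frame in "|/-\\" * 5:
    sys.stderr.write(frame)
    sys.stderr.flush()      # no newline yet, so force the character out now
    time.sleep(0.05)
    sys.stderr.write("\r")  # back to column 0; the next frame overwrites this one
sys.stderr.write("done\n")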

Example 39

Project: dx-toolkit
Source File: dx_app_wizard.py
View license
def main(**kwargs):
    """
    Entry point for dx-app-wizard.
    Note that this function is not meant to be used as a subroutine in your program.
    """
    manifest = []

    print_intro(API_VERSION)

    if args.json_file is not None:
        with open(args.json_file, 'r') as json_file:
            app_json = json.loads(json_file.read())
            # Re-confirm the name
            name = get_name(default=args.name or app_json.get('name'))
            app_json['name'] = name
            version = get_version(default=app_json.get('version'))
            app_json['version'] = version
        try:
            os.mkdir(app_json['name'])
        except:
            sys.stderr.write(fill('''Unable to create a directory for %s, please check that it is a valid app name and the working directory exists and is writable.''' % app_json['name']) + '\n')
            sys.exit(1)
    else:
        ##################
        # BASIC METADATA #
        ##################

        name = get_name(default=args.name)

        try:
            os.mkdir(name)
        except:
            sys.stderr.write(fill('''Unable to create a directory for %s, please check that it is a valid app name and the working directory exists and is writable.''' % name) + '\n')
            sys.exit(1)

        title, summary = get_metadata(API_VERSION)

        version = get_version()

        app_json = OrderedDict()
        app_json["name"] = name

        app_json["title"] = title or name
        app_json['summary'] = summary or name

        app_json["dxapi"] = API_VERSION
        app_json["version"] = version

        ############
        # IO SPECS #
        ############

        class_completer = Completer(['int', 'float', 'string', 'boolean', 'hash',
                                     'array:int', 'array:float', 'array:string', 'array:boolean',
                                     'record', 'file', 'applet',
                                     'array:record', 'array:file', 'array:applet'])
        bool_completer = Completer(['true', 'false'])

        print('')
        print(BOLD() + 'Input Specification' + ENDC())
        print('')

        input_spec = True
        input_names = []
        printed_classes = False

        if input_spec:
            app_json['inputSpec'] = []
            print(fill('You will now be prompted for each input parameter to your app.  Each parameter should have a unique name that uses only the underscore "_" and alphanumeric characters, and does not start with a number.'))

            while True:
                print('')
                ordinal = get_ordinal_str(len(app_json['inputSpec']) + 1)
                input_name = prompt_for_var(ordinal + ' input name (<ENTER> to finish)', allow_empty=True)
                if input_name == '':
                    break
                if input_name in input_names:
                    print(fill('Error: Cannot use the same input parameter name twice.  Please choose again.'))
                    continue
                if not IO_NAME_PATTERN.match(input_name):
                    print(fill('Error: Parameter names may use only underscore "_", ASCII letters, and digits; and may not start with a digit.  Please choose again.'))
                    continue
                input_names.append(input_name)

                input_label = prompt_for_var('Label (optional human-readable name)', '')

                use_completer(class_completer)
                if not printed_classes:
                    print('Your input parameter must be of one of the following classes:')
                    print('''applet         array:file     array:record   file           int
array:applet   array:float    array:string   float          record
array:boolean  array:int      boolean        hash           string
''')
                    printed_classes = True

                while True:
                    input_class = prompt_for_var('Choose a class (<TAB> twice for choices)')
                    if input_class in class_completer.choices:
                        break
                    else:
                        print(fill('Not a recognized class; please choose again.'))

                use_completer()

                optional = prompt_for_yn('This is an optional parameter')

                default_val = None
                if optional and input_class in ['int', 'float', 'string', 'boolean']:
                    default_val = prompt_for_yn('A default value should be provided')
                    if default_val:
                        while True:
                            if input_class == 'boolean':
                                use_completer(bool_completer)
                                default_val = prompt_for_var('  Default value', choices=['true', 'false'])
                                use_completer()
                            elif input_class == 'string':
                                default_val = prompt_for_var('  Default value', allow_empty=True)
                            else:
                                default_val = prompt_for_var('  Default value')

                            try:
                                if input_class == 'boolean':
                                    default_val = (default_val == 'true')
                                elif input_class == 'int':
                                    default_val = int(default_val)
                                elif input_class == 'float':
                                    default_val = float(default_val)
                                break
                            except:
                                print('Not a valid default value for the given class ' + input_class)
                    else:
                        default_val = None

                # Fill in the input parameter's JSON
                parameter_json = OrderedDict()

                parameter_json["name"] = input_name
                if input_label != '':
                    parameter_json['label'] = input_label
                parameter_json["class"] = input_class
                parameter_json["optional"] = optional
                if default_val is not None:
                    parameter_json['default'] = default_val

                # Fill in patterns and blank help string
                if input_class == 'file' or input_class == 'array:file':
                    parameter_json["patterns"] = ["*"]
                parameter_json["help"] = ""

                app_json['inputSpec'].append(parameter_json)

        print('')
        print(BOLD() + 'Output Specification' + ENDC())
        print('')

        output_spec = True
        output_names = []
        if output_spec:
            app_json['outputSpec'] = []
            print(fill('You will now be prompted for each output parameter of your app.  Each parameter should have a unique name that uses only the underscore "_" and alphanumeric characters, and does not start with a number.'))

            while True:
                print('')
                ordinal = get_ordinal_str(len(app_json['outputSpec']) + 1)
                output_name = prompt_for_var(ordinal + ' output name (<ENTER> to finish)', allow_empty=True)
                if output_name == '':
                    break
                if output_name in output_names:
                    print(fill('Error: Cannot use the same output parameter name twice.  Please choose again.'))
                    continue
                if not IO_NAME_PATTERN.match(output_name):
                    print(fill('Error: Parameter names may use only underscore "_", ASCII letters, and digits; and may not start with a digit.  Please choose again.'))
                    continue
                output_names.append(output_name)

                output_label = prompt_for_var('Label (optional human-readable name)', '')

                use_completer(class_completer)
                if not printed_classes:
                    print('Your output parameter must be of one of the following classes:')
                    print('''applet         array:file     array:record   file           int
array:applet   array:float    array:string   float          record
array:boolean  array:int      boolean        hash           string''')
                    printed_classes = True
                while True:
                    output_class = prompt_for_var('Choose a class (<TAB> twice for choices)')
                    if output_class in class_completer.choices:
                        break
                    else:
                        print(fill('Not a recognized class; please choose again.'))

                use_completer()

                # Fill in the output parameter's JSON
                parameter_json = OrderedDict()
                parameter_json["name"] = output_name
                if output_label != '':
                    parameter_json['label'] = output_label
                parameter_json["class"] = output_class

                # Fill in patterns and blank help string
                if output_class == 'file' or output_class == 'array:file':
                    parameter_json["patterns"] = ["*"]
                parameter_json["help"] = ""

                app_json['outputSpec'].append(parameter_json)

    required_file_input_names = []
    optional_file_input_names = []
    required_file_array_input_names = []
    optional_file_array_input_names = []
    file_output_names = []

    if 'inputSpec' in app_json:
        for param in app_json['inputSpec']:
            may_be_missing = param['optional'] and "default" not in param
            if param['class'] == 'file':
                param_list = optional_file_input_names if may_be_missing else required_file_input_names
            elif param['class'] == 'array:file':
                param_list = optional_file_array_input_names if may_be_missing else required_file_array_input_names
            else:
                param_list = None
            if param_list is not None:
                param_list.append(param['name'])

    if 'outputSpec' in app_json:
        file_output_names = [param['name'] for param in app_json['outputSpec'] if param['class'] == 'file']

    ##################
    # TIMEOUT POLICY #
    ##################

    print('')
    print(BOLD() + 'Timeout Policy' + ENDC())

    app_json.setdefault('timeoutPolicy', {})

    timeout, timeout_units = get_timeout(default=app_json['timeoutPolicy'].get('*'))

    app_json['timeoutPolicy'].setdefault('*', {})
    app_json['timeoutPolicy']['*'].setdefault(timeout_units, timeout)

    ########################
    # LANGUAGE AND PATTERN #
    ########################

    print('')
    print(BOLD() + 'Template Options' + ENDC())

    # Prompt for programming language if not specified

    language = args.language if args.language is not None else get_language()

    interpreter = language_options[language].get_interpreter()
    app_json["runSpec"] = OrderedDict({"interpreter": interpreter})

    # Prompt for the execution pattern iff the provided args.template is invalid

    template_dir = os.path.join(os.path.dirname(dxpy.__file__), 'templating', 'templates', language_options[language].get_path())
    if not os.path.isdir(os.path.join(template_dir, args.template)):
        print(fill('The execution pattern "' + args.template + '" is not available for your programming language.'))
        pattern = get_pattern(template_dir)
    else:
        pattern = args.template
    template_dir = os.path.join(template_dir, pattern)

    with open(os.path.join(template_dir, 'dxapp.json'), 'r') as template_app_json_file:
        file_text = fill_in_name_and_ver(template_app_json_file.read(), name, version)
        template_app_json = json.loads(file_text)
        for key in template_app_json['runSpec']:
            app_json['runSpec'][key] = template_app_json['runSpec'][key]

    if (language == args.language) and (pattern == args.template):
        print('All template options are supplied in the arguments.')

    ##########################
    # APP ACCESS PERMISSIONS #
    ##########################

    print('')
    print(BOLD('Access Permissions'))
    print(fill('''If you request these extra permissions for your app, users will see this fact when launching your app, and certain other restrictions will apply. For more information, see ''' +
    BOLD('https://wiki.dnanexus.com/App-Permissions') + '.'))

    print('')
    print(fill(UNDERLINE('Access to the Internet') + ' (other than accessing the DNAnexus API).'))
    if prompt_for_yn("Will this app need access to the Internet?", default=False):
        app_json.setdefault('access', {})
        app_json['access']['network'] = ['*']
        print(fill('App has full access to the Internet. To narrow access to specific sites, edit the ' +
                   UNDERLINE('access.network') + ' field of dxapp.json once we generate the app.'))

    print('')
    print(fill(UNDERLINE('Direct access to the parent project') + '''. This is not needed if your app specifies outputs,
    which will be copied into the project after it's done running.'''))
    if prompt_for_yn("Will this app need access to the parent project?", default=False):
        app_json.setdefault('access', {})
        app_json['access']['project'] = 'CONTRIBUTE'
        print(fill('App has CONTRIBUTE access to the parent project. To change the access level or request access to ' +
                   'other projects, edit the ' + UNDERLINE('access.project') + ' and ' + UNDERLINE('access.allProjects') +
                   ' fields of dxapp.json once we generate the app.'))

    #######################
    # SYSTEM REQUIREMENTS #
    #######################

    print('')
    print(BOLD('System Requirements'))
    print('')
    print(BOLD('Common instance types:'))
    print(format_table(InstanceTypesCompleter.preferred_instance_types.values(),
                       column_names=InstanceTypesCompleter.instance_types.values()[0]._fields))
    print(fill(BOLD('Default instance type:') + ' The instance type you select here will apply to all entry points in ' +
               'your app unless you override it. See ' +
               BOLD('https://wiki.dnanexus.com/API-Specification-v1.0.0/Instance-Types') + ' for more information.'))
    use_completer(InstanceTypesCompleter())
    instance_type = prompt_for_var('Choose an instance type for your app',
                                   default=InstanceTypesCompleter.default_instance_type.Name,
                                   choices=list(InstanceTypesCompleter.instance_types))
    app_json['runSpec'].setdefault('systemRequirements', {})
    app_json['runSpec']['systemRequirements'].setdefault('*', {})
    app_json['runSpec']['systemRequirements']['*']['instanceType'] = instance_type

    ######################
    # HARDCODED DEFAULTS #
    ######################

    # Default of no other authorizedUsers
    # app_json['authorizedUsers'] = []

    # print('\n' + BOLD('Linux version: '))
    app_json['runSpec']['distribution'] = 'Ubuntu'
    app_json['runSpec']['release'] = '12.04'

    #if any(instance_type.startswith(prefix) for prefix in ('mem1_hdd2', 'mem2_hdd2', 'mem3_hdd2')):
    #    print(fill('Your app will run on Ubuntu 12.04. To use Ubuntu 14.04, select from the list of common instance ' +
    #               'types above.'))
    #    app_json['runSpec']['release'] = '12.04'
    #else:
    #    app_json['runSpec']['release'] = '14.04'
    #    print(fill('Your app has been configured to run on Ubuntu 14.04. To use Ubuntu 12.04, edit the ' +
    #               BOLD('runSpec.release') + ' field of your dxapp.json.'))

    #################
    # WRITING FILES #
    #################

    print('')
    print(BOLD() + '*** Generating ' + DNANEXUS_LOGO() + BOLD() + ' App Template... ***' + ENDC())

    with open(os.path.join(name, 'dxapp.json'), 'w') as prog_file:
        prog_file.write(clean(json.dumps(app_json, indent=2)) + '\n')
    manifest.append(os.path.join(name, 'dxapp.json'))

    print('')
    print(fill('''Your app specification has been written to the
dxapp.json file. You can specify more app options by editing this file
directly (see https://wiki.dnanexus.com/Developer-Portal for complete
documentation).''' + ('''  Note that without an input and output specification,
your app can only be built as an APPLET on the system.  To publish it to
the DNAnexus community, you must first specify your inputs and outputs.
''' if not ('inputSpec' in app_json and 'outputSpec' in app_json) else "")))
    print('')

    for subdir in 'src', 'test', 'resources':
        try:
            os.mkdir(os.path.join(name, subdir))
            manifest.append(os.path.join(name, subdir, ''))
        except:
            sys.stderr.write("Unable to create subdirectory %s/%s" % (name, subdir))
            sys.exit(1)

    entry_points = ['main']

    if pattern == 'parallelized':
        entry_points = ['main', 'process', 'postprocess']
    elif pattern == 'scatter-process-gather':
        entry_points = ['main', 'scatter', 'map', 'process', 'postprocess']

    manifest += create_files_from_templates(template_dir, app_json, language,
                                            required_file_input_names, optional_file_input_names,
                                            required_file_array_input_names, optional_file_array_input_names,
                                            file_output_names, pattern,
                                            description='<!-- Insert a description of your app here -->',
                                            entry_points=entry_points)

    print("Created files:")
    for filename in sorted(manifest):
        print("\t", filename)
    print("\n" + fill('''App directory created!  See
https://wiki.dnanexus.com/Developer-Portal for tutorials on how to modify these files,
or run "dx build {n}" or "dx build --create-app {n}" while logged in with dx.'''.format(n=name)) + "\n")
    print(fill('''Running the DNAnexus build utility will create an executable on the DNAnexus platform.  Any files found in the ''' +
            BOLD() + 'resources' + ENDC() +
            ''' directory will be uploaded so that they will be present in the root directory when the executable is run.'''))
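
dx-app-wizard treats stderr as the channel for fatal, human-readable errors: wrap the message for the terminal, write it, exit non-zero. A compact sketch of that shape, using textwrap.fill from the standard library in place of the dxpy fill() helper:

import sys
import textwrap

def die(message, code=1):
    # Mirror the error paths above: wrapped message to stderr, then a
    # non-zero exit status so callers and shells see the failure.
    sys.stderr.write(textwrap.fill(message) + "\n")
    sys.exit(code)

if __name__ == "__main__":
    die("Unable to create a directory for my_app, please check that it is a valid "
        "app name and the working directory exists and is writable.")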

Example 40

View license
def process_net_command(py_db, cmd_id, seq, text):
    '''Processes a command received from the Java side

    @param cmd_id: the id of the command
    @param seq: the sequence of the command
    @param text: the text received in the command

    @note: this method is run as a big switch... after doing some tests, it's not clear whether changing it for
    a dict id --> function call will have better performance result. A simple test with xrange(10000000) showed
    that the gains from having a fast access to what should be executed are lost because of the function call in
    a way that if we had 10 elements in the switch the if..elif are better -- but growing the number of choices
    makes the solution with the dispatch look better -- so, if this gets more than 20-25 choices at some time,
    it may be worth refactoring it (actually, reordering the ifs so that the ones used mostly come before
    probably will give better performance).
    '''
    # print(ID_TO_MEANING[str(cmd_id)], repr(text))

    py_db._main_lock.acquire()
    try:
        try:
            cmd = None
            if cmd_id == CMD_RUN:
                py_db.ready_to_run = True

            elif cmd_id == CMD_VERSION:
                # response is version number
                # ide_os should be 'WINDOWS' or 'UNIX'.
                ide_os = 'WINDOWS'

                # Breakpoints can be grouped by 'LINE' or by 'ID'.
                breakpoints_by = 'LINE'

                splitted = text.split('\t')
                if len(splitted) == 1:
                    _local_version = splitted

                elif len(splitted) == 2:
                    _local_version, ide_os = splitted

                elif len(splitted) == 3:
                    _local_version, ide_os, breakpoints_by = splitted

                if breakpoints_by == 'ID':
                    py_db._set_breakpoints_with_id = True
                else:
                    py_db._set_breakpoints_with_id = False

                pydevd_file_utils.set_ide_os(ide_os)

                cmd = py_db.cmd_factory.make_version_message(seq)

            elif cmd_id == CMD_LIST_THREADS:
                # response is a list of threads
                cmd = py_db.cmd_factory.make_list_threads_message(seq)

            elif cmd_id == CMD_THREAD_KILL:
                int_cmd = InternalTerminateThread(text)
                py_db.post_internal_command(int_cmd, text)

            elif cmd_id == CMD_THREAD_SUSPEND:
                # Yes, thread suspend is still done at this point, not through an internal command!
                t = pydevd_find_thread_by_id(text)
                if t:
                    additional_info = None
                    try:
                        additional_info = t.additional_info
                    except AttributeError:
                        pass  # that's ok, no info currently set

                    if additional_info is not None:
                        for frame in additional_info.iter_frames(t):
                            py_db.set_trace_for_frame_and_parents(frame)
                            del frame

                    py_db.set_suspend(t, CMD_THREAD_SUSPEND)
                elif text.startswith('__frame__:'):
                    sys.stderr.write("Can't suspend tasklet: %s\n" % (text,))

            elif cmd_id == CMD_THREAD_RUN:
                t = pydevd_find_thread_by_id(text)
                if t:
                    thread_id = get_thread_id(t)
                    int_cmd = InternalRunThread(thread_id)
                    py_db.post_internal_command(int_cmd, thread_id)

                elif text.startswith('__frame__:'):
                    sys.stderr.write("Can't make tasklet run: %s\n" % (text,))


            elif cmd_id == CMD_STEP_INTO or cmd_id == CMD_STEP_OVER or cmd_id == CMD_STEP_RETURN or \
                    cmd_id == CMD_STEP_INTO_MY_CODE:
                # we received some command to make a single step
                t = pydevd_find_thread_by_id(text)
                if t:
                    thread_id = get_thread_id(t)
                    int_cmd = InternalStepThread(thread_id, cmd_id)
                    py_db.post_internal_command(int_cmd, thread_id)

                elif text.startswith('__frame__:'):
                    sys.stderr.write("Can't make tasklet step command: %s\n" % (text,))


            elif cmd_id == CMD_RUN_TO_LINE or cmd_id == CMD_SET_NEXT_STATEMENT or cmd_id == CMD_SMART_STEP_INTO:
                # we received some command to make a single step
                thread_id, line, func_name = text.split('\t', 2)
                t = pydevd_find_thread_by_id(thread_id)
                if t:
                    int_cmd = InternalSetNextStatementThread(thread_id, cmd_id, line, func_name)
                    py_db.post_internal_command(int_cmd, thread_id)
                elif thread_id.startswith('__frame__:'):
                    sys.stderr.write("Can't set next statement in tasklet: %s\n" % (thread_id,))


            elif cmd_id == CMD_RELOAD_CODE:
                # we received some command to make a reload of a module
                module_name = text.strip()

                thread_id = '*'  # Any thread

                # Note: not going for the main thread because in this case it'd only do the load
                # when we stopped on a breakpoint.
                # for tid, t in py_db._running_thread_ids.items(): #Iterate in copy
                #    thread_name = t.getName()
                #
                #    print thread_name, get_thread_id(t)
                #    #Note: if possible, try to reload on the main thread
                #    if thread_name == 'MainThread':
                #        thread_id = tid

                int_cmd = ReloadCodeCommand(module_name, thread_id)
                py_db.post_internal_command(int_cmd, thread_id)


            elif cmd_id == CMD_CHANGE_VARIABLE:
                # the text is: thread\tstackframe\tFRAME|GLOBAL\tattribute_to_change\tvalue_to_change
                try:
                    thread_id, frame_id, scope, attr_and_value = text.split('\t', 3)

                    tab_index = attr_and_value.rindex('\t')
                    attr = attr_and_value[0:tab_index].replace('\t', '.')
                    value = attr_and_value[tab_index + 1:]
                    int_cmd = InternalChangeVariable(seq, thread_id, frame_id, scope, attr, value)
                    py_db.post_internal_command(int_cmd, thread_id)

                except:
                    traceback.print_exc()

            elif cmd_id == CMD_GET_VARIABLE:
                # we received some command to get a variable
                # the text is: thread_id\tframe_id\tFRAME|GLOBAL\tattributes*
                try:
                    thread_id, frame_id, scopeattrs = text.split('\t', 2)

                    if scopeattrs.find('\t') != -1:  # there are attributes beyond scope
                        scope, attrs = scopeattrs.split('\t', 1)
                    else:
                        scope, attrs = (scopeattrs, None)

                    int_cmd = InternalGetVariable(seq, thread_id, frame_id, scope, attrs)
                    py_db.post_internal_command(int_cmd, thread_id)

                except:
                    traceback.print_exc()

            elif cmd_id == CMD_GET_ARRAY:
                # we received some command to get an array variable
                # the text is: thread_id\tframe_id\tFRAME|GLOBAL\tname\ttemp\troffs\tcoffs\trows\tcols\tformat
                try:
                    roffset, coffset, rows, cols, format, thread_id, frame_id, scopeattrs  = text.split('\t', 7)

                    if scopeattrs.find('\t') != -1:  # there are attributes beyond scope
                        scope, attrs = scopeattrs.split('\t', 1)
                    else:
                        scope, attrs = (scopeattrs, None)

                    int_cmd = InternalGetArray(seq, roffset, coffset, rows, cols, format, thread_id, frame_id, scope, attrs)
                    py_db.post_internal_command(int_cmd, thread_id)

                except:
                    traceback.print_exc()

            elif cmd_id == CMD_GET_COMPLETIONS:
                # we received some command to get a variable
                # the text is: thread_id\tframe_id\tactivation token
                try:
                    thread_id, frame_id, scope, act_tok = text.split('\t', 3)

                    int_cmd = InternalGetCompletions(seq, thread_id, frame_id, act_tok)
                    py_db.post_internal_command(int_cmd, thread_id)

                except:
                    traceback.print_exc()

            elif cmd_id == CMD_GET_FRAME:
                thread_id, frame_id, scope = text.split('\t', 2)

                int_cmd = InternalGetFrame(seq, thread_id, frame_id)
                py_db.post_internal_command(int_cmd, thread_id)

            elif cmd_id == CMD_SET_BREAK:
                # func name: 'None': match anything. Empty: match global, specified: only method context.
                # command to add some breakpoint.
                # text is file\tline. Add to breakpoints dictionary
                if py_db._set_breakpoints_with_id:
                    breakpoint_id, type, file, line, func_name, condition, expression = text.split('\t', 6)

                    breakpoint_id = int(breakpoint_id)
                    line = int(line)

                    # We must restore new lines and tabs as done in
                    # AbstractDebugTarget.breakpointAdded
                    condition = condition.replace("@[email protected][email protected][email protected]", '\n').\
                        replace("@[email protected][email protected][email protected]", '\t').strip()

                    expression = expression.replace("@[email protected][email protected][email protected]", '\n').\
                        replace("@[email protected][email protected][email protected]", '\t').strip()
                else:
                    #Note: this else should be removed after PyCharm migrates to setting
                    #breakpoints by id (and ideally also provides func_name).
                    type, file, line, func_name, condition, expression = text.split('\t', 5)
                    # If we don't have an id given for each breakpoint, consider
                    # the id to be the line.
                    breakpoint_id = line = int(line)

                    condition = condition.replace("@[email protected][email protected][email protected]", '\n'). \
                        replace("@[email protected][email protected][email protected]", '\t').strip()

                    expression = expression.replace("@[email protected][email protected][email protected]", '\n'). \
                        replace("@[email protected][email protected][email protected]", '\t').strip()

                if not IS_PY3K:  # In Python 3, the frame object will have unicode for the file, whereas on python 2 it has a byte-array encoded with the filesystem encoding.
                    file = file.encode(file_system_encoding)

                file = pydevd_file_utils.norm_file_to_server(file)

                if not pydevd_file_utils.exists(file):
                    sys.stderr.write('pydev debugger: warning: trying to add breakpoint'\
                        ' to file that does not exist: %s (will have no effect)\n' % (file,))
                    sys.stderr.flush()


                if len(condition) <= 0 or condition is None or condition == "None":
                    condition = None

                if len(expression) <= 0 or expression is None or expression == "None":
                    expression = None

                supported_type = False
                if type == 'python-line':
                    breakpoint = LineBreakpoint(line, condition, func_name, expression)
                    breakpoints = py_db.breakpoints
                    file_to_id_to_breakpoint = py_db.file_to_id_to_line_breakpoint
                    supported_type = True
                else:
                    result = None
                    plugin = py_db.get_plugin_lazy_init()
                    if plugin is not None:
                        result = plugin.add_breakpoint('add_line_breakpoint', py_db, type, file, line, condition, expression, func_name)
                    if result is not None:
                        supported_type = True
                        breakpoint, breakpoints = result
                        file_to_id_to_breakpoint = py_db.file_to_id_to_plugin_breakpoint
                    else:
                        supported_type = False

                if not supported_type:
                    raise NameError(type)

                if DebugInfoHolder.DEBUG_TRACE_BREAKPOINTS > 0:
                    pydev_log.debug('Added breakpoint:%s - line:%s - func_name:%s\n' % (file, line, func_name.encode('utf-8')))
                    sys.stderr.flush()

                if dict_contains(file_to_id_to_breakpoint, file):
                    id_to_pybreakpoint = file_to_id_to_breakpoint[file]
                else:
                    id_to_pybreakpoint = file_to_id_to_breakpoint[file] = {}

                id_to_pybreakpoint[breakpoint_id] = breakpoint
                py_db.consolidate_breakpoints(file, id_to_pybreakpoint, breakpoints)
                if py_db.plugin is not None:
                    py_db.has_plugin_line_breaks = py_db.plugin.has_line_breaks()

                py_db.set_tracing_for_untraced_contexts(overwrite_prev_trace=True)

            elif cmd_id == CMD_REMOVE_BREAK:
                #command to remove some breakpoint
                #text is type\tfile\tid. Remove from breakpoints dictionary
                breakpoint_type, file, breakpoint_id = text.split('\t', 2)

                if not IS_PY3K:  # In Python 3, the frame object will have unicode for the file, whereas on python 2 it has a byte-array encoded with the filesystem encoding.
                    file = file.encode(file_system_encoding)

                file = pydevd_file_utils.norm_file_to_server(file)

                try:
                    breakpoint_id = int(breakpoint_id)
                except ValueError:
                    pydev_log.error('Error removing breakpoint. Expected breakpoint_id to be an int. Found: %s' % (breakpoint_id,))

                else:
                    file_to_id_to_breakpoint = None
                    if breakpoint_type == 'python-line':
                        breakpoints = py_db.breakpoints
                        file_to_id_to_breakpoint = py_db.file_to_id_to_line_breakpoint
                    elif py_db.get_plugin_lazy_init() is not None:
                        result = py_db.plugin.get_breakpoints(py_db, breakpoint_type)
                        if result is not None:
                            file_to_id_to_breakpoint = py_db.file_to_id_to_plugin_breakpoint
                            breakpoints = result

                    if file_to_id_to_breakpoint is None:
                        pydev_log.error('Error removing breakpoint. Cant handle breakpoint of type %s' % breakpoint_type)
                    else:
                        try:
                            id_to_pybreakpoint = file_to_id_to_breakpoint.get(file, {})
                            if DebugInfoHolder.DEBUG_TRACE_BREAKPOINTS > 0:
                                existing = id_to_pybreakpoint[breakpoint_id]
                                sys.stderr.write('Removed breakpoint:%s - line:%s - func_name:%s (id: %s)\n' % (
                                    file, existing.line, existing.func_name.encode('utf-8'), breakpoint_id))

                            del id_to_pybreakpoint[breakpoint_id]
                            py_db.consolidate_breakpoints(file, id_to_pybreakpoint, breakpoints)
                            if py_db.plugin is not None:
                                py_db.has_plugin_line_breaks = py_db.plugin.has_line_breaks()

                        except KeyError:
                            pydev_log.error("Error removing breakpoint: Breakpoint id not found: %s id: %s. Available ids: %s\n" % (
                                file, breakpoint_id, dict_keys(id_to_pybreakpoint)))


            elif cmd_id == CMD_EVALUATE_EXPRESSION or cmd_id == CMD_EXEC_EXPRESSION:
                #command to evaluate the given expression
                #text is: thread\tstackframe\tLOCAL\texpression
                thread_id, frame_id, scope, expression, trim = text.split('\t', 4)
                int_cmd = InternalEvaluateExpression(seq, thread_id, frame_id, expression,
                    cmd_id == CMD_EXEC_EXPRESSION, int(trim) == 1)
                py_db.post_internal_command(int_cmd, thread_id)

            elif cmd_id == CMD_CONSOLE_EXEC:
                #command to exec expression in console, in case expression is only partially valid 'False' is returned
                #text is: thread\tstackframe\tLOCAL\texpression

                thread_id, frame_id, scope, expression = text.split('\t', 3)

                int_cmd = InternalConsoleExec(seq, thread_id, frame_id, expression)
                py_db.post_internal_command(int_cmd, thread_id)

            elif cmd_id == CMD_SET_PY_EXCEPTION:
                # Command which receives set of exceptions on which user wants to break the debugger
                # text is: break_on_uncaught;break_on_caught;TypeError;ImportError;zipimport.ZipImportError;
                # This API is optional and works 'in bulk' -- it's possible
                # to get finer-grained control with CMD_ADD_EXCEPTION_BREAK/CMD_REMOVE_EXCEPTION_BREAK
                # which allows setting caught/uncaught per exception.
                #
                splitted = text.split(';')
                py_db.break_on_uncaught_exceptions = {}
                py_db.break_on_caught_exceptions = {}
                added = []
                if len(splitted) >= 4:
                    if splitted[0] == 'true':
                        break_on_uncaught = True
                    else:
                        break_on_uncaught = False

                    if splitted[1] == 'true':
                        break_on_caught = True
                    else:
                        break_on_caught = False

                    if splitted[2] == 'true':
                        py_db.break_on_exceptions_thrown_in_same_context = True
                    else:
                        py_db.break_on_exceptions_thrown_in_same_context = False

                    if splitted[3] == 'true':
                        py_db.ignore_exceptions_thrown_in_lines_with_ignore_exception = True
                    else:
                        py_db.ignore_exceptions_thrown_in_lines_with_ignore_exception = False

                    for exception_type in splitted[4:]:
                        exception_type = exception_type.strip()
                        if not exception_type:
                            continue

                        exception_breakpoint = py_db.add_break_on_exception(
                            exception_type,
                            notify_always=break_on_caught,
                            notify_on_terminate=break_on_uncaught,
                            notify_on_first_raise_only=False,
                        )
                        if exception_breakpoint is None:
                            continue
                        added.append(exception_breakpoint)

                    py_db.update_after_exceptions_added(added)

                else:
                    sys.stderr.write("Error when setting exception list. Received: %s\n" % (text,))

            elif cmd_id == CMD_GET_FILE_CONTENTS:

                if not IS_PY3K:  # In Python 3, the frame object will have unicode for the file, whereas on python 2 it has a byte-array encoded with the filesystem encoding.
                    text = text.encode(file_system_encoding)

                if os.path.exists(text):
                    f = open(text, 'r')
                    try:
                        source = f.read()
                    finally:
                        f.close()
                    cmd = py_db.cmd_factory.make_get_file_contents(seq, source)

            elif cmd_id == CMD_SET_PROPERTY_TRACE:
                # Command which receives whether to trace property getter/setter/deleter
                # text is: feature_state(true/false);disable_getter;disable_setter;disable_deleter
                if text != "":
                    splitted = text.split(';')
                    if len(splitted) >= 4:  # feature state + getter/setter/deleter flags
                        if py_db.disable_property_trace is False and splitted[0] == 'true':
                            # Replacing property by custom property only when the debugger starts
                            pydevd_traceproperty.replace_builtin_property()
                            py_db.disable_property_trace = True
                        # Enable/Disable tracing of the property getter
                        if splitted[1] == 'true':
                            py_db.disable_property_getter_trace = True
                        else:
                            py_db.disable_property_getter_trace = False
                        # Enable/Disable tracing of the property setter
                        if splitted[2] == 'true':
                            py_db.disable_property_setter_trace = True
                        else:
                            py_db.disable_property_setter_trace = False
                        # Enable/Disable tracing of the property deleter
                        if splitted[3] == 'true':
                            py_db.disable_property_deleter_trace = True
                        else:
                            py_db.disable_property_deleter_trace = False
                else:
                    # User hasn't configured any settings for property tracing
                    pass

            elif cmd_id == CMD_ADD_EXCEPTION_BREAK:
                if text.find('\t') != -1:
                    exception, notify_always, notify_on_terminate, ignore_libraries = text.split('\t', 3)
                else:
                    exception, notify_always, notify_on_terminate, ignore_libraries = text, 0, 0, 0

                if exception.find('-') != -1:
                    breakpoint_type, exception = exception.split('-')
                else:
                    breakpoint_type = 'python'

                if breakpoint_type == 'python':
                    if int(notify_always) == 1:
                        pydev_log.warn("Deprecated parameter: 'notify always' policy removed in PyCharm\n")
                    exception_breakpoint = py_db.add_break_on_exception(
                        exception,
                        notify_always=int(notify_always) > 0,
                        notify_on_terminate = int(notify_on_terminate) == 1,
                        notify_on_first_raise_only=int(notify_always) == 2,
                        ignore_libraries=int(ignore_libraries) > 0
                    )

                    if exception_breakpoint is not None:
                        py_db.update_after_exceptions_added([exception_breakpoint])
                else:
                    supported_type = False
                    plugin = py_db.get_plugin_lazy_init()
                    if plugin is not None:
                        supported_type = plugin.add_breakpoint('add_exception_breakpoint', py_db, breakpoint_type, exception)

                    if supported_type:
                        py_db.has_plugin_exception_breaks = py_db.plugin.has_exception_breaks()
                    else:
                        raise NameError(breakpoint_type)



            elif cmd_id == CMD_REMOVE_EXCEPTION_BREAK:
                exception = text
                if exception.find('-') != -1:
                    exception_type, exception = exception.split('-')
                else:
                    exception_type = 'python'

                if exception_type == 'python':
                    try:
                        cp = py_db.break_on_uncaught_exceptions.copy()
                        dict_pop(cp, exception, None)
                        py_db.break_on_uncaught_exceptions = cp

                        cp = py_db.break_on_caught_exceptions.copy()
                        dict_pop(cp, exception, None)
                        py_db.break_on_caught_exceptions = cp
                    except:
                        pydev_log.debug("Error while removing exception %s"%sys.exc_info()[0])
                    update_exception_hook(py_db)
                else:
                    supported_type = False

                    # I.e.: no need to initialize lazy (if we didn't have it in the first place, we can't remove
                    # anything from it anyways).
                    plugin = py_db.plugin
                    if plugin is not None:
                        supported_type = plugin.remove_exception_breakpoint(py_db, exception_type, exception)

                    if supported_type:
                        py_db.has_plugin_exception_breaks = py_db.plugin.has_exception_breaks()
                    else:
                        raise NameError(exception_type)

            elif cmd_id == CMD_LOAD_SOURCE:
                path = text
                try:
                    f = open(path, 'r')
                    source = f.read()
                    py_db.cmd_factory.make_load_source_message(seq, source, py_db)
                except:
                    return py_db.cmd_factory.make_error_message(seq, pydevd_tracing.get_exception_traceback_str())

            elif cmd_id == CMD_ADD_DJANGO_EXCEPTION_BREAK:
                exception = text
                plugin = py_db.get_plugin_lazy_init()
                if plugin is not None:
                    plugin.add_breakpoint('add_exception_breakpoint', py_db, 'django', exception)
                    py_db.has_plugin_exception_breaks = py_db.plugin.has_exception_breaks()


            elif cmd_id == CMD_REMOVE_DJANGO_EXCEPTION_BREAK:
                exception = text

                # I.e.: no need to initialize lazy (if we didn't have it in the first place, we can't remove
                # anything from it anyways).
                plugin = py_db.plugin
                if plugin is not None:
                    plugin.remove_exception_breakpoint(py_db, 'django', exception)
                    py_db.has_plugin_exception_breaks = py_db.plugin.has_exception_breaks()

            elif cmd_id == CMD_EVALUATE_CONSOLE_EXPRESSION:
                # Command which takes care for the debug console communication
                if text != "":
                    thread_id, frame_id, console_command = text.split('\t', 2)
                    console_command, line = console_command.split('\t')

                    if console_command == 'EVALUATE':
                        int_cmd = InternalEvaluateConsoleExpression(
                            seq, thread_id, frame_id, line, buffer_output=True)

                    elif console_command == 'EVALUATE_UNBUFFERED':
                        int_cmd = InternalEvaluateConsoleExpression(
                            seq, thread_id, frame_id, line, buffer_output=False)

                    elif console_command == 'GET_COMPLETIONS':
                        int_cmd = InternalConsoleGetCompletions(seq, thread_id, frame_id, line)

                    else:
                        raise ValueError('Unrecognized command: %s' % (console_command,))

                    py_db.post_internal_command(int_cmd, thread_id)

            elif cmd_id == CMD_RUN_CUSTOM_OPERATION:
                # Command which runs a custom operation
                if text != "":
                    try:
                        location, custom = text.split('||', 1)
                    except:
                        sys.stderr.write('Custom operation now needs a || separator. Found: %s\n' % (text,))
                        raise

                    thread_id, frame_id, scopeattrs = location.split('\t', 2)

                    if scopeattrs.find('\t') != -1:  # there are attributes beyond scope
                        scope, attrs = scopeattrs.split('\t', 1)
                    else:
                        scope, attrs = (scopeattrs, None)

                    # : style: EXECFILE or EXEC
                    # : encoded_code_or_file: file to execute or code
                    # : fname: name of function to be executed in the resulting namespace
                    style, encoded_code_or_file, fnname = custom.split('\t', 3)
                    int_cmd = InternalRunCustomOperation(seq, thread_id, frame_id, scope, attrs,
                                                         style, encoded_code_or_file, fnname)
                    py_db.post_internal_command(int_cmd, thread_id)

            elif cmd_id == CMD_IGNORE_THROWN_EXCEPTION_AT:
                if text:
                    replace = 'REPLACE:'  # Not all 3.x versions support u'REPLACE:', so, doing workaround.
                    if not IS_PY3K:
                        replace = unicode(replace)

                    if text.startswith(replace):
                        text = text[8:]
                        py_db.filename_to_lines_where_exceptions_are_ignored.clear()

                    if text:
                        for line in text.split('||'):  # Can be bulk-created (one in each line)
                            filename, line_number = line.split('|')
                            if not IS_PY3K:
                                filename = filename.encode(file_system_encoding)

                            filename = pydevd_file_utils.norm_file_to_server(filename)

                            if os.path.exists(filename):
                                lines_ignored = py_db.filename_to_lines_where_exceptions_are_ignored.get(filename)
                                if lines_ignored is None:
                                    lines_ignored = py_db.filename_to_lines_where_exceptions_are_ignored[filename] = {}
                                lines_ignored[int(line_number)] = 1
                            else:
                                sys.stderr.write('pydev debugger: warning: trying to ignore exception thrown'\
                                    ' on file that does not exist: %s (will have no effect)\n' % (filename,))

            elif cmd_id == CMD_ENABLE_DONT_TRACE:
                if text:
                    true_str = 'true'  # Not all 3.x versions support u'str', so, doing workaround.
                    if not IS_PY3K:
                        true_str = unicode(true_str)

                    mode = text.strip() == true_str
                    pydevd_dont_trace.trace_filter(mode)

            else:
                #I have no idea what this is all about
                cmd = py_db.cmd_factory.make_error_message(seq, "unexpected command " + str(cmd_id))

            if cmd is not None:
                py_db.writer.add_command(cmd)
                del cmd

        except Exception:
            traceback.print_exc()
            from _pydev_bundle.pydev_imports import StringIO
            stream = StringIO()
            traceback.print_exc(file=stream)
            cmd = py_db.cmd_factory.make_error_message(
                seq,
                "Unexpected exception in process_net_command.\nInitial params: %s. Exception: %s" % (
                    ((cmd_id, seq, text), stream.getvalue())
                )
            )

            py_db.writer.add_command(cmd)
    finally:
        py_db._main_lock.release()
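
A minimal sketch of the CMD_SET_PY_EXCEPTION payload handled above; the semicolon-separated layout (four boolean flags, then exception names) follows the handler's own parsing, while the concrete values and variable names here are only illustrative:

# Hypothetical payload, mirroring how the handler splits it:
text = 'true;false;false;false;TypeError;zipimport.ZipImportError'
splitted = text.split(';')
break_on_uncaught = splitted[0] == 'true'       # break when an exception is about to terminate the program
break_on_caught = splitted[1] == 'true'         # break on every raise
break_in_same_context = splitted[2] == 'true'   # maps to break_on_exceptions_thrown_in_same_context
ignore_marked_lines = splitted[3] == 'true'     # maps to ignore_exceptions_thrown_in_lines_with_ignore_exception
exception_names = [s.strip() for s in splitted[4:] if s.strip()]
# exception_names == ['TypeError', 'zipimport.ZipImportError']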

Example 41

Project: tools-iuc
Source File: __init__.py
View license
def _read_calc_variations(start_pos, cigar, md, seq):
    '''
    For each variation, outputs a tuple: (op, pos, seq)

    op  - operation (0 = mismatch, 1 = insert, 2 = deletion) (like CIGAR)
    pos - 0-based position of the variation (relative to reference)
    seq - the base (or bases) involved in the variation
          for mismatch or insert, this is the sequence inserted
          for deletions, this is the reference sequence that was removed

    MD is the mismatch string. Not all aligners include the tag. If your aligner
    doesn't include this, then you'll need to add it, or use a different function
    (see: read_calc_mismatches_gen).

    Special care must be used to handle RNAseq reads that cross
    an exon-exon junction.

    Also: MD is a *really* dumb format that can't be read correctly with
          a regex. It must be processed in concert with the CIGAR alignment
          in order to catch all edge cases. Some implementations insert 0's
          at the end of inserts / deletions / variations to make parsing easier
          but not everyone follows this. Look at the complex examples: the
          CIGAR alignment may show an insert, but the MD just shows all matches.

    Examples: See: http://davetang.org/muse/2011/01/28/perl-and-sam/
              Also from CCBB actual mappings and manually altered (shortened,
              made more complex)
              (doctests included)

    Match/mismatch
    CIGAR: 36M
    MD:Z:  1A0C0C0C1T0C0T27
    MD:Z:  1ACCC1TCT27 (alternative)
                   1         2
          123456789012345678901234567890123456
    ref:  CGATACGGGGACATCCGGCCTGCTCCTTCTCACATG
           XXXX XXX
    read: CACCCCTCTGACATCCGGCCTGCTCCTTCTCACATG
          MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMM
          -ACCC-TCT---------------------------
    >>> list(_read_calc_variations(1, [(0,36)], '1A0C0C0C1T0C0T27', 'CACCCCTCTGACATCCGGCCTGCTCCTTCTCACATG'))
    [(0, 2, 'A'), (0, 3, 'C'), (0, 4, 'C'), (0, 5, 'C'), (0, 7, 'T'), (0, 8, 'C'), (0, 9, 'T')]

    Insert
    CIGAR: 6M1I29M
    MD:Z: 0C1C0C1C0T0C27
          C1CC1CTC27 (alt)
                    1         2
          123456^789012345678901234567890123456
    ref:  CACCCC^TCTGACATCCGGCCTGCTCCTTCTCACAT
          X XX X|XX
    read: GAGACGGGGTGACATCCGGCCTGCTCCTTCTCACAT
          MMMMMMIMMMMMMMMMMMMMMMMMMMMMMMMMMMMM
          G-GA-GGGG---------------------------
    >>> list(_read_calc_variations(1, [(0,6), (1,1), (0, 29)], '0C1C0C1C0T0C27', 'GAGACGGGGTGACATCCGGCCTGCTCCTTCTCACAT'))
    [(0, 1, 'G'), (0, 3, 'G'), (0, 4, 'A'), (0, 6, 'G'), (1, 7, 'G'), (0, 7, 'G'), (0, 8, 'G')]
    >>> list(_read_calc_variations(1, [(0,6), (1,1), (0, 29)], 'C1CC1CTC27', 'GAGACGGGGTGACATCCGGCCTGCTCCTTCTCACAT'))
    [(0, 1, 'G'), (0, 3, 'G'), (0, 4, 'A'), (0, 6, 'G'), (1, 7, 'G'), (0, 7, 'G'), (0, 8, 'G')]


    Deletion
    CIGAR: 9M9D27M
    MD:Z: 2G0A5^ATGATGTCA27
          2GA5^ATGATGTCA27 (alt)
    ref:  AGGAATGGGATGATGTCAGGGGTTCCAGGTGGAGACGAGGACTCC
            XX     ^^^^^^^^^
    read: AGTGATGGG^^^^^^^^^GGGGTTCCAGGTGGAGACGAGGACTCC
          MMMMMMMMMDDDDDDDDDMMMMMMMMMMMMMMMMMMMMMMMMMMM
          --TG-----ATGATGTCA---------------------------
    >>> list(_read_calc_variations(1, [(0,9), (2,9), (0, 27)], '2G0A5^ATGATGTCA27', 'AGTGATGGGGGGGTTCCAGGTGGAGACGAGGACTCC'))
    [(0, 3, 'T'), (0, 4, 'G'), (2, 10, 'ATGATGTCA')]


    Complex
    CIGAR: 9M9D11M1I15M
    MD:Z: 2G0A5^ATGATGTCAA26
    MD:Z: 2G0A5^ATGATGTCA0G26 (alt)
                   1         2         3         4
    pos:  123456789012345678901234567890123456789012345
    ref:  AGGAATGGGATGATGTCAGGGGTTCCAGG^GGAGACGAGGACTCC
            XX     ^^^^^^^^^X          |
    read: AGTGATGGG^^^^^^^^^AGGGTTCCAGGTGGAGACGAGGACTCC
          MMMMMMMMMDDDDDDDDDMMMMMMMMMMMMMMMMMMMMMMMMMMM
          --TG-----ATGATGTCAG----------T---------------
    >>> list(_read_calc_variations(1, [(0,9), (2,9), (0,11), (1,1), (0,15)], '2G0A5^ATGATGTCAA26', 'AGTGATGGGGGGGTTCCAGGTGGAGACGAGGACTCC'))
    [(0, 3, 'T'), (0, 4, 'G'), (2, 10, 'ATGATGTCA'), (0, 19, 'G'), (1, 30, 'T')]


    Complex example - inserts aren't separately handled by MD, only visible in CIGAR
    CIGAR: 14M2D16M3I42M
    MD:Z:  14^TC58
                   1         2         3            4         5         6         7
    pos:  12345678901234567890123456789012^^^345678901234567890123456789012345678901234567
    ref:  caagtatcaccatgtcaggcatttttttcatt^^^tttgtagagagagaagacttgctatgttgcccaagctggcct
                        ^^                |||
    read: CAAGTATCACCATG^^AGGCATTTTTTTCATTTGGTTTGTAGAGAGAGAAGACTTGCTATGTTGCCCAAGCTGGCCT
          MMMMMMMMMMMMMMDDMMMMMMMMMMMMMMMMIIIMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMM
          --------------tc----------------TGG------------------------------------------
    >>> list(_read_calc_variations(1, [(0,14), (2,2), (0,16), (1,3), (0,42)], '14^TC58', 'CAAGTATCACCATGAGGCATTTTTTTCATTTGGTTTGTAGAGAGAGAAGACTTGCTATGTTGCCCAAGCTGGCCT'))
    [(2, 15, 'TC'), (1, 33, 'TGG')]


    Complex example 2:
    CIGAR: 41M3I10M1I5M1I2M2I10M
    MD:Z:  44C2C6T6T6
                   1         2         3         4            5             6
    pos:  12345678901234567890123456789012345678901^^^2345678901^23456^78^^9012345678
    ref:  AGGGTGGCGAGATCGATGACGGCATTGGCGATGGTGATCTT^^^GAGCCACATG^CGGTC^GC^^GGATCTCCAG
                                                   |||   X  X   |   X |  ||   X
    read: AGGGTGGCGAGATCGATGACGGCATTGGCGATGGTGATCTTTTAGAGACATATGCCGGACGGCGTGGAGCTCCAG
          MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMIIIMMMMMMMMMMIMMMMMIMMIIMMMMMMMMMM
          -----------------------------------------tta---A--T---c---A----gt---G------


    13M 28M 3I 10M 1I 5M 1I 2M 2I 10M
    >>> list(_read_calc_variations(1, [(0, 41), (1, 3), (0, 10), (1, 1), (0, 5), (1, 1), (0, 2), (1, 2), (0, 10)], '44C2C6T6T6', 'AGGGTGGCGAGATCGATGACGGCATTGGCGATGGTGATCTTTTAGAGACATATGCCGGACGGCGTGGAGCTCCAG'))
    [(1, 42, 'TTA'), (0, 45, 'A'), (0, 48, 'T'), (1, 52, 'C'), (0, 55, 'A'), (1, 57, 'G'), (1, 59, 'GT'), (0, 62, 'G')]


    Splice junction example:
    CIGAR: 62M100N13M
    MD:Z:  2T27C44
                                                                                 1      1
                   1         2         3         4         5         6           6      7
    pos:  12345678901234567890123456789012345678901234567890123456789012| [100] |3456789012345
    ref:  CCTCATGACCAGCTTGTTGAAGAGATCCGACATCAAGTGCCCACCTTGGCTCGTGGCTCTCA|-------|CTTGCTCCTGCTC
            X                           X
    read: CCGCATGACCAGCTTGTTGAAGAGATCCGATATCAAGTGCCCACCTTGGCTCGTGGCTCTCA|-------|CTTGCTCCTGCTC
          --G---------------------------T-----------------------------------------------------

    >>> list(_read_calc_variations(1, [(0,62), (4,100), (0,13)], '2T27C44', 'CCGCATGACCAGCTTGTTGAAGAGATCCGATATCAAGTGCCCACCTTGGCTCGTGGCTCTCACTTGCTCCTGCTC'))
    [(0, 3, 'G'), (0, 31, 'T')]


    Splice junction example 2:
    CIGAR: 13M100N28M3I10M1I5M1I2M2I10M
    MD:Z:  44C2C6T6T6
                                      1         1         1            1             1
                   1                  2         3         4            5             6
    pos:  1234567890123| [100] |4567890123456789012345678901^^^2345678901^23456^78^^9012345678
    ref:  AGGGTGGCGAGAT|-------|CGATGACGGCATTGGCGATGGTGATCTT^^^GAGCCACATG^CGGTC^GC^^GGATCTCCAG
                                                            |||   X  X   |   X |  ||   X
    read: AGGGTGGCGAGAT|-------|CGATGACGGCATTGGCGATGGTGATCTTTTAGAGACATATGCCGGACGGCGTGGAGCTCCAG
          MMMMMMMMMMMMM         MMMMMMMMMMMMMMMMMMMMMMMMMMMMIIIMMMMMMMMMMIMMMMMIMMIIMMMMMMMMMM
          -------------         ----------------------------tta---A--T---c---A----gt---G------

    13M 100N 28M 3I 10M 1I 5M 1I 2M 2I 10M
    >>> list(_read_calc_variations(1, [(0, 13), (3, 100), (0, 28), (1, 3), (0, 10), (1, 1), (0, 5), (1, 1), (0, 2), (1, 2), (0, 10)], '44C2C6T6T6', 'AGGGTGGCGAGATCGATGACGGCATTGGCGATGGTGATCTTTTAGAGACATATGCCGGACGGCGTGGAGCTCCAG'))
    [(1, 142, 'TTA'), (0, 145, 'A'), (0, 148, 'T'), (1, 152, 'C'), (0, 155, 'A'), (1, 157, 'G'), (1, 159, 'GT'), (0, 162, 'G')]


    Splice junction example 2A:
    CIGAR: 13M100N7M2D19M3I10M1I5M1I2M2I10M
    MD:Z:  9A10^GG22C2C6T6T6
                                      1         1         1            1             1
                   1                  2         3         4            5             6
    pos:  1234567890123| [100] |4567890123456789012345678901^^^2345678901^23456^78^^9012345678
    ref:  AGGGTGGCGAGAT|-------|CGATGACGGCATTGGCGATGGTGATCTT^^^GAGCCACATG^CGGTC^GC^^GGATCTCCAG
                                       ^^                   |||   X  X   |   X |  ||   X
    read: AGGGTGGCGCGAT|-------|CGATGAC^^CATTGGCGATGGTGATCTTTTAGAGACATATGCCGGACGGCGTGGAGCTCCAG
          MMMMMMMMMMMMM         MMMMMMMDDMMMMMMMMMMMMMMMMMMMIIIMMMMMMMMMMIMMMMMIMMIIMMMMMMMMMM
          ---------C---         ----------------------------tta---A--T---c---A----gt---G------
          .........A...         .......GG...................   ...C..C... ...T. ..  ...T......
              9    A        10        ^GG             22          C 2C   6   T     6   T   6

    >>> list(_read_calc_variations(1, [(0, 13), (3, 100), (0, 7), (2, 2), (0, 19), (1, 3), (0, 10), (1, 1), (0, 5), (1, 1), (0, 2), (1, 2), (0, 10)], '9A10^GG22C2C6T6T6', 'AGGGTGGCGCGATCGATGACCATTGGCGATGGTGATCTTTTAGAGACATATGCCGGACGGCGTGGAGCTCCAG'))
    [(0, 10, 'C'), (2, 121, 'GG'), (1, 142, 'TTA'), (0, 145, 'A'), (0, 148, 'T'), (1, 152, 'C'), (0, 155, 'A'), (1, 157, 'G'), (1, 159, 'GT'), (0, 162, 'G')]

    Real Example
    242_1071_1799_B1
    CIGAR: 42M10I3M1D9M1D11M
    MD:Z:  27G16A0^T6C2^T1C9
                   1         2         3         4                   5         6         7
    pos:  123456789012345678901234567890123456789012          345678901234567890123456789012345
    ref:  ACTGAGAAACCCAACCCTCTGAGACCAGCACACCCCTTTCAA^^^^^^^^^^GCATGTTCCTCCCTCCCCTTCTTTG
                                     X                          X^      X  ^ X
    read: ACTGAGAAACCCAACCCTCTGAGACCAACACACCCCTTTCAACACATTTTTGGCC^GTTCCTGCC^CGCCTTCTTTG
          MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMIIIIIIIIIIMMMDMMMMMMMMMDMMMMMMMMMMM
          ---------------------------A--------------^^^^^^^^^^--CT------G--T-G---------

    >>> list(_read_calc_variations(1, [(0,42), (1,10), (0, 3), (2, 1), (0, 9), (2, 1), (0, 11)], '27G16A0^T6C2^T1C9', 'ACTGAGAAACCCAACCCTCTGAGACCAACACACCCCTTTCAACACATTTTTGGCCGTTCCTGCCCGCCTTCTTTG',  ))
    [(0, 28, 'A'), (1, 43, 'CACATTTTTG'), (0, 45, 'C'), (2, 46, 'T'), (0, 53, 'G'), (2, 56, 'T'), (0, 58, 'G')]


    Real example 2
    577_1692_891_A1
    CIGAR: 34M100N39M2I
    MD:Z:  3T69
                                                          1         1         1         1
                   1         2         3                  4         5         6         7
    pos:  1234567890123456789012345678901234| [100] |567890123456789012345678901234567890123
    ref:  GGATTCTTCCCACTGGGTCGATGTTGTTTGTGAT|-------|CTGAGAGAGAGTTGCATCTGCACATGCTTTCCTGGCGTC^^

    read: GGAATCTTCCCACTGGGTCGATGTTGTTTGTGAT|-------|CTGAGAGAGAGTTGCATCTGCACATGCTTTCCTGGCGTCTC
          MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMM  NNNNN  MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMII
          ---A------------------------------         ---------------------------------------TC

    >>> list(_read_calc_variations(1, [(0,34), (3,100), (0, 39), (1, 2)], '3T69', 'GGAATCTTCCCACTGGGTCGATGTTGTTTGTGATCTGAGAGAGAGTTGCATCTGCACATGCTTTCCTGGCGTCTC',  ))
    [(0, 4, 'A'), (1, 174, 'TC')]

    '''

    ref_pos = start_pos
    read_pos = 0

    for op, length in cigar:
        if md and md[0] == '0':
            md = md[1:]
        # sys.stderr.write('%s, %s, %s\n' %(op, length, md))
        if op == 0:  # M
            # how far in the chunk are we? (do *not* update ref_pos until end)
            md_pos = 0
            last = None
            while md and md_pos < length:
                if last == (op, length, md):
                    sys.stderr.write('\nInfinite loop in variant finding!\nPos: %s\nCIGAR: (%s, %s)\n' % (ref_pos, op, length))
                    sys.exit(1)
                last = (op, length, md)
                # sys.stderr.write('%s, %s, %s\n' %(op, length, md))
                chunk_size, md = _extract_md_matches(md, length - md_pos)
                # sys.stderr.write('   -> %s, %s\n' %(chunk_size, md))
                md_pos += chunk_size

                # look for mismatches
                while md_pos < length and md and md[0] not in '0123456789^':
                    yield (op, ref_pos + md_pos, seq[read_pos + md_pos])
                    md = md[1:]

                    md_pos += 1

            ref_pos += length
            read_pos += length

        elif op == 1:  # I
            # nothing in MD about inserts...
            yield (op, ref_pos, seq[read_pos:read_pos + length])
            read_pos += length

        elif op == 2:  # D
            # prefixed with '^' and includes all of the removed bases
            if md[0] == '^':
                md = md[1:]
            yield (op, ref_pos, md[:length])
            md = md[length:]
            ref_pos += length

        elif op == 3:  # N
            ref_pos += length
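
A small usage sketch for _read_calc_variations, reusing the deletion example from its docstring (the function is assumed to be importable from this module; sys is already imported there):

# CIGAR 9M 9D 27M with MD '2G0A5^ATGATGTCA27', taken from the doctest above
cigar = [(0, 9), (2, 9), (0, 27)]
md = '2G0A5^ATGATGTCA27'
seq = 'AGTGATGGGGGGGTTCCAGGTGGAGACGAGGACTCC'

for op, pos, bases in _read_calc_variations(1, cigar, md, seq):
    # expected tuples: (0, 3, 'T'), (0, 4, 'G'), (2, 10, 'ATGATGTCA')
    sys.stderr.write('op=%d pos=%d seq=%s\n' % (op, pos, bases))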

Example 42

Project: tools-iuc
Source File: raxml.py
View license
def __main__():
    usage = "usage: %prog -T <threads> -s <input> -n <output> -m <model> [optional arguments]"

    # Parse the primary wrapper's command line options
    parser = optparse.OptionParser(usage=usage)
    # raxml binary name, hardcoded in the xml file
    parser.add_option("--binary", action="store", type="string", dest="binary", help="Command to run")
    # (-a)
    parser.add_option("--weightfile", action="store", type="string", dest="weightfile", help="Column weight file")
    # (-A)
    parser.add_option("--secondary_structure_model", action="store", type="string", dest="secondary_structure_model", help="Secondary structure model")
    # (-b)
    parser.add_option("--bootseed", action="store", type="int", dest="bootseed", help="Bootstrap random number seed")
    # (-c)
    parser.add_option("--numofcats", action="store", type="int", dest="numofcats", help="Number of distinct rate categories")
    # (-d)
    parser.add_option("--search_complete_random_tree", action="store_true", dest="search_complete_random_tree", help="Search with a complete random starting tree")
    # (-D)
    parser.add_option("--ml_search_convergence", action="store_true", dest="ml_search_convergence", help="ML search onvergence criterion")
    # (-e)
    parser.add_option("--model_opt_precision", action="store", type="float", dest="model_opt_precision", help="Model Optimization Precision (-e)")
    # (-E)
    parser.add_option("--excludefile", action="store", type="string", dest="excludefile", help="Exclude File Name")
    # (-f)
    parser.add_option("--search_algorithm", action="store", type="string", dest="search_algorithm", help="Search Algorithm")
    # (-F)
    parser.add_option("--save_memory_cat_model", action="store_true", dest="save_memory_cat_model", help="Save memory under CAT and GTRGAMMA models")
    # (-g)
    parser.add_option("--groupingfile", action="store", type="string", dest="groupingfile", help="Grouping File Name")
    # (-G)
    parser.add_option("--enable_evol_heuristics", action="store_true", dest="enable_evol_heuristics", help="Enable evol algo heuristics")
    # (-i)
    parser.add_option("--initial_rearrangement_setting", action="store", type="int", dest="initial_rearrangement_setting", help="Initial Rearrangement Setting")
    # (-I)
    parser.add_option("--posterior_bootstopping_analysis", action="store", type="string", dest="posterior_bootstopping_analysis", help="Posterior bootstopping analysis")
    # (-J)
    parser.add_option("--majority_rule_consensus", action="store", type="string", dest="majority_rule_consensus", help="Majority rule consensus")
    # (-k)
    parser.add_option("--print_branch_lengths", action="store_true", dest="print_branch_lengths", help="Print branch lengths")
    # (-K)
    parser.add_option("--multistate_sub_model", action="store", type="string", dest="multistate_sub_model", help="Multistate substitution model")
    # (-m)
    parser.add_option("--model_type", action="store", type="string", dest="model_type", help="Model Type")
    parser.add_option("--base_model", action="store", type="string", dest="base_model", help="Base Model")
    parser.add_option("--aa_empirical_freq", action="store_true", dest="aa_empirical_freq", help="Use AA Empirical base frequences")
    parser.add_option("--aa_search_matrix", action="store", type="string", dest="aa_search_matrix", help="AA Search Matrix")
    # (-n)
    parser.add_option("--name", action="store", type="string", dest="name", help="Run Name")
    # (-N/#)
    parser.add_option("--number_of_runs", action="store", type="int", dest="number_of_runs", help="Number of alternative runs")
    parser.add_option("--number_of_runs_bootstop", action="store", type="string", dest="number_of_runs_bootstop", help="Number of alternative runs based on the bootstop criteria")
    # (-M)
    parser.add_option("--estimate_individual_branch_lengths", action="store_true", dest="estimate_individual_branch_lengths", help="Estimate individual branch lengths")
    # (-o)
    parser.add_option("--outgroup_name", action="store", type="string", dest="outgroup_name", help="Outgroup Name")
    # (-O)
    parser.add_option("--disable_undetermined_seq_check", action="store_true", dest="disable_undetermined_seq_check", help="Disable undetermined sequence check")
    # (-p)
    parser.add_option("--random_seed", action="store", type="int", dest="random_seed", help="Random Number Seed")
    # (-P)
    parser.add_option("--external_protein_model", action="store", type="string", dest="external_protein_model", help="External Protein Model")
    # (-q)
    parser.add_option("--multiple_model", action="store", type="string", dest="multiple_model", help="Multiple Model File")
    # (-r)
    parser.add_option("--constraint_file", action="store", type="string", dest="constraint_file", help="Constraint File")
    # (-R)
    parser.add_option("--bin_model_parameter_file", action="store", type="string", dest="bin_model_parameter_file", help="Constraint File")
    # (-s)
    parser.add_option("--source", action="store", type="string", dest="source", help="Input file")
    # (-S)
    parser.add_option("--secondary_structure_file", action="store", type="string", dest="secondary_structure_file", help="Secondary structure file")
    # (-t)
    parser.add_option("--starting_tree", action="store", type="string", dest="starting_tree", help="Starting Tree")
    # (-T)
    parser.add_option("--threads", action="store", type="int", dest="threads", help="Number of threads to use")
    # (-u)
    parser.add_option("--use_median_approximation", action="store_true", dest="use_median_approximation", help="Use median approximation")
    # (-U)
    parser.add_option("--save_memory_gappy_alignments", action="store_true", dest="save_memory_gappy_alignments", help="Save memory in large gapped alignments")
    # (-V)
    parser.add_option("--disable_rate_heterogeneity", action="store_true", dest="disable_rate_heterogeneity", help="Disable rate heterogeneity")
    # (-W)
    parser.add_option("--sliding_window_size", action="store", type="string", dest="sliding_window_size", help="Sliding window size")
    # (-x)
    parser.add_option("--rapid_bootstrap_random_seed", action="store", type="int", dest="rapid_bootstrap_random_seed", help="Rapid Boostrap Random Seed")
    # (-y)
    parser.add_option("--parsimony_starting_tree_only", action="store_true", dest="parsimony_starting_tree_only", help="Generate a parsimony starting tree only")
    # (-z)
    parser.add_option("--file_multiple_trees", action="store", type="string", dest="file_multiple_trees", help="Multiple Trees File")

    (options, args) = parser.parse_args()
    cmd = []

    # Required parameters
    binary = options.binary
    cmd.append(binary)
    # Threads
    if options.threads > 1:
        threads = "-T %d" % options.threads
        cmd.append(threads)
    # Source
    source = "-s %s" % options.source
    cmd.append(source)
    # Hardcode to "galaxy" first to simplify the output part of the wrapper
    # name = "-n %s" % options.name
    name = "-n galaxy"
    cmd.append(name)
    # Model
    model_type = options.model_type
    base_model = options.base_model
    aa_search_matrix = options.aa_search_matrix
    aa_empirical_freq = options.aa_empirical_freq
    if model_type == 'aminoacid':
        model = "-m %s%s" % (base_model, aa_search_matrix)
        if aa_empirical_freq:
            model = "-m %s%s%s" % (base_model, aa_search_matrix, 'F')
        # (-P)
        if options.external_protein_model:
            external_protein_model = "-P %s" % options.external_protein_model
            cmd.append(external_protein_model)
    else:
        model = "-m %s" % base_model
    cmd.append(model)
    if model == "GTRCAT":
        # (-c)
        if options.numofcats:
            numofcats = "-c %d" % options.numofcats
            cmd.append(numofcats)
    # Optional parameters
    if options.number_of_runs_bootstop:
        number_of_runs_bootstop = "-N %s" % options.number_of_runs_bootstop
        cmd.append(number_of_runs_bootstop)
    else:
        number_of_runs_bootstop = ''
    if options.number_of_runs:
        number_of_runs_opt = "-N %d" % options.number_of_runs
        cmd.append(number_of_runs_opt)
    else:
        number_of_runs_opt = 0
    # (-a)
    if options.weightfile:
        weightfile = "-a %s" % options.weightfile
        cmd.append(weightfile)
    # (-A)
    if options.secondary_structure_model:
        secondary_structure_model = "-A %s" % options.secondary_structure_model
        cmd.append(secondary_structure_model )
    # (-b)
    if options.bootseed:
        bootseed = "-b %d" % options.bootseed
        cmd.append(bootseed)
    else:
        bootseed = 0
    # -C - doesn't work in pthreads version, skipped
    if options.search_complete_random_tree:
        cmd.append("-d")
    if options.ml_search_convergence:
        cmd.append("-D" )
    if options.model_opt_precision:
        model_opt_precision = "-e %f" % options.model_opt_precision
        cmd.append(model_opt_precision)
    if options.excludefile:
        excludefile = "-E %s" % options.excludefile
        cmd.append(excludefile)
    if options.search_algorithm:
        search_algorithm = "-f %s" % options.search_algorithm
        cmd.append(search_algorithm)
    if options.save_memory_cat_model:
        cmd.append("-F")
    if options.groupingfile:
        groupingfile = "-g %s" % options.groupingfile
        cmd.append(groupingfile)
    if options.enable_evol_heuristics:
        enable_evol_heuristics = "-G %f" % options.enable_evol_heuristics
        cmd.append(enable_evol_heuristics )
    if options.initial_rearrangement_setting:
        initial_rearrangement_setting = "-i %s" % options.initial_rearrangement_setting
        cmd.append(initial_rearrangement_setting)
    if options.posterior_bootstopping_analysis:
        posterior_bootstopping_analysis = "-I %s" % options.posterior_bootstopping_analysis
        cmd.append(posterior_bootstopping_analysis)
    if options.majority_rule_consensus:
        majority_rule_consensus = "-J %s" % options.majority_rule_consensus
        cmd.append(majority_rule_consensus)
    if options.print_branch_lengths:
        cmd.append("-k")
    if options.multistate_sub_model:
        multistate_sub_model = "-K %s" % options.multistate_sub_model
        cmd.append(multistate_sub_model)
    if options.estimate_individual_branch_lengths:
        cmd.append("-M")
    if options.outgroup_name:
        outgroup_name = "-o %s" % options.outgroup_name
        cmd.append(outgroup_name)
    if options.disable_undetermined_seq_check:
        cmd.append("-O")
    if options.random_seed:
        random_seed = "-p %d" % options.random_seed
        cmd.append(random_seed)
    multiple_model = None
    if options.multiple_model:
        multiple_model = "-q %s" % options.multiple_model
        cmd.append(multiple_model)
    if options.constraint_file:
        constraint_file = "-r %s" % options.constraint_file
        cmd.append(constraint_file)
    if options.bin_model_parameter_file:
        bin_model_parameter_file_name = "RAxML_binaryModelParameters.galaxy"
        os.symlink(options.bin_model_parameter_file, bin_model_parameter_file_name )
        bin_model_parameter_file = "-R %s" % options.bin_model_parameter_file
        # Needs testing. Is the hardcoded name or the real path needed?
        cmd.append(bin_model_parameter_file)
    if options.secondary_structure_file:
        secondary_structure_file = "-S %s" % options.secondary_structure_file
        cmd.append(secondary_structure_file)
    if options.starting_tree:
        starting_tree = "-t %s" % options.starting_tree
        cmd.append(starting_tree)
    if options.use_median_approximation:
        cmd.append("-u")
    if options.save_memory_gappy_alignments:
        cmd.append("-U")
    if options.disable_rate_heterogeneity:
        cmd.append("-V")
    if options.sliding_window_size:
        sliding_window_size = "-W %d" % options.sliding_window_size
        cmd.append(sliding_window_size)
    if options.rapid_bootstrap_random_seed:
        rapid_bootstrap_random_seed = "-x %d" % options.rapid_bootstrap_random_seed
        cmd.append(rapid_bootstrap_random_seed)
    else:
        rapid_bootstrap_random_seed = 0
    if options.parsimony_starting_tree_only:
        cmd.append("-y")
    if options.file_multiple_trees:
        file_multiple_trees = "-z %s" % options.file_multiple_trees
        cmd.append(file_multiple_trees)

    print "cmd list: ", cmd, "\n"

    full_cmd = " ".join(cmd)
    print "Command string: %s" % full_cmd

    try:
        proc = subprocess.Popen(args=full_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    except Exception as err:
        sys.stderr.write("Error invoking command: \n%s\n\n%s\n" % (cmd, err))
        sys.exit(1)
    stdout, stderr = proc.communicate()
    return_code = proc.returncode
    if return_code:
        sys.stdout.write(stdout)
        sys.stderr.write(stderr)
        sys.stderr.write("Return error code %i from command:\n" % return_code)
        sys.stderr.write("%s\n" % cmd)
    else:
        sys.stdout.write(stdout)
        sys.stdout.write(stderr)

    # Multiple runs - concatenate
    if number_of_runs_opt > 0:
        if (bootseed == 0) and (rapid_bootstrap_random_seed == 0 ):
            runfiles = glob.glob('RAxML*RUN*')
            runfiles.sort(key=getint)
        # Logs
            outfile = open('RAxML_log.galaxy', 'w')
            for filename in runfiles:
                if fnmatch.fnmatch(filename, 'RAxML_log.galaxy.RUN.*'):
                    infile = open(filename, 'r')
                    filename_line = "%s\n" % filename
                    outfile.write(filename_line)
                    for line in infile:
                        outfile.write(line)
                    infile.close()
            outfile.close()
        # Parsimony Trees
            outfile = open('RAxML_parsimonyTree.galaxy', 'w')
            for filename in runfiles:
                if fnmatch.fnmatch(filename, 'RAxML_parsimonyTree.galaxy.RUN.*'):
                    infile = open(filename, 'r')
                    filename_line = "%s\n" % filename
                    outfile.write(filename_line)
                    for line in infile:
                        outfile.write(line)
                    infile.close()
            outfile.close()
        # Results
            outfile = open('RAxML_result.galaxy', 'w')
            for filename in runfiles:
                if fnmatch.fnmatch(filename, 'RAxML_result.galaxy.RUN.*'):
                    infile = open(filename, 'r')
                    filename_line = "%s\n" % filename
                    outfile.write(filename_line)
                    for line in infile:
                        outfile.write(line)
                    infile.close()
            outfile.close()
    # Multiple Model Partition Files
    if multiple_model:
        files = glob.glob('RAxML_bestTree.galaxy.PARTITION.*')
        if len(files) > 0:
            files.sort(key=getint)
            outfile = open('RAxML_bestTreePartitions.galaxy', 'w')
            # Best Tree Partitions
            for filename in files:
                if fnmatch.fnmatch(filename, 'RAxML_bestTree.galaxy.PARTITION.*'):
                    infile = open(filename, 'r')
                    filename_line = "%s\n" % filename
                    outfile.write(filename_line)
                    for line in infile:
                        outfile.write(line)
                    infile.close()
            outfile.close()
        else:
            outfile = open('RAxML_bestTreePartitions.galaxy', 'w')
            outfile.write("No partition files were produced.\n")
            outfile.close()

        # Result Partitions
        files = glob.glob('RAxML_result.galaxy.PARTITION.*')
        if len(files) > 0:
            files.sort(key=getint)
            outfile = open('RAxML_resultPartitions.galaxy', 'w')
            for filename in files:
                if fnmatch.fnmatch(filename, 'RAxML_result.galaxy.PARTITION.*'):
                    infile = open(filename, 'r')
                    filename_line = "%s\n" % filename
                    outfile.write(filename_line)
                    for line in infile:
                        outfile.write(line)
                    infile.close()
            outfile.close()
        else:
            outfile = open('RAxML_resultPartitions.galaxy', 'w')
            outfile.write("No partition files were produced.\n")
            outfile.close()

    # DEBUG options
    infof = open('RAxML_info.galaxy', 'a')
    infof.write('\nOM: CLI options DEBUG START:\n')
    infof.write(options.__repr__())
    infof.write('\nOM: CLI options DEBUG END\n')
    infof.close()
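
The wrapper above assembles each option as a pre-formatted string and runs the joined command through the shell. A minimal alternative sketch (not how this wrapper does it) keeps the command as a flat token list and skips shell=True, which sidesteps quoting issues; option values would then be appended as separate tokens (e.g. cmd.extend(["-s", options.source])):

import subprocess
import sys

# Hypothetical flat token list; the binary name and options are placeholders.
cmd = ["raxmlHPC", "-T", "4", "-s", "input.phy", "-n", "galaxy", "-m", "GTRGAMMA"]

proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = proc.communicate()
if proc.returncode:
    sys.stderr.write(stderr)
    sys.stderr.write("Return error code %i from command:\n%s\n" % (proc.returncode, " ".join(cmd)))
else:
    sys.stdout.write(stdout)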

Example 43

Project: async
Source File: test_pool.py
View license
    def _assert_single_task(self, p, async=False):
        """Performs testing in a synchronized environment"""
        sys.stderr.write("Threadpool: Starting single task (async = %i) with %i threads\n" % (async, p.size()))
        null_tasks = p.num_tasks()      # in case we had some before

        # add a simple task
        # it iterates n items
        ni = 1000
        assert ni % 2 == 0, "ni needs to be dividable by 2"
        assert ni % 4 == 0, "ni needs to be dividable by 4"

        make_task = lambda *args, **kwargs: make_iterator_task(ni, *args, **kwargs)

        task = make_task()

        assert p.num_tasks() == null_tasks
        rc = p.add_task(task)
        assert p.num_tasks() == 1 + null_tasks
        assert isinstance(rc, PoolReader)
        assert task._out_writer is not None

        # pull the result completely - we should get one task, which calls its
        # function once. In sync mode, the order matches
        items = rc.read()
        assert len(items) == ni
        task._assert(1, ni)
        if not async:
            assert items[0] == 0 and items[-1] == ni-1

        # as the task is done, it should have been removed - we have read everything
        assert task.is_done()
        del(rc)
        assert p.num_tasks() == null_tasks
        task = make_task()

        # pull individual items
        rc = p.add_task(task)
        assert p.num_tasks() == 1 + null_tasks
        st = time.time()
        for i in range(ni):
            items = rc.read(1)
            assert len(items) == 1

            # can't assert order in async mode
            if not async:
                assert i == items[0]
        # END for each item
        elapsed = time.time() - st
        sys.stderr.write("Threadpool: processed %i individual items, with %i threads, one at a time, in %f s ( %f items / s )\n" % (ni, p.size(), elapsed, ni / elapsed))

        # it couldn't yet notice that the input is depleted as we pulled exactly
        # ni items - the next one would remove it. Instead, we delete our channel
        # which triggers orphan handling
        assert not task.is_done()
        assert p.num_tasks() == 1 + null_tasks
        del(rc)
        assert p.num_tasks() == null_tasks

        # test min count
        # if we query 1 item, it will prepare ni / 2
        task = make_task()
        task.min_count = ni / 2
        rc = p.add_task(task)
        items = rc.read(1)
        assert len(items) == 1 and items[0] == 0            # processes ni / 2
        items = rc.read(1)
        assert len(items) == 1 and items[0] == 1            # processes nothing
        # rest - it has ni/2 - 2 on the queue, and pulls ni-2
        # It wants too much, so the task realizes its done. The task
        # doesn't care about the items in its output channel
        nri = ni - 2
        items = rc.read(nri)
        assert len(items) == nri
        p.remove_task(task)
        assert p.num_tasks() == null_tasks
        task._assert(2, ni)                     # two chunks, ni calls

        # it's already done, gives us no more, it's still okay to use it though
        # as a task doesn't have to be in the graph to allow reading its produced
        # items
        # it can happen that a thread closes the channel just a tiny fraction of time
        # after we check this, so the test fails, although it is nearly closed.
        # When we start reading, we should wake up once it sends its signal
        # assert task.is_closed()
        assert len(rc.read()) == 0

        # test chunking
        # we always want 4 chunks, these could go to individual nodes
        task = make_task()
        task.min_count = ni // 2             # restore previous value
        task.max_chunksize = ni // 4         # 4 chunks
        rc = p.add_task(task)

        # must read a specific item count
        # count is still at ni / 2 - here we want more than that
        # 2 steps with n / 4 items, + 1 step with n/4 items to get + 2
        nri = ni // 2 + 2
        items = rc.read(nri)
        if py2:
            assert len(items) == nri
        # have n / 4 - 2 items on queue, want n / 4 in first chunk, cause 1 processing
        # ( 4 in total ). Still want n / 4 - 2 in second chunk, causing another processing
        nri = ni // 2 - 2
        items = rc.read(nri)
        assert len(items) == nri

        task._assert( 5, ni)

        # delete the handle first, causing the task to be removed and to be set
        # done. We check for the set-done state later. Depending on the timing,
        # The task is not yet set done when we are checking it because we were
        # scheduled in before the flag could be set.
        del(rc)
        assert task.is_done()
        assert p.num_tasks() == null_tasks  # depleted

        # but this only hits if we want too many items, if we want less, it could
        # still do too much - hence we set the min_count to the same number to enforce
        # at least ni / 4 items to be processed, no matter what we request
        task = make_task()
        task.min_count = None
        task.max_chunksize = ni / 4     # match previous setup
        rc = p.add_task(task)
        st = time.time()
        for i in range(ni):
            if async:
                assert len(rc.read(1)) == 1
            else:
                assert rc.read(1)[0] == i
            # END handle async mode
        # END pull individual items
        # too many processing counts ;)
        elapsed = time.time() - st
        sys.stderr.write("Threadpool: processed %i individual items in chunks of %i, with %i threads, one at a time, in %f s ( %f items / s)\n" % (ni, ni/4, p.size(), elapsed, ni / elapsed))

        task._assert(ni, ni)
        assert p.num_tasks() == 1 + null_tasks
        assert p.remove_task(task) is p     # del manually this time
        assert p.num_tasks() == null_tasks

        # now we set the minimum count to reduce the number of processing counts
        task = make_task()
        task.min_count = ni / 4
        task.max_chunksize = ni / 4     # match previous setup
        rc = p.add_task(task)
        for i in range(ni):
            items = rc.read(1)
            assert len(items) == 1
            if not async:
                assert items[0] == i
        # END for each item
        task._assert(ni / task.min_count, ni)
        del(rc)
        assert p.num_tasks() == null_tasks

        # test failure
        # on failure, the processing stops and the task is finished, keeping
        # his error for later
        task = make_task()
        task.should_fail = True
        rc = p.add_task(task)
        assert len(rc.read()) == 0      # failure on first item

        assert isinstance(task.error(), AssertionError)
        assert task.is_done()           # on error, its marked done as well
        del(rc)
        if py2:
            assert p.num_tasks() == null_tasks

        # test failure after ni / 2 items
        # This makes sure it correctly closes the channel on failure to prevent blocking
        nri = ni/2
        task = make_task(FixtureFailureThreadTask, fail_after=ni/2)
        rc = p.add_task(task)
        assert len(rc.read()) == nri
        assert task.is_done()
        assert isinstance(task.error(), AssertionError)

        sys.stderr.write("done with everything\n")

Example 44

Project: async
Source File: test_pool.py
View license
    def _assert_async_dependent_tasks(self, pool):
        # includes failure in center task, 'recursive' orphan cleanup
        # This will also verify that the channel-close mechanism works
        # t1 -> t2 -> t3

        sys.stderr.write("Threadpool: starting async dependency test in %i threads\n" % pool.size())
        null_tasks = pool.num_tasks()
        ni = 1000
        count = 3
        aic = count + 2
        make_task = lambda *args, **kwargs: add_task_chain(pool, ni, count, *args, **kwargs)

        ts, rcs = make_task()
        assert len(ts) == aic
        assert len(rcs) == aic
        assert pool.num_tasks() == null_tasks + len(ts)

        # read(0)
        #########
        st = time.time()
        items = rcs[-1].read()
        elapsed = time.time() - st
        assert len(items) == ni
        del(rcs)
        if py2:
            assert pool.num_tasks() == 0        # tasks depleted, all done, no handles
        # wait a tiny moment - there could still be something unprocessed on the
        # queue, increasing the refcount
        assert sys.getrefcount(ts[-1]) == 2 # ts + call
        assert sys.getrefcount(ts[0]) == 2  # ts + call
        sys.stderr.write("Dependent Tasks: evaluated %i items of %i dependent in %f s ( %i items / s )\n" % (ni, aic, elapsed, ni / elapsed))


        # read(1)
        #########
        ts, rcs = make_task()
        st = time.time()
        for i in range(ni):
            items = rcs[-1].read(1)
            assert len(items) == 1
        # END for each item to pull
        elapsed_single = time.time() - st
        # another read yields nothing, its empty
        assert len(rcs[-1].read()) == 0
        sys.stderr.write("Dependent Tasks: evaluated %i items with read(1) of %i dependent in %f s ( %i items / s )\n" % (ni, aic, elapsed_single, ni / elapsed_single))


        # read with min-count size
        ###########################
        # must be faster, as it will read ni / 4 chunks
        # Its enough to set one task, as it will force all others in the chain
        # to min_size as well.
        ts, rcs = make_task()
        if py2:
            assert pool.num_tasks() == len(ts)
        nri = ni / 4
        ts[-1].min_count = nri
        st = time.time()
        for i in range(ni):
            items = rcs[-1].read(1)
            assert len(items) == 1
        # END for each item to read
        elapsed_minsize = time.time() - st
        # its empty
        assert len(rcs[-1].read()) == 0
        sys.stderr.write("Dependent Tasks: evaluated %i items with read(1), min_size=%i, of %i dependent in %f s ( %i items / s )\n" % (ni, nri, aic, elapsed_minsize, ni / elapsed_minsize))

        # it should have been a bit faster at least, and most of the time it is
        # Sometimes, its not, mainly because:
        # * The test tasks lock a lot, hence they slow down the system
        # * Each read will still trigger the pool to evaluate, causing some overhead
        #   even though there are enough items on the queue in that case. Keeping
        #   track of the scheduled items helped there, but it caused further unacceptable
        #   slowdown
        # assert elapsed_minsize < elapsed_single


        # read with failure
        ###################
        # it should recover and give at least fail_after items
        # t1 -> x -> t3
        fail_after = ni/2
        ts, rcs = make_task(fail_setup=[(0, fail_after)])
        items = rcs[-1].read()
        assert len(items) == fail_after


        # MULTI-POOL
        # If two pools are connected, this should work as well.
        # The second one has just one more thread
        ts, rcs = make_task()

        # connect verifier channel as feeder of the second pool
        p2 = ThreadPool(0)      # don't spawn new threads, they have the tendency not to wake up on mutexes
        assert p2.size() == 0
        p2ts, p2rcs = add_task_chain(p2, ni, count, feeder_channel=rcs[-1], id_offset=count)
        assert p2ts[0] is None      # we have no feeder task
        assert rcs[-1].pool_ref()() is pool     # it didn't change the pool
        assert rcs[-1] is p2ts[1].reader()
        assert p2.num_tasks() == len(p2ts)-1    # first is None

        # reading from the last one will evaluate all pools correctly
        st = time.time()
        items = p2rcs[-1].read()
        elapsed = time.time() - st
        assert len(items) == ni

        sys.stderr.write("Dependent Tasks: evaluated 2 connected pools and %i items with read(0), of %i dependent tasks in %f s ( %i items / s )\n" % (ni, aic + aic-1, elapsed, ni / elapsed))


        # loose the handles of the second pool to allow others to go as well
        del(p2rcs); del(p2ts)
        assert p2.num_tasks() == 0

        # now we lost our old handles as well, and the tasks go away
        ts, rcs = make_task()
        if py2:
            assert pool.num_tasks() == len(ts)

        p2ts, p2rcs = add_task_chain(p2, ni, count, feeder_channel=rcs[-1], id_offset=count)
        assert p2.num_tasks() == len(p2ts) - 1

        # Test multi-read(1)
        reader = rcs[-1]
        st = time.time()
        for i in range(ni):
            items = reader.read(1)
            assert len(items) == 1
        # END for each item to get
        elapsed = time.time() - st
        del(reader)     # decrement refcount

        sys.stderr.write("Dependent Tasks: evaluated 2 connected pools and %i items with read(1), of %i dependent tasks in %f s ( %i items / s )\n" % (ni, aic + aic-1, elapsed, ni / elapsed))

        # another read is empty
        assert len(rcs[-1].read()) == 0

        # now that both are connected, I can drop my handle to the reader
        # without affecting the task-count, but what's more important:
        # They remove their tasks correctly once we drop our references in the
        # right order
        del(p2ts)
        assert p2rcs[0] is rcs[-1]
        del(p2rcs)
        assert p2.num_tasks() == 0
        del(p2)

        if py2:
            assert pool.num_tasks() == null_tasks + len(ts)


        del(ts)
        del(rcs)

        if py2:
            assert pool.num_tasks() == null_tasks

Example 45

View license
def _scrape_master_list(known_db=None, ignored_urls=None):
    """Scrape the Master Mod List on the FTL forum.
    If an existing ModDB is provided, its thread urls will be checked too.

    :param known_db: A ModDB with mods to ignore if thread_hash is unchanged.
    :param ignored_urls: A list of uninteresting thread_urls to ignore.
    :return: A list of result dicts.
    """
    master_list_url = "http://www.ftlgame.com/forum/viewtopic.php?f=11&t=2645"

    mods_header_ptn = re.compile(re.escape("<span style=\"font-weight: bold\"><span style=\"text-decoration: underline\"><span style=\"font-size: 150%; line-height: 116%;\">Mods</span></span></span>"))

    mod_ptn = re.compile("(?u)^(?:\\[[A-Za-z0-9 ]+ *\\])?<a href=\"([^\"]+)\"[^>]*>([^>]+)</a> *((?:\\[[A-Za-z0-9 ]+\\])?)(?: (?:.*?))? - Author: <a href=\"[^\"]+\"[^>]*>([^<]+?)</a>")

    forum_url_fragment = "http://www.ftlgame.com/forum/viewtopic.php"

    boring_hashes = []
    if (known_db is not None):
        for mod_info in known_db.catalog:
            if (mod_info.get_thread_hash() != "???"):
                boring_hashes.append(mod_info.get_thread_hash())

    sys.stderr.write("\n")
    sys.stderr.write("Scraping Master Mod List...\n")
    post_content = _get_first_post(master_list_url)
    post_content = re.sub("<br */>", "\n", post_content)

    lines = post_content.split("\n")
    results = []
    in_mods = False

    for line in lines:
        if (mods_header_ptn.search(line)):
            in_mods = True
            continue
        if (not in_mods): continue

        m = mod_ptn.match(line)
        if (m):
            result = {}
            result["thread_url"] = m.group(1)
            result["title"] = m.group(2)
            result["author"] = m.group(4)
            result["wip"] = True if (m.group(3)=="[WIP]") else False
            result["raw_desc"] = ""
            result["thread_hash"] = "???"

            result["title"] = re.sub("&amp;", "&", result["title"])
            result["thread_url"] = re.sub("&amp;", "&", result["thread_url"])
            results.append(result)

    # Merge extra mods from known_db into the results list to scrape.
    if (known_db is not None):
        pending_urls = [x["thread_url"] for x in results]
        for mod_info in known_db.catalog:
            if (mod_info.get_url() != "???" and mod_info.get_url() not in pending_urls):
                pending_urls.append(mod_info.get_url())
                new_result = {}
                new_result["thread_url"] = mod_info.get_url()
                new_result["title"] = mod_info.get_title()
                new_result["author"] = mod_info.get_author()
                new_result["wip"] = False  # Shrug
                new_result["raw_desc"] = mod_info.get_desc()
                new_result["thread_hash"] = mod_info.get_thread_hash()
                results.append(new_result)

    # Prune results with boring urls.
    if (ignored_urls):
        results = [x for x in results if (x["thread_url"] not in ignored_urls)]

    # Fetch and hash each thread url.
    for i in range(len(results)):
        if (forum_url_fragment not in results[i]["thread_url"]):
            continue  # Don't bother scraping and hashing non-forum urls.

        time.sleep(2)
        sys.stderr.write("\n")
        sys.stderr.write("Scraping mod %03d/%03d (%s)...\n" % ((i+1), len(results), results[i]["title"]))
        while (True):
            try:
                results[i]["raw_desc"] = _get_first_post(results[i]["thread_url"])
                # Encode the str/unicode string to bytes.
                results[i]["thread_hash"] = hashlib.md5(results[i]["raw_desc"].encode("utf-8")).hexdigest()
                break
            except (HTTPError) as err:
                sys.stderr.write("Request failed: %s\n" % err.code)
            except (URLError) as err:
                sys.stderr.write("Request failed: %s\n" % err.reason)
            except (OSError) as err:  # Socket timeout.
                sys.stderr.write("Request failed: %s\n" % err.reason)
            time.sleep(5)

    # Ignore threads whose hashes haven't changed.
    results = [x for x in results if (x["thread_hash"] not in boring_hashes)]

    # Scrub html out of descriptions and scrape download links.
    for result in results:
        # Unicode reminder: Prepend (?u) before regexes with: \w,\W,\b,\B,\d,\D,\s,\S.
        post_content = result["raw_desc"]
        post_content = re.sub("<br */>", "\n", post_content)
        post_content = re.sub("<img [^>]*/>", "", post_content)
        post_content = re.sub("<span [^>]*>", "", post_content)
        post_content = re.sub("</span>", "", post_content)
        post_content = re.sub("&quot;", "\"", post_content)
        post_content = re.sub(u("\u2018|\u2019"), "'", post_content)
        post_content = re.sub(u("\u2022"), "-", post_content)
        post_content = re.sub(u("\u2013"), "-", post_content)
        post_content = re.sub(u("\u00a9"), "()", post_content)
        post_content = re.sub("&amp;", "&", post_content)
        post_content = re.sub("<a (?:[^>]+ )?href=\"([^\"]+)\"[^>]*>", "<a href=\"\\g<1>\">", post_content)
        post_content = re.sub("<a href=\"[^\"]+/forum/memberlist.php[^\"]+\"[^>]*>([^<]+)</a>", "\\g<1>", post_content)
        post_content = re.sub("<a href=\"http://(?:i.imgur.com/|[^\"]*photobucket.com/|[^\"]*deviantart.com/|www.mediafire.com/view/[?])[^\"]+\"[^>]*>([^<]+)</a>", "\\g<1>", post_content)
        post_content = re.sub("<a href=\"([^\"]+)\"[^>]*>(?:\\1|[^<]+ [.][.][.] [^<]+)</a>", "<a href=\"\\g<1>\">Link</a>", post_content)
        post_content = re.sub("<a href=\"[^\"]+[.](?:jpg|png)(?:[.]html)?\"[^>]*>([^<]*)</a>", "\\g<1>", post_content)
        post_content = re.sub("<a href=\"([^\"]+)\"[^>]*></a>", "\\g<1>", post_content)
        post_content = re.sub("</li><li>", "</li>\n<li>", post_content)
        post_content = re.sub("<li>(.*?)</li>", " - \\g<1>", post_content)
        post_content = re.sub("<li>", " - ", post_content)
        post_content = re.sub("</li>", "", post_content)
        post_content = re.sub("</?ul>", "", post_content)
        post_content = re.sub("(?s)<blockquote [^>]+><div>(.*?)</div></blockquote>", "<blockquote>\\g<1></blockquote>", post_content)
        post_content = re.sub("<!-- [^>]+ -->", "", post_content)

        # Link to GMM Thread
        post_content = re.sub("<a href=\"[^\"]+/forum/viewtopic.php?(?:[^&]+&)*t=2464\"[^>]*>([^<]+)</a>", "\\g<1>", post_content)
        # Link to Superluminal Thread
        post_content = re.sub("<a href=\"[^\"]+/forum/viewtopic.php?(?:[^&]+&)*t=11251\"[^>]*>([^<]+)</a>", "\\g<1>", post_content)
        # Link to FTLEdit Thread
        post_content = re.sub("<a href=\"[^\"]+/forum/viewtopic.php?(?:[^&]+&)*t=2959\"[^>]*>([^<]+)</a>", "\\g<1>", post_content)

        post_content = re.sub("(?u)\\A\\s+", "", post_content)
        post_content = re.sub("(?u)\\s+\\Z", "", post_content)
        result["raw_desc"] = post_content +"\n"  # Triple-quoting looks better with a newline.

    return results
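
Example 45 combines two recurring stderr idioms: a per-item progress line ("Scraping mod %03d/%03d") and a retry loop that reports each failure before sleeping. A small sketch of that shape, not taken from the project itself (the URL list and timeout are illustrative):

import sys
import time

try:
    from urllib.request import urlopen            # Python 3
    from urllib.error import HTTPError, URLError
except ImportError:
    from urllib2 import urlopen, HTTPError, URLError  # Python 2

def fetch_with_retries(url, attempts=3, delay=5):
    for attempt in range(attempts):
        try:
            return urlopen(url, timeout=10).read()
        except HTTPError as err:
            sys.stderr.write("Request failed: %s\n" % err.code)
        except URLError as err:
            sys.stderr.write("Request failed: %s\n" % err.reason)
        time.sleep(delay)
    return None

urls = ["http://www.example.com/"]
for i, url in enumerate(urls):
    sys.stderr.write("Fetching %03d/%03d (%s)...\n" % (i + 1, len(urls), url))
    fetch_with_retries(url)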

Example 46

Project: f5vpn-login
Source File: f5vpn-login.py
View license
def execPPPd(params):
    tunnel_host=params['tunnel_host0']
    tunnel_port=int(params['tunnel_port0'])

    serviceid = "f5vpn-%s"%tunnel_host

    request = """GET /myvpn?sess=%s HTTP/1.0\r
Cookie: MRHSession=%s\r
\r
""" % (params['Session_ID'], params['Session_ID'])

    for i in range(5):
        try:
            ssl_socket = proxy_connect(tunnel_host, tunnel_port)
            ssl_socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, True)
            ssl = sslwrap(tunnel_host, ssl_socket)
            ssl.write(request)
            initial_data = ssl.read()
            break
        except socket.sslerror, e:
            # Sometimes the server seems to respond with "EOF occurred in violation of protocol"
            # instead of establishing the connection. Try to deal with this by retrying...
            if e.args[0] != 8:
                raise
            sys.stderr.write("VPN socket unexpectedly closed during connection setup, retrying (%d/5)...\n" % (i + 1))

    # Make new PTY
    (pppd_fd, slave_pppd_fd) = os.openpty()

    # Make log pipe
    logpipe_r,logpipe_w = os.pipe()

    if params.get('LAN0'):
        routes_to_add = [parse_net_bits(routespec)
                         for routespec in params['LAN0'].split(' ')]
    else:
        routes_to_add = []

    override_gateway = ('UseDefaultGateway0' in params)
    if override_gateway:
        # If the server says to redirect the default gateway, we need to first add
        # an explicit route for the VPN server with the /current/ default gateway.
        tunnel_ip = ssl_socket.getpeername()[0]
        # FIXME: This is a total hack...and incorrect in some cases, too.  But
        # it'll work in the normal case where the VPN server isn't on your local
        # subnet.  This should really be using some (platform-specific) method
        # of finding the current route to tunnel_ip instead of assuming that's
        # the default route.
        gw_ip = os.popen("netstat -rn|grep '^default\|^0.0.0.0'|awk '{print $2}'").read().split()[0]
        sys.stderr.write("Detected current default route: %r\n" % gw_ip)
        sys.stderr.write("Attempting to delete and override route to VPN server.\n")
        try:
            platform.setup_route('', gw_ip, tunnel_ip, 32, 'delete')
        except:
            pass
        platform.setup_route('', gw_ip, tunnel_ip, 32, 'add')

        # Now, add a new default route, if it wasn't already specified (but not
        # on darwin: pppd's "defaultroute" option actually works there)
        if sys.platform != "darwin":
            if ([0,0,0,0], 0) not in routes_to_add:
                routes_to_add.insert(0, ([0,0,0,0], 0))

    pid = os.fork()
    if pid == 0:
        os.close(ssl_socket.fileno())
        # Setup new controlling TTY
        os.close(pppd_fd)
        os.setsid()
        os.dup2(slave_pppd_fd, 0)
        os.close(slave_pppd_fd)

        # setup log pipe
        os.dup2(logpipe_w, 4)
        os.close(logpipe_r)
        os.close(logpipe_w)

        # Become root
        os.seteuid(0)
        os.setuid(0)

        # Run pppd
        args = ['pppd', 'logfd', '4', 'noauth', 'nodetach',
                'crtscts', 'passive', 'ipcp-accept-local', 'ipcp-accept-remote',
                'local', 'nodeflate', 'novj', 'idle', '0', ]

        if override_gateway:
            args.append('defaultroute')
        else:
            args.append('nodefaultroute')

        if sys.platform == "darwin":
            args.extend(['serviceid', serviceid])

        try:
            os.execvp("pppd", args)
        except:
            os._exit(127)

    os.close(slave_pppd_fd)
    os.close(logpipe_w)
    def ppp_ip_up(iface_name, tty, local_ip, remote_ip):
        revdns_domains = []
        for net, bits in routes_to_add:
            platform.setup_route(iface_name, local_ip, '.'.join(map(str, net)), bits, 'add')
            revdns_domains.extend(routespec_to_revdns(net, bits))

        # sending a packet to the "local" ip appears to actually send data
        # across the connection, which is the desired behavior.
        set_keepalive_host(local_ip)

        if params.get('DNS0'):
            platform.setup_dns(iface_name, serviceid,
                               params['DNS0'].split(','),
                               re.split('[, ]+', params.get('DNSSuffix0', '')),
                               revdns_domains,
                               override_gateway)
        print "VPN link is up!"

    try:
        run_event_loop(pppd_fd, ssl_socket, ssl, logpipe_r, ppp_ip_up)
    finally:
        if params.get('DNS0'):
            platform.teardown_dns()
        as_root(shutdown_pppd, pid)
        if override_gateway:
            try:
                platform.setup_route('', gw_ip, tunnel_ip, 32, 'delete')
            except:
                pass

Example 47

Project: simplemonitor
Source File: monitor.py
View license
def main():
    """This is where it happens \o/"""

    parser = OptionParser()
    parser.add_option("-v", "--verbose", action="store_true", dest="verbose", default=False, help="Be more verbose")
    parser.add_option("-q", "--quiet", action="store_true", dest="quiet", default=False, help="Don't output anything except errors")
    parser.add_option("-t", "--test", action="store_true", dest="test", default=False, help="Test config and exit")
    parser.add_option("-p", "--pidfile", dest="pidfile", default="", help="Write PID into this file")
    parser.add_option("-N", "--no-network", dest="no_network", default=False, action="store_true", help="Disable network listening socket")
    parser.add_option("-d", "--debug", dest="debug", default=False, action="store_true", help="Enable debug output")
    parser.add_option("-f", "--config", dest="config", default="monitor.ini", help="configuration file")

    (options, args) = parser.parse_args()

    if options.quiet and options.verbose:
        options.verbose = False

    if options.quiet and options.debug:
        options.debug = False

    if options.debug and not options.verbose:
        options.verbose = True

    if not options.quiet:
        print "SimpleMonitor v%s" % VERSION
        print "--> Loading main config from %s" % options.config

    config = ConfigParser()
    config.read(options.config)
    interval = config.getint("monitor", "interval")

    pidfile = ""
    try:
        pidfile = config.get("monitor", "pidfile")
    except:
        pass

    if options.pidfile != "":
        pidfile = options.pidfile

    if pidfile != "":
        my_pid = os.getpid()
        try:
            with open(pidfile, "w") as file_handle:
                file_handle.write("%d\n" % my_pid)
        except:
            sys.stderr.write("Couldn't write to pidfile!")

    if config.has_option("monitor", "monitors"):
        monitors_file = config.get("monitor", "monitors")
    else:
        monitors_file = "monitors.ini"

    if not options.quiet:
        print "--> Loading monitor config from %s" % monitors_file

    m = SimpleMonitor()

    m = load_monitors(m, monitors_file, options.quiet)

    count = m.count_monitors()
    if count == 0:
        sys.stderr.write("No monitors loaded :(\n")
        sys.exit(2)

    if not options.quiet:
        print "--> Loaded %d monitors.\n" % count

    m = load_loggers(m, config, options.quiet)
    m = load_alerters(m, config, options.quiet)

    try:
        if config.get("monitor", "remote") == "1":
            if not options.no_network:
                enable_remote = True
                remote_port = int(config.get("monitor", "remote_port"))
            else:
                enable_remote = False
        else:
            enable_remote = False
    except:
        enable_remote = False

    if not m.verify_dependencies():
        sys.exit(1)

    if options.test:
        print "--> Config test complete. Exiting."
        sys.exit(0)

    if not options.quiet:
        print

    try:
        key = config.get("monitor", "key")
    except:
        key = None

    if enable_remote:
        if not options.quiet:
            print "--> Starting remote listener thread"
        remote_listening_thread = Loggers.network.Listener(m, remote_port, options.verbose, key)
        remote_listening_thread.daemon = True
        remote_listening_thread.start()

    if not options.quiet:
        print "--> Starting... (loop runs every %ds) Hit ^C to stop" % interval
    loop = True
    heartbeat = 0

    m.set_verbosity(options.verbose, options.debug)

    while loop:
        try:
            m.run_tests()
            m.do_recovery()
            m.do_alerts()
            m.do_logs()

            if not options.quiet and not options.verbose:
                heartbeat += 1
                if heartbeat == 2:
                    sys.stdout.write(".")
                    sys.stdout.flush()
                    heartbeat = 0
        except KeyboardInterrupt:

            if not options.quiet:
                print "\n--> EJECT EJECT"
            loop = False
        except Exception, e:
            sys.exc_info()
            sys.stderr.write("Caught unhandled exception during main loop: %s\n" % e)
        if loop and enable_remote:
            if not remote_listening_thread.isAlive():
                print "Listener thread died :("
                remote_listening_thread = Loggers.network.Listener(m, remote_port, options.verbose)
                remote_listening_thread.start()
        try:
            time.sleep(interval)
        except:
            print "\n--> Quitting."
            loop = False

    if enable_remote:
        remote_listening_thread.running = False
        remote_listening_thread.join(0)

    if pidfile != "":
        try:
            unlink(pidfile)
        except:
            print "Couldn't remove pidfile!"

    if not options.quiet:
        print "--> Finished."
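
Example 47 keeps its heartbeat dots on stdout (flushed immediately) and its failures on stderr, so errors stay visible even when stdout is redirected or silenced. A bare-bones sketch of that separation (check_once() stands in for the real monitor run):

import sys
import time

def check_once():
    return True  # stand-in for running the real checks

def run(interval=1, iterations=5):
    for _ in range(iterations):
        try:
            check_once()
            sys.stdout.write(".")
            sys.stdout.flush()   # make the dot appear right away
        except Exception as e:
            # errors go to stderr, separate from the progress dots
            sys.stderr.write("Caught unhandled exception during main loop: %s\n" % e)
        time.sleep(interval)
    sys.stdout.write("\n")

run()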

Example 48

Project: veusz
Source File: fit.py
View license
    def actionFit(self):
        """Fit the data."""

        s = self.settings

        # check and get compiled form of function
        compiled = self.document.evaluate.compileCheckedExpression(s.function)
        if compiled is None:
            return

        # populate the input parameters
        paramnames = sorted(s.values)
        params = N.array( [s.values[p] for p in paramnames] )

        # FIXME: loads of error handling!!
        d = self.document

        # choose dataset depending on fit variable
        if s.variable == 'x':
            xvals = s.get('xData').getData(d).data
            ydata = s.get('yData').getData(d)
        else:
            xvals = s.get('yData').getData(d).data
            ydata = s.get('xData').getData(d)
        yvals = ydata.data
        yserr = ydata.serr

        # if there are no errors on data
        if yserr is None:
            if ydata.perr is not None and ydata.nerr is not None:
                print("Warning: Symmeterising positive and negative errors")
                yserr = N.sqrt( 0.5*(ydata.perr**2 + ydata.nerr**2) )
            else:
                print("Warning: No errors on y values. Assuming 5% errors.")
                yserr = yvals*0.05
                yserr[yserr < 1e-8] = 1e-8

        # if the fitRange parameter is on, we chop out data outside the
        # range of the axis
        if s.fitRange:
            # get ranges for axes
            if s.variable == 'x':
                drange = self.parent.getAxes((s.xAxis,))[0].getPlottedRange()
                mask = N.logical_and(xvals >= drange[0], xvals <= drange[1])
            else:
                drange = self.parent.getAxes((s.yAxis,))[0].getPlottedRange()
                mask = N.logical_and(yvals >= drange[0], yvals <= drange[1])
            xvals, yvals, yserr = xvals[mask], yvals[mask], yserr[mask]
            print("Fitting %s from %g to %g" % (s.variable,
                                                drange[0], drange[1]))

        evalenv = self.initEnviron()
        def evalfunc(params, xvals):
            # update environment with variable and parameters
            evalenv[self.settings.variable] = xvals
            evalenv.update( czip(paramnames, params) )

            try:
                return eval(compiled, evalenv) + xvals*0.
            except Exception as e:
                self.document.log(cstr(e))
                return N.nan

        # minimum set for fitting
        if s.min != 'Auto':
            if s.variable == 'x':
                mask = xvals >= s.min
            else:
                mask = yvals >= s.min
            xvals, yvals, yserr = xvals[mask], yvals[mask], yserr[mask]

        # maximum set for fitting
        if s.max != 'Auto':
            if s.variable == 'x':
                mask = xvals <= s.max
            else:
                mask = yvals <= s.max
            xvals, yvals, yserr = xvals[mask], yvals[mask], yserr[mask]

        if s.min != 'Auto' or s.max != 'Auto':
            print("Fitting %s between %s and %s" % (s.variable, s.min, s.max))

        # various error checks
        if len(xvals) != len(yvals) or len(xvals) != len(yserr):
            sys.stderr.write(_('Fit data not equal in length. Not fitting.\n'))
            return
        if len(params) > len(xvals):
            sys.stderr.write(_('No degrees of freedom for fit. Not fitting\n'))
            return

        # actually do the fit, either via Minuit or our own LM fitter
        chi2 = 1
        dof = 1

        # only consider finite values
        finite = N.isfinite(xvals) & N.isfinite(yvals) & N.isfinite(yserr)
        xvals = xvals[finite]
        yvals = yvals[finite]
        yserr = yserr[finite]

        # check length after excluding non-finite values
        if len(xvals) == 0:
            sys.stderr.write(_('No data values. Not fitting.\n'))
            return

        if minuit is not None:
            vals, chi2, dof = minuitFit(evalfunc, params, paramnames, s.values,
                                        xvals, yvals, yserr)
        else:
            print(_('Minuit not available, falling back to simple L-M fitting:'))
            retn, chi2, dof = utils.fitLM(evalfunc, params,
                                          xvals,
                                          yvals, yserr)
            vals = {}
            for i, v in czip(paramnames, retn):
                vals[i] = float(v)

        # list of operations so we can undo the changes
        operations = []
                                      
        # populate the return parameters
        operations.append( document.OperationSettingSet(s.get('values'), vals) )

        # populate the read-only fit quality params
        operations.append( document.OperationSettingSet(s.get('chi2'), float(chi2)) )
        operations.append( document.OperationSettingSet(s.get('dof'), int(dof)) )
        if dof <= 0:
            print(_('No degrees of freedom in fit.\n'))
            redchi2 = -1.
        else:
            redchi2 = float(chi2/dof)
        operations.append( document.OperationSettingSet(s.get('redchi2'), redchi2) )

        # expression for fit
        expr = self.generateOutputExpr(vals)
        operations.append( document.OperationSettingSet(s.get('outExpr'), expr) )

        self.updateOutputLabel(operations, vals, chi2, dof)

        # actually change all the settings
        d.applyOperation(
            document.OperationMultiple(operations, descr=_('fit')) )
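
actionFit() above bails out with a one-line stderr message whenever a sanity check fails, before any fitting work is done. A stripped-down sketch of that guard-then-return pattern (fit() and its stand-in result are illustrative, not the veusz API):

import sys

def fit(xvals, yvals, yserr):
    # validate first; report the reason on stderr and return early
    if len(xvals) != len(yvals) or len(xvals) != len(yserr):
        sys.stderr.write('Fit data not equal in length. Not fitting.\n')
        return None
    if len(xvals) == 0:
        sys.stderr.write('No data values. Not fitting.\n')
        return None
    # ... the actual fitting would happen here ...
    return sum(yvals) / float(len(yvals))  # stand-in result

fit([1, 2, 3], [2.0, 4.1, 5.9], [0.1, 0.1, 0.1])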

Example 50

Project: karaage
Source File: kgcreatesuperuser.py
View license
    @django.db.transaction.atomic
    @tldap.transaction.commit_on_success
    def handle(self, *args, **options):

        username = options['username']
        email = options['email']
        short_name = options['short_name']
        full_name = options['full_name']
        password = options['password']
        institute_name = options['institute']

        # Try to determine the current system user's username to use as a
        # default.
        try:
            import pwd
            unix_uid = os.getuid()
            unix_username = pwd.getpwuid(unix_uid)[0]
            default_username = unix_username.replace(' ', '').lower()
            if default_username == 'root':
                default_username = ''
        except (ImportError, KeyError):
            # KeyError will be raised by getpwuid() if there is no
            # corresponding entry in the /etc/passwd file (a very restricted
            # chroot environment, for example).
            default_username = ''

        # Determine whether the default username is taken, so we don't display
        # it as an option.
        if default_username:
            try:
                Person.objects.get(username=default_username)
            except Person.DoesNotExist:
                pass
            else:
                default_username = ''

        # Prompt for username/email/password. Enclose this whole thing in a
        # try/except to trap for a keyboard interrupt and exit gracefully.
        try:
            # Get a username
            while 1:
                if not username:
                    input_msg = 'Username'
                    if default_username:
                        input_msg += \
                            ' (Leave blank to use %r)' % default_username
                    username = input(input_msg + ': ')
                    if default_username and username == '':
                        username = default_username
                try:
                    validate_username_for_new_person(username)
                    break
                except UsernameException as e:
                    sys.stderr.write("%s\n" % e)
                    username = None
                    print('')
                    continue

            # Get an email
            while 1:
                if not email:
                    email = input('E-mail address: ')
                try:
                    validate_email(email)
                except exceptions.ValidationError:
                    sys.stderr.write(
                        "Error: That e-mail address is invalid.\n")
                    print('')
                    email = None
                else:
                    break

            # Get a password
            while 1:
                if not password:
                    password = getpass.getpass()
                    password2 = getpass.getpass('Password (again): ')
                    if password != password2:
                        sys.stderr.write(
                            "Error: Your passwords didn't match.\n")
                        password = None
                        print('')
                        continue
                if password.strip() == '':
                    sys.stderr.write(
                        "Error: Blank passwords aren't allowed.\n")
                    password = None
                    print('')
                    continue
                break

            while 1:
                if not short_name:
                    short_name = input('Short Name: ')
                else:
                    break
            while 1:
                if not full_name:
                    full_name = input('Full Name: ')
                else:
                    break

            group_re = re.compile(r'^%s$' % settings.GROUP_VALIDATION_RE)
            while 1:
                if not institute_name:
                    if Institute.objects.count() > 0:
                        print(
                            "Choose an existing institute "
                            "for new superuser.")
                        print("Alternatively enter a new name to create one.")
                        print("")
                        print("Valid choices are:")
                        print("")
                        for i in Institute.active.all():
                            print("* %s" % i)
                        print
                    else:
                        print("No Institutes in system, will create one now.")
                        print('')

                    institute_name = input('Institute Name: ')

                if not re.search(group_re, institute_name):
                    sys.stderr.write(
                        "%s\n" % settings.GROUP_VALIDATION_ERROR_MSG)
                    institute_name = None
                    print('')
                    continue
                else:
                    break

            try:
                institute = Institute.objects.get(name=institute_name)
                print("Using existing institute %s." % institute)

            except Institute.DoesNotExist:
                group, c = Group.objects.get_or_create(name=institute_name)
                if c:
                    print("Created new group %s." % group)
                else:
                    print("Using existing group %s." % group)

                institute = Institute.objects.create(
                    name=institute_name, group=group, is_active=True)

                print("Created new institute %s." % institute)

        except KeyboardInterrupt:
            sys.stderr.write("\nOperation cancelled.\n")
            sys.exit(1)

        data = {
            'username': username,
            'email': email,
            'password': password,
            'short_name': short_name,
            'full_name': full_name,
            'institute': institute,
        }
        Person.objects.create_superuser(**data)
        print("Karaage Superuser created successfully.")
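
The management command above loops on each prompt until the input validates, writing the failure reason to stderr and asking again. A minimal sketch of that prompt/validate loop (validate_username() here is a placeholder, not the karaage validator):

import sys

try:
    input_func = raw_input   # Python 2
except NameError:
    input_func = input       # Python 3

class UsernameException(Exception):
    pass

def validate_username(username):
    # stand-in check; the real command uses its own validators
    if not username or not username.islower():
        raise UsernameException("Error: username must be non-empty and lower case.")

def prompt_username():
    while True:
        username = input_func('Username: ')
        try:
            validate_username(username)
            return username
        except UsernameException as e:
            sys.stderr.write("%s\n" % e)   # report and prompt again

if __name__ == "__main__":
    sys.stdout.write("Got username %s\n" % prompt_username())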