numpy.array

Here are examples of the Python API numpy.array, taken from open-source projects. By voting up, you can indicate which examples are most useful and appropriate.

200 Examples

Example 1

Project: pycollada
Source File: test_collada.py
View license
    def test_collada_saving(self):
        """Round-trip a COLLADA document through two save/load cycles.

        Builds a document in memory, writes it to a buffer, reloads it, and
        checks that every collection (geometries, controllers, lights,
        cameras, images, effects, materials, nodes, scenes) survived.  The
        reloaded document is then mutated (first element of each collection
        replaced), saved again via save()/tostring(), reloaded once more,
        and the collections are re-verified.
        """
        # validate_output=True makes write()/save() schema-validate the XML.
        mesh = collada.Collada(validate_output=True)

        # A freshly created document must start with every collection empty
        # and no active scene.
        self.assertEqual(len(mesh.geometries), 0)
        self.assertEqual(len(mesh.controllers), 0)
        self.assertEqual(len(mesh.lights), 0)
        self.assertEqual(len(mesh.cameras), 0)
        self.assertEqual(len(mesh.images), 0)
        self.assertEqual(len(mesh.effects), 0)
        self.assertEqual(len(mesh.materials), 0)
        self.assertEqual(len(mesh.nodes), 0)
        self.assertEqual(len(mesh.scenes), 0)
        self.assertEqual(mesh.scene, None)
        self.assertIsNotNone(str(mesh))

        # Geometry 1: a bare float source with no primitives attached.
        floatsource = collada.source.FloatSource("myfloatsource", numpy.array([0.1,0.2,0.3]), ('X', 'Y', 'Z'))
        geometry1 = collada.geometry.Geometry(mesh, "geometry1", "mygeometry1", {"myfloatsource":floatsource})
        mesh.geometries.append(geometry1)

        # Geometry 2: a line set.  The slightly-off values (e.g. -0.9999998)
        # deliberately exercise float formatting during serialization.
        linefloats = [1,1,-1, 1,-1,-1, -1,-0.9999998,-1, -0.9999997,1,-1, 1,0.9999995,1, 0.9999994,-1.000001,1]
        linefloatsrc = collada.source.FloatSource("mylinevertsource", numpy.array(linefloats), ('X', 'Y', 'Z'))
        geometry2 = collada.geometry.Geometry(mesh, "geometry2", "mygeometry2", [linefloatsrc])
        input_list = collada.source.InputList()
        input_list.addInput(0, 'VERTEX', "#mylinevertsource")
        indices = numpy.array([0,1, 1,2, 2,3, 3,4, 4,5])
        lineset1 = geometry2.createLineSet(indices, input_list, "mymaterial2")
        geometry2.primitives.append(lineset1)
        mesh.geometries.append(geometry2)

        # Two lights of different subtypes.
        ambientlight = collada.light.AmbientLight("myambientlight", (1,1,1))
        pointlight = collada.light.PointLight("mypointlight", (1,1,1))
        mesh.lights.append(ambientlight)
        mesh.lights.append(pointlight)

        # Two cameras.
        camera1 = collada.camera.PerspectiveCamera("mycam1", 45.0, 0.01, 1000.0)
        camera2 = collada.camera.PerspectiveCamera("mycam2", 45.0, 0.01, 1000.0)
        mesh.cameras.append(camera1)
        mesh.cameras.append(camera2)

        # Two images (the referenced file need not exist for saving).
        cimage1 = collada.material.CImage("mycimage1", "./whatever.tga", mesh)
        cimage2 = collada.material.CImage("mycimage2", "./whatever.tga", mesh)
        mesh.images.append(cimage1)
        mesh.images.append(cimage2)

        # Two phong effects and a material bound to each.
        effect1 = collada.material.Effect("myeffect1", [], "phong")
        effect2 = collada.material.Effect("myeffect2", [], "phong")
        mesh.effects.append(effect1)
        mesh.effects.append(effect2)

        mat1 = collada.material.Material("mymaterial1", "mymat1", effect1)
        mat2 = collada.material.Material("mymaterial2", "mymat2", effect2)
        mesh.materials.append(mat1)
        mesh.materials.append(mat2)

        # Library nodes: one with transforms, one empty.
        rotate = collada.scene.RotateTransform(0.1, 0.2, 0.3, 90)
        scale = collada.scene.ScaleTransform(0.1, 0.2, 0.3)
        mynode1 = collada.scene.Node('mynode1', children=[], transforms=[rotate, scale])
        mynode2 = collada.scene.Node('mynode2', children=[], transforms=[])
        mesh.nodes.append(mynode1)
        mesh.nodes.append(mynode2)

        # Two scenes; scene1 references geometry2 through a scene node.
        geomnode = collada.scene.GeometryNode(geometry2)
        mynode3 = collada.scene.Node('mynode3', children=[geomnode], transforms=[])
        mynode4 = collada.scene.Node('mynode4', children=[], transforms=[])
        scene1 = collada.scene.Scene('myscene1', [mynode3])
        scene2 = collada.scene.Scene('myscene2', [mynode4])
        mesh.scenes.append(scene1)
        mesh.scenes.append(scene2)

        mesh.scene = scene1

        # First round trip: serialize to bytes, then parse those bytes back.
        out = BytesIO()
        mesh.write(out)

        toload = BytesIO(out.getvalue())

        loaded_mesh = collada.Collada(toload, validate_output=True)
        # Everything added above must survive serialization, by count...
        self.assertEqual(len(loaded_mesh.geometries), 2)
        self.assertEqual(len(loaded_mesh.controllers), 0)
        self.assertEqual(len(loaded_mesh.lights), 2)
        self.assertEqual(len(loaded_mesh.cameras), 2)
        self.assertEqual(len(loaded_mesh.images), 2)
        self.assertEqual(len(loaded_mesh.effects), 2)
        self.assertEqual(len(loaded_mesh.materials), 2)
        self.assertEqual(len(loaded_mesh.nodes), 2)
        self.assertEqual(len(loaded_mesh.scenes), 2)
        self.assertEqual(loaded_mesh.scene.id, scene1.id)

        # ...and by id (the collections support `in` lookup by id).
        self.assertIn('geometry1', loaded_mesh.geometries)
        self.assertIn('geometry2', loaded_mesh.geometries)
        self.assertIn('mypointlight', loaded_mesh.lights)
        self.assertIn('myambientlight', loaded_mesh.lights)
        self.assertIn('mycam1', loaded_mesh.cameras)
        self.assertIn('mycam2', loaded_mesh.cameras)
        self.assertIn('mycimage1', loaded_mesh.images)
        self.assertIn('mycimage2', loaded_mesh.images)
        self.assertIn('myeffect1', loaded_mesh.effects)
        self.assertIn('myeffect2', loaded_mesh.effects)
        self.assertIn('mymaterial1', loaded_mesh.materials)
        self.assertIn('mymaterial2', loaded_mesh.materials)
        self.assertIn('mynode1', loaded_mesh.nodes)
        self.assertIn('mynode2', loaded_mesh.nodes)
        self.assertIn('myscene1', loaded_mesh.scenes)
        self.assertIn('myscene2', loaded_mesh.scenes)

        # Mutate the reloaded document: in each collection, drop the first
        # element and append a brand-new replacement.  This exercises
        # save()'s ability to both remove and add XML nodes in place.
        linefloatsrc2 = collada.source.FloatSource("mylinevertsource2", numpy.array(linefloats), ('X', 'Y', 'Z'))
        geometry3 = collada.geometry.Geometry(mesh, "geometry3", "mygeometry3", [linefloatsrc2])
        loaded_mesh.geometries.pop(0)
        loaded_mesh.geometries.append(geometry3)

        dirlight = collada.light.DirectionalLight("mydirlight", (1,1,1))
        loaded_mesh.lights.pop(0)
        loaded_mesh.lights.append(dirlight)

        camera3 = collada.camera.PerspectiveCamera("mycam3", 45.0, 0.01, 1000.0)
        loaded_mesh.cameras.pop(0)
        loaded_mesh.cameras.append(camera3)

        cimage3 = collada.material.CImage("mycimage3", "./whatever.tga", loaded_mesh)
        loaded_mesh.images.pop(0)
        loaded_mesh.images.append(cimage3)

        effect3 = collada.material.Effect("myeffect3", [], "phong")
        loaded_mesh.effects.pop(0)
        loaded_mesh.effects.append(effect3)

        mat3 = collada.material.Material("mymaterial3", "mymat3", effect3)
        loaded_mesh.materials.pop(0)
        loaded_mesh.materials.append(mat3)

        mynode5 = collada.scene.Node('mynode5', children=[], transforms=[])
        loaded_mesh.nodes.pop(0)
        loaded_mesh.nodes.append(mynode5)

        mynode6 = collada.scene.Node('mynode6', children=[], transforms=[])
        scene3 = collada.scene.Scene('myscene3', [mynode6])
        loaded_mesh.scenes.pop(0)
        loaded_mesh.scenes.append(scene3)

        loaded_mesh.scene = scene3

        # Second round trip: save() rewrites loaded_mesh.xmlnode in place,
        # then we serialize that tree and parse it a second time.
        loaded_mesh.save()

        strdata = tostring(loaded_mesh.xmlnode.getroot())
        indata = BytesIO(strdata)
        loaded_mesh2 = collada.Collada(indata, validate_output=True)

        # Each collection must now contain the new replacement element plus
        # the original second element that was never removed.
        self.assertEqual(loaded_mesh2.scene.id, scene3.id)
        self.assertIn('geometry3', loaded_mesh2.geometries)
        self.assertIn('geometry2', loaded_mesh2.geometries)
        self.assertIn('mydirlight', loaded_mesh2.lights)
        self.assertIn('mypointlight', loaded_mesh2.lights)
        self.assertIn('mycam3', loaded_mesh2.cameras)
        self.assertIn('mycam2', loaded_mesh2.cameras)
        self.assertIn('mycimage3', loaded_mesh2.images)
        self.assertIn('mycimage2', loaded_mesh2.images)
        self.assertIn('myeffect3', loaded_mesh2.effects)
        self.assertIn('myeffect2', loaded_mesh2.effects)
        self.assertIn('mymaterial3', loaded_mesh2.materials)
        self.assertIn('mymaterial2', loaded_mesh2.materials)
        self.assertIn('mynode5', loaded_mesh2.nodes)
        self.assertIn('mynode2', loaded_mesh2.nodes)
        self.assertIn('myscene3', loaded_mesh2.scenes)
        self.assertIn('myscene2', loaded_mesh2.scenes)

Example 2

Project: RMG-Py
Source File: gamessparser.py
View license
    def extract(self, inputfile, line):
        """Extract information from the file object inputfile."""

        if line [1:12] == "INPUT CARD>":
            return

        # We are looking for this line:
        #           PARAMETERS CONTROLLING GEOMETRY SEARCH ARE
        #           ...
        #           OPTTOL = 1.000E-04          RMIN   = 1.500E-03
        if line[10:18] == "OPTTOL =":
            if not hasattr(self, "geotargets"):
                opttol = float(line.split()[2])
                self.geotargets = numpy.array([opttol, 3. / opttol], "d")
                        
        if line.find("FINAL") == 1:
            if not hasattr(self, "scfenergies"):
                self.scfenergies = []
        # Has to deal with such lines as:
        #  FINAL R-B3LYP ENERGY IS     -382.0507446475 AFTER  10 ITERATIONS
        #  FINAL ENERGY IS     -379.7594673378 AFTER   9 ITERATIONS
        # ...so take the number after the "IS"
            temp = line.split()
            self.scfenergies.append(utils.convertor(float(temp[temp.index("IS") + 1]), "hartree", "eV"))

        # Total energies after Moller-Plesset corrections
        if (line.find("RESULTS OF MOLLER-PLESSET") >= 0 or
            line[6:37] == "SCHWARZ INEQUALITY TEST SKIPPED"):
            # Output looks something like this:
            # RESULTS OF MOLLER-PLESSET 2ND ORDER CORRECTION ARE
            #         E(0)=      -285.7568061536
            #         E(1)=         0.0
            #         E(2)=        -0.9679419329
            #       E(MP2)=      -286.7247480864
            # where E(MP2) = E(0) + E(2)
            #
            # with GAMESS-US 12 Jan 2009 (R3) the preceding text is different:
            ##      DIRECT 4-INDEX TRANSFORMATION 
            ##      SCHWARZ INEQUALITY TEST SKIPPED          0 INTEGRAL BLOCKS
            ##                     E(SCF)=       -76.0088477471
            ##                       E(2)=        -0.1403745370
            ##                     E(MP2)=       -76.1492222841            
            if not hasattr(self, "mpenergies"):
                self.mpenergies = []
            # Each iteration has a new print-out
            self.mpenergies.append([])
            # GAMESS-US presently supports only second order corrections (MP2)
            # PC GAMESS also has higher levels (3rd and 4th), with different output
            # Only the highest level MP4 energy is gathered (SDQ or SDTQ)            
            while re.search("DONE WITH MP(\d) ENERGY", line) is None:
                line = inputfile.next()
                if len(line.split()) > 0:
                    # Only up to MP2 correction
                    if line.split()[0] == "E(MP2)=":
                        mp2energy = float(line.split()[1])
                        self.mpenergies[-1].append(utils.convertor(mp2energy, "hartree", "eV"))
                    # MP2 before higher order calculations
                    if line.split()[0] == "E(MP2)":
                        mp2energy = float(line.split()[2])
                        self.mpenergies[-1].append(utils.convertor(mp2energy, "hartree", "eV"))
                    if line.split()[0] == "E(MP3)":
                        mp3energy = float(line.split()[2])
                        self.mpenergies[-1].append(utils.convertor(mp3energy, "hartree", "eV"))
                    if line.split()[0] in ["E(MP4-SDQ)", "E(MP4-SDTQ)"]:
                        mp4energy = float(line.split()[2])
                        self.mpenergies[-1].append(utils.convertor(mp4energy, "hartree", "eV"))

        # Total energies after Coupled Cluster calculations
        # Only the highest Coupled Cluster level result is gathered
        if line[12:23] == "CCD ENERGY:":
            if not hasattr(self, "ccenergies"):
                self.ccenergies = []
            ccenergy = float(line.split()[2])
            self.ccenergies.append(utils.convertor(ccenergy, "hartree", "eV"))
        if line.find("CCSD") >= 0 and line.split()[0:2] == ["CCSD", "ENERGY:"]:
            if not hasattr(self, "ccenergies"):
                self.ccenergies = []
            ccenergy = float(line.split()[2])
            line = inputfile.next()
            if line[8:23] == "CCSD[T] ENERGY:":
                ccenergy = float(line.split()[2])
                line = inputfile.next()
                if line[8:23] == "CCSD(T) ENERGY:":
                    ccenergy = float(line.split()[2])
            self.ccenergies.append(utils.convertor(ccenergy, "hartree", "eV"))
        # Also collect MP2 energies, which are always calculated before CC
        if line [8:23] == "MBPT(2) ENERGY:":
            if not hasattr(self, "mpenergies"):
                self.mpenergies = []
            self.mpenergies.append([])
            mp2energy = float(line.split()[2])
            self.mpenergies[-1].append(utils.convertor(mp2energy, "hartree", "eV"))

        # Extract charge and multiplicity
        if line[1:19] == "CHARGE OF MOLECULE":
            self.charge = int(line.split()[-1])
            self.mult = int(inputfile.next().split()[-1])

        # etenergies (used only for CIS runs now)
        if "EXCITATION ENERGIES" in line and line.find("DONE WITH") < 0:
            if not hasattr(self, "etenergies"):
                self.etenergies = []
            header = inputfile.next().rstrip()
            get_etosc = False
            if header.endswith("OSC. STR."):
                # water_cis_dets.out does not have the oscillator strength
                # in this table...it is extracted from a different section below
                get_etosc = True
                self.etoscs = []
            dashes = inputfile.next()
            line = inputfile.next()
            broken = line.split()
            while len(broken) > 0:
                # Take hartree value with more numbers, and convert.
                # Note that the values listed after this are also less exact!
                etenergy = float(broken[1])
                self.etenergies.append(utils.convertor(etenergy, "hartree", "cm-1"))
                if get_etosc:
                    etosc = float(broken[-1])
                    self.etoscs.append(etosc)
                broken = inputfile.next().split()

        # Detect the CI hamiltonian type, if applicable.
        # Should always be detected if CIS is done.
        if line[8:64] == "RESULTS FROM SPIN-ADAPTED ANTISYMMETRIZED PRODUCT (SAPS)":
            self.cihamtyp = "saps"
        if line[8:64] == "RESULTS FROM DETERMINANT BASED ATOMIC ORBITAL CI-SINGLES":
            self.cihamtyp = "dets"

        # etsecs (used only for CIS runs for now)
        if line[1:14] == "EXCITED STATE":
            if not hasattr(self, 'etsecs'):
                self.etsecs = []
            if not hasattr(self, 'etsyms'):
                self.etsyms = []
            statenumber = int(line.split()[2])
            spin = int(float(line.split()[7]))
            if spin == 0:
                sym = "Singlet"
            if spin == 1:
                sym = "Triplet"
            sym += '-' + line.split()[-1]
            self.etsyms.append(sym)
            # skip 5 lines
            for i in range(5):
                line = inputfile.next()
            line = inputfile.next()
            CIScontribs = []
            while line.strip()[0] != "-":
                MOtype = 0
                # alpha/beta are specified for hamtyp=dets
                if self.cihamtyp == "dets":
                    if line.split()[0] == "BETA":
                        MOtype = 1
                fromMO = int(line.split()[-3])-1
                toMO = int(line.split()[-2])-1
                coeff = float(line.split()[-1])
                # With the SAPS hamiltonian, the coefficients are multiplied
                #   by sqrt(2) so that they normalize to 1.
                # With DETS, both alpha and beta excitations are printed.
                # if self.cihamtyp == "saps":
                #    coeff /= numpy.sqrt(2.0)
                CIScontribs.append([(fromMO,MOtype),(toMO,MOtype),coeff])
                line = inputfile.next()
            self.etsecs.append(CIScontribs)

        # etoscs (used only for CIS runs now)
        if line[1:50] == "TRANSITION FROM THE GROUND STATE TO EXCITED STATE":
            if not hasattr(self, "etoscs"):
                self.etoscs = []
            statenumber = int(line.split()[-1])
            # skip 7 lines
            for i in range(8):
                line = inputfile.next()
            strength = float(line.split()[3])
            self.etoscs.append(strength)

        # TD-DFT for GAMESS-US
        if line[14:29] == "LET EXCITATIONS": # TRIPLET and SINGLET
            self.etenergies = []
            self.etoscs = []
            self.etsecs = []
            etsyms = []
            minus = inputfile.next()
            blank = inputfile.next()
            line = inputfile.next()
            # Loop starts on the STATE line
            while line.find("STATE") >= 0:
                broken = line.split()
                self.etenergies.append(utils.convertor(float(broken[-2]), "eV", "cm-1"))
                broken = inputfile.next().split()
                self.etoscs.append(float(broken[-1]))
                sym = inputfile.next() # Not always present
                if sym.find("SYMMETRY")>=0:
                    etsyms.append(sym.split()[-1])
                    header = inputfile.next()
                minus = inputfile.next()
                CIScontribs = []
                line = inputfile.next()
                while line.strip():
                    broken = line.split()
                    fromMO, toMO = [int(broken[x]) - 1 for x in [2, 4]]
                    CIScontribs.append([(fromMO, 0), (toMO, 0), float(broken[1])])
                    line = inputfile.next()
                self.etsecs.append(CIScontribs)
                line = inputfile.next()
            if etsyms: # Not always present
                self.etsyms = etsyms
         
        # Maximum and RMS gradients.
        if "MAXIMUM GRADIENT" in line or "RMS GRADIENT" in line:

            if not hasattr(self, "geovalues"):
                self.geovalues = []

            parts = line.split()

            # Newer versions (around 2006) have both maximum and RMS on one line:
            #       MAXIMUM GRADIENT =  0.0531540    RMS GRADIENT = 0.0189223
            if len(parts) == 8:
                maximum = float(parts[3])
                rms = float(parts[7])
            
            # In older versions of GAMESS, this spanned two lines, like this:
            #       MAXIMUM GRADIENT =    0.057578167
            #           RMS GRADIENT =    0.027589766
            if len(parts) == 4:
                maximum = float(parts[3])
                line = inputfile.next()
                parts = line.split()
                rms = float(parts[3])


            # FMO also prints two final one- and two-body gradients (see exam37):
            #   (1) MAXIMUM GRADIENT =  0.0531540    RMS GRADIENT = 0.0189223
            if len(parts) == 9:
                maximum = float(parts[4])
                rms = float(parts[8])

            self.geovalues.append([maximum, rms])

        if line[11:50] == "ATOMIC                      COORDINATES":
            # This is the input orientation, which is the only data available for
            # SP calcs, but which should be overwritten by the standard orientation
            # values, which is the only information available for all geoopt cycles.
            if not hasattr(self, "atomcoords"):
                self.atomcoords = []
                self.atomnos = []
            line = inputfile.next()
            atomcoords = []
            atomnos = []
            line = inputfile.next()
            while line.strip():
                temp = line.strip().split()
                atomcoords.append([utils.convertor(float(x), "bohr", "Angstrom") for x in temp[2:5]])
                atomnos.append(int(round(float(temp[1])))) # Don't use the atom name as this is arbitary
                line = inputfile.next()
            self.atomnos = numpy.array(atomnos, "i")
            self.atomcoords.append(atomcoords)

        if line[12:40] == "EQUILIBRIUM GEOMETRY LOCATED":
            # Prevent extraction of the final geometry twice
            self.geooptfinished = True
        
        if line[1:29] == "COORDINATES OF ALL ATOMS ARE" and not self.geooptfinished:
            # This is the standard orientation, which is the only coordinate
            # information available for all geometry optimisation cycles.
            # The input orientation will be overwritten if this is a geometry optimisation
            # We assume that a previous Input Orientation has been found and
            # used to extract the atomnos
            if self.firststdorient:
                self.firststdorient = False
                # Wipes out the single input coordinate at the start of the file
                self.atomcoords = []
                
            line = inputfile.next()
            hyphens = inputfile.next()

            atomcoords = []
            line = inputfile.next()                

            for i in range(self.natom):
                temp = line.strip().split()
                atomcoords.append(map(float, temp[2:5]))
                line = inputfile.next()
            self.atomcoords.append(atomcoords)
        
        # Section with SCF information.
        #
        # The space at the start of the search string is to differentiate from MCSCF.
        # Everything before the search string is stored as the type of SCF.
        # SCF types may include: BLYP, RHF, ROHF, UHF, etc.
        #
        # For example, in exam17 the section looks like this (note that this is GVB):
        #          ------------------------
        #          ROHF-GVB SCF CALCULATION
        #          ------------------------
        # GVB STEP WILL USE    119875 WORDS OF MEMORY.
        #
        #     MAXIT=  30   NPUNCH= 2   SQCDF TOL=1.0000E-05
        #     NUCLEAR ENERGY=        6.1597411978
        #     EXTRAP=T   DAMP=F   SHIFT=F   RSTRCT=F   DIIS=F  SOSCF=F
        #
        # ITER EX     TOTAL ENERGY       E CHANGE        SQCDF       DIIS ERROR
        #   0  0      -38.298939963   -38.298939963   0.131784454   0.000000000
        #   1  1      -38.332044339    -0.033104376   0.026019716   0.000000000
        # ... and will be terminated by a blank line.
        if line.rstrip()[-16:] == " SCF CALCULATION":

            # Remember the type of SCF.
            self.scftype = line.strip()[:-16]

            dashes = inputfile.next()

            while line [:5] != " ITER":

                # GVB uses SQCDF for checking convergence (for example in exam17).
                if "GVB" in self.scftype and "SQCDF TOL=" in line:
                    scftarget = float(line.split("=")[-1])

                # Normally however the density is used as the convergence criterium.
                # Deal with various versions:
                #   (GAMESS VERSION = 12 DEC 2003)
                #     DENSITY MATRIX CONV=  2.00E-05  DFT GRID SWITCH THRESHOLD=  3.00E-04
                #   (GAMESS VERSION = 22 FEB 2006)
                #     DENSITY MATRIX CONV=  1.00E-05
                #   (PC GAMESS version 6.2, Not DFT?)
                #     DENSITY CONV=  1.00E-05
                elif "DENSITY CONV" in line or "DENSITY MATRIX CONV" in line:
                    scftarget = float(line.split()[-1])

                line = inputfile.next()

            if not hasattr(self, "scftargets"):
                self.scftargets = []

            self.scftargets.append([scftarget])

            if not hasattr(self,"scfvalues"):
                self.scfvalues = []

            line = inputfile.next()

            # Normally the iteration print in 6 columns.
            # For ROHF, however, it is 5 columns, thus this extra parameter.
            if "ROHF" in self.scftype:
                valcol = 4
            else:
                valcol = 5

            # SCF iterations are terminated by a blank line.
            # The first four characters usually contains the step number.
            # However, lines can also contain messages, including:
            #   * * *   INITIATING DIIS PROCEDURE   * * *
            #   CONVERGED TO SWOFF, SO DFT CALCULATION IS NOW SWITCHED ON
            #   DFT CODE IS SWITCHING BACK TO THE FINER GRID
            values = []
            while line.strip():
                try:
                    temp = int(line[0:4])
                except ValueError:
                    pass
                else:
                    values.append([float(line.split()[valcol])])
                line = inputfile.next()
            self.scfvalues.append(values)

        if line.find("NORMAL COORDINATE ANALYSIS IN THE HARMONIC APPROXIMATION") >= 0:
        # GAMESS has...
        # MODES 1 TO 6 ARE TAKEN AS ROTATIONS AND TRANSLATIONS.
        #
        #     FREQUENCIES IN CM**-1, IR INTENSITIES IN DEBYE**2/AMU-ANGSTROM**2,
        #     REDUCED MASSES IN AMU.
        #
        #                          1           2           3           4           5
        #       FREQUENCY:        52.49       41.45       17.61        9.23       10.61  
        #    REDUCED MASS:      3.92418     3.77048     5.43419     6.44636     5.50693
        #    IR INTENSITY:      0.00013     0.00001     0.00004     0.00000     0.00003

        # ...or in the case of a numerical Hessian job...

        # MODES 1 TO 5 ARE TAKEN AS ROTATIONS AND TRANSLATIONS.
        #
        #     FREQUENCIES IN CM**-1, IR INTENSITIES IN DEBYE**2/AMU-ANGSTROM**2,
        #     REDUCED MASSES IN AMU.
        #
        #                          1           2           3           4           5
        #       FREQUENCY:         0.05        0.03        0.03       30.89       30.94  
        #    REDUCED MASS:      8.50125     8.50137     8.50136     1.06709     1.06709

        
        # whereas PC-GAMESS has...
        # MODES 1 TO 6 ARE TAKEN AS ROTATIONS AND TRANSLATIONS.
        #
        #     FREQUENCIES IN CM**-1, IR INTENSITIES IN DEBYE**2/AMU-ANGSTROM**2
        #
        #                          1           2           3           4           5
        #       FREQUENCY:         5.89        1.46        0.01        0.01        0.01  
        #    IR INTENSITY:      0.00000     0.00000     0.00000     0.00000     0.00000
        
        # If Raman is present we have (for PC-GAMESS)...
        # MODES 1 TO 6 ARE TAKEN AS ROTATIONS AND TRANSLATIONS.
        #
        #     FREQUENCIES IN CM**-1, IR INTENSITIES IN DEBYE**2/AMU-ANGSTROM**2
        #     RAMAN INTENSITIES IN ANGSTROM**4/AMU, DEPOLARIZATIONS ARE DIMENSIONLESS
        #
        #                          1           2           3           4           5
        #       FREQUENCY:         5.89        1.46        0.04        0.03        0.01  
        #    IR INTENSITY:      0.00000     0.00000     0.00000     0.00000     0.00000
        # RAMAN INTENSITY:       12.675       1.828       0.000       0.000       0.000
        #  DEPOLARIZATION:        0.750       0.750       0.124       0.009       0.750

        # If PC-GAMESS has not reached the stationary point we have
        # MODES 1 TO 5 ARE TAKEN AS ROTATIONS AND TRANSLATIONS.
        #
        #     FREQUENCIES IN CM**-1, IR INTENSITIES IN DEBYE**2/AMU-ANGSTROM**2
        #
        #     *******************************************************
        #     * THIS IS NOT A STATIONARY POINT ON THE MOLECULAR PES *
        #     *     THE VIBRATIONAL ANALYSIS IS NOT VALID !!!       *
        #     *******************************************************
        #
        #                          1           2           3           4           5
        
        # MODES 2 TO 7 ARE TAKEN AS ROTATIONS AND TRANSLATIONS.

            self.vibfreqs = []
            self.vibirs = []
            self.vibdisps = []

            # Need to get to the modes line
            warning = False
            while line.find("MODES") == -1:
                line = inputfile.next()
                if line.find("THIS IS NOT A STATIONARY POINT")>=0:
                    warning = True
            startrot = int(line.split()[1])
            endrot = int(line.split()[3])
            blank = inputfile.next()

            line = inputfile.next() # FREQUENCIES, etc.
            while line != blank:
                line = inputfile.next()
            if warning: # Get past the second warning
                line = inputfile.next()
                while line!= blank:
                    line = inputfile.next()
                self.logger.warning("This is not a stationary point on the molecular"
                                    "PES. The vibrational analysis is not valid.")
            
            freqNo = inputfile.next()
            while freqNo.find("SAYVETZ") == -1:
                freq = inputfile.next().strip().split()[1:]
            # May include imaginary frequencies
            #       FREQUENCY:       825.18 I    111.53       12.62       10.70        0.89
                newfreq = []
                for i, x in enumerate(freq):
                    if x!="I":
                        newfreq.append(float(x))
                    else:
                        newfreq[-1] = -newfreq[-1]
                self.vibfreqs.extend(newfreq)
                line = inputfile.next()
                if line.find("REDUCED") >= 0: # skip the reduced mass (not always present)
                    line = inputfile.next()
                if line.find("IR INTENSITY") >= 0:
                    # Not present if a numerical Hessian calculation
                    irIntensity = map(float, line.strip().split()[2:])
                    self.vibirs.extend([utils.convertor(x, "Debye^2/amu-Angstrom^2", "km/mol") for x in irIntensity])
                    line = inputfile.next()
                if line.find("RAMAN") >= 0:
                    if not hasattr(self,"vibramans"):
                        self.vibramans = []
                    ramanIntensity = line.strip().split()
                    self.vibramans.extend(map(float, ramanIntensity[2:]))
                    depolar = inputfile.next()
                    line = inputfile.next()
                assert line == blank

                # Extract the Cartesian displacement vectors
                p = [ [], [], [], [], [] ]
                for j in range(len(self.atomnos)):
                    q = [ [], [], [], [], [] ]
                    for k in range(3): # x, y, z
                        line = inputfile.next()[21:]
                        broken = map(float, line.split())
                        for l in range(len(broken)):
                            q[l].append(broken[l])
                    for k in range(len(broken)):
                        p[k].append(q[k])
                self.vibdisps.extend(p[:len(broken)])

                # Skip the Sayvetz stuff at the end
                for j in range(10):
                    line = inputfile.next()
                blank = inputfile.next()
                freqNo = inputfile.next()
            # Exclude rotations and translations
            self.vibfreqs = numpy.array(self.vibfreqs[:startrot-1]+self.vibfreqs[endrot:], "d")
            self.vibirs = numpy.array(self.vibirs[:startrot-1]+self.vibirs[endrot:], "d")
            self.vibdisps = numpy.array(self.vibdisps[:startrot-1]+self.vibdisps[endrot:], "d")
            if hasattr(self, "vibramans"):
                self.vibramans = numpy.array(self.vibramans[:startrot-1]+self.vibramans[endrot:], "d")

        if line[5:21] == "ATOMIC BASIS SET":
            self.gbasis = []
            line = inputfile.next()
            while line.find("SHELL")<0:
                line = inputfile.next()
            blank = inputfile.next()
            atomname = inputfile.next()
            # shellcounter stores the shell no of the last shell
            # in the previous set of primitives
            shellcounter = 1
            while line.find("TOTAL NUMBER")<0:
                blank = inputfile.next()
                line = inputfile.next()
                shellno = int(line.split()[0])
                shellgap = shellno - shellcounter
                gbasis = [] # Stores basis sets on one atom
                shellsize = 0
                while len(line.split())!=1 and line.find("TOTAL NUMBER")<0:
                    shellsize += 1
                    coeff = {}
                    # coefficients and symmetries for a block of rows
                    while line.strip():
                        temp = line.strip().split()
                        sym = temp[1]
                        assert sym in ['S', 'P', 'D', 'F', 'G', 'L']
                        if sym == "L": # L refers to SP
                            if len(temp)==6: # GAMESS US
                                coeff.setdefault("S", []).append( (float(temp[3]), float(temp[4])) )
                                coeff.setdefault("P", []).append( (float(temp[3]), float(temp[5])) )
                            else: # PC GAMESS
                                assert temp[6][-1] == temp[9][-1] == ')'
                                coeff.setdefault("S", []).append( (float(temp[3]), float(temp[6][:-1])) )
                                coeff.setdefault("P", []).append( (float(temp[3]), float(temp[9][:-1])) )
                        else:
                            if len(temp)==5: # GAMESS US
                                coeff.setdefault(sym, []).append( (float(temp[3]), float(temp[4])) )
                            else: # PC GAMESS
                                assert temp[6][-1] == ')'
                                coeff.setdefault(sym, []).append( (float(temp[3]), float(temp[6][:-1])) )
                        line = inputfile.next()
                    # either a blank or a continuation of the block
                    if sym == "L":
                        gbasis.append( ('S', coeff['S']))
                        gbasis.append( ('P', coeff['P']))
                    else:
                        gbasis.append( (sym, coeff[sym]))
                    line = inputfile.next()
                # either the start of the next block or the start of a new atom or
                # the end of the basis function section
                
                numtoadd = 1 + (shellgap / shellsize)
                shellcounter = shellno + shellsize
                for x in range(numtoadd):
                    self.gbasis.append(gbasis)

        if line.find("EIGENVECTORS") == 10 or line.find("MOLECULAR OBRITALS") == 10:
            # The details returned come from the *final* report of evalues and
            #   the last list of symmetries in the log file.
            # Should be followed by lines like this:
            #           ------------
            #           EIGENVECTORS
            #           ------------
            # 
            #                       1          2          3          4          5
            #                   -10.0162   -10.0161   -10.0039   -10.0039   -10.0029
            #                      BU         AG         BU         AG         AG  
            #     1  C  1  S    0.699293   0.699290  -0.027566   0.027799   0.002412
            #     2  C  1  S    0.031569   0.031361   0.004097  -0.004054  -0.000605
            #     3  C  1  X    0.000908   0.000632  -0.004163   0.004132   0.000619
            #     4  C  1  Y   -0.000019   0.000033   0.000668  -0.000651   0.005256
            #     5  C  1  Z    0.000000   0.000000   0.000000   0.000000   0.000000
            #     6  C  2  S   -0.699293   0.699290   0.027566   0.027799   0.002412
            #     7  C  2  S   -0.031569   0.031361  -0.004097  -0.004054  -0.000605
            #     8  C  2  X    0.000908  -0.000632  -0.004163  -0.004132  -0.000619
            #     9  C  2  Y   -0.000019  -0.000033   0.000668   0.000651  -0.005256
            #    10  C  2  Z    0.000000   0.000000   0.000000   0.000000   0.000000
            #    11  C  3  S   -0.018967  -0.019439   0.011799  -0.014884  -0.452328
            #    12  C  3  S   -0.007748  -0.006932   0.000680  -0.000695  -0.024917
            #    13  C  3  X    0.002628   0.002997   0.000018   0.000061  -0.003608
            # and so forth... with blanks lines between blocks of 5 orbitals each.
            # Warning! There are subtle differences between GAMESS-US and PC-GAMESS
            #   in the formatting of the first four columns.
            #
            # Watch out for F orbitals...
            # PC GAMESS
            #   19  C   1 YZ   0.000000   0.000000   0.000000   0.000000   0.000000
            #   20  C    XXX   0.000000   0.000000   0.000000   0.000000   0.002249
            #   21  C    YYY   0.000000   0.000000  -0.025555   0.000000   0.000000
            #   22  C    ZZZ   0.000000   0.000000   0.000000   0.002249   0.000000
            #   23  C    XXY   0.000000   0.000000   0.001343   0.000000   0.000000
            # GAMESS US
            #   55  C  1 XYZ   0.000000   0.000000   0.000000   0.000000   0.000000
            #   56  C  1XXXX  -0.000014  -0.000067   0.000000   0.000000   0.000000
            #
            # This is fine for GeoOpt and SP, but may be weird for TD and Freq.

            # This is the stuff that we can read from these blocks.
            self.moenergies = [[]]
            self.mosyms = [[]]
            if not hasattr(self, "nmo"):
                self.nmo = self.nbasis
            self.mocoeffs = [numpy.zeros((self.nmo, self.nbasis), "d")]
            readatombasis = False
            if not hasattr(self, "atombasis"):
                self.atombasis = []
                self.aonames = []
                for i in range(self.natom):
                    self.atombasis.append([])
                self.aonames = []
                readatombasis = True

            dashes = inputfile.next()
            for base in range(0, self.nmo, 5):

                line = inputfile.next()
                # Make sure that this section does not end prematurely - checked by regression test 2CO.ccsd.aug-cc-pVDZ.out.
                if line.strip() != "":
                    break;
                
                numbers = inputfile.next() # Eigenvector numbers.

                # Sometimes there are some blank lines here.
                while not line.strip():
                    line = inputfile.next()

                # Eigenvalues for these orbitals (in hartrees).
                try:
                    self.moenergies[0].extend([utils.convertor(float(x), "hartree", "eV") for x in line.split()])
                except:
                    self.logger.warning('MO section found but could not be parsed!')
                    break;

                # Orbital symmetries.
                line = inputfile.next()
                if line.strip():
                    self.mosyms[0].extend(map(self.normalisesym, line.split()))
                
                # Now we have nbasis lines.
                # Going to use the same method as for normalise_aonames()
                # to extract basis set information.
                p = re.compile("(\d+)\s*([A-Z][A-Z]?)\s*(\d+)\s*([A-Z]+)")
                oldatom ='0'
                for i in range(self.nbasis):
                    line = inputfile.next()

                    # If line is empty, break (ex. for FMO in exam37).
                    if not line.strip(): break

                    # Fill atombasis and aonames only first time around
                    if readatombasis and base == 0:
                        aonames = []
                        start = line[:17].strip()
                        m = p.search(start)
                        if m:
                            g = m.groups()
                            aoname = "%s%s_%s" % (g[1].capitalize(), g[2], g[3])
                            oldatom = g[2]
                            atomno = int(g[2])-1
                            orbno = int(g[0])-1
                        else: # For F orbitals, as shown above
                            g = [x.strip() for x in line.split()]
                            aoname = "%s%s_%s" % (g[1].capitalize(), oldatom, g[2])
                            atomno = int(oldatom)-1
                            orbno = int(g[0])-1
                        self.atombasis[atomno].append(orbno)
                        self.aonames.append(aoname)
                    coeffs = line[15:] # Strip off the crud at the start.
                    j = 0
                    while j*11+4 < len(coeffs):
                        self.mocoeffs[0][base+j, i] = float(coeffs[j * 11:(j + 1) * 11])
                        j += 1

            line = inputfile.next()
            # If it's restricted and no more properties:
            #  ...... END OF RHF/DFT CALCULATION ......
            # If there are more properties (DENSITY MATRIX):
            #               --------------
            #
            # If it's unrestricted we have:
            #
            #  ----- BETA SET ----- 
            #
            #          ------------
            #          EIGENVECTORS
            #          ------------
            #
            #                      1          2          3          4          5
            # ... and so forth.
            line = inputfile.next()
            if line[2:22] == "----- BETA SET -----":
                self.mocoeffs.append(numpy.zeros((self.nmo, self.nbasis), "d"))
                self.moenergies.append([])
                self.mosyms.append([])
                for i in range(4):
                    line = inputfile.next()
                for base in range(0, self.nmo, 5):
                    blank = inputfile.next()
                    line = inputfile.next() # Eigenvector no
                    line = inputfile.next()
                    self.moenergies[1].extend([utils.convertor(float(x), "hartree", "eV") for x in line.split()])
                    line = inputfile.next()
                    self.mosyms[1].extend(map(self.normalisesym, line.split()))
                    for i in range(self.nbasis):
                        line = inputfile.next()
                        temp = line[15:] # Strip off the crud at the start
                        j = 0
                        while j * 11 + 4 < len(temp):
                            self.mocoeffs[1][base+j, i] = float(temp[j * 11:(j + 1) * 11])
                            j += 1
                line = inputfile.next()
            self.moenergies = [numpy.array(x, "d") for x in self.moenergies]

        # Natural orbitals - presently support only CIS.
        # Looks basically the same as eigenvectors, without symmetry labels.
        if line[10:30] == "CIS NATURAL ORBITALS":

            self.nocoeffs = numpy.zeros((self.nmo, self.nbasis), "d")

            dashes = inputfile.next()
            for base in range(0, self.nmo, 5):

                blank = inputfile.next()
                numbers = inputfile.next() # Eigenvector numbers.

                # Eigenvalues for these natural orbitals (not in hartrees!).
                # Sometimes there are some blank lines before it.
                line = inputfile.next()
                while not line.strip():
                    line = inputfile.next()
                eigenvalues = line

                # Orbital symmetry labels are normally here for MO coefficients.
                line = inputfile.next()
                
                # Now we have nbasis lines with the coefficients.
                for i in range(self.nbasis):

                    line = inputfile.next()
                    coeffs = line[15:]
                    j = 0
                    while j*11+4 < len(coeffs):
                        self.nocoeffs[base+j, i] = float(coeffs[j * 11:(j + 1) * 11])
                        j += 1

        # We cannot trust this self.homos until we come to the phrase:
        #   SYMMETRIES FOR INITIAL GUESS ORBITALS FOLLOW
        # which either is followed by "ALPHA" or "BOTH" at which point we can say
        # for certain that it is an un/restricted calculations.
        # Note that MCSCF calcs also print this search string, so make sure
        #   that self.homos does not exist yet.
        if line[1:28] == "NUMBER OF OCCUPIED ORBITALS" and not hasattr(self,'homos'):
            homos = [int(line.split()[-1])-1]
            line = inputfile.next()
            homos.append(int(line.split()[-1])-1)
            self.homos = numpy.array(homos, "i")

        
        if line.find("SYMMETRIES FOR INITIAL GUESS ORBITALS FOLLOW") >= 0:
            # Not unrestricted, so lop off the second index.
            # In case the search string above was not used (ex. FMO in exam38),
            #   we can try to use the next line which should also contain the
            #   number of occupied orbitals.
            if line.find("BOTH SET(S)") >= 0:
                nextline = inputfile.next()
                if "ORBITALS ARE OCCUPIED" in nextline:
                    homos = int(nextline.split()[0])-1
                    if hasattr(self,"homos"):
                        try:
                            assert self.homos[0] == homos
                        except AssertionError:
                            self.logger.warning("Number of occupied orbitals not consistent. This is normal for ECP and FMO jobs.")
                    else:
                        self.homos = [homos]
                self.homos = numpy.resize(self.homos, [1])

        # Set the total number of atoms, only once.
        # Normally GAMESS print TOTAL NUMBER OF ATOMS, however in some cases
        #   this is slightly different (ex. lower case for FMO in exam37).
        if not hasattr(self,"natom") and "NUMBER OF ATOMS" in line.upper():
            self.natom = int(line.split()[-1])
            
        if line.find("NUMBER OF CARTESIAN GAUSSIAN BASIS") == 1 or line.find("TOTAL NUMBER OF BASIS FUNCTIONS") == 1:
            # The first is from Julien's Example and the second is from Alexander's
            # I think it happens if you use a polar basis function instead of a cartesian one
            self.nbasis = int(line.strip().split()[-1])
                
        elif line.find("SPHERICAL HARMONICS KEPT IN THE VARIATION SPACE") >= 0:
            # Note that this line is present if ISPHER=1, e.g. for C_bigbasis
            self.nmo = int(line.strip().split()[-1])
            
        elif line.find("TOTAL NUMBER OF MOS IN VARIATION SPACE") == 1:
            # Note that this line is not always present, so by default
            # NBsUse is set equal to NBasis (see below).
            self.nmo = int(line.split()[-1])

        elif line.find("OVERLAP MATRIX") == 0 or line.find("OVERLAP MATRIX") == 1:
            # The first is for PC-GAMESS, the second for GAMESS
            # Read 1-electron overlap matrix
            if not hasattr(self, "aooverlaps"):
                self.aooverlaps = numpy.zeros((self.nbasis, self.nbasis), "d")
            else:
                self.logger.info("Reading additional aooverlaps...")
            base = 0
            while base < self.nbasis:
                blank = inputfile.next()
                line = inputfile.next() # Basis fn number
                blank = inputfile.next()
                for i in range(self.nbasis - base): # Fewer lines each time
                    line = inputfile.next()
                    temp = line.split()
                    for j in range(4, len(temp)):
                        self.aooverlaps[base+j-4, i+base] = float(temp[j])
                        self.aooverlaps[i+base, base+j-4] = float(temp[j])
                base += 5

        # ECP Pseudopotential information
        if "ECP POTENTIALS" in line:
            if not hasattr(self, "coreelectrons"):
                self.coreelectrons = [0]*self.natom
            dashes = inputfile.next()
            blank = inputfile.next()
            header = inputfile.next()
            while header.split()[0] == "PARAMETERS":
                name = header[17:25]
                atomnum = int(header[34:40])
                # The pseudopotential is given explicitly
                if header[40:50] == "WITH ZCORE":
                  zcore = int(header[50:55])
                  lmax = int(header[63:67])
                  self.coreelectrons[atomnum-1] = zcore
                # The pseudopotential is copied from another atom
                if header[40:55] == "ARE THE SAME AS":
                  atomcopy = int(header[60:])
                  self.coreelectrons[atomnum-1] = self.coreelectrons[atomcopy-1]
                line = inputfile.next()
                while line.split() <> []:
                    line = inputfile.next()
                header = inputfile.next()

Example 3

Project: RMG-Py
Source File: gamessukparser.py
View license
    def extract(self, inputfile, line):
        """Extract information from the file object inputfile."""

        if line[1:22] == "total number of atoms":
            if not hasattr(self, "natom"):
                self.natom = int(line.split()[-1])

        if line[3:44] == "convergence threshold in optimization run":
            # Assuming that this is only found in the case of OPTXYZ
            # (i.e. an optimization in Cartesian coordinates)
            self.geotargets = [float(line.split()[-2])]

        if line[32:61] == "largest component of gradient":
            # This is the geotarget in the case of OPTXYZ
            if not hasattr(self, "geovalues"):
                self.geovalues = []
            self.geovalues.append([float(line.split()[4])])

        if line[37:49] == "convergence?":
            # Get the geovalues and geotargets for OPTIMIZE
            if not hasattr(self, "geovalues"):
                self.geovalues = []
                self.geotargets = []
            geotargets = []
            geovalues = []
            for i in range(4):
                temp = line.split()
                geovalues.append(float(temp[2]))
                if not self.geotargets:
                    geotargets.append(float(temp[-2]))
                line = inputfile.next()
            self.geovalues.append(geovalues)
            if not self.geotargets:
                self.geotargets = geotargets
        
        if line[40:58] == "molecular geometry":
            # Only one set of atomcoords is taken from this section
            # For geo-opts, more coordinates are taken from the "nuclear coordinates"
            if not hasattr(self, "atomcoords"):
                self.atomcoords = []
            self.atomnos = []
            
            stop = " "*9 + "*"*79
            line = inputfile.next()
            while not line.startswith(stop):
                line = inputfile.next()
            line = inputfile.next()
            while not line.startswith(stop):
                line = inputfile.next()
            empty = inputfile.next()

            atomcoords = []
            empty = inputfile.next()
            while not empty.startswith(stop):
                line = inputfile.next().split() # the coordinate data
                atomcoords.append(map(float,line[3:6]))
                self.atomnos.append(int(round(float(line[2]))))
                while line!=empty:
                    line = inputfile.next()
                # at this point, line is an empty line, right after
                # 1 or more lines containing basis set information
                empty = inputfile.next()
                # empty is either a row of asterisks or the empty line
                # before the row of coordinate data
            
            self.atomcoords.append(atomcoords)
            self.atomnos = numpy.array(self.atomnos, "i")

        if line[40:59] == "nuclear coordinates":
            # We need not remember the first geometry in the geo-opt as this will
            # be recorded already, in the "molecular geometry" section
            # (note: single-point calculations have no "nuclear coordinates" only
            # "molecular geometry")
            if self.firstnuccoords:
                self.firstnuccoords = False
                return
                # This was continue (in loop) before parser refactoring.
                # continue
            if not hasattr(self, "atomcoords"):
                self.atomcoords = []
                self.atomnos = []
                
            asterisk = inputfile.next()
            blank = inputfile.next()
            colmname = inputfile.next()
            equals = inputfile.next()

            atomcoords = []
            atomnos = []
            line = inputfile.next()
            while line != equals:
                temp = line.strip().split()
                atomcoords.append([utils.convertor(float(x), "bohr", "Angstrom") for x in temp[0:3]])
                if not hasattr(self, "atomnos") or len(self.atomnos) == 0:
                    atomnos.append(int(float(temp[3])))
                    
                line = inputfile.next()

            self.atomcoords.append(atomcoords)
            if not hasattr(self, "atomnos") or len(self.atomnos) == 0:
                self.atomnos = atomnos

        if line[1:32] == "total number of basis functions":
            self.nbasis = int(line.split()[-1])
            while line.find("charge of molecule")<0:
                line = inputfile.next()
            self.charge = int(line.split()[-1])
            self.mult = int(inputfile.next().split()[-1])

            alpha = int(inputfile.next().split()[-1])-1
            beta = int(inputfile.next().split()[-1])-1
            if self.mult==1:
                self.homos = numpy.array([alpha], "i")
            else:
                self.homos = numpy.array([alpha,beta], "i")

        if line[37:69] == "s-matrix over gaussian basis set":
            self.aooverlaps = numpy.zeros((self.nbasis, self.nbasis), "d")

            minus = inputfile.next()
            blank = inputfile.next()
            i = 0
            while i < self.nbasis:
                blank = inputfile.next()
                blank = inputfile.next()
                header = inputfile.next()
                blank = inputfile.next()
                blank = inputfile.next()

                for j in range(self.nbasis):
                    temp = map(float, inputfile.next().split()[1:])
                    self.aooverlaps[j,(0+i):(len(temp)+i)] = temp
                    
                i += len(temp)

        if line[18:43] == 'EFFECTIVE CORE POTENTIALS':
            self.coreelectrons = numpy.zeros(self.natom, 'i')
            asterisk = inputfile.next()
            line = inputfile.next()
            while line[15:46]!="*"*31:
                if line.find("for atoms ...")>=0:
                    atomindex = []
                    line = inputfile.next()
                    while line.find("core charge")<0:
                        broken = line.split()
                        atomindex.extend([int(x.split("-")[0]) for x in broken])
                        line = inputfile.next()
                    charge = float(line.split()[4])
                    for idx in atomindex:
                        self.coreelectrons[idx-1] = self.atomnos[idx-1] - charge
                line = inputfile.next()
                            
        if line[3:27] == "Wavefunction convergence":
            self.scftarget = float(line.split()[-2])
            self.scftargets = []

        if line[11:22] == "normal mode":
            if not hasattr(self, "vibfreqs"):
                self.vibfreqs = []
                self.vibirs = []
            
            units = inputfile.next()
            xyz = inputfile.next()
            equals = inputfile.next()
            line = inputfile.next()
            while line!=equals:
                temp = line.split()
                self.vibfreqs.append(float(temp[1]))
                self.vibirs.append(float(temp[-2]))
                line = inputfile.next()
            # Use the length of the vibdisps to figure out
            # how many rotations and translations to remove
            self.vibfreqs = self.vibfreqs[-len(self.vibdisps):]
            self.vibirs = self.vibirs[-len(self.vibdisps):]

        if line[44:73] == "normalised normal coordinates":
            self.vibdisps = []
            equals = inputfile.next()
            blank = inputfile.next()
            blank = inputfile.next()
            freqnum = inputfile.next()
            while freqnum.find("=")<0:
                blank = inputfile.next()
                equals = inputfile.next()
                freqs = inputfile.next()
                equals = inputfile.next()
                blank = inputfile.next()
                header = inputfile.next()
                equals = inputfile.next()
                p = [ [] for x in range(9) ]
                for i in range(len(self.atomnos)):
                    brokenx = map(float, inputfile.next()[25:].split())
                    brokeny = map(float, inputfile.next()[25:].split())            
                    brokenz = map(float, inputfile.next()[25:].split())
                    for j,x in enumerate(zip(brokenx, brokeny, brokenz)):
                        p[j].append(x)
                self.vibdisps.extend(p)
        
                blank = inputfile.next()
                blank = inputfile.next()
                freqnum = inputfile.next()                    

        if line[26:36] == "raman data":
            self.vibramans = []

            stars = inputfile.next()
            blank = inputfile.next()
            header = inputfile.next()

            blank = inputfile.next()
            line = inputfile.next()
            while line[1]!="*":
                self.vibramans.append(float(line.split()[3]))
                blank = inputfile.next()
                line = inputfile.next()
            # Use the length of the vibdisps to figure out
            # how many rotations and translations to remove
            self.vibramans = self.vibramans[-len(self.vibdisps):]
                        
        if line[3:11] == "SCF TYPE":
            self.scftype = line.split()[-2]
            assert self.scftype in ['rhf', 'uhf', 'gvb'], "%s not one of 'rhf', 'uhf' or 'gvb'" % self.scftype

        if line[15:31] == "convergence data":
            if not hasattr(self, "scfvalues"):
                self.scfvalues = []
            self.scftargets.append([self.scftarget]) # Assuming it does not change over time
            while line[1:10] != "="*9:
                line = inputfile.next()
            line = inputfile.next()
            tester = line.find("tester") # Can be in a different place depending
            assert tester>=0
            while line[1:10] != "="*9: # May be two or three lines (unres)
                line = inputfile.next()
            
            scfvalues = []
            line = inputfile.next()
            while line.strip():
                if line[2:6]!="****":
            # e.g. **** recalulation of fock matrix on iteration  4 (examples/chap12/pyridine.out)
                    scfvalues.append([float(line[tester-5:tester+6])])
                line = inputfile.next()
            self.scfvalues.append(scfvalues)   

        if line[10:22] == "total energy" and len(line.split()) == 3:
            if not hasattr(self, "scfenergies"):
                self.scfenergies = []
            scfenergy = utils.convertor(float(line.split()[-1]), "hartree", "eV")
            self.scfenergies.append(scfenergy)
        
        # Total energies after Moller-Plesset corrections
        # Second order correction is always first, so its first occurance
        #   triggers creation of mpenergies (list of lists of energies)
        # Further corrections are appended as found
        # Note: GAMESS-UK sometimes prints only the corrections,
        #   so they must be added to the last value of scfenergies
        if line[10:32] == "mp2 correlation energy" or \
           line[10:42] == "second order perturbation energy":
            if not hasattr(self, "mpenergies"):
                self.mpenergies = []
            self.mpenergies.append([])
            self.mp2correction = self.float(line.split()[-1])
            self.mp2energy = self.scfenergies[-1] + self.mp2correction
            self.mpenergies[-1].append(utils.convertor(self.mp2energy, "hartree", "eV"))
        if line[10:41] == "third order perturbation energy":
            self.mp3correction = self.float(line.split()[-1])
            self.mp3energy = self.mp2energy + self.mp3correction
            self.mpenergies[-1].append(utils.convertor(self.mp3energy, "hartree", "eV"))

        if line[40:59] == "molecular basis set":
            self.gbasis = []
            line = inputfile.next()
            while line.find("contraction coefficients")<0:
                line = inputfile.next()
            equals = inputfile.next()
            blank = inputfile.next()
            atomname = inputfile.next()
            basisregexp = re.compile("\d*(\D+)") # Get everything after any digits
            shellcounter = 1
            while line!=equals:
                gbasis = [] # Stores basis sets on one atom
                blank = inputfile.next()
                blank = inputfile.next()
                line = inputfile.next()
                shellno = int(line.split()[0])
                shellgap = shellno - shellcounter
                shellsize = 0
                while len(line.split())!=1 and line!=equals:
                    if line.split():
                        shellsize += 1
                    coeff = {}
                    # coefficients and symmetries for a block of rows
                    while line.strip() and line!=equals:
                        temp = line.strip().split()
                    # temp[1] may be either like (a) "1s" and "1sp", or (b) "s" and "sp"
                    # See GAMESS-UK 7.0 distribution/examples/chap12/pyridine2_21m10r.out
                    # for an example of the latter
                        sym = basisregexp.match(temp[1]).groups()[0]
                        assert sym in ['s', 'p', 'd', 'f', 'sp'], "'%s' not a recognized symmetry" % sym
                        if sym == "sp":
                            coeff.setdefault("S", []).append( (float(temp[3]), float(temp[6])) )
                            coeff.setdefault("P", []).append( (float(temp[3]), float(temp[10])) )
                        else:
                            coeff.setdefault(sym.upper(), []).append( (float(temp[3]), float(temp[6])) )
                        line = inputfile.next()
                    # either a blank or a continuation of the block
                    if coeff:
                        if sym == "sp":
                            gbasis.append( ('S', coeff['S']))
                            gbasis.append( ('P', coeff['P']))
                        else:
                            gbasis.append( (sym.upper(), coeff[sym.upper()]))
                    if line==equals:
                        continue
                    line = inputfile.next()
                    # either the start of the next block or the start of a new atom or
                    # the end of the basis function section (signified by a line of equals)
                numtoadd = 1 + (shellgap / shellsize)
                shellcounter = shellno + shellsize
                for x in range(numtoadd):
                    self.gbasis.append(gbasis)

        if line[50:70] == "----- beta set -----":
            self.betamosyms = True
            self.betamoenergies = True
            self.betamocoeffs = True
            # betamosyms will be turned off in the next
            # SYMMETRY ASSIGNMENT section
                
        if line[31:50] == "SYMMETRY ASSIGNMENT":
            if not hasattr(self, "mosyms"):
                self.mosyms = []

            multiple = {'a':1, 'b':1, 'e':2, 't':3, 'g':4, 'h':5}
            
            equals = inputfile.next()
            line = inputfile.next()
            while line != equals: # There may be one or two lines of title (compare mg10.out and duhf_1.out)
                line = inputfile.next()

            mosyms = []
            line = inputfile.next()
            while line != equals:
                temp = line[25:30].strip()
                if temp[-1]=='?':
                    # e.g. e? or t? or g? (see example/chap12/na7mg_uhf.out)
                    # for two As, an A and an E, and two Es of the same energy respectively.
                    t = line[91:].strip().split()
                    for i in range(1,len(t),2):
                        for j in range(multiple[t[i][0]]): # add twice for 'e', etc.
                            mosyms.append(self.normalisesym(t[i]))
                else:
                    for j in range(multiple[temp[0]]):
                        mosyms.append(self.normalisesym(temp)) # add twice for 'e', etc.
                line = inputfile.next()
            assert len(mosyms) == self.nmo, "mosyms: %d but nmo: %d" % (len(mosyms), self.nmo)
            if self.betamosyms:
                # Only append if beta (otherwise with IPRINT SCF
                # it will add mosyms for every step of a geo opt)
                self.mosyms.append(mosyms)
                self.betamosyms = False
            elif self.scftype=='gvb':
                # gvb has alpha and beta orbitals but they are identical
                self.mosysms = [mosyms, mosyms]
            else:
                self.mosyms = [mosyms]

        if line[50:62] == "eigenvectors":
        # Mocoeffs...can get evalues from here too
        # (only if using FORMAT HIGH though will they all be present)                
            if not hasattr(self, "mocoeffs"):
                self.aonames = []
                aonames = []
            minus = inputfile.next()

            mocoeffs = numpy.zeros( (self.nmo, self.nbasis), "d")
            readatombasis = False
            if not hasattr(self, "atombasis"):
                self.atombasis = []
                for i in range(self.natom):
                    self.atombasis.append([])
                readatombasis = True

            blank = inputfile.next()
            blank = inputfile.next()
            evalues = inputfile.next()

            p = re.compile(r"\d+\s+(\d+)\s*(\w+) (\w+)")
            oldatomname = "DUMMY VALUE"

            mo = 0
            while mo < self.nmo:
                blank = inputfile.next()
                blank = inputfile.next()
                nums = inputfile.next()
                blank = inputfile.next()
                blank = inputfile.next()
                for basis in range(self.nbasis):
                    line = inputfile.next()
                    # Fill atombasis only first time around.
                    if readatombasis:
                        orbno = int(line[1:5])-1
                        atomno = int(line[6:9])-1
                        self.atombasis[atomno].append(orbno)
                    if not self.aonames:
                        pg = p.match(line[:18].strip()).groups()
                        atomname = "%s%s%s" % (pg[1][0].upper(), pg[1][1:], pg[0])
                        if atomname!=oldatomname:
                            aonum = 1
                        oldatomname = atomname
                        name = "%s_%d%s" % (atomname, aonum, pg[2].upper())
                        if name in aonames:
                            aonum += 1
                        name = "%s_%d%s" % (atomname, aonum, pg[2].upper())
                        aonames.append(name) 
                    temp = map(float, line[19:].split())
                    mocoeffs[mo:(mo+len(temp)), basis] = temp
                # Fill atombasis only first time around.
                readatombasis = False
                if not self.aonames:
                    self.aonames = aonames

                line = inputfile.next() # blank line
                while line==blank:
                    line = inputfile.next()
                evalues = line
                if evalues[:17].strip(): # i.e. if these aren't evalues
                    break # Not all the MOs are present
                mo += len(temp)
            mocoeffs = mocoeffs[0:(mo+len(temp)), :] # In case some aren't present
            if self.betamocoeffs:
                self.mocoeffs.append(mocoeffs)
            else:
                self.mocoeffs = [mocoeffs]

        if line[7:12] == "irrep":
            ########## eigenvalues ###########
            # This section appears once at the start of a geo-opt and once at the end
            # unless IPRINT SCF is used (when it appears at every step in addition)
            if not hasattr(self, "moenergies"):
                self.moenergies = []

            equals = inputfile.next()
            while equals[1:5] != "====": # May be one or two lines of title (compare duhf_1.out and mg10.out)
                equals = inputfile.next()

            moenergies = []
            line = inputfile.next()
            if not line.strip(): # May be a blank line here (compare duhf_1.out and mg10.out)
                line = inputfile.next()

            while line.strip() and line != equals: # May end with a blank or equals
                temp = line.strip().split()
                moenergies.append(utils.convertor(float(temp[2]), "hartree", "eV"))
                line = inputfile.next()
            self.nmo = len(moenergies)
            if self.betamoenergies:
                self.moenergies.append(moenergies)
                self.betamoenergies = False
            elif self.scftype=='gvb':
                self.moenergies = [moenergies, moenergies]
            else:
                self.moenergies = [moenergies]

Example 4

Project: RMG-Py
Source File: jaguarparser.py
View license
    def extract(self, inputfile, line):
        """Extract information from the file object inputfile.

        Called once per line of Jaguar output.  When `line` marks the start
        of a recognized section, further lines are consumed from `inputfile`
        and the corresponding cclib attributes are set on self.

        NOTE(review): this parser targets the Python 2 cclib codebase --
        it calls inputfile.next() explicitly and stores map() results where
        lists are expected; confirm before running under Python 3.
        """

        # SCF convergence: a run of consecutive "etot" lines, one per
        # SCF iteration.
        if line[0:4] == "etot":
        # Get SCF convergence information
            if not hasattr(self, "scfvalues"):
                self.scfvalues = []
                # Jaguar does not print SCF targets, so use fixed defaults
                # for [energy change, density change].
                self.scftargets = [[5E-5, 5E-6]]
            values = []
            while line[0:4] == "etot":
        # Jaguar 4.2
        # etot   1  N  N  0  N  -382.08751886450           2.3E-03  1.4E-01
        # etot   2  Y  Y  0  N  -382.27486023153  1.9E-01  1.4E-03  5.7E-02
        # Jaguar 6.5
        # etot   1  N  N  0  N    -382.08751881733           2.3E-03  1.4E-01
        # etot   2  Y  Y  0  N    -382.27486018708  1.9E-01  1.4E-03  5.7E-02
                temp = line.split()[7:]
                if len(temp)==3:
                    denergy = float(temp[0])
                else:
                    denergy = 0 # Should really be greater than target value
                                # or should we just ignore the values in this line
                ddensity = float(temp[-2])
                maxdiiserr = float(temp[-1])
                if not self.geoopt:
                    values.append([denergy, ddensity])
                else:
                    values.append([ddensity])
                line = inputfile.next()
            self.scfvalues.append(values)

        # Hartree-Fock energy after SCF
        if line[1:18] == "SCFE: SCF energy:":
            if not hasattr(self, "scfenergies"):
                self.scfenergies = []
            temp = line.strip().split()
            # The energy value immediately precedes the "hartrees" token.
            scfenergy = float(temp[temp.index("hartrees") - 1])
            scfenergy = utils.convertor(scfenergy, "hartree", "eV")
            self.scfenergies.append(scfenergy)

        # Energy after LMP2 correction
        if line[1:18] == "Total LMP2 Energy":
            if not hasattr(self, "mpenergies"):
                self.mpenergies = [[]]
            lmp2energy = float(line.split()[-1])
            lmp2energy = utils.convertor(lmp2energy, "hartree", "eV")
            self.mpenergies[-1].append(lmp2energy)

        if line[2:14] == "new geometry" or line[1:21] == "Symmetrized geometry" or line.find("Input geometry") > 0:
        # Get the atom coordinates
            if not hasattr(self, "atomcoords") or line[1:21] == "Symmetrized geometry":
                # Wipe the "Input geometry" if "Symmetrized geometry" present
                self.atomcoords = []
            # Raw string avoids the invalid-escape warning for \D.
            p = re.compile(r"(\D+)\d+") # One/more letters followed by a number
            atomcoords = []
            atomnos = []
            angstrom = inputfile.next()
            title = inputfile.next()
            line = inputfile.next()
            while line.strip():
                temp = line.split()
                # Atom labels look like "C1", "H12": strip the trailing index.
                element = p.findall(temp[0])[0]
                atomnos.append(self.table.number[element])
                atomcoords.append(map(float, temp[1:]))
                line = inputfile.next()
            self.atomcoords.append(atomcoords)
            self.atomnos = numpy.array(atomnos, "i")
            self.natom = len(atomcoords)

        # Extract charge and multiplicity
        if line[2:22] == "net molecular charge":
            self.charge = int(line.split()[-1])
            self.mult = int(inputfile.next().split()[-1])

        if line[2:24] == "start of program geopt":
            if not self.geoopt:
                # Need to keep only the RMS density change info
                # if this is a geoopt
                self.scftargets = [[self.scftargets[0][0]]]
                if hasattr(self, "scfvalues"):
                    self.scfvalues[0] = [[x[0]] for x in self.scfvalues[0]]
                self.geoopt = True
            else:
                self.scftargets.append([5E-5])

        if line[2:28] == "geometry optimization step":
        # Get Geometry Opt convergence information
            if not hasattr(self, "geovalues"):
                self.geovalues = []
                self.geotargets = numpy.zeros(5, "d")
            gopt_step = int(line.split()[-1])
            energy = inputfile.next()
            # quick hack for messages of the sort:
            #   ** restarting optimization from step    2 **
            # as found in regression file ptnh3_2_H2O_2_2plus.out
            if inputfile.next().strip():
                blank = inputfile.next()
            line = inputfile.next()
            values = []
            target_index = 0
            if gopt_step == 1:
                # The first optimization step does not produce an energy change
                values.append(0.0)
                target_index = 1
            while line.strip():
                # Convergence lines carry the target in parentheses at col 41.
                if len(line) > 40 and line[41] == "(":
                    # A new geo convergence value
                    values.append(float(line[26:37]))
                    self.geotargets[target_index] = float(line[43:54])
                    target_index += 1
                line = inputfile.next()
            self.geovalues.append(values)

        if line.find("number of occupied orbitals") > 0:
        # Get number of MOs
            occs = int(line.split()[-1])
            line = inputfile.next()
            virts = int(line.split()[-1])
            self.nmo = occs + virts
            self.homos = numpy.array([occs-1], "i")

            self.unrestrictedflag = False

        if line.find("number of alpha occupied orb") > 0:
        # Get number of MOs for an unrestricted calc

            aoccs = int(line.split()[-1])
            line = inputfile.next()
            avirts = int(line.split()[-1])
            line = inputfile.next()
            boccs = int(line.split()[-1])
            line = inputfile.next()
            bvirt = int(line.split()[-1])

            self.nmo = aoccs + avirts
            self.homos = numpy.array([aoccs-1,boccs-1], "i")
            self.unrestrictedflag = True

        # MO energies and symmetries.
        # Jaguar 7.0: provides energies and symmetries for both
        #   restricted and unrestricted calculations, like this:
        #     Alpha Orbital energies/symmetry label:
        #     -10.25358 Bu  -10.25353 Ag  -10.21931 Bu  -10.21927 Ag
        #     -10.21792 Bu  -10.21782 Ag  -10.21773 Bu  -10.21772 Ag
        #     ...
        # Jaguar 6.5: prints both only for restricted calculations,
        #   so for unrestricted calculations the output it looks like this:
        #     Alpha Orbital energies:
        #     -10.25358  -10.25353  -10.21931  -10.21927  -10.21792  -10.21782
        #     -10.21773  -10.21772  -10.21537  -10.21537   -1.02078   -0.96193
        #     ...
        # Presence of 'Orbital energies' is enough to catch all versions.
        if "Orbital energies" in line:

            # Parsing results is identical for restricted/unrestricted
            #   calculations, just assert later that alpha/beta order is OK.
            spin = int(line[2:6] == "Beta")

            # Check if symmetries are printed also.
            issyms = "symmetry label" in line

            if not hasattr(self, "moenergies"):
                self.moenergies = []
            if issyms and not hasattr(self, "mosyms"):
                self.mosyms = []

            # Grow moenergies/mosyms and make sure they are empty when
            #   parsed multiple times - currently cclib returns only
            #   the final output (ex. in a geometry optimization).
            if len(self.moenergies) < spin+1:
                self.moenergies.append([])
            self.moenergies[spin] = []
            if issyms:
                if len(self.mosyms) < spin+1:
                    self.mosyms.append([])
                self.mosyms[spin] = []

            line = inputfile.next().split()
            while len(line) > 0:
                if issyms:
                    # Tokens alternate: energy, symmetry, energy, symmetry ...
                    # Floor division keeps this an int index count.
                    energies = [float(line[2*i]) for i in range(len(line)//2)]
                    syms = [line[2*i+1] for i in range(len(line)//2)]
                else:
                    energies = [float(e) for e in line]
                energies = [utils.convertor(e, "hartree", "eV") for e in energies]
                self.moenergies[spin].extend(energies)
                if issyms:
                    syms = [self.normalisesym(s) for s in syms]
                    self.mosyms[spin].extend(syms)
                line = inputfile.next().split()

            # There should always be an extra blank line after all this.
            line = inputfile.next()

        if line.find("Occupied + virtual Orbitals- final wvfn") > 0:

            blank = inputfile.next()
            stars = inputfile.next()
            blank = inputfile.next()
            blank = inputfile.next()

            # BUGFIX: `spin` must be computed on every pass.  It used to be
            # assigned only inside the hasattr() guard below, so a second
            # occurrence of this section raised UnboundLocalError at
            # range(spin).
            spin = 2 if self.unrestrictedflag else 1
            if not hasattr(self, "mocoeffs"):
                self.mocoeffs = []

            aonames = []
            lastatom = "X"

            readatombasis = False
            if not hasattr(self, "atombasis"):
                self.atombasis = []
                for i in range(self.natom):
                    self.atombasis.append([])
                readatombasis = True

            for s in range(spin):
                mocoeffs = numpy.zeros((len(self.moenergies[s]), self.nbasis), "d")

                if s == 1: #beta case
                    stars = inputfile.next()
                    blank = inputfile.next()
                    title = inputfile.next()
                    blank = inputfile.next()
                    stars = inputfile.next()
                    blank = inputfile.next()
                    blank = inputfile.next()

                # Coefficients are printed in column blocks of five MOs.
                for k in range(0,len(self.moenergies[s]),5):

                    numbers = inputfile.next()
                    eigens = inputfile.next()
                    line = inputfile.next()

                    for i in range(self.nbasis):

                        info = line.split()

                        # Fill atombasis only first time around.
                        if readatombasis and k == 0:
                            orbno = int(info[0])
                            atom = info[1]
                            # Atom labels are like "C2" or "Cl12".
                            if atom[1].isalpha():
                                atomno = int(atom[2:])
                            else:
                                atomno = int(atom[1:])
                            self.atombasis[atomno-1].append(orbno-1)

                        if not hasattr(self,"aonames"):
                            if lastatom != info[1]:
                                scount = 1
                                pcount = 3
                                dcount = 6 #six d orbitals in Jaguar

                            if info[2] == 'S':
                                aonames.append("%s_%i%s"%(info[1], scount, info[2]))
                                scount += 1

                            if info[2] == 'X' or info[2] == 'Y' or info[2] == 'Z':
                                # Floor division: shell index must stay an int.
                                aonames.append("%s_%iP%s"%(info[1], pcount // 3, info[2]))
                                pcount += 1

                            if info[2] == 'XX' or info[2] == 'YY' or info[2] == 'ZZ' or \
                               info[2] == 'XY' or info[2] == 'XZ' or info[2] == 'YZ':

                                aonames.append("%s_%iD%s"%(info[1], dcount // 6, info[2]))
                                dcount += 1

                            lastatom = info[1]

                        for j in range(len(info[3:])):
                            mocoeffs[j+k,i] = float(info[3+j])

                        line = inputfile.next()

                    if not hasattr(self,"aonames"):
                        self.aonames = aonames

                self.mocoeffs.append(mocoeffs)

        # AO overlap matrix, printed lower-triangular in blocks of 5 columns.
        if line[2:6] == "olap":
            if line[6]=="-":
                return
                # This was continue (in loop) before parser refactoring.
                # continue # avoid "olap-dev"
            self.aooverlaps = numpy.zeros((self.nbasis, self.nbasis), "d")

            for i in range(0, self.nbasis, 5):
                blank = inputfile.next()
                header = inputfile.next()
                for j in range(i, self.nbasis):
                    temp = map(float, inputfile.next().split()[1:])
                    # Mirror into both triangles to keep the matrix symmetric.
                    self.aooverlaps[j, i:(i+len(temp))] = temp
                    self.aooverlaps[i:(i+len(temp)), j] = temp

        if line[1:28] == "number of occupied orbitals":
            self.homos = numpy.array([float(line.strip().split()[-1])-1], "i")

        if line[2:27] == "number of basis functions":
            self.nbasis = int(line.strip().split()[-1])

        # IR output looks like this:
        #   frequencies        72.45   113.25   176.88   183.76   267.60   312.06
        #   symmetries       Au       Bg       Au       Bu       Ag       Bg
        #   intensities         0.07     0.00     0.28     0.52     0.00     0.00
        #   reduc. mass         1.90     0.74     1.06     1.42     1.19     0.85
        #   force const         0.01     0.01     0.02     0.03     0.05     0.05
        #   C1       X     0.00000  0.00000  0.00000 -0.05707 -0.06716  0.00000
        #   C1       Y     0.00000  0.00000  0.00000  0.00909 -0.02529  0.00000
        #   C1       Z     0.04792 -0.06032 -0.01192  0.00000  0.00000  0.11613
        #   C2       X     0.00000  0.00000  0.00000 -0.06094 -0.04635  0.00000
        #   ... etc. ...
        # This is a complete output, some files will not have intensities,
        #   and older Jaguar versions sometimes skip the symmetries.
        if line[2:23] == "start of program freq":

            self.vibfreqs = []
            self.vibdisps = []
            forceconstants = False
            intensities = False
            blank = inputfile.next()
            line = inputfile.next()
            # Peek through the header to learn which optional rows appear.
            while line.strip():
                if "force const" in line:
                    forceconstants = True
                if "intensities" in line:
                    intensities = True
                line = inputfile.next()
            freqs = inputfile.next()

            # The last block has an extra blank line after it - catch it.
            while freqs.strip():

                # Number of modes (columns printed in this block).
                nmodes = len(freqs.split())-1

                # Append the frequencies.
                self.vibfreqs.extend(map(float, freqs.split()[1:]))
                line = inputfile.next().split()

                # May skip symmetries (older Jaguar versions).
                if line[0] == "symmetries":
                    if not hasattr(self, "vibsyms"):
                        self.vibsyms = []
                    self.vibsyms.extend(map(self.normalisesym, line[1:]))
                    line = inputfile.next().split()
                if intensities:
                    if not hasattr(self, "vibirs"):
                        self.vibirs = []
                    self.vibirs.extend(map(float, line[1:]))
                    line = inputfile.next().split()
                if forceconstants:
                    line = inputfile.next()

                # Start parsing the displacements.
                # Variable 'q' holds up to 7 lists of triplets.
                q = [ [] for i in range(7) ]
                for n in range(self.natom):
                    # Variable 'p' holds up to 7 triplets.
                    p = [ [] for i in range(7) ]
                    for i in range(3):
                        line = inputfile.next()
                        disps = [float(disp) for disp in line.split()[2:]]
                        for j in range(nmodes):
                            p[j].append(disps[j])
                    for i in range(nmodes):
                        q[i].append(p[i])

                self.vibdisps.extend(q[:nmodes])
                blank = inputfile.next()
                freqs = inputfile.next()

            # Convert new data to arrays.
            self.vibfreqs = numpy.array(self.vibfreqs, "d")
            self.vibdisps = numpy.array(self.vibdisps, "d")
            if hasattr(self, "vibirs"):
                self.vibirs = numpy.array(self.vibirs, "d")

        # Parse excited state output (for CIS calculations).
        # Jaguar calculates only singlet states.
        if line[2:15] == "Excited State":
            if not hasattr(self, "etenergies"):
                self.etenergies = []
            if not hasattr(self, "etoscs"):
                self.etoscs = []
            if not hasattr(self, "etsecs"):
                self.etsecs = []
                self.etsyms = []
            etenergy = float(line.split()[3])
            etenergy = utils.convertor(etenergy, "eV", "cm-1")
            self.etenergies.append(etenergy)
            # Skip 4 lines
            for i in range(5):
                line = inputfile.next()
            self.etsecs.append([])
            # Jaguar calculates only singlet states.
            self.etsyms.append('Singlet-A')
            while line.strip() != "":
                fromMO = int(line.split()[0])-1
                toMO = int(line.split()[2])-1
                coeff = float(line.split()[-1])
                self.etsecs[-1].append([(fromMO,0),(toMO,0),coeff])
                line = inputfile.next()
            # Skip 3 lines
            for i in range(4):
                line = inputfile.next()
            strength = float(line.split()[-1])
            self.etoscs.append(strength)

Example 5

Project: Cura
Source File: X3DReader.py
View license
    def processGeometryExtrusion(self, node):
        """Build a mesh from an X3D Extrusion node.

        An X3D Extrusion sweeps a 2D cross-section polygon along a 3D spine
        polyline, optionally rotating ("orientation") and scaling ("scale")
        the cross-section at each spine point, and optionally capping the two
        ends ("beginCap"/"endCap").  Vertices are emitted spine-point by
        spine-point (nc vertices each), then side quads and cap faces are
        added by index.

        NOTE(review): relies on readBoolean/readFloatArray helpers and on
        self.reserveFaceAndVertexCount/addVertex/addFace/addQuadFlip from the
        enclosing reader class -- confirm their contracts against the caller.
        """
        ccw = readBoolean(node, "ccw", True)
        begin_cap = readBoolean(node, "beginCap", True)
        end_cap = readBoolean(node, "endCap", True)
        # Defaults below are the X3D-specified defaults for Extrusion fields.
        cross = readFloatArray(node, "crossSection", (1, 1, 1, -1, -1, -1, -1, 1, 1, 1))
        cross = [(cross[i], cross[i+1]) for i in range(0, len(cross), 2)]
        spine = readFloatArray(node, "spine", (0, 0, 0, 0, 1, 0))
        spine = [(spine[i], spine[i+1], spine[i+2]) for i in range(0, len(spine), 3)]
        orient = readFloatArray(node, "orientation", None)
        if orient:
            # This converts X3D's axis/angle rotation to a 3x3 numpy matrix
            def toRotationMatrix(rot):
                (x, y, z) = rot[:3]
                a = rot[3]
                s = sin(a)
                c = cos(a)
                t = 1-c
                return numpy.array((
                    (x * x * t + c,  x * y * t - z*s, x * z * t + y * s),
                    (x * y * t + z*s, y * y * t + c, y * z * t - x * s),
                    (x * z * t - y * s, y * z * t + x * s, z * z * t + c)))

            # Zero-angle rotations become None so they can be skipped cheaply.
            orient = [toRotationMatrix(orient[i:i+4]) if orient[i+3] != 0 else None for i in range(0, len(orient), 4)]

        scale = readFloatArray(node, "scale", None)
        if scale:
            # Per-spine-point scale as a diagonal-ish matrix acting on the XZ
            # plane; identity scales become None (skipped later).
            scale = [numpy.array(((scale[i], 0, 0), (0, 1, 0), (0, 0, scale[i+1])))
                     if scale[i] != 1 or scale[i+1] != 1 else None for i in range(0, len(scale), 2)]


        # Special treatment for the closed spine and cross section.
        # Let's save some memory by not creating identical but distinct vertices;
        # later we'll introduce conditional logic to link the last vertex with
        # the first one where necessary.
        crossClosed = cross[0] == cross[-1]
        if crossClosed:
            cross = cross[:-1]
        nc = len(cross)
        # Cross-section points live in the local XZ plane (y = 0).
        cross = [numpy.array((c[0], 0, c[1])) for c in cross]
        ncf = nc if crossClosed else nc - 1
        # Face count along the cross; for closed cross, it's the same as the
        # respective vertex count

        spine_closed = spine[0] == spine[-1]
        if spine_closed:
            spine = spine[:-1]
        ns = len(spine)
        spine = [Vector(*s) for s in spine]
        nsf = ns if spine_closed else ns - 1

        # This will be used for fallback, where the current spine point joins
        # two collinear spine segments. No need to recheck the case of the
        # closed spine/last-to-first point juncture; if there's an angle there,
        # it would kick in on the first iteration of the main loop by spine.
        def findFirstAngleNormal():
            # Returns a normal (z axis) for the first spine point where the
            # two adjacent segments are not collinear.
            for i in range(1, ns - 1):
                spt = spine[i]
                z = (spine[i + 1] - spt).cross(spine[i - 1] - spt)
                if z.length() > EPSILON:
                    return z
            # All the spines are collinear. Fallback to the rotated source
            # XZ plane.
            # TODO: handle the situation where the first two spine points match
            if len(spine) < 2:
                return Vector(0, 0, 1)
            v = spine[1] - spine[0]
            orig_y = Vector(0, 1, 0)
            orig_z = Vector(0, 0, 1)
            if v.cross(orig_y).length() > EPSILON:
                # Spine at angle with global y - rotate the z accordingly
                a = v.cross(orig_y) # Axis of rotation to get to the Z
                (x, y, z) = a.normalized().getData()
                s = a.length()/v.length()
                c = sqrt(1-s*s)
                t = 1-c
                m = numpy.array((
                    (x * x * t + c,  x * y * t + z*s, x * z * t - y * s),
                    (x * y * t - z*s, y * y * t + c, y * z * t + x * s),
                    (x * z * t + y * s, y * z * t - x * s, z * z * t + c)))
                orig_z = Vector(*m.dot(orig_z.getData()))
            return orig_z

        self.reserveFaceAndVertexCount(2*nsf*ncf + (nc - 2 if begin_cap else 0) + (nc - 2 if end_cap else 0), ns*nc)

        # Main sweep: for each spine point build a local frame (x, y, z) and
        # emit the transformed cross-section vertices.
        z = None
        for i, spt in enumerate(spine):
            if (i > 0 and i < ns - 1) or spine_closed:
                snext = spine[(i + 1) % ns]
                sprev = spine[(i - 1 + ns) % ns]
                y = snext - sprev
                vnext = snext - spt
                vprev = sprev - spt
                try_z = vnext.cross(vprev)
                # Might be zero, then all kinds of fallback
                if try_z.length() > EPSILON:
                    if z is not None and try_z.dot(z) < 0:
                        # Keep the normal on the same side as the previous one
                        # to avoid the frame flipping along the sweep.
                        try_z = -try_z
                    z = try_z
                elif not z:  # No z, and no previous z.
                    # Look ahead, see if there's at least one point where
                    # spines are not collinear.
                    z = findFirstAngleNormal()
            elif i == 0:  # And non-crossed
                snext = spine[i + 1]
                y = snext - spt
                z = findFirstAngleNormal()
            else:  # last point and not crossed
                sprev = spine[i - 1]
                y = spt - sprev
                # If there's more than one point in the spine, z is already set.
                # One point in the spline is an error anyway.

            z = z.normalized()
            y = y.normalized()
            x = y.cross(z) # Already normalized
            # Column vectors of m are the local x/y/z axes at this spine point.
            m = numpy.array(((x.x, y.x, z.x), (x.y, y.y, z.y), (x.z, y.z, z.z)))

            # Columns are the unit vectors for the xz plane for the cross-section
            if orient:
                # Single-entry orientation applies to every spine point.
                mrot = orient[i] if len(orient) > 1 else orient[0]
                if not mrot is None:
                    m = m.dot(mrot)  # Tested against X3DOM, the result matches, still not sure :(

            if scale:
                mscale = scale[i] if len(scale) > 1 else scale[0]
                if not mscale is None:
                    m = m.dot(mscale)

            # First the cross-section 2-vector is scaled,
            # then rotated (which may make it a 3-vector),
            # then applied to the xz plane unit vectors

            sptv3 = numpy.array(spt.getData()[:3])
            for cpt in cross:
                v = sptv3 + m.dot(cpt)
                self.addVertex(*v)

        if begin_cap:
            # Reverse order so the cap faces outward.
            self.addFace([x for x in range(nc - 1, -1, -1)], ccw)

        # Order of edges in the face: forward along cross, forward along spine,
        # backward along cross, backward along spine, flipped if now ccw.
        # This order is assumed later in the texture coordinate assignment;
        # please don't change without syncing.

        for s in range(ns - 1):
            for c in range(ncf):
                self.addQuadFlip(s * nc + c, s * nc + (c + 1) % nc,
                    (s + 1) * nc + (c + 1) % nc, (s + 1) * nc + c, ccw)

        if spine_closed:
            # The faces between the last and the first spine points
            b = (ns - 1) * nc
            for c in range(ncf):
                self.addQuadFlip(b + c, b + (c + 1) % nc,
                    (c + 1) % nc, c, ccw)

        if end_cap:
            self.addFace([(ns - 1) * nc + x for x in range(0, nc)], ccw)

Example 6

Project: VIP
Source File: mcmc_sampling.py
View license
def mcmc_negfc_sampling(cubes, angs, psfn, ncomp, plsc, initial_state,
                        fwhm=4, annulus_width=3, aperture_radius=4, cube_ref=None, 
                        svd_mode='lapack', scaling='temp-mean', fmerit='sum',
                        collapse='median', nwalkers=1000, bounds=None, a=2.0,
                        burnin=0.3, rhat_threshold=1.01, rhat_count_threshold=1,
                        niteration_min=0, niteration_limit=1e02, 
                        niteration_supp=0, check_maxgap=1e04, nproc=1, 
                        output_file=None, display=False, verbose=True, save=False):
    """ Runs an affine invariant mcmc sampling algorithm in order to determine
    the position and the flux of the planet using the 'Negative Fake Companion'
    technique. The result of this procedure is a chain with the samples from the
    posterior distributions of each of the 3 parameters.
    
    This technique can be summarized as follows:
    
    1)  We inject a negative fake companion (one candidate) at a given 
        position and characterized by a given flux, both close to the expected 
        values.
    2)  We run PCA on an full annulus which pass through the initial guess, 
        regardless of the position of the candidate.
    3)  We extract the intensity values of all the pixels contained in a 
        circular aperture centered on the initial guess.
    4)  We calculate the function of merit. The associated chi^2 is given by
        chi^2 = sum(|I_j|) where j \in {1,...,N} with N the total number of 
        pixels contained in the circular aperture.        
    The steps 1) to 4) are looped. At each iteration, the candidate model 
    parameters are defined by the emcee Affine Invariant algorithm. 
    
    Parameters
    ----------  
    cubes: str or numpy.array
        The relative path to the cube of fits images OR the cube itself.
    angs: str or numpy.array
        The relative path to the parallactic angle fits image or the angs itself.
    psfn: str or numpy.array
        The relative path to the instrumental PSF fits image or the PSF itself.
        The PSF must be centered and the flux in a 1*FWHM aperture must equal 1.
    ncomp: int
        The number of principal components.        
    plsc: float
        The platescale, in arcsec per pixel.  
    fwhm: float, optional
        The FWHM in pixels.
    annulus_width: float, optional
        The width in pixel of the annulus on which the PCA is performed.
    aperture_radius: float, optional
        The radius of the circular aperture.        
    nwalkers: int optional
        The number of Goodman & Weare 'walkers'.
    initial_state: numpy.array
        The first guess for the position and flux of the planet, respectively.
        Each walker will start in a small ball around this preferred position.
    cube_ref : array_like, 3d, optional
        Reference library cube. For Reference Star Differential Imaging.
    svd_mode : {'lapack', 'randsvd', 'eigen', 'arpack'}, str optional
        Switch for different ways of computing the SVD and selected PCs.
        'randsvd' is not recommended for the negative fake companion technique.
    scaling : {'temp-mean', 'temp-standard'} or None, optional
        With None, no scaling is performed on the input data before SVD. With 
        "temp-mean" then temporal px-wise mean subtraction is done and with 
        "temp-standard" temporal mean centering plus scaling to unit variance 
        is done. 
    fmerit : {'sum', 'stddev'}, string optional
        Chooses the figure of merit to be used. stddev works better for close in
        companions sitting on top of speckle noise.
    collapse : {'median', 'mean', 'sum', 'trimmean', None}, str or None, optional
        Sets the way of collapsing the frames for producing a final image. If
        None then the cube of residuals is used when measuring the function of
        merit (instead of a single final frame).
    bounds: numpy.array or list, default=None, optional
        The prior knowledge on the model parameters. If None, large bounds will 
        be automatically estimated from the initial state.
    a: float, default=2.0
        The proposal scale parameter. See notes.
    burnin: float, default=0.3
        The fraction of a walker which is discarded.
    rhat_threshold: float, default=1.01
        The Gelman-Rubin threshold used for the test for nonconvergence.   
    rhat_count_threshold: int, optional
        The Gelman-Rubin test must be satisfied 'rhat_count_threshold' times in
        a row before claiming that the chain has converged.        
    niteration_min: int, optional
        Steps per walker lower bound. The simulation will run at least this
        number of steps per walker.
    niteration_limit: int, optional
        Steps per walker upper bound. If the simulation runs up to 
        'niteration_limit' steps without having reached the convergence 
        criterion, the run is stopped.
    niteration_supp: int, optional
        Number of iterations to run after having "reached the convergence".     
    check_maxgap: int, optional
        Maximum number of steps per walker between two Gelman-Rubin test.
    nproc: int, optional
        The number of processes to use for parallelization. 
    output_file: str
        The name of the output file which contains the MCMC results 
        (if save is True).
    display: boolean
        If True, the walk plot is displayed at each evaluation of the Gelman-
        Rubin test.
    verbose: boolean
        Display information in the shell.
    save: boolean
        If True, the MCMC results are pickled.
                    
    Returns
    -------
    out : numpy.array
        The MCMC chain.         
        
    Notes
    -----
    The parameter 'a' must be > 1. For more theoretical information concerning
    this parameter, see Goodman & Weare, 2010, Comm. App. Math. Comp. Sci., 
    5, 65, Eq. [9] p70.
    
    The parameter 'rhat_threshold' can be a numpy.array with individual 
    threshold value for each model parameter.
    """ 
    if verbose:
        start_time = timeInit()
        print "        MCMC sampler for the NEGFC technique       "
        print sep

    # Create the output folder if required.
    if save:    
        if not os.path.exists('results'):
            os.makedirs('results')
        
        if output_file is None:
            # Default output name: a timestamp, e.g. 2016712_93042.
            datetime_today = datetime.datetime.today()
            output_file = str(datetime_today.year)+str(datetime_today.month)+\
                          str(datetime_today.day)+'_'+str(datetime_today.hour)+\
                          str(datetime_today.minute)+str(datetime_today.second)            
        
        if not os.path.exists('results/'+output_file):
            os.makedirs('results/'+output_file)

            
    # #########################################################################
    # If required, one opens the source files
    # #########################################################################
    # When cubes/angs are given as paths, load the FITS data from disk.
    if isinstance(cubes,str) and isinstance(angs,str):
        if angs is None:
            cubes, angs = open_adicube(cubes, verbose=False)
        else:
            cubes = open_fits(cubes)
            angs = open_fits(angs, verbose=False)    
        
        if isinstance(psfn,str):
            psfn = open_fits(psfn)
        
        if verbose:
            print 'The data has been loaded. Let''s continue !'
    
    # #########################################################################
    # Initialization of the variables
    # #########################################################################    
    dim = 3 # There are 3 model parameters, resp. the radial and angular 
            # position of the planet and its flux.
    
    itermin = niteration_min
    limit = niteration_limit    
    supp = niteration_supp
    maxgap = check_maxgap
    initial_state = np.array(initial_state)
    
    if itermin > limit:
        itermin = 0
        print("'niteration_min' must be < 'niteration_limit'.")
        
    # Scheduling of the Gelman-Rubin checks: the next check happens at
    # ceil(itermin*(1+fraction)**geom), capped at lastcheck+maxgap.
    fraction = 0.3
    geom = 0
    lastcheck = 0
    konvergence = np.inf   # step index at which convergence was declared
    rhat_count = 0         # number of consecutive successful Gelman-Rubin tests
        
    chain = np.empty([nwalkers,1,dim])
    isamples = np.empty(0)
    # Each walker starts in a small Gaussian ball around the initial guess.
    pos = initial_state + np.random.normal(0,1e-01,(nwalkers,3))
    nIterations = limit + supp
    rhat = np.zeros(dim)  
    stop = np.inf
    

    if bounds is None:
        # Default flat priors derived from the initial state.
        bounds = [(initial_state[0]-annulus_width/2.,initial_state[0]+annulus_width/2.), #radius
                  (initial_state[1]-10,initial_state[1]+10), #angle
                  (0,2*initial_state[2])] #flux
    
    sampler = emcee.EnsembleSampler(nwalkers,dim,lnprob,a,
                                    args =([bounds, cubes, angs, plsc, psfn,
                                            fwhm, annulus_width, ncomp,
                                            aperture_radius, initial_state,
                                            cube_ref, svd_mode, scaling, fmerit,
                                            collapse]),
                                    threads=nproc)
    
    duration_start = datetime.datetime.now()
    start = datetime.datetime.now()

    # #########################################################################
    # Affine Invariant MCMC run
    # ######################################################################### 
    if verbose:
        print ''
        print 'Start of the MCMC run ...'
        print 'Step  |  Duration/step (sec)  |  Remaining Estimated Time (sec)'
                             
    for k, res in enumerate(sampler.sample(pos,iterations=nIterations,
                                           storechain=True)):
        elapsed = (datetime.datetime.now()-start).total_seconds()
        if verbose:
            if k == 0:
                q = 0.5
            else:
                q = 1
            print '{}\t\t{:.5f}\t\t\t{:.5f}'.format(k,elapsed*q,elapsed*(limit-k-1)*q)
            
        start = datetime.datetime.now()

        # ---------------------------------------------------------------------        
        # Store the state manually in order to handle with dynamical sized chain.
        # ---------------------------------------------------------------------    
        ## Check if the size of the chain is long enough.
        s = chain.shape[1]
        if k+1 > s: #if not, one doubles the chain length
            empty = np.zeros([nwalkers,2*s,dim])
            chain = np.concatenate((chain,empty),axis=1)
        ## Store the state of the chain
        chain[:,k] = res[0]
        
        
        # ---------------------------------------------------------------------
        # If k meets the criterion, one tests the non-convergence.
        # ---------------------------------------------------------------------              
        criterion = np.amin([ceil(itermin*(1+fraction)**geom),\
                            lastcheck+floor(maxgap)])
   
        if k == criterion:
            if verbose:
                print ''
                print '   Gelman-Rubin statistic test in progress ...' 
            
            geom += 1
            lastcheck = k
            if display:
                showWalk(chain)
                
            if save:
                # Checkpoint: pickle the intermediate sampler state.
                import pickle                                    
                
                with open('results/'+output_file+'/'+output_file+'_temp_k{}'.format(k),'wb') as fileSave:
                    myPickler = pickle.Pickler(fileSave)
                    myPickler.dump({'chain':sampler.chain, 
                                    'lnprob':sampler.lnprobability, 
                                    'AR':sampler.acceptance_fraction})
                
            ## We only test the rhat if we have reached the minimum number of steps.
            if (k+1) >= itermin and konvergence == np.inf:
                # Compare an early and a late quarter of the post-burnin chain
                # (split-chain variant of the Gelman-Rubin diagnostic).
                threshold0 = int(floor(burnin*k))
                threshold1 = int(floor((1-burnin)*k*0.25))

                # We calculate the rhat for each model parameter.
                for j in range(dim):
                    part1 = chain[:,threshold0:threshold0+threshold1,j].reshape((-1))
                    part2 = chain[:,threshold0+3*threshold1:threshold0+4*threshold1,j].reshape((-1))
                    series = np.vstack((part1,part2))
                    rhat[j] = gelman_rubin(series)   
                if verbose:    
                    print '   r_hat = {}'.format(rhat)
                    print '   r_hat <= threshold = {}'.format(rhat <= rhat_threshold)
                    print ''
                # We test the rhat.
                if (rhat <= rhat_threshold).all(): #and rhat_count < rhat_count_threshold: 
                    rhat_count += 1
                    if rhat_count < rhat_count_threshold:
                        print("Gelman-Rubin test OK {}/{}".format(rhat_count,rhat_count_threshold))
                    elif rhat_count >= rhat_count_threshold:
                        print '... ==> convergence reached'
                        konvergence = k
                        stop = konvergence + supp                       
                #elif (rhat <= rhat_threshold).all() and rhat_count >= rhat_count_threshold:
                #    print '... ==> convergence reached'
                #    konvergence = k
                #    stop = konvergence + supp
                else:
                    rhat_count = 0

        if (k+1) >= stop: #Then we have reached the maximum number of steps for our Markov chain.
            print 'We break the loop because we have reached convergence'
            break
      
    if k == nIterations-1:
        print("We have reached the limit number of steps without having converged")
            
    # #########################################################################
    # Construction of the independent samples
    # ######################################################################### 
            
    # Unused (all-zero) tail of the pre-allocated chain marks where sampling stopped.
    temp = np.where(chain[0,:,0] == 0.0)[0]
    if len(temp) != 0:
        idxzero = temp[0]
    else:
        idxzero = chain.shape[1]
    
    # Keep at most min(2e5/nwalkers, 10% of the chain) trailing samples.
    idx = np.amin([np.floor(2e05/nwalkers),np.floor(0.1*idxzero)])
    if idx == 0:
        isamples = chain[:,0:idxzero,:] 
    else:
        isamples = chain[:,idxzero-idx:idxzero,:]

    if save:
        import pickle
        
        # Record the call's input parameters alongside the results.
        frame = inspect.currentframe()
        args, _, _, values = inspect.getargvalues(frame)
        input_parameters = {j : values[j] for j in args[1:]}        
        
        output = {'isamples':isamples,
                  'chain': chain_zero_truncated(chain),
                  'input_parameters': input_parameters,
                  'AR': sampler.acceptance_fraction,
                  'lnprobability': sampler.lnprobability}
                  
        with open('results/'+output_file+'/MCMC_results','wb') as fileSave:
            myPickler = pickle.Pickler(fileSave)
            myPickler.dump(output)
        
        print ''        
        print("The file MCMC_results has been stored in the folder {}".format('results/'+output_file+'/'))

    if verbose:
        timing(start_time)
                                    
    return chain_zero_truncated(chain)    

Example 7

Project: VIP
Source File: simplex_optim.py
View license
def firstguess(cube, angs, psfn, ncomp, plsc, planets_xy_coord, fwhm=4, 
               annulus_width=3, aperture_radius=4, cube_ref=None, 
               svd_mode='lapack', scaling=None, fmerit='sum', collapse='median',
               p_ini=None, f_range=None, simplex=True, simplex_options=None,
               display=False, verbose=True, save=False, figure_options=None):
    """ Determines a first guess for the position and the flux of a planet.
        
    We process the cube without injecting any negative fake companion. 
    This leads to the visual detection of the planet(s). For each of them,
    one can estimate the (x,y) coordinates in pixel for the position of the 
    star, as well as the planet(s). 

    From the (x,y) coordinates in pixels for the star and planet(s), we can 
    estimate a preliminary guess for the position and flux for each planet
    by using the method "firstguess_from_coord". The argument "f_range" allows
    to indicate prior limits for the flux (optional, default: None). 
    This step can be reiterate to refine the preliminary guess for the flux.

    We can go a step further by using a Simplex Nelder_Mead minimization to
    estimate the first guess based on the preliminary guess.
           
    Parameters
    ----------
    cube: numpy.array
        The cube of fits images expressed as a numpy.array. 
    angs: numpy.array
        The parallactic angle fits image expressed as a numpy.array.  
    psfn: numpy.array
        The centered and normalized (flux in a 1*FWHM aperture must equal 1) 
        PSF 2d-array.
    ncomp: int
        The number of principal components.         
    plsc: float
        The platescale, in arcsec per pixel.  
    planet_xy_coord: array or list
        The list of (x,y) positions of the planets.
    fwhm : float, optional
        The FHWM in pixels.
    annulus_width: int, optional
        The width in terms of the FWHM of the annulus on which the PCA is done.       
    aperture_radius: int, optional
        The radius of the circular aperture in terms of the FWHM.
    cube_ref : array_like, 3d, optional
        Reference library cube. For Reference Star Differential Imaging.
    svd_mode : {'lapack', 'randsvd', 'eigen', 'arpack'}, str optional
        Switch for different ways of computing the SVD and selected PCs.
    scaling : {'temp-mean', 'temp-standard'} or None, optional
        With None, no scaling is performed on the input data before SVD. With 
        "temp-mean" then temporal px-wise mean subtraction is done and with 
        "temp-standard" temporal mean centering plus scaling to unit variance 
        is done. 
    fmerit : {'sum', 'stddev'}, string optional
        Chooses the figure of merit to be used. stddev works better for close in
        companions sitting on top of speckle noise.
    collapse : {'median', 'mean', 'sum', 'trimmean', None}, str or None, optional
        Sets the way of collapsing the frames for producing a final image. If
        None then the cube of residuals is used when measuring the function of
        merit (instead of a single final frame).
    p_ini: numpy.array
        Position (r, theta) of the circular aperture center.        
    f_range: numpy.array, optional
        The range of flux tested values. If None, 20 values between 0 and 5000
        are tested.
    simplex: boolean, optional
        If True, the Nelder-Mead minimization is performed after the flux grid
        search.
    simplex_options: dict, optional
        The scipy.optimize.minimize options.
    display: boolean, optional
        If True, the figure chi2 vs. flux is displayed.
    verbose: boolean
        If True, display intermediate info in the shell.
    save: boolean, optional
        If True, the figure chi2 vs. flux is saved.
    figure_options: dict, optional
        Additional parameters are passed to the matplotlib plot method.    

    Returns
    -------
    out : The radial coordinates and the flux of the companion.

    """
    if verbose:  start_time = timeInit()

    if figure_options is None:
        figure_options = {'color':'gray', 'marker':'.', 
                          'title':r'$\chi^2_{r}$ vs flux'}
        
    planets_xy_coord = np.array(planets_xy_coord)
    n_planet = planets_xy_coord.shape[0]

    center_xy_coord = np.array([cube.shape[1]/2.,cube.shape[2]/2.])    

    if f_range is None:  
        f_range = np.linspace(0,5000,20)
    if simplex_options is None:  
        simplex_options = {'xtol':1e-1, 'maxiter':500, 'maxfev':1000}
        
    
    r_0 = np.zeros(n_planet)
    theta_0 = np.zeros_like(r_0)
    f_0 = np.zeros_like(r_0)
    
    for index_planet in range(n_planet):    
        if verbose:
            print ''
            print sep
            print '             Planet {}           '.format(index_planet)
            print sep
            print ''
            msg2 = 'Planet {}: flux estimation at the position [{},{}], running ...'
            print msg2.format(index_planet,planets_xy_coord[index_planet,0],
                              planets_xy_coord[index_planet,1])
        
        res_init = firstguess_from_coord(planets_xy_coord[index_planet],
                                         center_xy_coord, cube, angs, plsc, psfn,
                                         fwhm, annulus_width, aperture_radius,
                                         ncomp, f_range=f_range,
                                         cube_ref=cube_ref, svd_mode=svd_mode,
                                         scaling=scaling, fmerit=fmerit,
                                         collapse=collapse, display=display,
                                         verbose=verbose, save=save,
                                         **figure_options)
        r_pre, theta_pre, f_pre = res_init
                                                                                                                    
        if verbose:
            msg3 = 'Planet {}: preliminary guess: (r, theta, f)=({:.1f}, {:.1f}, {:.1f})'
            print msg3.format(index_planet,r_pre, theta_pre, f_pre)
        
        if simplex:
            if verbose:
                msg4 = 'Planet {}: Simplex Nelder-Mead minimization, running ...'
                print msg4.format(index_planet)
                                                         
            res = firstguess_simplex((r_pre,theta_pre,f_pre), cube, angs, psfn,
                                     plsc, ncomp, fwhm, annulus_width, 
                                     aperture_radius, cube_ref=cube_ref, 
                                     svd_mode=svd_mode, scaling=scaling,
                                     fmerit=fmerit, collapse=collapse, p_ini=p_ini,
                                     options=simplex_options, verbose=False)
            
            r_0[index_planet], theta_0[index_planet], f_0[index_planet] = res.x
            if verbose:
                msg5 = 'Planet {}: Success: {}, nit: {}, nfev: {}, chi2r: {}'
                print msg5.format(index_planet,res.success,res.nit,res.nfev, 
                                  res.fun)
                print 'message: {}'.format(res.message)
            
        else:
            if verbose:
                msg4bis = 'Planet {}: Simplex Nelder-Mead minimization skipped.'
                print msg4bis.format(index_planet)            
            r_0[index_planet] = r_pre
            theta_0[index_planet] = theta_pre
            f_0[index_planet] = f_pre                               

        if verbose:            
            centy, centx = frame_center(cube[0])
            posy = r_0 * np.sin(np.deg2rad(theta_0[index_planet])) + centy
            posx = r_0 * np.cos(np.deg2rad(theta_0[index_planet])) + centx
            msg6 = 'Planet {}: simplex result: (r, theta, f)=({:.3f}, {:.3f}'
            msg6 += ', {:.3f}) at \n          (X,Y)=({:.2f}, {:.2f})'
            print msg6.format(index_planet, r_0[index_planet],
                              theta_0[index_planet], f_0[index_planet], posx[0], posy[0])
    
    if verbose:
        print '\n', sep, '\nDONE !\n', sep
        timing(start_time)

    return (r_0,theta_0,f_0)


        

Example 8

Project: pyeq3
Source File: DataConverterService.py
View license
    def ConvertAndSortColumnarASCII(self, inRawData, inModel, inUseWeightsFlag):
        """Parse whitespace-delimited columnar text into the model's data cache.

        Each line of inRawData is split on whitespace; lines with too few
        tokens or non-numeric tokens are silently skipped. Depending on the
        model's dimensionality (1, 2 or 3), the accepted values are stored
        (sorted by the dependent variable for dimensionality 2 and 3) in
        inModel.dataCache.allDataCacheDictionary under 'IndependentData',
        'DependentData' and 'Weights'. Sign/zero flags describing the data
        domains are also set on the cache.

        Raises an Exception with inModel.reasonWhyDataRejected if the model
        rejects the parsed data.
        """
        # you should first process commas before calling this method,
        # as it uses the default token delimiters in string split()
        #
        # For example, convert $1,234.56 to 1234.56 or 1,23 to 1.23
        # Different number systems have commas in different places
        # and the Python built-in float() method uses decimal notation
        # or scientific notation only

        # cache some data set characteristics for later use,
        # these are for the data domains of individual equations
        inModel.dataCache.independentData1ContainsZeroFlag = False
        inModel.dataCache.independentData2ContainsZeroFlag = False
        inModel.dataCache.independentData1ContainsPositiveFlag = False
        inModel.dataCache.independentData2ContainsPositiveFlag = False
        inModel.dataCache.independentData1ContainsNegativeFlag = False
        inModel.dataCache.independentData2ContainsNegativeFlag = False

        # used in calculation of relative error to prevent divide-by-zero exceptions
        inModel.dataCache.DependentDataContainsZeroFlag = False

        # hoisted: the dimensionality is fixed for the whole parse
        dimensionality = inModel.GetDimensionality()

        if inUseWeightsFlag:
            minimumNumberOfTokens = dimensionality + 1
        else:
            minimumNumberOfTokens = dimensionality

        # StringIO() allows using file methods on text
        rawData = io.StringIO(inRawData).readlines()

        # OK, now load in the data
        dataLists = [[], [], [], []]
        for line in rawData:

            # split the line into string tokens using the default string split() delimiters
            tokenlist = line.split()

            # test this line for minimum required number of string tokens
            if len(tokenlist) < minimumNumberOfTokens:
                continue

            # use the python built-in float() conversion and discard the line
            # on ValueError (narrowed from a bare except, which also swallowed
            # KeyboardInterrupt/SystemExit)
            if dimensionality == 1:
                try:
                    a = float(tokenlist[0])
                except ValueError:
                    continue
                if a > 1.0E300 or a < -1.0E300:
                    continue
                if a < 0.0:
                    inModel.dataCache.independentData1ContainsNegativeFlag = True
                elif a > 0.0:
                    inModel.dataCache.independentData1ContainsPositiveFlag = True
                else:
                    inModel.dataCache.independentData1ContainsZeroFlag = True

                dataLists[0].append(a)
                dataLists[1].append(1.0)
                dataLists[2].append(1.0)
                dataLists[3].append(1.0)

            if dimensionality == 2:
                try:
                    a = float(tokenlist[0])
                    b = float(tokenlist[1])
                    if inUseWeightsFlag:
                        c = float(tokenlist[2])
                    else:
                        c = 1.0
                    d = 1.0
                    
                except ValueError:
                    continue
                if a > 1.0E300 or a < -1.0E300:
                    continue
                if b > 1.0E300 or b < -1.0E300:
                    continue
                if b == 0.0:
                    inModel.dataCache.DependentDataContainsZeroFlag = True
                if a < 0.0:
                    inModel.dataCache.independentData1ContainsNegativeFlag = True
                elif a > 0.0:
                    inModel.dataCache.independentData1ContainsPositiveFlag = True
                else:
                    inModel.dataCache.independentData1ContainsZeroFlag = True
                
                # column layout: [weight, unused, independent, dependent]
                dataLists[0].append(c)
                dataLists[1].append(d)
                dataLists[2].append(a)
                dataLists[3].append(b)

            if dimensionality == 3:
                try:
                    a = float(tokenlist[0])
                    b = float(tokenlist[1])
                    c = float(tokenlist[2])
                    if inUseWeightsFlag:
                        d = float(tokenlist[3])
                    else:
                        d = 1.0
                except ValueError:
                    continue
                if a > 1.0E300 or a < -1.0E300:
                    continue
                if b > 1.0E300 or b < -1.0E300:
                    continue
                if c > 1.0E300 or c < -1.0E300:
                    continue
                if c == 0.0:
                    inModel.dataCache.DependentDataContainsZeroFlag = True
                if a < 0.0:
                    inModel.dataCache.independentData1ContainsNegativeFlag = True
                elif a > 0.0:
                    inModel.dataCache.independentData1ContainsPositiveFlag = True
                else:
                    inModel.dataCache.independentData1ContainsZeroFlag = True
                if b < 0.0:
                    inModel.dataCache.independentData2ContainsNegativeFlag = True
                elif b > 0.0:
                    inModel.dataCache.independentData2ContainsPositiveFlag = True
                else:
                    inModel.dataCache.independentData2ContainsZeroFlag = True

                # column layout: [weight, independent1, independent2, dependent]
                dataLists[0].append(d)
                dataLists[1].append(a)
                dataLists[2].append(b)
                dataLists[3].append(c)
                
        # NOTE(review): ShouldDataBeRejected receives the model as an explicit
        # argument even though it appears to be a method of the model itself;
        # preserved as-is to match the project's call convention.
        if inModel.ShouldDataBeRejected(inModel):
            raise Exception(inModel.reasonWhyDataRejected)
            
        if dimensionality == 1:
            dataLists[0].sort()
            inModel.dataCache.allDataCacheDictionary['IndependentData'] = [numpy.array(dataLists[0]), dataLists[1]]
            return
            
        arrayLists = numpy.array(dataLists) # for sorting all data by values of dependent variable
        indices = numpy.argsort(arrayLists[3])

        inModel.dataCache.allDataCacheDictionary['DependentData'] = numpy.array(arrayLists[3][indices])
        
        if dimensionality == 2:
            inModel.dataCache.allDataCacheDictionary['IndependentData'] = numpy.array([arrayLists[2][indices], numpy.ones(len(arrayLists[0]))]) # the second  _unused_  list is for a bug in scipy.odr, which is used to calculate standard errors on parameters
        if dimensionality == 3:
            inModel.dataCache.allDataCacheDictionary['IndependentData'] = numpy.array([arrayLists[1][indices], arrayLists[2][indices]])
            
        if inUseWeightsFlag:
            inModel.dataCache.allDataCacheDictionary['Weights'] = numpy.array(arrayLists[0][indices])
        else:
            inModel.dataCache.allDataCacheDictionary['Weights'] = []

Example 9

Project: cclib
Source File: gamessukparser.py
View license
    def extract(self, inputfile, line):
        """Extract information from the file object inputfile.

        Called once per line of a GAMESS-UK output file. Each ``if``
        block matches a known section header (by fixed column slice or
        substring) and, when matched, consumes the following lines to
        populate cclib attributes (geometries, SCF/MP energies, orbital
        data, vibrational data, charges, ...).

        Fixes relative to the original:
        - ``inputfile.next()`` (Python 2 only) replaced with the
          built-in ``next(inputfile)`` used everywhere else.
        - ``self.mosysms`` typo corrected to ``self.mosyms`` in the
          gvb branch of the symmetry-assignment section.
        - basis-set regex made a raw string to avoid an invalid
          escape-sequence warning on modern Python.
        """

        if line[1:22] == "total number of atoms":
            natom = int(line.split()[-1])
            self.set_attribute('natom', natom)

        if line[3:44] == "convergence threshold in optimization run":
            # Assuming that this is only found in the case of OPTXYZ
            # (i.e. an optimization in Cartesian coordinates)
            self.geotargets = [float(line.split()[-2])]

        if line[32:61] == "largest component of gradient":
            # This is the geotarget in the case of OPTXYZ
            if not hasattr(self, "geovalues"):
                self.geovalues = []
            self.geovalues.append([float(line.split()[4])])

        if line[37:49] == "convergence?":
            # Get the geovalues and geotargets for OPTIMIZE
            if not hasattr(self, "geovalues"):
                self.geovalues = []
                self.geotargets = []
            geotargets = []
            geovalues = []
            for i in range(4):
                temp = line.split()
                geovalues.append(float(temp[2]))
                if not self.geotargets:
                    geotargets.append(float(temp[-2]))
                line = next(inputfile)
            self.geovalues.append(geovalues)
            if not self.geotargets:
                self.geotargets = geotargets

        # This is the only place coordinates are printed in single point calculations. Note that
        # in the following fragment, the basis set selection is not always printed:
        #
        #                                        ******************
        #                                        molecular geometry
        #                                        ******************
        #
        # ****************************************
        # * basis selected is sto     sto3g      *
        # ****************************************
        #
        #         *******************************************************************************
        #         *                                                                             *
        #         *     atom   atomic                coordinates                 number of      *
        #         *            charge       x             y              z       shells         *
        #         *                                                                             *
        #         *******************************************************************************
        #         *                                                                             *
        #         *                                                                             *
        #         *    c         6.0   0.0000000     -2.6361501      0.0000000       2          *
        #         *                                                                1s  2sp      *
        #         *                                                                             *
        #         *                                                                             *
        #         *    c         6.0   0.0000000      2.6361501      0.0000000       2          *
        #         *                                                                1s  2sp      *
        #         *                                                                             *
        # ...
        #
        if line.strip() == "molecular geometry":

            self.updateprogress(inputfile, "Coordinates")

            self.skip_lines(inputfile, ['s', 'b', 's'])
            line = next(inputfile)
            if "basis selected is" in line:
                self.skip_lines(inputfile, ['s', 'b', 's', 's'])

            self.skip_lines(inputfile, ['header1', 'header2', 's', 's'])

            atomnos = []
            atomcoords = []
            line = next(inputfile)
            while line.strip():
                line = next(inputfile)
                # Data rows have an element tag in columns 1-10 and are not
                # made up entirely of asterisks (the box border lines are).
                if line.strip()[1:10].strip() and list(set(line.strip())) != ['*']:
                    atomcoords.append([utils.convertor(float(x), "bohr", "Angstrom") for x in line.split()[3:6]])
                    atomnos.append(int(round(float(line.split()[2]))))

            if not hasattr(self, "atomcoords"):
                self.atomcoords = []
            self.atomcoords.append(atomcoords)
            self.set_attribute('atomnos', atomnos)

        # Each step of a geometry optimization will also print the coordinates:
        #
        # search  0
        #                                        *******************
        # point   0                              nuclear coordinates
        #                                        *******************
        #
        #         x              y              z            chg  tag
        #  ============================================================
        #        0.0000000     -2.6361501      0.0000000    6.00  c
        #        0.0000000      2.6361501      0.0000000    6.00  c
        # ..
        #
        if line[40:59] == "nuclear coordinates":

            self.updateprogress(inputfile, "Coordinates")

            # We need not remember the first geometry in geometry optimizations, as this will
            # be already parsed from the "molecular geometry" section (see above).
            if not hasattr(self, 'firstnuccoords') or self.firstnuccoords:
                self.firstnuccoords = False
                return

            self.skip_lines(inputfile, ['s', 'b', 'colname', 'e'])

            atomcoords = []
            atomnos = []
            line = next(inputfile)
            while list(set(line.strip())) != ['=']:

                cols = line.split()
                atomcoords.append([utils.convertor(float(x), "bohr", "Angstrom") for x in cols[0:3]])
                atomnos.append(int(float(cols[3])))

                line = next(inputfile)

            if not hasattr(self, "atomcoords"):
                self.atomcoords = []
            self.atomcoords.append(atomcoords)
            self.set_attribute('atomnos', atomnos)

        # This is printed when a geometry optimization succeeds, after the last gradient of the energy.
        if line[40:62] == "optimization converged":
            self.skip_line(inputfile, 's')
            if not hasattr(self, 'optdone'):
                self.optdone = []
            self.optdone.append(len(self.geovalues)-1)

        # This is apparently printed when a geometry optimization is not converged but the job ends.
        if "minimisation not converging" in line:
            self.skip_line(inputfile, 's')
            self.optdone = []

        if line[1:32] == "total number of basis functions":

            nbasis = int(line.split()[-1])
            self.set_attribute('nbasis', nbasis)

            while line.find("charge of molecule") < 0:
                line = next(inputfile)

            charge = int(line.split()[-1])
            self.set_attribute('charge', charge)

            mult = int(next(inputfile).split()[-1])
            self.set_attribute('mult', mult)

            alpha = int(next(inputfile).split()[-1])-1
            beta = int(next(inputfile).split()[-1])-1
            if self.mult == 1:
                self.homos = numpy.array([alpha], "i")
            else:
                self.homos = numpy.array([alpha, beta], "i")

        if line[37:69] == "s-matrix over gaussian basis set":
            self.aooverlaps = numpy.zeros((self.nbasis, self.nbasis), "d")

            self.skip_lines(inputfile, ['d', 'b'])

            i = 0
            while i < self.nbasis:
                self.updateprogress(inputfile, "Overlap")

                self.skip_lines(inputfile, ['b', 'b', 'header', 'b', 'b'])

                for j in range(self.nbasis):
                    temp = list(map(float, next(inputfile).split()[1:]))
                    self.aooverlaps[j, (0+i):(len(temp)+i)] = temp

                i += len(temp)

        if line[18:43] == 'EFFECTIVE CORE POTENTIALS':

            self.skip_line(inputfile, 'stars')

            self.coreelectrons = numpy.zeros(self.natom, 'i')
            line = next(inputfile)
            while line[15:46] != "*"*31:
                if line.find("for atoms ...") >= 0:
                    atomindex = []
                    line = next(inputfile)
                    while line.find("core charge") < 0:
                        broken = line.split()
                        atomindex.extend([int(x.split("-")[0]) for x in broken])
                        line = next(inputfile)
                    charge = float(line.split()[4])
                    for idx in atomindex:
                        self.coreelectrons[idx-1] = self.atomnos[idx-1] - charge
                line = next(inputfile)

        if line[3:27] == "Wavefunction convergence":
            self.scftarget = float(line.split()[-2])
            self.scftargets = []

        if line[11:22] == "normal mode":
            if not hasattr(self, "vibfreqs"):
                self.vibfreqs = []
                self.vibirs = []

            units = next(inputfile)
            xyz = next(inputfile)
            equals = next(inputfile)
            line = next(inputfile)
            while line != equals:
                temp = line.split()
                self.vibfreqs.append(float(temp[1]))
                self.vibirs.append(float(temp[-2]))
                line = next(inputfile)
            # Use the length of the vibdisps to figure out
            # how many rotations and translations to remove
            self.vibfreqs = self.vibfreqs[-len(self.vibdisps):]
            self.vibirs = self.vibirs[-len(self.vibdisps):]

        if line[44:73] == "normalised normal coordinates":

            self.skip_lines(inputfile, ['e', 'b', 'b'])

            self.vibdisps = []
            freqnum = next(inputfile)
            while freqnum.find("=") < 0:

                self.skip_lines(inputfile, ['b', 'e', 'freqs', 'e', 'b', 'header', 'e'])

                p = [[] for x in range(9)]
                for i in range(len(self.atomnos)):
                    brokenx = list(map(float, next(inputfile)[25:].split()))
                    brokeny = list(map(float, next(inputfile)[25:].split()))
                    brokenz = list(map(float, next(inputfile)[25:].split()))
                    for j, x in enumerate(list(zip(brokenx, brokeny, brokenz))):
                        p[j].append(x)
                self.vibdisps.extend(p)

                self.skip_lines(inputfile, ['b', 'b'])

                freqnum = next(inputfile)

        if line[26:36] == "raman data":
            self.vibramans = []

            self.skip_lines(inputfile, ['s', 'b', 'header', 'b'])

            line = next(inputfile)
            while line[1] != "*":
                self.vibramans.append(float(line.split()[3]))
                self.skip_line(inputfile, 'blank')
                line = next(inputfile)
            # Use the length of the vibdisps to figure out
            # how many rotations and translations to remove
            self.vibramans = self.vibramans[-len(self.vibdisps):]

        if line[3:11] == "SCF TYPE":
            self.scftype = line.split()[-2]
            assert self.scftype in ['rhf', 'uhf', 'gvb'], "%s not one of 'rhf', 'uhf' or 'gvb'" % self.scftype

        if line[15:31] == "convergence data":
            if not hasattr(self, "scfvalues"):
                self.scfvalues = []
            self.scftargets.append([self.scftarget])  # Assuming it does not change over time
            while line[1:10] != "="*9:
                line = next(inputfile)
            line = next(inputfile)
            tester = line.find("tester")  # Can be in a different place depending
            assert tester >= 0
            while line[1:10] != "="*9:  # May be two or three lines (unres)
                line = next(inputfile)

            scfvalues = []
            line = next(inputfile)
            while line.strip():
                # e.g. **** recalulation of fock matrix on iteration  4 (examples/chap12/pyridine.out)
                if line[2:6] != "****":
                    scfvalues.append([float(line[tester-5:tester+6])])
                try:
                    line = next(inputfile)
                except StopIteration:
                    self.logger.warning('File terminated before end of last SCF! Last tester: {}'.format(line.split()[5]))
                    break
            self.scfvalues.append(scfvalues)

        if line[10:22] == "total energy" and len(line.split()) == 3:
            if not hasattr(self, "scfenergies"):
                self.scfenergies = []
            scfenergy = utils.convertor(float(line.split()[-1]), "hartree", "eV")
            self.scfenergies.append(scfenergy)

        # Total energies after Moller-Plesset corrections
        # Second order correction is always first, so its first occurance
        #   triggers creation of mpenergies (list of lists of energies)
        # Further corrections are appended as found
        # Note: GAMESS-UK sometimes prints only the corrections,
        #   so they must be added to the last value of scfenergies
        if line[10:32] == "mp2 correlation energy" or \
           line[10:42] == "second order perturbation energy":
            if not hasattr(self, "mpenergies"):
                self.mpenergies = []
            self.mpenergies.append([])
            self.mp2correction = self.float(line.split()[-1])
            self.mp2energy = self.scfenergies[-1] + self.mp2correction
            self.mpenergies[-1].append(utils.convertor(self.mp2energy, "hartree", "eV"))
        if line[10:41] == "third order perturbation energy":
            self.mp3correction = self.float(line.split()[-1])
            self.mp3energy = self.mp2energy + self.mp3correction
            self.mpenergies[-1].append(utils.convertor(self.mp3energy, "hartree", "eV"))

        if line[40:59] == "molecular basis set":
            self.gbasis = []
            line = next(inputfile)
            while line.find("contraction coefficients") < 0:
                line = next(inputfile)
            equals = next(inputfile)
            blank = next(inputfile)
            atomname = next(inputfile)
            # Raw string so "\d"/"\D" are not treated as (invalid) string escapes.
            basisregexp = re.compile(r"\d*(\D+)")  # Get everything after any digits
            shellcounter = 1
            while line != equals:
                gbasis = []  # Stores basis sets on one atom
                blank = next(inputfile)
                blank = next(inputfile)
                line = next(inputfile)
                shellno = int(line.split()[0])
                shellgap = shellno - shellcounter
                shellsize = 0
                while len(line.split()) != 1 and line != equals:
                    if line.split():
                        shellsize += 1
                    coeff = {}
                    # coefficients and symmetries for a block of rows
                    while line.strip() and line != equals:
                        temp = line.strip().split()
                    # temp[1] may be either like (a) "1s" and "1sp", or (b) "s" and "sp"
                    # See GAMESS-UK 7.0 distribution/examples/chap12/pyridine2_21m10r.out
                    # for an example of the latter
                        sym = basisregexp.match(temp[1]).groups()[0]
                        assert sym in ['s', 'p', 'd', 'f', 'sp'], "'%s' not a recognized symmetry" % sym
                        if sym == "sp":
                            coeff.setdefault("S", []).append((float(temp[3]), float(temp[6])))
                            coeff.setdefault("P", []).append((float(temp[3]), float(temp[10])))
                        else:
                            coeff.setdefault(sym.upper(), []).append((float(temp[3]), float(temp[6])))
                        line = next(inputfile)
                    # either a blank or a continuation of the block
                    if coeff:
                        if sym == "sp":
                            gbasis.append(('S', coeff['S']))
                            gbasis.append(('P', coeff['P']))
                        else:
                            gbasis.append((sym.upper(), coeff[sym.upper()]))
                    if line == equals:
                        continue
                    line = next(inputfile)
                    # either the start of the next block or the start of a new atom or
                    # the end of the basis function section (signified by a line of equals)
                numtoadd = 1 + (shellgap // shellsize)
                shellcounter = shellno + shellsize
                for x in range(numtoadd):
                    self.gbasis.append(gbasis)

        if line[50:70] == "----- beta set -----":
            self.betamosyms = True
            self.betamoenergies = True
            self.betamocoeffs = True
            # betamosyms will be turned off in the next
            # SYMMETRY ASSIGNMENT section

        if line[31:50] == "SYMMETRY ASSIGNMENT":
            if not hasattr(self, "mosyms"):
                self.mosyms = []

            multiple = {'a': 1, 'b': 1, 'e': 2, 't': 3, 'g': 4, 'h': 5}

            equals = next(inputfile)
            line = next(inputfile)
            while line != equals:  # There may be one or two lines of title (compare mg10.out and duhf_1.out)
                line = next(inputfile)

            mosyms = []
            line = next(inputfile)
            while line != equals:
                temp = line[25:30].strip()
                if temp[-1] == '?':
                    # e.g. e? or t? or g? (see example/chap12/na7mg_uhf.out)
                    # for two As, an A and an E, and two Es of the same energy respectively.
                    t = line[91:].strip().split()
                    for i in range(1, len(t), 2):
                        for j in range(multiple[t[i][0]]):  # add twice for 'e', etc.
                            mosyms.append(self.normalisesym(t[i]))
                else:
                    for j in range(multiple[temp[0]]):
                        mosyms.append(self.normalisesym(temp))  # add twice for 'e', etc.
                line = next(inputfile)
            assert len(mosyms) == self.nmo, "mosyms: %d but nmo: %d" % (len(mosyms), self.nmo)
            if self.betamosyms:
                # Only append if beta (otherwise with IPRINT SCF
                # it will add mosyms for every step of a geo opt)
                self.mosyms.append(mosyms)
                self.betamosyms = False
            elif self.scftype == 'gvb':
                # gvb has alpha and beta orbitals but they are identical
                # (was a typo: self.mosysms, which left mosyms unset for gvb)
                self.mosyms = [mosyms, mosyms]
            else:
                self.mosyms = [mosyms]

        if line[50:62] == "eigenvectors":
        # Mocoeffs...can get evalues from here too
        # (only if using FORMAT HIGH though will they all be present)
            if not hasattr(self, "mocoeffs"):
                self.aonames = []
                aonames = []
            minus = next(inputfile)

            mocoeffs = numpy.zeros((self.nmo, self.nbasis), "d")
            readatombasis = False
            if not hasattr(self, "atombasis"):
                self.atombasis = []
                for i in range(self.natom):
                    self.atombasis.append([])
                readatombasis = True

            self.skip_lines(inputfile, ['b', 'b', 'evalues'])

            p = re.compile(r"\d+\s+(\d+)\s*(\w+) (\w+)")
            oldatomname = "DUMMY VALUE"

            mo = 0
            while mo < self.nmo:
                self.updateprogress(inputfile, "Coefficients")

                self.skip_lines(inputfile, ['b', 'b', 'nums', 'b', 'b'])

                for basis in range(self.nbasis):
                    line = next(inputfile)
                    # Fill atombasis only first time around.
                    if readatombasis:
                        orbno = int(line[1:5])-1
                        atomno = int(line[6:9])-1
                        self.atombasis[atomno].append(orbno)
                    if not self.aonames:
                        pg = p.match(line[:18].strip()).groups()
                        atomname = "%s%s%s" % (pg[1][0].upper(), pg[1][1:], pg[0])
                        if atomname != oldatomname:
                            aonum = 1
                        oldatomname = atomname
                        name = "%s_%d%s" % (atomname, aonum, pg[2].upper())
                        if name in aonames:
                            aonum += 1
                        name = "%s_%d%s" % (atomname, aonum, pg[2].upper())
                        aonames.append(name)
                    temp = list(map(float, line[19:].split()))
                    mocoeffs[mo:(mo+len(temp)), basis] = temp
                # Fill atombasis only first time around.
                readatombasis = False
                if not self.aonames:
                    self.aonames = aonames

                line = next(inputfile)  # blank line
                while not line.strip():
                    line = next(inputfile)
                evalues = line
                if evalues[:17].strip():  # i.e. if these aren't evalues
                    break  # Not all the MOs are present
                mo += len(temp)
            mocoeffs = mocoeffs[0:(mo+len(temp)), :]  # In case some aren't present
            if self.betamocoeffs:
                self.mocoeffs.append(mocoeffs)
            else:
                self.mocoeffs = [mocoeffs]

        if line[7:12] == "irrep":
            ########## eigenvalues ###########
            # This section appears once at the start of a geo-opt and once at the end
            # unless IPRINT SCF is used (when it appears at every step in addition)
            if not hasattr(self, "moenergies"):
                self.moenergies = []

            equals = next(inputfile)
            while equals[1:5] != "====":  # May be one or two lines of title (compare duhf_1.out and mg10.out)
                equals = next(inputfile)

            moenergies = []
            line = next(inputfile)
            if not line.strip():  # May be a blank line here (compare duhf_1.out and mg10.out)
                line = next(inputfile)

            while line.strip() and line != equals:  # May end with a blank or equals
                temp = line.strip().split()
                moenergies.append(utils.convertor(float(temp[2]), "hartree", "eV"))
                line = next(inputfile)
            self.nmo = len(moenergies)
            if self.betamoenergies:
                self.moenergies.append(moenergies)
                self.betamoenergies = False
            elif self.scftype == 'gvb':
                self.moenergies = [moenergies, moenergies]
            else:
                self.moenergies = [moenergies]

        # The dipole moment is printed by default at the beginning of the wavefunction analysis,
        # but the value is in atomic units, so we need to convert to Debye. It seems pretty
        # evident that the reference point is the origin (0,0,0) which is also the center
        # of mass after reorientation at the beginning of the job, although this is not
        # stated anywhere (would be good to check).
        #
        #                                        *********************
        #                                        wavefunction analysis
        #                                        *********************
        #
        # commence analysis at     24.61 seconds
        #
        #                 dipole moments
        #
        #
        #           nuclear      electronic           total
        #
        # x       0.0000000       0.0000000       0.0000000
        # y       0.0000000       0.0000000       0.0000000
        # z       0.0000000       0.0000000       0.0000000
        #
        if line.strip() == "dipole moments":

            # In older version there is only one blank line before the header,
            # and newer version there are two.
            self.skip_line(inputfile, 'blank')
            line = next(inputfile)
            if not line.strip():
                line = next(inputfile)
            self.skip_line(inputfile, 'blank')

            dipole = []
            for i in range(3):
                line = next(inputfile)
                dipole.append(float(line.split()[-1]))

            reference = [0.0, 0.0, 0.0]
            dipole = utils.convertor(numpy.array(dipole), "ebohr", "Debye")

            if not hasattr(self, 'moments'):
                self.moments = [reference, dipole]
            else:
                assert self.moments[1] == dipole

        # Net atomic charges are not printed at all, it seems,
        # but you can get at them from nuclear charges and
        # electron populations, which are printed like so:
        #
        #  ---------------------------------------
        #  mulliken and lowdin population analyses
        #  ---------------------------------------
        #
        # ----- total gross population in aos ------
        #
        # 1  1  c s         1.99066     1.98479
        # 2  1  c s         1.14685     1.04816
        # ...
        #
        #  ----- total gross population on atoms ----
        #
        # 1  c            6.0     6.00446     5.99625
        # 2  c            6.0     6.00446     5.99625
        # 3  c            6.0     6.07671     6.04399
        # ...
        if line[10:49] == "mulliken and lowdin population analyses":

            if not hasattr(self, "atomcharges"):
                self.atomcharges = {}

            while not "total gross population on atoms" in line:
                line = next(inputfile)

            self.skip_line(inputfile, 'blank')

            line = next(inputfile)
            mulliken, lowdin = [], []
            while line.strip():
                nuclear = float(line.split()[2])
                mulliken.append(nuclear - float(line.split()[3]))
                lowdin.append(nuclear - float(line.split()[4]))
                line = next(inputfile)

            self.atomcharges["mulliken"] = mulliken
            self.atomcharges["lowdin"] = lowdin

        #          ----- spinfree UHF natural orbital occupations -----
        #
        #               2.0000000     2.0000000     2.0000000     2.0000000     2.0000000     2.0000000     2.0000000
        #
        #               2.0000000     2.0000000     2.0000000     2.0000000     2.0000000     1.9999997     1.9999997
        # ...
        if "natural orbital occupations" in line:

            occupations = []

            self.skip_line(inputfile, "blank")
            # Use the next() builtin, not the Python-2-only .next() method,
            # consistent with the rest of this parser.
            line = next(inputfile)

            while line.strip():
                occupations += map(float, line.split())

                self.skip_line(inputfile, "blank")
                line = next(inputfile)

            self.set_attribute('nooccnos', occupations)

Example 10

Project: cclib
Source File: jaguarparser.py
View license
    def extract(self, inputfile, line):
        """Extract information from the file object inputfile."""

        # Extract the version number first
        if "Jaguar version" in line:
            self.metadata["package_version"] = line.split()[3][:-1]

        # Extract the basis set name
        if line[2:12] == "basis set:":
            self.metadata["basis_set"] = line.split()[2]

        # Extract charge and multiplicity
        if line[2:22] == "net molecular charge":
            self.set_attribute('charge', int(line.split()[-1]))
            self.set_attribute('mult', int(next(inputfile).split()[-1]))

        # The Gaussian basis set information is printed before the geometry, and we need
        # to do some indexing to get this into cclib format, because fn increments
        # for each engular momentum, but cclib does not (we have just P instead of
        # all three X/Y/Z with the same parameters. On the other hand, fn enumerates
        # the atomic orbitals correctly, so use it to build atombasis.
        #
        #  Gaussian basis set information
        #
        #                                                        renorm    mfac*renorm
        #   atom    fn   prim  L        z            coef         coef         coef
        # -------- ----- ---- --- -------------  -----------  -----------  -----------
        # C1           1    1   S  7.161684E+01   1.5433E-01   2.7078E+00   2.7078E+00
        # C1           1    2   S  1.304510E+01   5.3533E-01   2.6189E+00   2.6189E+00
        # ...
        # C1           3    6   X  2.941249E+00   2.2135E-01   1.2153E+00   1.2153E+00
        #              4        Y                                           1.2153E+00
        #              5        Z                                           1.2153E+00
        # C1           2    8   S  2.222899E-01   1.0000E+00   2.3073E-01   2.3073E-01
        # C1           3    7   X  6.834831E-01   8.6271E-01   7.6421E-01   7.6421E-01
        # ...
        # C2           6    1   S  7.161684E+01   1.5433E-01   2.7078E+00   2.7078E+00
        # ...
        #
        if line.strip() == "Gaussian basis set information":

            self.skip_lines(inputfile, ['b', 'renorm', 'header', 'd'])

            # This is probably the only place we can get this information from Jaguar.
            self.gbasis = []

            atombasis = []
            line = next(inputfile)
            fn_per_atom = []
            while line.strip():

                if len(line.split()) > 3:

                    aname = line.split()[0]
                    fn = int(line.split()[1])
                    prim = int(line.split()[2])
                    L = line.split()[3]
                    z = float(line.split()[4])
                    coef = float(line.split()[5])

                    # The primitive count is reset for each atom, so use that for adding
                    # new elements to atombasis and gbasis. We could also probably do this
                    # using the atom name, although that perhaps might not always be unique.
                    if prim == 1:
                        atombasis.append([])
                        fn_per_atom = []
                        self.gbasis.append([])

                    # Remember that fn is repeated when functions are contracted.
                    if not fn-1 in atombasis[-1]:
                        atombasis[-1].append(fn-1)

                    # Here we use fn only to know when a new contraction is encountered,
                    # so we don't need to decrement it, and we don't even use all values.
                    # What's more, since we only wish to save the parameters for each subshell
                    # once, we don't even need to consider lines for orbitals other than
                    # those for X*, making things a bit easier.
                    if not fn in fn_per_atom:
                        fn_per_atom.append(fn)
                        label = {'S': 'S', 'X': 'P', 'XX': 'D', 'XXX': 'F'}[L]
                        self.gbasis[-1].append((label, []))
                    igbasis = fn_per_atom.index(fn)
                    self.gbasis[-1][igbasis][1].append([z, coef])

                else:

                    fn = int(line.split()[0])
                    L = line.split()[1]

                    # Some AO indices are only printed in these lines, for L > 0.
                    if not fn-1 in atombasis[-1]:
                        atombasis[-1].append(fn-1)

                line = next(inputfile)

            # The indices for atombasis can also be read later from the molecular orbital output.
            self.set_attribute('atombasis', atombasis)

            # This length of atombasis should always be the number of atoms.
            self.set_attribute('natom', len(self.atombasis))

        #  Effective Core Potential
        #
        #  Atom      Electrons represented by ECP
        # Mo                    36
        #              Maximum angular term         3
        # F Potential      1/r^n   Exponent  Coefficient
        #                  -----   --------  -----------
        #                    0  140.4577691   -0.0469492
        #                    1   89.4739342  -24.9754989
        # ...
        # S-F Potential    1/r^n   Exponent  Coefficient
        #                  -----   --------  -----------
        #                    0   33.7771969    2.9278406
        #                    1   10.0120020   34.3483716
        # ...
        # O                      0
        # Cl                    10
        #              Maximum angular term         2
        # D Potential      1/r^n   Exponent  Coefficient
        #                  -----   --------  -----------
        #                    1   94.8130000  -10.0000000
        # ...
        if line.strip() == "Effective Core Potential":

            self.skip_line(inputfile, 'blank')
            line = next(inputfile)
            assert line.split()[0] == "Atom"
            assert " ".join(line.split()[1:]) == "Electrons represented by ECP"

            self.coreelectrons = []
            line = next(inputfile)
            while line.strip():
                if len(line.split()) == 2:
                    self.coreelectrons.append(int(line.split()[1]))
                line = next(inputfile)

        if line[2:14] == "new geometry" or line[1:21] == "Symmetrized geometry" or line.find("Input geometry") > 0:
        # Get the atom coordinates
            if not hasattr(self, "atomcoords") or line[1:21] == "Symmetrized geometry":
                # Wipe the "Input geometry" if "Symmetrized geometry" present
                self.atomcoords = []
            p = re.compile("(\D+)\d+")  # One/more letters followed by a number
            atomcoords = []
            atomnos = []
            angstrom = next(inputfile)
            title = next(inputfile)
            line = next(inputfile)
            while line.strip():
                temp = line.split()
                element = p.findall(temp[0])[0]
                atomnos.append(self.table.number[element])
                atomcoords.append(list(map(float, temp[1:])))
                line = next(inputfile)
            self.atomcoords.append(atomcoords)
            self.atomnos = numpy.array(atomnos, "i")
            self.set_attribute('natom', len(atomcoords))

        # Hartree-Fock energy after SCF
        if line[1:18] == "SCFE: SCF energy:":
            self.metadata["methods"].append("HF")
            if not hasattr(self, "scfenergies"):
                self.scfenergies = []
            temp = line.strip().split()
            scfenergy = float(temp[temp.index("hartrees") - 1])
            scfenergy = utils.convertor(scfenergy, "hartree", "eV")
            self.scfenergies.append(scfenergy)

        # Energy after LMP2 correction
        if line[1:18] == "Total LMP2 Energy":
            self.metadata["methods"].append("LMP2")
            if not hasattr(self, "mpenergies"):
                self.mpenergies = [[]]
            lmp2energy = float(line.split()[-1])
            lmp2energy = utils.convertor(lmp2energy, "hartree", "eV")
            self.mpenergies[-1].append(lmp2energy)

        if line[15:45] == "Geometry optimization complete":
            if not hasattr(self, 'optdone'):
                self.optdone = []
            self.optdone.append(len(self.geovalues) - 1)

        if line.find("number of occupied orbitals") > 0:
        # Get number of MOs
            occs = int(line.split()[-1])
            line = next(inputfile)
            virts = int(line.split()[-1])
            self.nmo = occs + virts
            self.homos = numpy.array([occs-1], "i")

            self.unrestrictedflag = False

        if line[1:28] == "number of occupied orbitals":
            self.homos = numpy.array([float(line.strip().split()[-1])-1], "i")

        if line[2:27] == "number of basis functions":
            nbasis = int(line.strip().split()[-1])
            self.set_attribute('nbasis', nbasis)

        if line.find("number of alpha occupied orb") > 0:
        # Get number of MOs for an unrestricted calc

            aoccs = int(line.split()[-1])
            line = next(inputfile)
            avirts = int(line.split()[-1])
            line = next(inputfile)
            boccs = int(line.split()[-1])
            line = next(inputfile)
            bvirt = int(line.split()[-1])

            self.nmo = aoccs + avirts
            self.homos = numpy.array([aoccs-1, boccs-1], "i")
            self.unrestrictedflag = True

        if line[0:4] == "etot":
        # Get SCF convergence information
            if not hasattr(self, "scfvalues"):
                self.scfvalues = []
                self.scftargets = [[5E-5, 5E-6]]
            values = []
            while line[0:4] == "etot":
        # Jaguar 4.2
        # etot   1  N  N  0  N  -382.08751886450           2.3E-03  1.4E-01
        # etot   2  Y  Y  0  N  -382.27486023153  1.9E-01  1.4E-03  5.7E-02
        # Jaguar 6.5
        # etot   1  N  N  0  N    -382.08751881733           2.3E-03  1.4E-01
        # etot   2  Y  Y  0  N    -382.27486018708  1.9E-01  1.4E-03  5.7E-02
                temp = line.split()[7:]
                if len(temp) == 3:
                    denergy = float(temp[0])
                else:
                    denergy = 0  # Should really be greater than target value
                                 # or should we just ignore the values in this line
                ddensity = float(temp[-2])
                maxdiiserr = float(temp[-1])
                if not self.geoopt:
                    values.append([denergy, ddensity])
                else:
                    values.append([ddensity])
                try:
                    line = next(inputfile)
                except StopIteration:
                    self.logger.warning('File terminated before end of last SCF! Last error: {}'.format(maxdiiserr))
                    break
            self.scfvalues.append(values)

        # MO energies and symmetries.
        # Jaguar 7.0: provides energies and symmetries for both
        #   restricted and unrestricted calculations, like this:
        #     Alpha Orbital energies/symmetry label:
        #     -10.25358 Bu  -10.25353 Ag  -10.21931 Bu  -10.21927 Ag
        #     -10.21792 Bu  -10.21782 Ag  -10.21773 Bu  -10.21772 Ag
        #     ...
        # Jaguar 6.5: prints both only for restricted calculations,
        #   so for unrestricted calculations the output it looks like this:
        #     Alpha Orbital energies:
        #     -10.25358  -10.25353  -10.21931  -10.21927  -10.21792  -10.21782
        #     -10.21773  -10.21772  -10.21537  -10.21537   -1.02078   -0.96193
        #     ...
        # Presence of 'Orbital energies' is enough to catch all versions.
        if "Orbital energies" in line:

            # Parsing results is identical for restricted/unrestricted
            #   calculations, just assert later that alpha/beta order is OK.
            spin = int(line[2:6] == "Beta")

            # Check if symmetries are printed also.
            issyms = "symmetry label" in line

            if not hasattr(self, "moenergies"):
                self.moenergies = []
            if issyms and not hasattr(self, "mosyms"):
                    self.mosyms = []

            # Grow moeneriges/mosyms and make sure they are empty when
            #   parsed multiple times - currently cclib returns only
            #   the final output (ex. in a geomtry optimization).
            if len(self.moenergies) < spin+1:
                self.moenergies.append([])
            self.moenergies[spin] = []
            if issyms:
                if len(self.mosyms) < spin+1:
                    self.mosyms.append([])
                self.mosyms[spin] = []

            line = next(inputfile).split()
            while len(line) > 0:
                if issyms:
                    energies = [float(line[2*i]) for i in range(len(line)//2)]
                    syms = [line[2*i+1] for i in range(len(line)//2)]
                else:
                    energies = [float(e) for e in line]
                energies = [utils.convertor(e, "hartree", "eV") for e in energies]
                self.moenergies[spin].extend(energies)
                if issyms:
                    syms = [self.normalisesym(s) for s in syms]
                    self.mosyms[spin].extend(syms)
                line = next(inputfile).split()

            line = next(inputfile)

        # The second trigger string is in the version 8.3 unit test and the first one was
        # encountered in version 6.x and is followed by a bit different format. In particular,
        # the line with occupations is missing in each block. Here is a fragment of this block
        # from version 8.3:
        #
        # *****************************************
        #
        # occupied + virtual orbitals: final wave function
        #
        # *****************************************
        #
        #
        #                              1         2         3         4         5
        #  eigenvalues-            -11.04064 -11.04058 -11.03196 -11.03196 -11.02881
        #  occupations-              2.00000   2.00000   2.00000   2.00000   2.00000
        #    1 C1               S    0.70148   0.70154  -0.00958  -0.00991   0.00401
        #    2 C1               S    0.02527   0.02518   0.00380   0.00374   0.00371
        # ...
        #
        if line.find("Occupied + virtual Orbitals- final wvfn") > 0 or \
           line.find("occupied + virtual orbitals: final wave function") > 0:

            self.skip_lines(inputfile, ['b', 's', 'b', 'b'])

            if not hasattr(self, "mocoeffs"):
                self.mocoeffs = []

            aonames = []
            lastatom = "X"

            readatombasis = False
            if not hasattr(self, "atombasis"):
                self.atombasis = []
                for i in range(self.natom):
                    self.atombasis.append([])
                readatombasis = True

            offset = 0

            spin = 1 + int(self.unrestrictedflag)
            for s in range(spin):
                mocoeffs = numpy.zeros((len(self.moenergies[s]), self.nbasis), "d")

                if s == 1:  # beta case
                    self.skip_lines(inputfile, ['s', 'b', 'title', 'b', 's', 'b', 'b'])

                for k in range(0, len(self.moenergies[s]), 5):
                    self.updateprogress(inputfile, "Coefficients")

                    # All known version have a line with indices followed by the eigenvalues.
                    self.skip_lines(inputfile, ['numbers', 'eigens'])

                    # Newer version also have a line with occupation numbers here.
                    line = next(inputfile)
                    if "occupations-" in line:
                        line = next(inputfile)

                    for i in range(self.nbasis):

                        info = line.split()

                        # Fill atombasis only first time around.
                        if readatombasis and k == 0:
                            orbno = int(info[0])
                            atom = info[1]
                            if atom[1].isalpha():
                                atomno = int(atom[2:])
                            else:
                                atomno = int(atom[1:])
                            self.atombasis[atomno-1].append(orbno-1)

                        if not hasattr(self, "aonames"):
                            if lastatom != info[1]:
                                scount = 1
                                pcount = 3
                                dcount = 6  # six d orbitals in Jaguar

                            if info[2] == 'S':
                                aonames.append("%s_%i%s" % (info[1], scount, info[2]))
                                scount += 1

                            if info[2] == 'X' or info[2] == 'Y' or info[2] == 'Z':
                                aonames.append("%s_%iP%s" % (info[1], pcount / 3, info[2]))
                                pcount += 1

                            if info[2] == 'XX' or info[2] == 'YY' or info[2] == 'ZZ' or \
                               info[2] == 'XY' or info[2] == 'XZ' or info[2] == 'YZ':

                                aonames.append("%s_%iD%s" % (info[1], dcount / 6, info[2]))
                                dcount += 1

                            lastatom = info[1]

                        for j in range(len(info[3:])):
                            mocoeffs[j+k, i] = float(info[3+j])

                        line = next(inputfile)

                    if not hasattr(self, "aonames"):
                        self.aonames = aonames

                    offset += 5
                self.mocoeffs.append(mocoeffs)

        #  Atomic charges from Mulliken population analysis:
        #
        # Atom       C1           C2           C3           C4           C5
        # Charge    0.00177     -0.06075     -0.05956      0.00177     -0.06075
        #
        # Atom       H6           H7           H8           C9           C10
        # ...
        if line.strip() == "Atomic charges from Mulliken population analysis:":

            if not hasattr(self, 'atomcharges'):
                self.atomcharges = {}

            charges = []
            self.skip_line(inputfile, "blank")
            line = next(inputfile)
            while "sum of atomic charges" not in line:
                assert line.split()[0] == "Atom"
                line = next(inputfile)
                assert line.split()[0] == "Charge"
                charges.extend([float(c) for c in line.split()[1:]])
                self.skip_line(inputfile, "blank")
                line = next(inputfile)

            self.atomcharges['mulliken'] = charges

        if (line[2:6] == "olap") or (line.strip() == "overlap matrix:"):

            if line[6] == "-":
                return
                # This was continue (in loop) before parser refactoring.
                # continue # avoid "olap-dev"
            self.aooverlaps = numpy.zeros((self.nbasis, self.nbasis), "d")

            for i in range(0, self.nbasis, 5):
                self.updateprogress(inputfile, "Overlap")

                self.skip_lines(inputfile, ['b', 'header'])

                for j in range(i, self.nbasis):
                    temp = list(map(float, next(inputfile).split()[1:]))
                    self.aooverlaps[j, i:(i+len(temp))] = temp
                    self.aooverlaps[i:(i+len(temp)), j] = temp

        if line[2:24] == "start of program geopt":
            if not self.geoopt:
                # Need to keep only the RMS density change info
                # if this is a geooptz
                self.scftargets = [[self.scftargets[0][0]]]
                if hasattr(self, "scfvalues"):
                    self.scfvalues[0] = [[x[0]] for x in self.scfvalues[0]]
                self.geoopt = True
            else:
                self.scftargets.append([5E-5])

        # Get Geometry Opt convergence information
        #
        #  geometry optimization step  7
        #  energy:            -382.30219111487 hartrees
        #  [ turning on trust-radius adjustment ]
        #  ** restarting optimization from step    6 **
        #
        #
        #  Level shifts adjusted to satisfy step-size constraints
        #   Step size:    0.0360704
        #   Cos(theta):   0.8789215
        #   Final level shift:  -8.6176299E-02
        #
        #  energy change:           2.5819E-04 .  (  5.0000E-05 )
        #  gradient maximum:        5.0947E-03 .  (  4.5000E-04 )
        #  gradient rms:            1.2996E-03 .  (  3.0000E-04 )
        #  displacement maximum:    1.3954E-02 .  (  1.8000E-03 )
        #  displacement rms:        4.6567E-03 .  (  1.2000E-03 )
        #
        if line[2:28] == "geometry optimization step":

            if not hasattr(self, "geovalues"):
                self.geovalues = []
                self.geotargets = numpy.zeros(5, "d")

            gopt_step = int(line.split()[-1])

            energy = next(inputfile)
            blank = next(inputfile)

            # A quick hack for messages that show up right after the energy
            # at this point, which include:
            #   ** restarting optimization from step    2 **
            #   [ turning on trust-radius adjustment ]
            # as found in regression file ptnh3_2_H2O_2_2plus.out and other logfiles.
            restarting_from_1 = False
            while blank.strip():
                if blank.strip() == "** restarting optimization from step    1 **":
                    restarting_from_1 = True
                blank = next(inputfile)

            # One or more blank lines, depending on content.
            line = next(inputfile)
            while not line.strip():
                line = next(inputfile)

            # Note that the level shift message is followed by a blank, too.
            if "Level shifts adjusted" in line:
                while line.strip():
                    line = next(inputfile)
                line = next(inputfile)

            # The first optimization step does not produce an energy change, and
            # ther is also no energy change when the optimization is restarted
            # from step 1 (since step 1 had no change).
            values = []
            target_index = 0
            if (gopt_step == 1) or restarting_from_1:
                values.append(0.0)
                target_index = 1
            while line.strip():
                if len(line) > 40 and line[41] == "(":
                    # A new geo convergence value
                    values.append(float(line[26:37]))
                    self.geotargets[target_index] = float(line[43:54])
                    target_index += 1
                line = next(inputfile)
            self.geovalues.append(values)

        # IR output looks like this:
        #   frequencies        72.45   113.25   176.88   183.76   267.60   312.06
        #   symmetries       Au       Bg       Au       Bu       Ag       Bg
        #   intensities         0.07     0.00     0.28     0.52     0.00     0.00
        #   reduc. mass         1.90     0.74     1.06     1.42     1.19     0.85
        #   force const         0.01     0.01     0.02     0.03     0.05     0.05
        #   C1       X     0.00000  0.00000  0.00000 -0.05707 -0.06716  0.00000
        #   C1       Y     0.00000  0.00000  0.00000  0.00909 -0.02529  0.00000
        #   C1       Z     0.04792 -0.06032 -0.01192  0.00000  0.00000  0.11613
        #   C2       X     0.00000  0.00000  0.00000 -0.06094 -0.04635  0.00000
        #   ... etc. ...
        # This is a complete ouput, some files will not have intensities,
        #   and older Jaguar versions sometimes skip the symmetries.
        if line[2:23] == "start of program freq":

            self.skip_line(inputfile, 'blank')

            # Version 8.3 has two blank lines here, earlier versions just one.
            line = next(inputfile)
            if not line.strip():
                line = next(inputfile)

            self.vibfreqs = []
            self.vibdisps = []
            forceconstants = False
            intensities = False
            while line.strip():
                if "force const" in line:
                    forceconstants = True
                if "intensities" in line:
                    intensities = True
                line = next(inputfile)

            # In older version, the last block had an extra blank line after it,
            # which could be caught. This is not true in newer version (including 8.3),
            # but in general it would be better to bound this loop more strictly.
            freqs = next(inputfile)
            while freqs.strip() and not "imaginary frequencies" in freqs:

                # Number of modes (columns printed in this block).
                nmodes = len(freqs.split())-1

                # Append the frequencies.
                self.vibfreqs.extend(list(map(float, freqs.split()[1:])))
                line = next(inputfile).split()

                # May skip symmetries (older Jaguar versions).
                if line[0] == "symmetries":
                    if not hasattr(self, "vibsyms"):
                        self.vibsyms = []
                    self.vibsyms.extend(list(map(self.normalisesym, line[1:])))
                    line = next(inputfile).split()
                if intensities:
                    if not hasattr(self, "vibirs"):
                        self.vibirs = []
                    self.vibirs.extend(list(map(float, line[1:])))
                    line = next(inputfile).split()
                if forceconstants:
                    line = next(inputfile)

                # Start parsing the displacements.
                # Variable 'q' holds up to 7 lists of triplets.
                q = [[] for i in range(7)]
                for n in range(self.natom):
                    # Variable 'p' holds up to 7 triplets.
                    p = [[] for i in range(7)]
                    for i in range(3):
                        line = next(inputfile)
                        disps = [float(disp) for disp in line.split()[2:]]
                        for j in range(nmodes):
                            p[j].append(disps[j])
                    for i in range(nmodes):
                        q[i].append(p[i])

                self.vibdisps.extend(q[:nmodes])

                self.skip_line(inputfile, 'blank')
                freqs = next(inputfile)

            # Convert new data to arrays.
            self.vibfreqs = numpy.array(self.vibfreqs, "d")
            self.vibdisps = numpy.array(self.vibdisps, "d")
            if hasattr(self, "vibirs"):
                self.vibirs = numpy.array(self.vibirs, "d")

        # Parse excited state output (for CIS calculations).
        # Jaguar calculates only singlet states.
        if line[2:15] == "Excited State":
            if not hasattr(self, "etenergies"):
                self.etenergies = []
            if not hasattr(self, "etoscs"):
                self.etoscs = []
            if not hasattr(self, "etsecs"):
                self.etsecs = []
                self.etsyms = []
            etenergy = float(line.split()[3])
            etenergy = utils.convertor(etenergy, "eV", "cm-1")
            self.etenergies.append(etenergy)

            self.skip_lines(inputfile, ['line', 'line', 'line', 'line'])

            line = next(inputfile)
            self.etsecs.append([])
            # Jaguar calculates only singlet states.
            self.etsyms.append('Singlet-A')
            while line.strip() != "":
                fromMO = int(line.split()[0])-1
                toMO = int(line.split()[2])-1
                coeff = float(line.split()[-1])
                self.etsecs[-1].append([(fromMO, 0), (toMO, 0), coeff])
                line = next(inputfile)
            # Skip 3 lines
            for i in range(4):
                line = next(inputfile)
            strength = float(line.split()[-1])
            self.etoscs.append(strength)

Example 11

Project: STAMP
Source File: HeatmapPlot.py
View license
	def plot(self, profile, statsResults):
		"""Render a two-group feature heatmap into self.fig and redraw.

		Draws (left to right / bottom to top): an optional row dendrogram, a
		row cluster colour bar, an optional column dendrogram with a
		group-membership colour bar, the heatmap itself, a colour-map legend,
		and an optional group legend; also registers a mouse-event handler
		providing per-cell tooltips.

		NOTE(review): this is Python 2 code (uses ``xrange`` and indexes the
		list returned by ``dict.keys()``).

		NOTE(review): ``profile`` is assumed to be a STAMP two-group profile
		object (provides ``profileDict``, ``samplesInGroup1/2``,
		``groupName1/2`` and the ``getActiveFeature*`` accessors) and
		``statsResults`` to carry ``activeFeatures`` -- confirm against
		callers.
		"""

		# determine features to plot
		featuresToPlot = profile.profileDict.keys()
		if self.bPlotOnlyActiveFeatures:
			featuresToPlot = statsResults.activeFeatures

		# Guard clauses: nothing meaningful to draw, or too much data to
		# render responsively (hard limit of 1000 samples / 1000 features).
		if len(featuresToPlot) <= 1 or (len(profile.samplesInGroup1) + len(profile.samplesInGroup2)) <= 1:
			self.emptyAxis()
			return
		elif len(featuresToPlot) > 1000 or len(profile.samplesInGroup1) + len(profile.samplesInGroup2) > 1000:
			QtGui.QApplication.instance().setOverrideCursor(QtGui.QCursor(QtCore.Qt.ArrowCursor))
			QtGui.QMessageBox.information(self, 'Too much data!', 'Heatmap plots are limited to 1000 samples and 1000 features.', QtGui.QMessageBox.Ok)
			QtGui.QApplication.instance().restoreOverrideCursor()
			self.emptyAxis()
			return

		# *** Colour of plot elements
		group1Colour = str(self.preferences['Group colours'][profile.groupName1].name())
		group2Colour = str(self.preferences['Group colours'][profile.groupName2].name())

		# *** Colour map for category dendrogram on left
		# Map the user-facing colour-map name to the matplotlib colormap object.
		if self.colourmap == "Blues":
			self.matrixColourmap = pylab.cm.Blues
		elif self.colourmap == "Blue to red to green":
			self.matrixColourmap = pylab.cm.brg
		elif self.colourmap == "Blue to white to red":
			self.matrixColourmap = pylab.cm.bwr
		elif self.colourmap == "Cool to warm":
			self.matrixColourmap = pylab.cm.cool
		elif self.colourmap == "Grayscale":
			self.matrixColourmap = pylab.cm.gist_yarg
		elif self.colourmap == "Jet":
			self.matrixColourmap = pylab.cm.jet
		elif self.colourmap == "Orange to red":
			self.matrixColourmap = pylab.cm.OrRd
		elif self.colourmap == "Paired":
			self.matrixColourmap = pylab.cm.Paired
		elif self.colourmap == "Purple to green":
			self.matrixColourmap = pylab.cm.PRGn
		elif self.colourmap == "Reds":
			self.matrixColourmap = pylab.cm.Reds
		elif self.colourmap == "Red to blue":
			self.matrixColourmap = pylab.cm.RdBu
		elif self.colourmap == "Red to yellow to blue":
			self.matrixColourmap = pylab.cm.RdYlBu
		elif self.colourmap == "Spectral":
			# NOTE(review): pylab.cm.spectral was removed in matplotlib >= 2.2;
			# verify against the matplotlib version this project targets.
			self.matrixColourmap = pylab.cm.spectral
		elif self.colourmap == "Yellow to orange to red":
			self.matrixColourmap = pylab.cm.YlOrRd

		# *** Get data for each group
		if self.fieldToPlot == "Number of sequences":
			data1, data2 = profile.getActiveFeatureCounts(featuresToPlot)
		else:  # Proportion of sequences (%)
			data1, data2 = profile.getActiveFeatureProportions(featuresToPlot)

		# Build one row per feature: group-1 sample columns followed by the
		# group-2 sample columns (list concatenation via +=).
		matrix = []
		for row in data1:
			matrix.append(row)

		for r in xrange(0, len(data2)):
			matrix[r] += data2[r]

		matrix = numpy.array(matrix)

		# *** Get heatmap data
		colHeaders = profile.samplesInGroup1 + profile.samplesInGroup2
		rowHeaders = featuresToPlot

		# *** Find longest label
		# Optionally truncate labels, and track the longest row/column label
		# so the layout can reserve space for it.
		bTruncate = False
		if self.preferences['Truncate feature names']:
			length = self.preferences['Length of truncated feature names']
			bTruncate = True

		longestLabelLen = 0
		longestRowLabel = ''
		for i in xrange(0, len(rowHeaders)):
			if bTruncate and len(rowHeaders[i]) > length + 3:
				rowHeaders[i] = rowHeaders[i][0:length] + '...'

			if len(rowHeaders[i]) > longestLabelLen:
				longestLabelLen = len(rowHeaders[i])
				longestRowLabel = rowHeaders[i]

		longestLabelLen = 0
		longestColLabel = ''
		for i in xrange(0, len(colHeaders)):
			if bTruncate and len(colHeaders[i]) > length + 3:
				colHeaders[i] = colHeaders[i][0:length] + '...'

			if len(colHeaders[i]) > longestLabelLen:
				longestLabelLen = len(colHeaders[i])
				longestColLabel = colHeaders[i]

		# *** Check sorting method and adjust dendrogram parameters appropriately
		# Non-clustering sort orders have no tree to draw.
		if self.sortRowMethod == 'Alphabetical order' or self.sortRowMethod == 'Mean abundance':
			self.bShowRowDendrogram = False

		if self.sortColMethod == 'Alphabetical order' or self.sortColMethod == 'Mean abundance':
			self.bShowColDendrogram = False

		# *** Set figure size
		self.fig.clear()
		self.fig.set_size_inches(self.figWidth, self.figHeight)

		xLabelBounds, yLabelBounds = self.labelExtents([longestColLabel], 8, 90, [longestRowLabel], 8, 0)

		# position all figure elements
		# All extents below are in figure-fraction coordinates (inches divided
		# by figure width/height), as required by Figure.add_axes.
		colourBarWidthX = 0.2 / self.figWidth
		colourBarWidthY = 0.2 / self.figHeight
		marginX = 0.1 / self.figWidth
		marginY = 0.1 / self.figHeight

		if self.bShowRowDendrogram:
			dendrogramWidth = self.dendrogramWidth / self.figWidth
		else:
			dendrogramWidth = 0.2 / self.figWidth

		if self.bShowColDendrogram:
			dendrogramHeight = self.dendrogramHeight / self.figHeight
		else:
			dendrogramHeight = 0.2 / self.figHeight

		# Square cells: take the limiting dimension after subtracting fixed
		# elements (dendrograms, colour bars, margins, label extents).
		cellSizeX = max((1.0 - 2 * 0.02 - dendrogramWidth - colourBarWidthX - 2 * marginX - yLabelBounds.width), 0.01) * self.figWidth / len(colHeaders)
		cellSizeY = max((1.0 - 2 * 0.02 - dendrogramHeight - colourBarWidthY - 2 * marginY - xLabelBounds.height), 0.01) * self.figHeight / len(rowHeaders)
		cellSize = min(cellSizeX, cellSizeY)

		cellSizeXPer = cellSize / self.figWidth
		cellSizeYPer = cellSize / self.figHeight

		paddingX = 0.5 * (1.0 - dendrogramWidth - 2 * marginX - colourBarWidthX - cellSizeXPer * len(colHeaders) - yLabelBounds.width)
		paddingY = 0.5 * (1.0 - dendrogramHeight - 2 * marginY - colourBarWidthY - cellSizeYPer * len(rowHeaders) - xLabelBounds.height)

		rowDendrogramX = paddingX
		rowDendrogramY = paddingY + (xLabelBounds.height)
		rowDendrogramW = dendrogramWidth
		rowDendrogramH = cellSizeYPer * len(rowHeaders)

		rowClusterBarX = rowDendrogramX + rowDendrogramW + marginX
		rowClusterBarY = rowDendrogramY
		rowClusterBarW = colourBarWidthX
		rowClusterBarH = rowDendrogramH

		colDendrogramX = rowClusterBarX + rowClusterBarW + marginX
		colDendrogramY = rowDendrogramY + rowDendrogramH + marginY + colourBarWidthY + marginY
		colDendrogramW = cellSizeXPer * len(colHeaders)
		colDendrogramH = dendrogramHeight

		colClusterBarX = colDendrogramX
		colClusterBarY = rowDendrogramY + rowDendrogramH + marginY
		colClusterBarW = colDendrogramW
		colClusterBarH = colourBarWidthY

		heatmapX = rowClusterBarX + rowClusterBarW + marginX
		heatmapY = rowDendrogramY
		heatmapW = colDendrogramW
		heatmapH = rowDendrogramH

		legendHeight = 0.2 / self.figHeight
		legendW = min(0.8 * yLabelBounds.width, 1.25 / self.figWidth)
		legendH = legendHeight
		legendX = heatmapX + heatmapW + 0.2 / self.figWidth
		legendY = 1.0 - legendHeight - (2 * yLabelBounds.height) - marginY
		if not self.bShowColDendrogram:
			# move legend to side
			legendX = heatmapX + 0.5 * (heatmapW - legendW)
			legendY = heatmapY + heatmapH + (1.5 * yLabelBounds.height) + 0.1 / self.figWidth

		# plot dendrograms
		# leafIndex1/leafIndex2 are the row/column permutations applied to the
		# matrix; ind1/ind2 (cluster assignments) only exist when clustering.
		if self.sortRowMethod == 'Alphabetical order':
			leafIndex1 = numpy.argsort(rowHeaders)[::-1]
		elif self.sortRowMethod == 'Mean abundance':
			leafIndex1 = numpy.argsort(numpy.mean(matrix, axis=1))
		else:
			axisRowDendrogram = self.fig.add_axes([rowDendrogramX, rowDendrogramY, rowDendrogramW, rowDendrogramH], frame_on=False)
			ind1, leafIndex1 = self.plotDendrogram(matrix, axisRowDendrogram, self.sortRowMethod, self.clusteringRowThreshold, 'right', bPlot=self.bShowRowDendrogram)

		if self.sortColMethod == 'Alphabetical order':
			leafIndex2 = numpy.argsort(colHeaders)
		elif self.sortColMethod == 'Mean abundance':
			leafIndex2 = numpy.argsort(numpy.mean(matrix, axis=0))
		else:
			axisColDendrogram = self.fig.add_axes([colDendrogramX, colDendrogramY, colDendrogramW, colDendrogramH], frame_on=False)
			ind2, leafIndex2 = self.plotDendrogram(matrix.T, axisColDendrogram, self.sortColMethod, self.clusteringColThreshold, 'top', bPlot=self.bShowColDendrogram)

		# *** Handle mouse events
		# Precompute one tooltip per heatmap cell (feature, sample, value),
		# indexed in the permuted (displayed) order.
		xCell = []
		yCell = []
		tooltips = []
		for x in xrange(0, len(colHeaders)):
			for y in xrange(0, len(rowHeaders)):
				xCell.append(x)
				yCell.append(y)

				tooltip = rowHeaders[leafIndex1[y]] + ', ' + colHeaders[leafIndex2[x]] + '\n'

				if self.fieldToPlot == "Number of sequences":
					tooltip += '%d' % (matrix[leafIndex1[y]][leafIndex2[x]])
				else:
					tooltip += '%.3f' % (matrix[leafIndex1[y]][leafIndex2[x]]) + '%'
				tooltips.append(tooltip)

		self.plotEventHandler = 	PlotEventHandler(xCell, yCell, tooltips, 0.4, 0.4)
		self.mouseEventCallback(self.plotEventHandler)

		# plot column clustering bars
		# One colour per displayed column, indicating group membership.
		sampleColourMap = []

		for i in leafIndex2:
			if colHeaders[i] in profile.samplesInGroup1:
				sampleColourMap.append(group1Colour)
			else:
				sampleColourMap.append(group2Colour)

		sampleColourMap = mpl.colors.ListedColormap(sampleColourMap)
		matrix = matrix[:, leafIndex2]

		if self.bShowColDendrogram:
			ind2 = ind2[leafIndex2]
			axc = self.fig.add_axes([colClusterBarX, colClusterBarY, colClusterBarW, colClusterBarH])  # axes for column side colorbar
			dc = numpy.array(numpy.arange(len(leafIndex2)), dtype=int)
			dc.shape = (1, len(leafIndex2))
			axc.matshow(dc, aspect='auto', origin='lower', cmap=sampleColourMap)
			axc.set_xticks([])
			axc.set_yticks([])

		# plot row clustering bars
		matrix = matrix[leafIndex1, :]

		if self.bShowRowDendrogram:
			ind1 = ind1[leafIndex1]
			axr = self.fig.add_axes([rowClusterBarX, rowClusterBarY, rowClusterBarW, rowClusterBarH])
			dr = numpy.array(ind1, dtype=int)
			dr.shape = (len(ind1), 1)
			axr.matshow(dr, aspect='auto', origin='lower', cmap=self.discreteColourMap)
			axr.set_xticks([])
			axr.set_yticks([])

		# determine scale for colour map
		minValue = 1e6
		maxValue = 0
		for row in matrix:
			minValue = min(minValue, min(row))
			maxValue = max(maxValue, max(row))
		norm = mpl.colors.Normalize(minValue, maxValue)

		# plot heatmap
		axisHeatmap = self.fig.add_axes([heatmapX, heatmapY, heatmapW, heatmapH])

		axisHeatmap.matshow(matrix, origin='lower', cmap=self.matrixColourmap, norm=norm)
		axisHeatmap.set_xticks([])
		axisHeatmap.set_yticks([])

		# row and column labels
		labelOffset = 0.5 * (yLabelBounds.height / cellSizeYPer)
		for i in xrange(0, len(rowHeaders)):
			axisHeatmap.text(matrix.shape[1] - 0.5, i - labelOffset, '  ' + rowHeaders[leafIndex1[i]], horizontalalignment="left")

		labelOffset = 0.5 * (xLabelBounds.width / cellSizeXPer)
		for i in xrange(0, len(colHeaders)):
			axisHeatmap.text(i - labelOffset, -0.5, '  ' + colHeaders[leafIndex2[i]], rotation='vertical', verticalalignment="top")

		# plot colour map legend
		axisColourMap = self.fig.add_axes([legendX, legendY, legendW, legendH], frame_on=False)  # axes for colorbar
		colourBar = mpl.colorbar.ColorbarBase(axisColourMap, cmap=self.matrixColourmap, norm=norm, orientation='horizontal')

		if self.fieldToPlot == "Number of sequences":
			axisColourMap.set_title("# sequences")
		else:
			axisColourMap.set_title("abundance (%)")

		# Ticks at min, midpoint, and max of the data range.
		colourBar.set_ticks([minValue, 0.5 * (maxValue - minValue) + minValue, maxValue])
		colourBar.set_ticklabels(['%.1f' % minValue, '%.1f' % (0.5 * (maxValue - minValue) + minValue), '%.1f' % maxValue])

		# plot column and row lines
		# White grid lines drawn between cells.
		for i in xrange(0, len(rowHeaders)):
			axisHeatmap.plot([-0.5, len(colHeaders) - 0.5], [i - 0.5, i - 0.5], color='white', linestyle='-', linewidth=1.5)

		for i in xrange(0, len(colHeaders)):
			axisHeatmap.plot([i - 0.5, i - 0.5], [-0.5, len(rowHeaders) - 0.5], color='white', linestyle='-', linewidth=1.5)

		# plot legend
		# legendPos == -1 means "no legend".
		if self.legendPos != -1:
			legend1 = Rectangle((0, 0), 1, 1, fc=group1Colour)
			legend2 = Rectangle((0, 0), 1, 1, fc=group2Colour)
			legend = self.fig.legend([legend1, legend2], (profile.groupName1, profile.groupName2), loc=self.legendPos, ncol=1)
			legend.get_frame().set_linewidth(0)

		self.updateGeometry()
		self.draw()

Example 12

Project: STAMP
Source File: HeatmapPlot.py
View license
	def plot(self, profile, statsResults):
		"""Draw a clustered heatmap of feature abundance across active samples.

		Rows are the profiled features and columns are the active samples;
		optional row/column dendrograms, cluster colour bars, a colour-map
		legend and a group legend are laid out around the central heatmap.

		profile      - data source; must provide profileDict, activeGroupNames,
		               activeSamplesInGroups, getSampleGroup and the
		               getActiveFeatureFromActiveSamples* accessors used below.
		statsResults - provides activeFeatures (used when only active features
		               are plotted).
		"""

		featuresToPlot = profile.profileDict.keys()
		if self.bPlotOnlyActiveFeatures:
			featuresToPlot = statsResults.activeFeatures

		# nothing to cluster with fewer than 2 features or 2 groups; refuse
		# overly large data sets outright
		if len(featuresToPlot) <= 1 or len(profile.activeGroupNames) <= 1:
			self.emptyAxis()
			return
		elif len(featuresToPlot) > 1000 or len(profile.activeSamplesInGroups) > 1000:
			QtGui.QApplication.instance().setOverrideCursor(QtGui.QCursor(QtCore.Qt.ArrowCursor))
			QtGui.QMessageBox.information(self, 'Too much data!', 'Heatmap plots are limited to 1000 samples and 1000 features.', QtGui.QMessageBox.Ok)
			QtGui.QApplication.instance().restoreOverrideCursor()
			self.emptyAxis()
			return

		# *** Colour of plot elements
		groupColours = {}
		for groupName in profile.activeGroupNames:
			groupColours[groupName] = str(self.preferences['Group colours'][groupName].name())

		# *** Colour map for category dendrogram on left
		if self.colourmap == "Blues":
			self.matrixColourmap = pylab.cm.Blues
		elif self.colourmap == "Blue to red to green":
			self.matrixColourmap = pylab.cm.brg
		elif self.colourmap == "Blue to white to red":
			self.matrixColourmap = pylab.cm.bwr
		elif self.colourmap == "Cool to warm":
			self.matrixColourmap = pylab.cm.cool
		elif self.colourmap == "Grayscale":
			self.matrixColourmap = pylab.cm.gist_yarg
		elif self.colourmap == "Jet":
			self.matrixColourmap = pylab.cm.jet
		elif self.colourmap == "Orange to red":
			self.matrixColourmap = pylab.cm.OrRd
		elif self.colourmap == "Paired":
			self.matrixColourmap = pylab.cm.Paired
		elif self.colourmap == "Purple to green":
			self.matrixColourmap = pylab.cm.PRGn
		elif self.colourmap == "Reds":
			self.matrixColourmap = pylab.cm.Reds
		elif self.colourmap == "Red to blue":
			self.matrixColourmap = pylab.cm.RdBu
		elif self.colourmap == "Red to yellow to blue":
			self.matrixColourmap = pylab.cm.RdYlBu
		elif self.colourmap == "Spectral":
			self.matrixColourmap = pylab.cm.spectral
		elif self.colourmap == "Yellow to orange to red":
			self.matrixColourmap = pylab.cm.YlOrRd

		# *** Get data for each group
		if self.fieldToPlot == "Number of sequences":
			data = profile.getActiveFeatureFromActiveSamplesCounts(featuresToPlot)
		else:  # Proportion of sequences (%)
			data = profile.getActiveFeatureFromActiveSamplesProportions(featuresToPlot)

		# flatten the per-group sample lists into one row per feature
		matrix = []
		for r in xrange(0, len(data)):
			row = []
			for d in data[r]:
				row += d
			matrix.append(row)

		matrix = numpy.array(matrix)

		# *** Get heatmap data
		colHeaders = []
		for sampleNames in profile.activeSamplesInGroups:
			colHeaders += sampleNames
		rowHeaders = featuresToPlot

		# *** Find longest label
		bTruncate = False
		if self.preferences['Truncate feature names']:
			length = self.preferences['Length of truncated feature names']
			bTruncate = True

		longestLabelLen = 0
		longestRowLabel = ''
		for i in xrange(0, len(rowHeaders)):
			if bTruncate and len(rowHeaders[i]) > length + 3:
				rowHeaders[i] = rowHeaders[i][0:length] + '...'

			if len(rowHeaders[i]) > longestLabelLen:
				longestLabelLen = len(rowHeaders[i])
				longestRowLabel = rowHeaders[i]

		longestLabelLen = 0
		longestColLabel = ''
		for i in xrange(0, len(colHeaders)):
			if bTruncate and len(colHeaders[i]) > length + 3:
				colHeaders[i] = colHeaders[i][0:length] + '...'

			if len(colHeaders[i]) > longestLabelLen:
				longestLabelLen = len(colHeaders[i])
				longestColLabel = colHeaders[i]

		# *** Check sorting method and adjust dendrogram parameters appropriately
		if self.sortRowMethod == 'Alphabetical order' or self.sortRowMethod == 'Mean abundance':
			self.bShowRowDendrogram = False

		if self.sortColMethod == 'Alphabetical order' or self.sortColMethod == 'Mean abundance':
			self.bShowColDendrogram = False

		# *** Set figure size
		self.fig.clear()
		self.fig.set_size_inches(self.figWidth, self.figHeight)

		xLabelBounds, yLabelBounds = self.labelExtents([longestColLabel], 8, 90, [longestRowLabel], 8, 0)

		# position all figure elements (coordinates below are fractions of
		# the figure width/height, as required by fig.add_axes)
		colourBarWidthX = 0.2 / self.figWidth
		colourBarWidthY = 0.2 / self.figHeight
		marginX = 0.1 / self.figWidth
		marginY = 0.1 / self.figHeight

		if self.bShowRowDendrogram:
			dendrogramWidth = self.dendrogramWidth / self.figWidth
		else:
			dendrogramWidth = 0.2 / self.figWidth

		if self.bShowColDendrogram:
			dendrogramHeight = self.dendrogramHeight / self.figHeight
		else:
			dendrogramHeight = 0.2 / self.figHeight

		# square heatmap cells: take the limiting dimension
		cellSizeX = max((1.0 - 2 * 0.02 - dendrogramWidth - colourBarWidthX - 2 * marginX - yLabelBounds.width), 0.01) * self.figWidth / len(colHeaders)
		cellSizeY = max((1.0 - 2 * 0.02 - dendrogramHeight - colourBarWidthY - 2 * marginY - xLabelBounds.height), 0.01) * self.figHeight / len(rowHeaders)
		cellSize = min(cellSizeX, cellSizeY)

		cellSizeXPer = cellSize / self.figWidth
		cellSizeYPer = cellSize / self.figHeight

		paddingX = 0.5 * (1.0 - dendrogramWidth - 2 * marginX - colourBarWidthX - cellSizeXPer * len(colHeaders) - yLabelBounds.width)
		paddingY = 0.5 * (1.0 - dendrogramHeight - 2 * marginY - colourBarWidthY - cellSizeYPer * len(rowHeaders) - xLabelBounds.height)

		rowDendrogramX = paddingX
		rowDendrogramY = paddingY + (xLabelBounds.height)
		rowDendrogramW = dendrogramWidth
		rowDendrogramH = cellSizeYPer * len(rowHeaders)

		rowClusterBarX = rowDendrogramX + rowDendrogramW + marginX
		rowClusterBarY = rowDendrogramY
		rowClusterBarW = colourBarWidthX
		rowClusterBarH = rowDendrogramH

		colDendrogramX = rowClusterBarX + rowClusterBarW + marginX
		colDendrogramY = rowDendrogramY + rowDendrogramH + marginY + colourBarWidthY + marginY
		colDendrogramW = cellSizeXPer * len(colHeaders)
		colDendrogramH = dendrogramHeight

		colClusterBarX = colDendrogramX
		colClusterBarY = rowDendrogramY + rowDendrogramH + marginY
		colClusterBarW = colDendrogramW
		colClusterBarH = colourBarWidthY

		heatmapX = rowClusterBarX + rowClusterBarW + marginX
		heatmapY = rowDendrogramY
		heatmapW = colDendrogramW
		heatmapH = rowDendrogramH

		legendHeight = 0.2 / self.figHeight
		legendW = min(0.8 * yLabelBounds.width, 1.25 / self.figWidth)
		legendH = legendHeight
		legendX = heatmapX + heatmapW + 0.2 / self.figWidth
		legendY = 1.0 - legendHeight - (2 * yLabelBounds.height) - marginY
		if not self.bShowColDendrogram:
			# move legend to side
			legendX = heatmapX + 0.5 * (heatmapW - legendW)
			legendY = heatmapY + heatmapH + (1.5 * yLabelBounds.height) + 0.1 / self.figWidth

		# plot dendrograms
		# leafIndex1/leafIndex2 give the display order of rows/columns;
		# ind1/ind2 are bound only on the clustering paths and feed the
		# cluster colour bars below
		if self.sortRowMethod == 'Alphabetical order':
			leafIndex1 = numpy.argsort(rowHeaders)[::-1]
		elif self.sortRowMethod == 'Mean abundance':
			leafIndex1 = numpy.argsort(numpy.mean(matrix, axis=1))
		else:
			axisRowDendrogram = self.fig.add_axes([rowDendrogramX, rowDendrogramY, rowDendrogramW, rowDendrogramH], frame_on=False)
			ind1, leafIndex1 = self.plotDendrogram(matrix, axisRowDendrogram, self.sortRowMethod, self.clusteringRowThreshold, 'right', bPlot=self.bShowRowDendrogram)

		if self.sortColMethod == 'Alphabetical order':
			leafIndex2 = numpy.argsort(colHeaders)
		elif self.sortColMethod == 'Mean abundance':
			leafIndex2 = numpy.argsort(numpy.mean(matrix, axis=0))
		else:
			axisColDendrogram = self.fig.add_axes([colDendrogramX, colDendrogramY, colDendrogramW, colDendrogramH], frame_on=False)
			ind2, leafIndex2 = self.plotDendrogram(matrix.T, axisColDendrogram, self.sortColMethod, self.clusteringColThreshold, 'top', bPlot=self.bShowColDendrogram)

		# *** Handle mouse events
		xCell = []
		yCell = []
		tooltips = []
		for x in xrange(0, len(colHeaders)):
			for y in xrange(0, len(rowHeaders)):
				xCell.append(x)
				yCell.append(y)

				tooltip = rowHeaders[leafIndex1[y]] + ', ' + colHeaders[leafIndex2[x]] + '\n'

				if self.fieldToPlot == "Number of sequences":
					tooltip += '%d' % (matrix[leafIndex1[y]][leafIndex2[x]])
				else:
					tooltip += '%.3f' % (matrix[leafIndex1[y]][leafIndex2[x]]) + '%'
				tooltips.append(tooltip)

		self.plotEventHandler = 	PlotEventHandler(xCell, yCell, tooltips, 0.4, 0.4)
		self.mouseEventCallback(self.plotEventHandler)

		# plot column clustering bars
		sampleColourMap = []

		for i in leafIndex2:
			groupName = profile.getSampleGroup(colHeaders[i])
			sampleColourMap.append(groupColours[groupName])

		sampleColourMap = mpl.colors.ListedColormap(sampleColourMap)
		# reorder columns into display order
		matrix = matrix[:, leafIndex2]

		if self.bShowColDendrogram:
			ind2 = ind2[leafIndex2]
			axc = self.fig.add_axes([colClusterBarX, colClusterBarY, colClusterBarW, colClusterBarH])  # axes for column side colorbar
			dc = numpy.array(numpy.arange(len(leafIndex2)), dtype=int)
			dc.shape = (1, len(leafIndex2))
			axc.matshow(dc, aspect='auto', origin='lower', cmap=sampleColourMap)
			axc.set_xticks([])
			axc.set_yticks([])

		# plot row clustering bars
		matrix = matrix[leafIndex1, :]

		if self.bShowRowDendrogram:
			ind1 = ind1[leafIndex1]
			axr = self.fig.add_axes([rowClusterBarX, rowClusterBarY, rowClusterBarW, rowClusterBarH])
			dr = numpy.array(ind1, dtype=int)
			dr.shape = (len(ind1), 1)
			axr.matshow(dr, aspect='auto', origin='lower', cmap=self.discreteColourMap)
			axr.set_xticks([])
			axr.set_yticks([])

		# determine scale for colour map
		# NOTE(review): initialisers assume the data lie within [0, 1e6];
		# confirm abundances/counts are always non-negative
		minValue = 1e6
		maxValue = 0
		for row in matrix:
			minValue = min(minValue, min(row))
			maxValue = max(maxValue, max(row))
		norm = mpl.colors.Normalize(minValue, maxValue)

		# plot heatmap
		axisHeatmap = self.fig.add_axes([heatmapX, heatmapY, heatmapW, heatmapH])
		axisHeatmap.matshow(matrix, origin='lower', cmap=self.matrixColourmap, norm=norm)
		axisHeatmap.set_xticks([])
		axisHeatmap.set_yticks([])

		# row and column labels
		labelOffset = 0.5 * (yLabelBounds.height / cellSizeYPer)
		for i in xrange(0, len(rowHeaders)):
			axisHeatmap.text(matrix.shape[1] - 0.5, i - labelOffset, '  ' + rowHeaders[leafIndex1[i]], horizontalalignment="left")

		labelOffset = 0.5 * (xLabelBounds.width / cellSizeXPer)
		for i in xrange(0, len(colHeaders)):
			axisHeatmap.text(i - labelOffset, -0.5, '  ' + colHeaders[leafIndex2[i]], rotation='vertical', verticalalignment="top")

		# plot colour map legend
		axisColourMap = self.fig.add_axes([legendX, legendY, legendW, legendH], frame_on=False)  # axes for colorbar
		colourBar = mpl.colorbar.ColorbarBase(axisColourMap, cmap=self.matrixColourmap, norm=norm, orientation='horizontal')

		if self.fieldToPlot == "Number of sequences":
			axisColourMap.set_title("# sequences")
		else:
			axisColourMap.set_title("abundance (%)")

		# three ticks: minimum, midpoint, maximum
		colourBar.set_ticks([minValue, 0.5 * (maxValue - minValue) + minValue, maxValue])
		colourBar.set_ticklabels(['%.1f' % minValue, '%.1f' % (0.5 * (maxValue - minValue) + minValue), '%.1f' % maxValue])

		# plot column and row lines
		for i in xrange(0, len(rowHeaders)):
			axisHeatmap.plot([-0.5, len(colHeaders) - 0.5], [i - 0.5, i - 0.5], color='white', linestyle='-', linewidth=1.5)

		for i in xrange(0, len(colHeaders)):
			axisHeatmap.plot([i - 0.5, i - 0.5], [-0.5, len(rowHeaders) - 0.5], color='white', linestyle='-', linewidth=1.5)

		# plot legend
		if self.legendPos != -1:
			groupRecs = []
			groups = []
			for group, colour in groupColours.iteritems():
				groupRecs.append(Rectangle((0, 0), 1, 1, fc=colour))
				groups.append(group)

			legend = self.fig.legend(groupRecs, groups, loc=self.legendPos, ncol=int(len(groups) / 2 + 0.5))
			legend.get_frame().set_linewidth(0)

		self.updateGeometry()
		self.draw()

Example 13

Project: PyGazeAnalyser
Source File: edfreader.py
View license
def read_edf(filename, start, stop=None, missing=0.0, debug=False):

	"""Returns a list with dicts for every trial. A trial dict contains the
	following keys:
		x		-	numpy array of x positions
		y		-	numpy array of y positions
		size		-	numpy array of pupil size
		time		-	numpy array of timestamps, t=0 at trialstart
		trackertime	-	numpy array of timestamps, according to EDF
		events	-	dict with the following keys:
						Sfix	-	list of lists, each containing [starttime]
						Ssac	-	list of lists, each containing [starttime]
						Sblk	-	list of lists, each containing [starttime]
						Efix	-	list of lists, each containing [starttime, endtime, duration, endx, endy]
						Esac	-	list of lists, each containing [starttime, endtime, duration, startx, starty, endx, endy]
						Eblk	-	list of lists, each containing [starttime, endtime, duration]
						msg	-	list of lists, each containing [time, message]
						NOTE: timing is in EDF time!
	
	arguments
	filename		-	path to the file that has to be read
	start		-	trial start string
	
	keyword arguments
	stop			-	trial ending string (default = None)
	missing		-	value to be used for missing data (default = 0.0)
	debug		-	Boolean indicating if DEBUG mode should be on or off;
				if DEBUG mode is on, information on what the script
				currently is doing will be printed to the console
				(default = False)
	
	returns
	data			-	a list with a dict for every trial (see above)

	raises
	Exception		-	if the file does not exist
	"""

	# # # # #
	# debug mode

	if debug:
		def message(msg):
			print(msg)
	else:
		def message(msg):
			pass

	# # # # #
	# file handling

	# check if the file exists
	if not os.path.isfile(filename):
		raise Exception("Error in read_edf: file '%s' does not exist" % filename)

	# read file contents; the context manager guarantees the handle is
	# closed even if reading raises (the old code leaked it on error)
	message("reading file '%s'" % filename)
	with open(filename, 'r') as f:
		raw = f.readlines()

	# # # # #
	# parse lines

	# per-trial accumulators
	data = []
	x = []
	y = []
	size = []
	time = []
	trackertime = []
	events = {'Sfix':[],'Ssac':[],'Sblk':[],'Efix':[],'Esac':[],'Eblk':[],'msg':[]}
	starttime = 0
	started = False
	trialend = False
	finalline = raw[-1]

	# loop through all lines
	for line in raw:

		# check if trial has already started
		if started:
			# only check for stop if there is one
			if stop != None:
				if stop in line:
					started = False
					trialend = True
			# without an explicit stop string, a new start marker (or the
			# final line of the file) ends the current trial
			else:
				if (start in line) or (line == finalline):
					started = True
					trialend = True

			# # # # #
			# trial ending

			if trialend:
				message("trialend %d; %d samples found" % (len(data), len(x)))
				# trial dict
				trial = {}
				trial['x'] = numpy.array(x)
				trial['y'] = numpy.array(y)
				trial['size'] = numpy.array(size)
				trial['time'] = numpy.array(time)
				trial['trackertime'] = numpy.array(trackertime)
				trial['events'] = copy.deepcopy(events)
				# add trial to data
				data.append(trial)
				# reset accumulators for the next trial
				x = []
				y = []
				size = []
				time = []
				trackertime = []
				events = {'Sfix':[],'Ssac':[],'Sblk':[],'Efix':[],'Esac':[],'Eblk':[],'msg':[]}
				trialend = False

		# check if the current line contains start message
		else:
			if start in line:
				message("trialstart %d" % len(data))
				started = True
				# the timestamp sits between the tab after 'MSG' and the
				# first space, e.g. "MSG\t12345 trialstart"
				starttime = int(line[line.find('\t')+1:line.find(' ')])

		# # # # #
		# parse line

		if started:
			# message lines will start with MSG, followed by a tab, then a
			# timestamp, a space, and finally the message, e.g.:
			#	"MSG\t12345 something of importance here"
			if line[0:3] == "MSG":
				ms = line.find(" ") # message start
				t = int(line[4:ms]) # time
				m = line[ms+1:] # message
				events['msg'].append([t,m])

			# EDF event lines are constructed of 9 characters, followed by
			# tab separated values; these values MAY CONTAIN SPACES, but
			# these spaces are ignored by float()

			# fixation start
			elif line[0:4] == "SFIX":
				message("fixation start")
				l = line[9:]
				events['Sfix'].append(int(l))
			# fixation end
			elif line[0:4] == "EFIX":
				message("fixation end")
				l = line[9:]
				l = l.split('\t')
				st = int(l[0]) # starting time
				et = int(l[1]) # ending time
				dur = int(l[2]) # duration
				sx = replace_missing(l[3], missing=missing) # x position
				sy = replace_missing(l[4], missing=missing) # y position
				events['Efix'].append([st, et, dur, sx, sy])
			# saccade start
			elif line[0:5] == 'SSACC':
				message("saccade start")
				l = line[9:]
				events['Ssac'].append(int(l))
			# saccade end
			elif line[0:5] == "ESACC":
				message("saccade end")
				l = line[9:]
				l = l.split('\t')
				st = int(l[0]) # starting time
				et = int(l[1]) # ending time
				dur = int(l[2]) # duration
				sx = replace_missing(l[3], missing=missing) # start x position
				sy = replace_missing(l[4], missing=missing) # start y position
				ex = replace_missing(l[5], missing=missing) # end x position
				ey = replace_missing(l[6], missing=missing) # end y position
				events['Esac'].append([st, et, dur, sx, sy, ex, ey])
			# blink start
			elif line[0:6] == "SBLINK":
				message("blink start")
				l = line[9:]
				events['Sblk'].append(int(l))
			# blink end
			elif line[0:6] == "EBLINK":
				message("blink end")
				l = line[9:]
				l = l.split('\t')
				st = int(l[0])
				et = int(l[1])
				dur = int(l[2])
				events['Eblk'].append([st,et,dur])

			# regular lines will contain tab separated values, beginning with
			# a timestamp, followed by the values that were asked to be stored
			# in the EDF and a mysterious '...'. Usually, this comes down to
			# timestamp, x, y, pupilsize, ...
			# e.g.: "985288\t  504.6\t  368.2\t 4933.0\t..."
			# NOTE: these values MAY CONTAIN SPACES, but these spaces are
			# ignored by float()
			else:
				# see if current line contains relevant data
				try:
					# split by tab
					l = line.split('\t')
					# if first entry is a timestamp, this should work
					int(l[0])
				except (ValueError, IndexError):
					# narrow except: only "not a sample line" is expected
					# here; any other error should surface, not be hidden
					message("line '%s' could not be parsed" % line)
					continue # skip this line

				# a pupil size of 0 marks missing data; substitute the
				# caller-supplied 'missing' value for x/y (the old code
				# hard-coded 0.0 here, ignoring the 'missing' argument)
				if float(l[3]) == 0.0:
					l[1] = missing
					l[2] = missing

				# extract data
				x.append(float(l[1]))
				y.append(float(l[2]))
				size.append(float(l[3]))
				time.append(int(l[0])-starttime)
				trackertime.append(int(l[0]))


	# # # # #
	# return

	return data

Example 14

Project: PyGazeAnalyser
Source File: eyetribereader.py
View license
def read_eyetribe(filename, start, stop=None, missing=0.0, debug=False):

	"""Returns a list with dicts for every trial. A trial dict contains the
	following keys:
		x		-	numpy array of x positions
		y		-	numpy array of y positions
		size		-	numpy array of pupil size
		time		-	numpy array of timestamps, t=0 at trialstart
		trackertime-	numpy array of timestamps, according to the tracker
		events	-	dict with the following keys:
						Sfix	-	list of lists, each containing [starttime]
						Ssac	-	EMPTY! list of lists, each containing [starttime]
						Sblk	-	list of lists, each containing [starttime]
						Efix	-	list of lists, each containing [starttime, endtime, duration, endx, endy]
						Esac	-	EMPTY! list of lists, each containing [starttime, endtime, duration, startx, starty, endx, endy]
						Eblk	-	list of lists, each containing [starttime, endtime, duration]
						msg	-	list of lists, each containing [time, message]
						NOTE: timing is in EyeTribe time!
	
	arguments

	filename		-	path to the file that has to be read
	start		-	trial start string
	
	keyword arguments

	stop		-	trial ending string (default = None)
	missing	-	value to be used for missing data (default = 0.0)
	debug	-	Boolean indicating if DEBUG mode should be on or off;
				if DEBUG mode is on, information on what the script
				currently is doing will be printed to the console
				(default = False)
	
	returns

	data		-	a list with a dict for every trial (see above)

	raises

	Exception	-	if the file does not exist
	"""

	# # # # #
	# debug mode

	if debug:
		def message(msg):
			print(msg)
	else:
		def message(msg):
			pass

	# # # # #
	# file handling

	# check if the file exists
	if not os.path.isfile(filename):
		raise Exception("Error in read_eyetribe: file '%s' does not exist" % filename)

	# read file contents; the context manager guarantees the handle is
	# closed even if reading raises (the old code leaked it on error)
	message("reading file '%s'" % filename)
	with open(filename, 'r') as f:
		raw = f.readlines()

	# # # # #
	# parse lines

	# per-trial accumulators
	data = []
	x = []
	y = []
	size = []
	time = []
	trackertime = []
	events = {'Sfix':[],'Ssac':[],'Sblk':[],'Efix':[],'Esac':[],'Eblk':[],'msg':[]}
	starttime = 0
	started = False
	trialend = False

	# loop through all lines
	for i in range(len(raw)):

		# string to list
		line = raw[i].replace('\n','').replace('\r','').split('\t')

		# check if trial has already started
		if started:
			# only check for stop if there is one
			if stop != None:
				if (line[0] == 'MSG' and stop in line[3]) or i == len(raw)-1:
					started = False
					trialend = True
			# check for new start otherwise
			else:
				if start in line or i == len(raw)-1:
					started = True
					trialend = True

			# # # # #
			# trial ending

			if trialend:
				message("trialend %d; %d samples found" % (len(data),len(x)))
				# trial dict
				trial = {}
				trial['x'] = numpy.array(x)
				trial['y'] = numpy.array(y)
				trial['size'] = numpy.array(size)
				trial['time'] = numpy.array(time)
				trial['trackertime'] = numpy.array(trackertime)
				trial['events'] = copy.deepcopy(events)
				# blinks, fixations and saccades are detected offline from
				# the raw samples
				trial['events']['Sblk'], trial['events']['Eblk'] = blink_detection(trial['x'],trial['y'],trial['trackertime'],missing=missing)
				trial['events']['Sfix'], trial['events']['Efix'] = fixation_detection(trial['x'],trial['y'],trial['trackertime'],missing=missing)
				trial['events']['Ssac'], trial['events']['Esac'] = saccade_detection(trial['x'],trial['y'],trial['trackertime'],missing=missing)
				# add trial to data
				data.append(trial)
				# reset accumulators for the next trial
				x = []
				y = []
				size = []
				time = []
				trackertime = []
				events = {'Sfix':[],'Ssac':[],'Sblk':[],'Efix':[],'Esac':[],'Eblk':[],'msg':[]}
				trialend = False

		# check if the current line contains start message
		else:
			if line[0] == "MSG":
				if start in line[3]:
					message("trialstart %d" % len(data))
					# set started to True
					started = True
					# find starting time
					starttime = int(line[2])

		# # # # #
		# parse line

		if started:
			# message lines will start with MSG, followed by a tab, then a
			# timestamp, a tab, the time, a tab and the message, e.g.:
			#	"MSG\t2014-07-01 17:02:33.770\t853589802\tsomething of importance here"
			if line[0] == "MSG":
				t = int(line[2]) # time
				m = line[3] # message
				events['msg'].append([t,m])

			# regular lines will contain tab separated values, beginning with
			# a timestamp, followed by the values that were asked to be stored
			# in the data file. Usually, this comes down to
			# timestamp, time, fix, state, rawx, rawy, avgx, avgy, psize,
			# Lrawx, Lrawy, Lavgx, Lavgy, Lpsize, Lpupilx, Lpupily,
			# Rrawx, Rrawy, Ravgx, Ravgy, Rpsize, Rpupilx, Rpupily
			# e.g.:
			# '2014-07-01 17:02:33.770, 853589802, False, 7, 512.5897, 510.8104, 614.6975, 614.3327, 16.8657,
			# 523.3592, 475.2756, 511.1529, 492.7412, 16.9398, 0.4037, 0.5209,
			# 501.8202, 546.3453, 609.3405, 623.2287, 16.7916, 0.5539, 0.5209'
			else:
				# parse every field first, THEN append, so a malformed line
				# cannot leave x/y/size/time out of sync (the old code
				# appended inside the try and could bail out halfway)
				try:
					px = float(line[6])
					py = float(line[7])
					ps = float(line[8])
					tt = int(line[1])
				except (ValueError, IndexError):
					# narrow except: only "not a sample line" is expected
					message("line '%s' could not be parsed" % line)
					continue # skip this line
				# extract data
				x.append(px)
				y.append(py)
				size.append(ps)
				time.append(tt - starttime)
				trackertime.append(tt)

	# # # # #
	# return

	return data

Example 15

Project: PyGazeAnalyser
Source File: idfreader.py
View license
def read_idf(filename, start, stop=None, missing=0.0, debug=False):

	"""Returns a list with dicts for every trial. A trial dict contains the
	following keys:
		x		-	numpy array of x positions
		y		-	numpy array of y positions
		size		-	numpy array of pupil size
		time		-	numpy array of timestamps, t=0 at trialstart
		trackertime-	numpy array of timestamps, according to the tracker
		events	-	dict with the following keys:
						Sfix	-	list of lists, each containing [starttime]
						Ssac	-	EMPTY! list of lists, each containing [starttime]
						Sblk	-	list of lists, each containing [starttime]
						Efix	-	list of lists, each containing [starttime, endtime, duration, endx, endy]
						Esac	-	EMPTY! list of lists, each containing [starttime, endtime, duration, startx, starty, endx, endy]
						Eblk	-	list of lists, each containing [starttime, endtime, duration]
						msg	-	list of lists, each containing [time, message]
						NOTE: timing is in tracker time!
	
	arguments

	filename		-	path to the file that has to be read
	start		-	trial start string
	
	keyword arguments

	stop		-	trial ending string (default = None)
	missing	-	value to be used for missing data (default = 0.0)
	debug	-	Boolean indicating if DEBUG mode should be on or off;
				if DEBUG mode is on, information on what the script
				currently is doing will be printed to the console
				(default = False)
	
	returns

	data		-	a list with a dict for every trial (see above)

	raises

	Exception	-	if the file does not exist
	"""

	# # # # #
	# debug mode

	if debug:
		def message(msg):
			print(msg)
	else:
		def message(msg):
			pass

	# # # # #
	# file handling

	# check if the file exists (the old error message said 'read_eyetribe';
	# corrected to name this function)
	if not os.path.isfile(filename):
		raise Exception("Error in read_idf: file '%s' does not exist" % filename)

	# read file contents; the context manager guarantees the handle is
	# closed even if reading raises (the old code leaked it on error)
	message("reading file '%s'" % filename)
	with open(filename, 'r') as f:
		raw = f.readlines()

	# # # # #
	# parse lines

	# per-trial accumulators
	data = []
	x = []
	y = []
	size = []
	time = []
	trackertime = []
	events = {'Sfix':[],'Ssac':[],'Sblk':[],'Efix':[],'Esac':[],'Eblk':[],'msg':[]}
	starttime = 0
	started = False
	trialend = False
	filestarted = False

	# loop through all lines
	for i in range(len(raw)):

		# string to list
		line = raw[i].replace('\n','').replace('\r','').split('\t')

		# check if the line starts with '##' (denoting header)
		if '##' in line[0]:
			# skip processing
			continue
		elif '##' not in line[0] and not filestarted:
			# the first non-header line holds the column names; look up the
			# indexes for the fields we need (we need to do this, because
			# ASCII outputs of the IDF reader are different, based on
			# whatever the user wanted to extract)
			timei = line.index("Time")
			typei = line.index("Type")
			msgi = -1	# the message text is always the last column
			xi = {'L':None, 'R':None}
			yi = {'L':None, 'R':None}
			sizei = {'L':None, 'R':None}
			if "L POR X [px]" in line:
				xi['L']  = line.index("L POR X [px]")
			if "R POR X [px]" in line:
				xi['R']  = line.index("R POR X [px]")
			if "L POR Y [px]" in line:
				yi['L']  = line.index("L POR Y [px]")
			if "R POR Y [px]" in line:
				yi['R']  = line.index("R POR Y [px]")
			if "L Dia X [px]" in line:
				sizei['L']  = line.index("L Dia X [px]")
			if "R Dia X [px]" in line:
				sizei['R']  = line.index("R Dia X [px]")
			# set filestarted to True, so we don't attempt to extract
			# this info on all consecutive lines
			filestarted = True

		# check if trial has already started
		if started:
			# only check for stop if there is one
			if stop != None:
				if (line[typei] == 'MSG' and stop in line[msgi]) or i == len(raw)-1:
					started = False
					trialend = True
			# check for new start otherwise
			else:
				if start in line or i == len(raw)-1:
					started = True
					trialend = True

			# # # # #
			# trial ending

			if trialend:
				message("trialend %d; %d samples found" % (len(data),len(x)))
				# trial dict
				trial = {}
				trial['x'] = numpy.array(x)
				trial['y'] = numpy.array(y)
				trial['size'] = numpy.array(size)
				trial['time'] = numpy.array(time)
				trial['trackertime'] = numpy.array(trackertime)
				trial['events'] = copy.deepcopy(events)
				# blinks, fixations and saccades are detected offline from
				# the raw samples
				trial['events']['Sblk'], trial['events']['Eblk'] = blink_detection(trial['x'],trial['y'],trial['trackertime'],missing=missing)
				trial['events']['Sfix'], trial['events']['Efix'] = fixation_detection(trial['x'],trial['y'],trial['trackertime'],missing=missing)
				trial['events']['Ssac'], trial['events']['Esac'] = saccade_detection(trial['x'],trial['y'],trial['trackertime'],missing=missing)
				# add trial to data
				data.append(trial)
				# reset accumulators for the next trial
				x = []
				y = []
				size = []
				time = []
				trackertime = []
				events = {'Sfix':[],'Ssac':[],'Sblk':[],'Efix':[],'Esac':[],'Eblk':[],'msg':[]}
				trialend = False

		# check if the current line contains start message
		else:
			if line[typei] == "MSG":
				if start in line[msgi]:
					message("trialstart %d" % len(data))
					# set started to True
					started = True
					# find starting time
					starttime = int(line[timei])

		# # # # #
		# parse line

		if started:
			# message lines will usually start with a timestamp, followed
			# by 'MSG', the trial number and the actual message, e.g.:
			#	"7818328012	MSG	1	# Message: 3"
			if line[typei] == "MSG":
				t = int(line[timei]) # time
				m = line[msgi] # message
				events['msg'].append([t,m])

			# regular lines will contain tab separated values, beginning with
			# a timestamp, followed by the values that were chosen to be
			# extracted by the IDF converter
			else:
				# Combine the left/right eye data for each of x, y and size.
				# The original implementation looped over variable NAMES via
				# exec(), which does not bind function locals under Python 3
				# (NameError) and was needlessly fragile; iterating over the
				# (index-dict, value) pairs directly does the same job.
				# Parse every field first, THEN append, so a malformed line
				# cannot leave x/y/size/time out of sync.
				try:
					vals = []
					for vi in (xi, yi, sizei):
						# nothing
						if vi['L'] == None and vi['R'] == None:
							val = 'not in IDF'
						# only left eye
						elif vi['L'] != None and vi['R'] == None:
							val = float(line[vi['L']])
						# only right eye
						elif vi['L'] == None and vi['R'] != None:
							val = float(line[vi['R']])
						# average the two eyes, but only if they both
						# contain valid data (a value of 0 marks missing)
						else:
							if float(line[vi['L']]) == 0:
								val = float(line[vi['R']])
							elif float(line[vi['R']]) == 0:
								val = float(line[vi['L']])
							else:
								val = (float(line[vi['L']]) + float(line[vi['R']])) / 2.0
						vals.append(val)
					tt = int(line[timei])
				except (ValueError, IndexError):
					# narrow except: only "not a sample line" is expected
					message("line '%s' could not be parsed" % line)
					continue # skip this line
				# extract data on POR and pupil size
				x.append(vals[0])
				y.append(vals[1])
				size.append(vals[2])
				# extract time data
				time.append(tt - starttime)
				trackertime.append(tt)

	# # # # #
	# return

	return data

Example 16

View license
    def __init__(self, x, y, z,
                 x_b, y_b, z_b,
                 x_w, y_w, z_w,
                 l_a,
                 n_c,
                 n_b,
                 l_as=None,
                 cct_w=None,
                 n_cb=None,
                 n_bb=None,
                 x_p=None,
                 y_p=None,
                 z_p=None,
                 p=None,
                 helson_judd=False,
                 discount_illuminant=True,
                 s=None,
                 s_w=None):
        """
        Compute the Hunt colour appearance correlates (hue, saturation,
        brightness, lightness, chroma, colorfulness) for one test sample.

        :param x: X value of test sample :math:`X`.
        :param y: Y value of test sample :math:`Y`.
        :param z: Z value of test sample :math:`Z`.
        :param x_b: X value of background :math:`X_b`.
        :param y_b: Y value of background :math:`Y_b`.
        :param z_b: Z value of background :math:`Z_b`.
        :param x_w: X value of reference white :math:`X_W`.
        :param y_w: Y value of reference white :math:`Y_W`.
        :param z_w: Z value of reference white :math:`Z_W`.
        :param l_a: Adapting luminance :math:`L_A`.
        :param n_c: Chromatic surround induction_factor :math:`N_c`.
        :param n_b: Brightness surround induction factor :math:`N_b`.
        :param l_as: Scotopic luminance of the illuminant :math:`L_{AS}`.
                     Will be approximated if not supplied.
        :param cct_w: Correlated color temperature of illuminant :math:`T`.
                      Will be used to approximate l_as if not supplied.
        :param n_cb: Chromatic background induction factor :math:`N_{cb}`.
                     Will be approximated using y_w and y_b if not supplied.
        :param n_bb: Brightness background induction factor :math:`N_{bb}`.
                     Will be approximated using y_w and y_b if not supplied.
        :param x_p: X value of proximal field :math:`X_p`.
                    If not supplied, will be assumed to equal background.
        :param y_p: Y value of proximal field :math:`Y_p`.
                    If not supplied, will be assumed to equal background.
        :param z_p: Z value of proximal field :math:`Z_p`.
                    If not supplied, will be assumed to equal background.
        :param p: Simultaneous contrast/assimilation parameter.
        :param helson_judd: Truth value indicating whether the Helson-Judd effect should be accounted for.
                            Default False.
        :param discount_illuminant: Truth value whether discount-the-illuminant should be applied. Default True.
        :param s: Scotopic response to the stimulus.
        :param s_w: Scotopic response for the reference white.
        :raises ValueError: if illegal parameter combination is supplied.
        """
        # Proximal field defaults to the background when not supplied.
        if x_p is None:
            x_p = x_b
            logger.warn('Approximated x_p with x_b.')
        if y_p is None:
            y_p = y_b
            logger.warn('Approximated y_p with y_b.')
        if z_p is None:
            # Fixed: the original assigned y_b here, contradicting the
            # warning message below; z_p must be approximated with z_b.
            z_p = z_b
            logger.warn('Approximated z_p with z_b.')

        # Background induction factors per the Hunt model.
        if n_cb is None:
            n_cb = 0.725 * (y_w / y_b) ** 0.2
            logger.warn('Approximated n_cb.')
        logger.debug('N_cb: {}'.format(n_cb))
        if n_bb is None:
            n_bb = 0.725 * (y_w / y_b) ** 0.2
            logger.warn('Approximated n_bb.')
        # Fixed: the original logged n_cb under the N_bb label.
        logger.debug('N_bb: {}'.format(n_bb))

        if l_as is None:
            logger.warn('Approximated scotopic luminance.')
            if cct_w is None:
                cct_w = self._get_cct(x_w, y_w, z_w)
                logger.warn('Approximated cct_w: {}'.format(cct_w))
            l_as = 2.26 * l_a
            l_as *= ((cct_w / 4000) - 0.4) ** (1 / 3)
        logger.debug('LA_S: {}'.format(l_as))

        # Supplying only one of (s, s_w) is an error.  Fixed: the original
        # `if s is None != s_w is None:` chained the comparison operators as
        # (s is None) and (None != s_w) and (s_w is None), which can never be
        # true, so the intended XOR validation silently never fired.
        if (s is None) != (s_w is None):
            raise ValueError("Either both scotopic responses (s, s_w) need to be supplied or none.")
        elif s is None and s_w is None:
            s = y
            s_w = y_w
            logger.warn('Approximated scotopic response to stimulus and reference white.')

        if p is None:
            logger.warn('p not supplied. Model will not account for simultaneous chromatic contrast .')

        xyz = numpy.array([x, y, z])
        logger.debug('XYZ: {}'.format(xyz))
        xyz_w = numpy.array([x_w, y_w, z_w])
        logger.debug('XYZ_W: {}'.format(xyz_w))
        xyz_b = numpy.array([x_b, y_b, z_b])
        xyz_p = numpy.array([x_p, y_p, z_p])

        k = 1 / (5 * l_a + 1)
        logger.debug('k: {}'.format(k))
        # luminance adaptation factor
        f_l = 0.2 * (k ** 4) * (5 * l_a) + 0.1 * ((1 - (k ** 4)) ** 2) * ((5 * l_a) ** (1 / 3))
        logger.debug('F_L: {}'.format(f_l))

        # Chromatic adaptation of the stimulus and of the reference white.
        logger.debug('--- Stimulus RGB adaptation start ----')
        rgb_a = self._adaptation(f_l, l_a, xyz, xyz_w, xyz_b, xyz_p, p, helson_judd, discount_illuminant)
        logger.debug('--- Stimulus RGB adaptation end ----')
        r_a, g_a, b_a = rgb_a
        logger.debug('RGB_A: {}'.format(rgb_a))
        logger.debug('--- White RGB adaptation start ----')
        rgb_aw = self._adaptation(f_l, l_a, xyz_w, xyz_w, xyz_b, xyz_p, p, helson_judd, discount_illuminant)
        logger.debug('--- White RGB adaptation end ----')
        r_aw, g_aw, b_aw = rgb_aw
        logger.debug('RGB_AW: {}'.format(rgb_aw))

        # ---------------------------
        # Opponent Color Dimensions
        # ---------------------------

        # achromatic_cone_signal
        a_a = 2 * r_a + g_a + (1 / 20) * b_a - 3.05 + 1
        logger.debug('A_A: {}'.format(a_a))
        a_aw = 2 * r_aw + g_aw + (1 / 20) * b_aw - 3.05 + 1
        logger.debug('A_AW: {}'.format(a_aw))

        c1 = r_a - g_a
        logger.debug('C1: {}'.format(c1))
        c2 = g_a - b_a
        logger.debug('C2: {}'.format(c2))
        c3 = b_a - r_a
        logger.debug('C3: {}'.format(c3))

        c1_w = r_aw - g_aw
        logger.debug('C1_W: {}'.format(c1_w))
        c2_w = g_aw - b_aw
        logger.debug('C2_W: {}'.format(c2_w))
        c3_w = b_aw - r_aw
        logger.debug('C3_W: {}'.format(c3_w))

        # -----
        # Hue
        # -----
        self._hue_angle = (180 * numpy.arctan2(0.5 * (c2 - c3) / 4.5, c1 - (c2 / 11)) / numpy.pi) % 360
        hue_angle_w = (180 * numpy.arctan2(0.5 * (c2_w - c3_w) / 4.5, c1_w - (c2_w / 11)) / numpy.pi) % 360

        # -------------
        # Saturation
        # -------------
        e_s = self._calculate_eccentricity_factor(self.hue_angle)
        logger.debug('es: {}'.format(e_s))
        e_s_w = self._calculate_eccentricity_factor(hue_angle_w)

        f_t = l_a / (l_a + 0.1)
        logger.debug('F_t: {}'.format(f_t))
        m_yb = 100 * (0.5 * (c2 - c3) / 4.5) * (e_s * (10 / 13) * n_c * n_cb * f_t)
        logger.debug('m_yb: {}'.format(m_yb))
        m_rg = 100 * (c1 - (c2 / 11)) * (e_s * (10 / 13) * n_c * n_cb)
        logger.debug('m_rg: {}'.format(m_rg))
        m = ((m_rg ** 2) + (m_yb ** 2)) ** 0.5
        logger.debug('m: {}'.format(m))

        self._saturation = 50 * m / rgb_a.sum(axis=0)

        m_yb_w = 100 * (0.5 * (c2_w - c3_w) / 4.5) * (e_s_w * (10 / 13) * n_c * n_cb * f_t)
        m_rg_w = 100 * (c1_w - (c2_w / 11)) * (e_s_w * (10 / 13) * n_c * n_cb)
        m_w = ((m_rg_w ** 2) + (m_yb_w ** 2)) ** 0.5

        # ------------
        # Brightness
        # ------------
        logger.debug('--- Stimulus achromatic signal START ----')
        a = self._calculate_achromatic_signal(l_as, s, s_w, n_bb, a_a)
        logger.debug('--- Stimulus achromatic signal END ----')
        logger.debug('A: {}'.format(a))

        logger.debug('--- White achromatic signal START ----')
        a_w = self._calculate_achromatic_signal(l_as, s_w, s_w, n_bb, a_aw)
        logger.debug('--- White achromatic signal END ----')
        logger.debug('A_w: {}'.format(a_w))

        n1 = ((7 * a_w) ** 0.5) / (5.33 * n_b ** 0.13)
        n2 = (7 * a_w * n_b ** 0.362) / 200
        logger.debug('N1: {}'.format(n1))
        logger.debug('N2: {}'.format(n2))

        self._brightness = ((7 * (a + (m / 100))) ** 0.6) * n1 - n2
        brightness_w = ((7 * (a_w + (m_w / 100))) ** 0.6) * n1 - n2
        logger.debug('Q: {}'.format(self.brightness))
        logger.debug('Q_W: {}'.format(brightness_w))

        # ----------
        # Lightness
        # ----------
        z = 1 + (y_b / y_w) ** 0.5
        logger.debug('z: {}'.format(z))
        self._lightness = 100 * (self.brightness / brightness_w) ** z

        # -------
        # Chroma
        # -------
        self._chroma = 2.44 * (self.saturation ** 0.69) * ((self.brightness / brightness_w) ** (y_b / y_w)) * (
            1.64 - 0.29 ** (y_b / y_w))

        # -------------
        # Colorfulness
        # -------------
        self._colorfulness = (f_l ** 0.15) * self.chroma

Example 17

Project: hedge
Source File: var-velocity.py
View license
def main(write_output=True, flux_type_arg="central", use_quadrature=True,
        final_time=20):
    """Run a 2D variable-coefficient advection problem with hedge.

    Builds a rectangular mesh, advects a Gaussian pulse with a
    space-dependent vortex velocity field under a space-dependent inflow
    boundary condition, and time-steps with LSRK4, optionally writing VTK
    output and log data.

    NOTE: Python 2 code (uses ``print`` statements).

    :param write_output: if True, write VTK snapshots and a log file.
    :param flux_type_arg: numerical flux passed to the advection operator.
    :param use_quadrature: if True, enable over-integration ("quad" group).
    :param final_time: simulation end time.
    """
    from math import sin, cos, pi, sqrt

    from hedge.backends import guess_run_context
    rcon = guess_run_context()

    # mesh setup --------------------------------------------------------------
    # Only the head rank builds the mesh; it is then distributed to the
    # other ranks (or received, on non-head ranks).
    if rcon.is_head_rank:
        #from hedge.mesh.generator import make_disk_mesh
        #mesh = make_disk_mesh()
        from hedge.mesh.generator import make_rect_mesh
        mesh = make_rect_mesh(a=(-1,-1),b=(1,1),max_area=0.008)

    if rcon.is_head_rank:
        mesh_data = rcon.distribute_mesh(mesh)
    else:
        mesh_data = rcon.receive_mesh()

    # space-time-dependent-velocity-field -------------------------------------
    # simple vortex
    class TimeDependentVField:
        """ `TimeDependentVField` is a callable expecting `(x, t)` representing space and time

        `x` is of the length of the spatial dimension and `t` is the time."""
        shape = (2,)

        def __call__(self, pt, el, t):
            x, y = pt
            # Correction-Factor to make the speed zero on the on the boundary
            #fac = (1-x**2)*(1-y**2)
            fac = 1.
            # Rigid-body rotation (-y, x) modulated in time by cos(pi*t).
            return numpy.array([-y*fac, x*fac]) * cos(pi*t)

    class VField:
        """ `VField` is a callable expecting `(x)` representing space

        `x` is of the length of the spatial dimension."""
        shape = (2,)

        def __call__(self, pt, el):
            x, y = pt
            # Correction-Factor to make the speed zero on the on the boundary
            #fac = (1-x**2)*(1-y**2)
            fac = 1.
            # Steady rigid-body rotation (-y, x).
            return numpy.array([-y*fac, x*fac])

    # space-time-dependent State BC (optional)-----------------------------------
    class TimeDependentBc_u:
        """ space and time dependent BC for state u"""
        def __call__(self, pt, el, t):
            # Inflow of 1 on the x > 0 boundary for t <= 0.5, then 0.
            x, y = pt
            if t <= 0.5:
                if x > 0:
                    return 1
                else:
                    return 0
            else:
                return 0

    class Bc_u:
        """ Only space dependent BC for state u"""
        def __call__(seld, pt, el):
            # Constant inflow of 1 on the x > 0 boundary.
            x, y = pt
            if x > 0:
                return 1
            else:
                return 0


    # operator setup ----------------------------------------------------------
    # In the operator setup it is possible to switch between a only space
    # dependent velocity field `VField` or a time and space dependent
    # `TimeDependentVField`.
    # For `TimeDependentVField`: advec_v=TimeDependentGivenFunction(VField())
    # For `VField`: advec_v=TimeConstantGivenFunction(GivenFunction(VField()))
    # Same for the Bc_u Function! If you don't define Bc_u then the BC for u = 0.

    from hedge.data import \
            ConstantGivenFunction, \
            TimeConstantGivenFunction, \
            TimeDependentGivenFunction, \
            GivenFunction
    from hedge.models.advection import VariableCoefficientAdvectionOperator
    op = VariableCoefficientAdvectionOperator(mesh.dimensions,
        #advec_v=TimeDependentGivenFunction(
        #    TimeDependentVField()),
        advec_v=TimeConstantGivenFunction(
            GivenFunction(VField())),
        #bc_u_f=TimeDependentGivenFunction(
        #    TimeDependentBc_u()),
        bc_u_f=TimeConstantGivenFunction(
            GivenFunction(Bc_u())),
        flux_type=flux_type_arg)

    # discretization setup ----------------------------------------------------
    # Polynomial order of the DG discretization; quadrature (if enabled)
    # over-integrates with degree 3*order.
    order = 5
    if use_quadrature:
        quad_min_degrees = {"quad": 3*order}
    else:
        quad_min_degrees = {}

    discr = rcon.make_discretization(mesh_data, order=order,
            default_scalar_type=numpy.float64, 
            debug=["cuda_no_plan"],
            quad_min_degrees=quad_min_degrees,
            tune_for=op.op_template(),

            )
    vis_discr = discr

    # visualization setup -----------------------------------------------------
    from hedge.visualization import VtkVisualizer
    if write_output:
        vis = VtkVisualizer(vis_discr, rcon, "fld")

    # initial condition -------------------------------------------------------
    if True:
        def initial(pt, el):
            # Gauss pulse
            from math import exp
            # Centered at (0.3, 0.5); factor 8 narrows the pulse.
            x = (pt-numpy.array([0.3, 0.5]))*8
            return exp(-numpy.dot(x, x))
    else:
        def initial(pt, el):
            # Rectangle
            x, y = pt
            if abs(x) < 0.5 and abs(y) < 0.2:
                return 2
            else:
                return 1

    u = discr.interpolate_volume_function(initial)

    # timestep setup ----------------------------------------------------------
    from hedge.timestep.runge_kutta import LSRK4TimeStepper
    stepper = LSRK4TimeStepper(
            vector_primitive_factory=discr.get_vector_primitive_factory())

    if rcon.is_head_rank:
        print "%d elements" % len(discr.mesh.elements)

    # filter setup-------------------------------------------------------------
    # Exponential modal filter used to damp oscillations from the
    # discontinuous boundary data (see note in the timestep loop).
    from hedge.discretization import ExponentialFilterResponseFunction
    from hedge.optemplate.operators import FilterOperator
    mode_filter = FilterOperator(
            ExponentialFilterResponseFunction(min_amplification=0.9,order=4))\
                    .bind(discr)

    # diagnostics setup -------------------------------------------------------
    from pytools.log import LogManager, \
            add_general_quantities, \
            add_simulation_quantities, \
            add_run_info

    if write_output:
        log_file_name = "space-dep.dat"
    else:
        log_file_name = None

    logmgr = LogManager(log_file_name, "w", rcon.communicator)
    add_run_info(logmgr)
    add_general_quantities(logmgr)
    add_simulation_quantities(logmgr)
    discr.add_instrumentation(logmgr)

    stepper.add_instrumentation(logmgr)

    from hedge.log import Integral, LpNorm
    u_getter = lambda: u
    logmgr.add_quantity(Integral(u_getter, discr, name="int_u"))
    logmgr.add_quantity(LpNorm(u_getter, discr, p=1, name="l1_u"))
    logmgr.add_quantity(LpNorm(u_getter, discr, name="l2_u"))

    logmgr.add_watches(["step.max", "t_sim.max", "l2_u", "t_step.max"])

    # Initialize v for data output:
    v = op.advec_v.volume_interpolant(0, discr)

    # timestep loop -----------------------------------------------------------
    rhs = op.bind(discr)
    try:
        from hedge.timestep import times_and_steps
        step_it = times_and_steps(
                final_time=final_time, logmgr=logmgr,
                max_dt_getter=lambda t: op.estimate_timestep(discr,
                    stepper=stepper, t=t, fields=u))

        for step, t, dt in step_it:
            # Write a VTK snapshot every 10th step.
            if step % 10 == 0 and write_output:
                visf = vis.make_file("fld-%04d" % step)
                vis.add_data(visf, [ 
                    ("u", discr.convert_volume(u, kind="numpy")), 
                    ("v", discr.convert_volume(v, kind="numpy"))
                    ], time=t, step=step)
                visf.close()

            u = stepper(u, t, dt, rhs)

            # We're feeding in a discontinuity through the BCs.
            # Quadrature does not help with shock capturing--
            # therefore we do need to filter here, regardless
            # of whether quadrature is enabled.
            u = mode_filter(u)

        # Sanity check: the solution must have remained bounded.
        assert discr.norm(u) < 10

    finally:
        if write_output:
            vis.close()

        logmgr.close()
        discr.close()

Example 18

Project: KMCLib
Source File: BucketsTest.py
View license
    def testReal2D(self):
        """
        Test a realistic 2D diffusion system.

        Runs the same KMC model twice -- once with a custom C++ rate
        calculator and once with the equivalent Python one -- and checks
        that both reproduce a reference distribution of "M1" species.
        NOTE: Python 2 code (print statements, integer division,
        time.clock).
        """
        # 3-site unit cell on a cubic lattice: one "bulk" point at the
        # corner and two midpoints along a and b.
        unit_cell = KMCUnitCell(cell_vectors=numpy.array([[1.0,0.0,0.0],
                                                          [0.0,1.0,0.0],
                                                          [0.0,0.0,1.0]]),
                                basis_points=[[0.0,0.0,0.0],
                                              [0.5,0.0,0.0],
                                              [0.0,0.5,0.0]])

        # And a lattice.
        rep_a = 100
        rep_b = 10
        lattice = KMCLattice(unit_cell=unit_cell,
                             repetitions=(rep_a,rep_b,1),
                             periodic=(True,True,False))

        # Populate the lattice with types.
        types = [["d"],["d"],["d"]] * rep_a * rep_b

        # Add creation sites and block sites.
        # "C" creation sites sit on the columns x == 1 and x == rep_a - 1;
        # "x" blocks the x == 0 column.
        for i,c in enumerate(lattice.sites()):
            if (c[0] == 1.0 or c[0] == rep_a - 1) and c[1]%1 == 0:
                types[i] = ["C", "d"]
            elif c[0] == 0.0:
                types[i] = ["x"]

        # Add a random distribution of roughly 20% ions "I".
        # Fixed seed keeps the configuration (and reference result)
        # reproducible.
        numpy.random.seed(8765621)
        for i,(c,t) in enumerate(zip(lattice.sites(), types)):
            if c[0]%1 == 0.5 or c[1]%1 == 0.5:
                if numpy.random.rand() < 0.2:
                    types[i] = ["I"]

        # The starting configuration.
        config = KMCConfiguration(lattice=lattice,
                                  types=types,
                                  possible_types=["C","I", "M1", "d", "x"])

        # A processes that creates an "M1" at a creation site.
        coordinates = [[0.0, 0.0, 0.0]]
        p0 = KMCBucketProcess(coordinates=coordinates,
                              minimum_match=[[(1, "C")]],
                              update=[[(1,"M1")]],
                              basis_sites=[0],
                              rate_constant=1.0)

        # A processes that moves an "M1" in the bulk, to the right.
        coordinates = [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0]]
        p1 = KMCBucketProcess(coordinates=coordinates,
                              minimum_match=[[(1, "M1")], [(1, "d")]],
                              update=[[(-1,"M1")], [(1, "M1")]],
                              basis_sites=[0],
                              rate_constant=1.0)

        # A processes that moves an "M1" in the bulk, to the left.
        coordinates = [[0.0, 0.0, 0.0], [-1.0, 0.0, 0.0]]
        p2 = KMCBucketProcess(coordinates=coordinates,
                              minimum_match=[[(1, "M1")], [(1, "d")]],
                              update=[[(-1,"M1")], [(1, "M1")]],
                              basis_sites=[0],
                              rate_constant=1.0)

        # A processes that moves an "M1" in the bulk, to up.
        coordinates = [[0.0, 0.0, 0.0], [0.0, 1.0, 0.0]]
        p3 = KMCBucketProcess(coordinates=coordinates,
                              minimum_match=[[(1, "M1")], [(1, "d")]],
                              update=[[(-1,"M1")], [(1, "M1")]],
                              basis_sites=[0],
                              rate_constant=1.0)

        # A processes that moves an "M1" in the bulk, to down.
        coordinates = [[0.0, 0.0, 0.0], [0.0,-1.0, 0.0]]
        p4 = KMCBucketProcess(coordinates=coordinates,
                              minimum_match=[[(1, "M1")], [(1, "d")]],
                              update=[[(-1,"M1")], [(1, "M1")]],
                              basis_sites=[0],
                              rate_constant=1.0)

        # Set up the interactions.
        # First run: rates supplied by a custom C++ calculator.
        interactions = KMCInteractions(processes=[p0, p1, p2, p3, p4],
                                       implicit_wildcards=True)
        interactions.setRateCalculator(rate_calculator="BucketsTestCalculator")

        # Construct the lattice mode.
        model = KMCLatticeModel(configuration=config,
                                interactions=interactions)

        # Control parameters.
        control_parameters = KMCControlParameters(number_of_steps=118817,
                                                  seed=12,
                                                  dump_time_interval=10.0)

        # Run the model.
        # time.clock() is used only for the wall-time printout at the end.
        t1 = time.clock()
        model.run(control_parameters=control_parameters,
                  trajectory_filename="traj1.py")
        t2 = time.clock()

        #breakers=[Breaker()])

        # Make a simple analysis of the distribution of M1 in the structure.
        distribution = []

        types = config.types()
        # Indices of the basis-0 sites in one lattice row (3 basis points
        # per cell, 10 cells per row -> stride 3, 30 indices per row).
        row = numpy.array([0, 3, 6, 9, 12, 15, 18, 21, 24, 27])
        for i in range(len(config.types())/(len(row)*3)):
            this_row = row + numpy.ones(len(row), dtype=int)*i*30

            # One-liner to calculate the number of M1 on this row.
            distribution.append(len([t for i in this_row for t in types[i] if t == 'M1']))

        ref_distribution = [0, 104, 36, 17, 23, 10, 18, 6, 3, 7, 5, 3, 6, 5, 5, 3, 2, 4, 8, 4, 0, 1, 2, 0, 0, 4, 2, 1, 0, 0, 1, 0, 0, 0, 0, 3, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 2, 0, 0, 0, 2, 1, 1, 0, 1, 1, 2, 1, 2, 2, 1, 2, 1, 3, 4, 5, 4, 10, 7, 5, 8, 10, 4, 10, 8, 17, 26, 20, 38, 13, 78]
        # Check.
        self.assertEqual( ref_distribution, distribution )

        # Second run: rebuild the identical starting configuration (same
        # seed) and repeat with the Python rate calculator.
        # Populate the lattice with types.
        types = [["d"],["d"],["d"]] * rep_a * rep_b

        # Add creation sites and block sites.
        for i,c in enumerate(lattice.sites()):
            if (c[0] == 1.0 or c[0] == rep_a - 1) and c[1]%1 == 0:
                types[i] = ["C", "d"]
            elif c[0] == 0.0:
                types[i] = ["x"]

        # Add a random distribution of roughly 20% ions "I".
        numpy.random.seed(8765621)
        for i,(c,t) in enumerate(zip(lattice.sites(), types)):
            if c[0]%1 == 0.5 or c[1]%1 == 0.5:
                if numpy.random.rand() < 0.2:
                    types[i] = ["I"]

        # The starting configuration.
        config = KMCConfiguration(lattice=lattice,
                                  types=types,
                                  possible_types=["C","I", "M1", "d", "x"])

        # Set up the interactions.
        interactions = KMCInteractions(processes=[p0, p1, p2, p3, p4],
                                       implicit_wildcards=True)
        interactions.setRateCalculator(rate_calculator=Bucket2DRateCalculator)

        # Construct the lattice mode.
        model = KMCLatticeModel(configuration=config,
                                interactions=interactions)

        # Control parameters.
        control_parameters = KMCControlParameters(number_of_steps=118817,
                                                  seed=12,
                                                  dump_time_interval=10.0)

        # Run the model.
        t3 = time.clock()
        model.run(control_parameters=control_parameters,
                  trajectory_filename="traj2.py")
        t4 = time.clock()

        # Check the results.
        # The Python calculator must reproduce the C++ reference exactly.
        distribution = []
        types = config.types()
        row = numpy.array([0, 3, 6, 9, 12, 15, 18, 21, 24, 27])
        for i in range(len(config.types())/(len(row)*3)):
            this_row = row + numpy.ones(len(row), dtype=int)*i*30

            # One-liner to calculate the number of M1 on this row.
            distribution.append(len([t for i in this_row for t in types[i] if t == 'M1']))

        # Check.
        self.assertEqual( ref_distribution, distribution )

        # Print the times.
        print "Time for custom C++ run:    ", t2-t1
        print "Time for custom Python run: ", t4-t3
Example 19

Project: mne-python
Source File: montage.py
View license
def read_dig_montage(hsp=None, hpi=None, elp=None, point_names=None,
                     unit='auto', fif=None, transform=True, dev_head_t=False):
    r"""Read subject-specific digitization montage from a file.

    Parameters
    ----------
    hsp : None | str | array, shape (n_points, 3)
        If str, this corresponds to the filename of the headshape points.
        This is typically used with the Polhemus FastSCAN system.
        If numpy.array, this corresponds to an array of positions of the
        headshape points in 3d. These points are assumed to be in the native
        digitizer space and will be rescaled according to the unit parameter.
    hpi : None | str | array, shape (n_hpi, 3)
        If str, this corresponds to the filename of Head Position Indicator
        (HPI) points. If numpy.array, this corresponds to an array
        of HPI points. These points are in device space.
    elp : None | str | array, shape (n_fids + n_hpi, 3)
        If str, this corresponds to the filename of electrode position
        points. This is typically used with the Polhemus FastSCAN system.
        Fiducials should be listed first: nasion, left periauricular point,
        right periauricular point, then the points corresponding to the HPI.
        If numpy.array, this corresponds to an array of digitizer points in
        the same order. These points are assumed to be in the native digitizer
        space and will be rescaled according to the unit parameter.
    point_names : None | list
        If list, this corresponds to a list of point names. This must be
        specified if elp is defined.
    unit : 'auto' | 'm' | 'cm' | 'mm'
        Unit of the digitizer files (hsp and elp). If not 'm', coordinates will
        be rescaled to 'm'. Default is 'auto', which assumes 'm' for \*.hsp and
        \*.elp files and 'mm' for \*.txt files, corresponding to the known
        Polhemus export formats.
    fif : str | None
        FIF file from which to read digitization locations.
        If str (filename), all other arguments are ignored.

        .. versionadded:: 0.12

    transform : bool
        If True, points will be transformed to Neuromag space.
        The fiducials, 'nasion', 'lpa', 'rpa' must be specified in
        the montage file. Useful for points captured using Polhemus FastSCAN.
        Default is True.
    dev_head_t : bool
        If True, a Dev-to-Head transformation matrix will be added to the
        montage. To get a proper `dev_head_t`, the hpi and the elp points
        must be in the same order. If False, an identity matrix will be added
        to the montage. Default is False.

    Returns
    -------
    montage : instance of DigMontage
        The digitizer montage.

    See Also
    --------
    read_montage : Function to read generic EEG templates

    Notes
    -----
    All digitized points will be transformed to head-based coordinate system
    if transform is True and fiducials are present.

    .. versionadded:: 0.9.0
    """
    if fif is not None:
        # Use a different code path
        # FIF files are already in head coordinates, so the text-file
        # transform options do not apply.
        if dev_head_t or not transform:
            raise ValueError('transform must be True and dev_head_t must be '
                             'False for FIF dig montage')
        if not all(x is None for x in (hsp, hpi, elp, point_names)):
            raise ValueError('hsp, hpi, elp, and point_names must all be None '
                             'if fif is not None')
        # NOTE(review): overwrite=True on a read path presumably just skips
        # the overwrite guard while must_exist enforces the file's presence
        # -- confirm against _check_fname.
        _check_fname(fif, overwrite=True, must_exist=True)
        # Load the dig data
        f, tree = fiff_open(fif)[:2]
        with f as fid:
            dig = _read_dig_fif(fid, tree)
        # Split up the dig points by category
        hsp = list()
        hpi = list()
        elp = list()
        point_names = list()
        fids = dict()
        dig_ch_pos = dict()
        for d in dig:
            if d['kind'] == FIFF.FIFFV_POINT_CARDINAL:
                _check_frame(d, 'head')
                fids[_cardinal_ident_mapping[d['ident']]] = d['r']
            elif d['kind'] == FIFF.FIFFV_POINT_HPI:
                _check_frame(d, 'head')
                hpi.append(d['r'])
                elp.append(d['r'])
                point_names.append('HPI%03d' % d['ident'])
            elif d['kind'] == FIFF.FIFFV_POINT_EXTRA:
                _check_frame(d, 'head')
                hsp.append(d['r'])
            elif d['kind'] == FIFF.FIFFV_POINT_EEG:
                _check_frame(d, 'head')
                dig_ch_pos['EEG%03d' % d['ident']] = d['r']
        # Fixed fiducial order: nasion, lpa, rpa (raises KeyError if a
        # cardinal point is missing from the FIF file).
        fids = np.array([fids[key] for key in ('nasion', 'lpa', 'rpa')])
        hsp = np.array(hsp)
        elp = np.array(elp)
    else:
        dig_ch_pos = None
        # Scale factors to meters; None means "already in meters" (falsy,
        # so no rescaling is applied below).  For in-memory arrays 'auto'
        # behaves like 'mm'; the per-extension handling described in the
        # docstring happens inside _read_dig_points for filenames.
        scale = {'mm': 1e-3, 'cm': 1e-2, 'auto': 1e-3, 'm': None}
        if unit not in scale:
            raise ValueError("Unit needs to be one of %s, not %r" %
                             (tuple(map(repr, scale)), unit))

        # HSP
        if isinstance(hsp, string_types):
            hsp = _read_dig_points(hsp, unit=unit)
        elif hsp is not None and scale[unit]:
            hsp *= scale[unit]

        # HPI
        if isinstance(hpi, string_types):
            ext = op.splitext(hpi)[-1]
            if ext == '.txt':
                hpi = _read_dig_points(hpi, unit='m')
            elif ext in ('.sqd', '.mrk'):
                from ..io.kit import read_mrk
                hpi = read_mrk(hpi)
            else:
                raise ValueError('HPI file with extension *%s is not '
                                 'supported. Only *.txt, *.sqd and *.mrk are '
                                 'supported.' % ext)

        # ELP
        if isinstance(elp, string_types):
            elp = _read_dig_points(elp, unit=unit)
        elif elp is not None and scale[unit]:
            elp *= scale[unit]

        if elp is not None:
            # Each ELP point needs a name so fiducials can be identified.
            if not isinstance(point_names, Iterable):
                raise TypeError("If elp is specified, point_names must "
                                "provide a list of str with one entry per ELP "
                                "point")
            point_names = list(point_names)
            if len(point_names) != len(elp):
                raise ValueError("The elp file contains %i points, but %i "
                                 "names were specified." %
                                 (len(elp), len(point_names)))

        # Transform digitizer coordinates to neuromag space
        if transform:
            if elp is None:
                raise ValueError("ELP points are not specified. Points are "
                                 "needed for transformation.")
            names_lower = [name.lower() for name in point_names]

            # check that all needed points are present
            missing = [name for name in ('nasion', 'lpa', 'rpa')
                       if name not in names_lower]
            if missing:
                raise ValueError("The points %s are missing, but are needed "
                                 "to transform the points to the MNE "
                                 "coordinate system. Either add the points, "
                                 "or read the montage with transform=False."
                                 % str(missing))

            nasion = elp[names_lower.index('nasion')]
            lpa = elp[names_lower.index('lpa')]
            rpa = elp[names_lower.index('rpa')]

            # remove fiducials from elp
            mask = np.ones(len(names_lower), dtype=bool)
            for fid in ['nasion', 'lpa', 'rpa']:
                mask[names_lower.index(fid)] = False
            elp = elp[mask]

            # Apply the digitizer -> Neuromag head transform to fiducials,
            # remaining ELP points and headshape points alike.
            neuromag_trans = get_ras_to_neuromag_trans(nasion, lpa, rpa)
            fids = apply_trans(neuromag_trans, [nasion, lpa, rpa])
            elp = apply_trans(neuromag_trans, elp)
            hsp = apply_trans(neuromag_trans, hsp)
        else:
            fids = [None] * 3

    if dev_head_t:
        # Fit device-space HPI points to head-space ELP points; assumes
        # both are in the same order (see docstring).
        from ..coreg import fit_matched_points
        trans = fit_matched_points(tgt_pts=elp, src_pts=hpi, out='trans')
    else:
        trans = np.identity(4)

    return DigMontage(hsp, hpi, elp, point_names, fids[0], fids[1], fids[2],
                      trans, dig_ch_pos)

Example 20

Project: mne-python
Source File: montage.py
View license
def read_dig_montage(hsp=None, hpi=None, elp=None, point_names=None,
                     unit='auto', fif=None, transform=True, dev_head_t=False):
    r"""Read subject-specific digitization montage from a file.

    Parameters
    ----------
    hsp : None | str | array, shape (n_points, 3)
        If str, this corresponds to the filename of the headshape points.
        This is typically used with the Polhemus FastSCAN system.
        If numpy.array, this corresponds to an array of positions of the
        headshape points in 3d. These points are assumed to be in the native
        digitizer space and will be rescaled according to the unit parameter.
    hpi : None | str | array, shape (n_hpi, 3)
        If str, this corresponds to the filename of Head Position Indicator
        (HPI) points. If numpy.array, this corresponds to an array
        of HPI points. These points are in device space.
    elp : None | str | array, shape (n_fids + n_hpi, 3)
        If str, this corresponds to the filename of electrode position
        points. This is typically used with the Polhemus FastSCAN system.
        Fiducials should be listed first: nasion, left periauricular point,
        right periauricular point, then the points corresponding to the HPI.
        If numpy.array, this corresponds to an array of digitizer points in
        the same order. These points are assumed to be in the native digitizer
        space and will be rescaled according to the unit parameter.
    point_names : None | list
        If list, this corresponds to a list of point names. This must be
        specified if elp is defined.
    unit : 'auto' | 'm' | 'cm' | 'mm'
        Unit of the digitizer files (hsp and elp). If not 'm', coordinates will
        be rescaled to 'm'. Default is 'auto', which assumes 'm' for \*.hsp and
        \*.elp files and 'mm' for \*.txt files, corresponding to the known
        Polhemus export formats.
    fif : str | None
        FIF file from which to read digitization locations.
        If str (filename), all other arguments are ignored.

        .. versionadded:: 0.12

    transform : bool
        If True, points will be transformed to Neuromag space.
        The fiducials, 'nasion', 'lpa', 'rpa' must be specified in
        the montage file. Useful for points captured using Polhemus FastSCAN.
        Default is True.
    dev_head_t : bool
        If True, a Dev-to-Head transformation matrix will be added to the
        montage. To get a proper `dev_head_t`, the hpi and the elp points
        must be in the same order. If False, an identity matrix will be added
        to the montage. Default is False.

    Returns
    -------
    montage : instance of DigMontage
        The digitizer montage.

    See Also
    --------
    read_montage : Function to read generic EEG templates

    Notes
    -----
    All digitized points will be transformed to head-based coordinate system
    if transform is True and fiducials are present.

    .. versionadded:: 0.9.0
    """
    if fif is not None:
        # Use a different code path: everything is read from the FIF file,
        # so the array/filename arguments must all be left at None.
        if dev_head_t or not transform:
            raise ValueError('transform must be True and dev_head_t must be '
                             'False for FIF dig montage')
        if not all(x is None for x in (hsp, hpi, elp, point_names)):
            raise ValueError('hsp, hpi, elp, and point_names must all be None '
                             'if fif is not None')
        # NOTE(review): overwrite=True looks odd for a read-only path —
        # presumably this call only validates existence; confirm _check_fname.
        _check_fname(fif, overwrite=True, must_exist=True)
        # Load the dig data
        f, tree = fiff_open(fif)[:2]
        with f as fid:
            dig = _read_dig_fif(fid, tree)
        # Split up the dig points by category (cardinal fiducials, HPI coils,
        # extra headshape points, EEG channel positions).
        hsp = list()
        hpi = list()
        elp = list()
        point_names = list()
        fids = dict()
        dig_ch_pos = dict()
        for d in dig:
            if d['kind'] == FIFF.FIFFV_POINT_CARDINAL:
                _check_frame(d, 'head')
                fids[_cardinal_ident_mapping[d['ident']]] = d['r']
            elif d['kind'] == FIFF.FIFFV_POINT_HPI:
                _check_frame(d, 'head')
                # HPI coil positions serve as both hpi and elp here.
                hpi.append(d['r'])
                elp.append(d['r'])
                point_names.append('HPI%03d' % d['ident'])
            elif d['kind'] == FIFF.FIFFV_POINT_EXTRA:
                _check_frame(d, 'head')
                hsp.append(d['r'])
            elif d['kind'] == FIFF.FIFFV_POINT_EEG:
                _check_frame(d, 'head')
                dig_ch_pos['EEG%03d' % d['ident']] = d['r']
        # Fiducials in the fixed order expected by DigMontage below.
        fids = np.array([fids[key] for key in ('nasion', 'lpa', 'rpa')])
        hsp = np.array(hsp)
        elp = np.array(elp)
    else:
        dig_ch_pos = None
        # 'm' maps to None so the falsy check below skips rescaling; for
        # array inputs 'auto' scales by 1e-3 (i.e. treated as mm — the
        # per-extension 'auto' logic in the docstring applies to file input
        # via _read_dig_points; confirm for array input).
        scale = {'mm': 1e-3, 'cm': 1e-2, 'auto': 1e-3, 'm': None}
        if unit not in scale:
            raise ValueError("Unit needs to be one of %s, not %r" %
                             (tuple(map(repr, scale)), unit))

        # HSP
        if isinstance(hsp, string_types):
            hsp = _read_dig_points(hsp, unit=unit)
        elif hsp is not None and scale[unit]:
            # NOTE(review): in-place scaling mutates the caller's array.
            hsp *= scale[unit]

        # HPI
        if isinstance(hpi, string_types):
            ext = op.splitext(hpi)[-1]
            if ext == '.txt':
                hpi = _read_dig_points(hpi, unit='m')
            elif ext in ('.sqd', '.mrk'):
                from ..io.kit import read_mrk
                hpi = read_mrk(hpi)
            else:
                raise ValueError('HPI file with extension *%s is not '
                                 'supported. Only *.txt, *.sqd and *.mrk are '
                                 'supported.' % ext)

        # ELP
        if isinstance(elp, string_types):
            elp = _read_dig_points(elp, unit=unit)
        elif elp is not None and scale[unit]:
            # NOTE(review): in-place scaling mutates the caller's array.
            elp *= scale[unit]

        if elp is not None:
            if not isinstance(point_names, Iterable):
                raise TypeError("If elp is specified, point_names must "
                                "provide a list of str with one entry per ELP "
                                "point")
            point_names = list(point_names)
            if len(point_names) != len(elp):
                raise ValueError("The elp file contains %i points, but %i "
                                 "names were specified." %
                                 (len(elp), len(point_names)))

        # Transform digitizer coordinates to neuromag space
        if transform:
            if elp is None:
                raise ValueError("ELP points are not specified. Points are "
                                 "needed for transformation.")
            names_lower = [name.lower() for name in point_names]

            # check that all needed points are present
            missing = [name for name in ('nasion', 'lpa', 'rpa')
                       if name not in names_lower]
            if missing:
                raise ValueError("The points %s are missing, but are needed "
                                 "to transform the points to the MNE "
                                 "coordinate system. Either add the points, "
                                 "or read the montage with transform=False."
                                 % str(missing))

            nasion = elp[names_lower.index('nasion')]
            lpa = elp[names_lower.index('lpa')]
            rpa = elp[names_lower.index('rpa')]

            # remove fiducials from elp so it only holds HPI points
            mask = np.ones(len(names_lower), dtype=bool)
            for fid in ['nasion', 'lpa', 'rpa']:
                mask[names_lower.index(fid)] = False
            elp = elp[mask]

            # Build the head (Neuromag) coordinate frame from the fiducials
            # and move everything into it.
            neuromag_trans = get_ras_to_neuromag_trans(nasion, lpa, rpa)
            fids = apply_trans(neuromag_trans, [nasion, lpa, rpa])
            elp = apply_trans(neuromag_trans, elp)
            hsp = apply_trans(neuromag_trans, hsp)
        else:
            fids = [None] * 3

    if dev_head_t:
        # Fit device->head transform from paired HPI (device) / ELP (head)
        # points; requires both lists to be in the same order.
        from ..coreg import fit_matched_points
        trans = fit_matched_points(tgt_pts=elp, src_pts=hpi, out='trans')
    else:
        trans = np.identity(4)

    # fids is ordered (nasion, lpa, rpa) — see construction above.
    return DigMontage(hsp, hpi, elp, point_names, fids[0], fids[1], fids[2],
                      trans, dig_ch_pos)

Example 21

Project: spinalcordtoolbox
Source File: straighten.py
View license
def main():
    """Straighten the spinal cord of an anatomical image along its centerline.

    Command-line entry point (Python 2, Spinal Cord Toolbox script). Options:
      -i  input anatomical volume (mandatory)
      -c  centerline/segmentation volume (mandatory)
      -r  remove temporary files (0/1)
      -w  final interpolation for the warp ('spline', 'trilinear',
          'nearestneighbor')
      -f  centerline fitting method ('splines' or 'polynomial')
      -v  verbosity (2 enables matplotlib plots)

    Pipeline (all intermediate files live in a tmp.<timestamp> folder):
      1. reorient the centerline to RPI and take the per-slice center of mass
      2. fit the centerline (b-spline NURBS or polynomial)
      3. build paired "cross" landmarks along the curved and the straightened
         centerline
      4. estimate rigid + b-spline transforms with ANTs landmark tools and
         compose them into curve2straight / straight2curve warping fields
      5. apply the warp and write warp_curve2straight, warp_straight2curve and
         <anat>_straight into the current folder
    """
    
    # Initialization
    fname_anat = ''
    fname_centerline = ''
    gapxy = param.gapxy
    gapz = param.gapz
    padding = param.padding
    centerline_fitting = param.fitting_method
    remove_temp_files = param.remove_temp_files
    verbose = param.verbose
    interpolation_warp = param.interpolation_warp

    # get path of the toolbox
    status, path_sct = commands.getstatusoutput('echo $SCT_DIR')
    print path_sct
    # extract path of the script
    path_script = os.path.dirname(__file__)+'/'
    
    # Parameters for debug mode (hard-coded developer paths)
    if param.debug == 1:
        print '\n*** WARNING: DEBUG MODE ON ***\n'
        # fname_anat = path_sct+'/testing/data/errsm_23/t2/t2.nii.gz'
        # fname_centerline = path_sct+'/testing/data/errsm_23/t2/t2_segmentation_PropSeg.nii.gz'
        fname_anat = '/home/django/jtouati/data/cover_z_slices/errsm13_t2.nii.gz'
        fname_centerline = '/home/django/jtouati/data/cover_z_slices/segmentation_centerline_binary.nii.gz'
        remove_temp_files = 0
        centerline_fitting = 'splines'
        import matplotlib.pyplot as plt
        from mpl_toolkits.mplot3d import Axes3D
        verbose = 2
    
    # Check input param
    try:
        opts, args = getopt.getopt(sys.argv[1:],'hi:c:r:w:f:v:')
    except getopt.GetoptError as err:
        print str(err)
        usage()
    for opt, arg in opts:
        if opt == '-h':
            usage()
        elif opt in ('-i'):
            fname_anat = arg
        elif opt in ('-c'):
            fname_centerline = arg
        elif opt in ('-r'):
            remove_temp_files = int(arg)
        elif opt in ('-w'):
            interpolation_warp = str(arg)
        elif opt in ('-f'):
            centerline_fitting = str(arg)
        elif opt in ('-v'):
            verbose = int(arg)

    # display usage if a mandatory argument is not provided
    if fname_anat == '' or fname_centerline == '':
        usage()
    
    # Display usage if optional arguments are not correctly provided
    if centerline_fitting == '':
        centerline_fitting = 'splines'
    elif not centerline_fitting == '' and not centerline_fitting == 'splines' and not centerline_fitting == 'polynomial':
        print '\n \n -f argument is not valid \n \n'
        usage()
    
    # check existence of input files
    sct.check_file_exist(fname_anat)
    sct.check_file_exist(fname_centerline)

    # check interp method and map it to the matching ANTs flag
    if interpolation_warp == 'spline':
        interpolation_warp_ants = '--use-BSpline'
    elif interpolation_warp == 'trilinear':
        interpolation_warp_ants = ''
    elif interpolation_warp == 'nearestneighbor':
        interpolation_warp_ants = '--use-NN'
    else:
        print '\WARNING: Interpolation method not recognized. Using: '+param.interpolation_warp
        interpolation_warp_ants = '--use-BSpline'

    # Display arguments
    print '\nCheck input arguments...'
    print '  Input volume ...................... '+fname_anat
    print '  Centerline ........................ '+fname_centerline
    print '  Centerline fitting option ......... '+centerline_fitting
    print '  Final interpolation ............... '+interpolation_warp
    print '  Verbose ........................... '+str(verbose)
    print ''

    # if verbose 2, import matplotlib
    if verbose == 2:
        import matplotlib.pyplot as plt

    # Extract path/file/extension
    path_anat, file_anat, ext_anat = sct.extract_fname(fname_anat)
    path_centerline, file_centerline, ext_centerline = sct.extract_fname(fname_centerline)
    
    # create temporary folder
    path_tmp = 'tmp.'+time.strftime("%y%m%d%H%M%S")
    sct.run('mkdir '+path_tmp)

    # copy files into tmp folder
    sct.run('cp '+fname_anat+' '+path_tmp)
    sct.run('cp '+fname_centerline+' '+path_tmp)

    # go to tmp folder
    os.chdir(path_tmp)

    # Open centerline
    #==========================================================================================
    # Change orientation of the input centerline into RPI
    print '\nOrient centerline to RPI orientation...'
    fname_centerline_orient = 'tmp.centerline_rpi' + ext_centerline
    sct.run('sct_orientation -i ' + file_centerline + ext_centerline + ' -o ' + fname_centerline_orient + ' -orientation RPI')
    
    print '\nGet dimensions of input centerline...'
    nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension(fname_centerline_orient)
    print '.. matrix size: '+str(nx)+' x '+str(ny)+' x '+str(nz)
    print '.. voxel size:  '+str(px)+'mm x '+str(py)+'mm x '+str(pz)+'mm'
    
    print '\nOpen centerline volume...'
    file = nibabel.load(fname_centerline_orient)
    data = file.get_data()
    
    # loop across z and associate x,y coordinate with the point having maximum intensity
    x_centerline = [0 for iz in range(0, nz, 1)]
    y_centerline = [0 for iz in range(0, nz, 1)]
    z_centerline = [iz for iz in range(0, nz, 1)]
    x_centerline_deriv = [0 for iz in range(0, nz, 1)]
    y_centerline_deriv = [0 for iz in range(0, nz, 1)]
    z_centerline_deriv = [0 for iz in range(0, nz, 1)]
    
    # Two possible scenario:
    # 1. the centerline is probabilistic: each slice contains voxels with the probability of containing the centerline [0:...:1]
    # We only take the maximum value of the image to aproximate the centerline.
    # 2. The centerline/segmentation image contains many pixels per slice with values {0,1}.
    # We take all the points and approximate the centerline on all these points.
    #
    # x_seg_start, y_seg_start = (data[:,:,0]>0).nonzero()
    # x_seg_end, y_seg_end = (data[:,:,-1]>0).nonzero()
# REMOVED: 2014-07-18
    # check if centerline covers all the image
#    if len(x_seg_start)==0 or len(x_seg_end)==0:
#        print '\nERROR: centerline/segmentation must cover all "z" slices of the input image.\n' \
#              'To solve the problem, you need to crop the input image (you can use \'sct_crop_image\') and generate one' \
#              'more time the spinal cord centerline/segmentation from this cropped image.\n'
#        usage()
      #
    # X, Y, Z = ((data<1)*(data>0)).nonzero() # X is empty if binary image
    # if (len(X) > 0): # Scenario 1
    #     for iz in range(0, nz, 1):
    #         x_centerline[iz], y_centerline[iz] = numpy.unravel_index(data[:,:,iz].argmax(), data[:,:,iz].shape)
    # else: # Scenario 2
    #     for iz in range(0, nz, 1):
    #         print (data[:,:,iz]>0).nonzero()
    #         x_seg, y_seg = (data[:,:,iz]>0).nonzero()
    #         x_centerline[iz] = numpy.mean(x_seg)
    #         y_centerline[iz] = numpy.mean(y_seg)
    # # TODO: find a way to do the previous loop with this, which is more neat:
    # # [numpy.unravel_index(data[:,:,iz].argmax(), data[:,:,iz].shape) for iz in range(0,nz,1)]

    # get center of mass of the centerline/segmentation
    # (slices with no nonzero voxel yield NaN coordinates, handled below)
    print '\nGet center of mass of the centerline/segmentation...'
    for iz in range(0, nz, 1):
        x_centerline[iz], y_centerline[iz] = ndimage.measurements.center_of_mass(numpy.array(data[:,:,iz]))

    #print len(x_centerline),len(y_centerline)
    #print len((numpy.array(x_centerline)>=0).nonzero()[0]),len((numpy.array(y_centerline)>=0).nonzero()[0])
    
    x_seg_start, y_seg_start = (data[:,:,0]>0).nonzero()
    x_seg_end, y_seg_end = (data[:,:,-1]>0).nonzero()

    #check if centerline covers all the image
    if len(x_seg_start)==0 or len(x_seg_end)==0:
        sct.printv('\nWARNING : the centerline/segmentation you gave does not cover all "z" slices of the input image. Results should be improved if you crop the input image (you can use \'sct_crop_image\') and generate a new spinalcord centerline/segmentation from this cropped image.\n', 1, 'warning')
        # print '\nWARNING : the centerline/segmentation you gave does not cover all "z" slices of the input image.\n' \
        #       'Results should be improved if you crop the input image (you can use \'sct_crop_image\') and generate\n'\
        #       'a new spinalcord centerline/segmentation from this cropped image.\n'
        #print len((numpy.array(x_centerline)>=0).nonzero()[0]),len((numpy.array(y_centerline)>=0).nonzero()[0])
        # restrict the z range to the slices actually covered by the
        # centerline and drop the NaN entries produced by empty slices
        min_centerline = min((numpy.array(x_centerline)>=0).nonzero()[0])
        max_centerline = max((numpy.array(x_centerline)>=0).nonzero()[0])
        z_centerline = z_centerline[(min_centerline):(max_centerline+1)]
        #print len(z_centerline)
        nz = len(z_centerline)
        x_centerline = [ x for x in x_centerline if not isnan(x) ]
        y_centerline = [ y for y in y_centerline if not isnan(y) ]
        #print len(x_centerline),len(y_centerline)

    # clear variable
    del data

    # Fit the centerline points with the kind of curve given as argument of the script and return the new fitted coordinates
    if centerline_fitting == 'splines':
        x_centerline_fit, y_centerline_fit, x_centerline_deriv, y_centerline_deriv, z_centerline_deriv = msct_smooth.b_spline_nurbs(x_centerline,y_centerline,z_centerline)
        #x_centerline_fit, y_centerline_fit, x_centerline_deriv, y_centerline_deriv, z_centerline_deriv = b_spline_centerline(x_centerline,y_centerline,z_centerline)
    elif centerline_fitting == 'polynomial':
        x_centerline_fit, y_centerline_fit, polyx, polyy = polynome_centerline(x_centerline,y_centerline,z_centerline)
        #numpy.interp([i for i in xrange(0,min_centerline+1)],
        #y_centerline_fit

    #print z_centerline
    
    if verbose == 2:
        # plot centerline
        ax = plt.subplot(1,2,1)
        plt.plot(x_centerline, z_centerline, 'b:', label='centerline')
        plt.plot(x_centerline_fit, z_centerline, 'r-', label='fit')
        plt.xlabel('x')
        plt.ylabel('z')
        ax = plt.subplot(1,2,2)
        plt.plot(y_centerline, z_centerline, 'b:', label='centerline')
        plt.plot(y_centerline_fit, z_centerline, 'r-', label='fit')
        plt.xlabel('y')
        plt.ylabel('z')
        handles, labels = ax.get_legend_handles_labels()
        ax.legend(handles, labels)
        plt.show()

    
    # Get coordinates of landmarks along curved centerline
    #==========================================================================================
    print '\nGet coordinates of landmarks along curved centerline...'
    # landmarks are created along the curved centerline every z=gapz. They consist of a "cross" of size gapx and gapy.
    # find derivative of polynomial
    # NOTE(review): under Python 2, nz/gapz is integer (floor) division
    # before round() — confirm this is the intended step size.
    step_z = round(nz/gapz)
    #iz_curved = [i for i in range (0, nz, gapz)]
    iz_curved = [(min(z_centerline) + i*step_z) for i in range (0, gapz)]
    iz_curved.append(max(z_centerline))
    #print iz_curved, len(iz_curved)
    n_iz_curved = len(iz_curved)
    #print n_iz_curved
    landmark_curved = [ [ [ 0 for i in range(0,3)] for i in range(0,5) ] for i in iz_curved ]
    # print x_centerline_deriv,len(x_centerline_deriv)
    # landmark[a][b][c]
    #   a: index along z. E.g., the first cross with have index=0, the next index=1, and so on...
    #   b: index of element on the cross. I.e., 0: center of the cross, 1: +x, 2 -x, 3: +y, 4: -y
    #   c: dimension, i.e., 0: x, 1: y, 2: z
    # loop across index, which corresponds to iz (points along the centerline)
    
    if centerline_fitting=='polynomial':
        for index in range(0, n_iz_curved, 1):
            # set coordinates for landmark at the center of the cross
            landmark_curved[index][0][0], landmark_curved[index][0][1], landmark_curved[index][0][2] = x_centerline_fit[iz_curved[index]], y_centerline_fit[iz_curved[index]], iz_curved[index]
            # set x and z coordinates for landmarks +x and -x
            landmark_curved[index][1][2], landmark_curved[index][1][0], landmark_curved[index][2][2], landmark_curved[index][2][0] = get_points_perpendicular_to_curve(polyx, polyx.deriv(), iz_curved[index], gapxy)
            # set y coordinate to y_centerline_fit[iz] for elements 1 and 2 of the cross
            for i in range(1,3):
                landmark_curved[index][i][1] = y_centerline_fit[iz_curved[index]]
            # set coordinates for landmarks +y and -y. Here, x coordinate is 0 (already initialized).
            landmark_curved[index][3][2], landmark_curved[index][3][1], landmark_curved[index][4][2], landmark_curved[index][4][1] = get_points_perpendicular_to_curve(polyy, polyy.deriv(), iz_curved[index], gapxy)
            # set x coordinate to x_centerline_fit[iz] for elements 3 and 4 of the cross
            for i in range(3,5):
                landmark_curved[index][i][0] = x_centerline_fit[iz_curved[index]]
    
    elif centerline_fitting=='splines':
        # Cross arms are placed in the plane orthogonal to the centerline
        # tangent (a,b,c) at each landmark, solved symbolically with sympy.
        for index in range(0, n_iz_curved, 1):
            # calculate d (ax+by+cz+d=0)
            # print iz_curved[index]
            a=x_centerline_deriv[iz_curved[index]-min(z_centerline)]
            b=y_centerline_deriv[iz_curved[index]-min(z_centerline)]
            c=z_centerline_deriv[iz_curved[index]-min(z_centerline)]
            x=x_centerline_fit[iz_curved[index]-min(z_centerline)]
            y=y_centerline_fit[iz_curved[index]-min(z_centerline)]
            z=iz_curved[index]
            d=-(a*x+b*y+c*z)
            #print a,b,c,d,x,y,z
            # set coordinates for landmark at the center of the cross
            landmark_curved[index][0][0], landmark_curved[index][0][1], landmark_curved[index][0][2] = x_centerline_fit[iz_curved[index]-min(z_centerline)], y_centerline_fit[iz_curved[index]-min(z_centerline)], iz_curved[index]
            
            # set y coordinate to y_centerline_fit[iz] for elements 1 and 2 of the cross
            for i in range(1,3):
                landmark_curved[index][i][1] = y_centerline_fit[iz_curved[index]-min(z_centerline)]
            
            # set x and z coordinates for landmarks +x and -x, forcing de landmark to be in the orthogonal plan and the distance landmark/curve to be gapxy
            x_n=Symbol('x_n')
            landmark_curved[index][2][0],landmark_curved[index][1][0]=solve((x_n-x)**2+((-1/c)*(a*x_n+b*y+d)-z)**2-gapxy**2,x_n)  #x for -x and +x
            landmark_curved[index][1][2]=(-1/c)*(a*landmark_curved[index][1][0]+b*y+d)  #z for +x
            landmark_curved[index][2][2]=(-1/c)*(a*landmark_curved[index][2][0]+b*y+d)  #z for -x
            
            # set x coordinate to x_centerline_fit[iz] for elements 3 and 4 of the cross
            for i in range(3,5):
                landmark_curved[index][i][0] = x_centerline_fit[iz_curved[index]-min(z_centerline)]
            
            # set coordinates for landmarks +y and -y. Here, x coordinate is 0 (already initialized).
            y_n=Symbol('y_n')
            landmark_curved[index][4][1],landmark_curved[index][3][1]=solve((y_n-y)**2+((-1/c)*(a*x+b*y_n+d)-z)**2-gapxy**2,y_n)  #y for -y and +y
            landmark_curved[index][3][2]=(-1/c)*(a*x+b*landmark_curved[index][3][1]+d)#z for +y
            landmark_curved[index][4][2]=(-1/c)*(a*x+b*landmark_curved[index][4][1]+d)#z for -y
    
    
#    #display
#    fig = plt.figure()
#    ax = fig.add_subplot(111, projection='3d')
#    ax.plot(x_centerline_fit, y_centerline_fit,z_centerline, 'g')
#    ax.plot(x_centerline, y_centerline,z_centerline, 'r')
#    ax.plot([landmark_curved[i][j][0] for i in range(0, n_iz_curved) for j in range(0, 5)], \
#           [landmark_curved[i][j][1] for i in range(0, n_iz_curved) for j in range(0, 5)], \
#           [landmark_curved[i][j][2] for i in range(0, n_iz_curved) for j in range(0, 5)], '.')
#    ax.set_xlabel('x')
#    ax.set_ylabel('y')
#    ax.set_zlabel('z')
#    plt.show()

    # Get coordinates of landmarks along straight centerline
    #==========================================================================================
    print '\nGet coordinates of landmarks along straight centerline...'
    landmark_straight = [ [ [ 0 for i in range(0,3)] for i in range (0,5) ] for i in iz_curved ] # same structure as landmark_curved
    
    # calculate the z indices corresponding to the Euclidean distance between two consecutive points on the curved centerline (approximation curve --> line)
    iz_straight = [(min(z_centerline) + 0) for i in range (0,gapz+1)]
    #print iz_straight,len(iz_straight)
    for index in range(1, n_iz_curved, 1):
        # compute vector between two consecutive points on the curved centerline
        vector_centerline = [x_centerline_fit[iz_curved[index]-min(z_centerline)] - x_centerline_fit[iz_curved[index-1]-min(z_centerline)], \
                             y_centerline_fit[iz_curved[index]-min(z_centerline)] - y_centerline_fit[iz_curved[index-1]-min(z_centerline)], \
                             iz_curved[index] - iz_curved[index-1]]
        # compute norm of this vector
        norm_vector_centerline = numpy.linalg.norm(vector_centerline, ord=2)
        # round to closest integer value
        norm_vector_centerline_rounded = int(round(norm_vector_centerline,0))
        # assign this value to the current z-coordinate on the straight centerline
        iz_straight[index] = iz_straight[index-1] + norm_vector_centerline_rounded
    
    # initialize x0 and y0 to be at the center of the FOV
    x0 = int(round(nx/2))
    y0 = int(round(ny/2))
    for index in range(0, n_iz_curved, 1):
        # set coordinates for landmark at the center of the cross
        landmark_straight[index][0][0], landmark_straight[index][0][1], landmark_straight[index][0][2] = x0, y0, iz_straight[index]
        # set x, y and z coordinates for landmarks +x
        landmark_straight[index][1][0], landmark_straight[index][1][1], landmark_straight[index][1][2] = x0 + gapxy, y0, iz_straight[index]
        # set x, y and z coordinates for landmarks -x
        landmark_straight[index][2][0], landmark_straight[index][2][1], landmark_straight[index][2][2] = x0-gapxy, y0, iz_straight[index]
        # set x, y and z coordinates for landmarks +y
        landmark_straight[index][3][0], landmark_straight[index][3][1], landmark_straight[index][3][2] = x0, y0+gapxy, iz_straight[index]
        # set x, y and z coordinates for landmarks -y
        landmark_straight[index][4][0], landmark_straight[index][4][1], landmark_straight[index][4][2] = x0, y0-gapxy, iz_straight[index]
    
    # # display
    # fig = plt.figure()
    # ax = fig.add_subplot(111, projection='3d')
    # #ax.plot(x_centerline_fit, y_centerline_fit,z_centerline, 'r')
    # ax.plot([landmark_straight[i][j][0] for i in range(0, n_iz_curved) for j in range(0, 5)], \
    #        [landmark_straight[i][j][1] for i in range(0, n_iz_curved) for j in range(0, 5)], \
    #        [landmark_straight[i][j][2] for i in range(0, n_iz_curved) for j in range(0, 5)], '.')
    # ax.set_xlabel('x')
    # ax.set_ylabel('y')
    # ax.set_zlabel('z')
    # plt.show()
    #
    
    # Create NIFTI volumes with landmarks
    #==========================================================================================
    # Pad input volume to deal with the fact that some landmarks on the curved centerline might be outside the FOV
    # N.B. IT IS VERY IMPORTANT TO PAD ALSO ALONG X and Y, OTHERWISE SOME LANDMARKS MIGHT GET OUT OF THE FOV!!!
    print '\nPad input volume to deal with the fact that some landmarks on the curved centerline might be outside the FOV...'
    sct.run('isct_c3d '+fname_centerline_orient+' -pad '+str(padding)+'x'+str(padding)+'x'+str(padding)+'vox '+str(padding)+'x'+str(padding)+'x'+str(padding)+'vox 0 -o tmp.centerline_pad.nii.gz')
    
    # TODO: don't pad input volume: no need for that! instead, try to increase size of hdr when saving landmarks.
    
    # Open padded centerline for reading
    print '\nOpen padded centerline for reading...'
    file = nibabel.load('tmp.centerline_pad.nii.gz')
    data = file.get_data()
    hdr = file.get_header()
    
    # Create volumes containing curved and straight landmarks
    data_curved_landmarks = data * 0
    data_straight_landmarks = data * 0
    # initialize landmark value; each cross element gets a unique label so
    # the ANTs landmark tools can pair curved/straight points
    landmark_value = 1
    # Loop across cross index
    for index in range(0, n_iz_curved, 1):
        # loop across cross element index
        for i_element in range(0, 5, 1):
            # get x, y and z coordinates of curved landmark (rounded to closest integer)
            x, y, z = int(round(landmark_curved[index][i_element][0])), int(round(landmark_curved[index][i_element][1])), int(round(landmark_curved[index][i_element][2]))
            # attribute landmark_value to the voxel and its neighbours
            data_curved_landmarks[x+padding-1:x+padding+2, y+padding-1:y+padding+2, z+padding-1:z+padding+2] = landmark_value
            # get x, y and z coordinates of straight landmark (rounded to closest integer)
            x, y, z = int(round(landmark_straight[index][i_element][0])), int(round(landmark_straight[index][i_element][1])), int(round(landmark_straight[index][i_element][2]))
            # attribute landmark_value to the voxel and its neighbours
            data_straight_landmarks[x+padding-1:x+padding+2, y+padding-1:y+padding+2, z+padding-1:z+padding+2] = landmark_value
            # increment landmark value
            landmark_value = landmark_value + 1
    
    # Write NIFTI volumes
    hdr.set_data_dtype('uint32') # set output dtype to uint32 so landmark labels are not truncated
    print '\nWrite NIFTI volumes...'
    img = nibabel.Nifti1Image(data_curved_landmarks, None, hdr)
    nibabel.save(img, 'tmp.landmarks_curved.nii.gz')
    print '.. File created: tmp.landmarks_curved.nii.gz'
    img = nibabel.Nifti1Image(data_straight_landmarks, None, hdr)
    nibabel.save(img, 'tmp.landmarks_straight.nii.gz')
    print '.. File created: tmp.landmarks_straight.nii.gz'
    
    
    # Estimate deformation field by pairing landmarks
    #==========================================================================================
    
    # Dilate landmarks (because nearest neighbour interpolation will be later used, therefore some landmarks may "disapear" if they are single points)
    #print '\nDilate landmarks...'
    #sct.run(fsloutput+'fslmaths tmp.landmarks_curved.nii -kernel box 3x3x3 -dilD tmp.landmarks_curved_dilated -odt short')
    #sct.run(fsloutput+'fslmaths tmp.landmarks_straight.nii -kernel box 3x3x3 -dilD tmp.landmarks_straight_dilated -odt short')
    
    # Estimate rigid transformation
    print '\nEstimate rigid transformation between paired landmarks...'
    sct.run('isct_ANTSUseLandmarkImagesToGetAffineTransform tmp.landmarks_straight.nii.gz tmp.landmarks_curved.nii.gz rigid tmp.curve2straight_rigid.txt')
    
    # Apply rigid transformation
    print '\nApply rigid transformation to curved landmarks...'
    sct.run('sct_WarpImageMultiTransform 3 tmp.landmarks_curved.nii.gz tmp.landmarks_curved_rigid.nii.gz -R tmp.landmarks_straight.nii.gz tmp.curve2straight_rigid.txt --use-NN')
    
    # Estimate b-spline transformation curve --> straight
    print '\nEstimate b-spline transformation: curve --> straight...'
    sct.run('isct_ANTSUseLandmarkImagesToGetBSplineDisplacementField tmp.landmarks_straight.nii.gz tmp.landmarks_curved_rigid.nii.gz tmp.warp_curve2straight.nii.gz 5x5x5 3 2 0')
    
    # Concatenate rigid and non-linear transformations...
    print '\nConcatenate rigid and non-linear transformations...'
    #sct.run('isct_ComposeMultiTransform 3 tmp.warp_rigid.nii -R tmp.landmarks_straight.nii tmp.warp.nii tmp.curve2straight_rigid.txt')
    # TODO: use sct.run() when output from the following command will be different from 0 (currently there seem to be a bug)
    cmd = 'isct_ComposeMultiTransform 3 tmp.curve2straight.nii.gz -R tmp.landmarks_straight.nii.gz tmp.warp_curve2straight.nii.gz tmp.curve2straight_rigid.txt'
    print('>> '+cmd)
    commands.getstatusoutput(cmd)
    
    # Estimate b-spline transformation straight --> curve
    # TODO: invert warping field instead of estimating a new one
    print '\nEstimate b-spline transformation: straight --> curve...'
    sct.run('isct_ANTSUseLandmarkImagesToGetBSplineDisplacementField tmp.landmarks_curved_rigid.nii.gz tmp.landmarks_straight.nii.gz tmp.warp_straight2curve.nii.gz 5x5x5 3 2 0')
    
    # Concatenate rigid and non-linear transformations...
    print '\nConcatenate rigid and non-linear transformations...'
    #sct.run('isct_ComposeMultiTransform 3 tmp.warp_rigid.nii -R tmp.landmarks_straight.nii tmp.warp.nii tmp.curve2straight_rigid.txt')
    # TODO: use sct.run() when output from the following command will be different from 0 (currently there seem to be a bug)
    cmd = 'isct_ComposeMultiTransform 3 tmp.straight2curve.nii.gz -R tmp.landmarks_straight.nii.gz -i tmp.curve2straight_rigid.txt tmp.warp_straight2curve.nii.gz'
    print('>> '+cmd)
    commands.getstatusoutput(cmd)
    
    #print '\nPad input image...'
    #sct.run('isct_c3d '+fname_anat+' -pad '+str(padz)+'x'+str(padz)+'x'+str(padz)+'vox '+str(padz)+'x'+str(padz)+'x'+str(padz)+'vox 0 -o tmp.anat_pad.nii')
    
    # Unpad landmarks...
    # THIS WAS REMOVED ON 2014-06-03 because the output data was cropped at the edge, which caused landmarks to sometimes disappear
    # print '\nUnpad landmarks...'
    # sct.run('fslroi tmp.landmarks_straight.nii.gz tmp.landmarks_straight_crop.nii.gz '+str(padding)+' '+str(nx)+' '+str(padding)+' '+str(ny)+' '+str(padding)+' '+str(nz))
    
    # Apply deformation to input image
    print '\nApply transformation to input image...'
    sct.run('sct_WarpImageMultiTransform 3 '+file_anat+ext_anat+' tmp.anat_rigid_warp.nii.gz -R tmp.landmarks_straight.nii.gz '+interpolation_warp+ ' tmp.curve2straight.nii.gz')
    # sct.run('sct_WarpImageMultiTransform 3 '+fname_anat+' tmp.anat_rigid_warp.nii.gz -R tmp.landmarks_straight_crop.nii.gz '+interpolation_warp+ ' tmp.curve2straight.nii.gz')
    # NOTE(review): the command above passes interpolation_warp (e.g. 'spline')
    # rather than the mapped ANTs flag interpolation_warp_ants computed
    # earlier — confirm which one sct_WarpImageMultiTransform expects.
    
    # come back to parent folder
    os.chdir('..')

    # Generate output file (in current folder)
    # TODO: do not uncompress the warping field, it is too time consuming!
    print '\nGenerate output file (in current folder)...'
    sct.generate_output_file(path_tmp+'/tmp.curve2straight.nii.gz','','warp_curve2straight','.nii.gz')  # warping field
    sct.generate_output_file(path_tmp+'/tmp.straight2curve.nii.gz','','warp_straight2curve','.nii.gz')  # warping field
    sct.generate_output_file(path_tmp+'/tmp.anat_rigid_warp.nii.gz','',file_anat+'_straight',ext_anat)  # straightened anatomic

    # Remove temporary files
    if remove_temp_files == 1:
        print('\nRemove temporary files...')
        sct.run('rm -rf '+path_tmp)
    
    print '\nDone!\n'

Example 22

Project: spinalcordtoolbox
Source File: straighten.py
View license
def main():
    """Straighten the spinal cord of an anatomical image along its centerline.

    Command-line entry point (Python 2 script). Reads an anatomical volume
    (-i) and a centerline/segmentation volume (-c), fits the centerline
    (splines or polynomial), builds paired "cross" landmarks along the curved
    and straightened centerlines, estimates rigid + b-spline transformations
    with ANTs helper binaries, and applies them to produce a straightened
    anatomical image plus forward/inverse warping fields in the current
    folder.

    Side effects: creates and (optionally) removes a temporary folder,
    changes the working directory into it and back, runs external commands
    (sct_*, isct_*), and prints progress to stdout. Exits via usage() on
    bad arguments.
    """
    
    # Initialization
    fname_anat = ''
    fname_centerline = ''
    gapxy = param.gapxy
    gapz = param.gapz
    padding = param.padding
    centerline_fitting = param.fitting_method
    remove_temp_files = param.remove_temp_files
    verbose = param.verbose
    interpolation_warp = param.interpolation_warp

    # get path of the toolbox
    status, path_sct = commands.getstatusoutput('echo $SCT_DIR')
    print path_sct
    # extract path of the script
    path_script = os.path.dirname(__file__)+'/'
    
    # Parameters for debug mode
    if param.debug == 1:
        print '\n*** WARNING: DEBUG MODE ON ***\n'
        # fname_anat = path_sct+'/testing/data/errsm_23/t2/t2.nii.gz'
        # fname_centerline = path_sct+'/testing/data/errsm_23/t2/t2_segmentation_PropSeg.nii.gz'
        fname_anat = '/home/django/jtouati/data/cover_z_slices/errsm13_t2.nii.gz'
        fname_centerline = '/home/django/jtouati/data/cover_z_slices/segmentation_centerline_binary.nii.gz'
        remove_temp_files = 0
        centerline_fitting = 'splines'
        import matplotlib.pyplot as plt
        from mpl_toolkits.mplot3d import Axes3D
        verbose = 2
    
    # Check input param
    try:
        opts, args = getopt.getopt(sys.argv[1:],'hi:c:r:w:f:v:')
    except getopt.GetoptError as err:
        print str(err)
        usage()
    for opt, arg in opts:
        if opt == '-h':
            usage()
        elif opt in ('-i'):
            fname_anat = arg
        elif opt in ('-c'):
            fname_centerline = arg
        elif opt in ('-r'):
            remove_temp_files = int(arg)
        elif opt in ('-w'):
            interpolation_warp = str(arg)
        elif opt in ('-f'):
            centerline_fitting = str(arg)
        elif opt in ('-v'):
            verbose = int(arg)

    # display usage if a mandatory argument is not provided
    if fname_anat == '' or fname_centerline == '':
        usage()
    
    # Display usage if optional arguments are not correctly provided
    if centerline_fitting == '':
        centerline_fitting = 'splines'
    elif not centerline_fitting == '' and not centerline_fitting == 'splines' and not centerline_fitting == 'polynomial':
        print '\n \n -f argument is not valid \n \n'
        usage()
    
    # check existence of input files
    sct.check_file_exist(fname_anat)
    sct.check_file_exist(fname_centerline)

    # check interp method
    # NOTE(review): interpolation_warp_ants is computed here but the warp
    # application below passes interpolation_warp instead — confirm which
    # flag sct_WarpImageMultiTransform actually expects.
    if interpolation_warp == 'spline':
        interpolation_warp_ants = '--use-BSpline'
    elif interpolation_warp == 'trilinear':
        interpolation_warp_ants = ''
    elif interpolation_warp == 'nearestneighbor':
        interpolation_warp_ants = '--use-NN'
    else:
        # NOTE(review): '\W' in the string below is likely a typo for '\nWARNING'.
        print '\WARNING: Interpolation method not recognized. Using: '+param.interpolation_warp
        interpolation_warp_ants = '--use-BSpline'

    # Display arguments
    print '\nCheck input arguments...'
    print '  Input volume ...................... '+fname_anat
    print '  Centerline ........................ '+fname_centerline
    print '  Centerline fitting option ......... '+centerline_fitting
    print '  Final interpolation ............... '+interpolation_warp
    print '  Verbose ........................... '+str(verbose)
    print ''

    # if verbose 2, import matplotlib
    if verbose == 2:
        import matplotlib.pyplot as plt

    # Extract path/file/extension
    path_anat, file_anat, ext_anat = sct.extract_fname(fname_anat)
    path_centerline, file_centerline, ext_centerline = sct.extract_fname(fname_centerline)
    
    # create temporary folder
    path_tmp = 'tmp.'+time.strftime("%y%m%d%H%M%S")
    sct.run('mkdir '+path_tmp)

    # copy files into tmp folder
    sct.run('cp '+fname_anat+' '+path_tmp)
    sct.run('cp '+fname_centerline+' '+path_tmp)

    # go to tmp folder
    os.chdir(path_tmp)

    # Open centerline
    #==========================================================================================
    # Change orientation of the input centerline into RPI
    print '\nOrient centerline to RPI orientation...'
    fname_centerline_orient = 'tmp.centerline_rpi' + ext_centerline
    sct.run('sct_orientation -i ' + file_centerline + ext_centerline + ' -o ' + fname_centerline_orient + ' -orientation RPI')
    
    print '\nGet dimensions of input centerline...'
    nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension(fname_centerline_orient)
    print '.. matrix size: '+str(nx)+' x '+str(ny)+' x '+str(nz)
    print '.. voxel size:  '+str(px)+'mm x '+str(py)+'mm x '+str(pz)+'mm'
    
    print '\nOpen centerline volume...'
    file = nibabel.load(fname_centerline_orient)
    data = file.get_data()
    
    # loop across z and associate x,y coordinate with the point having maximum intensity
    # Pre-allocate per-slice centerline coordinates and derivatives.
    x_centerline = [0 for iz in range(0, nz, 1)]
    y_centerline = [0 for iz in range(0, nz, 1)]
    z_centerline = [iz for iz in range(0, nz, 1)]
    x_centerline_deriv = [0 for iz in range(0, nz, 1)]
    y_centerline_deriv = [0 for iz in range(0, nz, 1)]
    z_centerline_deriv = [0 for iz in range(0, nz, 1)]
    
    # Two possible scenario:
    # 1. the centerline is probabilistic: each slice contains voxels with the probability of containing the centerline [0:...:1]
    # We only take the maximum value of the image to aproximate the centerline.
    # 2. The centerline/segmentation image contains many pixels per slice with values {0,1}.
    # We take all the points and approximate the centerline on all these points.
    #
    # x_seg_start, y_seg_start = (data[:,:,0]>0).nonzero()
    # x_seg_end, y_seg_end = (data[:,:,-1]>0).nonzero()
# REMOVED: 2014-07-18
    # check if centerline covers all the image
#    if len(x_seg_start)==0 or len(x_seg_end)==0:
#        print '\nERROR: centerline/segmentation must cover all "z" slices of the input image.\n' \
#              'To solve the problem, you need to crop the input image (you can use \'sct_crop_image\') and generate one' \
#              'more time the spinal cord centerline/segmentation from this cropped image.\n'
#        usage()
      #
    # X, Y, Z = ((data<1)*(data>0)).nonzero() # X is empty if binary image
    # if (len(X) > 0): # Scenario 1
    #     for iz in range(0, nz, 1):
    #         x_centerline[iz], y_centerline[iz] = numpy.unravel_index(data[:,:,iz].argmax(), data[:,:,iz].shape)
    # else: # Scenario 2
    #     for iz in range(0, nz, 1):
    #         print (data[:,:,iz]>0).nonzero()
    #         x_seg, y_seg = (data[:,:,iz]>0).nonzero()
    #         x_centerline[iz] = numpy.mean(x_seg)
    #         y_centerline[iz] = numpy.mean(y_seg)
    # # TODO: find a way to do the previous loop with this, which is more neat:
    # # [numpy.unravel_index(data[:,:,iz].argmax(), data[:,:,iz].shape) for iz in range(0,nz,1)]

    # get center of mass of the centerline/segmentation
    # NOTE: center_of_mass returns NaN for all-zero slices; those are
    # filtered out below when the centerline does not cover every slice.
    print '\nGet center of mass of the centerline/segmentation...'
    for iz in range(0, nz, 1):
        x_centerline[iz], y_centerline[iz] = ndimage.measurements.center_of_mass(numpy.array(data[:,:,iz]))

    #print len(x_centerline),len(y_centerline)
    #print len((numpy.array(x_centerline)>=0).nonzero()[0]),len((numpy.array(y_centerline)>=0).nonzero()[0])
    
    x_seg_start, y_seg_start = (data[:,:,0]>0).nonzero()
    x_seg_end, y_seg_end = (data[:,:,-1]>0).nonzero()

    #check if centerline covers all the image
    if len(x_seg_start)==0 or len(x_seg_end)==0:
        sct.printv('\nWARNING : the centerline/segmentation you gave does not cover all "z" slices of the input image. Results should be improved if you crop the input image (you can use \'sct_crop_image\') and generate a new spinalcord centerline/segmentation from this cropped image.\n', 1, 'warning')
        # print '\nWARNING : the centerline/segmentation you gave does not cover all "z" slices of the input image.\n' \
        #       'Results should be improved if you crop the input image (you can use \'sct_crop_image\') and generate\n'\
        #       'a new spinalcord centerline/segmentation from this cropped image.\n'
        #print len((numpy.array(x_centerline)>=0).nonzero()[0]),len((numpy.array(y_centerline)>=0).nonzero()[0])
        # Restrict z range to slices where a center of mass was found
        # (NaN comparisons are False, so >=0 selects valid slices).
        min_centerline = min((numpy.array(x_centerline)>=0).nonzero()[0])
        max_centerline = max((numpy.array(x_centerline)>=0).nonzero()[0])
        z_centerline = z_centerline[(min_centerline):(max_centerline+1)]
        #print len(z_centerline)
        nz = len(z_centerline)
        x_centerline = [ x for x in x_centerline if not isnan(x) ]
        y_centerline = [ y for y in y_centerline if not isnan(y) ]
        #print len(x_centerline),len(y_centerline)

    # clear variable
    del data

    # Fit the centerline points with the kind of curve given as argument of the script and return the new fitted coordinates
    if centerline_fitting == 'splines':
        x_centerline_fit, y_centerline_fit, x_centerline_deriv, y_centerline_deriv, z_centerline_deriv = msct_smooth.b_spline_nurbs(x_centerline,y_centerline,z_centerline)
        #x_centerline_fit, y_centerline_fit, x_centerline_deriv, y_centerline_deriv, z_centerline_deriv = b_spline_centerline(x_centerline,y_centerline,z_centerline)
    elif centerline_fitting == 'polynomial':
        x_centerline_fit, y_centerline_fit, polyx, polyy = polynome_centerline(x_centerline,y_centerline,z_centerline)
        #numpy.interp([i for i in xrange(0,min_centerline+1)],
        #y_centerline_fit

    #print z_centerline
    
    if verbose == 2:
        # plot centerline
        ax = plt.subplot(1,2,1)
        plt.plot(x_centerline, z_centerline, 'b:', label='centerline')
        plt.plot(x_centerline_fit, z_centerline, 'r-', label='fit')
        plt.xlabel('x')
        plt.ylabel('z')
        ax = plt.subplot(1,2,2)
        plt.plot(y_centerline, z_centerline, 'b:', label='centerline')
        plt.plot(y_centerline_fit, z_centerline, 'r-', label='fit')
        plt.xlabel('y')
        plt.ylabel('z')
        handles, labels = ax.get_legend_handles_labels()
        ax.legend(handles, labels)
        plt.show()

    
    # Get coordinates of landmarks along curved centerline
    #==========================================================================================
    print '\nGet coordinates of landmarks along curved centerline...'
    # landmarks are created along the curved centerline every z=gapz. They consist of a "cross" of size gapx and gapy.
    # find derivative of polynomial
    # NOTE(review): under Python 2, nz/gapz is integer division before
    # round(), so step_z is a whole-valued float — confirm the intended
    # landmark spacing granularity.
    step_z = round(nz/gapz)
    #iz_curved = [i for i in range (0, nz, gapz)]
    iz_curved = [(min(z_centerline) + i*step_z) for i in range (0, gapz)]
    iz_curved.append(max(z_centerline))
    #print iz_curved, len(iz_curved)
    n_iz_curved = len(iz_curved)
    #print n_iz_curved
    landmark_curved = [ [ [ 0 for i in range(0,3)] for i in range(0,5) ] for i in iz_curved ]
    # print x_centerline_deriv,len(x_centerline_deriv)
    # landmark[a][b][c]
    #   a: index along z. E.g., the first cross with have index=0, the next index=1, and so on...
    #   b: index of element on the cross. I.e., 0: center of the cross, 1: +x, 2 -x, 3: +y, 4: -y
    #   c: dimension, i.e., 0: x, 1: y, 2: z
    # loop across index, which corresponds to iz (points along the centerline)
    
    if centerline_fitting=='polynomial':
        for index in range(0, n_iz_curved, 1):
            # set coordinates for landmark at the center of the cross
            landmark_curved[index][0][0], landmark_curved[index][0][1], landmark_curved[index][0][2] = x_centerline_fit[iz_curved[index]], y_centerline_fit[iz_curved[index]], iz_curved[index]
            # set x and z coordinates for landmarks +x and -x
            landmark_curved[index][1][2], landmark_curved[index][1][0], landmark_curved[index][2][2], landmark_curved[index][2][0] = get_points_perpendicular_to_curve(polyx, polyx.deriv(), iz_curved[index], gapxy)
            # set y coordinate to y_centerline_fit[iz] for elements 1 and 2 of the cross
            for i in range(1,3):
                landmark_curved[index][i][1] = y_centerline_fit[iz_curved[index]]
            # set coordinates for landmarks +y and -y. Here, x coordinate is 0 (already initialized).
            landmark_curved[index][3][2], landmark_curved[index][3][1], landmark_curved[index][4][2], landmark_curved[index][4][1] = get_points_perpendicular_to_curve(polyy, polyy.deriv(), iz_curved[index], gapxy)
            # set x coordinate to x_centerline_fit[iz] for elements 3 and 4 of the cross
            for i in range(3,5):
                landmark_curved[index][i][0] = x_centerline_fit[iz_curved[index]]
    
    elif centerline_fitting=='splines':
        for index in range(0, n_iz_curved, 1):
            # calculate d (ax+by+cz+d=0)
            # (a,b,c) is the tangent of the fitted centerline at this z; the
            # plane ax+by+cz+d=0 is therefore orthogonal to the centerline.
            # print iz_curved[index]
            a=x_centerline_deriv[iz_curved[index]-min(z_centerline)]
            b=y_centerline_deriv[iz_curved[index]-min(z_centerline)]
            c=z_centerline_deriv[iz_curved[index]-min(z_centerline)]
            x=x_centerline_fit[iz_curved[index]-min(z_centerline)]
            y=y_centerline_fit[iz_curved[index]-min(z_centerline)]
            z=iz_curved[index]
            d=-(a*x+b*y+c*z)
            #print a,b,c,d,x,y,z
            # set coordinates for landmark at the center of the cross
            landmark_curved[index][0][0], landmark_curved[index][0][1], landmark_curved[index][0][2] = x_centerline_fit[iz_curved[index]-min(z_centerline)], y_centerline_fit[iz_curved[index]-min(z_centerline)], iz_curved[index]
            
            # set y coordinate to y_centerline_fit[iz] for elements 1 and 2 of the cross
            for i in range(1,3):
                landmark_curved[index][i][1] = y_centerline_fit[iz_curved[index]-min(z_centerline)]
            
            # set x and z coordinates for landmarks +x and -x, forcing de landmark to be in the orthogonal plan and the distance landmark/curve to be gapxy
            # sympy solves the quadratic (distance = gapxy constrained to the
            # orthogonal plane); the two roots are the -x and +x landmarks.
            x_n=Symbol('x_n')
            landmark_curved[index][2][0],landmark_curved[index][1][0]=solve((x_n-x)**2+((-1/c)*(a*x_n+b*y+d)-z)**2-gapxy**2,x_n)  #x for -x and +x
            landmark_curved[index][1][2]=(-1/c)*(a*landmark_curved[index][1][0]+b*y+d)  #z for +x
            landmark_curved[index][2][2]=(-1/c)*(a*landmark_curved[index][2][0]+b*y+d)  #z for -x
            
            # set x coordinate to x_centerline_fit[iz] for elements 3 and 4 of the cross
            for i in range(3,5):
                landmark_curved[index][i][0] = x_centerline_fit[iz_curved[index]-min(z_centerline)]
            
            # set coordinates for landmarks +y and -y. Here, x coordinate is 0 (already initialized).
            y_n=Symbol('y_n')
            landmark_curved[index][4][1],landmark_curved[index][3][1]=solve((y_n-y)**2+((-1/c)*(a*x+b*y_n+d)-z)**2-gapxy**2,y_n)  #y for -y and +y
            landmark_curved[index][3][2]=(-1/c)*(a*x+b*landmark_curved[index][3][1]+d)#z for +y
            landmark_curved[index][4][2]=(-1/c)*(a*x+b*landmark_curved[index][4][1]+d)#z for -y
    
    
#    #display
#    fig = plt.figure()
#    ax = fig.add_subplot(111, projection='3d')
#    ax.plot(x_centerline_fit, y_centerline_fit,z_centerline, 'g')
#    ax.plot(x_centerline, y_centerline,z_centerline, 'r')
#    ax.plot([landmark_curved[i][j][0] for i in range(0, n_iz_curved) for j in range(0, 5)], \
#           [landmark_curved[i][j][1] for i in range(0, n_iz_curved) for j in range(0, 5)], \
#           [landmark_curved[i][j][2] for i in range(0, n_iz_curved) for j in range(0, 5)], '.')
#    ax.set_xlabel('x')
#    ax.set_ylabel('y')
#    ax.set_zlabel('z')
#    plt.show()

    # Get coordinates of landmarks along straight centerline
    #==========================================================================================
    print '\nGet coordinates of landmarks along straight centerline...'
    landmark_straight = [ [ [ 0 for i in range(0,3)] for i in range (0,5) ] for i in iz_curved ] # same structure as landmark_curved
    
    # calculate the z indices corresponding to the Euclidean distance between two consecutive points on the curved centerline (approximation curve --> line)
    iz_straight = [(min(z_centerline) + 0) for i in range (0,gapz+1)]
    #print iz_straight,len(iz_straight)
    for index in range(1, n_iz_curved, 1):
        # compute vector between two consecutive points on the curved centerline
        vector_centerline = [x_centerline_fit[iz_curved[index]-min(z_centerline)] - x_centerline_fit[iz_curved[index-1]-min(z_centerline)], \
                             y_centerline_fit[iz_curved[index]-min(z_centerline)] - y_centerline_fit[iz_curved[index-1]-min(z_centerline)], \
                             iz_curved[index] - iz_curved[index-1]]
        # compute norm of this vector
        norm_vector_centerline = numpy.linalg.norm(vector_centerline, ord=2)
        # round to closest integer value
        norm_vector_centerline_rounded = int(round(norm_vector_centerline,0))
        # assign this value to the current z-coordinate on the straight centerline
        iz_straight[index] = iz_straight[index-1] + norm_vector_centerline_rounded
    
    # initialize x0 and y0 to be at the center of the FOV
    x0 = int(round(nx/2))
    y0 = int(round(ny/2))
    for index in range(0, n_iz_curved, 1):
        # set coordinates for landmark at the center of the cross
        landmark_straight[index][0][0], landmark_straight[index][0][1], landmark_straight[index][0][2] = x0, y0, iz_straight[index]
        # set x, y and z coordinates for landmarks +x
        landmark_straight[index][1][0], landmark_straight[index][1][1], landmark_straight[index][1][2] = x0 + gapxy, y0, iz_straight[index]
        # set x, y and z coordinates for landmarks -x
        landmark_straight[index][2][0], landmark_straight[index][2][1], landmark_straight[index][2][2] = x0-gapxy, y0, iz_straight[index]
        # set x, y and z coordinates for landmarks +y
        landmark_straight[index][3][0], landmark_straight[index][3][1], landmark_straight[index][3][2] = x0, y0+gapxy, iz_straight[index]
        # set x, y and z coordinates for landmarks -y
        landmark_straight[index][4][0], landmark_straight[index][4][1], landmark_straight[index][4][2] = x0, y0-gapxy, iz_straight[index]
    
    # # display
    # fig = plt.figure()
    # ax = fig.add_subplot(111, projection='3d')
    # #ax.plot(x_centerline_fit, y_centerline_fit,z_centerline, 'r')
    # ax.plot([landmark_straight[i][j][0] for i in range(0, n_iz_curved) for j in range(0, 5)], \
    #        [landmark_straight[i][j][1] for i in range(0, n_iz_curved) for j in range(0, 5)], \
    #        [landmark_straight[i][j][2] for i in range(0, n_iz_curved) for j in range(0, 5)], '.')
    # ax.set_xlabel('x')
    # ax.set_ylabel('y')
    # ax.set_zlabel('z')
    # plt.show()
    #
    
    # Create NIFTI volumes with landmarks
    #==========================================================================================
    # Pad input volume to deal with the fact that some landmarks on the curved centerline might be outside the FOV
    # N.B. IT IS VERY IMPORTANT TO PAD ALSO ALONG X and Y, OTHERWISE SOME LANDMARKS MIGHT GET OUT OF THE FOV!!!
    print '\nPad input volume to deal with the fact that some landmarks on the curved centerline might be outside the FOV...'
    sct.run('isct_c3d '+fname_centerline_orient+' -pad '+str(padding)+'x'+str(padding)+'x'+str(padding)+'vox '+str(padding)+'x'+str(padding)+'x'+str(padding)+'vox 0 -o tmp.centerline_pad.nii.gz')
    
    # TODO: don't pad input volume: no need for that! instead, try to increase size of hdr when saving landmarks.
    
    # Open padded centerline for reading
    print '\nOpen padded centerline for reading...'
    file = nibabel.load('tmp.centerline_pad.nii.gz')
    data = file.get_data()
    hdr = file.get_header()
    
    # Create volumes containing curved and straight landmarks
    data_curved_landmarks = data * 0
    data_straight_landmarks = data * 0
    # initialize landmark value
    landmark_value = 1
    # Loop across cross index
    for index in range(0, n_iz_curved, 1):
        # loop across cross element index
        for i_element in range(0, 5, 1):
            # get x, y and z coordinates of curved landmark (rounded to closest integer)
            x, y, z = int(round(landmark_curved[index][i_element][0])), int(round(landmark_curved[index][i_element][1])), int(round(landmark_curved[index][i_element][2]))
            # attribute landmark_value to the voxel and its neighbours
            data_curved_landmarks[x+padding-1:x+padding+2, y+padding-1:y+padding+2, z+padding-1:z+padding+2] = landmark_value
            # get x, y and z coordinates of straight landmark (rounded to closest integer)
            x, y, z = int(round(landmark_straight[index][i_element][0])), int(round(landmark_straight[index][i_element][1])), int(round(landmark_straight[index][i_element][2]))
            # attribute landmark_value to the voxel and its neighbours
            data_straight_landmarks[x+padding-1:x+padding+2, y+padding-1:y+padding+2, z+padding-1:z+padding+2] = landmark_value
            # increment landmark value
            landmark_value = landmark_value + 1
    
    # Write NIFTI volumes
    hdr.set_data_dtype('uint32') # set imagetype to uint8 #TODO: maybe use int32
    print '\nWrite NIFTI volumes...'
    img = nibabel.Nifti1Image(data_curved_landmarks, None, hdr)
    nibabel.save(img, 'tmp.landmarks_curved.nii.gz')
    print '.. File created: tmp.landmarks_curved.nii.gz'
    img = nibabel.Nifti1Image(data_straight_landmarks, None, hdr)
    nibabel.save(img, 'tmp.landmarks_straight.nii.gz')
    print '.. File created: tmp.landmarks_straight.nii.gz'
    
    
    # Estimate deformation field by pairing landmarks
    #==========================================================================================
    
    # Dilate landmarks (because nearest neighbour interpolation will be later used, therefore some landmarks may "disapear" if they are single points)
    #print '\nDilate landmarks...'
    #sct.run(fsloutput+'fslmaths tmp.landmarks_curved.nii -kernel box 3x3x3 -dilD tmp.landmarks_curved_dilated -odt short')
    #sct.run(fsloutput+'fslmaths tmp.landmarks_straight.nii -kernel box 3x3x3 -dilD tmp.landmarks_straight_dilated -odt short')
    
    # Estimate rigid transformation
    print '\nEstimate rigid transformation between paired landmarks...'
    sct.run('isct_ANTSUseLandmarkImagesToGetAffineTransform tmp.landmarks_straight.nii.gz tmp.landmarks_curved.nii.gz rigid tmp.curve2straight_rigid.txt')
    
    # Apply rigid transformation
    print '\nApply rigid transformation to curved landmarks...'
    sct.run('sct_WarpImageMultiTransform 3 tmp.landmarks_curved.nii.gz tmp.landmarks_curved_rigid.nii.gz -R tmp.landmarks_straight.nii.gz tmp.curve2straight_rigid.txt --use-NN')
    
    # Estimate b-spline transformation curve --> straight
    print '\nEstimate b-spline transformation: curve --> straight...'
    sct.run('isct_ANTSUseLandmarkImagesToGetBSplineDisplacementField tmp.landmarks_straight.nii.gz tmp.landmarks_curved_rigid.nii.gz tmp.warp_curve2straight.nii.gz 5x5x5 3 2 0')
    
    # Concatenate rigid and non-linear transformations...
    print '\nConcatenate rigid and non-linear transformations...'
    #sct.run('isct_ComposeMultiTransform 3 tmp.warp_rigid.nii -R tmp.landmarks_straight.nii tmp.warp.nii tmp.curve2straight_rigid.txt')
    # TODO: use sct.run() when output from the following command will be different from 0 (currently there seem to be a bug)
    cmd = 'isct_ComposeMultiTransform 3 tmp.curve2straight.nii.gz -R tmp.landmarks_straight.nii.gz tmp.warp_curve2straight.nii.gz tmp.curve2straight_rigid.txt'
    print('>> '+cmd)
    commands.getstatusoutput(cmd)
    
    # Estimate b-spline transformation straight --> curve
    # TODO: invert warping field instead of estimating a new one
    print '\nEstimate b-spline transformation: straight --> curve...'
    sct.run('isct_ANTSUseLandmarkImagesToGetBSplineDisplacementField tmp.landmarks_curved_rigid.nii.gz tmp.landmarks_straight.nii.gz tmp.warp_straight2curve.nii.gz 5x5x5 3 2 0')
    
    # Concatenate rigid and non-linear transformations...
    print '\nConcatenate rigid and non-linear transformations...'
    #sct.run('isct_ComposeMultiTransform 3 tmp.warp_rigid.nii -R tmp.landmarks_straight.nii tmp.warp.nii tmp.curve2straight_rigid.txt')
    # TODO: use sct.run() when output from the following command will be different from 0 (currently there seem to be a bug)
    cmd = 'isct_ComposeMultiTransform 3 tmp.straight2curve.nii.gz -R tmp.landmarks_straight.nii.gz -i tmp.curve2straight_rigid.txt tmp.warp_straight2curve.nii.gz'
    print('>> '+cmd)
    commands.getstatusoutput(cmd)
    
    #print '\nPad input image...'
    #sct.run('isct_c3d '+fname_anat+' -pad '+str(padz)+'x'+str(padz)+'x'+str(padz)+'vox '+str(padz)+'x'+str(padz)+'x'+str(padz)+'vox 0 -o tmp.anat_pad.nii')
    
    # Unpad landmarks...
    # THIS WAS REMOVED ON 2014-06-03 because the output data was cropped at the edge, which caused landmarks to sometimes disappear
    # print '\nUnpad landmarks...'
    # sct.run('fslroi tmp.landmarks_straight.nii.gz tmp.landmarks_straight_crop.nii.gz '+str(padding)+' '+str(nx)+' '+str(padding)+' '+str(ny)+' '+str(padding)+' '+str(nz))
    
    # Apply deformation to input image
    print '\nApply transformation to input image...'
    sct.run('sct_WarpImageMultiTransform 3 '+file_anat+ext_anat+' tmp.anat_rigid_warp.nii.gz -R tmp.landmarks_straight.nii.gz '+interpolation_warp+ ' tmp.curve2straight.nii.gz')
    # sct.run('sct_WarpImageMultiTransform 3 '+fname_anat+' tmp.anat_rigid_warp.nii.gz -R tmp.landmarks_straight_crop.nii.gz '+interpolation_warp+ ' tmp.curve2straight.nii.gz')
    
    # come back to parent folder
    os.chdir('..')

    # Generate output file (in current folder)
    # TODO: do not uncompress the warping field, it is too time consuming!
    print '\nGenerate output file (in current folder)...'
    sct.generate_output_file(path_tmp+'/tmp.curve2straight.nii.gz','','warp_curve2straight','.nii.gz')  # warping field
    sct.generate_output_file(path_tmp+'/tmp.straight2curve.nii.gz','','warp_straight2curve','.nii.gz')  # warping field
    sct.generate_output_file(path_tmp+'/tmp.anat_rigid_warp.nii.gz','',file_anat+'_straight',ext_anat)  # straightened anatomic

    # Remove temporary files
    if remove_temp_files == 1:
        print('\nRemove temporary files...')
        sct.run('rm -rf '+path_tmp)
    
    print '\nDone!\n'

Example 23

Project: lineid_plot
Source File: lineid_plot.py
View license
def plot_line_ids(wave, flux, line_wave, line_label1, label1_size=None,
                  extend=True, annotate_kwargs=None, plot_kwargs=None,
                  **kwargs):
    """Label features with automatic layout of labels.

    Parameters
    ----------
    wave: list or array of floats
        Wave lengths of data.
    flux: list or array of floats
        Flux at each wavelength.
    line_wave: list or array of floats
        Wave length of features to be labelled.
    line_label1: list of strings
        Label text for each line.
    label1_size: list of floats
        Font size in points. If not given the default value in
        Matplotlib is used. This is typically 12.
    extend: boolean or list of boolean values
        For those lines for which this keyword is True, a dashed line
        will be drawn from the tip of the annotation to the flux at the
        line.
    annotate_kwargs : dict, optional
        Keyword arguments to pass to `annotate`, e.g. color.

        Default value is obtained by calling ``initial_annotate_kwargs()``.
    plot_kwargs : dict, optional
        Keyword arguments to pass to `plot`, e.g. color.

        Default value is obtained by calling ``initial_plot_kwargs()``.
    kwargs: key value pairs
        All of these keywords are optional.

        The following keys are recognized:

          ax : Matplotlib Axes
              The Axes in which the labels are to be placed. If not
              given a new Axes is created.
          fig: Matplotlib Figure
              The figure in which the labels are to be placed. If `ax`
              if given then keyword is then ignored. The figure
              associated with `ax` is used. If `fig` and `ax` are not
              given then a new figure is created and an axes is added
              to it.
          arrow_tip: scalar or list of floats
              The location of the annotation point, in data coords. If
              the value is scalar then it is used for all. Default
              value is the upper bound of the Axes, at the time of
              plotting.
          box_loc: scalar or list of floats
              The y axis location of the text label boxes, in data
              units. The default is to place it above the `arrow_tip`
              by `box_axes_space` units in figure fraction length.
          box_axes_space: float
              If no `box_loc` is given then the y position of label
              boxes is set to `arrow_tip` + this many figure fraction
              units. The default is 0.06. This ensures that the label
              layout appearance is independent of the y data range.
          max_iter: int
              Maximum iterations to use. Default is set to 1000.

    Returns
    -------
    fig, ax: Matplotlib Figure, Matplotlib Axes
        Figure instance on which the labels were placed and the Axes
        instance on which the labels were placed. These can be used for
        further customizations. For example, some labels can be hidden
        by accessing the corresponding `Text` instance form the
        `ax.texts` list.

    Notes
    -----
    + By default the labels are placed along the top of the Axes. The
      annotation point is on the top boundary of the Axes at the y
      location of the line. The y location of the boxes are 0.06 figure
      fraction units above the annotation location. This value can be
      customized using the `box_axes_space` parameter. The value must
      be in figure fractions units. Y location of both labels and
      annotation points can be changed using `arrow_tip` and `box_loc`
      parameters.
    + If `arrow_tip` parameter is given then it is used as the
      annotation point. This can be a list in which case each line can
      have its own annotation point.
    + If `box_loc` is given, then the boxes are placed at this
      position. This too can be a list.
    + `arrow_tip` and `box_loc` are the "y" components of `xy` and
      `xyann` parameters accepted by the `annotate` function in
      Matplotlib.
    + If the `extend` keyword is True then a line is drawn from the
      annotation point to the flux at the line wavelength. The flux is
      calculated by linear interpolation. This parameter can be a list,
      with one value for each line.
    + The maximum iterations to be used can be customized using the
      `max_iter` keyword parameter.

    """
    # Avoid mutable default arguments: a shared {} default would be
    # visible (and mutable) across all calls of this function.
    if annotate_kwargs is None:
        annotate_kwargs = {}
    if plot_kwargs is None:
        plot_kwargs = {}

    wave = np.array(wave)
    flux = np.array(flux)
    line_wave = np.array(line_wave)
    line_label1 = np.array(line_label1)

    nlines = len(line_wave)
    assert nlines == len(line_label1), "Each line must have a label."

    if label1_size is None:
        label1_size = np.array([12] * nlines)
    # Fixed typo in the parameter name reported on conversion errors
    # ("lable1_size" -> "label1_size").
    label1_size = _convert_to_array(label1_size, nlines, "label1_size")

    extend = _convert_to_array(extend, nlines, "extend")

    # Sort wavelengths and keep labels/sizes aligned with their lines.
    indx = np.argsort(wave)
    wave[:] = wave[indx]
    flux[:] = flux[indx]
    indx = np.argsort(line_wave)
    line_wave[:] = line_wave[indx]
    line_label1[:] = line_label1[indx]
    label1_size[:] = label1_size[indx]

    # Flux at the line wavelengths (linear interpolation).
    line_flux = get_line_flux(line_wave, wave, flux)

    # Figure and Axes. If Axes is given then use it. If not, create
    # figure, if not given, and add Axes to it using a default
    # layout. Also plot the data in the Axes.
    ax = kwargs.get("ax", None)
    if not ax:
        fig = kwargs.get("fig", None)
        fig, ax = prepare_axes(wave, flux, fig)
    else:
        fig = ax.figure

    # Find location of the tip of the arrow. Either the top edge of the
    # Axes or the given data coordinates.
    ax_bounds = ax.get_ybound()
    arrow_tip = kwargs.get("arrow_tip", ax_bounds[1])
    arrow_tip = _convert_to_array(arrow_tip, nlines, "arrow_tip")

    # The y location of boxes from the arrow tips. Either given heights
    # in data coordinates or use `box_axes_space` in figure
    # fraction. The latter has a default value which is used when no
    # box locations are given. Figure coordiantes are used so that the
    # y location does not dependent on the data y range.
    box_loc = kwargs.get("box_loc", None)
    # Test against None (not truthiness) so that a falsy but valid
    # location such as 0 is honoured rather than silently replaced.
    if box_loc is None:
        box_axes_space = kwargs.get("box_axes_space", 0.06)
        box_loc = get_box_loc(fig, ax, line_wave, arrow_tip, box_axes_space)
    else:
        box_loc = _convert_to_array(box_loc, nlines, "box_loc")
        box_loc = tuple(zip(line_wave, box_loc))

    # If any labels are repeated add "_num_#" to it. If there are 3 "X"
    # then the first gets "X_num_3". The result is passed as the label
    # parameter of annotate. This makes it easy to find the box
    # corresponding to a label using Figure.findobj.
    label_u = unique_labels(line_label1)

    ak = initial_annotate_kwargs()
    ak.update(annotate_kwargs)
    pk = initial_plot_kwargs()
    pk.update(plot_kwargs)
    # Draw boxes at initial (x, y) location.
    for i in range(nlines):
        ax.annotate(line_label1[i], xy=(line_wave[i], arrow_tip[i]),
                    xytext=(box_loc[i][0],
                            box_loc[i][1]),

                    fontsize=label1_size[i],
                    label=label_u[i],
                    **ak)
        if extend[i]:
            ax.plot([line_wave[i]] * 2, [arrow_tip[i], line_flux[i]],
                    scalex=False, scaley=False,
                    label=label_u[i] + "_line",
                    **pk)

    # Draw the figure so that get_window_extent() below works.
    fig.canvas.draw()

    # Get annotation boxes and convert their dimensions from display
    # coordinates to data coordinates. Specifically, we want the width
    # in wavelength units. For each annotation box, transform the
    # bounding box into data coordinates and extract the width.
    ax_inv_trans = ax.transData.inverted()  # display to data
    box_widths = []  # box width in wavelength units.
    for box in ax.texts:
        b_ext = box.get_window_extent()
        box_widths.append(b_ext.transformed(ax_inv_trans).width)

    # Find final x locations of boxes so that they don't overlap.
    # Function adjust_boxes uses a direct translation of the equivalent
    # code in lineid_plot.pro in IDLASTRO.
    max_iter = kwargs.get('max_iter', 1000)
    adjust_factor = kwargs.get('adjust_factor', 0.35)
    factor_decrement = kwargs.get('factor_decrement', 3.0)
    wlp, niter, changed = adjust_boxes(line_wave, box_widths,
                                       np.min(wave), np.max(wave),
                                       adjust_factor=adjust_factor,
                                       factor_decrement=factor_decrement,
                                       max_iter=max_iter)

    # Redraw the boxes at their new x location.
    for i in range(nlines):
        box = ax.texts[i]
        if hasattr(box, 'xyann'):
            box.xyann = (wlp[i], box.xyann[1])
        elif hasattr(box, 'xytext'):
            box.xytext = (wlp[i], box.xytext[1])
        else:
            warnings.warn("Warning: missing xyann and xytext attributes. "
                          "Your matplotlib version may not be compatible "
                          "with lineid_plot.")

    # Update the figure
    fig.canvas.draw()

    # Return Figure and Axes so that they can be used for further
    # manual customization.
    return fig, ax

Example 24

Project: augur-core
Source File: runtests.py
View license
def test_consensus(example, verbose=False):
    """Run one consensus example through the Serpent contracts and compare
    the results against the pure-Python pyconsensus implementation.

    Parameters:
        example -- zero-argument callable returning the tuple
                   (reports, reputation, scaled, scaledMax, scaledMin);
                   `reports` is a 2-D array (reporters x events).
        verbose -- when True, print intermediate matrices and per-component
                   diagnostics at each pipeline stage.

    The pipeline mirrors the contract files it compiles in order:
    interpolate.se -> center.se -> score.se (iterative PCA) -> adjust.se
    -> resolve.se -> payout.se, checking intermediate Serpent outputs
    against the equivalent NumPy computation with `tol`, then comparing
    the final reputation/outcome vectors against pyconsensus.

    NOTE(review): Python 2 code (print statements, list-returning `map`);
    `fix`/`unfix` presumably convert to/from the contracts' fixed-point
    representation -- confirm against the enclosing module.
    """

    reports, reputation, scaled, scaledMax, scaledMin = example()

    # Dimensions of the report matrix (reporters x events).
    num_reports = len(reputation)
    num_events = len(reports[0])
    flatsize = num_reports * num_events
    # Convert inputs to the contracts' fixed-point encoding.
    reputation_fixed = map(fix, reputation)
    reports_fixed = map(fix, reports.ravel())
    scaledMax_fixed = map(fix, scaledMax)
    scaledMin_fixed = map(fix, scaledMin)
    if verbose:
        display(np.array(reports_fixed), "reports (raw):", refold=num_events, show_all=True)

    # Fresh simulated chain for the contract calls below.
    s = init_chain()

    # Stage 1: fill in missing reports (interpolate.se). The flat result
    # packs the filled reports first, then the missing-value mask.
    c = compile_contract(s, "interpolate.se")
    result = profile(c, "interpolate", reports_fixed,
                                       reputation_fixed,
                                       scaled,
                                       scaledMax_fixed,
                                       scaledMin_fixed)
    result = np.array(result)
    reports_filled = result[0:flatsize].tolist()
    reports_mask = result[flatsize:].tolist()
    if verbose:
        display(reports_filled, "reports_filled:", refold=num_events, show_all=True)

    # Stage 2: weight and center the data matrix (center.se). The tail of
    # the result (beyond flatsize) carries the initial loading vector plus
    # two iteration/component counters consumed by the PCA loop below.
    c = compile_contract(s, "center.se")
    result = profile(c, "center", reports_filled,
                                  reputation_fixed,
                                  scaled,
                                  scaledMax_fixed,
                                  scaledMin_fixed,
                                  max_iterations,
                                  max_components)
    result = np.array(result)
    weighted_centered_data = result[0:flatsize].tolist()
    if verbose:
        display(weighted_centered_data, "Weighted centered data:", refold=num_events, show_all=True)

    # Unfixed copies for the reference NumPy computation.
    lv = np.array(map(unfix, result[flatsize:-2]))
    wcd = np.array(fold(map(unfix, weighted_centered_data), num_events))
    wcd_init = wcd
    rep = map(unfix, reputation_fixed)
    R = np.diag(rep)

    # Get "Satoshi" (integer) Reputation values
    # Python
    tokens = np.array([int(r * 1e6) for r in rep])
    alltokens = np.sum(tokens)
    # Serpent
    
    reptokens = profile(c, "tokenize", reputation_fixed, num_reports, nparray=False)
    if verbose:
        print BR("Tokens:")
        print BW("  Python: "), tokens
        print BW("  Serpent:"), np.array(map(unfix, reptokens)).astype(int)

    # Calculate the first row of the covariance matrix
    # Python
    covmat = wcd.T.dot(np.diag(tokens)).dot(wcd) / float(alltokens - 1)
    totalvar = np.trace(covmat)
    Crow = np.zeros(num_events)
    wcd_x_tokens = wcd[:,0] * tokens
    Crow = wcd_x_tokens.dot(wcd) / (alltokens-1)
    # Serpent
    covrow = profile(c, "covariance", weighted_centered_data,
                                      reptokens,
                                      num_reports,
                                      num_events)
    if verbose:
        print BR("Covariance matrix row")
        print BW("  Python: "), Crow
        print BW("  Serpent:"), np.array(map(unfix, covrow))
    # Serpent covariance row must agree with NumPy within tolerance.
    tol(covrow, Crow)

    #######
    # PCA #
    #######

    # Reference implementation: power-iteration PCA, one component at a
    # time, deflating the data matrix after each component.
    # Python
    iv = result[flatsize:]
    variance_explained = 0
    nc = np.zeros(num_reports)
    negative = False

    for j in range(min(max_components, num_events)):

        # Calculate loading vector
        lv = np.array(map(unfix, iv[:-2]))
        for i in range(max_iterations):
            lv = R.dot(wcd).dot(lv).dot(wcd)
            lv /= np.sqrt(lv.dot(lv))

        # Calculate the eigenvalue for this eigenvector
        # (use the first nonzero loading to avoid dividing by zero)
        for k in range(num_events):
            if lv[k] != 0:
                break
        E = covmat[k,:].dot(lv) / lv[k]

        # Cumulative variance explained
        variance_explained += E / totalvar

        # Projection onto new axis: nonconformity vector
        # (sign-normalize the loadings so the projection is deterministic)
        slv = lv
        if slv[0] < 0:
            slv *= -1
        nc += E * wcd.dot(slv)

        if verbose:
            print BW("  Loadings %d:" % j), np.round(np.array(lv), 6)
            print BW("  Latent %d:  " % j), E, "(%s%% variance explained)" % np.round(variance_explained * 100, 3)

        # Deflate the data matrix
        wcd = wcd - wcd.dot(np.outer(lv, lv))

    if verbose:
        print BW("  Nonconformity: "), np.round(nc, 6)

    # Serpent: the same PCA loop, driven through score.se one contract
    # call at a time so gas/time can be profiled per step.
    loading_vector = result[flatsize:].tolist()
    data = weighted_centered_data
    scores = map(int, np.zeros(num_reports).tolist())
    var_exp = 0
    num_comps = 0

    c = compile_contract(s, "score.se")

    while True:
        print(BC("  COMPONENT %s" % str(num_comps + 1)))

        # Loading vector (eigenvector)
        #   - Second-to-last element: number of iterations remaining
        #   - Last element: number of components remaining
        loading_vector = profile(c, "blank", loading_vector[-1],
                                             max_iterations,
                                             num_events)
        sys.stdout.write(BW("  - loadings"))
        sys.stdout.flush()
        lv_gas = []
        lv_time = []
        # Iterate until the contract's remaining-iterations counter
        # (element num_events of the vector) reaches zero.
        while loading_vector[num_events] > 0:
            sys.stdout.write(BW("."))
            sys.stdout.flush()
            result = c.loadings(loading_vector,
                                data,
                                reputation_fixed,
                                num_reports,
                                num_events,
                                profiling=True)
            loading_vector = result['output']
            lv_gas.append(result['gas'])
            lv_time.append(result['time'])
        print(" %i gas (%s seconds)" % (np.mean(lv_gas), np.mean(lv_time)))

        # Latent factor (eigenvalue; check sign bit)
        latent = profile(c, "latent", covrow, loading_vector, num_events)

        # Deflate the data matrix
        data = profile(c, "deflate", loading_vector, data, num_reports, num_events)

        # Project data onto this component and add to weighted scores
        scores = profile(c, "score", scores,
                                     loading_vector,
                                     weighted_centered_data,
                                     latent,
                                     num_reports,
                                     num_events)
        if verbose:
            printable_loadings = np.array(map(unfix, loading_vector[:-2]))
            if printable_loadings[0] < 0:
                printable_loadings *= -1
            print BW("Component %d [%s]:\t" %
                     (num_comps, np.round(unfix(latent), 4))), printable_loadings
        num_comps += 1
        # Stop when the remaining-components counter hits zero.
        if loading_vector[num_events + 1] == 0:
            break
    # Serpent nonconformity scores must match the NumPy reference.
    tol(scores, nc)

    # Stage 4: reputation adjustment (adjust.se). reputation_delta returns
    # two candidate reputation sets packed back to back.
    c = compile_contract(s, "adjust.se")
    result = profile(c, "reputation_delta", scores, num_reports, num_events)
    result = np.array(result)
    set1 = result[0:num_reports].tolist()
    set2 = result[num_reports:].tolist()
    assert(len(set1) == len(set2))
    assert(len(result) == 2*num_reports)
    if verbose:
        display(set1, "set1:", show_all=True)
        display(set2, "set2:", show_all=True)

    # weighted_delta packs three per-event vectors: old outcomes plus the
    # outcomes under each candidate reputation set.
    result = profile(c, "weighted_delta", set1,
                                          set2,
                                          reputation_fixed,
                                          reports_filled,
                                          num_reports,
                                          num_events)
    result = np.array(result)
    old = result[0:num_events].tolist()
    new1 = result[num_events:(2*num_events)].tolist()
    new2 = result[(2*num_events):].tolist()
    assert(len(result) == 3*num_events)
    assert(len(old) == len(new1) == len(new2))
    if verbose:
        display(old, "old:", show_all=True)
        display(new1, "new1:", show_all=True)
        display(new2, "new2:", show_all=True)

    adjusted_scores = profile(c, "select_scores", old,
                                                  new1,
                                                  new2,
                                                  set1,
                                                  set2,
                                                  scores,
                                                  num_reports,
                                                  num_events)

    # Stage 5: smooth reputation and resolve event outcomes (resolve.se).
    c = compile_contract(s, "resolve.se")
    smooth_rep = profile(c, "smooth", adjusted_scores,
                                      reputation_fixed,
                                      num_reports,
                                      num_events)
    event_outcomes = profile(c, "resolve", smooth_rep,
                                           reports_filled,
                                           scaled,
                                           scaledMax_fixed,
                                           scaledMin_fixed,
                                           num_reports,
                                           num_events)

    # Stage 6: reporter payouts (payout.se).
    c = compile_contract(s, "payout.se")
    reporter_payout = profile(c, "payout", event_outcomes,
                                           smooth_rep,
                                           reports_mask,
                                           num_reports,
                                           num_events)
    reporter_payout = np.array(reporter_payout)
    if verbose:
        print BW("Nonconformity scores:"), np.array(map(unfix, scores))
        print BW("Raw reputation:      "), np.array(map(unfix, smooth_rep))
        print BW("Adjusted scores:     "), np.array(map(unfix, adjusted_scores))
        print BW("Reporter payout:     "), np.array(map(unfix, reporter_payout))
        print BW("Event outcomes:      "), np.array(map(unfix, event_outcomes))

    # Compare to pyconsensus

    print BG("pyconsensus")

    # Build pyconsensus-style event bounds from the scaled flags.
    # NOTE(review): the loop variable `s` here shadows the chain handle
    # assigned by init_chain() above -- harmless (the handle is no longer
    # used), but worth renaming upstream.
    event_bounds = []
    for i, s in enumerate(scaled):
        event_bounds.append({
            'scaled': 0 if s == False else 1,
            'min': scaledMin[i],
            'max': scaledMax[i],
        })
    # pyconsensus expects missing reports as NaN rather than 0.
    for j in range(num_events):
        for i in range(num_reports):
            if reports[i,j] == 0:
                reports[i,j] = np.nan

    pyresults = Oracle(reports=reports,
                       reputation=reputation,
                       event_bounds=event_bounds,
                       algorithm="big-five",
                       variance_threshold=variance_threshold,
                       max_components=max_components,
                       verbose=False).consensus()
    serpent_results = {
        'reputation': map(unfix, smooth_rep),
        'outcomes': map(unfix, event_outcomes),
    }
    python_results = {
        'reputation': pyresults['agents']['smooth_rep'],
        'outcomes': np.array(pyresults['events']['outcomes_final']),
    }
    # Relative error between the Serpent and pyconsensus results.
    comparisons = {}
    for m in ('reputation', 'outcomes'):
        comparisons[m] = abs((python_results[m] - serpent_results[m]) / python_results[m])

    # Report any metric whose relative error exceeds the global tolerance.
    fails = 0
    for key, value in comparisons.items():
        try:
            assert((value < tolerance).all())
        except Exception as e:
            fails += 1
            print BW("Tolerance exceeded for ") + BR("%s:" % key)
            print "Serpent:    ", np.array(serpent_results[key])
            print "Python:     ", python_results[key]
            print "Difference: ", comparisons[key]
    if fails == 0:
        print BC("Tests passed!")

Example 25

Project: pyconsensus
Source File: __init__.py
View license
def main(argv=None):
    """Command-line entry point for the pyconsensus demo/driver.

    Options:
        -h / --help      print the module docstring and exit
        -t / --test=N    run one of the numbered canned report matrices
                         (N in "1".."18") through Oracle with the PCA
                         algorithm and print the resulting event/agent
                         tables
        -x / --example   small fixed example with explicit reputation,
                         using the "absolute" algorithm
        -m / --missing   example containing NA (missing) reports, PCA
        -s / --scaled    example mixing binary and scaled (continuous)
                         events with explicit per-event bounds

    Returns 0 on --help, 2 on a getopt parsing error, otherwise None.

    NOTE(review): Python 2 code (bare `print` statement below). The
    report matrices use module-level constants YES/NO/NA/BAD -- their
    numeric values are defined elsewhere in this module. If --test is
    given an argument outside "1".."18", `reports` is never assigned and
    the Oracle call below would raise NameError -- confirm/handle
    upstream.
    """
    if argv is None:
        argv = sys.argv
    try:
        short_opts = 'hxmst:'
        long_opts = ['help', 'example', 'missing', 'scaled', 'test=']
        opts, vals = getopt.getopt(argv[1:], short_opts, long_opts)
    except getopt.GetoptError as e:
        sys.stderr.write(e.msg)
        sys.stderr.write("for help use --help")
        return 2
    for opt, arg in opts:
        if opt in ('-h', '--help'):
            print(__doc__)
            return 0
        elif opt in ('-t', '--test'):
            # Canned test matrices; each row is one reporter, each column
            # one event.
            testalgo = "PCA"
            if arg == "1":
                reports = np.array([[ YES, YES,  NO,  NO ],
                                    [ YES,  NO,  NO,  NO ],
                                    [ YES, YES,  NO,  NO ],
                                    [ YES, YES, YES,  NO ],
                                    [  NO,  NO, YES, YES ],
                                    [  NO,  NO, YES, YES ]])
            elif arg == "2":
                reports = np.array([[ YES, YES,  NO,  NO ],
                                    [ YES, YES,  NO,  NO ],
                                    [ YES, YES,  NO,  NO ],
                                    [ YES, YES,  NO,  NO ],
                                    [ YES, YES,  NO,  NO ],
                                    [ YES, YES,  NO,  NO ],
                                    [ YES, YES, YES,  NO ],
                                    [ YES, YES, YES,  NO ],
                                    [ YES, YES, YES,  NO ],
                                    [ YES, YES, YES,  NO ],
                                    [ YES, YES, YES,  NO ]])
            elif arg == "3":
                reports =  np.array([[ YES,  YES,   NO,  NO,  YES,  YES,  NO,   NO,  YES,  YES,   NO,   NO,  YES],
                                     [ YES,  YES,   NO,  NO,  YES,  YES,  NO,   NO,  YES,  YES,   NO,   NO,  YES],
                                     [ YES,  YES,   NO,  NO,  YES,  YES,  NO,   NO,  YES,  YES,   NO,   NO,  YES],
                                     [ YES,  YES,   NO,  NO,  YES,  YES,  NO,   NO,  YES,  YES,   NO,   NO,  YES],
                                     [ YES,  YES,   NO,  NO,  YES,  YES,  NO,   NO,  YES,  YES,   NO,   NO,  YES],
                                     [ YES,  YES,   NO,  NO,  YES,  YES,  NO,   NO,  YES,  YES,   NO,   NO,  YES],

                                     [  NO,   NO,   NO, YES,   NO,   NO,  NO,  YES,   NO,   NO,   NO,  YES,   NO],
                                     
                                     [ YES,  YES,  YES,  NO,  YES,  YES,  YES,  NO,  YES,  YES,  YES,   NO,  YES],
                                     [ YES,  YES,  YES,  NO,  YES,  YES,  YES,  NO,  YES,  YES,  YES,   NO,  YES],
                                     [ YES,  YES,  YES,  NO,  YES,  YES,  YES,  NO,  YES,  YES,  YES,   NO,  YES],
                                     [ YES,  YES,  YES,  NO,  YES,  YES,  YES,  NO,  YES,  YES,  YES,   NO,  YES]])
            elif arg == "4":
                reports =  np.array([[ YES,  YES,   NO,   NO,  YES ],
                                     [ YES,  YES,   NO,   NO,  YES ],
                                     [ YES,  YES,   NO,   NO,  YES ],
                                     [ YES,  YES,   NO,   NO,  YES ],
                                     [ YES,  YES,   NO,   NO,  YES ],
                                     [ YES,  YES,   NO,   NO,  YES ],
                                     [ YES,  YES,   NO,   NO,  YES ],
                                     [ YES,  YES,   NO,   NO,  YES ],
                                     [ YES,  YES,   NO,   NO,  YES ],
                                     [ YES,  YES,   NO,   NO,  YES ],
                                     [ YES,  YES,   NO,   NO,  YES ],
                                     [ YES,  YES,   NO,   NO,  YES ],
                                     [ YES,  YES,   NO,   NO,  YES ],
                                     [ YES,  YES,   NO,   NO,  YES ],
                                     [ YES,  YES,   NO,   NO,  YES ],
                                     [  NO,   NO,   NO,  YES,   NO ],
                                     [ YES,  YES,  YES,   NO,  YES ],
                                     [ YES,  YES,  YES,   NO,  YES ],
                                     [ YES,  YES,  YES,   NO,  YES ],
                                     [ YES,  YES,  YES,   NO,  YES ],
                                     [ YES,  YES,  YES,   NO,  YES ],
                                     [ YES,  YES,   NO,   NO,  YES ],
                                     [ YES,  YES,   NO,   NO,  YES ],
                                     [ YES,  YES,   NO,   NO,  YES ],
                                     [ YES,  YES,   NO,   NO,  YES ]])

            elif arg == "5":
                reports = np.array([[ BAD,  NO,  NO, YES,  NO,  NO, YES, YES, BAD, BAD ],
                                    [ BAD, BAD,  NO, BAD, BAD, YES, YES, BAD, YES, BAD ],
                                    [  NO, YES, BAD, BAD,  NO, YES,  NO,  NO, BAD, BAD ],
                                    [ BAD, BAD, BAD, BAD, BAD,  NO,  NO,  NO, BAD, YES ],
                                    [  NO, YES, YES, BAD, BAD, YES, BAD, YES, BAD, YES ],
                                    [  NO, YES, YES, YES,  NO, BAD,  NO, BAD, BAD, BAD ],
                                    [  NO,  NO,  NO, YES,  NO,  NO,  NO, YES, BAD, YES ],
                                    [ BAD, BAD, BAD, YES, BAD, YES, BAD, BAD, YES,  NO ],
                                    [ BAD, BAD, BAD,  NO, BAD, YES, YES,  NO,  NO, BAD ],
                                    [ BAD, YES, BAD, YES,  NO,  NO, YES, YES,  NO, BAD ],
                                    [ YES, YES, BAD, BAD, BAD, YES, BAD, BAD, YES, YES ],
                                    [ YES, BAD, YES,  NO, YES, BAD, YES,  NO, YES, BAD ],
                                    [  NO,  NO,  NO, YES, YES, YES, BAD, YES, BAD,  NO ],
                                    [  NO,  NO,  NO, YES, YES, YES, BAD, YES, BAD,  NO ],
                                    [  NO,  NO,  NO, YES, YES, YES, BAD, YES, BAD,  NO ],
                                    [  NO,  NO,  NO, YES, YES, YES, BAD, YES, BAD,  NO ],
                                    [  NO,  NO,  NO, YES, YES, YES, BAD, YES, BAD,  NO ],
                                    [  NO,  NO,  NO, YES, YES, YES, BAD, YES, BAD,  NO ],
                                    [  NO,  NO,  NO, YES, YES, YES, BAD, YES, BAD,  NO ],
                                    [ BAD, BAD, BAD, YES, BAD, YES, BAD, BAD, YES,  NO ]])
            elif arg == "6":
                reports = np.array([[  NO,   NO,  YES,  YES,   NO,  YES,   NO,   NO,   NO,   NO ],
                                    [ YES,  YES,   NO,   NO,   NO,  YES,  YES,  YES,   NO,  YES ],
                                    [ YES,  YES,   NO,  YES,   NO,  YES,  YES,   NO,  YES,  YES ],
                                    [  NO,  YES,   NO,   NO,  YES,   NO,  YES,   NO,   NO,  YES ],
                                    [  NO,   NO,  YES,   NO,  YES,   NO,   NO,   NO,   NO,   NO ],
                                    [  NO,  YES,   NO,   NO,   NO,  YES,  YES,   NO,  YES,  YES ],
                                    [ YES,   NO,   NO,  YES,  YES,   NO,  YES,   NO,   NO,   NO ],
                                    [ YES,  YES,   NO,   NO,  YES,   NO,  YES,  YES,  YES,   NO ],
                                    [ YES,   NO,   NO,  YES,   NO,  YES,   NO,   NO,   NO,  YES ],
                                    [ YES,   NO,   NO,  YES,   NO,  YES,   NO,   NO,   NO,  YES ],
                                    [ YES,   NO,   NO,  YES,   NO,  YES,   NO,   NO,   NO,  YES ],
                                    [ YES,   NO,   NO,  YES,   NO,  YES,   NO,   NO,   NO,  YES ],
                                    [ YES,   NO,   NO,  YES,   NO,  YES,   NO,   NO,   NO,  YES ],
                                    [ YES,   NO,   NO,  YES,   NO,  YES,   NO,   NO,   NO,  YES ],
                                    [ YES,   NO,   NO,  YES,   NO,  YES,   NO,   NO,   NO,  YES ],
                                    [ YES,   NO,   NO,  YES,   NO,  YES,   NO,   NO,   NO,  YES ],
                                    [ YES,   NO,   NO,  YES,   NO,  YES,   NO,   NO,   NO,  YES ],
                                    [ YES,   NO,   NO,  YES,   NO,  YES,   NO,   NO,   NO,  YES ],
                                    [ YES,   NO,   NO,  YES,   NO,  YES,   NO,   NO,   NO,  YES ],
                                    [  NO,  YES,   NO,   NO,  YES,   NO,  YES,   NO,   NO,  YES ]])
            elif arg == "7":
                reports = np.array([[ YES, YES, YES, YES, YES, YES ],
                                    [ YES, YES, YES,  NO,  NO,  NO ],
                                    [  NA,  NA,  NA,  NA,  NA,  NA ]])
                                    # [ np.nan, np.nan, np.nan, np.nan, np.nan, np.nan]])
            elif arg == "8":
                reports = np.array([[ YES, YES, YES, YES, YES, YES ],
                                    [ YES, YES, YES,  NO,  NA,  NA ],
                                    [ YES, YES, YES,  NA,  NA,  NO ]])
            elif arg == "9":
                reports = np.array([[ YES, YES, YES, YES, YES, YES ],
                                    [ YES, YES, YES,  NO,  NA,  NA ],
                                    [ YES, YES, YES,  NO,  NA,  NA ]])
            elif arg == "10":
                reports = np.array([[ YES, YES, YES,  NO, YES, YES ],
                                    [ YES, YES, YES,  NO,  NA,  NA ],
                                    [ YES, YES, YES,  NO,  NA,  NA ]])
            elif arg == "11":
                reports = np.array([[ YES, YES, YES, YES, YES, YES ],
                                    [  NA,  NA,  NA,  NA,  NA,  NA ],
                                    [ YES, YES, YES,  NO,  NO,  NO ]])
            elif arg == "12":
                reports = np.array([[ YES, YES, YES,  NO,  NO,  NO ],
                                    [ YES, YES, YES,  NO,  NO,  NO ],
                                    [ YES, YES, YES,  NO,  NO,  NO ]])
            elif arg == "13":
                reports = np.array([[ YES, YES, YES,  NO,  NO,  NO ]])
            elif arg == "14":
                reports = np.array([[ YES, YES,  NO,  NO ],
                                    [ YES, YES,  NO,  NO ],
                                    [ YES, YES,  NO,  NO ],
                                    [ YES, YES,  NO,  NO ],
                                    [ YES, YES,  NO,  NO ],
                                    [ YES, YES,  NO,  NO ],
                                    [ YES, YES, YES,  NO ],
                                    [ YES, YES, YES,  NO ],
                                    [ YES, YES, YES,  NO ],
                                    [ YES, YES, YES,  NO ],
                                    [ YES, YES, YES,  NO ]])
            elif arg == "15":
                reports =  np.array([[ YES,  YES,   NO,   NO,  YES ],
                                     [ YES,  YES,   NO,   NO,  YES ],
                                     [ YES,  YES,   NO,   NO,  YES ],
                                     [ YES,  YES,   NO,   NO,  YES ],
                                     [ YES,  YES,   NO,   NO,  YES ],
                                     [ YES,  YES,   NO,   NO,  YES ],
                                     [ YES,  YES,   NO,   NO,  YES ],
                                     [ YES,  YES,   NO,   NO,  YES ],
                                     [ YES,  YES,   NO,   NO,  YES ],
                                     [ YES,  YES,   NO,   NO,  YES ],
                                     [ YES,  YES,   NO,   NO,  YES ],
                                     [ YES,  YES,   NO,   NO,  YES ],
                                     [ YES,  YES,   NO,   NO,  YES ],
                                     [ YES,  YES,   NO,   NO,  YES ],
                                     [ YES,  YES,   NO,   NO,  YES ],
                                     [  NO,   NO,   NO,  YES,   NO ],
                                     [ YES,  YES,  YES,   NO,  YES ],
                                     [ YES,  YES,  YES,   NO,  YES ],
                                     [ YES,  YES,  YES,   NO,  YES ],
                                     [ YES,  YES,  YES,   NO,  YES ],
                                     [ YES,  YES,  YES,   NO,  YES ],
                                     [ YES,  YES,   NO,   NO,  YES ],
                                     [ YES,  YES,   NO,   NO,  YES ],
                                     [ YES,  YES,   NO,   NO,  YES ],
                                     [ YES,  YES,   NO,   NO,  YES ]])
            elif arg == "16":
                reports = np.array([[  NO,   NO,  YES,  YES,   NO,  YES,   NO,   NO,   NO,   NO ],
                                    [ YES,  YES,   NO,   NO,   NO,  YES,  YES,  YES,   NO,  YES ],
                                    [ YES,  YES,   NO,  YES,   NO,  YES,  YES,   NO,  YES,  YES ],
                                    [  NO,  YES,   NO,   NO,  YES,   NO,  YES,   NO,   NO,  YES ],
                                    [  NO,   NO,  YES,   NO,  YES,   NO,   NO,   NO,   NO,   NO ],
                                    [  NO,  YES,   NO,   NO,   NO,  YES,  YES,   NO,  YES,  YES ],
                                    [ YES,   NO,   NO,  YES,  YES,   NO,  YES,   NO,   NO,   NO ],
                                    [ YES,  YES,   NO,   NO,  YES,   NO,  YES,  YES,  YES,   NO ],
                                    [ YES,   NO,   NO,  YES,   NO,  YES,   NO,   NO,   NO,  YES ],
                                    [ YES,   NO,   NO,  YES,   NO,  YES,   NO,   NO,   NO,  YES ],
                                    [ YES,   NO,   NO,  YES,   NO,  YES,   NO,   NO,   NO,  YES ],
                                    [ YES,   NO,   NO,  YES,   NO,  YES,   NO,   NO,   NO,  YES ],
                                    [ YES,   NO,   NO,  YES,   NO,  YES,   NO,   NO,   NO,  YES ],
                                    [ YES,   NO,   NO,  YES,   NO,  YES,   NO,   NO,   NO,  YES ],
                                    [ YES,   NO,   NO,  YES,   NO,  YES,   NO,   NO,   NO,  YES ],
                                    [ YES,   NO,   NO,  YES,   NO,  YES,   NO,   NO,   NO,  YES ],
                                    [ YES,   NO,   NO,  YES,   NO,  YES,   NO,   NO,   NO,  YES ],
                                    [ YES,   NO,   NO,  YES,   NO,  YES,   NO,   NO,   NO,  YES ],
                                    [ YES,   NO,   NO,  YES,   NO,  YES,   NO,   NO,   NO,  YES ],
                                    [  NO,  YES,   NO,   NO,  YES,   NO,  YES,   NO,   NO,  YES ]])
            elif arg == "17":
                reports = np.array([[ YES, YES,  NO,  NO ],
                                    [ YES,  NO,  NO,  NO ],
                                    [ YES, YES,  NO,  NO ],
                                    [ YES, YES, YES,  NO ],
                                    [  NO,  NO, YES, YES ],
                                    [  NO,  NO, YES, YES ]])
            elif arg == "18":
                reports = np.array([[ YES, YES,  NO,  NO ],
                                    [ YES,  NO,  NO,  NO ],
                                    [  NA,  NA,  NA,  NA ],
                                    [  NA,  NA,  NA,  NA ],
                                    [  NA,  NA,  NA,  NA ],
                                    [  NA,  NA,  NA,  NA ],
                                    [  NA,  NA,  NA,  NA ],
                                    [  NA,  NA,  NA,  NA ],
                                    [  NA,  NA,  NA,  NA ],
                                    [  NA,  NA,  NA,  NA ],
                                    [  NA,  NA,  NA,  NA ],
                                    [  NA,  NA,  NA,  NA ],
                                    [  NA,  NA,  NA,  NA ],
                                    [  NA,  NA,  NA,  NA ],
                                    [  NA,  NA,  NA,  NA ],
                                    [  NA,  NA,  NA,  NA ]])
            # Run consensus on the selected matrix and print event/agent
            # tables as DataFrames.
            oracle = Oracle(reports=reports, algorithm=testalgo)
            A = oracle.consensus()
            print(reports)
            print(pd.DataFrame(A["events"]))
            print
            print(pd.DataFrame(A["agents"]))

        elif opt in ('-x', '--example'):
            # Small example with explicit per-reporter reputation.
            reports = np.array([[ YES, YES,  NO,  NO],
                                [ YES,  NO,  NO,  NO],
                                [ YES, YES,  NO,  NO],
                                [ YES, YES, YES,  NO],
                                [  NO,  NO, YES, YES],
                                [  NO,  NO, YES, YES]])
            reputation = [2, 10, 4, 2, 7, 1]
            oracle = Oracle(reports=reports,
                            reputation=reputation,
                            algorithm="absolute")
            A = oracle.consensus()
            print(pd.DataFrame(A["events"]))
            print(pd.DataFrame(A["agents"]))
        elif opt in ('-m', '--missing'):
            # Example with NA (missing) reports, resolved via PCA.
            reports = np.array([[    YES, YES,  NO,     NA],
                                [    YES,  NO,  NO,     NO],
                                [    YES, YES,  NO,     NO],
                                [    YES, YES, YES,     NO],
                                [     NA,  NO, YES,    YES],
                                [     NO,  NO, YES,    YES]])
            reputation = [2, 10, 4, 2, 7, 1]
            oracle = Oracle(reports=reports,
                            reputation=reputation,
                            algorithm="PCA")
            A = oracle.consensus()
            print(pd.DataFrame(A["events"]))
            print(pd.DataFrame(A["agents"]))
        elif opt in ('-s', '--scaled'):
            # Example mixing binary events with scaled (continuous)
            # events; the last two columns carry real-valued reports.
            reports = np.array([[ YES, YES,  NO,  NO, 233, 16027.59],
                                [ YES,  NO,  NO,  NO, 199,      NA ],
                                [ YES, YES,  NO,  NO, 233, 16027.59],
                                [ YES, YES, YES,  NO, 250,      NA ],
                                [  NO,  NO, YES, YES, 435,  8001.00],
                                [  NO,  NO, YES, YES, 435, 19999.00]])
            # Per-event bounds: binary events are unscaled; the scaled
            # events declare their valid [min, max] range.
            event_bounds = [
                { "scaled": False, "min": NO,   "max": 1 },
                { "scaled": False, "min": NO,   "max": 1 },
                { "scaled": False, "min": NO,   "max": 1 },
                { "scaled": False, "min": NO,   "max": 1 },
                { "scaled": True,  "min":  0,   "max": 435 },
                { "scaled": True,  "min": 8000, "max": 20000 },
            ]
            oracle = Oracle(reports=reports, event_bounds=event_bounds)
            A = oracle.consensus()
            print(pd.DataFrame(A["events"]))
            print(pd.DataFrame(A["agents"]))

Example 26

Project: popupcad
Source File: generatealignmentlayup.py
View license
    def operate(self, design):

        """
        Return a generic_laminate ref of a layup laminate with all the layers of the part and with the appropriate 25x25mm alignment
        features compatible with the Wood lab micro-robotics manufacturing process.

        Input:
        Design -> a popupcad design file

        Output:
        layup -> A handle to the layup design file
        subop -> A subop which is inserted into the input design file to reduce the number of operations
        """
        # NOTE(review): the docstring above promises (layup, subop), but the code
        # below records an OperationOutput on self.output and returns the CSG of
        # the final laminate operation — confirm which contract callers rely on.

        #### general geometry constants that most layups will have
        sheet_width = self.values[0]        # mm
        hole_offset = self.values[1]        # location of hole in from corner
        hole_rad    = self.values[2]        # alignment pin geoms

        cross_len   = .75                   # tick length
        cross_horiz = sheet_width/2 - 2*cross_len        # horizontal dimension from center crosshair
        dt          = 0.001                 # small thickness for crosshair

        buff_x      = 5                     # for window sizes
        buff_y      = 1
        wind_h      = 1
        space_x     = 1.3

        # window width, maximum of 1 mm
        # (clamped to the range [0.01, 1] so a large layer count never produces
        # a zero/negative width)
        wind_w      = lambda N: max(min((sheet_width - 2*buff_x)/(N + 1.3*N - 1.3), 1),0.01)

        # the laminate design
        # NOTE(review): the layup is built directly into the *input* design
        # rather than a fresh Design (the commented-out call below suggests a
        # separate file was once intended) — confirm this is deliberate.
        layup = design # popupcad.filetypes.design.Design.new()
        layer_list = design.return_layer_definition().layers

        # initiate the sketches
        ############# sheet first
        # square sheet of side `sheet_width`, centered on the origin
        sheet = Sketch.new()
        tmp_geom = [(-sheet_width/2., -sheet_width/2.), (-sheet_width/2.,  sheet_width/2.),
                    ( sheet_width/2.,  sheet_width/2.), ( sheet_width/2., -sheet_width/2.)]
        sheet_poly = popupcad.filetypes.genericshapes.GenericPoly.gen_from_point_lists(tmp_geom,[])
        sheet.addoperationgeometries([sheet_poly])

        ############# holes second
        # one alignment-pin hole inset by `hole_offset` from each corner
        holes = Sketch.new()
        tmp_geom = [(-sheet_width/2. + hole_offset, -sheet_width/2. + hole_offset),
                    (-sheet_width/2. + hole_offset,  sheet_width/2. - hole_offset),
                    ( sheet_width/2. - hole_offset,  sheet_width/2. - hole_offset),
                    ( sheet_width/2. - hole_offset, -sheet_width/2. + hole_offset)]
        # make list of hole geometry
        # (each circle is defined by its center point and a point at radius
        # `hole_rad` to its right)
        holes_poly = [popupcad.filetypes.genericshapes.GenericCircle.gen_from_point_lists([pt, (pt[0]+hole_rad, pt[1])],[])
                                            for pt in tmp_geom]
        holes.addoperationgeometries(holes_poly)

        ############# upper triangle
        # small orientation marker near the upper-left corner
        # NOTE(review): `(2/3)` evaluates to 0 under Python 2 (integer
        # division) but to ~0.667 under Python 3, which changes the marker's
        # vertical position — confirm which interpreter this targets.
        left_tri = Sketch.new()
        tmp_geom = [(-sheet_width/2. + hole_offset/4, sheet_width/2. - hole_offset*(2/3)),
                    (-sheet_width/2. + hole_offset/4 + hole_rad,  sheet_width/2. - hole_offset*(2/3)),
                    (-sheet_width/2. + hole_offset/4 + 0.5*hole_rad,  sheet_width/2. - hole_offset*(2/3) + 1.2*hole_rad*.75)]
        # make list of hole geometry
        sheet_poly = popupcad.filetypes.genericshapes.GenericPoly.gen_from_point_lists(tmp_geom,[])
        left_tri.addoperationgeometries([sheet_poly])

        ############# crosshairs
        # three crosshairs along the horizontal centerline (left, center,
        # right); each stroke is a thin quad of thickness `dt`
        cross_hairs = Sketch.new()
        tmp_geom_horiz = [(0,-cross_len), (0,cross_len)]
        tmp_geom_vert  = [(-cross_len,0), (cross_len,0)]
        shift = [-cross_horiz, 0, cross_horiz]

        cross_poly_horiz = [popupcad.filetypes.genericshapes.GenericPoly.gen_from_point_lists([(tmp_geom_horiz[0][0] + c - dt/2.,
                                                                                                tmp_geom_horiz[0][1] - dt/2.),
                                                                                               (tmp_geom_horiz[1][0] + c - dt/2.,
                                                                                                tmp_geom_horiz[1][1] - dt/2.),
                                                                                               (tmp_geom_horiz[1][0] + c + dt/2.,
                                                                                                tmp_geom_horiz[1][1] + dt/2.),
                                                                                               (tmp_geom_horiz[0][0] + c + dt/2.,
                                                                                                tmp_geom_horiz[0][1] - dt/2.)],
                                                                                               [])
                                                                                        for c in shift]

        cross_poly_vert  = [popupcad.filetypes.genericshapes.GenericPoly.gen_from_point_lists([(tmp_geom_vert[0][0] + c - dt/2.,
                                                                                                tmp_geom_vert[0][1] - dt/2.),
                                                                                               (tmp_geom_vert[1][0] + c - dt/2.,
                                                                                                tmp_geom_vert[1][1] + dt/2.),
                                                                                               (tmp_geom_vert[1][0] + c + dt/2.,
                                                                                                tmp_geom_vert[1][1] + dt/2.),
                                                                                               (tmp_geom_vert[0][0] + c + dt/2.,
                                                                                                tmp_geom_vert[0][1] - dt/2.)],
                                                                                               [])
                                                                                        for c in shift]

        cross_hairs.addoperationgeometries(cross_poly_horiz + cross_poly_vert)

        # Build the sheet with holes
        # Add the sketches to the sketch list
        layup.sketches[sheet.id] = sheet
        layup.sketches[holes.id] = holes
        layup.sketches[cross_hairs.id] = cross_hairs
        layup.sketches[left_tri.id] = left_tri

        # get the layer links for making sketch ops
        layer_links = [layer.id for layer in layer_list]

        # one SimpleSketchOp per sketch, each applied to every layer
        holes_sketch = popupcad.manufacturing.simplesketchoperation.SimpleSketchOp({'sketch': [holes.id]},layer_links)
        holes_sketch .name = "Holes"

        trian_sketch = popupcad.manufacturing.simplesketchoperation.SimpleSketchOp({'sketch': [left_tri.id]},layer_links)
        trian_sketch .name = "Left triangle"

        sheet_sketch = popupcad.manufacturing.simplesketchoperation.SimpleSketchOp({'sketch': [sheet.id]},layer_links)
        sheet_sketch.name = "sheet"

        cross_sketch = popupcad.manufacturing.simplesketchoperation.SimpleSketchOp({'sketch': [cross_hairs.id]},layer_links)
        cross_sketch.name = "Crosshairs"

        # laminate operation to combine cross hairs and holes
        # (difference: sheet minus holes, crosshairs and triangle marker)
        sheet_with_holes = popupcad.manufacturing.laminateoperation2.LaminateOperation2({'unary': [(sheet_sketch.id,0)],
                                                                                         'binary': [(holes_sketch.id,0),
                                                                                                    (cross_sketch.id,0),
                                                                                                    (trian_sketch.id,0)]},
                                                                                        'difference')
        sheet_with_holes.name = "Sheet with holes"

        ############# rectangle windows
        # one identification window per layer, evenly spaced and centered
        # along the bottom edge of the sheet
        windows = [Sketch.new() for _ in layer_list]
        windows_sketchop = []
        # make windows, center on middle of sheet at bottom
        window_width = wind_w(len(windows))
        window_coords = np.array([round(kk*(1 + space_x)*window_width,4) for kk in range(len(windows))])
        window_coords = list(window_coords - np.mean(window_coords)) # center is 0

        for kk, (layer, window, x_coord) in enumerate(zip(layer_list,
                                                          windows,
                                                          window_coords)):

            window.name = layer.name + '_window'

            tmp_geom = [(x_coord, -sheet_width/2. + buff_y),
                        (x_coord,  -sheet_width/2. + buff_y + wind_h),
                        (x_coord + window_width, -sheet_width/2. + buff_y + wind_h),
                        (x_coord + window_width, -sheet_width/2. + buff_y)]
            sheet_poly = popupcad.filetypes.genericshapes.GenericPoly.gen_from_point_lists(tmp_geom,[])
            window.addoperationgeometries([sheet_poly])
            layup.sketches[window.id] = window

            # make a sketch op on all layers above the current layer, this will be removed with a difference from the sheet
            windows_sketchop.append(popupcad.manufacturing.simplesketchoperation.SimpleSketchOp({'sketch': [window.id]},
                                                                                       layer_links[kk+1:]))
            windows_sketchop[-1].name = "Window_" + layer.name

        # laminate operation to remove windows from sheet with holes
        sheet_with_windows = popupcad.manufacturing.laminateoperation2.LaminateOperation2({'unary': [(sheet_with_holes.id,0)],
                                                                                         'binary': [(sktch.id,0) for sktch
                                                                                                    in windows_sketchop]},
                                                                                         'difference')
        sheet_with_windows.name = "Final sheet"

        # add the sketch ops to the design and generate the sketch op
        # NOTE(review): the ops are added, generated, then immediately removed —
        # presumably to compute their output geometry without leaving them in
        # the design's operation list; confirm this is the intended workflow.
        # (side-effect-only list comprehensions; plain for-loops would be
        # clearer, left unchanged here)
        other_ops = windows_sketchop + [trian_sketch, holes_sketch, sheet_sketch, cross_sketch, sheet_with_holes, sheet_with_windows]
        [layup.addoperation(item) for item in other_ops]
        [item.generate(layup) for item in other_ops]
        [layup.remove_operation(item) for item in other_ops]

        self.output = [OperationOutput(sheet_with_windows.output[0], "OutputLaminate", self)]
        return sheet_with_windows.output[0].csg

Example 27

Project: pupil
Source File: finish_calibration.py
View license
def finish_calibration(g_pool,pupil_list,ref_list):
    """Fit a gaze-mapping model from collected calibration data and install it.

    Matches pupil datums against reference points, then — depending on
    ``g_pool.detection_mapping_mode`` ('3d' or '2d') and on whether binocular
    or only monocular matches exist — fits either a bundle-adjusted 3d eye
    model or a 2d polynomial regression, and adds the corresponding gaze
    mapper plugin to ``g_pool.plugins``.

    Parameters:
        g_pool: global application state; must expose ``detection_mapping_mode``,
            ``capture``, ``user_dir``, ``plugins``, ``get_timestamp`` and
            ``active_calibration_plugin``.
        pupil_list: list of pupil datum dicts (each with at least 'id', and
            'sphere' in 3d mode).
        ref_list: list of reference-point datums recorded during calibration.

    Side effects: adds a gaze-mapper plugin, writes "user_calibration_data"
    into ``g_pool.user_dir`` and broadcasts a 'calibration.successful' or
    'calibration.failed' notification.  Always returns None.
    """

    if pupil_list and ref_list:
        pass
    else:
        logger.error(not_enough_data_error_msg)
        g_pool.active_calibration_plugin.notify_all({'subject':'calibration.failed','reason':not_enough_data_error_msg,'timestamp':g_pool.get_timestamp(),'record':True})
        return

    camera_intrinsics = load_camera_calibration(g_pool)

    # match eye data and check if biocular and or monocular
    pupil0 = [p for p in pupil_list if p['id']==0]
    pupil1 = [p for p in pupil_list if p['id']==1]

    #TODO unify this and don't do both
    matched_binocular_data = calibrate.closest_matches_binocular(ref_list,pupil_list)
    matched_pupil0_data = calibrate.closest_matches_monocular(ref_list,pupil0)
    matched_pupil1_data = calibrate.closest_matches_monocular(ref_list,pupil1)

    # prefer whichever eye produced more matches for the monocular fallback
    if len(matched_pupil0_data)>len(matched_pupil1_data):
        matched_monocular_data = matched_pupil0_data
    else:
        matched_monocular_data = matched_pupil1_data

    logger.info('Collected %s monocular calibration data.'%len(matched_monocular_data))
    logger.info('Collected %s binocular calibration data.'%len(matched_binocular_data))


    mode = g_pool.detection_mapping_mode

    # 3d mapping needs camera intrinsics; silently degrade to 2d otherwise
    if mode == '3d' and not camera_intrinsics:
        mode = '2d'
        logger.warning("Please calibrate your world camera using 'camera intrinsics estimation' for 3d gaze mapping.")

    if mode == '3d':
        # rough initial guesses for the eye-camera positions relative to the
        # world camera (mm); refined (or fixed) by the bundle adjustment below
        hardcoded_translation0  = np.array([20,15,-20])
        hardcoded_translation1  = np.array([-40,15,-20])
        if matched_binocular_data:
            method = 'binocular 3d model'

            #TODO model the world as cv2 pinhole camera with distorion and focal in ceres.
            # right now we solve using a few permutations of K
            smallest_residual = 1000
            scales = list(np.linspace(0.7,1.4,20))
            K = camera_intrinsics["camera_matrix"]

            # grid-search over focal-length scale factors, keeping the solve
            # with the smallest bundle-adjustment residual
            for s in scales:
                scale = np.ones(K.shape)
                scale[0,0] *= s
                scale[1,1] *= s
                camera_intrinsics["camera_matrix"] = K*scale

                ref_dir, gaze0_dir, gaze1_dir = calibrate.preprocess_3d_data(matched_binocular_data,
                                                camera_intrinsics = camera_intrinsics )

                if len(ref_dir) < 1 or len(gaze0_dir) < 1 or len(gaze1_dir) < 1:
                    logger.error(not_enough_data_error_msg)
                    g_pool.active_calibration_plugin.notify_all({'subject':'calibration.failed','reason':not_enough_data_error_msg,'timestamp':g_pool.get_timestamp(),'record':True})
                    return

                # NOTE(review): pupil0/pupil1 are only guaranteed non-empty if
                # both eyes contributed data; binocular matches imply that, but
                # confirm upstream guarantees before relying on [-1] here.
                sphere_pos0 = pupil0[-1]['sphere']['center']
                sphere_pos1 = pupil1[-1]['sphere']['center']

                # rigid-transform initialization for each eye from direction
                # correspondences (scaled to pseudo-3d points at 500mm)
                initial_R0,initial_t0 = find_rigid_transform(np.array(gaze0_dir)*500,np.array(ref_dir)*500)
                initial_rotation0 = math_helper.quaternion_from_rotation_matrix(initial_R0)
                initial_translation0 = np.array(initial_t0).reshape(3)

                initial_R1,initial_t1 = find_rigid_transform(np.array(gaze1_dir)*500,np.array(ref_dir)*500)
                initial_rotation1 = math_helper.quaternion_from_rotation_matrix(initial_R1)
                initial_translation1 = np.array(initial_t1).reshape(3)

                # NOTE(review): the 'fix' key below is duplicated in the world
                # dict literal — the second value silently wins; harmless here
                # (both are identical) but looks like a copy/paste slip.
                eye0 = { "observations" : gaze0_dir , "translation" : hardcoded_translation0 , "rotation" : initial_rotation0,'fix':['translation']  }
                eye1 = { "observations" : gaze1_dir , "translation" : hardcoded_translation1 , "rotation" : initial_rotation1,'fix':['translation']  }
                world = { "observations" : ref_dir , "translation" : (0,0,0) , "rotation" : (1,0,0,0),'fix':['translation','rotation'],'fix':['translation','rotation']  }
                initial_observers = [eye0,eye1,world]
                initial_points = np.array(ref_dir)*500


                success,residual, observers, points  = bundle_adjust_calibration(initial_observers , initial_points, fix_points=False )

                # NOTE(review): `scales[-1] = s` overwrites the last scale
                # still to be iterated instead of recording the best-scoring
                # scale/solution — this looks like a bug (best results are not
                # kept; the post-loop values come from the *last* iteration).
                # Confirm intended behavior against upstream pupil source.
                if residual <= smallest_residual:
                    smallest_residual = residual
                    scales[-1] = s


            # NOTE(review): only the final iteration's `success` is checked here.
            if not success:
                g_pool.active_calibration_plugin.notify_all({'subject':'calibration.failed','reason':solver_failed_to_converge_error_msg,'timestamp':g_pool.get_timestamp(),'record':True})
                logger.error("Calibration solver faild to converge.")
                return


            eye0,eye1,world = observers


            # optimized poses of the eyes expressed in world coordinates
            t_world0 = np.array(eye0['translation'])
            R_world0 = math_helper.quaternion_rotation_matrix(np.array(eye0['rotation']))
            t_world1 = np.array(eye1['translation'])
            R_world1 = math_helper.quaternion_rotation_matrix(np.array(eye1['rotation']))

            def toWorld0(p):
                # eye0 coords -> world coords
                return np.dot(R_world0, p)+t_world0

            def toWorld1(p):
                # eye1 coords -> world coords
                return np.dot(R_world1, p)+t_world1

            # project every triangulated point back onto each observation ray
            points_a = [] #world coords
            points_b = [] #eye0 coords
            points_c = [] #eye1 coords
            for a,b,c,point in zip(world['observations'] , eye0['observations'],eye1['observations'],points):
                line_a = np.array([0,0,0]) , np.array(a) #observation as line
                line_b = toWorld0(np.array([0,0,0])) , toWorld0(b)  #eye0 observation line in world coords
                line_c = toWorld1(np.array([0,0,0])) , toWorld1(c)  #eye1 observation line in world coords
                close_point_a,_ =  math_helper.nearest_linepoint_to_point( point , line_a )
                close_point_b,_ =  math_helper.nearest_linepoint_to_point( point , line_b )
                close_point_c,_ =  math_helper.nearest_linepoint_to_point( point , line_c )
                points_a.append(close_point_a)
                points_b.append(close_point_b)
                points_c.append(close_point_c)


            # we need to take the sphere position into account
            # orientation and translation are referring to the sphere center.
            # but we want to have it referring to the camera center
            # since the actual translation is in world coordinates, the sphere translation needs to be calculated in world coordinates
            sphere_translation = np.array( sphere_pos0 )
            sphere_translation_world = np.dot( R_world0 , sphere_translation)
            camera_translation = t_world0 - sphere_translation_world
            eye_camera_to_world_matrix0  = np.eye(4)
            eye_camera_to_world_matrix0[:3,:3] = R_world0
            eye_camera_to_world_matrix0[:3,3:4] = np.reshape(camera_translation, (3,1) )

            sphere_translation = np.array( sphere_pos1 )
            sphere_translation_world = np.dot( R_world1 , sphere_translation)
            camera_translation = t_world1 - sphere_translation_world
            eye_camera_to_world_matrix1  = np.eye(4)
            eye_camera_to_world_matrix1[:3,:3] = R_world1
            eye_camera_to_world_matrix1[:3,3:4] = np.reshape(camera_translation, (3,1) )


            g_pool.plugins.add(Binocular_Vector_Gaze_Mapper,args={
                                    'eye_camera_to_world_matrix0':eye_camera_to_world_matrix0,
                                    'eye_camera_to_world_matrix1':eye_camera_to_world_matrix1 ,
                                    'camera_intrinsics': camera_intrinsics ,
                                    'cal_points_3d': points,
                                    'cal_ref_points_3d': points_a,
                                    'cal_gaze_points0_3d': points_b,
                                    'cal_gaze_points1_3d': points_c})


        elif matched_monocular_data:
            method = 'monocular 3d model'

            #TODO model the world as cv2 pinhole camera with distorion and focal in ceres.
            # right now we solve using a few permutations of K
            smallest_residual = 1000
            scales = list(np.linspace(0.7,1.4,20))
            K = camera_intrinsics["camera_matrix"]
            for s in scales:
                scale = np.ones(K.shape)
                scale[0,0] *= s
                scale[1,1] *= s
                camera_intrinsics["camera_matrix"] = K*scale
                ref_dir , gaze_dir, _ = calibrate.preprocess_3d_data(matched_monocular_data,
                                                camera_intrinsics = camera_intrinsics )
                # save_object((ref_dir,gaze_dir),os.path.join(g_pool.user_dir, "testdata"))
                if len(ref_dir) < 1 or len(gaze_dir) < 1:
                    g_pool.active_calibration_plugin.notify_all({'subject':'calibration.failed','reason':not_enough_data_error_msg,'timestamp':g_pool.get_timestamp(),'record':True})
                    logger.error(not_enough_data_error_msg + " Using:" + method)
                    return



                ### monocular calibration strategy: mimize the reprojection error by moving the world camera.
                # we fix the eye points and work in the eye coord system.
                initial_R,initial_t = find_rigid_transform(np.array(ref_dir)*500,np.array(gaze_dir)*500)
                initial_rotation = math_helper.quaternion_from_rotation_matrix(initial_R)
                initial_translation = np.array(initial_t).reshape(3)
                # this problem is scale invariant so we scale to some sensical value.


                if matched_monocular_data[0]['pupil']['id'] == 0:
                    hardcoded_translation = hardcoded_translation0
                else:
                    hardcoded_translation = hardcoded_translation1


                eye = { "observations" : gaze_dir , "translation" : (0,0,0) , "rotation" : (1,0,0,0),'fix':['translation','rotation']  }
                world = { "observations" : ref_dir , "translation" : np.dot(initial_R,-hardcoded_translation) , "rotation" : initial_rotation,'fix':['translation']  }
                initial_observers = [eye,world]
                initial_points = np.array(gaze_dir)*500


                success,residual, observers, points_in_eye  = bundle_adjust_calibration(initial_observers , initial_points, fix_points=True )
                # NOTE(review): same suspected bug as the binocular branch —
                # `scales[-1] = s` does not preserve the best solve.
                if residual <= smallest_residual:
                    smallest_residual = residual
                    scales[-1] = s

            eye, world = observers

            if not success:
                logger.error("Calibration solver faild to converge.")
                g_pool.active_calibration_plugin.notify_all({'subject':'calibration.failed','reason':solver_failed_to_converge_error_msg,'timestamp':g_pool.get_timestamp(),'record':True})
                return


            #pose of the world in eye coords.
            rotation = np.array(world['rotation'])
            t_world = np.array(world['translation'])
            R_world = math_helper.quaternion_rotation_matrix(rotation)

            # inverse is pose of eye in world coords
            R_eye = R_world.T
            t_eye = np.dot(R_eye,-t_world)



            def toWorld(p):
                # eye coords -> world coords
                return np.dot(R_eye, p)+np.array(t_eye)

            points_in_world = [toWorld(p) for p in points_in_eye]

            # project every triangulated point back onto both observation rays
            points_a = [] #world coords
            points_b = [] #cam2 coords
            for a,b,point in zip(world['observations'] , eye['observations'],points_in_world):

                line_a = np.array([0,0,0]) , np.array(a) #observation as line
                line_b = toWorld(np.array([0,0,0])) , toWorld(b)  #cam2 observation line in cam1 coords
                close_point_a,_ =  math_helper.nearest_linepoint_to_point( point , line_a )
                close_point_b,_ =  math_helper.nearest_linepoint_to_point( point , line_b )
                # print np.linalg.norm(point-close_point_a),np.linalg.norm(point-close_point_b)

                points_a.append(close_point_a)
                points_b.append(close_point_b)


            # we need to take the sphere position into account
            # orientation and translation are referring to the sphere center.
            # but we want to have it referring to the camera center
            # since the actual translation is in world coordinates, the sphere translation needs to be calculated in world coordinates
            sphere_translation = np.array( matched_monocular_data[-1]['pupil']['sphere']['center'] )
            sphere_translation_world = np.dot( R_eye , sphere_translation)
            camera_translation = t_eye - sphere_translation_world
            eye_camera_to_world_matrix  = np.eye(4)
            eye_camera_to_world_matrix[:3,:3] = R_eye
            eye_camera_to_world_matrix[:3,3:4] = np.reshape(camera_translation, (3,1) )


            g_pool.plugins.add(Vector_Gaze_Mapper,args=
                {'eye_camera_to_world_matrix':eye_camera_to_world_matrix ,
                'camera_intrinsics': camera_intrinsics ,
                'cal_points_3d': points_in_world,
                'cal_ref_points_3d': points_a,
                'cal_gaze_points_3d': points_b,
                'gaze_distance':500})

        else:
            logger.error(not_enough_data_error_msg)
            g_pool.active_calibration_plugin.notify_all({'subject':'calibration.failed','reason':not_enough_data_error_msg,'timestamp':g_pool.get_timestamp(),'record':True})
            return

    elif mode == '2d':
        if matched_binocular_data:
            method = 'binocular polynomial regression'
            cal_pt_cloud_binocular = calibrate.preprocess_2d_data_binocular(matched_binocular_data)
            cal_pt_cloud0 = calibrate.preprocess_2d_data_monocular(matched_pupil0_data)
            cal_pt_cloud1 = calibrate.preprocess_2d_data_monocular(matched_pupil1_data)

            # three independent fits: joint binocular plus one per eye
            map_fn,inliers,params = calibrate.calibrate_2d_polynomial(cal_pt_cloud_binocular,g_pool.capture.frame_size,binocular=True)
            if not inliers.any():
                g_pool.active_calibration_plugin.notify_all({'subject':'calibration.failed','reason':solver_failed_to_converge_error_msg,'timestamp':g_pool.get_timestamp(),'record':True})
                return

            map_fn,inliers,params_eye0 = calibrate.calibrate_2d_polynomial(cal_pt_cloud0,g_pool.capture.frame_size,binocular=False)
            if not inliers.any():
                g_pool.active_calibration_plugin.notify_all({'subject':'calibration.failed','reason':solver_failed_to_converge_error_msg,'timestamp':g_pool.get_timestamp(),'record':True})
                return

            map_fn,inliers,params_eye1 = calibrate.calibrate_2d_polynomial(cal_pt_cloud1,g_pool.capture.frame_size,binocular=False)
            if not inliers.any():
                g_pool.active_calibration_plugin.notify_all({'subject':'calibration.failed','reason':solver_failed_to_converge_error_msg,'timestamp':g_pool.get_timestamp(),'record':True})
                return

            g_pool.plugins.add(Binocular_Gaze_Mapper,args={'params':params, 'params_eye0':params_eye0, 'params_eye1':params_eye1})


        elif matched_monocular_data:
            method = 'monocular polynomial regression'
            cal_pt_cloud = calibrate.preprocess_2d_data_monocular(matched_monocular_data)
            map_fn,inliers,params = calibrate.calibrate_2d_polynomial(cal_pt_cloud,g_pool.capture.frame_size,binocular=False)
            if not inliers.any():
                g_pool.active_calibration_plugin.notify_all({'subject':'calibration.failed','reason':solver_failed_to_converge_error_msg,'timestamp':g_pool.get_timestamp(),'record':True})
                return

            g_pool.plugins.add(Monocular_Gaze_Mapper,args={'params':params})
        else:
            logger.error(not_enough_data_error_msg)
            g_pool.active_calibration_plugin.notify_all({'subject':'calibration.failed','reason':not_enough_data_error_msg,'timestamp':g_pool.get_timestamp(),'record':True})
            return

    # persist raw calibration data so the mapping can be recomputed offline
    user_calibration_data = {'pupil_list':pupil_list,'ref_list':ref_list,'calibration_method':method}
    save_object(user_calibration_data,os.path.join(g_pool.user_dir, "user_calibration_data"))
    g_pool.active_calibration_plugin.notify_all({'subject':'calibration.successful','method':method,'timestamp':g_pool.get_timestamp(),'record':True})

Example 28

Project: RLScore
Source File: global_rankrls.py
View license
    def _leave_pair_out_python(self, pairs_start_inds, pairs_end_inds, oind=0):
        """Computes leave-pair-out predictions for a trained RankRLS.
        
        Parameters
        ----------
        pairs_start_inds : list of indices, shape = [n_pairs]
            list of indices from range [0, n_samples-1]
        pairs_end_inds : list of indices, shape = [n_pairs]
            list of indices from range [0, n_samples-1]
        oind : index from range [0, n_samples-1]
            column of Y, for which pairwise cv is computed
        
        Returns
        -------
        P1 : array, shape = [n_pairs]
            holdout predictions for pairs_start_inds
        P2 : array, shape = [n_pairs]
            holdout predictions for pairs_end_inds
            
        Notes
        -----
    
        Computes the leave-pair-out cross-validation predicitons, where each (i,j) pair with
        i= pair_start_inds[k] and j = pairs_end_inds[k] is left out in turn.
        
        When estimating area under ROC curve with leave-pair-out, one should leave out all
        positive-negative pairs, while for estimating the general ranking error one should
        leave out all pairs with different labels.
        
        Computational complexity of holdout with most pairs left out: m = n_samples
        
        O(TODO)
        
        The leave-pair-out cross-validation algorithm is described in [1,2]. The use of
        leave-pair-out cross-validation for AUC estimation has been analyzed in [3]

        References
        ----------
        
        [1] Tapio Pahikkala, Evgeni Tsivtsivadze, Antti Airola, Jouni Jarvinen, and Jorma Boberg.
        An efficient algorithm for learning to rank from preference graphs.
        Machine Learning, 75(1):129-165, 2009.
    
        [2] Tapio Pahikkala, Antti Airola, Jorma Boberg, and Tapio Salakoski.
        Exact and efficient leave-pair-out cross-validation for ranking RLS.
        In Proceedings of the 2nd International and Interdisciplinary Conference
        on Adaptive Knowledge Representation and Reasoning (AKRR'08), pages 1-8,
        Espoo, Finland, 2008.

        [3] Antti Airola, Tapio Pahikkala, Willem Waegeman, Bernard De Baets, Tapio Salakoski.
        An Experimental Comparison of Cross-Validation Techniques for Estimating the Area Under the ROC Curve.
        Computational Statistics & Data Analysis 55(4), 1828-1844, 2011.
        """
        
        evals, svecs = self.evals, self.svecs
        m = self.size
        
        Y = self.Y
        
        #This is, in the worst case, a cubic operation.
        #If there are multiple outputs,
        #this operation should be common for them all. THIS IS TO BE FIXED!
        def computeG():
            regparam = self.regparam
            G = svecs * multiply(multiply(evals, 1. / ((m - 2.) * evals + regparam)).T, svecs.T)
            return G
        G = computeG()
        
        GDY = (self.size - 2.) * G * Y
        GC = sum(G, axis=1)
        
        CTGC = sum(GC)

        
        CTY = sum(Y, axis=0)[0, oind]
        CTGDY = sum(GDY, axis=0)[0, oind]
        
        sm2 = self.size - 2.
        sqrtsm2 = sqrt(sm2)
        
        #Array is faster to access than matrix
        G = array(G)
        
        #Lists are faster to access than matrices or arrays
        def hack():
            GDY_ = []
            sqrtsm2GDY_ = []
            GC_ = []
            Y_ = []
            BTY_ = []
            Gdiag_ = []
            sm2Gdiag_ = []
            BTGBBTY_ = []
            for i in range(m):
                GDYi = GDY[i, oind]
                GDY_.append(GDYi)
                sqrtsm2GDY_.append(sqrtsm2 * GDYi)
                GC_.append(GC[i, 0])
                Yi = Y[i, oind]
                Y_.append(Yi)
                BTY_.append(sqrtsm2 * Yi)
                Gii = G[i, i]
                Gdiag_.append(Gii)
                sm2Gdiag_.append(sm2 * Gii - 1.)
                BTGBBTY_.append(sm2 * Gii * sqrtsm2 * Yi)
            return GDY_, sqrtsm2GDY_, GC_, Y_, BTY_, Gdiag_, sm2Gdiag_, BTGBBTY_
        GDY_, sqrtsm2GDY_, GC_, Y_, BTY_, Gdiag_, sm2Gdiag_, BTGBBTY_ = hack()
        
        results_start, results_end = [], []
        
        #This loops through the list of hold-out pairs.
        #Each pair is handled in a constant time.
        def looppairs(results_start, results_end):
            for pairind in range(len(pairs_start_inds)):
                
                i, j = pairs_start_inds[pairind], pairs_end_inds[pairind]
                
                Gii = Gdiag_[i]
                Gij = G[i, j]
                Gjj = Gdiag_[j]
                GCi = GC_[i]
                GCj = GC_[j]
                
                Yi = Y_[i]
                Yj = Y_[j]
                
                GDYi = GDY_[i]
                GDYj = GDY_[j]
                
                BTY0 = CTY - Yi - Yj
                BTY1 = BTY_[i]
                BTY2 = BTY_[j]
                
                GiipGij = Gii + Gij
                GijpGjj = Gij + Gjj
                GCipGCj = GCi + GCj
                
                BTGB00 = GiipGij + GijpGjj + CTGC - GCipGCj - GCipGCj
                BTGB01 = sqrtsm2 * (GCi - GiipGij)
                BTGB02 = sqrtsm2 * (GCj - GijpGjj)
                BTGB12 = sm2 * Gij
                
                BTGLY0 = CTGDY - (GDYi + GDYj + BTGB00 * BTY0 + BTGB01 * BTY1 + BTGB02 * BTY2)
                BTGLY1 = sqrtsm2GDY_[i] - (BTGB01 * BTY0 + BTGBBTY_[i] + BTGB12 * BTY2)
                BTGLY2 = sqrtsm2GDY_[j] - (BTGB02 * BTY0 + BTGB12 * BTY1 + BTGBBTY_[j])
                print CTGDY, BTGLY0
                BTGB00m1 = BTGB00 - 1.
                BTGB11m1 = sm2Gdiag_[i]
                BTGB22m1 = sm2Gdiag_[j]
                
                CF00 = BTGB11m1 * BTGB22m1 - BTGB12 * BTGB12
                CF01 = -BTGB01 * BTGB22m1 + BTGB12 * BTGB02
                CF02 = BTGB01 * BTGB12 - BTGB11m1 * BTGB02
                CF11 = BTGB00m1 * BTGB22m1 - BTGB02 * BTGB02
                CF12 = -BTGB00m1 * BTGB12 + BTGB01 * BTGB02
                CF22 = BTGB00m1 * BTGB11m1 - BTGB01 * BTGB01
                
                invdeter = 1. / (BTGB00m1 * CF00 + BTGB01 * CF01 + BTGB02 * CF02)
                
                b0 = invdeter * (CF00 * BTGLY0 + CF01 * BTGLY1 + CF02 * BTGLY2) + BTY0
                b1 = invdeter * (CF01 * BTGLY0 + CF11 * BTGLY1 + CF12 * BTGLY2) + BTY1
                b2 = invdeter * (CF02 * BTGLY0 + CF12 * BTGLY1 + CF22 * BTGLY2) + BTY2
                
                t1 = -b0 + sqrtsm2 * b1
                t2 = -b0 + sqrtsm2 * b2
                F0 = GDYi - (Gii * t1 + Gij * t2 + GCi * b0)
                F1 = GDYj - (Gij * t1 + Gjj * t2 + GCj * b0)
                
                results_start.append(F0), results_end.append(F1)
        looppairs(results_start, results_end)
        return np.array(results_start), np.array(results_end)

Example 29

Project: RLScore
Source File: global_rankrls.py
View license
    def _leave_pair_out_python(self, pairs_start_inds, pairs_end_inds, oind=0):
        """Computes leave-pair-out predictions for a trained RankRLS.
        
        Parameters
        ----------
        pairs_start_inds : list of indices, shape = [n_pairs]
            list of indices from range [0, n_samples-1]
        pairs_end_inds : list of indices, shape = [n_pairs]
            list of indices from range [0, n_samples-1]
        oind : index from range [0, n_samples-1]
            column of Y, for which pairwise cv is computed
        
        Returns
        -------
        P1 : array, shape = [n_pairs]
            holdout predictions for pairs_start_inds
        P2 : array, shape = [n_pairs]
            holdout predictions for pairs_end_inds
            
        Notes
        -----
    
        Computes the leave-pair-out cross-validation predicitons, where each (i,j) pair with
        i= pair_start_inds[k] and j = pairs_end_inds[k] is left out in turn.
        
        When estimating area under ROC curve with leave-pair-out, one should leave out all
        positive-negative pairs, while for estimating the general ranking error one should
        leave out all pairs with different labels.
        
        Computational complexity of holdout with most pairs left out: m = n_samples
        
        O(TODO)
        
        The leave-pair-out cross-validation algorithm is described in [1,2]. The use of
        leave-pair-out cross-validation for AUC estimation has been analyzed in [3]

        References
        ----------
        
        [1] Tapio Pahikkala, Evgeni Tsivtsivadze, Antti Airola, Jouni Jarvinen, and Jorma Boberg.
        An efficient algorithm for learning to rank from preference graphs.
        Machine Learning, 75(1):129-165, 2009.
    
        [2] Tapio Pahikkala, Antti Airola, Jorma Boberg, and Tapio Salakoski.
        Exact and efficient leave-pair-out cross-validation for ranking RLS.
        In Proceedings of the 2nd International and Interdisciplinary Conference
        on Adaptive Knowledge Representation and Reasoning (AKRR'08), pages 1-8,
        Espoo, Finland, 2008.

        [3] Antti Airola, Tapio Pahikkala, Willem Waegeman, Bernard De Baets, Tapio Salakoski.
        An Experimental Comparison of Cross-Validation Techniques for Estimating the Area Under the ROC Curve.
        Computational Statistics & Data Analysis 55(4), 1828-1844, 2011.
        """
        
        evals, svecs = self.evals, self.svecs
        m = self.size
        
        Y = self.Y
        
        #This is, in the worst case, a cubic operation.
        #If there are multiple outputs,
        #this operation should be common for them all. THIS IS TO BE FIXED!
        def computeG():
            regparam = self.regparam
            G = svecs * multiply(multiply(evals, 1. / ((m - 2.) * evals + regparam)).T, svecs.T)
            return G
        G = computeG()
        
        GDY = (self.size - 2.) * G * Y
        GC = sum(G, axis=1)
        
        CTGC = sum(GC)

        
        CTY = sum(Y, axis=0)[0, oind]
        CTGDY = sum(GDY, axis=0)[0, oind]
        
        sm2 = self.size - 2.
        sqrtsm2 = sqrt(sm2)
        
        #Array is faster to access than matrix
        G = array(G)
        
        #Lists are faster to access than matrices or arrays
        def hack():
            GDY_ = []
            sqrtsm2GDY_ = []
            GC_ = []
            Y_ = []
            BTY_ = []
            Gdiag_ = []
            sm2Gdiag_ = []
            BTGBBTY_ = []
            for i in range(m):
                GDYi = GDY[i, oind]
                GDY_.append(GDYi)
                sqrtsm2GDY_.append(sqrtsm2 * GDYi)
                GC_.append(GC[i, 0])
                Yi = Y[i, oind]
                Y_.append(Yi)
                BTY_.append(sqrtsm2 * Yi)
                Gii = G[i, i]
                Gdiag_.append(Gii)
                sm2Gdiag_.append(sm2 * Gii - 1.)
                BTGBBTY_.append(sm2 * Gii * sqrtsm2 * Yi)
            return GDY_, sqrtsm2GDY_, GC_, Y_, BTY_, Gdiag_, sm2Gdiag_, BTGBBTY_
        GDY_, sqrtsm2GDY_, GC_, Y_, BTY_, Gdiag_, sm2Gdiag_, BTGBBTY_ = hack()
        
        results_start, results_end = [], []
        
        #This loops through the list of hold-out pairs.
        #Each pair is handled in a constant time.
        def looppairs(results_start, results_end):
            for pairind in range(len(pairs_start_inds)):
                
                i, j = pairs_start_inds[pairind], pairs_end_inds[pairind]
                
                Gii = Gdiag_[i]
                Gij = G[i, j]
                Gjj = Gdiag_[j]
                GCi = GC_[i]
                GCj = GC_[j]
                
                Yi = Y_[i]
                Yj = Y_[j]
                
                GDYi = GDY_[i]
                GDYj = GDY_[j]
                
                BTY0 = CTY - Yi - Yj
                BTY1 = BTY_[i]
                BTY2 = BTY_[j]
                
                GiipGij = Gii + Gij
                GijpGjj = Gij + Gjj
                GCipGCj = GCi + GCj
                
                BTGB00 = GiipGij + GijpGjj + CTGC - GCipGCj - GCipGCj
                BTGB01 = sqrtsm2 * (GCi - GiipGij)
                BTGB02 = sqrtsm2 * (GCj - GijpGjj)
                BTGB12 = sm2 * Gij
                
                BTGLY0 = CTGDY - (GDYi + GDYj + BTGB00 * BTY0 + BTGB01 * BTY1 + BTGB02 * BTY2)
                BTGLY1 = sqrtsm2GDY_[i] - (BTGB01 * BTY0 + BTGBBTY_[i] + BTGB12 * BTY2)
                BTGLY2 = sqrtsm2GDY_[j] - (BTGB02 * BTY0 + BTGB12 * BTY1 + BTGBBTY_[j])
                print CTGDY, BTGLY0
                BTGB00m1 = BTGB00 - 1.
                BTGB11m1 = sm2Gdiag_[i]
                BTGB22m1 = sm2Gdiag_[j]
                
                CF00 = BTGB11m1 * BTGB22m1 - BTGB12 * BTGB12
                CF01 = -BTGB01 * BTGB22m1 + BTGB12 * BTGB02
                CF02 = BTGB01 * BTGB12 - BTGB11m1 * BTGB02
                CF11 = BTGB00m1 * BTGB22m1 - BTGB02 * BTGB02
                CF12 = -BTGB00m1 * BTGB12 + BTGB01 * BTGB02
                CF22 = BTGB00m1 * BTGB11m1 - BTGB01 * BTGB01
                
                invdeter = 1. / (BTGB00m1 * CF00 + BTGB01 * CF01 + BTGB02 * CF02)
                
                b0 = invdeter * (CF00 * BTGLY0 + CF01 * BTGLY1 + CF02 * BTGLY2) + BTY0
                b1 = invdeter * (CF01 * BTGLY0 + CF11 * BTGLY1 + CF12 * BTGLY2) + BTY1
                b2 = invdeter * (CF02 * BTGLY0 + CF12 * BTGLY1 + CF22 * BTGLY2) + BTY2
                
                t1 = -b0 + sqrtsm2 * b1
                t2 = -b0 + sqrtsm2 * b2
                F0 = GDYi - (Gii * t1 + Gij * t2 + GCi * b0)
                F1 = GDYj - (Gij * t1 + Gjj * t2 + GCj * b0)
                
                results_start.append(F0), results_end.append(F1)
        looppairs(results_start, results_end)
        return np.array(results_start), np.array(results_end)

Example 30

Project: python-control
Source File: freqplot.py
View license
def bode_plot(syslist, omega=None, dB=None, Hz=None, deg=None,
        Plot=True, omega_limits=None, omega_num=None, *args, **kwargs):
    r"""Bode plot for a system

    Plots a Bode plot for the system over a (optional) frequency range.

    Parameters
    ----------
    syslist : linsys
        List of linear input/output systems (single system is OK)
    omega : freq_range
        Range of frequencies in rad/sec
    dB : boolean
        If True, plot result in dB
    Hz : boolean
        If True, plot frequency in Hz (omega must be provided in rad/sec)
    deg : boolean
        If True, plot phase in degrees (else radians)
    Plot : boolean
        If True, plot magnitude and phase
    omega_limits: tuple, list, ... of two values
        Limits of the frequency vector to generate.
        If Hz=True the limits are in Hz otherwise in rad/s.
    omega_num: int
        number of samples
    *args, **kwargs:
        Additional options to matplotlib (color, linestyle, etc)

    Returns
    -------
    mag : array (list if len(syslist) > 1)
        magnitude
    phase : array (list if len(syslist) > 1)
        phase in radians
    omega : array (list if len(syslist) > 1)
        frequency in rad/sec

    Notes
    -----
    1. Alternatively, you may use the lower-level method (mag, phase, freq)
    = sys.freqresp(freq) to generate the frequency response for a system,
    but it returns a MIMO response.

    2. If a discrete time model is given, the frequency response is plotted
    along the upper branch of the unit circle, using the mapping z = exp(j
    \omega dt) where omega ranges from 0 to pi/dt and dt is the discrete
    time base.  If no timebase is specified (dt = True), dt is set to 1.

    Examples
    --------
    >>> sys = ss("1. -2; 3. -4", "5.; 7", "6. 8", "9.")
    >>> mag, phase, omega = bode(sys)
    """
    # Set default values for options
    from . import config
    if dB is None:
        dB = config.bode_dB
    if deg is None:
        deg = config.bode_deg
    if Hz is None:
        Hz = config.bode_Hz

    # If argument was a singleton, turn it into a list
    if not getattr(syslist, '__iter__', False):
        syslist = (syslist,)

    if omega is None:
        if omega_limits is None:
            # Select a default range if none is provided
            omega = default_frequency_range(syslist, Hz=Hz,
                                            number_of_samples=omega_num)
        else:
            # Cast to float so the Hz -> rad/s scaling below cannot fail on
            # integer input (in-place float multiply of an int array raises).
            omega_limits = np.asarray(omega_limits, dtype=float)
            if Hz:
                # Limits were given in Hz; convert to rad/s
                omega_limits *= 2. * np.pi
            # np.logspace replaces the removed scipy alias sp.logspace;
            # the results are identical.
            if omega_num:
                omega = np.logspace(np.log10(omega_limits[0]),
                                    np.log10(omega_limits[1]),
                                    num=omega_num, endpoint=True)
            else:
                omega = np.logspace(np.log10(omega_limits[0]),
                                    np.log10(omega_limits[1]), endpoint=True)

    mags, phases, omegas, nyquistfrqs = [], [], [], []
    for sys in syslist:
        if sys.inputs > 1 or sys.outputs > 1:
            # TODO: Add MIMO bode plots.
            raise NotImplementedError("Bode is currently only implemented for SISO systems.")

        omega_sys = np.array(omega)
        if sys.isdtime(True):
            # Restrict the plotted range to below the Nyquist frequency of
            # the discrete-time system.
            nyquistfrq = 2. * np.pi * 1. / sys.dt / 2.
            omega_sys = omega_sys[omega_sys < nyquistfrq]
            # TODO: What distance to the Nyquist frequency is appropriate?
        else:
            nyquistfrq = None

        # Get the magnitude and phase of the system
        mag_tmp, phase_tmp, omega_sys = sys.freqresp(omega_sys)
        mag = np.atleast_1d(np.squeeze(mag_tmp))
        phase = np.atleast_1d(np.squeeze(phase_tmp))
        phase = unwrap(phase)

        nyquistfrq_plot = None
        if Hz:
            omega_plot = omega_sys / (2. * np.pi)
            if nyquistfrq:
                nyquistfrq_plot = nyquistfrq / (2. * np.pi)
        else:
            omega_plot = omega_sys
            if nyquistfrq:
                nyquistfrq_plot = nyquistfrq

        mags.append(mag)
        phases.append(phase)
        omegas.append(omega_sys)
        nyquistfrqs.append(nyquistfrq)
        # Get the dimensions of the current axis, which we will divide up
        #! TODO: Not current implemented; just use subplot for now

        if Plot:
            # Magnitude plot
            ax_mag = plt.subplot(211)
            if dB:
                pltline = ax_mag.semilogx(omega_plot, 20 * np.log10(mag),
                                          *args, **kwargs)
            else:
                pltline = ax_mag.loglog(omega_plot, mag, *args, **kwargs)
            # NOTE: former plt.hold(True)/Axes.hold(True) calls removed;
            # Axes.hold no longer exists in matplotlib and overplotting is
            # the default behavior, so this is behavior-preserving.
            if nyquistfrq_plot:
                ax_mag.axvline(nyquistfrq_plot, color=pltline[0].get_color())

            # Add a grid to the plot + labeling
            ax_mag.grid(True, which='both')
            ax_mag.set_ylabel("Magnitude (dB)" if dB else "Magnitude")

            # Phase plot
            ax_phase = plt.subplot(212, sharex=ax_mag)
            if deg:
                phase_plot = phase * 180. / np.pi
            else:
                phase_plot = phase
            ax_phase.semilogx(omega_plot, phase_plot, *args, **kwargs)
            if nyquistfrq_plot:
                ax_phase.axvline(nyquistfrq_plot, color=pltline[0].get_color())

            # Add a grid to the plot + labeling
            ax_phase.set_ylabel("Phase (deg)" if deg else "Phase (rad)")

            def genZeroCenteredSeries(val_min, val_max, period):
                # Tick locations at integer multiples of `period`, padded by
                # 0.2 of a period so ticks at the axis limits are kept.
                v1 = np.ceil(val_min / period - 0.2)
                v2 = np.floor(val_max / period + 0.2)
                return np.arange(v1, v2 + 1) * period

            ylim = ax_phase.get_ylim()
            if deg:
                ax_phase.set_yticks(genZeroCenteredSeries(ylim[0], ylim[1], 45.))
                ax_phase.set_yticks(genZeroCenteredSeries(ylim[0], ylim[1], 15.),
                                    minor=True)
            else:
                ax_phase.set_yticks(genZeroCenteredSeries(ylim[0], ylim[1],
                                                          np.pi / 4.))
                ax_phase.set_yticks(genZeroCenteredSeries(ylim[0], ylim[1],
                                                          np.pi / 12.),
                                    minor=True)
            ax_phase.grid(True, which='both')

            # Label the frequency axis
            ax_phase.set_xlabel("Frequency (Hz)" if Hz
                                else "Frequency (rad/sec)")

    if len(syslist) == 1:
        return mags[0], phases[0], omegas[0]
    else:
        return mags, phases, omegas

Example 31

Project: mpop
Source File: test_satin_helpers.py
View license
    def test_boundaries_to_extent(self):
        '''Test conversion of area boundaries to area extent.

        Exercises four cases: all corners inside the satellite view,
        two corners outside, a combination with a previously computed
        extent, and no corners inside (falls back to the full-disc
        extent).  The expected float values are regression constants
        recorded from a known-good run.
        '''

        from mpop.satin.helper_functions import boundaries_to_extent

        # MSG3 proj4 string from
        #  xrit.sat.load(..., only_metadata=True).proj4_params
        proj4_str = 'proj=geos lon_0=0.00 lat_0=0.00 ' \
            'a=6378169.00 b=6356583.80 h=35785831.00'

        # MSG3 maximum extent
        msg_extent = [-5567248.07, -5570248.48, 5570248.48, 5567248.07]


        # Boundary longitudes of the euro4 area: four edges, each as its
        # own array (all corners are inside the satellite view).
        euro4_lons = [np.array([-47.45398384, -43.46278935,
                                 -38.35946515, -31.73014962,
                                 -23.05306111, 11.8361092,
                                 1.9545262, 17.28655348,
                                 32.17162432, 44.92350518,
                                 55.01855232, 56.988557157486078]),
                      np.array([56.98855716, 50.26011569,
                                45.1592762, 41.21696892,
                                38.10602167, 35.60224391,
                                33.55098034, 31.8438098,
                                30.40324844, 29.17282762,
                                28.11061579, 27.886603224354555]),
                      np.array([27.88660322, 23.94855341,
                                19.91336672, 15.81854029,
                                11.70507781, 7.61511006,
                                3.58934937, -0.33524747,
                                -4.1272886, -7.76204144,
                                -11.2217833, -11.991484302295099]),
                      np.array([-11.9914843, -13.71190987,
                                 -15.65433484, -17.8592324,
                                 -20.37559742, -23.26235124,
                                 -26.5893562, -30.43725577,
                                 -34.8946782, -40.05040055,
                                 -45.97725877, -47.453983842896925])
                      ]

        # Matching boundary latitudes for the euro4 area edges.
        euro4_lats = [np.array([ 60.95152407, 64.07948755,
                                 67.08804237, 69.89447062,
                                 72.37400834, 74.34558786,
                                 75.57997723, 75.8713547,
                                 75.16167548, 73.58553666,
                                 71.37260506, 70.797059167821104]),
                      np.array([ 70.79705917, 67.92687675,
                                 64.85946318, 61.67911498,
                                 58.44076323, 55.18141964,
                                 51.92695755, 48.69607712,
                                 45.50265971, 42.35720453,
                                 39.26773508, 38.565754283815295]),
                      np.array([ 38.56575428, 39.21556029,
                                 39.65166546, 39.86532337,
                                 39.85213881, 39.61238514,
                                 39.15098428, 38.47715262,
                                 37.60377021, 36.54656798,
                                 35.32324138, 35.020342638475668]),
                      np.array([ 35.02034264, 37.76813725,
                                 40.533077, 43.300949,
                                 46.05396441, 48.76986157,
                                 51.42078481, 53.97194327,
                                 56.38014919, 58.59254174,
                                 60.54617556, 60.95152407157881])
                      ]

        # Correct extent values for these boundaries
        correct_values_euro4 = [-2041009.079233268, 3502723.3881863873,
                                2211266.5660426724, 5387911.4915445326]


        maximum_extent_euro4 = boundaries_to_extent(proj4_str,
                                                    None,
                                                    msg_extent,
                                                    euro4_lons, euro4_lats)

        # Extents are in projection meters; 2 decimal places is ample.
        for i in range(4):
            self.assertAlmostEqual(maximum_extent_euro4[i],
                                   correct_values_euro4[i], 2)

        # Two of the area corner points is outside the satellite view

        afgh_lons = [np.array([49.94506701, 52.14080597,
                               54.33654493, 56.53228389,
                               58.72802285, 60.92376181,
                               63.11950077, 65.31523973,
                               67.51097869, 69.70671766,
                               71.90245662, 74.09819558,
                               76.29393454, 78.4896735,
                               80.68541246, 82.88115142]),
                     np.array([85.05493299, 85.05493299,
                               85.05493299, 85.05493299,
                               85.05493299, 85.05493299,
                               85.05493299, 85.05493299,
                               85.05493299, 85.05493299,
                               85.05493299, 85.05493299,
                               85.05493299, 85.05493299,
                               85.05493299, 85.05493299]),
                     np.array([85.05493299, 82.85919403,
                               80.66345507, 78.46771611,
                               76.27197715, 74.07623819,
                               71.88049923, 69.68476027,
                               67.48902131, 65.29328234,
                               63.09754338, 60.90180442,
                               58.70606546, 56.5103265,
                               54.31458754, 52.11884858]),
                     np.array([49.94506701, 49.94506701,
                               49.94506701, 49.94506701,
                               49.94506701, 49.94506701,
                               49.94506701, 49.94506701,
                               49.94506701, 49.94506701,
                               49.94506701, 49.94506701,
                               49.94506701, 49.94506701,
                               49.94506701, 49.94506701])]


        afgh_lats = [np.array([46.52610743, 46.52610743,
                               46.52610743, 46.52610743,
                               46.52610743, 46.52610743,
                               46.52610743, 46.52610743,
                               46.52610743, 46.52610743,
                               46.52610743, 46.52610743,
                               46.52610743, 46.52610743,
                               46.52610743, 46.52610743]),
                     np.array([46.52610743, 44.99436458,
                               43.42055852, 41.804754,
                               40.14714935, 38.4480861,
                               36.70805834, 34.92772129,
                               33.10789917, 31.24959192,
                               29.35398073, 27.42243208,
                               25.45649997, 23.4579264,
                               21.4286396, 19.37075017]),
                     np.array([17.30750918, 17.30750918,
                               17.30750918, 17.30750918,
                               17.30750918, 17.30750918,
                               17.30750918, 17.30750918,
                               17.30750918, 17.30750918,
                               17.30750918, 17.30750918,
                               17.30750918, 17.30750918,
                               17.30750918, 17.30750918]),
                     np.array([17.30750918, 19.39146328,
                               21.44907771, 23.47806753,
                               25.47632393, 27.44192051,
                               29.37311717, 31.26836176,
                               33.12628971, 34.94572163,
                               36.72565938, 38.46528046,
                               40.16393131, 41.82111941,
                               43.43650469, 45.00989022])
                     ]

        # Correct values for these borders
        correct_values_afgh = [3053894.9120028536, 1620176.1036167517,
                               5187086.4642274799, 4155907.3124084808]

        maximum_extent_afgh = boundaries_to_extent(proj4_str,
                                                   None,
                                                   msg_extent,
                                                   afgh_lons, afgh_lats)

        for i in range(len(maximum_extent_afgh)):
            self.assertAlmostEqual(maximum_extent_afgh[i],
                                   correct_values_afgh[i], 2)

        # Correct values for combined boundaries
        correct_values_comb = [-2041009.079233268, 1620176.1036167517,
                                5187086.4642274799, 5387911.4915445326]

        # Passing the previously computed extent as the second argument
        # should grow it to cover both areas.
        maximum_extent_comb = boundaries_to_extent(proj4_str,
                                                   maximum_extent_euro4,
                                                   msg_extent,
                                                   afgh_lons, afgh_lats)
        for i in range(4):
            self.assertAlmostEqual(maximum_extent_comb[i],
                                   correct_values_comb[i], 2)

        # Borders where none of the corners are within the satellite view
        lons = [np.array([-170., 170., -170., 170])]
        lats = [np.array([89., 89., -89., -89])]

        # Correct values are the same as the full disc extent
        correct_values = [-5567248.07, -5570248.48, 5570248.48, 5567248.07]

        maximum_extent_full = boundaries_to_extent(proj4_str,
                                                   None,
                                                   msg_extent,
                                                   lons, lats)
        for i in range(4):
            self.assertAlmostEqual(maximum_extent_full[i],
                                   correct_values[i], 2)

Example 32

Project: zipline
Source File: test_finance.py
View license
    def transaction_sim(self, **params):
        """This is a utility method that asserts expected
        results for conversion of orders to transactions given a
        trade history.

        Required keys in ``params``: ``trade_count``, ``trade_interval``
        (a timedelta), ``order_count``, ``order_amount``,
        ``order_interval``, ``expected_txn_count`` and
        ``expected_txn_volume``.  Optional keys: ``alternate`` (alternate
        long/short orders), ``complete_fill`` (expect each order filled by
        exactly one transaction) and ``default_slippage``.
        """
        trade_count = params['trade_count']
        trade_interval = params['trade_interval']
        order_count = params['order_count']
        order_amount = params['order_amount']
        order_interval = params['order_interval']
        expected_txn_count = params['expected_txn_count']
        expected_txn_volume = params['expected_txn_volume']

        # optional parameters
        # ---------------------
        # if present, alternate between long and short sales
        alternate = params.get('alternate')

        # if present, expect transaction amounts to match orders exactly.
        complete_fill = params.get('complete_fill')

        sid = 1
        metadata = make_simple_equity_info([sid], self.start, self.end)
        with TempDirectory() as tempdir, \
                tmp_trading_env(equities=metadata) as env:

            # Sub-daily trade intervals require a minute-frequency
            # simulation and minute bar data; otherwise daily bars suffice.
            if trade_interval < timedelta(days=1):
                sim_params = factory.create_simulation_parameters(
                    start=self.start,
                    end=self.end,
                    data_frequency="minute"
                )

                # Enough minutes to cover all trades, plus 100 spare.
                minutes = self.trading_calendar.minutes_window(
                    sim_params.first_open,
                    int((trade_interval.total_seconds() / 60) * trade_count)
                    + 100)

                # Flat price (10.1) and volume (100) for every bar.
                price_data = np.array([10.1] * len(minutes))
                assets = {
                    sid: pd.DataFrame({
                        "open": price_data,
                        "high": price_data,
                        "low": price_data,
                        "close": price_data,
                        "volume": np.array([100] * len(minutes)),
                        "dt": minutes
                    }).set_index("dt")
                }

                write_bcolz_minute_data(
                    self.trading_calendar,
                    self.trading_calendar.sessions_in_range(
                        self.trading_calendar.minute_to_session_label(
                            minutes[0]
                        ),
                        self.trading_calendar.minute_to_session_label(
                            minutes[-1]
                        )
                    ),
                    tempdir.path,
                    iteritems(assets),
                )

                equity_minute_reader = BcolzMinuteBarReader(tempdir.path)

                data_portal = DataPortal(
                    env.asset_finder, self.trading_calendar,
                    first_trading_day=equity_minute_reader.first_trading_day,
                    equity_minute_reader=equity_minute_reader,
                )
            else:
                sim_params = factory.create_simulation_parameters(
                    data_frequency="daily"
                )

                days = sim_params.sessions

                assets = {
                    1: pd.DataFrame({
                        "open": [10.1] * len(days),
                        "high": [10.1] * len(days),
                        "low": [10.1] * len(days),
                        "close": [10.1] * len(days),
                        "volume": [100] * len(days),
                        "day": [day.value for day in days]
                    }, index=days)
                }

                path = os.path.join(tempdir.path, "testdata.bcolz")
                BcolzDailyBarWriter(path, self.trading_calendar, days[0],
                                    days[-1]).write(
                    assets.items()
                )

                equity_daily_reader = BcolzDailyBarReader(path)

                data_portal = DataPortal(
                    env.asset_finder, self.trading_calendar,
                    first_trading_day=equity_daily_reader.first_trading_day,
                    equity_daily_reader=equity_daily_reader,
                )

            # FixedSlippage unless the caller explicitly asked for the
            # blotter's default slippage model.
            if "default_slippage" not in params or \
               not params["default_slippage"]:
                slippage_func = FixedSlippage()
            else:
                slippage_func = None

            blotter = Blotter(sim_params.data_frequency, self.env.asset_finder,
                              slippage_func)

            start_date = sim_params.first_open

            if alternate:
                alternator = -1
            else:
                alternator = 1

            tracker = PerformanceTracker(sim_params, self.trading_calendar,
                                         self.env)

            # replicate what tradesim does by going through every minute or day
            # of the simulation and processing open orders each time
            if sim_params.data_frequency == "minute":
                ticks = minutes
            else:
                ticks = days

            transactions = []

            order_list = []
            order_date = start_date
            for tick in ticks:
                blotter.current_dt = tick
                if tick >= order_date and len(order_list) < order_count:
                    # place an order
                    # direction flips sign on each order when alternating
                    direction = alternator ** len(order_list)
                    order_id = blotter.order(
                        blotter.asset_finder.retrieve_asset(sid),
                        order_amount * direction,
                        MarketOrder())
                    order_list.append(blotter.orders[order_id])
                    order_date = order_date + order_interval
                    # move after market orders to just after market next
                    # market open.
                    # NOTE(review): `minute >= 00` is always true, so this
                    # effectively triggers for any order date at/after 21:00.
                    if order_date.hour >= 21:
                        if order_date.minute >= 00:
                            order_date = order_date + timedelta(days=1)
                            order_date = order_date.replace(hour=14, minute=30)
                else:
                    # No new order this tick: process open orders into
                    # transactions against the current bar.
                    bar_data = BarData(
                        data_portal=data_portal,
                        simulation_dt_func=lambda: tick,
                        data_frequency=sim_params.data_frequency,
                        trading_calendar=self.trading_calendar,
                        restrictions=NoRestrictions(),
                    )
                    txns, _, closed_orders = blotter.get_transactions(bar_data)
                    for txn in txns:
                        tracker.process_transaction(txn)
                        transactions.append(txn)

                    blotter.prune_orders(closed_orders)

            # Every order targets the expected sid and (possibly alternating)
            # amount.
            for i in range(order_count):
                order = order_list[i]
                self.assertEqual(order.sid, sid)
                self.assertEqual(order.amount, order_amount * alternator ** i)

            if complete_fill:
                self.assertEqual(len(transactions), len(order_list))

            total_volume = 0
            for i in range(len(transactions)):
                txn = transactions[i]
                total_volume += txn.amount
                if complete_fill:
                    order = order_list[i]
                    self.assertEqual(order.amount, txn.amount)

            self.assertEqual(total_volume, expected_txn_volume)

            self.assertEqual(len(transactions), expected_txn_count)

            # Final tracked position must match the net traded volume.
            cumulative_pos = tracker.position_tracker.positions[sid]
            if total_volume == 0:
                self.assertIsNone(cumulative_pos)
            else:
                self.assertEqual(total_volume, cumulative_pos.amount)

            # the open orders should not contain sid.
            oo = blotter.open_orders
            self.assertNotIn(sid, oo, "Entry is removed when no open orders")
Example 33

Project: RMG-Py
Source File: adfparser.py
View license
    def extract(self, inputfile, line):
        """Extract information from the file object inputfile.

        Line-driven parser for ADF output (cclib-style): this method is
        called once per line of the output file and accumulates parsed
        attributes (atomnos, atomcoords, scfvalues, moenergies, mocoeffs,
        etenergies, ...) onto self.  Many branches consume additional
        lines directly from inputfile, so the caller's position in the
        file advances as a side effect.

        NOTE: Python 2 code (print statements, file.next(), integer
        division) -- do not run under Python 3 without porting.
        """

        # Guard against parsing Create (fragment-preparation) jobs, and
        # against files that concatenate multiple calculations.
        if line.find("INPUT FILE") >= 0:
        #check to make sure we aren't parsing Create jobs
            while line:

                self.updateprogress(inputfile, "Unsupported Information", self.fupdate)

                if line.find("INPUT FILE") >=0 and hasattr(self,"scftargets"):
                #does this file contain multiple calculations?
                #if so, print a warning and skip to end of file
                    self.logger.warning("Skipping remaining calculations")
                    # seek(0, 2) jumps to end-of-file so the outer parse loop stops.
                    inputfile.seek(0,2)
                    break

                if line.find("INPUT FILE") >= 0:
                    line2 = inputfile.next()
                else:
                    line2 = None

                if line2 and len(line2) <= 2:
                #make sure that it's not blank like in the NiCO4 regression
                    line2 = inputfile.next()

                # A non-Create job: stop skipping and resume normal parsing.
                if line2 and (line2.find("Create") < 0 and line2.find("create") < 0):
                    break

                line = inputfile.next()

        # Flag calculations run without symmetry (NOSYM).
        if line[1:10] == "Symmetry:":
            info = line.split()
            if info[1] == "NOSYM":
                self.nosymflag = True

        # Use this to read the subspecies of irreducible representations.
        # It will be a list, with each element representing one irrep.
        if line.strip() == "Irreducible Representations, including subspecies":
            dashes = inputfile.next()
            self.irreps = []
            line = inputfile.next()
            while line.strip() != "":
                self.irreps.append(line.split())
                line = inputfile.next()

        # Flag spin-unrestricted calculations.
        if line[4:13] == 'Molecule:':
            info = line.split()
            if info[1] == 'UNrestricted':
                self.unrestrictedflag = True

        if line[1:6] == "ATOMS":
        # Find the number of atoms and their atomic numbers
        # Also extract the starting coordinates (for a GeoOpt anyway)
            self.updateprogress(inputfile, "Attributes", self.cupdate)

            self.atomnos = []
            self.atomcoords = []
            self.coreelectrons = []

            underline = inputfile.next()  #clear pointless lines
            label1 = inputfile.next()     # 
            label2 = inputfile.next()     #
            line = inputfile.next()
            atomcoords = []
            while len(line)>2: #ensure that we are reading no blank lines
                info = line.split()
                # Element symbol may carry a fragment suffix ("C.1"); strip it.
                element = info[1].split('.')[0]
                self.atomnos.append(self.table.number[element])
                atomcoords.append(map(float, info[2:5]))
                # Core electrons = nuclear charge minus effective charge
                # (columns 5 and 6) -- nonzero for frozen-core calculations.
                self.coreelectrons.append(int(float(info[5]) - float(info[6])))
                line = inputfile.next()
            self.atomcoords.append(atomcoords)

            self.natom = len(self.atomnos)
            self.atomnos = numpy.array(self.atomnos, "i")

        # Fragment composition: which atoms (0-based) belong to each fragment.
        if line[1:10] == "FRAGMENTS":
            header = inputfile.next()

            self.frags = []
            self.fragnames = []

            line = inputfile.next()
            while len(line) > 2: #ensure that we are reading no blank lines
                info = line.split()

                if len(info) == 7: #fragment name is listed here
                    self.fragnames.append("%s_%s"%(info[1],info[0]))
                    self.frags.append([])
                    self.frags[-1].append(int(info[2]) - 1)

                elif len(info) == 5: #add atoms into last fragment
                    self.frags[-1].append(int(info[0]) - 1)

                line = inputfile.next()

        # Extract charge
        if line[1:11] == "Net Charge":
            self.charge = int(line.split()[2])
            line = inputfile.next()
            if len(line.strip()):
                #  Spin polar: 1 (Spin_A minus Spin_B electrons)
                self.mult = int(line.split()[2]) + 1
                 # (Not sure about this for higher multiplicities)
            else:
                # No spin-polarization line printed: assume a singlet.
                self.mult = 1

        if line[1:22] == "S C F   U P D A T E S":
        # find targets for SCF convergence

            if not hasattr(self,"scftargets"):
                self.scftargets = []

            #underline, blank, nr
            for i in range(3):
                inputfile.next()

            # SCFconv: final SCF convergence criterion; sconv2: the looser
            # criterion used for intermediate geometry-optimization cycles.
            line = inputfile.next()
            self.SCFconv = float(line.split()[-1])
            line = inputfile.next()
            self.sconv2 = float(line.split()[-1])

        # Start of an SCF block; collect per-cycle convergence values.
        if line[1:11] == "CYCLE    1":

            self.updateprogress(inputfile, "QM convergence", self.fupdate)

            newlist = []
            line = inputfile.next()

            if not hasattr(self,"geovalues"):
                # This is the first SCF cycle
                self.scftargets.append([self.sconv2*10, self.sconv2])
            elif self.finalgeometry in [self.GETLAST, self.NOMORE]:
                # This is the final SCF cycle
                self.scftargets.append([self.SCFconv*10, self.SCFconv])
            else:
                # This is an intermediate SCF cycle
                # The target depends on the previous target, the last gradient
                # and the integration accuracy -- NOTE(review): formula mirrors
                # ADF's adaptive SCF threshold; confirm against ADF docs.
                oldscftst = self.scftargets[-1][1]
                grdmax = self.geovalues[-1][1]
                scftst = max(self.SCFconv, min(oldscftst, grdmax/30, 10**(-self.accint)))
                self.scftargets.append([scftst*10, scftst])

            # Read until one of the three possible SCF termination messages.
            while line.find("SCF CONVERGED") == -1 and line.find("SCF not fully converged, result acceptable") == -1 and line.find("SCF NOT CONVERGED") == -1:
                if line[4:12] == "SCF test":
                    if not hasattr(self, "scfvalues"):
                        self.scfvalues = []

                    info = line.split()
                    newlist.append([float(info[4]), abs(float(info[6]))])
                try:
                    line = inputfile.next()
                except StopIteration: #EOF reached?
                    self.logger.warning("SCF did not converge, so attributes may be missing")
                    break            

            if line.find("SCF not fully converged, result acceptable") > 0:
                self.logger.warning("SCF not fully converged, results acceptable")

            if line.find("SCF NOT CONVERGED") > 0:
                self.logger.warning("SCF did not converge! moenergies and mocoeffs are unreliable")

            if hasattr(self, "scfvalues"):
                self.scfvalues.append(newlist)

        # Parse SCF energy for SP calcs from bonding energy decomposition section.
        # It seems ADF does not print it earlier for SP calculations.
        # If it does (does it?), parse that instead.
        # Check that scfenergies does not exist, because gopt runs also print this,
        #   repeating the values in the last "Geometry Convergence Tests" section.
        if "Total Bonding Energy:" in line:
            if not hasattr(self, "scfenergies"):
                energy = utils.convertor(float(line.split()[3]), "hartree", "eV")
                self.scfenergies = [energy]            

        # GeoOpt finished: the next coordinates block is the final geometry.
        if line[51:65] == "Final Geometry":
            self.finalgeometry = self.GETLAST

        if line[1:24] == "Coordinates (Cartesian)" and self.finalgeometry in [self.NOTFOUND, self.GETLAST]:
            # Get the coordinates from each step of the GeoOpt
            if not hasattr(self, "atomcoords"):
                self.atomcoords = []
            equals = inputfile.next()
            blank = inputfile.next()
            title = inputfile.next()
            title = inputfile.next()
            hyphens = inputfile.next()

            atomcoords = []
            line = inputfile.next()
            # The table is terminated by a line identical to the hyphen ruler.
            while line != hyphens:
                atomcoords.append(map(float, line.split()[5:8]))
                line = inputfile.next()
            self.atomcoords.append(atomcoords)
            if self.finalgeometry == self.GETLAST: # Don't get any more coordinates
                self.finalgeometry = self.NOMORE

        if line[1:27] == 'Geometry Convergence Tests':
        # Extract Geometry convergence information
            if not hasattr(self, "geotargets"):
                self.geovalues = []
                self.geotargets = numpy.array([0.0, 0.0, 0.0, 0.0, 0.0], "d")
            if not hasattr(self, "scfenergies"):
                self.scfenergies = []
            equals = inputfile.next()
            blank = inputfile.next()
            line = inputfile.next()
            temp = inputfile.next().strip().split()
            self.scfenergies.append(utils.convertor(float(temp[-1]), "hartree", "eV"))
            # Skip to the five convergence-test rows; each row has the current
            # value (column -4) and the target (column -3).
            for i in range(6):
                line = inputfile.next()
            values = []
            for i in range(5):
                temp = inputfile.next().split()
                self.geotargets[i] = float(temp[-3])
                values.append(float(temp[-4]))
            self.geovalues.append(values)

        if line[1:27] == 'General Accuracy Parameter':
            # Need to know the accuracy of the integration grid to
            # calculate the scftarget...note that it changes with time
            self.accint = float(line.split()[-1])

        if line.find('Orbital Energies, per Irrep and Spin') > 0 and not hasattr(self, "mosyms") and self.nosymflag and not self.unrestrictedflag:
        #Extracting orbital symmetries and energies, homos for nosym case
        #Should only be for restricted case because there is a better text block for unrestricted and nosym

            self.mosyms = [[]]

            self.moenergies = [[]]

            underline = inputfile.next()
            header = inputfile.next()
            underline = inputfile.next()
            label = inputfile.next()
            line = inputfile.next()

            info = line.split()

            if not info[0] == '1':
                self.logger.warning("MO info up to #%s is missing" % info[0])

            #handle case where MO information up to a certain orbital are missing
            # Pad with dummy entries (energy 99999, sym 'A') so indices line up.
            while int(info[0]) - 1 != len(self.moenergies[0]):
                self.moenergies[0].append(99999)
                self.mosyms[0].append('A')

            homoA = None

            while len(line) > 10:
                info = line.split()
                self.mosyms[0].append('A')
                self.moenergies[0].append(utils.convertor(float(info[2]), 'hartree', 'eV'))
                # Occupation 0.000 marks the first virtual orbital; the HOMO
                # is therefore two entries back (0-based index).
                if info[1] == '0.000' and not hasattr(self, 'homos'):
                    self.homos = [len(self.moenergies[0]) - 2]
                line = inputfile.next()

            self.moenergies = [numpy.array(self.moenergies[0], "d")]
            self.homos = numpy.array(self.homos, "i")

        if line[1:29] == 'Orbital Energies, both Spins' and not hasattr(self, "mosyms") and self.nosymflag and self.unrestrictedflag:
        #Extracting orbital symmetries and energies, homos for nosym case
        #should only be here if unrestricted and nosym

            self.mosyms = [[], []]

            moenergies = [[], []]

            underline = inputfile.next()
            blank = inputfile.next()
            header = inputfile.next()
            underline = inputfile.next()
            line = inputfile.next()

            homoa = 0
            homob = None

            while len(line) > 5:
                info = line.split()
                # Column 2 is the spin label; column 3 the occupation;
                # column 4 the orbital energy in hartree.
                if info[2] == 'A': 
                    self.mosyms[0].append('A')
                    moenergies[0].append(utils.convertor(float(info[4]), 'hartree', 'eV'))
                    if info[3] != '0.00':
                        homoa = len(moenergies[0]) - 1
                elif info[2] == 'B':
                    self.mosyms[1].append('A')
                    moenergies[1].append(utils.convertor(float(info[4]), 'hartree', 'eV'))
                    if info[3] != '0.00':
                        homob = len(moenergies[1]) - 1
                else:
                    print "Error reading line: %s" % line

                line = inputfile.next()

            self.moenergies = [numpy.array(x, "d") for x in moenergies]
            self.homos = numpy.array([homoa, homob], "i")


        if line[1:29] == 'Orbital Energies, all Irreps' and not hasattr(self, "mosyms"):
        #Extracting orbital symmetries and energies, homos
            self.mosyms = [[]]
            # symlist maps symmetry label -> per-spin lists of MO indices;
            # used later when mapping SFO coefficients onto MOs.
            self.symlist = {}

            self.moenergies = [[]]

            underline = inputfile.next()
            blank = inputfile.next()
            header = inputfile.next()
            underline2 = inputfile.next()
            line = inputfile.next()

            homoa = None
            homob = None

            #multiple = {'E':2, 'T':3, 'P':3, 'D':5}
            # The above is set if there are no special irreps
            # Degeneracy of each irrep = number of its subspecies (from the
            # "Irreducible Representations" block parsed earlier).
            names = [irrep[0].split(':')[0] for irrep in self.irreps]
            counts = [len(irrep) for irrep in self.irreps]
            multiple = dict(zip(names, counts))
            irrepspecies = {}
            for n in range(len(names)):
                indices = range(counts[n])
                subspecies = self.irreps[n]
                irrepspecies[names[n]] = dict(zip(indices, subspecies))

            while line.strip():
                info = line.split()
                if len(info) == 5: #this is restricted
                    #count = multiple.get(info[0][0],1)
                    count = multiple.get(info[0],1)
                    for repeat in range(count): # i.e. add E's twice, T's thrice
                        self.mosyms[0].append(self.normalisesym(info[0]))
                        self.moenergies[0].append(utils.convertor(float(info[3]), 'hartree', 'eV'))

                        sym = info[0]
                        if count > 1: # add additional sym label
                            sym = self.normalisedegenerates(info[0],repeat,ndict=irrepspecies)

                        try:
                            self.symlist[sym][0].append(len(self.moenergies[0])-1)
                        except KeyError:
                            self.symlist[sym]=[[]]
                            self.symlist[sym][0].append(len(self.moenergies[0])-1)

                    if info[2] == '0.00' and not hasattr(self, 'homos'):
                        self.homos = [len(self.moenergies[0]) - (count + 1)] #count, because need to handle degenerate cases
                    line = inputfile.next()
                elif len(info) == 6: #this is unrestricted
                    if len(self.moenergies) < 2: #if we don't have space, create it
                        self.moenergies.append([])
                        self.mosyms.append([])
#                    count = multiple.get(info[0][0], 1)
                    count = multiple.get(info[0], 1)
                    if info[2] == 'A':
                        for repeat in range(count): # i.e. add E's twice, T's thrice
                            self.mosyms[0].append(self.normalisesym(info[0]))
                            self.moenergies[0].append(utils.convertor(float(info[4]), 'hartree', 'eV'))

                            sym = info[0]
                            if count > 1: #add additional sym label
                                sym = self.normalisedegenerates(info[0],repeat)

                            try:
                                self.symlist[sym][0].append(len(self.moenergies[0])-1)
                            except KeyError:
                                self.symlist[sym]=[[],[]]
                                self.symlist[sym][0].append(len(self.moenergies[0])-1)

                        if info[3] == '0.00' and homoa == None:
                            homoa = len(self.moenergies[0]) - (count + 1) #count because degenerate cases need to be handled

                    if info[2] == 'B':
                        for repeat in range(count): # i.e. add E's twice, T's thrice
                            self.mosyms[1].append(self.normalisesym(info[0]))
                            self.moenergies[1].append(utils.convertor(float(info[4]), 'hartree', 'eV'))

                            sym = info[0]
                            if count > 1: #add additional sym label
                                sym = self.normalisedegenerates(info[0],repeat)

                            try:
                                self.symlist[sym][1].append(len(self.moenergies[1])-1)
                            except KeyError:
                                self.symlist[sym]=[[],[]]
                                self.symlist[sym][1].append(len(self.moenergies[1])-1)

                        if info[3] == '0.00' and homob == None:
                            homob = len(self.moenergies[1]) - (count + 1)

                    line = inputfile.next()

                else: #different number of lines
                    print "Error", info

            if len(info) == 6: #still unrestricted, despite being out of loop
                self.homos = [homoa, homob]

            self.moenergies = [numpy.array(x, "d") for x in self.moenergies]
            self.homos = numpy.array(self.homos, "i")

        if line[1:28] == "Vibrations and Normal Modes":
            # Section on extracting vibdisps
            # Also contains vibfreqs, but these are extracted in the
            # following section (see below)
            self.vibdisps = []
            equals = inputfile.next()
            blank = inputfile.next()
            header = inputfile.next()
            header = inputfile.next()
            blank = inputfile.next()
            blank = inputfile.next()

            freqs = inputfile.next()
            while freqs.strip()!="":
                minus = inputfile.next()
                # Up to three modes are printed side by side; p collects the
                # per-atom displacement triplets for each of the three columns.
                # NOTE: relies on Python 2 integer division (j/3).
                p = [ [], [], [] ]
                for i in range(len(self.atomnos)):
                    broken = map(float, inputfile.next().split()[1:])
                    for j in range(0, len(broken), 3):
                        p[j/3].append(broken[j:j+3])
                self.vibdisps.extend(p[:(len(broken)/3)])
                blank = inputfile.next()
                blank = inputfile.next()
                freqs = inputfile.next()
            self.vibdisps = numpy.array(self.vibdisps, "d")

        if line[1:24] == "List of All Frequencies":
        # Start of the IR/Raman frequency section
            self.updateprogress(inputfile, "Frequency information", self.fupdate)

        #                 self.vibsyms = [] # Need to look into this a bit more
            self.vibirs = []
            self.vibfreqs = []
            for i in range(8):
                line = inputfile.next()
            line = inputfile.next().strip()
            while line:
                temp = line.split()
                self.vibfreqs.append(float(temp[0]))                    
                self.vibirs.append(float(temp[2])) # or is it temp[1]?
                line = inputfile.next().strip()
            self.vibfreqs = numpy.array(self.vibfreqs, "d")
            self.vibirs = numpy.array(self.vibirs, "d")
            if hasattr(self, "vibramans"):
                self.vibramans = numpy.array(self.vibramans, "d")


        #******************************************************************************************************************8
        #delete this after new implementation using smat, eigvec print,eprint?
        if line[1:49] == "Total nr. of (C)SFOs (summation over all irreps)":
        # Extract the number of basis sets
            self.nbasis = int(line.split(":")[1].split()[0])

        # now that we're here, let's extract aonames

            # fonames holds "fragment_orbital" labels for each SFO basis
            # function; start_indeces records where each symmetry's SFO
            # numbering begins (used later to skip core functions).
            self.fonames = []
            self.start_indeces = {}

            blank = inputfile.next()
            note = inputfile.next()
            symoffset = 0

            blank = inputfile.next() 
            blank = inputfile.next()
            if len(blank) > 2: #fix for ADF2006.01 as it has another note
                blank = inputfile.next()
                blank = inputfile.next()
            blank = inputfile.next()

            self.nosymreps = []
            while len(self.fonames) < self.nbasis:

                symline = inputfile.next()
                sym = symline.split()[1]
                line = inputfile.next()
                num = int(line.split(':')[1].split()[0])
                self.nosymreps.append(num)

                #read until line "--------..." is found
                while line.find('-----') < 0:
                    line = inputfile.next()

                line = inputfile.next() # the start of the first SFO

                while len(self.fonames) < symoffset + num:
                    info = line.split()

                    #index0 index1 occ2 energy3/4 fragname5 coeff6 orbnum7 orbname8 fragname9
                    if not sym in self.start_indeces.keys():
                    #have we already set the start index for this symmetry?
                        self.start_indeces[sym] = int(info[1])

                    orbname = info[8]
                    orbital = info[7] + orbname.replace(":", "")

                    fragname = info[5]
                    frag = fragname + info[9]

                    coeff = float(info[6])

                    line = inputfile.next()
                    while line.strip() and not line[:7].strip(): # while it's the same SFO
                        # i.e. while not completely blank, but blank at the start
                        info = line[43:].split()
                        if len(info)>0: # len(info)==0 for the second line of dvb_ir.adfout
                            frag += "+" + fragname + info[-1]
                            coeff = float(info[-4])
                            # Sign of the coefficient decides the '+'/'-'
                            # connector in the combined orbital label.
                            if coeff < 0:
                                orbital += '-' + info[-3] + info[-2].replace(":", "")
                            else:
                                orbital += '+' + info[-3] + info[-2].replace(":", "")
                        line = inputfile.next()
                    # At this point, we are either at the start of the next SFO or at
                    # a blank line...the end

                    self.fonames.append("%s_%s" % (frag, orbital))
                symoffset += num

                # blankline blankline
                inputfile.next(); inputfile.next()

        if line[1:32] == "S F O   P O P U L A T I O N S ,":
        #Extract overlap matrix

            # Symmetric (nbasis x nbasis) SFO overlap matrix, filled in
            # per-symmetry blocks of (up to) 4 columns at a time.
            self.fooverlaps = numpy.zeros((self.nbasis, self.nbasis), "d")

            symoffset = 0

            for nosymrep in self.nosymreps:

                line = inputfile.next()
                while line.find('===') < 10: #look for the symmetry labels
                    line = inputfile.next()
                #blank blank text blank col row
                for i in range(6):
                    inputfile.next()

                base = 0
                while base < nosymrep: #have we read all the columns?

                    # Lower triangle is printed; mirror each element to keep
                    # fooverlaps symmetric.
                    for i in range(nosymrep - base):

                        self.updateprogress(inputfile, "Overlap", self.fupdate)
                        line = inputfile.next()
                        parts = line.split()[1:]
                        for j in range(len(parts)):
                            k = float(parts[j])
                            self.fooverlaps[base + symoffset + j, base + symoffset +i] = k
                            self.fooverlaps[base + symoffset + i, base + symoffset + j] = k

                    #blank, blank, column
                    for i in range(3):
                        inputfile.next()

                    # ADF prints the matrix 4 columns per pass.
                    base += 4

                symoffset += nosymrep
                base = 0

# The commented code below makes the atombasis attribute based on the BAS function in ADF,
#   but this is probably not so useful, since SFOs are used to build MOs in ADF.
#        if line[1:54] == "BAS: List of all Elementary Cartesian Basis Functions":
#
#            self.atombasis = []
#
#            # There will be some text, followed by a line:
#            #       (power of) X  Y  Z  R     Alpha  on Atom
#            while not line[1:11] == "(power of)":
#                line = inputfile.next()
#            dashes = inputfile.next()
#            blank = inputfile.next()
#            line = inputfile.next()
#            # There will be two blank lines when there are no more atom types.
#            while line.strip() != "":
#                atoms = [int(i)-1 for i in line.split()[1:]]
#                for n in range(len(atoms)):
#                    self.atombasis.append([])
#                dashes = inputfile.next()
#                line = inputfile.next()
#                while line.strip() != "":
#                    indices = [int(i)-1 for i in line.split()[5:]]
#                    for i in range(len(indices)):
#                        self.atombasis[atoms[i]].append(indices[i])
#                    line = inputfile.next()
#                line = inputfile.next()

        if line[48:67] == "SFO MO coefficients":

            self.mocoeffs = [numpy.zeros((self.nbasis, self.nbasis), "d")]
            spin = 0
            symoffset = 0
            lastrow = 0

            # Section ends with "1" at beginning of a line.
            while line[0] != "1":
                line = inputfile.next()

                # If spin is specified, then there will be two coefficient matrices. 
                if line.strip() == "***** SPIN 1 *****":
                    self.mocoeffs = [numpy.zeros((self.nbasis, self.nbasis), "d"),
                                     numpy.zeros((self.nbasis, self.nbasis), "d")]

                # Bump up the spin.
                if line.strip() == "***** SPIN 2 *****":
                    spin = 1
                    symoffset = 0
                    lastrow = 0

                # Next symmetry.
                if line.strip()[:4] == "=== ":
                    sym = line.split()[1]
                    if self.nosymflag:
                        aolist = range(self.nbasis)
                    else:
                        aolist = self.symlist[sym][spin]
                    # Add to the symmetry offset of AO ordering.
                    symoffset += lastrow

                # Blocks with coefficient always start with "MOs :".
                if line[1:6] == "MOs :":
                    # Next line has the MO index contributed to.
                    monumbers = [int(n) for n in line[6:].split()]
                    occup = inputfile.next()
                    label = inputfile.next()
                    line = inputfile.next()
                    # The table can end with a blank line or "1".
                    row = 0
                    while not line.strip() in ["", "1"]:
                        info = line.split()

                        if int(info[0]) < self.start_indeces[sym]:
                        #check to make sure we aren't parsing CFs
                            line = inputfile.next()
                            continue

                        self.updateprogress(inputfile, "Coefficients", self.fupdate)
                        row += 1
                        coeffs = [float(x) for x in info[1:]]
                        moindices = [aolist[n-1] for n in monumbers]
                        # The AO index is 1 less than the row.
                        aoindex = symoffset + row - 1
                        for i in range(len(monumbers)):
                            self.mocoeffs[spin][moindices[i],aoindex] = coeffs[i]
                        line = inputfile.next()
                    lastrow = row

        if line[4:53] == "Final excitation energies from Davidson algorithm":

            # move forward in file past some various algorithm info

            # *   Final excitation energies from Davidson algorithm                    *
            # *                                                                        *
            # **************************************************************************

            #     Number of loops in Davidson routine     =   20                    
            #     Number of matrix-vector multiplications =   24                    
            #     Type of excitations = SINGLET-SINGLET 

            inputfile.next(); inputfile.next(); inputfile.next()
            inputfile.next(); inputfile.next(); inputfile.next()
            inputfile.next(); inputfile.next()

            symm = self.normalisesym(inputfile.next().split()[1])

            # move forward in file past some more txt and header info

            # Excitation energies E in a.u. and eV, dE wrt prev. cycle,
            # oscillator strengths f in a.u.

            # no.  E/a.u.        E/eV      f           dE/a.u.
            # -----------------------------------------------------

            inputfile.next(); inputfile.next(); inputfile.next()
            inputfile.next(); inputfile.next(); inputfile.next()

            # now start parsing etenergies and etoscs

            etenergies = []
            etoscs = []
            etsyms = []

            line = inputfile.next()
            while len(line) > 2:
                info = line.split()
                etenergies.append(utils.convertor(float(info[2]), "eV", "cm-1"))
                etoscs.append(float(info[3]))
                etsyms.append(symm)
                line = inputfile.next()

            # move past next section
            while line[1:53] != "Major MO -> MO transitions for the above excitations":
                line = inputfile.next()

            # move past headers

            #  Excitation  Occupied to virtual  Contribution                         
            #   Nr.          orbitals           weight        contribibutions to      
            #                                   (sum=1) transition dipole moment   
            #                                             x       y       z       

            inputfile.next(), inputfile.next(), inputfile.next()
            inputfile.next(), inputfile.next(), inputfile.next()

            # before we start handling transitions, we need
            # to create mosyms with indices
            # only restricted calcs are possible in ADF

            # syms[i] is the 1-based-per-symmetry label ("2A1") of MO i,
            # built by counting occurrences of each symmetry label.
            counts = {}
            syms = []
            for mosym in self.mosyms[0]:
                if counts.keys().count(mosym) == 0:
                    counts[mosym] = 1
                else:
                    counts[mosym] += 1

                syms.append(str(counts[mosym]) + mosym)

            import re
            etsecs = []
            printed_warning = False 

            for i in range(len(etenergies)):
                etsec = []
                line = inputfile.next()
                info = line.split()
                while len(info) > 0:

                    # Split a token like "21Aa" into the numeric index and the
                    # symmetry label at the first non-digit character.
                    match = re.search('[^0-9]', info[1])
                    index1 = int(info[1][:match.start(0)])
                    text = info[1][match.start(0):]
                    symtext = text[0].upper() + text[1:]
                    sym1 = str(index1) + self.normalisesym(symtext)

                    match = re.search('[^0-9]', info[3])
                    index2 = int(info[3][:match.start(0)])
                    text = info[3][match.start(0):]
                    symtext = text[0].upper() + text[1:]
                    sym2 = str(index2) + self.normalisesym(symtext)

                    try:
                        index1 = syms.index(sym1)
                    except ValueError:
                        if not printed_warning:
                            self.logger.warning("Etsecs are not accurate!")
                            printed_warning = True

                    try:
                        index2 = syms.index(sym2)
                    except ValueError:
                        if not printed_warning:
                            self.logger.warning("Etsecs are not accurate!")
                            printed_warning = True

                    etsec.append([(index1, 0), (index2, 0), float(info[4])])

                    line = inputfile.next()
                    info = line.split()

                etsecs.append(etsec)


            # Append to any existing excitation attributes (multiple Davidson
            # sections can occur, e.g. for different symmetries).
            if not hasattr(self, "etenergies"):
                self.etenergies = etenergies
            else:
                self.etenergies += etenergies

            if not hasattr(self, "etoscs"):
                self.etoscs = etoscs
            else:
                self.etoscs += etoscs

            if not hasattr(self, "etsyms"):
                self.etsyms = etsyms
            else:
                self.etsyms += etsyms

            if not hasattr(self, "etsecs"):
                self.etsecs = etsecs
            else:
                self.etsecs += etsecs

Example 34

Project: RMG-Py
Source File: gaussianparser.py
View license
    def extract(self, inputfile, line):
        """Extract information from the file object inputfile."""
        
        # Number of atoms.
        if line[1:8] == "NAtoms=":

            self.updateprogress(inputfile, "Attributes", self.fupdate)
                    
            natom = int(line.split()[1])
            if not hasattr(self, "natom"):
                self.natom = natom

        # Catch message about completed optimization.
        if line[1:23] == "Optimization completed":
            self.optfinished = True
        
        # Extract the atomic numbers and coordinates from the input orientation,
        #   in the event the standard orientation isn't available.
        if not self.optfinished and line.find("Input orientation") > -1 or line.find("Z-Matrix orientation") > -1:

            # If this is a counterpoise calculation, this output means that
            #   the supermolecule is now being considered, so we can set:
            self.counterpoise = 0

            self.updateprogress(inputfile, "Attributes", self.cupdate)
            
            if not hasattr(self, "inputcoords"):
                self.inputcoords = []
            self.inputatoms = []
            
            hyphens = inputfile.next()
            colmNames = inputfile.next()
            colmNames = inputfile.next()
            hyphens = inputfile.next()
            
            atomcoords = []
            line = inputfile.next()
            while line != hyphens:
                broken = line.split()
                self.inputatoms.append(int(broken[1]))
                atomcoords.append(map(float, broken[3:6]))
                line = inputfile.next()

            self.inputcoords.append(atomcoords)

            if not hasattr(self, "natom"):
                self.atomnos = numpy.array(self.inputatoms, 'i')
                self.natom = len(self.atomnos)

        # Extract the atomic numbers and coordinates of the atoms.
        if not self.optfinished and line.strip() == "Standard orientation:":

            self.updateprogress(inputfile, "Attributes", self.cupdate)

            # If this is a counterpoise calculation, this output means that
            #   the supermolecule is now being considered, so we can set:
            self.counterpoise = 0

            if not hasattr(self, "atomcoords"):
                self.atomcoords = []
            
            hyphens = inputfile.next()
            colmNames = inputfile.next()
            colmNames = inputfile.next()
            hyphens = inputfile.next()
            
            atomnos = []
            atomcoords = []
            line = inputfile.next()
            while line != hyphens:
                broken = line.split()
                atomnos.append(int(broken[1]))
                atomcoords.append(map(float, broken[-3:]))
                line = inputfile.next()
            self.atomcoords.append(atomcoords)
            if not hasattr(self, "natom"):
                self.atomnos = numpy.array(atomnos, 'i')
                self.natom = len(self.atomnos)

        # Find the targets for SCF convergence (QM calcs).
        if line[1:44] == 'Requested convergence on RMS density matrix':

            if not hasattr(self, "scftargets"):
                self.scftargets = []

            scftargets = []
            # The RMS density matrix.
            scftargets.append(self.float(line.split('=')[1].split()[0]))
            line = inputfile.next()
            # The MAX density matrix.
            scftargets.append(self.float(line.strip().split('=')[1][:-1]))
            line = inputfile.next()
            # For G03, there's also the energy (not for G98).
            if line[1:10] == "Requested":
                scftargets.append(self.float(line.strip().split('=')[1][:-1]))

            self.scftargets.append(scftargets)

        # Extract SCF convergence information (QM calcs).
        if line[1:10] == 'Cycle   1':
                    
            if not hasattr(self, "scfvalues"):
                self.scfvalues = []

            scfvalues = []
            line = inputfile.next()
            while line.find("SCF Done") == -1:
            
                self.updateprogress(inputfile, "QM convergence", self.fupdate)
                      
                if line.find(' E=') == 0:
                    self.logger.debug(line)

                #  RMSDP=3.74D-06 MaxDP=7.27D-05 DE=-1.73D-07 OVMax= 3.67D-05
                # or
                #  RMSDP=1.13D-05 MaxDP=1.08D-04              OVMax= 1.66D-04
                if line.find(" RMSDP") == 0:

                    parts = line.split()
                    newlist = [self.float(x.split('=')[1]) for x in parts[0:2]]
                    energy = 1.0
                    if len(parts) > 4:
                        energy = parts[2].split('=')[1]
                        if energy == "":
                            energy = self.float(parts[3])
                        else:
                            energy = self.float(energy)
                    if len(self.scftargets[0]) == 3: # Only add the energy if it's a target criteria
                        newlist.append(energy)
                    scfvalues.append(newlist)

                try:
                    line = inputfile.next()
                # May be interupted by EOF.
                except StopIteration:
                    break

            self.scfvalues.append(scfvalues)

        # Extract SCF convergence information (AM1 calcs).
        if line[1:4] == 'It=':
                    
            self.scftargets = numpy.array([1E-7], "d") # This is the target value for the rms
            self.scfvalues = [[]]

            line = inputfile.next()
            while line.find(" Energy") == -1:
            
                if self.progress:
                    step = inputfile.tell()
                    if step != oldstep:
                        self.progress.update(step, "AM1 Convergence")
                        oldstep = step
                        
                if line[1:4] == "It=":
                    parts = line.strip().split()
                    self.scfvalues[0].append(self.float(parts[-1][:-1]))
                line = inputfile.next()

        # Note: this needs to follow the section where 'SCF Done' is used
        #   to terminate a loop when extracting SCF convergence information.
        if line[1:9] == 'SCF Done':

            if not hasattr(self, "scfenergies"):
                self.scfenergies = []

            self.scfenergies.append(utils.convertor(self.float(line.split()[4]), "hartree", "eV"))
        #gmagoon 5/27/09: added scfenergies reading for PM3 case where line begins with Energy=
        #example line: " Energy=   -0.077520562724 NIter=  14."
        if line[1:8] == 'Energy=':
            if not hasattr(self, "scfenergies"):
                self.scfenergies = []
            self.scfenergies.append(utils.convertor(self.float(line.split()[1]), "hartree", "eV"))
        #gmagoon 6/8/09: added molecular mass parsing (units will be amu)
        #example line: " Molecular mass:   208.11309 amu."
        if line[1:16] == 'Molecular mass:':
            self.molmass = self.float(line.split()[2])

	  #gmagoon 5/27/09: added rotsymm for reading rotational symmetry number
	  #it would probably be better to read in point group (or calculate separately with OpenBabel, and I probably won't end up using this
        #example line: " Rotational symmetry number  1."
        if line[1:27] == 'Rotational symmetry number':
            self.rotsymm = int(self.float(line.split()[3]))
        
	  #gmagoon 5/28/09: added rotcons for rotational constants (at each step) in GHZ
        #example line:  Rotational constants (GHZ):     17.0009421      5.8016756      4.5717439
        #could also read in moment of inertia, but this should just differ by a constant: rot cons= h/(8*Pi^2*I)
        #note that the last occurence of this in the thermochemistry section has reduced precision, so we will want to use the 2nd to last instance
        if line[1:28] == 'Rotational constants (GHZ):':
            if not hasattr(self, "rotcons"):
                self.rotcons = []

	    #some linear cases (e.g. if linearity is not recognized) can have asterisks ****... for the first rotational constant; e.g.:
	    # Rotational constants (GHZ):      ************    12.73690    12.73690
	    # or:
	    # Rotational constants (GHZ):***************     10.4988228     10.4988223
	    # if this is the case, replace the asterisks with a 0.0
	    #we can also have cases like this:
	    # Rotational constants (GHZ):6983905.3278703     11.8051382     11.8051183
            #if line[28:29] == '*' or line.split()[3].startswith('*'):
            if line[37:38] == '*':
                self.rotcons.append([0.0]+map(float, line[28:].split()[-2:])) #record last 0.0 and last 2 numbers (words) in the string following the prefix
	    else:
                self.rotcons.append(map(float, line[28:].split()[-3:])) #record last 3 numbers (words) in the string following the prefix

        # Total energies after Moller-Plesset corrections.
        # Second order correction is always first, so its first occurance
        #   triggers creation of mpenergies (list of lists of energies).
        # Further MP2 corrections are appended as found.
        #
        # Example MP2 output line:
        #  E2 =    -0.9505918144D+00 EUMP2 =    -0.28670924198852D+03
        # Warning! this output line is subtly different for MP3/4/5 runs
        if "EUMP2" in line[27:34]:

            if not hasattr(self, "mpenergies"):
                self.mpenergies = []
            self.mpenergies.append([])
            mp2energy = self.float(line.split("=")[2])
            self.mpenergies[-1].append(utils.convertor(mp2energy, "hartree", "eV"))

        # Example MP3 output line:
        #  E3=       -0.10518801D-01     EUMP3=      -0.75012800924D+02
        if line[34:39] == "EUMP3":

            mp3energy = self.float(line.split("=")[2])
            self.mpenergies[-1].append(utils.convertor(mp3energy, "hartree", "eV"))

        # Example MP4 output lines:
        #  E4(DQ)=   -0.31002157D-02        UMP4(DQ)=   -0.75015901139D+02
        #  E4(SDQ)=  -0.32127241D-02        UMP4(SDQ)=  -0.75016013648D+02
        #  E4(SDTQ)= -0.32671209D-02        UMP4(SDTQ)= -0.75016068045D+02
        # Energy for most substitutions is used only (SDTQ by default)
        if line[34:42] == "UMP4(DQ)":

            mp4energy = self.float(line.split("=")[2])
            line = inputfile.next()
            if line[34:43] == "UMP4(SDQ)":
              mp4energy = self.float(line.split("=")[2])
              line = inputfile.next()
              if line[34:44] == "UMP4(SDTQ)":
                mp4energy = self.float(line.split("=")[2])
            self.mpenergies[-1].append(utils.convertor(mp4energy, "hartree", "eV"))

        # Example MP5 output line:
        #  DEMP5 =  -0.11048812312D-02 MP5 =  -0.75017172926D+02
        if line[29:32] == "MP5":
            mp5energy = self.float(line.split("=")[2])
            self.mpenergies[-1].append(utils.convertor(mp5energy, "hartree", "eV"))

        # Total energies after Coupled Cluster corrections.
        # Second order MBPT energies (MP2) are also calculated for these runs,
        #  but the output is the same as when parsing for mpenergies.
        # First turn on flag for Coupled Cluster runs.
        if line[1:23] == "Coupled Cluster theory" or line[1:8] == "CCSD(T)":

            self.coupledcluster = True
            if not hasattr(self, "ccenergies"):
                self.ccenergies = []

        # Now read the consecutive correlated energies when ,
        #  but append only the last one to ccenergies.
        # Only the highest level energy is appended - ex. CCSD(T), not CCSD.
        if self.coupledcluster and line[27:35] == "E(CORR)=":
            self.ccenergy = self.float(line.split()[3])
        if self.coupledcluster and line[1:9] == "CCSD(T)=":
            self.ccenergy = self.float(line.split()[1])
        # Append when leaving link 913
        if self.coupledcluster and line[1:16] == "Leave Link  913":
            self.ccenergies.append(utils.convertor(self.ccenergy, "hartree", "eV"))

        # Geometry convergence information.
        if line[49:59] == 'Converged?':

            if not hasattr(self, "geotargets"):
                self.geovalues = []
                self.geotargets = numpy.array([0.0, 0.0, 0.0, 0.0], "d")

            newlist = [0]*4
            for i in range(4):
                line = inputfile.next()
                self.logger.debug(line)
                parts = line.split()
                try:
                    value = self.float(parts[2])
                except ValueError:
                    value = -1.0
                    #self.logger.error("Problem parsing the value for geometry optimisation: %s is not a number." % parts[2])
		    #gmagoon 20111202: because the value can become **** (as shown below, I'm changing this to not report an error, and instead just set the value to -1.0
#         Item               Value     Threshold  Converged?
# Maximum Force            ********     0.000015     NO 
# RMS     Force            1.813626     0.000010     NO 
# Maximum Displacement     0.915407     0.000060     NO 
# RMS     Displacement     0.280831     0.000040     NO
                else:
                    newlist[i] = value
                self.geotargets[i] = self.float(parts[3])

            self.geovalues.append(newlist)

        # Gradients.
        # Read in the cartesian energy gradients (forces) from a block like this:
        # -------------------------------------------------------------------
        # Center     Atomic                   Forces (Hartrees/Bohr)
        # Number     Number              X              Y              Z
        # -------------------------------------------------------------------
        # 1          1          -0.012534744   -0.021754635   -0.008346094
        # 2          6           0.018984731    0.032948887   -0.038003451
        # 3          1          -0.002133484   -0.006226040    0.023174772
        # 4          1          -0.004316502   -0.004968213    0.023174772
        #           -2          -0.001830728   -0.000743108   -0.000196625
        # ------------------------------------------------------------------
        #
        # The "-2" line is for a dummy atom
        #
        # Then optimization is done in internal coordinates, Gaussian also
        # print the forces in internal coordinates, which can be produced from 
        # the above. This block looks like this:
        # Variable       Old X    -DE/DX   Delta X   Delta X   Delta X     New X
        #                                 (Linear)    (Quad)   (Total)
        #   ch        2.05980   0.01260   0.00000   0.01134   0.01134   2.07114
        #   hch        1.75406   0.09547   0.00000   0.24861   0.24861   2.00267
        #   hchh       2.09614   0.01261   0.00000   0.16875   0.16875   2.26489
        #         Item               Value     Threshold  Converged?
        if line[37:43] == "Forces":

            if not hasattr(self, "grads"):
                self.grads = []

            header = inputfile.next()
            dashes = inputfile.next()
            line = inputfile.next()
            forces = []
            while line != dashes:
                broken = line.split()
                Fx, Fy, Fz = broken[-3:]
                forces.append([float(Fx),float(Fy),float(Fz)])
                line = inputfile.next()
            self.grads.append(forces)                

        # Charge and multiplicity.
        # If counterpoise correction is used, multiple lines match.
        # The first one contains charge/multiplicity of the whole molecule.:
        #   Charge =  0 Multiplicity = 1 in supermolecule
        #   Charge =  0 Multiplicity = 1 in fragment  1.
        #   Charge =  0 Multiplicity = 1 in fragment  2.
        if line[1:7] == 'Charge' and line.find("Multiplicity")>=0:

            regex = ".*=(.*)Mul.*=\s*(\d+).*"
            match = re.match(regex, line)
            assert match, "Something unusual about the line: '%s'" % line
            
            self.charge = int(match.groups()[0])
            self.mult = int(match.groups()[1])

        # Orbital symmetries.
        if line[1:20] == 'Orbital symmetries:' and not hasattr(self, "mosyms"):

            # For counterpoise fragments, skip these lines.
            if self.counterpoise != 0: return

            self.updateprogress(inputfile, "MO Symmetries", self.fupdate)
                    
            self.mosyms = [[]]
            line = inputfile.next()
            unres = False
            if line.find("Alpha Orbitals") == 1:
                unres = True
                line = inputfile.next()
            i = 0
            while len(line) > 18 and line[17] == '(':
                if line.find('Virtual') >= 0:
                    self.homos = numpy.array([i-1], "i") # 'HOMO' indexes the HOMO in the arrays
                parts = line[17:].split()
                for x in parts:
                    self.mosyms[0].append(self.normalisesym(x.strip('()')))
                    i += 1 
                line = inputfile.next()
            if unres:
                line = inputfile.next()
                # Repeat with beta orbital information
                i = 0
                self.mosyms.append([])
                while len(line) > 18 and line[17] == '(':
                    if line.find('Virtual')>=0:
			if (hasattr(self, "homos")):#if there was also an alpha virtual orbital (here we consider beta) we will store two indices in the array
			    self.homos.resize([2]) # Extend the array to two elements
			    self.homos[1] = i-1 # 'HOMO' indexes the HOMO in the arrays
			else:#otherwise (e.g. for O triplet) there is no alpha virtual orbital, only beta virtual orbitals, and we initialize the array with one element
			    self.homos = numpy.array([i-1], "i") # 'HOMO' indexes the HOMO in the arrays
                    parts = line[17:].split()
                    for x in parts:
                        self.mosyms[1].append(self.normalisesym(x.strip('()')))
                        i += 1
                    line = inputfile.next()

        # Alpha/Beta electron eigenvalues.
        if line[1:6] == "Alpha" and line.find("eigenvalues") >= 0:

            # For counterpoise fragments, skip these lines.
            if self.counterpoise != 0: return

            # For ONIOM calcs, ignore this section in order to bypass assertion failure.
            if self.oniom: return

            self.updateprogress(inputfile, "Eigenvalues", self.fupdate)
            self.moenergies = [[]]
            HOMO = -2

            while line.find('Alpha') == 1:
                if line.split()[1] == "virt." and HOMO == -2:

                    # If there aren't any symmetries, this is a good way to find the HOMO.
                    # Also, check for consistency if homos was already parsed.
                    HOMO = len(self.moenergies[0])-1
                    if hasattr(self, "homos"):
                        assert HOMO == self.homos[0]
                    else:
                        self.homos = numpy.array([HOMO], "i")

                part = line[28:]
                i = 0
                while i*10+4 < len(part):
                    x = part[i*10:(i+1)*10]
                    self.moenergies[0].append(utils.convertor(self.float(x), "hartree", "eV"))
                    i += 1
                line = inputfile.next()
            # If, at this point, self.homos is unset, then there were not
            # any alpha virtual orbitals
            if not hasattr(self, "homos"):
                HOMO = len(self.moenergies[0])-1
                self.homos = numpy.array([HOMO], "i")
            

            if line.find('Beta') == 2:
                self.moenergies.append([])

            HOMO = -2
            while line.find('Beta') == 2:
                if line.split()[1] == "virt." and HOMO == -2:

                    # If there aren't any symmetries, this is a good way to find the HOMO.
                    # Also, check for consistency if homos was already parsed.
                    HOMO = len(self.moenergies[1])-1
                    if len(self.homos) == 2:
                        assert HOMO == self.homos[1]
                    else:
                        self.homos.resize([2])
                        self.homos[1] = HOMO

                part = line[28:]
                i = 0
                while i*10+4 < len(part):
                    x = part[i*10:(i+1)*10]
                    self.moenergies[1].append(utils.convertor(self.float(x), "hartree", "eV"))
                    i += 1
                line = inputfile.next()

            self.moenergies = [numpy.array(x, "d") for x in self.moenergies]
            
        # Gaussian Rev <= B.0.3 (?)
        # AO basis set in the form of general basis input:
        #  1 0
        # S   3 1.00       0.000000000000
        #      0.7161683735D+02  0.1543289673D+00
        #      0.1304509632D+02  0.5353281423D+00
        #      0.3530512160D+01  0.4446345422D+00
        # SP   3 1.00       0.000000000000
        #      0.2941249355D+01 -0.9996722919D-01  0.1559162750D+00
        #      0.6834830964D+00  0.3995128261D+00  0.6076837186D+00
        #      0.2222899159D+00  0.7001154689D+00  0.3919573931D+00
        if line[1:16] == "AO basis set in":
        
            # For counterpoise fragment calcualtions, skip these lines.
            if self.counterpoise != 0: return
        
            self.gbasis = []
            line = inputfile.next()
            while line.strip():
                gbasis = []
                line = inputfile.next()
                while line.find("*")<0:
                    temp = line.split()
                    symtype = temp[0]
                    numgau = int(temp[1])
                    gau = []
                    for i in range(numgau):
                        temp = map(self.float, inputfile.next().split())
                        gau.append(temp)
                        
                    for i,x in enumerate(symtype):
                        newgau = [(z[0],z[i+1]) for z in gau]
                        gbasis.append( (x,newgau) )
                    line = inputfile.next() # i.e. "****" or "SP ...."
                self.gbasis.append(gbasis)
                line = inputfile.next() # i.e. "20 0" or blank line

        # Start of the IR/Raman frequency section.
        # Caution is advised here, as additional frequency blocks
        #   can be printed by Gaussian (with slightly different formats),
        #   often doubling the information printed.
        # See, for a non-standard exmaple, regression Gaussian98/test_H2.log
        if line[1:14] == "Harmonic freq":

            self.updateprogress(inputfile, "Frequency Information", self.fupdate)

            # The whole block should not have any blank lines.
            while line.strip() != "":

                # Lines with symmetries and symm. indices begin with whitespace.
                if line[1:15].strip() == "" and not line[15:22].strip().isdigit():

                    if not hasattr(self, 'vibsyms'):
                        self.vibsyms = []
                    syms = line.split()
                    self.vibsyms.extend(syms)
            
                if line[1:15] == "Frequencies --":
                
                    if not hasattr(self, 'vibfreqs'):
                        self.vibfreqs = []
                    freqs = [self.float(f) for f in line[15:].split()]
                    self.vibfreqs.extend(freqs)
            
                if line[1:15] == "IR Inten    --":
                
                    if not hasattr(self, 'vibirs'):
                        self.vibirs = []
                    irs = [self.float(f) for f in line[15:].split()]
                    self.vibirs.extend(irs)

                if line[1:15] == "Raman Activ --":
                
                    if not hasattr(self, 'vibramans'):
                        self.vibramans = []
                    ramans = [self.float(f) for f in line[15:].split()]
                    self.vibramans.extend(ramans)
                
                # Block with displacement should start with this.
                # Remember, it is possible to have less than three columns!
                # There should be as many lines as there are atoms.
                if line[1:29] == "Atom AN      X      Y      Z":
                
                    if not hasattr(self, 'vibdisps'):
                        self.vibdisps = []
                    disps = []
                    for n in range(self.natom):
                        line = inputfile.next()
                        numbers = [float(s) for s in line[10:].split()]
                        N = len(numbers) / 3
                        if not disps:
                            for n in range(N):
                                disps.append([])
                        for n in range(N):
                            disps[n].append(numbers[3*n:3*n+3])
                    self.vibdisps.extend(disps)
                
                line = inputfile.next()

# Below is the old code for the IR/Raman frequency block, can probably be removed.
#            while len(line[:15].split()) == 0:
#                self.logger.debug(line)
#                self.vibsyms.extend(line.split()) # Adding new symmetry
#                line = inputfile.next()
#                # Read in frequencies.
#                freqs = [self.float(f) for f in line.split()[2:]]
#                self.vibfreqs.extend(freqs)
#                line = inputfile.next()
#                line = inputfile.next()
#                line = inputfile.next()
#                irs = [self.float(f) for f in line.split()[3:]]
#                self.vibirs.extend(irs)
#                line = inputfile.next() # Either the header or a Raman line
#                if line.find("Raman") >= 0:
#                    if not hasattr(self, "vibramans"):
#                        self.vibramans = []
#                    ramans = [self.float(f) for f in line.split()[3:]]
#                    self.vibramans.extend(ramans)
#                    line = inputfile.next() # Depolar (P)
#                    line = inputfile.next() # Depolar (U)
#                    line = inputfile.next() # Header
#                line = inputfile.next() # First line of cartesian displacement vectors
#                p = [[], [], []]
#                while len(line[:15].split()) > 0:
#                    # Store the cartesian displacement vectors
#                    broken = map(float, line.strip().split()[2:])
#                    for i in range(0, len(broken), 3):
#                        p[i/3].append(broken[i:i+3])
#                    line = inputfile.next()
#                self.vibdisps.extend(p[0:len(broken)/3])
#                line = inputfile.next() # Should be the line with symmetries
#            self.vibfreqs = numpy.array(self.vibfreqs, "d")
#            self.vibirs = numpy.array(self.vibirs, "d")
#            self.vibdisps = numpy.array(self.vibdisps, "d")
#            if hasattr(self, "vibramans"):
#                self.vibramans = numpy.array(self.vibramans, "d")
                
        # Electronic transitions.
        if line[1:14] == "Excited State":
        
            if not hasattr(self, "etenergies"):
                self.etenergies = []
                self.etoscs = []
                self.etsyms = []
                self.etsecs = []
            # Need to deal with lines like:
            # (restricted calc)
            # Excited State   1:   Singlet-BU     5.3351 eV  232.39 nm  f=0.1695
            # (unrestricted calc) (first excited state is 2!)
            # Excited State   2:   ?Spin  -A      0.1222 eV 10148.75 nm  f=0.0000
            # (Gaussian 09 ZINDO)
            # Excited State   1:      Singlet-?Sym    2.5938 eV  478.01 nm  f=0.0000  <S**2>=0.000
            p = re.compile(":(?P<sym>.*?)(?P<energy>-?\d*\.\d*) eV")
            groups = p.search(line).groups()
            self.etenergies.append(utils.convertor(self.float(groups[1]), "eV", "cm-1"))
            self.etoscs.append(self.float(line.split("f=")[-1].split()[0]))
            self.etsyms.append(groups[0].strip())
            
            line = inputfile.next()

            p = re.compile("(\d+)")
            CIScontrib = []
            while line.find(" ->") >= 0: # This is a contribution to the transition
                parts = line.split("->")
                self.logger.debug(parts)
                # Has to deal with lines like:
                #       32 -> 38         0.04990
                #      35A -> 45A        0.01921
                frommoindex = 0 # For restricted or alpha unrestricted
                fromMO = parts[0].strip()
                if fromMO[-1] == "B":
                    frommoindex = 1 # For beta unrestricted
                fromMO = int(p.match(fromMO).group())-1 # subtract 1 so that it is an index into moenergies
                
                t = parts[1].split()
                tomoindex = 0
                toMO = t[0]
                if toMO[-1] == "B":
                    tomoindex = 1
                toMO = int(p.match(toMO).group())-1 # subtract 1 so that it is an index into moenergies

                percent = self.float(t[1])
                # For restricted calculations, the percentage will be corrected
                # after parsing (see after_parsing() above).
                CIScontrib.append([(fromMO, frommoindex), (toMO, tomoindex), percent])
                line = inputfile.next()
            self.etsecs.append(CIScontrib)

# Circular dichroism data (different for G03 vs G09)

# G03

## <0|r|b> * <b|rxdel|0>  (Au), Rotatory Strengths (R) in
## cgs (10**-40 erg-esu-cm/Gauss)
##       state          X           Y           Z     R(length)
##         1         0.0006      0.0096     -0.0082     -0.4568
##         2         0.0251     -0.0025      0.0002     -5.3846
##         3         0.0168      0.4204     -0.3707    -15.6580
##         4         0.0721      0.9196     -0.9775     -3.3553

# G09

## 1/2[<0|r|b>*<b|rxdel|0> + (<0|rxdel|b>*<b|r|0>)*]
## Rotatory Strengths (R) in cgs (10**-40 erg-esu-cm/Gauss)
##       state          XX          YY          ZZ     R(length)     R(au)
##         1        -0.3893     -6.7546      5.7736     -0.4568     -0.0010
##         2       -17.7437      1.7335     -0.1435     -5.3845     -0.0114
##         3       -11.8655   -297.2604    262.1519    -15.6580     -0.0332

        if (line[1:52] == "<0|r|b> * <b|rxdel|0>  (Au), Rotatory Strengths (R)" or
            line[1:50] == "1/2[<0|r|b>*<b|rxdel|0> + (<0|rxdel|b>*<b|r|0>)*]"):

            self.etrotats = []
            inputfile.next() # Units
            headers = inputfile.next() # Headers
            Ncolms = len(headers.split())
            line = inputfile.next()
            parts = line.strip().split()
            while len(parts) == Ncolms:
                try:
                    R = self.float(parts[4])
                except ValueError:
                    # nan or -nan if there is no first excited state
                    # (for unrestricted calculations)
                    pass
                else:
                    self.etrotats.append(R)
                line = inputfile.next()
                temp = line.strip().split()
                parts = line.strip().split()                
            self.etrotats = numpy.array(self.etrotats, "d")

        # Number of basis sets functions.
        # Has to deal with lines like:
        #  NBasis =   434 NAE=    97 NBE=    97 NFC=    34 NFV=     0
        # and...
        #  NBasis = 148  MinDer = 0  MaxDer = 0
        # Although the former is in every file, it doesn't occur before
        #   the overlap matrix is printed.
        if line[1:7] == "NBasis" or line[4:10] == "NBasis":

            # For counterpoise fragment, skip these lines.
            if self.counterpoise != 0: return

            # For ONIOM calcs, ignore this section in order to bypass assertion failure.
            if self.oniom: return

            # If nbasis was already parsed, check if it changed.
            nbasis = int(line.split('=')[1].split()[0])
            if hasattr(self, "nbasis"):
                assert nbasis == self.nbasis
            else:
                self.nbasis = nbasis
                
        # Number of linearly-independent basis sets.
        if line[1:7] == "NBsUse":

            # For counterpoise fragment, skip these lines.
            if self.counterpoise != 0: return

            # For ONIOM calcs, ignore this section in order to bypass assertion failure.
            if self.oniom: return

            # If nmo was already parsed, check if it changed.
            nmo = int(line.split('=')[1].split()[0])
            if hasattr(self, "nmo"):
                assert nmo == self.nmo
            else:
                self.nmo = nmo

        # For AM1 calculations, set nbasis by a second method,
        #   as nmo may not always be explicitly stated.
        if line[7:22] == "basis functions, ":
        
            nbasis = int(line.split()[0])
            if hasattr(self, "nbasis"):
                assert nbasis == self.nbasis
            else:
                self.nbasis = nbasis

        # Molecular orbital overlap matrix.
        # Has to deal with lines such as:
        #   *** Overlap ***
        #   ****** Overlap ******
        if line[1:4] == "***" and (line[5:12] == "Overlap"
                                 or line[8:15] == "Overlap"):

            self.aooverlaps = numpy.zeros( (self.nbasis, self.nbasis), "d")
            # Overlap integrals for basis fn#1 are in aooverlaps[0]
            base = 0
            colmNames = inputfile.next()
            while base < self.nbasis:
                 
                self.updateprogress(inputfile, "Overlap", self.fupdate)
                        
                for i in range(self.nbasis-base): # Fewer lines this time
                    line = inputfile.next()
                    parts = line.split()
                    for j in range(len(parts)-1): # Some lines are longer than others
                        k = float(parts[j+1].replace("D", "E"))
                        self.aooverlaps[base+j, i+base] = k
                        self.aooverlaps[i+base, base+j] = k
                base += 5
                colmNames = inputfile.next()
            self.aooverlaps = numpy.array(self.aooverlaps, "d")                    

        # Molecular orbital coefficients (mocoeffs).
        # Essentially only produced for SCF calculations.
        # This is also the place where aonames and atombasis are parsed.
        if line[5:35] == "Molecular Orbital Coefficients" or line[5:41] == "Alpha Molecular Orbital Coefficients" or line[5:40] == "Beta Molecular Orbital Coefficients":

            if line[5:40] == "Beta Molecular Orbital Coefficients":
                beta = True
                if self.popregular:
                    return
                    # This was continue before refactoring the parsers.
                    #continue # Not going to extract mocoeffs
                # Need to add an extra array to self.mocoeffs
                self.mocoeffs.append(numpy.zeros((self.nmo, self.nbasis), "d"))
            else:
                beta = False
                self.aonames = []
                self.atombasis = []
                mocoeffs = [numpy.zeros((self.nmo, self.nbasis), "d")]

            base = 0
            self.popregular = False
            for base in range(0, self.nmo, 5):
                
                self.updateprogress(inputfile, "Coefficients", self.fupdate)
                         
                colmNames = inputfile.next()   

                if not colmNames.split():
                    self.logger.warning("Molecular coefficients header found but no coefficients.")
                    break;

                if base==0 and int(colmNames.split()[0])!=1:
                    # Implies that this is a POP=REGULAR calculation
                    # and so, only aonames (not mocoeffs) will be extracted
                    self.popregular = True
                symmetries = inputfile.next()
                eigenvalues = inputfile.next()
                for i in range(self.nbasis):
                                   
                    line = inputfile.next()
                    if base == 0 and not beta: # Just do this the first time 'round
                        # Changed below from :12 to :11 to deal with Elmar Neumann's example
                        parts = line[:11].split()
                        if len(parts) > 1: # New atom
                            if i>0:
                                self.atombasis.append(atombasis)
                            atombasis = []
                            atomname = "%s%s" % (parts[2], parts[1])
                        orbital = line[11:20].strip()
                        self.aonames.append("%s_%s" % (atomname, orbital))
                        atombasis.append(i)

                    part = line[21:].replace("D", "E").rstrip()
                    temp = [] 
                    for j in range(0, len(part), 10):
                        temp.append(float(part[j:j+10]))
                    if beta:
                        self.mocoeffs[1][base:base + len(part) / 10, i] = temp
                    else:
                        mocoeffs[0][base:base + len(part) / 10, i] = temp
                if base == 0 and not beta: # Do the last update of atombasis
                    self.atombasis.append(atombasis)
                if self.popregular:
                    # We now have aonames, so no need to continue
                    break
            if not self.popregular and not beta:
                self.mocoeffs = mocoeffs

        # Natural Orbital Coefficients (nocoeffs) - alternative for mocoeffs.
        # Most extensively formed after CI calculations, but not only.
        # Like for mocoeffs, this is also where aonames and atombasis are parsed.
        if line[5:33] == "Natural Orbital Coefficients":

            self.aonames = []
            self.atombasis = []
            nocoeffs = numpy.zeros((self.nmo, self.nbasis), "d")

            base = 0
            self.popregular = False
            for base in range(0, self.nmo, 5):
                
                self.updateprogress(inputfile, "Coefficients", self.fupdate)
                         
                colmNames = inputfile.next()   
                if base==0 and int(colmNames.split()[0])!=1:
                    # Implies that this is a POP=REGULAR calculation
                    # and so, only aonames (not mocoeffs) will be extracted
                    self.popregular = True

                # No symmetry line for natural orbitals.
                # symmetries = inputfile.next()
                eigenvalues = inputfile.next()

                for i in range(self.nbasis):
                                   
                    line = inputfile.next()

                    # Just do this the first time 'round.
                    if base == 0:

                        # Changed below from :12 to :11 to deal with Elmar Neumann's example.
                        parts = line[:11].split()
                        # New atom.
                        if len(parts) > 1:
                            if i>0:
                                self.atombasis.append(atombasis)
                            atombasis = []
                            atomname = "%s%s" % (parts[2], parts[1])
                        orbital = line[11:20].strip()
                        self.aonames.append("%s_%s" % (atomname, orbital))
                        atombasis.append(i)

                    part = line[21:].replace("D", "E").rstrip()
                    temp = [] 

                    for j in range(0, len(part), 10):
                        temp.append(float(part[j:j+10]))

                    nocoeffs[base:base + len(part) / 10, i] = temp

                # Do the last update of atombasis.
                if base == 0:
                    self.atombasis.append(atombasis)

                # We now have aonames, so no need to continue.
                if self.popregular:
                    break

            if not self.popregular:
                self.nocoeffs = nocoeffs

        # Pseudopotential charges.
        if line.find("Pseudopotential Parameters") > -1:

            dashes = inputfile.next()
            label1 = inputfile.next()
            label2 = inputfile.next()
            dashes = inputfile.next()

            line = inputfile.next()
            if line.find("Centers:") < 0:
                return
                # This was continue before parser refactoring.
                # continue

            centers = map(int, line.split()[1:])
            centers.sort() # Not always in increasing order
            
            self.coreelectrons = numpy.zeros(self.natom, "i")

            for center in centers:
                line = inputfile.next()
                front = line[:10].strip()
                while not (front and int(front) == center):
                    line = inputfile.next()
                    front = line[:10].strip()
                info = line.split()
                self.coreelectrons[center-1] = int(info[1]) - int(info[2])

        # This will be printed for counterpoise calculations only.
        # To prevent crashing, we need to know which fragment is being considered.
        # Other information is also printed in lines that start like this.
        if line[1:14] == 'Counterpoise:':
        
            if line[42:50] == "fragment":
                self.counterpoise = int(line[51:54])

        # This will be printed only during ONIOM calcs; use it to set a flag
        # that will allow assertion failures to be bypassed in the code.
        if line[1:7] == "ONIOM:":
            self.oniom = True

Example 35

Project: RMG-Py
Source File: molproparser.py
View license
    def extract(self, inputfile, line):
        """Extract information from the file object inputfile."""

        if line[1:19] == "ATOMIC COORDINATES":
            
            if not hasattr(self,"atomcoords"):
                self.atomcoords = []
                self.atomnos = []
            line = inputfile.next()
            line = inputfile.next()
            line = inputfile.next()
            atomcoords = []
            atomnos = []
            
            line = inputfile.next()
            while line.strip():
                temp = line.strip().split()
                atomcoords.append([utils.convertor(float(x),"bohr","Angstrom") for x in temp[3:6]]) #bohrs to angs
                atomnos.append(int(round(float(temp[2]))))
                line = inputfile.next()
                
            self.atomnos = numpy.array(atomnos, "i")
            self.atomcoords.append(atomcoords)
            self.natom = len(self.atomnos)
        
        # Use BASIS DATA to parse input for aonames and atombasis.
        # This is always the first place this information is printed, so no attribute check is needed.
        if line[1:11] == "BASIS DATA":
            
            blank = inputfile.next()
            header = inputfile.next()
            blank = inputfile.next()
            self.aonames = []
            self.atombasis = []
            self.gbasis = []
            for i in range(self.natom):
                self.atombasis.append([])
                self.gbasis.append([])
            
            line = "dummy"
            while line.strip() != "":
                line = inputfile.next()
                funcnr = line[1:6]
                funcsym = line[7:9]
                funcatom_ = line[11:14]
                functype_ = line[16:22]
                funcexp = line[25:38]
                funccoeffs = line[38:]

                # If a new function type is printed or the BASIS DATA block ends,
                #   then the previous functions can be added to gbasis.
                # When translating the Molpro function type name into a gbasis code,
                #   note that Molpro prints all components, and we want to add
                #   only one to gbasis, with the proper code (S,P,D,F,G).
                # Warning! The function types differ for cartesian/spherical functions.
                # Skip the first printed function type, however (line[3] != '1').
                if (functype_.strip() and line[1:4] != '  1') or line.strip() == "":
                    funcbasis = None
                    if functype in ['1s', 's']:
                        funcbasis = 'S'
                    if functype in ['x', '2px']:
                        funcbasis = 'P'
                    if functype in ['xx', '3d0']:
                        funcbasis = 'D'
                    if functype in ['xxx', '4f0']:
                        funcbasis = 'F'
                    if functype in ['xxxx', '5g0']:
                        funcbasis = 'G'
                    if funcbasis:

                        # The function is split into as many columns as there are.
                        for i in range(len(coefficients[0])):
                            func = (funcbasis, [])
                            for j in range(len(exponents)):
                                func[1].append((exponents[j],coefficients[j][i]))
                            self.gbasis[funcatom-1].append(func)

                # If it is a new type, set up the variables for the next shell(s).
                if functype_.strip():
                    exponents = []
                    coefficients = []
                    functype = functype_.strip()
                    funcatom = int(funcatom_.strip())

                # Add exponents and coefficients to lists.
                if line.strip():
                    funcexp = float(funcexp)
                    funccoeffs = [float(s) for s in funccoeffs.split()]
                    exponents.append(funcexp)
                    coefficients.append(funccoeffs)

                # If the function number is there, add to atombasis and aonames.
                if funcnr.strip():
                    funcnr = int(funcnr.split('.')[0])
                    self.atombasis[funcatom-1].append(funcnr-1)
                    element = self.table.element[self.atomnos[funcatom-1]]
                    aoname = "%s%i_%s" %(element, funcatom, functype)
                    self.aonames.append(aoname)

        if line[1:23] == "NUMBER OF CONTRACTIONS":
            
            nbasis = int(line.split()[3])
            if hasattr(self, "nbasis"):
                assert nbasis == self.nbasis
            else:
                self.nbasis = nbasis

        # This is used to signalize whether we are inside an SCF calculation.
        if line[1:8] == "PROGRAM" and line[14:18] == "-SCF":

            self.insidescf = True

        # Use this information instead of 'SETTING ...', in case the defaults are standard.
        # Note that this is sometimes printed in each geometry optimization step.
        if line[1:20] == "NUMBER OF ELECTRONS":
            
            spinup = int(line.split()[3][:-1])
            spindown = int(line.split()[4][:-1])
            # Nuclear charges (atomnos) should be parsed by now.
            nuclear = numpy.sum(self.atomnos)
            charge = nuclear - spinup - spindown
            mult = spinup - spindown + 1
            
            # Copy charge, or assert for exceptions if already exists.
            if not hasattr(self, "charge"):
                self.charge = charge
            else:
                assert self.charge == charge
            
            # Copy multiplicity, or assert for exceptions if already exists.
            if not hasattr(self, "mult"):
                self.mult = mult
            else:
                assert self.mult == mult
        
        # Convergence thresholds for the SCF cycle, should be contained in a line such as:
        #   CONVERGENCE THRESHOLDS:    1.00E-05 (Density)    1.40E-07 (Energy)
        if self.insidescf and line[1:24] == "CONVERGENCE THRESHOLDS:":

            if not hasattr(self, "scftargets"):
                self.scftargets = []

            scftargets = map(float, line.split()[2::2])
            self.scftargets.append(scftargets)
            # Usually two criteria, but save the names just in case.
            self.scftargetnames = line.split()[3::2]

        # Read in the print out of the SCF cycle - for scfvalues. For RHF looks like:
        # ITERATION    DDIFF          GRAD             ENERGY        2-EL.EN.            DIPOLE MOMENTS         DIIS
        #     1      0.000D+00      0.000D+00      -379.71523700   1159.621171   0.000000   0.000000   0.000000    0
        #     2      0.000D+00      0.898D-02      -379.74469736   1162.389787   0.000000   0.000000   0.000000    1
        #     3      0.817D-02      0.144D-02      -379.74635529   1162.041033   0.000000   0.000000   0.000000    2
        #     4      0.213D-02      0.571D-03      -379.74658063   1162.159929   0.000000   0.000000   0.000000    3
        #     5      0.799D-03      0.166D-03      -379.74660889   1162.144256   0.000000   0.000000   0.000000    4
        if self.insidescf and line[1:10] == "ITERATION":
        
            if not hasattr(self, "scfvalues"):
                self.scfvalues = []
        
            line = inputfile.next()
            energy = 0.0
            scfvalues = []
            while line.strip() != "":
                if line.split()[0].isdigit():
                
                    ddiff = float(line.split()[1].replace('D','E'))
                    newenergy = float(line.split()[3])
                    ediff = newenergy - energy
                    energy = newenergy

                    # The convergence thresholds must have been read above.
                    # Presently, we recognize MAX DENSITY and MAX ENERGY thresholds.
                    numtargets = len(self.scftargetnames)
                    values = [numpy.nan]*numtargets
                    for n,name in zip(range(numtargets),self.scftargetnames):
                        if "ENERGY" in name.upper():
                            values[n] = ediff
                        elif "DENSITY" in name.upper():
                            values[n] = ddiff
                    scfvalues.append(values)

                line = inputfile.next()
            self.scfvalues.append(numpy.array(scfvalues))

        # SCF result - RHF/UHF and DFT (RKS) energies.
        if line[1:5] in ["!RHF", "!UHF", "!RKS"] and line[16:22] == "ENERGY":
            
            if not hasattr(self, "scfenergies"):
                self.scfenergies = []
            scfenergy = float(line.split()[4])
            self.scfenergies.append(utils.convertor(scfenergy, "hartree", "eV"))
            
            # We are now done with SCF cycle (after a few lines).
            self.insidescf = False

        # MP2 energies.
        if line[1:5] == "!MP2":
        
            if not hasattr(self, 'mpenergies'):
                self.mpenergies = []
            mp2energy = float(line.split()[-1])
            mp2energy = utils.convertor(mp2energy, "hartree", "eV")
            self.mpenergies.append([mp2energy])
            
        # MP2 energies if MP3 or MP4 is also calculated.
        if line[1:5] == "MP2:":
        
            if not hasattr(self, 'mpenergies'):
                self.mpenergies = []
            mp2energy = float(line.split()[2])
            mp2energy = utils.convertor(mp2energy, "hartree", "eV")
            self.mpenergies.append([mp2energy])
            
        # MP3 (D) and MP4 (DQ or SDQ) energies.
        if line[1:8] == "MP3(D):":
        
            mp3energy = float(line.split()[2])
            mp2energy = utils.convertor(mp3energy, "hartree", "eV")
            line = inputfile.next()
            self.mpenergies[-1].append(mp2energy)
            if line[1:9] == "MP4(DQ):":
                mp4energy = float(line.split()[2])
                line = inputfile.next()
                if line[1:10] == "MP4(SDQ):":
                    mp4energy = float(line.split()[2])
                mp4energy = utils.convertor(mp4energy, "hartree", "eV")
                self.mpenergies[-1].append(mp4energy)

        # The CCSD program handles all closed-shell coupled cluster runs.
        if line[1:15] == "PROGRAM * CCSD":
        
            if not hasattr(self, "ccenergies"):
                self.ccenergies = []
            while line[1:20] != "Program statistics:":
                # The last energy (most exact) will be read last and thus saved.
                if line[1:5] == "!CCD" or line[1:6] == "!CCSD" or line[1:9] == "!CCSD(T)":
                    ccenergy = float(line.split()[-1])
                    ccenergy = utils.convertor(ccenergy, "hartree", "eV")
                line = inputfile.next()
            self.ccenergies.append(ccenergy)

        # Read the occupancy (index of HOMO s).
        # For restricted calculations, there is one line here. For unrestricted, two:
        #   Final alpha occupancy:  ...
        #   Final beta  occupancy:  ...
        if line[1:17] == "Final occupancy:":
            self.homos = [int(line.split()[-1])-1]
        if line[1:23] == "Final alpha occupancy:":
            self.homos = [int(line.split()[-1])-1]
            line = inputfile.next()
            self.homos.append(int(line.split()[-1])-1)

        # From this block atombasis, moenergies, and mocoeffs can be parsed.
        # Note that Molpro does not print this by default, you must add this in the input:
        #   GPRINT,ORBITALS
        # What's more, this prints only the occupied orbitals. To get virtuals, add also:
        #   ORBPTIN,NVIRT
        #   where NVIRT is how many to print (can be some large number, like 99999, to print all).
        # The block is in general flipped when compared to other programs (GAMESS, Gaussian), and
        #   MOs in the rows. Also, it does not cut the table into parts, rather each MO row has
        #   as many lines as it takes to print all the coefficients, as shown below:
        #
        # ELECTRON ORBITALS
        # =================
        #
        #
        #   Orb  Occ    Energy  Couls-En    Coefficients
        #
        #                                   1 1s      1 1s      1 2px     1 2py     1 2pz     2 1s   (...)
        #                                   3 1s      3 1s      3 2px     3 2py     3 2pz     4 1s   (...)
        # (...)
        #
        #   1.1   2   -11.0351  -43.4915  0.701460  0.025696 -0.000365 -0.000006  0.000000  0.006922 (...)
        #                                -0.006450  0.004742 -0.001028 -0.002955  0.000000 -0.701460 (...)
        # (...)
        #
        # For unrestricted calculations, ELECTRON ORBITALS is followed on the same line
        #   by FOR POSITIVE SPIN or FOR NEGATIVE SPIN.
        # For examples, see data/Molpro/basicMolpro2006/dvb_sp*.
        if line[1:18] == "ELECTRON ORBITALS" or self.electronorbitals:
            # Detect if we are reading beta (negative spin) orbitals.
            spin = 0
            if line[19:36] == "FOR NEGATIVE SPIN" or self.electronorbitals[19:36] == "FOR NEGATIVE SPIN":
                spin = 1
            
            if not self.electronorbitals:
                dashes = inputfile.next()
            blank = inputfile.next()
            blank = inputfile.next()
            headers = inputfile.next()
            blank = inputfile.next()
            
            # Parse the list of atomic orbitals if atombasis or aonames is missing.
            line = inputfile.next()
            if not hasattr(self, "atombasis") or not hasattr(self, "aonames"):
                self.atombasis = []
                for i in range(self.natom):
                    self.atombasis.append([])
                self.aonames = []
                aonum = 0
                while line.strip():
                    for s in line.split():
                        if s.isdigit():
                            atomno = int(s)
                            self.atombasis[atomno-1].append(aonum)
                            aonum += 1
                        else:
                            functype = s
                            element = self.table.element[self.atomnos[atomno-1]]
                            aoname = "%s%i_%s" %(element, atomno, functype)
                            self.aonames.append(aoname)
                    line = inputfile.next()
            else:
                while line.strip():
                    line = inputfile.next()

            # Now there can be one or two blank lines.
            while not line.strip():
                line = inputfile.next()
            
            # Create empty moenergies and mocoeffs if they don't exist.
            if not hasattr(self, "moenergies"):
                self.moenergies = [[]]
                self.mocoeffs = [[]]
            # Do the same if they exist and are being read again (spin=0),
            #   this means only the last print-out of these data are saved,
            #   which is consistent with current cclib practices.
            elif len(self.moenergies) == 1 and spin == 0:
                self.moenergies = [[]]
                self.mocoeffs = [[]]
            else:
                self.moenergies.append([])
                self.mocoeffs.append([])
                
            while line.strip() and not "ORBITALS" in line:
                coeffs = []
                while line.strip() != "":
                    if line[:30].strip():
                        moenergy = float(line.split()[2])
                        moenergy = utils.convertor(moenergy, "hartree", "eV")
                        self.moenergies[spin].append(moenergy)
                    line = line[31:]
                    # Each line has 10 coefficients in 10.6f format.
                    num = len(line)/10
                    for i in range(num):
                        try:
                            coeff = float(line[10*i:10*(i+1)])
                        # Molpro prints stars when coefficients are huge.
                        except ValueError, detail:
                            self.logger.warn("Set coefficient to zero: %s" %detail)
                            coeff = 0.0
                        coeffs.append(coeff)
                    line = inputfile.next()
                self.mocoeffs[spin].append(coeffs)
                line = inputfile.next()
            
            # Check if last line begins the next ELECTRON ORBITALS section.
            if line[1:18] == "ELECTRON ORBITALS":
                self.electronorbitals = line
            else:
                self.electronorbitals = ""

        # If the MATROP program was called appropriately,
        #   the atomic obital overlap matrix S is printed.
        # The matrix is printed straight-out, ten elements in each row, both halves.
        # Note that if the entire matrix is not printed, then aooverlaps
        #   will not have dimensions nbasis x nbasis.
        if line[1:9] == "MATRIX S":
        
            blank = inputfile.next()
            symblocklabel = inputfile.next()
            if not hasattr(self, "aooverlaps"):
                self.aooverlaps = [[]]
            line = inputfile.next()
            while line.strip() != "":
                elements = [float(s) for s in line.split()]
                if len(self.aooverlaps[-1]) + len(elements) <= self.nbasis:
                    self.aooverlaps[-1] += elements
                else:
                    n = len(self.aooverlaps[-1]) + len(elements) - self.nbasis
                    self.aooverlaps[-1] += elements[:-n]
                    self.aooverlaps.append([])
                    self.aooverlaps[-1] += elements[-n:]
                line = inputfile.next()

        # Thresholds are printed only if the defaults are changed with GTHRESH.
        # In that case, we can fill geotargets with non-default values.
        # The block should look like this as of Molpro 2006.1:
        #   THRESHOLDS:

        #   ZERO    =  1.00D-12  ONEINT  =  1.00D-12  TWOINT  =  1.00D-11  PREFAC  =  1.00D-14  LOCALI  =  1.00D-09  EORDER  =  1.00D-04
        #   ENERGY  =  0.00D+00  ETEST   =  0.00D+00  EDENS   =  0.00D+00  THRDEDEF=  1.00D-06  GRADIENT=  1.00D-02  STEP    =  1.00D-03
        #   ORBITAL =  1.00D-05  CIVEC   =  1.00D-05  COEFF   =  1.00D-04  PRINTCI =  5.00D-02  PUNCHCI =  9.90D+01  OPTGRAD =  3.00D-04
        #   OPTENERG=  1.00D-06  OPTSTEP =  3.00D-04  THRGRAD =  2.00D-04  COMPRESS=  1.00D-11  VARMIN  =  1.00D-07  VARMAX  =  1.00D-03
        #   THRDOUB =  0.00D+00  THRDIV  =  1.00D-05  THRRED  =  1.00D-07  THRPSP  =  1.00D+00  THRDC   =  1.00D-10  THRCS   =  1.00D-10
        #   THRNRM  =  1.00D-08  THREQ   =  0.00D+00  THRDE   =  1.00D+00  THRREF  =  1.00D-05  SPARFAC =  1.00D+00  THRDLP  =  1.00D-07
        #   THRDIA  =  1.00D-10  THRDLS  =  1.00D-07  THRGPS  =  0.00D+00  THRKEX  =  0.00D+00  THRDIS  =  2.00D-01  THRVAR  =  1.00D-10
        #   THRLOC  =  1.00D-06  THRGAP  =  1.00D-06  THRLOCT = -1.00D+00  THRGAPT = -1.00D+00  THRORB  =  1.00D-06  THRMLTP =  0.00D+00
        #   THRCPQCI=  1.00D-10  KEXTA   =  0.00D+00  THRCOARS=  0.00D+00  SYMTOL  =  1.00D-06  GRADTOL =  1.00D-06  THROVL  =  1.00D-08
        #   THRORTH =  1.00D-08  GRID    =  1.00D-06  GRIDMAX =  1.00D-03  DTMAX   =  0.00D+00
        if line [1:12] == "THRESHOLDS":

            blank = inputfile.next()
            line = inputfile.next()
            while line.strip():

                if "OPTENERG" in line:
                    start = line.find("OPTENERG")
                    optenerg = line[start+10:start+20]
                if "OPTGRAD" in line:
                    start = line.find("OPTGRAD")
                    optgrad = line[start+10:start+20]
                if "OPTSTEP" in line:
                    start = line.find("OPTSTEP")
                    optstep = line[start+10:start+20]
                line = inputfile.next()

            self.geotargets = [optenerg, optgrad, optstep]

        # The optimization history is the source for geovalues:
        #   END OF GEOMETRY OPTIMIZATION.    TOTAL CPU:       246.9 SEC
        #
        #     ITER.   ENERGY(OLD)    ENERGY(NEW)      DE          GRADMAX     GRADNORM    GRADRMS     STEPMAX     STEPLEN     STEPRMS
        #      1  -382.02936898  -382.04914450    -0.01977552  0.11354875  0.20127947  0.01183997  0.12972761  0.20171740  0.01186573
        #      2  -382.04914450  -382.05059234    -0.00144784  0.03299860  0.03963339  0.00233138  0.05577169  0.06687650  0.00393391
        #      3  -382.05059234  -382.05069136    -0.00009902  0.00694359  0.01069889  0.00062935  0.01654549  0.02016307  0.00118606
        #      4  -382.05069136  -382.05069130     0.00000006  0.00295497  0.00363023  0.00021354  0.00234307  0.00443525  0.00026090
        #      5  -382.05069130  -382.05069206    -0.00000075  0.00098220  0.00121031  0.00007119  0.00116863  0.00140452  0.00008262
        #      6  -382.05069206  -382.05069209    -0.00000003  0.00011350  0.00022306  0.00001312  0.00013321  0.00024526  0.00001443
        if line[1:30] == "END OF GEOMETRY OPTIMIZATION.":
            
            blank = inputfile.next()
            headers = inputfile.next()

            # Although criteria can be changed, the printed format should not change.
            # In case it does, retrieve the columns for each parameter.
            headers = headers.split()
            index_THRENERG = headers.index('DE')
            index_THRGRAD = headers.index('GRADMAX')
            index_THRSTEP = headers.index('STEPMAX')

            line = inputfile.next()
            self.geovalues = []            
            while line.strip() != "":
                
                line = line.split()
                geovalues = []
                geovalues.append(float(line[index_THRENERG]))
                geovalues.append(float(line[index_THRGRAD]))
                geovalues.append(float(line[index_THRSTEP]))
                self.geovalues.append(geovalues)
                line = inputfile.next()

        # This block should look like this:
        #   Normal Modes
        #
        #                                1 Au        2 Bu        3 Ag        4 Bg        5 Ag 
        #   Wavenumbers [cm-1]          151.81      190.88      271.17      299.59      407.86
        #   Intensities [km/mol]          0.33        0.28        0.00        0.00        0.00
        #   Intensities [relative]        0.34        0.28        0.00        0.00        0.00
        #             CX1              0.00000    -0.01009     0.02577     0.00000     0.06008
        #             CY1              0.00000    -0.05723    -0.06696     0.00000     0.06349
        #             CZ1             -0.02021     0.00000     0.00000     0.11848     0.00000
        #             CX2              0.00000    -0.01344     0.05582     0.00000    -0.02513
        #             CY2              0.00000    -0.06288    -0.03618     0.00000     0.00349
        #             CZ2             -0.05565     0.00000     0.00000     0.07815     0.00000
        #             ...
        # Molpro prints low frequency modes in a subsequent section with the same format,
        #   which also contains zero frequency modes, with the title:
        #   Normal Modes of low/zero frequencies
        if line[1:13] == "Normal Modes":
            
            if line[1:37] == "Normal Modes of low/zero frequencies":
                islow = True
            else:
                islow = False

            blank = inputfile.next()

            # Each portion of five modes is followed by a single blank line.
            # The whole block is followed by an additional blank line.
            line = inputfile.next()
            while line.strip():

                if line[1:25].isspace():
                    numbers = map(int, line.split()[::2])
                    vibsyms = line.split()[1::2]

                if line[1:12] == "Wavenumbers":
                    vibfreqs = map(float, line.strip().split()[2:])
                    
                if line[1:21] == "Intensities [km/mol]":
                    vibirs = map(float, line.strip().split()[2:])

                # There should always be 3xnatom displacement rows.
                if line[1:11].isspace() and line[13:25].strip().isdigit():

                    # There are a maximum of 5 modes per line.
                    nmodes = len(line.split())-1

                    vibdisps = []
                    for i in range(nmodes):
                        vibdisps.append([])
                        for n in range(self.natom):
                            vibdisps[i].append([])
                    for i in range(nmodes):
                        disp = float(line.split()[i+1])
                        vibdisps[i][0].append(disp)
                    for i in range(self.natom*3 - 1):
                        line = inputfile.next()
                        iatom = (i+1)/3
                        for i in range(nmodes):
                            disp = float(line.split()[i+1])
                            vibdisps[i][iatom].append(disp)

                line = inputfile.next()
                if not line.strip():
            
                    if not hasattr(self, "vibfreqs"):
                        self.vibfreqs = []
                    if not hasattr(self, "vibsyms"):
                        self.vibsyms = []
                    if not hasattr(self, "vibirs") and "vibirs" in dir():
                        self.vibirs = []
                    if not hasattr(self, "vibdisps") and "vibdisps" in dir():
                        self.vibdisps = []

                    if not islow:
                        self.vibfreqs.extend(vibfreqs)
                        self.vibsyms.extend(vibsyms)
                        if "vibirs" in dir():
                            self.vibirs.extend(vibirs)
                        if "vibdisps" in dir():
                            self.vibdisps.extend(vibdisps)
                    else:        
                        nonzero = [f > 0 for f in vibfreqs]
                        vibfreqs = [f for f in vibfreqs if f > 0]
                        self.vibfreqs = vibfreqs + self.vibfreqs
                        vibsyms = [vibsyms[i] for i in range(len(vibsyms)) if nonzero[i]]
                        self.vibsyms = vibsyms + self.vibsyms
                        if "vibirs" in dir():
                            vibirs = [vibirs[i] for i in range(len(vibirs)) if nonzero[i]]
                            self.vibirs = vibirs + self.vibirs
                        if "vibdisps" in dir():
                            vibdisps = [vibdisps[i] for i in range(len(vibdisps)) if nonzero[i]]
                            self.vibdisps = vibdisps + self.vibdisps

                    line = inputfile.next()
            
        if line[1:16] == "Force Constants":
            
            self.logger.info("Creating attribute hessian")
            self.hessian = []
            line = inputfile.next()
            hess = []
            tmp = []
            
            while line.strip():
                try: map(float, line.strip().split()[2:])
                except: 
                    line = inputfile.next()
                line.strip().split()[1:]
                hess.extend([map(float,line.strip().split()[1:])])
                line = inputfile.next()
            lig = 0
            
            while (lig==0) or (len(hess[0]) > 1):
                tmp.append(hess.pop(0))
                lig += 1
            k = 5
            
            while len(hess) != 0:
                tmp[k] += hess.pop(0)
                k += 1
                if (len(tmp[k-1]) == lig): break
                if k >= lig: k = len(tmp[-1])
            for l in tmp: self.hessian += l
            
        if line[1:14] == "Atomic Masses" and hasattr(self,"hessian"):
            
            line = inputfile.next()
            self.amass = map(float, line.strip().split()[2:])
            
            while line.strip():
                line = inputfile.next()
                self.amass += map(float, line.strip().split()[2:])        

Example 36

Project: RMG-Py
Source File: groups.py
View license
    def generateGroupAdditivityValues(self, trainingSet, kunits, method='Arrhenius'):
        """
        Generate the group additivity values using the given `trainingSet`,
        a list of 2-tuples of the form ``(template, kinetics)``. You must also
        specify the `kunits` for the family and the `method` to use when
        generating the group values. Supported methods:

        * ``'KineticsData'`` -- fit a discrete set of k(T) points (with
          uncertainties) by least squares against k(T) data evaluated from
          each training entry.
        * ``'Arrhenius'`` -- fit Arrhenius parameters (A, n, Ea) by least
          squares against k(T) data evaluated from each training entry.
        * ``'Arrhenius2'`` -- fit Arrhenius parameters directly against the
          (log A, n, Ea) values of the training entries.

        Returns ``True`` if the group values have changed significantly since
        the last time they were fitted, or ``False`` otherwise.
        """
        
        # keep track of previous values so we can detect if they change
        old_entries = dict()
        for label,entry in self.entries.items():
            if entry.data is not None:
                old_entries[label] = entry.data
        
        # Determine a complete list of the entries in the database, sorted as in the tree
        groupEntries = self.top[:]
        for entry in self.top:
            groupEntries.extend(self.descendants(entry))
        
        # Determine a unique list of the groups we will be able to fit parameters for
        # (each non-top group used in a training template, plus its ancestors
        # up to -- but excluding -- the top-level node).
        groupList = []
        for template, kinetics in trainingSet:
            for group in template:
                if group not in self.top:
                    groupList.append(group)
                    groupList.extend(self.ancestors(group)[:-1])
        groupList = list(set(groupList))
        groupList.sort(key=lambda x: x.index)

        if method == 'KineticsData':
            # Fit a discrete set of k(T) data points by training against k(T) data
            
            Tdata = numpy.array([300,400,500,600,800,1000,1500,2000])
            
            # Initialize dictionaries of fitted group values and uncertainties
            groupValues = {}; groupUncertainties = {}; groupCounts = {}; groupComments = {}
            for entry in groupEntries:
                groupValues[entry] = []
                groupUncertainties[entry] = []
                groupCounts[entry] = []
                groupComments[entry] = set()
            
            # Generate least-squares matrix and vector
            A = []; b = []
            
            kdata = []
            for template, kinetics in trainingSet:
                
                # Evaluate k(T) at each reference temperature for this entry.
                if isinstance(kinetics, (Arrhenius, KineticsData)):
                    kd = [kinetics.getRateCoefficient(T) for T in Tdata]
                elif isinstance(kinetics, ArrheniusEP):
                    kd = [kinetics.getRateCoefficient(T, 0) for T in Tdata]
                else:
                    raise Exception('Unexpected kinetics model of type {0} for template {1}.'.format(kinetics.__class__, template))
                kdata.append(kd)
                    
                # Create every combination of each group and its ancestors with each other
                combinations = []
                for group in template:
                    groups = [group]; groups.extend(self.ancestors(group))
                    combinations.append(groups)
                combinations = getAllCombinations(combinations)
                # Add a row to the matrix for each combination
                # (1 for each group present in the combination plus a trailing
                # 1 for the family-wide baseline column; b holds log10 k).
                for groups in combinations:
                    Arow = [1 if group in groups else 0 for group in groupList]
                    Arow.append(1)
                    brow = [math.log10(k) for k in kd]
                    A.append(Arow); b.append(brow)
                    
                    for group in groups:
                        groupComments[group].add("{0!s}".format(template))
                
            if len(A) == 0:
                logging.warning('Unable to fit kinetics groups for family "{0}"; no valid data found.'.format(self.label))
                return
            A = numpy.array(A)
            b = numpy.array(b)
            kdata = numpy.array(kdata)
            
            # Least-squares solve; x has one row per group column plus the
            # baseline row, one column per temperature.
            x, residues, rank, s = numpy.linalg.lstsq(A, b)
            
            for t, T in enumerate(Tdata):
                
                # Determine error in each group (on log scale)
                stdev = numpy.zeros(len(groupList)+1, numpy.float64)
                # NOTE(review): numpy.int is deprecated (removed in NumPy >= 1.24)
                # -- confirm the pinned NumPy version supports it.
                count = numpy.zeros(len(groupList)+1, numpy.int)
                
                for index in range(len(trainingSet)):
                    template, kinetics = trainingSet[index]
                    kd = math.log10(kdata[index,t])
                    # Model prediction: baseline plus the fitted contribution
                    # of every group in the template.
                    km = x[-1,t] + sum([x[groupList.index(group),t] for group in template if group in groupList])
                    variance = (km - kd)**2
                    for group in template:
                        groups = [group]; groups.extend(self.ancestors(group))
                        for g in groups:
                            if g not in self.top:
                                ind = groupList.index(g)
                                stdev[ind] += variance
                                count[ind] += 1
                    stdev[-1] += variance
                    count[-1] += 1
                # NOTE(review): groups seen exactly once give a zero divisor
                # (count - 1 == 0), yielding inf/nan stdev -- confirm intended;
                # the isnan() filter below appears to rely on it.
                stdev = numpy.sqrt(stdev / (count - 1))
                import scipy.stats
                # 95% confidence interval half-width from Student's t.
                ci = scipy.stats.t.ppf(0.975, count - 1) * stdev
                
                # Update dictionaries of fitted group values and uncertainties
                # (values were fitted in log10 space, so undo with 10**).
                for entry in groupEntries:
                    if entry == self.top[0]:
                        groupValues[entry].append(10**x[-1,t])
                        groupUncertainties[entry].append(10**ci[-1])
                        groupCounts[entry].append(count[-1])
                    elif entry in groupList:
                        index = groupList.index(entry)
                        groupValues[entry].append(10**x[index,t])
                        groupUncertainties[entry].append(10**ci[index])
                        groupCounts[entry].append(count[index])
                    else:
                        groupValues[entry] = None
                        groupUncertainties[entry] = None
                        groupCounts[entry] = None
            
            # Store the fitted group values and uncertainties on the associated entries
            for entry in groupEntries:
                if groupValues[entry] is not None:
                    entry.data = KineticsData(Tdata=(Tdata,"K"), kdata=(groupValues[entry],kunits))
                    # Only attach uncertainties when every temperature produced
                    # a finite value (multiplicative '*|/' uncertainty type).
                    if not any(numpy.isnan(numpy.array(groupUncertainties[entry]))):
                        entry.data.kdata.uncertainties = numpy.array(groupUncertainties[entry])
                        entry.data.kdata.uncertaintyType = '*|/'
                    entry.shortDesc = "Group additive kinetics."
                    entry.longDesc = "Fitted to {0} rates.\n".format(groupCounts[entry])
                    entry.longDesc += "\n".join(groupComments[entry])
                else:
                    entry.data = None
        
        elif method == 'Arrhenius':
            # Fit Arrhenius parameters (A, n, Ea) by training against k(T) data
            
            Tdata = numpy.array([300,400,500,600,800,1000,1500,2000])
            logTdata = numpy.log(Tdata)
            # 1000/(R*T) factor so the fitted Ea comes out in kJ/mol
            # (matches the "kJ/mol" units attached below).
            Tinvdata = 1000. / (constants.R * Tdata)
            
            A = []; b = []
            
            kdata = []
            for template, kinetics in trainingSet:
                
                if isinstance(kinetics, (Arrhenius, KineticsData)):
                    kd = [kinetics.getRateCoefficient(T) for T in Tdata]
                elif isinstance(kinetics, ArrheniusEP):
                    kd = [kinetics.getRateCoefficient(T, 0) for T in Tdata]
                else:
                    raise Exception('Unexpected kinetics model of type {0} for template {1}.'.format(kinetics.__class__, template))
                kdata.append(kd)
                
                # Create every combination of each group and its ancestors with each other
                combinations = []
                for group in template:
                    groups = [group]; groups.extend(self.ancestors(group))
                    combinations.append(groups)
                combinations = getAllCombinations(combinations)
                
                # Add a row to the matrix for each combination at each temperature
                # (three columns per group -- [1, ln T, -1000/(R*T)] -- plus a
                # trailing triple for the family-wide baseline; b holds ln k).
                for t, T in enumerate(Tdata):
                    logT = logTdata[t]
                    Tinv = Tinvdata[t]
                    for groups in combinations:
                        Arow = []
                        for group in groupList:
                            if group in groups:
                                Arow.extend([1,logT,-Tinv])
                            else:
                                Arow.extend([0,0,0])
                        Arow.extend([1,logT,-Tinv])
                        brow = math.log(kd[t])
                        A.append(Arow); b.append(brow)
            
            if len(A) == 0:
                logging.warning('Unable to fit kinetics groups for family "{0}"; no valid data found.'.format(self.label))
                return
            A = numpy.array(A)
            b = numpy.array(b)
            kdata = numpy.array(kdata)
            
            x, residues, rank, s = numpy.linalg.lstsq(A, b)
            
            # Store the results: the last triple of x is the baseline
            # (ln A, n, Ea), then one triple per group in groupList order.
            self.top[0].data = Arrhenius(
                A = (math.exp(x[-3]),kunits),
                n = x[-2],
                Ea = (x[-1],"kJ/mol"),
                T0 = (1,"K"),
            )
            for i, group in enumerate(groupList):
                group.data = Arrhenius(
                    A = (math.exp(x[3*i]),kunits),
                    n = x[3*i+1],
                    Ea = (x[3*i+2],"kJ/mol"),
                    T0 = (1,"K"),
                )
        
        elif method == 'Arrhenius2':
            # Fit Arrhenius parameters (A, n, Ea) by training against (A, n, Ea) values
            
            A = []; b = []
            
            for template, kinetics in trainingSet:
                
                # Create every combination of each group and its ancestors with each other
                combinations = []
                for group in template:
                    groups = [group]; groups.extend(self.ancestors(group))
                    combinations.append(groups)
                combinations = getAllCombinations(combinations)
                        
                # Add a row to the matrix for each parameter; only pure
                # Arrhenius entries (or ArrheniusEP with alpha == 0) are usable
                # here. Each b row is (ln A, n, Ea in kJ/mol).
                if isinstance(kinetics, Arrhenius) or (isinstance(kinetics, ArrheniusEP) and kinetics.alpha.value_si == 0):
                    for groups in combinations:
                        Arow = []
                        for group in groupList:
                            if group in groups:
                                Arow.append(1)
                            else:
                                Arow.append(0)
                        Arow.append(1)
                        Ea = kinetics.E0.value_si if isinstance(kinetics, ArrheniusEP) else kinetics.Ea.value_si
                        brow = [math.log(kinetics.A.value_si), kinetics.n.value_si, Ea / 1000.]
                        A.append(Arow); b.append(brow)
            
            if len(A) == 0:
                logging.warning('Unable to fit kinetics groups for family "{0}"; no valid data found.'.format(self.label))
                return
            A = numpy.array(A)
            b = numpy.array(b)
            
            x, residues, rank, s = numpy.linalg.lstsq(A, b)
            
            # Store the results (last row of x is the baseline triple).
            self.top[0].data = Arrhenius(
                A = (math.exp(x[-1,0]),kunits),
                n = x[-1,1],
                Ea = (x[-1,2],"kJ/mol"),
                T0 = (1,"K"),
            )
            for i, group in enumerate(groupList):
                group.data = Arrhenius(
                    A = (math.exp(x[i,0]),kunits),
                    n = x[i,1],
                    Ea = (x[i,2],"kJ/mol"),
                    T0 = (1,"K"),
                )
        
        # Add a note to the history of each changed item indicating that we've generated new group values
        # "Changed" means any entry's new fit differs from its old fit by more
        # than 1% in any parameter, or an entry gained/lost data entirely.
        changed = False
        for label, entry in self.entries.items():
            # NOTE(review): dict.has_key is Python 2 only -- this module
            # appears to target Python 2; confirm before porting.
            if entry.data is not None and old_entries.has_key(label):
                if (isinstance(entry.data, KineticsData) and 
                    isinstance(old_entries[label], KineticsData) and
                    len(entry.data.kdata.value_si) == len(old_entries[label].kdata.value_si) and
                    all(abs(entry.data.kdata.value_si / old_entries[label].kdata.value_si - 1) < 0.01)):
                    #print "New group values within 1% of old."
                    pass
                elif (isinstance(entry.data, Arrhenius) and 
                    isinstance(old_entries[label], Arrhenius) and
                    abs(entry.data.A.value_si / old_entries[label].A.value_si - 1) < 0.01 and
                    abs(entry.data.n.value_si / old_entries[label].n.value_si - 1) < 0.01 and
                    abs(entry.data.Ea.value_si / old_entries[label].Ea.value_si - 1) < 0.01 and
                    abs(entry.data.T0.value_si / old_entries[label].T0.value_si - 1) < 0.01):
                    #print "New group values within 1% of old."
                    pass
                else:
                    changed = True
                    break
            else:
                changed = True
                break
        
        return changed

Example 37

Project: rlpy
Source File: FixedPolicy.py
View license
    def pi2(self, s, terminal, p_actions):
        """
        Return the fixed (hand-coded) policy's action for state `s`,
        dispatching on the class name of the representation's domain.

        NOTE(review): `terminal` and `p_actions` are accepted but never read
        in this body -- presumably kept for policy-interface compatibility;
        confirm against the caller.

        Returns an action index (int), or ``None`` for unsupported domains
        or unknown GridWorld policy names.
        """
        domain = self.representation.domain
        # Bail out early for domains with no fixed policy defined.
        if not className(domain) in self.supportedDomains:
            print "ERROR: There is no fixed policy defined for %s" % className(domain)
            return None

        if className(domain) == 'GridWorld':
            # Actions are Up, Down, Left, Right
            if not self.policyName in self.gridWorldPolicyNames:
                print "Error: There is no GridWorld policy with name %s" % self.policyName
                return None

            if self.policyName == 'cw_circle':
                # Cycle through actions, starting with 0, causing agent to go
                # in loop
                # self.curAction persists between calls, so the agent keeps
                # turning clockwise from wherever it left off.
                if not hasattr(self, "curAction"):
                    # it doesn't exist yet, so initialize it [immediately
                    # incremented]
                    self.curAction = 0
                while (not(self.curAction in domain.possibleActions(s))):
                    # We can't do something simple because of the order in which actions are defined
                    # must do switch statement
                    if self.curAction == 0:  # up
                        self.curAction = 3
                    elif self.curAction == 3:  # right
                        self.curAction = 1
                    elif self.curAction == 1:  # down
                        self.curAction = 2
                    elif self.curAction == 2:  # left
                        self.curAction = 0
                    else:
                        print 'Something terrible happened...got an invalid action on GridWorld Fixed Policy'
    #                 self.curAction = self.curAction % domain.actions_num
            elif self.policyName == 'ccw_circle':
                # Cycle through actions, starting with 0, causing agent to go
                # in loop
                # Counter-clockwise variant of the switch above.
                if not hasattr(self, "curAction"):
                    # it doesn't exist yet, so initialize it
                    self.curAction = 1
                while (not(self.curAction in domain.possibleActions(s))):
                    # We can't do something simple because of the order in which actions are defined
                    # must do switch statement
                    if self.curAction == 3:  # right
                        self.curAction = 0
                    elif self.curAction == 0:  # up
                        self.curAction = 2
                    elif self.curAction == 2:  # left
                        self.curAction = 1
                    elif self.curAction == 1:  # down
                        self.curAction = 3
                    else:
                        print 'Something terrible happened...got an invalid action on GridWorld Fixed Policy'
    #                 self.curAction = self.curAction % domain.actions_num

            else:
                print "Error: No policy defined with name %s, but listed in gridWorldPolicyNames" % self.policyName
                print "You need to create a switch statement for the policy name above, or remove it from gridWorldPolicyNames"
                return None
            return self.curAction

# Cycle through actions, starting with 0, causing agent to go in other direction
#             if not hasattr(pi, "curAction"):
# pi.curAction = domain.actions_num-1  # it doesn't exist yet, so initialize it
#             if not(pi.curAction in domain.possibleActions(s)):
#                 pi.curAction -= 1
#                 if pi.curAction < 0: pi.curAction = domain.actions_num-1

        if className(domain) == 'InfCartPoleBalance':
            # Fixed policy rotate the pendulum in the opposite direction of the
            # thetadot
            theta, thetadot = s
            if thetadot > 0:
                return 2
            else:
                return 0
        if className(domain) == 'BlocksWorld':
            # Fixed policy rotate the blocksworld = Optimal Policy (Always pick the next piece of the tower and move it to the tower
            # Policy: Identify the top of the tower.
            # move the next piece on the tower with 95% chance 5% take a random
            # action

            # Random Action with some probability
            # NOTE(review): the comment above says 5% random, but the
            # threshold here is 0.3 (30%) -- confirm which is intended.
            # TODO fix isTerminal use here
            if self.random_state.rand() < .3 or domain.isTerminal():
                return randSet(domain.possibleActions(s))

            # non-Random Policy
            # next_block is the block that should be stacked on the top of the tower
            # wrong_block is the highest block stacked on the top of the next_block
            # Wrong_tower_block is the highest stacked on the top of the tower
            blocks = domain.blocks
            # Length of the tower assumed to be built correctly.
            correct_tower_size = 0
            while True:
                # Check the next block
                block = correct_tower_size
                if (block == 0 and domain.on_table(block, s)) or domain.on(block, block - 1, s):
                    # This block is on the right position, check the next block
                    correct_tower_size += 1
                else:
                    # print s
                    # print "Incorrect block:", block
                    # The block is on the wrong place.
                    # 1. Check if the tower is empty => If not take one block from the tower and put it on the table
                    # 2. check to see if this wrong block is empty => If not put one block from its stack and put on the table
                    # 3. Otherwise move this block on the tower

                    ###################
                    # 1
                    ###################
                    # If the first block is in the wrong place, then the tower
                    # top which is table is empty by definition
                    if block != 0:
                        ideal_tower_top = block - 1
                        tower_top = domain.towerTop(ideal_tower_top, s)
                        if tower_top != ideal_tower_top:
                            # There is a wrong block there hence we should put
                            # it on the table first
                            return (
                                # put the top of the tower on the table since
                                # it is not correct
                                domain.getActionPutAonTable(tower_top)
                            )
                    ###################
                    # 2
                    ###################
                    block_top = domain.towerTop(block, s)
                    if block_top != block:
                        # The target block to be stacked is not empty
                        return domain.getActionPutAonTable(block_top)
                    ###################
                    # 3
                    ###################
                    if block == 0:
                        return domain.getActionPutAonTable(block)
                    else:
                        return domain.getActionPutAonB(block, block - 1)
        if className(domain) == 'IntruderMonitoring':
            # Each UAV assign themselves to a target
            # Each UAV finds the closest danger zone to its target and go towards there.
            # If UAVs_num > Target, the rest will hold position
            # Move all agents based on the taken action
            # State layout: first NUMBER_OF_AGENTS*2 entries are agent (row, col)
            # pairs, the remainder are target (row, col) pairs.
            agents = np.array(s[:domain.NUMBER_OF_AGENTS * 2].reshape(-1, 2))
            targets = np.array(s[domain.NUMBER_OF_AGENTS * 2:].reshape(-1, 2))
            zones = domain.danger_zone_locations
            # Default action is hold
            actions = np.ones(len(agents), dtype=np.integer) * 4
            planned_agents_num = min(len(agents), len(targets))
            for i in xrange(planned_agents_num):
                # Find cloasest zone (manhattan) to the corresponding target
                target = targets[i, :]
                distances = np.sum(
                    np.abs(np.tile(target, (len(zones), 1)) - zones), axis=1)
                z_row, z_col = zones[np.argmin(distances), :]
                # find the valid action
                a_row, a_col = agents[i, :]
                a = 4  # hold as a default action
                if a_row > z_row:
                    a = 0  # up
                if a_row < z_row:
                    a = 1  # down
                if a_col > z_col:
                    a = 2  # left
                if a_col < z_col:
                    a = 3  # right
                actions[i] = a
#                print "Agent=", agents[i,:]
#                print "Target", target
#                print "Zone", zones[argmin(distances),:]
#                print "Action", a
#                print '============'
            # Encode the per-agent action vector as a single action id
            # (base-5 encoding via vec2id).
            return vec2id(actions, np.ones(len(agents), dtype=np.integer) * 5)
        if className(domain) == 'SystemAdministrator':
            # Select a broken computer and reset it
            brokenComputers = np.where(s == 0)[0]
            if len(brokenComputers):
                return randSet(brokenComputers)
            else:
                return domain.computers_num
        if className(domain) == 'MountainCar':
            # Accelerate in the direction of the valley
            # WORK IN PROGRESS
            x, xdot = s
            if xdot > 0:
                return 2
            else:
                return 0
        if className(domain) == 'PST':
            # One stays at comm, n-1 stay at target area. Whenever fuel is
            # lower than reaching the base the move back
            # NOTE(review): leftover debug prints below -- presumably work in
            # progress; s is reassigned to the structured form before use.
            print s
            s = domain.state2Struct(s)
            uavs = domain.NUM_UAV
            print s
            return vec2id(np.zeros(uavs), np.ones(uavs) * 3)

Example 38

Project: director
Source File: robotviewbehaviors.py
View license
def getRobotActions(view, pickedObj, pickedPoint):
    """Build the context-menu action list for a right-click pick in the view.

    Returns a list of (label, callback) tuples; (None, None) entries are
    rendered as menu separators.  Which entries appear depends on how the
    picked object classifies: affordance, generic poly data item, grasp
    seed, reachable frame, and/or point cloud (the categories are not
    mutually exclusive).
    """

    # Classify the picked object once; each callback below keys off one of
    # these (a value is None/falsy when the category does not apply).
    reachFrame = getAsFrame(pickedObj)
    collisionParent = getCollisionParent(pickedObj)
    pointCloudObj = getObjectAsPointCloud(pickedObj)
    affordanceObj = pickedObj if isinstance(pickedObj, affordanceitems.AffordanceItem) else None

    def onReachLeft():
        # Plan a left-arm reach to the picked frame.
        reachToFrame(reachFrame, 'left', collisionParent)

    def onReachRight():
        # Plan a right-arm reach to the picked frame.
        reachToFrame(reachFrame, 'right', collisionParent)

    def flipHandSide():
        # Mirror every grasp-seed among the picked object and its children
        # to the opposite hand: swap the side label and color, reload the
        # hand mesh for the new side, rotate the hand frame 180 deg about
        # its Y axis, and rename "left"/"right" in object and frame names.
        for obj in [pickedObj] + pickedObj.children():
            if not isGraspSeed(obj):
                continue
            side = 'right' if obj.side == 'left' else 'left'
            obj.side = side
            color = [1.0, 1.0, 0.0]
            if side == 'right':
                color = [0.33, 1.0, 0.0]
            obj.setProperty('Color', color)

            polyData = handFactory.getNewHandPolyData(side)
            obj.setPolyData(polyData)

            handFrame = obj.children()[0]
            t = transformUtils.copyFrame(handFrame.transform)
            t.PreMultiply()
            t.RotateY(180)
            handFrame.copyFrame(t)

            objName = obj.getProperty('Name')
            frameName = handFrame.getProperty('Name')
            if side == 'left':
                obj.setProperty('Name', objName.replace("right", "left"))
                handFrame.setProperty('Name', frameName.replace("right", "left"))
            else:
                obj.setProperty('Name', objName.replace("left", "right"))
                handFrame.setProperty('Name', frameName.replace("left", "right"))
            obj._renderAllViews()

    def flipHandThumb():
        # Rotate the grasp-seed hand frame 180 deg about its Y axis so the
        # thumb points the other way.
        handFrame = pickedObj.children()[0]
        t = transformUtils.copyFrame(handFrame.transform)
        t.PreMultiply()
        t.RotateY(180)
        handFrame.copyFrame(t)
        pickedObj._renderAllViews()

    def onSplineLeft():
        splinewidget.planner.newSpline(pickedObj, 'left')

    def onSplineRight():
        splinewidget.planner.newSpline(pickedObj, 'right')


    def onSegmentGround():
        # Split the cloud into ground/scene, show both, hide the original.
        groundPoints, scenePoints =  segmentation.removeGround(pointCloudObj.polyData)
        vis.showPolyData(groundPoints, 'ground points', color=[0,1,0], parent='segmentation')
        vis.showPolyData(scenePoints, 'scene points', color=[1,0,1], parent='segmentation')
        pickedObj.setProperty('Visible', False)


    def onCopyPointCloud():
        # Deep-copy the picked cloud (baked into world frame), give it a
        # pseudo-random color, and show it under 'point clouds'.
        global lastRandomColor
        polyData = vtk.vtkPolyData()
        polyData.DeepCopy(pointCloudObj.polyData)

        if pointCloudObj.getChildFrame():
            polyData = segmentation.transformPolyData(polyData, pointCloudObj.getChildFrame().transform)
        polyData = segmentation.addCoordArraysToPolyData(polyData)

        # generate random color, and average with a common color to make them generally similar
        lastRandomColor = lastRandomColor + 0.1 + 0.1*random.random()
        rgb = colorsys.hls_to_rgb(lastRandomColor, 0.7, 1.0)
        obj = vis.showPolyData(polyData, pointCloudObj.getProperty('Name') + ' copy', color=rgb, parent='point clouds')

        #t = vtk.vtkTransform()
        #t.PostMultiply()
        #t.Translate(filterUtils.computeCentroid(polyData))
        #segmentation.makeMovable(obj, t)
        om.setActiveObject(obj)
        pickedObj.setProperty('Visible', False)

    def onMergeIntoPointCloud():
        # Ask the user for a destination cloud, merge the picked cloud into
        # it (both expressed in world frame), voxel-downsample the result,
        # recenter the destination frame on the merged centroid, and hide
        # the source cloud.
        allPointClouds = om.findObjectByName('point clouds')
        if allPointClouds:
            allPointClouds = [i.getProperty('Name') for i in allPointClouds.children()]
        sel =  QtGui.QInputDialog.getItem(None, "Point Cloud Merging", "Pick point cloud to merge into:", allPointClouds, current=0, editable=False)
        sel = om.findObjectByName(sel)

        # Make a copy of each in same frame
        polyDataInto = vtk.vtkPolyData()
        polyDataInto.ShallowCopy(sel.polyData)
        if sel.getChildFrame():
            polyDataInto = segmentation.transformPolyData(polyDataInto, sel.getChildFrame().transform)

        polyDataFrom = vtk.vtkPolyData()
        polyDataFrom.DeepCopy(pointCloudObj.polyData)
        if pointCloudObj.getChildFrame():
            polyDataFrom = segmentation.transformPolyData(polyDataFrom, pointCloudObj.getChildFrame().transform)

        # Actual merge
        append = filterUtils.appendPolyData([polyDataFrom, polyDataInto])
        if sel.getChildFrame():
            polyDataInto = segmentation.transformPolyData(polyDataInto, sel.getChildFrame().transform.GetInverse())

        # resample
        append = segmentationroutines.applyVoxelGrid(append, 0.01)
        append = segmentation.addCoordArraysToPolyData(append)

        # Recenter the frame
        sel.setPolyData(append)
        t = vtk.vtkTransform()
        t.PostMultiply()
        t.Translate(filterUtils.computeCentroid(append))
        segmentation.makeMovable(sel, t)

        # Hide the old one
        if pointCloudObj.getProperty('Name') in allPointClouds:
            pointCloudObj.setProperty('Visible', False)


    def onSegmentTableScene():
        # Segment the cloud into a table plus object clusters and show them.
        data = segmentation.segmentTableScene(pointCloudObj.polyData, pickedPoint)
        vis.showClusterObjects(data.clusters, parent='segmentation')
        segmentation.showTable(data.table, parent='segmentation')


    def onSegmentDrillAlignedWithTable():
        segmentation.segmentDrillAlignedWithTable(pickedPoint, pointCloudObj.polyData)

    def onCachePickedPoint():
        ''' Cache the Picked Point for general purpose use'''
        global lastCachedPickedPoint
        lastCachedPickedPoint = pickedPoint
        #data = segmentation.segmentTableScene(pointCloudObj.polyData, pickedPoint)
        #vis.showClusterObjects(data.clusters + [data.table], parent='segmentation')


    def onLocalPlaneFit():
        # Fit a plane around the picked point, visualize the inliers, and
        # spawn a box affordance matching the fitted extents.
        planePoints, normal = segmentation.applyLocalPlaneFit(pointCloudObj.polyData, pickedPoint, searchRadius=0.1, searchRadiusEnd=0.2)
        obj = vis.showPolyData(planePoints, 'local plane fit', color=[0,1,0])
        obj.setProperty('Point Size', 7)

        fields = segmentation.makePolyDataFields(obj.polyData)

        pose = transformUtils.poseFromTransform(fields.frame)
        desc = dict(classname='BoxAffordanceItem', Name='local plane', Dimensions=list(fields.dims), pose=pose)
        box = segmentation.affordanceManager.newAffordanceFromDescription(desc)

    def onOrientToMajorPlane():
        # Re-express the cloud so its dominant plane is horizontal.
        polyData, planeFrame = segmentation.orientToMajorPlane(pointCloudObj.polyData, pickedPoint=pickedPoint)
        pointCloudObj.setPolyData(polyData)


    def onDiskGlyph():
        result = segmentation.applyDiskGlyphs(pointCloudObj.polyData)
        obj = vis.showPolyData(result, 'disks', color=[0.8,0.8,0.8])
        om.setActiveObject(obj)
        pickedObj.setProperty('Visible', False)

    def onArrowGlyph():
        result = segmentation.applyArrowGlyphs(pointCloudObj.polyData)
        obj = vis.showPolyData(result, 'arrows')

    def onSegmentationEditor():
        segmentationpanel.activateSegmentationMode(pointCloudObj.polyData)

    def addNewFrame():
        # Add a frame at the picked point (same orientation as the
        # affordance frame) and keep it synced to the affordance.
        t = transformUtils.copyFrame(affordanceObj.getChildFrame().transform)
        t.PostMultiply()
        t.Translate(np.array(pickedPoint) - np.array(t.GetPosition()))
        newFrame = vis.showFrame(t, '%s frame %d' % (affordanceObj.getProperty('Name'), len(affordanceObj.children())), scale=0.2, parent=affordanceObj)
        affordanceObj.getChildFrame().getFrameSync().addFrame(newFrame, ignoreIncoming=True)

    def copyAffordance():
        # Duplicate the affordance (fresh uuid) and open its frame editor.
        desc = dict(affordanceObj.getDescription())
        del desc['uuid']
        desc['Name'] = desc['Name'] + ' copy'
        aff = robotSystem.affordanceManager.newAffordanceFromDescription(desc)
        aff.getChildFrame().setProperty('Edit', True)

    def onPromoteToAffordance():
        affObj = affordanceitems.MeshAffordanceItem.promotePolyDataItem(pickedObj)
        robotSystem.affordanceManager.registerAffordance(affObj)


    # Assemble the menu; order determines on-screen order.
    actions = []


    if affordanceObj:
        actions.extend([
            ('Copy affordance', copyAffordance),
            ('Add new frame', addNewFrame),
        ])

    elif type(pickedObj) == vis.PolyDataItem:
        actions.extend([
            ('Promote to Affordance', onPromoteToAffordance),
        ])

    if isGraspSeed(pickedObj):
        actions.extend([
            (None, None),
            ('Flip Side', flipHandSide),
            ('Flip Thumb', flipHandThumb),
        ])

    if reachFrame is not None:
        actions.extend([
            (None, None),
            ('Reach Left', onReachLeft),
            ('Reach Right', onReachRight),
            #('Spline Left', onSplineLeft),
            #('Spline Right', onSplineRight),
            ])

    if pointCloudObj:
        actions.extend([
            (None, None),
            ('Copy Pointcloud', onCopyPointCloud),
            ('Merge Pointcloud Into', onMergeIntoPointCloud),
            ('Segment Ground', onSegmentGround),
            ('Segment Table', onSegmentTableScene),
            ('Segment Drill Aligned', onSegmentDrillAlignedWithTable),
            ('Local Plane Fit', onLocalPlaneFit),
            ('Orient with Horizontal', onOrientToMajorPlane),
            ('Arrow Glyph', onArrowGlyph),
            ('Disk Glyph', onDiskGlyph),
            ('Cache Pick Point', onCachePickedPoint),
            (None, None),
            ('Open Segmentation Editor', onSegmentationEditor)
            ])

    return actions

Example 39

Project: rootpy
Source File: utils.py
View license
def get_limits(plottables,
               xpadding=0,
               ypadding=0.1,
               xerror_in_padding=True,
               yerror_in_padding=True,
               snap=True,
               logx=False,
               logy=False,
               logx_crop_value=1E-5,
               logy_crop_value=1E-5,
               logx_base=10,
               logy_base=10):
    """
    Compute visually pleasing axis limits for a 1D histogram, graph, or
    stack of histograms.

    Parameters
    ----------

    plottables : Hist, Graph, HistStack, or list of such objects
        The object(s) whose data determine the plot boundaries.

    xpadding : float or 2-tuple, optional (default=0)
        Horizontal padding as a fraction of the final plot width.

    ypadding : float or 2-tuple, optional (default=0.1)
        Vertical padding as a fraction of the final plot height.

    xerror_in_padding : bool, optional (default=True)
        If False, x error bars are ignored when measuring the plot width.

    yerror_in_padding : bool, optional (default=True)
        If False, y error bars are ignored when measuring the plot height.

    snap : bool, optional (default=True)
        Snap the vertical range to the x-axis when the data lie entirely
        above or entirely below it; has no effect when the data straddle
        the x-axis.

    logx : bool, optional (default=False)
        The x-axis is log scale.

    logy : bool, optional (default=False)
        The y-axis is log scale.

    logx_crop_value : float, optional (default=1E-5)
        On a logarithmic x-axis, non-positive values are replaced by this.

    logy_crop_value : float, optional (default=1E-5)
        On a logarithmic y-axis, non-positive values are replaced by this.

    logx_base : float, optional (default=10)
        Base of the x-axis logarithmic scale.

    logy_base : float, optional (default=10)
        Base of the y-axis logarithmic scale.

    Returns
    -------

    xmin, xmax, ymin, ymax : tuple of plot boundaries
        The computed x and y-axis ranges.

    """
    # numpy is optional here: fall back to the pure-python helpers
    # multisub/multiadd when it is unavailable.
    try:
        import numpy as np
        use_numpy = True
    except ImportError:
        use_numpy = False

    if not isinstance(plottables, (list, tuple)):
        plottables = [plottables]

    xmin = float('+inf')
    xmax = float('-inf')
    ymin = float('+inf')
    ymax = float('-inf')

    for obj in plottables:

        # A stack contributes through its summed histogram.
        if isinstance(obj, HistStack):
            obj = obj.sum

        if not isinstance(obj, (_Hist, _Graph1DBase)):
            raise TypeError(
                "unable to determine plot axes ranges "
                "from object of type `{0}`".format(
                    type(obj)))

        # Vertical extent, optionally widened by the y error bars.
        if use_numpy:
            y_lo = y_hi = np.array(list(obj.y()))
            if yerror_in_padding:
                y_lo = y_lo - np.array(list(obj.yerrl()))
                y_hi = y_hi + np.array(list(obj.yerrh()))
            _ymin = y_lo.min()
            _ymax = y_hi.max()
        else:
            y_lo = y_hi = list(obj.y())
            if yerror_in_padding:
                y_lo = multisub(y_lo, list(obj.yerrl()))
                y_hi = multiadd(y_hi, list(obj.yerrh()))
            _ymin = min(y_lo)
            _ymax = max(y_hi)

        # Horizontal extent: graphs use their point x values (optionally
        # widened by x error bars); histograms use their outer bin edges.
        if isinstance(obj, _Graph1DBase):
            if use_numpy:
                x_lo = x_hi = np.array(list(obj.x()))
                if xerror_in_padding:
                    x_lo = x_lo - np.array(list(obj.xerrl()))
                    x_hi = x_hi + np.array(list(obj.xerrh()))
                _xmin = x_lo.min()
                _xmax = x_hi.max()
            else:
                x_lo = x_hi = list(obj.x())
                if xerror_in_padding:
                    x_lo = multisub(x_lo, list(obj.xerrl()))
                    x_hi = multiadd(x_hi, list(obj.xerrh()))
                _xmin = min(x_lo)
                _xmax = max(x_hi)
        else:
            _xmin = obj.xedgesl(1)
            _xmax = obj.xedgesh(obj.nbins(0))

        # Crop non-positive values on logarithmic axes.
        if logy:
            _ymin = max(logy_crop_value, _ymin)
            _ymax = max(logy_crop_value, _ymax)
        if logx:
            _xmin = max(logx_crop_value, _xmin)
            _xmax = max(logx_crop_value, _xmax)

        # Fold this object's extent into the running bounds.
        xmin = min(xmin, _xmin)
        xmax = max(xmax, _xmax)
        ymin = min(ymin, _ymin)
        ymax = max(ymax, _ymax)

    if isinstance(xpadding, (list, tuple)):
        if len(xpadding) != 2:
            raise ValueError("xpadding must be of length 2")
        xpadding_left, xpadding_right = xpadding
    else:
        xpadding_left = xpadding_right = xpadding

    if isinstance(ypadding, (list, tuple)):
        if len(ypadding) != 2:
            raise ValueError("ypadding must be of length 2")
        ypadding_top, ypadding_bottom = ypadding
    else:
        ypadding_top = ypadding_bottom = ypadding

    # Apply padding in log space for logarithmic axes so the padding is a
    # constant fraction of the displayed (log-scaled) range.
    if logx:
        x0, x3 = _limits_helper(
            log(xmin, logx_base), log(xmax, logx_base),
            xpadding_left, xpadding_right)
        xmin = logx_base ** x0
        xmax = logx_base ** x3
    else:
        xmin, xmax = _limits_helper(
            xmin, xmax, xpadding_left, xpadding_right)

    if logy:
        y0, y3 = _limits_helper(
            log(ymin, logy_base), log(ymax, logy_base),
            ypadding_bottom, ypadding_top, snap=False)
        ymin = logy_base ** y0
        ymax = logy_base ** y3
    else:
        ymin, ymax = _limits_helper(
            ymin, ymax, ypadding_bottom, ypadding_top, snap=snap)

    return xmin, xmax, ymin, ymax

Example 40

Project: pylon
Source File: ipopf.py
View license
    def solve(self):
        """ Solves AC optimal power flow.

        Formulates the OPF as a non-linear program and passes it to IPOPT
        (via pyipopt), supplying callbacks for the objective (f_fcn), its
        gradient (df_fcn), the non-linear constraints (g_fcn), the
        constraint Jacobian (dg_fcn) and the Hessian of the Lagrangian
        (h_fcn).

        NOTE(review): this code uses Python 2 semantics (print statements,
        integer division with `/`).
        """
        case = self.om.case
        base_mva = case.base_mva
        # TODO: Explain this value.
        self.opt["cost_mult"] = 1e-4

        # Unpack the OPF model.
        bs, ln, gn, _ = self._unpack_model(self.om)
        # Compute problem dimensions.
        ipol, _, nb, nl, _, ny, nxyz = self._dimension_data(bs, ln, gn)

        # Compute problem dimensions.
        ng = len(gn)
#        gpol = [g for g in gn if g.pcost_model == POLYNOMIAL]
        # Indexes of constrained lines.
        il = array([i for i,l in enumerate(ln) if 0.0 < l.rate_a < 1e10])
        nl2 = len(il)

        # Linear constraints (l <= A*x <= u).
        A, l, u = self.om.linear_constraints()
#        AA, bb = self._linear_constraints(self.om)

        _, xmin, xmax = self._var_bounds()

        # Select an interior initial point for interior point solver.
        x0 = self._initial_interior_point(bs, gn, xmin, xmax, ny)

        # Build admittance matrices.
        Ybus, Yf, Yt = case.Y

        # Optimisation variables.
        Va = self.om.get_var("Va")
        Vm = self.om.get_var("Vm")
        Pg = self.om.get_var("Pg")
        Qg = self.om.get_var("Qg")

        # Adds a constraint on the reference bus angles.
#        xmin, xmax = self._ref_bus_angle_constraint(bs, Va, xmin, xmax)

        def f_fcn(x, user_data=None):
            """ Evaluates the objective function.
            """
            p_gen = x[Pg.i1:Pg.iN + 1] # Active generation in p.u.
            q_gen = x[Qg.i1:Qg.iN + 1] # Reactive generation in p.u.

            # Polynomial cost of P and Q.
            xx = r_[p_gen, q_gen] * base_mva
            if len(ipol) > 0:
                f = sum([g.total_cost(xx[i]) for i,g in enumerate(gn)])
            else:
                f = 0

            # Piecewise linear cost of P and Q.
            if ny:
                y = self.om.get_var("y")
                ccost = csr_matrix((ones(ny),
                    (range(y.i1, y.iN + 1), zeros(ny))), shape=(nxyz, 1)).T
                f = f + ccost * x
            else:
                ccost = zeros((1, nxyz))
                # TODO: Generalised cost term.

            return f


        def df_fcn(x, usr_data=None):
            """ Calculates gradient of the objective function.
            """
            p_gen = x[Pg.i1:Pg.iN + 1] # Active generation in p.u.
            q_gen = x[Qg.i1:Qg.iN + 1] # Reactive generation in p.u.

            xx = r_[p_gen, q_gen] * base_mva

            if ny > 0:
                y = self.om.get_var("y")
                iy = range(y.i1, y.iN + 1)
                ccost = \
                    csr_matrix((ones(ny), (iy, zeros(ny))), shape=(nxyz, 1)).T
            else:
                ccost = zeros((1, nxyz))
                # TODO: Generalised cost term.

            iPg = range(Pg.i1, Pg.iN + 1)
            iQg = range(Qg.i1, Qg.iN + 1)

            # Polynomial cost of P and Q.
            df_dPgQg = zeros((2 * ng, 1))        # w.r.t p.u. Pg and Qg
#            df_dPgQg[ipol] = matrix([g.poly_cost(xx[i], 1) for g in gpol])
#            for i, g in enumerate(gn):
#                der = polyder(list(g.p_cost))
#                df_dPgQg[i] = polyval(der, xx[i]) * base_mva
            for i in ipol:
                df_dPgQg[i] = \
                    base_mva * polyval(polyder(list(gn[i].p_cost)), xx[i])

            df = zeros((nxyz, 1))
            df[iPg] = df_dPgQg[:ng]
            df[iQg] = df_dPgQg[ng:ng + ng]

            # Piecewise linear cost of P and Q.
            df = df + ccost.T
            # TODO: Generalised cost term.

            return asarray(df).flatten()


        def g_fcn(x, usr_data=None):
            """ Evaluates the non-linear constraint values.
            """
            Pgen = x[Pg.i1:Pg.iN + 1] # Active generation in p.u.
            Qgen = x[Qg.i1:Qg.iN + 1] # Reactive generation in p.u.

            for i, g in enumerate(gn):
                g.p = Pgen[i] * base_mva # active generation in MW
                g.q = Qgen[i] * base_mva # reactive generation in MVAr

            # Rebuild the net complex bus power injection vector in p.u.
            Sbus = case.getSbus(bs)

            Vang = x[Va.i1:Va.iN + 1]
            Vmag = x[Vm.i1:Vm.iN + 1]
            V = Vmag * exp(1j * Vang)

            # Evaluate the power flow equations.
            mis = V * conj(Ybus * V) - Sbus

            # Equality constraints (power flow).
            g = r_[mis.real,  # active power mismatch for all buses
                   mis.imag]  # reactive power mismatch for all buses

            # Inequality constraints (branch flow limits).
            # (line constraint is actually on square of limit)
            flow_max = array([(l.rate_a / base_mva)**2 for l in ln])
            # FIXME: There must be a more elegant method for this.
            for i, v in enumerate(flow_max):
                if v == 0.0:
                    flow_max[i] = Inf

            if self.flow_lim == IFLOW:
                If = Yf * V
                It = Yt * V
                # Branch current limits.
                # NOTE(review): the second row uses If * conj(It); the
                # "to"-end limit would normally be It * conj(It) — confirm.
                h = r_[(If * conj(If)) - flow_max,
                       (If * conj(It)) - flow_max]
            else:
                i_fbus = [e.from_bus._i for e in ln]
                i_tbus = [e.to_bus._i for e in ln]
                # Complex power injected at "from" bus (p.u.).
                Sf = V[i_fbus] * conj(Yf * V)
                # Complex power injected at "to" bus (p.u.).
                St = V[i_tbus] * conj(Yt * V)
                if self.flow_lim == PFLOW: # active power limit, P (Pan Wei)
                    # Branch real power limits.
                    h = r_[Sf.real()**2 - flow_max,
                           St.real()**2 - flow_max]
                elif self.flow_lim == SFLOW: # apparent power limit, |S|
                    # Branch apparent power limits.
                    h = r_[(Sf * conj(Sf)) - flow_max,
                           (St * conj(St)) - flow_max].real
                else:
                    raise ValueError

            return r_[g, h]


        def dg_fcn(x, flag, usr_data=None):
            """ Calculates the Jacobian matrix. It takes two arguments, the
                first is the variable x and the second is a Boolean flag. If
                the flag is true, the function returns a tuple of arrays
                (row, col) to indicate the sparse structure of the Jacobian
                matrix. If the flag is false the function returns the values
                of the Jacobian matrix with length nnzj.
            """
            iVa = range(Va.i1, Va.iN + 1)
            iVm = range(Vm.i1, Vm.iN + 1)
            iPg = range(Pg.i1, Pg.iN + 1)
            iQg = range(Qg.i1, Qg.iN + 1)
            iVaVmPgQg = r_[iVa, iVm, iPg, iQg].T

            Vang = x[Va.i1:Va.iN + 1]
            Vmag = x[Vm.i1:Vm.iN + 1]
            V = Vmag * exp(1j * Vang)

            # Compute partials of injected bus powers.
            dSbus_dVm, dSbus_dVa = case.dSbus_dV(Ybus, V)

            i_gbus = [gen.bus._i for gen in gn]
            neg_Cg = csr_matrix((-ones(ng), (i_gbus, range(ng))), (nb, ng))

            # Transposed Jacobian of the power balance equality constraints.
            dg = lil_matrix((nxyz, 2 * nb))

            blank = csr_matrix((nb, ng))
            dg[iVaVmPgQg, :] = vstack([
                hstack([dSbus_dVa.real, dSbus_dVm.real, neg_Cg, blank]),
                hstack([dSbus_dVa.imag, dSbus_dVm.imag, blank, neg_Cg])
            ], "csr").T

            # Compute partials of flows w.r.t V.
            if self.flow_lim == IFLOW:
                dFf_dVa, dFf_dVm, dFt_dVa, dFt_dVm, Ff, Ft = \
                    case.dIbr_dV(Yf, Yt, V)
            else:
                dFf_dVa, dFf_dVm, dFt_dVa, dFt_dVm, Ff, Ft = \
                    case.dSbr_dV(Yf, Yt, V, bs, ln)
            if self.flow_lim == PFLOW:
                dFf_dVa = dFf_dVa.real
                dFf_dVm = dFf_dVm.real
                dFt_dVa = dFt_dVa.real
                dFt_dVm = dFt_dVm.real
                Ff = Ff.real
                Ft = Ft.real

            # Squared magnitude of flow (complex power, current or real power).
            df_dVa, df_dVm, dt_dVa, dt_dVm = \
                case.dAbr_dV(dFf_dVa, dFf_dVm, dFt_dVa, dFt_dVm, Ff, Ft)

            # Construct Jacobian of inequality constraints (branch limits) and
            # transpose it.
            dh = lil_matrix((nxyz, 2 * nl))
            dh[r_[iVa, iVm].T, :] = vstack([hstack([df_dVa, df_dVm]),
                                            hstack([dt_dVa, dt_dVm])], "csr").T

            # Stack equality, inequality and linear constraint Jacobians.
            J = vstack([dg, dh, A]).tocoo()

            if flag:
                return (J.row, J.col)
            else:
                return J.data


        def h_fcn(x, lagrange, obj_factor, flag, usr_data=None):
            """ Evaluates the Hessian of the Lagrangian.
            """
            neqnln = 2 * nb
            niqnln = 2 * len(il) # no. of lines with constraints

            Pgen = x[Pg.i1:Pg.iN + 1] # Active generation in p.u.
            Qgen = x[Qg.i1:Qg.iN + 1] # Reactive generation in p.u.

            for i, g in enumerate(gn):
                g.p = Pgen[i] * base_mva # active generation in MW
                g.q = Qgen[i] * base_mva # reactive generation in MVAr

            Vang = x[Va.i1:Va.iN + 1]
            Vmag = x[Vm.i1:Vm.iN + 1]
            V = Vmag * exp(1j * Vang)
            nxtra = nxyz - 2 * nb

            #------------------------------------------------------------------
            #  Evaluate d2f.
            #------------------------------------------------------------------

            d2f_dPg2 = lil_matrix((ng, 1)) # w.r.t p.u. Pg
            d2f_dQg2 = lil_matrix((ng, 1)) # w.r.t p.u. Qg]

            for i in ipol:
                d2f_dPg2[i, 0] = polyval(polyder(list(gn[i].p_cost), 2),
                                         Pg.v0[i] * base_mva) * base_mva**2
#            for i in ipol:
#                d2f_dQg2[i] = polyval(polyder(list(gn[i].p_cost), 2),
#                                      Qg.v0[i] * base_mva) * base_mva**2

            i = r_[range(Pg.i1, Pg.iN + 1), range(Qg.i1, Qg.iN + 1)]

            d2f = csr_matrix((vstack([d2f_dPg2, d2f_dQg2]).toarray().flatten(),
                              (i, i)), shape=(nxyz, nxyz))
            # TODO: Generalised cost model.
            d2f = d2f * self.opt["cost_mult"]

            #------------------------------------------------------------------
            #  Evaluate Hessian of power balance constraints.
            #------------------------------------------------------------------

            eqnonlin = lagrange[:neqnln]
#            nlam = len(lagrange["eqnonlin"]) / 2
            # NOTE(review): integer division relies on Python 2 `/` semantics.
            nlam = len(eqnonlin) / 2
            lamP = eqnonlin[:nlam]
            lamQ = eqnonlin[nlam:nlam + nlam]
            Gpaa, Gpav, Gpva, Gpvv = case.d2Sbus_dV2(Ybus, V, lamP)
            Gqaa, Gqav, Gqva, Gqvv = case.d2Sbus_dV2(Ybus, V, lamQ)

            d2G = vstack([
                hstack([
                    vstack([hstack([Gpaa, Gpav]),
                            hstack([Gpva, Gpvv])]).real +
                    vstack([hstack([Gqaa, Gqav]),
                            hstack([Gqva, Gqvv])]).imag,
                    csr_matrix((2 * nb, nxtra))]),
                hstack([
                    csr_matrix((nxtra, 2 * nb)),
                    csr_matrix((nxtra, nxtra))
                ])
            ], "csr")

            #------------------------------------------------------------------
            #  Evaluate Hessian of flow constraints.
            #------------------------------------------------------------------

            ineqnonlin = lagrange[neqnln:neqnln + niqnln]
            nmu = len(ineqnonlin) / 2
            muF = ineqnonlin[:nmu]
            muT = ineqnonlin[nmu:nmu + nmu]
            # NOTE(review): compares against the literal "I" here, whereas the
            # other callbacks compare against the IFLOW constant — confirm
            # these are equivalent.
            if self.flow_lim == "I":
                dIf_dVa, dIf_dVm, dIt_dVa, dIt_dVm, If, It = \
                    case.dIbr_dV(Yf, Yt, V)
                Hfaa, Hfav, Hfva, Hfvv = \
                    case.d2AIbr_dV2(dIf_dVa, dIf_dVm, If, Yf, V, muF)
                Htaa, Htav, Htva, Htvv = \
                    case.d2AIbr_dV2(dIt_dVa, dIt_dVm, It, Yt, V, muT)
            else:
                f = [e.from_bus._i for e in ln]
                t = [e.to_bus._i for e in ln]
                # Line-bus connection matrices.
                Cf = csr_matrix((ones(nl), (range(nl), f)), (nl, nb))
                Ct = csr_matrix((ones(nl), (range(nl), t)), (nl, nb))
                dSf_dVa, dSf_dVm, dSt_dVa, dSt_dVm, Sf, St = \
                    case.dSbr_dV(Yf, Yt, V)
                if self.flow_lim == PFLOW:
                    Hfaa, Hfav, Hfva, Hfvv = \
                        case.d2ASbr_dV2(dSf_dVa.real(), dSf_dVm.real(),
                                        Sf.real(), Cf, Yf, V, muF)
                    Htaa, Htav, Htva, Htvv = \
                        case.d2ASbr_dV2(dSt_dVa.real(), dSt_dVm.real(),
                                        St.real(), Ct, Yt, V, muT)
                elif self.flow_lim == SFLOW:
                    Hfaa, Hfav, Hfva, Hfvv = \
                        case.d2ASbr_dV2(dSf_dVa, dSf_dVm, Sf, Cf, Yf, V, muF)
                    Htaa, Htav, Htva, Htvv = \
                        case.d2ASbr_dV2(dSt_dVa, dSt_dVm, St, Ct, Yt, V, muT)
                else:
                    raise ValueError

            d2H = vstack([
                hstack([
                    vstack([hstack([Hfaa, Hfav]),
                            hstack([Hfva, Hfvv])]) +
                    vstack([hstack([Htaa, Htav]),
                            hstack([Htva, Htvv])]),
                    csr_matrix((2 * nb, nxtra))
                ]),
                hstack([
                    csr_matrix((nxtra, 2 * nb)),
                    csr_matrix((nxtra, nxtra))
                ])
            ], "csr")

            # Hessian of the Lagrangian: objective + equality + inequality.
            H = d2f + d2G + d2H

            if flag:
                return (H.row, H.col)
            else:
                return H.data

        n = len(x0) # the number of variables
        gl = r_[zeros(2 * nb), -Inf * ones(2 * nl2), l]
        gu = r_[zeros(2 * nb),       zeros(2 * nl2), u]
        m = len(gl) # the number of constraints
        nnzj = 0 # the number of nonzeros in Jacobian matrix
        nnzh = 0 # the number of non-zeros in Hessian matrix

        nlp = pyipopt.create(n, xmin, xmax, m, gl, gu, nnzj, nnzh,
                             f_fcn, df_fcn, g_fcn, dg_fcn, h_fcn)

#        x, zl, zu, obj = nlp.solve(x0)
        success = nlp.solve(x0)
        nlp.close()

        print "Success:", success
        print "Solution of the primal variables, x"
#        print x
        print "Solution of the bound multipliers, z_L and z_U"
#        print zl, zu
        print "Objective value"

Example 41

Project: pylon
Source File: pips.py
View license
def pips(f_fcn, x0, A=None, l=None, u=None, xmin=None, xmax=None,
         gh_fcn=None, hess_fcn=None, opt=None):
    """Primal-dual interior point method for NLP (non-linear programming).
    Minimize a function F(X) beginning from a starting point M{x0}, subject to
    optional linear and non-linear constraints and variable bounds::

            min f(x)
             x

    subject to::

            g(x) = 0            (non-linear equalities)
            h(x) <= 0           (non-linear inequalities)
            l <= A*x <= u       (linear constraints)
            xmin <= x <= xmax   (variable bounds)

    Note: The calling syntax is almost identical to that of FMINCON from
    MathWorks' Optimization Toolbox. The main difference is that the linear
    constraints are specified with C{A}, C{L}, C{U} instead of C{A}, C{B},
    C{Aeq}, C{Beq}. The functions for evaluating the objective function,
    constraints and Hessian are identical.

    Example from U{http://en.wikipedia.org/wiki/Nonlinear_programming}:
        >>> from numpy import array, r_, float64, dot
        >>> from scipy.sparse import csr_matrix
        >>> def f2(x):
        ...     f = -x[0] * x[1] - x[1] * x[2]
        ...     df = -r_[x[1], x[0] + x[2], x[1]]
        ...     # actually not used since 'hess_fcn' is provided
        ...     d2f = -array([[0, 1, 0], [1, 0, 1], [0, 1, 0]], float64)
        ...     return f, df, d2f
        >>> def gh2(x):
        ...     h = dot(array([[1, -1, 1],
        ...                    [1,  1, 1]]), x**2) + array([-2.0, -10.0])
        ...     dh = 2 * csr_matrix(array([[ x[0], x[0]],
        ...                                [-x[1], x[1]],
        ...                                [ x[2], x[2]]]))
        ...     g = array([])
        ...     dg = None
        ...     return h, g, dh, dg
        >>> def hess2(x, lam):
        ...     mu = lam["ineqnonlin"]
        ...     a = r_[dot(2 * array([1, 1]), mu), -1, 0]
        ...     b = r_[-1, dot(2 * array([-1, 1]),mu),-1]
        ...     c = r_[0, -1, dot(2 * array([1, 1]),mu)]
        ...     Lxx = csr_matrix(array([a, b, c]))
        ...     return Lxx
        >>> x0 = array([1, 1, 0], float64)
        >>> solution = pips(f2, x0, gh_fcn=gh2, hess_fcn=hess2)
        >>> round(solution["f"], 11) == -7.07106725919
        True
        >>> solution["output"]["iterations"]
        8

    Ported by Richard Lincoln from the MATLAB Interior Point Solver (MIPS)
    (v1.9) by Ray Zimmerman.  MIPS is distributed as part of the MATPOWER
    project, developed at the Power System Engineering Research Center (PSERC),
    Cornell. See U{http://www.pserc.cornell.edu/matpower/} for more info.
    MIPS was ported by Ray Zimmerman from C code written by H. Wang for his
    PhD dissertation:
      - "On the Computation and Application of Multi-period
        Security-Constrained Optimal Power Flow for Real-time
        Electricity Market Operations", Cornell University, May 2007.

    See also:
      - H. Wang, C. E. Murillo-Sanchez, R. D. Zimmerman, R. J. Thomas,
        "On Computational Issues of Market-Based Optimal Power Flow",
        IEEE Transactions on Power Systems, Vol. 22, No. 3, Aug. 2007,
        pp. 1185-1193.

    All parameters are optional except C{f_fcn} and C{x0}.
    @param f_fcn: Function that evaluates the objective function, its gradients
                  and Hessian for a given value of M{x}. If there are
                  non-linear constraints, the Hessian information is provided
                  by the 'hess_fcn' argument and is not required here.
    @type f_fcn: callable
    @param x0: Starting value of optimization vector M{x}.
    @type x0: array
    @param A: Optional linear constraints.
    @type A: csr_matrix
    @param l: Optional linear constraints. Default values are M{-Inf}.
    @type l: array
    @param u: Optional linear constraints. Default values are M{Inf}.
    @type u: array
    @param xmin: Optional lower bounds on the M{x} variables, defaults are
                 M{-Inf}.
    @type xmin: array
    @param xmax: Optional upper bounds on the M{x} variables, defaults are
                 M{Inf}.
    @type xmax: array
    @param gh_fcn: Function that evaluates the optional non-linear constraints
                   and their gradients for a given value of M{x}.
    @type gh_fcn: callable
    @param hess_fcn: Handle to function that computes the Hessian of the
                     Lagrangian for given values of M{x}, M{lambda} and M{mu},
                     where M{lambda} and M{mu} are the multipliers on the
                     equality and inequality constraints, M{g} and M{h},
                     respectively.
    @type hess_fcn: callable
    @param opt: optional options dictionary with the following keys, all of
                which are also optional (default values shown in parentheses)
                  - C{verbose} (False) - Controls level of progress output
                    displayed
                  - C{feastol} (1e-6) - termination tolerance for feasibility
                    condition
                  - C{gradtol} (1e-6) - termination tolerance for gradient
                    condition
                  - C{comptol} (1e-6) - termination tolerance for
                    complementarity condition
                  - C{costtol} (1e-6) - termination tolerance for cost
                    condition
                  - C{max_it} (150) - maximum number of iterations
                  - C{step_control} (False) - set to True to enable step-size
                    control
                  - C{max_red} (20) - maximum number of step-size reductions if
                    step-control is on
                  - C{cost_mult} (1.0) - cost multiplier used to scale the
                    objective function for improved conditioning. Note: The
                    same value must also be passed to the Hessian evaluation
                    function so that it can appropriately scale the objective
                    function term in the Hessian of the Lagrangian.
    @type opt: dict

    @rtype: dict
    @return: The solution dictionary has the following keys:
               - C{x} - solution vector
               - C{f} - final objective function value
               - C{converged} - exit status
                   - True = first order optimality conditions satisfied
                   - False = maximum number of iterations reached
                   - None = numerically failed
               - C{output} - output dictionary with keys:
                   - C{iterations} - number of iterations performed
                   - C{history} - dictionary of dicts with trajectories of the
                     following: feascond, gradcond, compcond, costcond, gamma,
                     stepsize, obj, alphap, alphad
                   - C{message} - exit message
               - C{lmbda} - dictionary containing the Lagrange and Kuhn-Tucker
                 multipliers on the constraints, with keys:
                   - C{eqnonlin} - non-linear equality constraints
                   - C{ineqnonlin} - non-linear inequality constraints
                   - C{mu_l} - lower (left-hand) limit on linear constraints
                   - C{mu_u} - upper (right-hand) limit on linear constraints
                   - C{lower} - lower bound on optimization variables
                   - C{upper} - upper bound on optimization variables

    @license: Apache License version 2.0
    """
    nx = x0.shape[0]                        # number of variables
    nA = A.shape[0] if A is not None else 0 # number of original linear constr

    # default argument values
    l = -Inf * ones(nA) if l is None else l
    u =  Inf * ones(nA) if u is None else u
    xmin = -Inf * ones(x0.shape[0]) if xmin is None else xmin
    xmax =  Inf * ones(x0.shape[0]) if xmax is None else xmax
    if gh_fcn is None:
        nonlinear = False
        # empty placeholders so neqnln/niqnln below evaluate to zero
        gn = array([])
        hn = array([])
    else:
        nonlinear = True

    opt = {} if opt is None else opt
    # options ("key not in opt" instead of dict.has_key: works on both
    # Python 2 and Python 3; has_key was removed in Python 3)
    if "feastol" not in opt:
        opt["feastol"] = 1e-06
    if "gradtol" not in opt:
        opt["gradtol"] = 1e-06
    if "comptol" not in opt:
        opt["comptol"] = 1e-06
    if "costtol" not in opt:
        opt["costtol"] = 1e-06
    if "max_it" not in opt:
        opt["max_it"] = 150
    if "max_red" not in opt:
        opt["max_red"] = 20
    if "step_control" not in opt:
        opt["step_control"] = False
    if "cost_mult" not in opt:
        opt["cost_mult"] = 1
    if "verbose" not in opt:
        opt["verbose"] = False

    # initialize history
    hist = {}

    # constants
    xi = 0.99995                # step-length back-off factor
    sigma = 0.1                 # centering parameter
    z0 = 1                      # initial slack/multiplier magnitude
    alpha_min = 1e-8            # below this step size, declare numeric failure
    mu_threshold = 1e-5         # multipliers below this on slack constraints -> 0

    # initialize
    i = 0                       # iteration counter
    converged = False           # flag
    eflag = False               # exit flag

    # add var limits to linear constraints
    eyex = eye(nx, nx, format="csr")
    AA = eyex if A is None else vstack([eyex, A], "csr")
    ll = r_[xmin, l]
    uu = r_[xmax, u]

    # split up linear constraints: equalities, lower-only, upper-only, boxed
    ieq = flatnonzero( absolute(uu - ll) <= EPS )
    igt = flatnonzero( (uu >=  1e10) & (ll > -1e10) )
    ilt = flatnonzero( (ll <= -1e10) & (uu <  1e10) )
    ibx = flatnonzero( (absolute(uu - ll) > EPS) & (uu < 1e10) & (ll > -1e10) )
    # zero-sized sparse matrices unsupported
    Ae = AA[ieq, :] if len(ieq) else None
    if len(ilt) or len(igt) or len(ibx):
        idxs = [(1, ilt), (-1, igt), (1, ibx), (-1, ibx)]
        Ai = vstack([sig * AA[idx, :] for sig, idx in idxs if len(idx)])
    else:
        Ai = None
    # NOTE: uu is 1-D, so plain integer indexing is required here; the
    # previous "uu[ieq, :]" raised IndexError (too many indices for 1-D array)
    be = uu[ieq]
    bi = r_[uu[ilt], -ll[igt], uu[ibx], -ll[ibx]]

    # evaluate cost f(x0) and constraints g(x0), h(x0)
    x = x0
    f, df, _ = f_fcn(x)                 # cost
    f = f * opt["cost_mult"]
    df = df * opt["cost_mult"]
    if nonlinear:
        hn, gn, dhn, dgn = gh_fcn(x)        # non-linear constraints
        h = hn if Ai is None else r_[hn, Ai * x - bi] # inequality constraints
        g = gn if Ae is None else r_[gn, Ae * x - be] # equality constraints

        if (dhn is None) and (Ai is None):
            dh = None
        elif dhn is None:
            dh = Ai.T
        elif Ai is None:    # BUGFIX: was "Ae is None" (copy-paste from dg case)
            dh = dhn
        else:
            dh = hstack([dhn, Ai.T])

        if (dgn is None) and (Ae is None):
            dg = None
        elif dgn is None:
            dg = Ae.T
        elif Ae is None:
            dg = dgn
        else:
            dg = hstack([dgn, Ae.T])
    else:
        h = -bi if Ai is None else Ai * x - bi        # inequality constraints
        g = -be if Ae is None else Ae * x - be        # equality constraints
        dh = None if Ai is None else Ai.T     # 1st derivative of inequalities
        dg = None if Ae is None else Ae.T     # 1st derivative of equalities

    # some dimensions
    neq = g.shape[0]           # number of equality constraints
    niq = h.shape[0]           # number of inequality constraints
    neqnln = gn.shape[0]       # number of non-linear equality constraints
    niqnln = hn.shape[0]       # number of non-linear inequality constraints
    nlt = len(ilt)             # number of upper bounded linear inequalities
    ngt = len(igt)             # number of lower bounded linear inequalities
    nbx = len(ibx)             # number of doubly bounded linear inequalities

    # initialize gamma, lam, mu, z, e
    gamma = 1                  # barrier coefficient
    lam = zeros(neq)
    z = z0 * ones(niq)
    mu = z0 * ones(niq)
    k = flatnonzero(h < -z0)
    z[k] = -h[k]               # start slacks on the constraint surface
    k = flatnonzero((gamma / z) > z0)
    mu[k] = gamma / z[k]       # keep z * mu near the barrier parameter
    e = ones(niq)

    # check tolerance
    f0 = f

    # gradient of the Lagrangian
    Lx = df
    Lx = Lx + dg * lam if dg is not None else Lx
    Lx = Lx + dh * mu  if dh is not None else Lx

    gnorm = norm(g, Inf) if len(g) else 0.0
    lam_norm = norm(lam, Inf) if len(lam) else 0.0
    mu_norm = norm(mu, Inf) if len(mu) else 0.0
    feascond = \
        max([gnorm, max(h)]) / (1 + max([norm(x, Inf), norm(z, Inf)]))
    gradcond = \
        norm(Lx, Inf) / (1 + max([lam_norm, mu_norm]))
    compcond = dot(z, mu) / (1 + norm(x, Inf))
    costcond = absolute(f - f0) / (1 + absolute(f0))

    # save history
    hist[i] = {'feascond': feascond, 'gradcond': gradcond,
        'compcond': compcond, 'costcond': costcond, 'gamma': gamma,
        'stepsize': 0, 'obj': f / opt["cost_mult"], 'alphap': 0, 'alphad': 0}

    if opt["verbose"]:
        # single-argument print(...) is valid on both Python 2 and Python 3
        print(" it    objective   step size   feascond     gradcond     "
              "compcond     costcond  ")
        print("----  ------------ --------- ------------ ------------ "
              "------------ ------------")
        print("%3d  %12.8g %10s %12g %12g %12g %12g" %
            (i, (f / opt["cost_mult"]), "",
             feascond, gradcond, compcond, costcond))

    if feascond < opt["feastol"] and gradcond < opt["gradtol"] and \
        compcond < opt["comptol"] and costcond < opt["costtol"]:
        converged = True
        if opt["verbose"]:
            print("Converged!")

    # do Newton iterations
    while (not converged and i < opt["max_it"]):
        # update iteration counter
        i += 1

        # compute update step
        lmbda = {"eqnonlin": lam[range(neqnln)],
                 "ineqnonlin": mu[range(niqnln)]}
        if nonlinear:
            if hess_fcn is None:
                print("pips: Hessian evaluation via finite differences "
                      "not yet implemented.\nPlease provide "
                      "your own hessian evaluation function.")
            Lxx = hess_fcn(x, lmbda)
        else:
            _, _, d2f = f_fcn(x)      # cost
            Lxx = d2f * opt["cost_mult"]
        rz = range(len(z))
        zinvdiag = csr_matrix((1.0 / z, (rz, rz))) if len(z) else None
        rmu = range(len(mu))
        mudiag = csr_matrix((mu, (rmu, rmu))) if len(mu) else None
        dh_zinv = None if dh is None else dh * zinvdiag
        # reduced KKT system (slacks and ineq. multipliers eliminated)
        M = Lxx if dh is None else Lxx + dh_zinv * mudiag * dh.T
        N = Lx if dh is None else Lx + dh_zinv * (mudiag * h + gamma * e)

        Ab = M if dg is None else vstack([
            hstack([M, dg]),
            hstack([dg.T, csr_matrix((neq, neq))])
        ])
        bb = r_[-N, -g]

        dxdlam = spsolve(Ab.tocsr(), bb)

        dx = dxdlam[:nx]
        dlam = dxdlam[nx:nx + neq]
        # back-substitute for slack and inequality-multiplier steps
        dz = -h - z if dh is None else -h - z - dh.T * dx
        dmu = -mu if dh is None else -mu + zinvdiag * (gamma * e - mudiag * dz)

        # optional step-size control: the MIPS step-control logic has not
        # been ported yet (see MATPOWER's mips.m for the reference algorithm)
        if opt["step_control"]:
            raise NotImplementedError("step-size control is not implemented")

        # do the update: largest primal/dual steps keeping z, mu > 0
        k = flatnonzero(dz < 0.0)
        alphap = min([xi * min(z[k] / -dz[k]), 1]) if len(k) else 1.0
        k = flatnonzero(dmu < 0.0)
        alphad = min([xi * min(mu[k] / -dmu[k]), 1]) if len(k) else 1.0
        x = x + alphap * dx
        z = z + alphap * dz
        lam = lam + alphad * dlam
        mu = mu + alphad * dmu
        if niq > 0:
            gamma = sigma * dot(z, mu) / niq    # shrink barrier parameter

        # evaluate cost, constraints, derivatives
        f, df, _ = f_fcn(x)             # cost
        f = f * opt["cost_mult"]
        df = df * opt["cost_mult"]
        if nonlinear:
            hn, gn, dhn, dgn = gh_fcn(x)                   # nln constraints
            h = hn if Ai is None else r_[hn, Ai * x - bi] # ieq constr
            g = gn if Ae is None else r_[gn, Ae * x - be]  # eq constr

            if (dhn is None) and (Ai is None):
                dh = None
            elif dhn is None:
                dh = Ai.T
            elif Ai is None:  # BUGFIX: was "Ae is None" (copy-paste)
                dh = dhn
            else:
                dh = hstack([dhn, Ai.T])

            if (dgn is None) and (Ae is None):
                dg = None
            elif dgn is None:
                dg = Ae.T
            elif Ae is None:
                dg = dgn
            else:
                dg = hstack([dgn, Ae.T])
        else:
            h = -bi if Ai is None else Ai * x - bi    # inequality constraints
            g = -be if Ae is None else Ae * x - be    # equality constraints
            # 1st derivatives are constant, still dh = Ai.T, dg = Ae.T

        Lx = df
        Lx = Lx + dg * lam if dg is not None else Lx
        Lx = Lx + dh * mu  if dh is not None else Lx

        gnorm = norm(g, Inf) if len(g) else 0.0
        lam_norm = norm(lam, Inf) if len(lam) else 0.0
        mu_norm = norm(mu, Inf) if len(mu) else 0.0
        feascond = \
            max([gnorm, max(h)]) / (1+max([norm(x, Inf), norm(z, Inf)]))
        gradcond = \
            norm(Lx, Inf) / (1 + max([lam_norm, mu_norm]))
        compcond = dot(z, mu) / (1 + norm(x, Inf))
        costcond = float(absolute(f - f0) / (1 + absolute(f0)))

        hist[i] = {'feascond': feascond, 'gradcond': gradcond,
            'compcond': compcond, 'costcond': costcond, 'gamma': gamma,
            'stepsize': norm(dx), 'obj': f / opt["cost_mult"],
            'alphap': alphap, 'alphad': alphad}

        if opt["verbose"]:
            print("%3d  %12.8g %10.5g %12g %12g %12g %12g" %
                (i, (f / opt["cost_mult"]), norm(dx), feascond, gradcond,
                 compcond, costcond))

        if feascond < opt["feastol"] and gradcond < opt["gradtol"] and \
            compcond < opt["comptol"] and costcond < opt["costtol"]:
            converged = True
            if opt["verbose"]:
                print("Converged!")
        else:
            if any(isnan(x)) or (alphap < alpha_min) or \
                (alphad < alpha_min) or (gamma < EPS) or (gamma > 1.0 / EPS):
                if opt["verbose"]:
                    print("Numerically failed.")
                eflag = -1
                break
            f0 = f

    if opt["verbose"]:
        if not converged:
            print("Did not converge in %d iterations." % i)

    # package results
    if eflag != -1:
        eflag = converged

    if eflag == 0:
        message = 'Did not converge'
    elif eflag == 1:
        message = 'Converged'
    elif eflag == -1:
        message = 'Numerically failed'
    else:
        # BUGFIX: was a bare "raise" with no active exception, which itself
        # raises "No active exception to re-raise" instead of a useful error
        raise ValueError("unexpected exit flag: %s" % eflag)

    output = {"iterations": i, "history": hist, "message": message}

    # zero out multipliers on non-binding constraints
    mu[flatnonzero( (h < -opt["feastol"]) & (mu < mu_threshold) )] = 0.0

    # un-scale cost and prices
    f = f / opt["cost_mult"]
    lam = lam / opt["cost_mult"]
    mu = mu / opt["cost_mult"]

    # re-package multipliers into struct
    lam_lin = lam[neqnln:neq]           # lambda for linear constraints
    mu_lin = mu[niqnln:niq]             # mu for linear constraints
    kl = flatnonzero(lam_lin < 0.0)     # lower bound binding
    ku = flatnonzero(lam_lin > 0.0)     # upper bound binding

    mu_l = zeros(nx + nA)
    mu_l[ieq[kl]] = -lam_lin[kl]
    mu_l[igt] = mu_lin[nlt:nlt + ngt]
    mu_l[ibx] = mu_lin[nlt + ngt + nbx:nlt + ngt + nbx + nbx]

    mu_u = zeros(nx + nA)
    mu_u[ieq[ku]] = lam_lin[ku]
    mu_u[ilt] = mu_lin[:nlt]
    mu_u[ibx] = mu_lin[nlt + ngt:nlt + ngt + nbx]

    # first nx entries of mu_l/mu_u correspond to the variable bounds
    lmbda = {'mu_l': mu_l[nx:], 'mu_u': mu_u[nx:],
             'lower': mu_l[:nx], 'upper': mu_u[:nx]}

    if niqnln > 0:
        lmbda['ineqnonlin'] = mu[:niqnln]
    if neqnln > 0:
        lmbda['eqnonlin'] = lam[:neqnln]

    solution =  {"x": x, "f": f, "converged": converged,
                 "lmbda": lmbda, "output": output}

    return solution

Example 42

Project: pylon
Source File: estimator.py
View license
    def run(self):
        """ Solves a state estimation problem.

        Performs weighted least-squares state estimation by Gauss-Newton
        iteration: repeatedly linearises the measurement equations around the
        current voltage estimate and solves the normal equations
        M{H'*Rinv*H * dx = H'*Rinv*(z - z_est)} until the mismatch norm drops
        below C{self.tolerance} or C{self.max_iter} is reached.

        Returns a dict with keys: C{V} (complex bus voltages), C{converged},
        C{iterations}, C{z} (measurement vector), C{z_est} (estimated
        measurements), C{error_sqrsum} (weighted sum of squared residuals)
        and C{elapsed} (wall-clock seconds).

        NOTE(review): C{z_est} is first assigned inside the iteration loop,
        so if C{self.max_iter} < 1 the C{error_sqrsum} line below raises
        NameError -- confirm max_iter is always >= 1.
        """
        case = self.case
        baseMVA = case.base_mva
        buses = self.case.connected_buses
        branches = case.online_branches
        generators = case.online_generators
        meas = self.measurements
        # Update indices.
        self.case.index_buses()
        self.case.index_branches()

        # Index buses (reference bus is excluded from the state update).
#        ref = [b._i for b in buses if b.type == REFERENCE]
        pv  = [b._i for b in buses if b.type == PV]
        pq  = [b._i for b in buses if b.type == PQ]

        # Build admittance matrices.
        Ybus, Yf, Yt = case.Y

        # Prepare initial guess.
        V0 = self.getV0(self.v_mag_guess, buses, generators)

        # Start the clock.
        t0 = time()

        # Initialise SE.
        converged = False
        i = 0
        V = V0
        Va = angle(V0)
        Vm = abs(V0)

        nb = Ybus.shape[0]
        f = [b.from_bus._i for b in branches]
        t = [b.to_bus._i for b in branches]
        # State variables: angles and magnitudes at all non-reference buses.
        nonref = pv + pq

        # Form measurement vector.
        z = array([m.value for m in meas])

        # Form measurement index vectors (one list per measurement type).
        idx_zPf = [m.b_or_l._i for m in meas if m.type == PF]
        idx_zPt = [m.b_or_l._i for m in meas if m.type == PT]
        idx_zQf = [m.b_or_l._i for m in meas if m.type == QF]
        idx_zQt = [m.b_or_l._i for m in meas if m.type == QT]
        idx_zPg = [m.b_or_l._i for m in meas if m.type == PG]
        idx_zQg = [m.b_or_l._i for m in meas if m.type == QG]
        idx_zVm = [m.b_or_l._i for m in meas if m.type == VM]
        idx_zVa = [m.b_or_l._i for m in meas if m.type == VA]

        def col(seq):
            # Turn a flat index list into column form for 2-D fancy indexing.
            return [[k] for k in seq]

        # Create inverse of covariance matrix with all measurements.
#        full_scale = 30
#        sigma = [
#            0.02 * abs(Sf)      + 0.0052 * full_scale * ones(nbr,1),
#            0.02 * abs(St)      + 0.0052 * full_scale * ones(nbr,1),
#            0.02 * abs(Sbus)    + 0.0052 * full_scale * ones(nb,1),
#            0.2 * pi/180 * 3*ones(nb,1),
#            0.02 * abs(Sf)      + 0.0052 * full_scale * ones(nbr,1),
#            0.02 * abs(St)      + 0.0052 * full_scale * ones(nbr,1),
#            0.02 * abs(Sbus)    + 0.0052 * full_scale * ones(nb,1),
#            0.02 * abs(V0)      + 0.0052 * 1.1 * ones(nb,1),
#        ] ./ 3

        # Get R inverse matrix: one standard deviation per measurement type,
        # ordered to match z_est below (Pf, Pt, Qf, Qt, Pg, Qg, Vm, Va).
        sigma_vector = r_[
            self.sigma[0] * ones(len(idx_zPf)),
            self.sigma[1] * ones(len(idx_zPt)),
            self.sigma[2] * ones(len(idx_zQf)),
            self.sigma[3] * ones(len(idx_zQt)),
            self.sigma[4] * ones(len(idx_zPg)),
            self.sigma[5] * ones(len(idx_zQg)),
            self.sigma[6] * ones(len(idx_zVm)),
            self.sigma[7] * ones(len(idx_zVa))
        ]
        sigma_squared = sigma_vector**2

        # Diagonal weighting matrix Rinv = diag(1 / sigma^2).
        rsig = range(len(sigma_squared))
        Rinv = csr_matrix((1.0 / sigma_squared, (rsig, rsig)))

        # Do Newton iterations.
        while (not converged) and (i < self.max_iter):
            i += 1

            # Compute estimated measurement.
            Sfe = V[f] * conj(Yf * V)       # complex "from"-end branch flows
            Ste = V[t] * conj(Yt * V)       # complex "to"-end branch flows
            # Compute net injection at generator buses.
            gbus = [g.bus._i for g in generators]
            Sgbus = V[gbus] * conj(Ybus[gbus, :] * V)
            # inj S + local Sd (recover generator output from net injection)
            Sd = array([complex(b.p_demand, b.q_demand) for b in buses])
            Sgen = (Sgbus * baseMVA + Sd) / baseMVA

            # Stack estimated measurements in the same order as sigma_vector.
            z_est = r_[
                Sfe[idx_zPf].real,
                Ste[idx_zPt].real,
                Sfe[idx_zQf].imag,
                Ste[idx_zQt].imag,
                Sgen[idx_zPg].real,
                Sgen[idx_zQg].imag,
                abs(V[idx_zVm]),
                angle(V[idx_zVa])
            ]

            # Get H matrix (measurement Jacobian w.r.t. Va and Vm).
            dSbus_dVm, dSbus_dVa = case.dSbus_dV(Ybus, V)
            dSf_dVa, dSf_dVm, dSt_dVa, dSt_dVm, _, _ = case.dSbr_dV(Yf, Yt,V)

            # Get sub-matrix of H relating to line flow.
            dPF_dVa = dSf_dVa.real # from end
            dQF_dVa = dSf_dVa.imag
            dPF_dVm = dSf_dVm.real
            dQF_dVm = dSf_dVm.imag
            dPT_dVa = dSt_dVa.real # to end
            dQT_dVa = dSt_dVa.imag
            dPT_dVm = dSt_dVm.real
            dQT_dVm = dSt_dVm.imag
            # Get sub-matrix of H relating to generator output.
            dPG_dVa = dSbus_dVa[gbus, :].real
            dQG_dVa = dSbus_dVa[gbus, :].imag
            dPG_dVm = dSbus_dVm[gbus, :].real
            dQG_dVm = dSbus_dVm[gbus, :].imag
            # Get sub-matrix of H relating to voltage angle.
            dVa_dVa = csr_matrix((ones(nb), (range(nb), range(nb))))
            dVa_dVm = csr_matrix((nb, nb))
            # Get sub-matrix of H relating to voltage magnitude.
            dVm_dVa = csr_matrix((nb, nb))
            dVm_dVm = csr_matrix((ones(nb), (range(nb), range(nb))))

            # (measurement indices, d/dVa block, d/dVm block) per type;
            # types with no measurements are dropped in the vstack below.
            h = [(col(idx_zPf), dPF_dVa, dPF_dVm),
                 (col(idx_zQf), dQF_dVa, dQF_dVm),
                 (col(idx_zPt), dPT_dVa, dPT_dVm),
                 (col(idx_zQt), dQT_dVa, dQT_dVm),
                 (col(idx_zPg), dPG_dVa, dPG_dVm),
                 (col(idx_zQg), dQG_dVa, dQG_dVm),
                 (col(idx_zVm), dVm_dVa, dVm_dVm),
                 (col(idx_zVa), dVa_dVa, dVa_dVm)]

            H = vstack([hstack([dVa[idx, nonref], dVm[idx, nonref]])
                        for idx, dVa, dVm in h if len(idx) > 0 ])

            # Compute update step via the WLS normal equations.
            J = H.T * Rinv * H
            F = H.T * Rinv * (z - z_est) # evaluate F(x)
            dx = spsolve(J, F)

            # Check for convergence.
            normF = linalg.norm(F, Inf)

            if self.verbose:
                logger.info("Iteration [%d]: Norm of mismatch: %.3f" %
                            (i, normF))
            if normF < self.tolerance:
                converged = True

            # Update voltage: dx is [dVa(nonref); dVm(nonref)].
            npvpq = len(nonref)

            Va[nonref] = Va[nonref] + dx[:npvpq]
            Vm[nonref] = Vm[nonref] + dx[npvpq:2 * npvpq]

            # Rebuild the complex voltage and re-extract polar coordinates.
            V = Vm * exp(1j * Va)
            Va = angle(V)
            Vm = abs(V)

        # Weighted sum squares of error.
        error_sqrsum = sum((z - z_est)**2 / sigma_squared)

        # Update case with solution.
        case.pf_solution(Ybus, Yf, Yt, V)

        # Stop the clock.
        elapsed = time() - t0

        if self.verbose and converged:
            print "State estimation converged in: %.3fs (%d iterations)" % \
            (elapsed, i)
#            self.output_solution(sys.stdout, z, z_est)

        solution = {"V": V, "converged": converged, "iterations": i,
                    "z": z, "z_est": z_est, "error_sqrsum": error_sqrsum,
                    "elapsed": elapsed}

        return solution

Example 43

Project: PYPOWER
Source File: case118.py
View license
def case118():
    """Power flow data for IEEE 118 bus test case.
    Please see L{caseformat} for details on the case file format.

    This data was converted from IEEE Common Data Format
    (ieee118cdf.txt) on 20-Sep-2004 by cdf2matp, rev. 1.11
    See end of file for warnings generated during conversion.

    Converted from IEEE CDF file from:
    U{http://www.ee.washington.edu/research/pstca/}

    With baseKV data taken from the PSAP format file from the same site,
    added manually on 10-Mar-2006.

    08/25/93 UW ARCHIVE           100.0  1961 W IEEE 118 Bus Test Case

    @return: Power flow data for IEEE 118 bus test case, as a dict with
        keys C{version}, C{baseMVA}, C{bus}, C{gen}, C{branch} and
        C{gencost} (matrices are numpy arrays).
    """
    # Case format version "2" (see caseformat for the column definitions).
    ppc = {"version": '2'}

    ##-----  Power Flow Data  -----##
    ## system MVA base
    ppc["baseMVA"] = 100.0

    ## bus data
    # bus_i type Pd Qd Gs Bs area Vm Va baseKV zone Vmax Vmin
    # NOTE(review): bus "type" column presumably follows the PYPOWER/MATPOWER
    # convention (1=PQ, 2=PV, 3=reference/slack) -- see caseformat to confirm.
    ppc["bus"] = array([
        [1, 2, 51, 27, 0, 0, 1, 0.955, 10.67, 138, 1, 1.06, 0.94],
        [2, 1, 20, 9, 0, 0, 1, 0.971, 11.22, 138, 1, 1.06, 0.94],
        [3, 1, 39, 10, 0, 0, 1, 0.968, 11.56, 138, 1, 1.06, 0.94],
        [4, 2, 39, 12, 0, 0, 1, 0.998, 15.28, 138, 1, 1.06, 0.94],
        [5, 1, 0, 0, 0, -40, 1, 1.002, 15.73, 138, 1, 1.06, 0.94],
        [6, 2, 52, 22, 0, 0, 1, 0.99, 13, 138, 1, 1.06, 0.94],
        [7, 1, 19, 2, 0, 0, 1, 0.989, 12.56, 138, 1, 1.06, 0.94],
        [8, 2, 28, 0, 0, 0, 1, 1.015, 20.77, 345, 1, 1.06, 0.94],
        [9, 1, 0, 0, 0, 0, 1, 1.043, 28.02, 345, 1, 1.06, 0.94],
        [10, 2, 0, 0, 0, 0, 1, 1.05, 35.61, 345, 1, 1.06, 0.94],
        [11, 1, 70, 23, 0, 0, 1, 0.985, 12.72, 138, 1, 1.06, 0.94],
        [12, 2, 47, 10, 0, 0, 1, 0.99, 12.2, 138, 1, 1.06, 0.94],
        [13, 1, 34, 16, 0, 0, 1, 0.968, 11.35, 138, 1, 1.06, 0.94],
        [14, 1, 14, 1, 0, 0, 1, 0.984, 11.5, 138, 1, 1.06, 0.94],
        [15, 2, 90, 30, 0, 0, 1, 0.97, 11.23, 138, 1, 1.06, 0.94],
        [16, 1, 25, 10, 0, 0, 1, 0.984, 11.91, 138, 1, 1.06, 0.94],
        [17, 1, 11, 3, 0, 0, 1, 0.995, 13.74, 138, 1, 1.06, 0.94],
        [18, 2, 60, 34, 0, 0, 1, 0.973, 11.53, 138, 1, 1.06, 0.94],
        [19, 2, 45, 25, 0, 0, 1, 0.963, 11.05, 138, 1, 1.06, 0.94],
        [20, 1, 18, 3, 0, 0, 1, 0.958, 11.93, 138, 1, 1.06, 0.94],
        [21, 1, 14, 8, 0, 0, 1, 0.959, 13.52, 138, 1, 1.06, 0.94],
        [22, 1, 10, 5, 0, 0, 1, 0.97, 16.08, 138, 1, 1.06, 0.94],
        [23, 1, 7, 3, 0, 0, 1, 1, 21, 138, 1, 1.06, 0.94],
        [24, 2, 13, 0, 0, 0, 1, 0.992, 20.89, 138, 1, 1.06, 0.94],
        [25, 2, 0, 0, 0, 0, 1, 1.05, 27.93, 138, 1, 1.06, 0.94],
        [26, 2, 0, 0, 0, 0, 1, 1.015, 29.71, 345, 1, 1.06, 0.94],
        [27, 2, 71, 13, 0, 0, 1, 0.968, 15.35, 138, 1, 1.06, 0.94],
        [28, 1, 17, 7, 0, 0, 1, 0.962, 13.62, 138, 1, 1.06, 0.94],
        [29, 1, 24, 4, 0, 0, 1, 0.963, 12.63, 138, 1, 1.06, 0.94],
        [30, 1, 0, 0, 0, 0, 1, 0.968, 18.79, 345, 1, 1.06, 0.94],
        [31, 2, 43, 27, 0, 0, 1, 0.967, 12.75, 138, 1, 1.06, 0.94],
        [32, 2, 59, 23, 0, 0, 1, 0.964, 14.8, 138, 1, 1.06, 0.94],
        [33, 1, 23, 9, 0, 0, 1, 0.972, 10.63, 138, 1, 1.06, 0.94],
        [34, 2, 59, 26, 0, 14, 1, 0.986, 11.3, 138, 1, 1.06, 0.94],
        [35, 1, 33, 9, 0, 0, 1, 0.981, 10.87, 138, 1, 1.06, 0.94],
        [36, 2, 31, 17, 0, 0, 1, 0.98, 10.87, 138, 1, 1.06, 0.94],
        [37, 1, 0, 0, 0, -25, 1, 0.992, 11.77, 138, 1, 1.06, 0.94],
        [38, 1, 0, 0, 0, 0, 1, 0.962, 16.91, 345, 1, 1.06, 0.94],
        [39, 1, 27, 11, 0, 0, 1, 0.97, 8.41, 138, 1, 1.06, 0.94],
        [40, 2, 66, 23, 0, 0, 1, 0.97, 7.35, 138, 1, 1.06, 0.94],
        [41, 1, 37, 10, 0, 0, 1, 0.967, 6.92, 138, 1, 1.06, 0.94],
        [42, 2, 96, 23, 0, 0, 1, 0.985, 8.53, 138, 1, 1.06, 0.94],
        [43, 1, 18, 7, 0, 0, 1, 0.978, 11.28, 138, 1, 1.06, 0.94],
        [44, 1, 16, 8, 0, 10, 1, 0.985, 13.82, 138, 1, 1.06, 0.94],
        [45, 1, 53, 22, 0, 10, 1, 0.987, 15.67, 138, 1, 1.06, 0.94],
        [46, 2, 28, 10, 0, 10, 1, 1.005, 18.49, 138, 1, 1.06, 0.94],
        [47, 1, 34, 0, 0, 0, 1, 1.017, 20.73, 138, 1, 1.06, 0.94],
        [48, 1, 20, 11, 0, 15, 1, 1.021, 19.93, 138, 1, 1.06, 0.94],
        [49, 2, 87, 30, 0, 0, 1, 1.025, 20.94, 138, 1, 1.06, 0.94],
        [50, 1, 17, 4, 0, 0, 1, 1.001, 18.9, 138, 1, 1.06, 0.94],
        [51, 1, 17, 8, 0, 0, 1, 0.967, 16.28, 138, 1, 1.06, 0.94],
        [52, 1, 18, 5, 0, 0, 1, 0.957, 15.32, 138, 1, 1.06, 0.94],
        [53, 1, 23, 11, 0, 0, 1, 0.946, 14.35, 138, 1, 1.06, 0.94],
        [54, 2, 113, 32, 0, 0, 1, 0.955, 15.26, 138, 1, 1.06, 0.94],
        [55, 2, 63, 22, 0, 0, 1, 0.952, 14.97, 138, 1, 1.06, 0.94],
        [56, 2, 84, 18, 0, 0, 1, 0.954, 15.16, 138, 1, 1.06, 0.94],
        [57, 1, 12, 3, 0, 0, 1, 0.971, 16.36, 138, 1, 1.06, 0.94],
        [58, 1, 12, 3, 0, 0, 1, 0.959, 15.51, 138, 1, 1.06, 0.94],
        [59, 2, 277, 113, 0, 0, 1, 0.985, 19.37, 138, 1, 1.06, 0.94],
        [60, 1, 78, 3, 0, 0, 1, 0.993, 23.15, 138, 1, 1.06, 0.94],
        [61, 2, 0, 0, 0, 0, 1, 0.995, 24.04, 138, 1, 1.06, 0.94],
        [62, 2, 77, 14, 0, 0, 1, 0.998, 23.43, 138, 1, 1.06, 0.94],
        [63, 1, 0, 0, 0, 0, 1, 0.969, 22.75, 345, 1, 1.06, 0.94],
        [64, 1, 0, 0, 0, 0, 1, 0.984, 24.52, 345, 1, 1.06, 0.94],
        [65, 2, 0, 0, 0, 0, 1, 1.005, 27.65, 345, 1, 1.06, 0.94],
        [66, 2, 39, 18, 0, 0, 1, 1.05, 27.48, 138, 1, 1.06, 0.94],
        [67, 1, 28, 7, 0, 0, 1, 1.02, 24.84, 138, 1, 1.06, 0.94],
        [68, 1, 0, 0, 0, 0, 1, 1.003, 27.55, 345, 1, 1.06, 0.94],
        [69, 3, 0, 0, 0, 0, 1, 1.035, 30, 138, 1, 1.06, 0.94],
        [70, 2, 66, 20, 0, 0, 1, 0.984, 22.58, 138, 1, 1.06, 0.94],
        [71, 1, 0, 0, 0, 0, 1, 0.987, 22.15, 138, 1, 1.06, 0.94],
        [72, 2, 12, 0, 0, 0, 1, 0.98, 20.98, 138, 1, 1.06, 0.94],
        [73, 2, 6, 0, 0, 0, 1, 0.991, 21.94, 138, 1, 1.06, 0.94],
        [74, 2, 68, 27, 0, 12, 1, 0.958, 21.64, 138, 1, 1.06, 0.94],
        [75, 1, 47, 11, 0, 0, 1, 0.967, 22.91, 138, 1, 1.06, 0.94],
        [76, 2, 68, 36, 0, 0, 1, 0.943, 21.77, 138, 1, 1.06, 0.94],
        [77, 2, 61, 28, 0, 0, 1, 1.006, 26.72, 138, 1, 1.06, 0.94],
        [78, 1, 71, 26, 0, 0, 1, 1.003, 26.42, 138, 1, 1.06, 0.94],
        [79, 1, 39, 32, 0, 20, 1, 1.009, 26.72, 138, 1, 1.06, 0.94],
        [80, 2, 130, 26, 0, 0, 1, 1.04, 28.96, 138, 1, 1.06, 0.94],
        [81, 1, 0, 0, 0, 0, 1, 0.997, 28.1, 345, 1, 1.06, 0.94],
        [82, 1, 54, 27, 0, 20, 1, 0.989, 27.24, 138, 1, 1.06, 0.94],
        [83, 1, 20, 10, 0, 10, 1, 0.985, 28.42, 138, 1, 1.06, 0.94],
        [84, 1, 11, 7, 0, 0, 1, 0.98, 30.95, 138, 1, 1.06, 0.94],
        [85, 2, 24, 15, 0, 0, 1, 0.985, 32.51, 138, 1, 1.06, 0.94],
        [86, 1, 21, 10, 0, 0, 1, 0.987, 31.14, 138, 1, 1.06, 0.94],
        [87, 2, 0, 0, 0, 0, 1, 1.015, 31.4, 161, 1, 1.06, 0.94],
        [88, 1, 48, 10, 0, 0, 1, 0.987, 35.64, 138, 1, 1.06, 0.94],
        [89, 2, 0, 0, 0, 0, 1, 1.005, 39.69, 138, 1, 1.06, 0.94],
        [90, 2, 163, 42, 0, 0, 1, 0.985, 33.29, 138, 1, 1.06, 0.94],
        [91, 2, 10, 0, 0, 0, 1, 0.98, 33.31, 138, 1, 1.06, 0.94],
        [92, 2, 65, 10, 0, 0, 1, 0.993, 33.8, 138, 1, 1.06, 0.94],
        [93, 1, 12, 7, 0, 0, 1, 0.987, 30.79, 138, 1, 1.06, 0.94],
        [94, 1, 30, 16, 0, 0, 1, 0.991, 28.64, 138, 1, 1.06, 0.94],
        [95, 1, 42, 31, 0, 0, 1, 0.981, 27.67, 138, 1, 1.06, 0.94],
        [96, 1, 38, 15, 0, 0, 1, 0.993, 27.51, 138, 1, 1.06, 0.94],
        [97, 1, 15, 9, 0, 0, 1, 1.011, 27.88, 138, 1, 1.06, 0.94],
        [98, 1, 34, 8, 0, 0, 1, 1.024, 27.4, 138, 1, 1.06, 0.94],
        [99, 2, 42, 0, 0, 0, 1, 1.01, 27.04, 138, 1, 1.06, 0.94],
        [100, 2, 37, 18, 0, 0, 1, 1.017, 28.03, 138, 1, 1.06, 0.94],
        [101, 1, 22, 15, 0, 0, 1, 0.993, 29.61, 138, 1, 1.06, 0.94],
        [102, 1, 5, 3, 0, 0, 1, 0.991, 32.3, 138, 1, 1.06, 0.94],
        [103, 2, 23, 16, 0, 0, 1, 1.001, 24.44, 138, 1, 1.06, 0.94],
        [104, 2, 38, 25, 0, 0, 1, 0.971, 21.69, 138, 1, 1.06, 0.94],
        [105, 2, 31, 26, 0, 20, 1, 0.965, 20.57, 138, 1, 1.06, 0.94],
        [106, 1, 43, 16, 0, 0, 1, 0.962, 20.32, 138, 1, 1.06, 0.94],
        [107, 2, 50, 12, 0, 6, 1, 0.952, 17.53, 138, 1, 1.06, 0.94],
        [108, 1, 2, 1, 0, 0, 1, 0.967, 19.38, 138, 1, 1.06, 0.94],
        [109, 1, 8, 3, 0, 0, 1, 0.967, 18.93, 138, 1, 1.06, 0.94],
        [110, 2, 39, 30, 0, 6, 1, 0.973, 18.09, 138, 1, 1.06, 0.94],
        [111, 2, 0, 0, 0, 0, 1, 0.98, 19.74, 138, 1, 1.06, 0.94],
        [112, 2, 68, 13, 0, 0, 1, 0.975, 14.99, 138, 1, 1.06, 0.94],
        [113, 2, 6, 0, 0, 0, 1, 0.993, 13.74, 138, 1, 1.06, 0.94],
        [114, 1, 8, 3, 0, 0, 1, 0.96, 14.46, 138, 1, 1.06, 0.94],
        [115, 1, 22, 7, 0, 0, 1, 0.96, 14.46, 138, 1, 1.06, 0.94],
        [116, 2, 184, 0, 0, 0, 1, 1.005, 27.12, 138, 1, 1.06, 0.94],
        [117, 1, 20, 8, 0, 0, 1, 0.974, 10.67, 138, 1, 1.06, 0.94],
        [118, 1, 33, 15, 0, 0, 1, 0.949, 21.92, 138, 1, 1.06, 0.94]
    ])

    ## generator data
    # bus, Pg, Qg, Qmax, Qmin, Vg, mBase, status, Pmax, Pmin, Pc1, Pc2,
    # Qc1min, Qc1max, Qc2min, Qc2max, ramp_agc, ramp_10, ramp_30, ramp_q, apf
    ppc["gen"] = array([
        [1, 0, 0, 15, -5, 0.955, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [4, 0, 0, 300, -300, 0.998, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [6, 0, 0, 50, -13, 0.99, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [8, 0, 0, 300, -300, 1.015, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [10, 450, 0, 200, -147, 1.05, 100, 1, 550, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [12, 85, 0, 120, -35, 0.99, 100, 1, 185, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [15, 0, 0, 30, -10, 0.97, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [18, 0, 0, 50, -16, 0.973, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [19, 0, 0, 24, -8, 0.962, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [24, 0, 0, 300, -300, 0.992, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [25, 220, 0, 140, -47, 1.05, 100, 1, 320, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [26, 314, 0, 1000, -1000, 1.015, 100, 1, 414, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [27, 0, 0, 300, -300, 0.968, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [31, 7, 0, 300, -300, 0.967, 100, 1, 107, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [32, 0, 0, 42, -14, 0.963, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [34, 0, 0, 24, -8, 0.984, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [36, 0, 0, 24, -8, 0.98, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [40, 0, 0, 300, -300, 0.97, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [42, 0, 0, 300, -300, 0.985, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [46, 19, 0, 100, -100, 1.005, 100, 1, 119, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [49, 204, 0, 210, -85, 1.025, 100, 1, 304, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [54, 48, 0, 300, -300, 0.955, 100, 1, 148, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [55, 0, 0, 23, -8, 0.952, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [56, 0, 0, 15, -8, 0.954, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [59, 155, 0, 180, -60, 0.985, 100, 1, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [61, 160, 0, 300, -100, 0.995, 100, 1, 260, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [62, 0, 0, 20, -20, 0.998, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [65, 391, 0, 200, -67, 1.005, 100, 1, 491, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [66, 392, 0, 200, -67, 1.05, 100, 1, 492, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [69, 516.4, 0, 300, -300, 1.035, 100, 1, 805.2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [70, 0, 0, 32, -10, 0.984, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [72, 0, 0, 100, -100, 0.98, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [73, 0, 0, 100, -100, 0.991, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [74, 0, 0, 9, -6, 0.958, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [76, 0, 0, 23, -8, 0.943, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [77, 0, 0, 70, -20, 1.006, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [80, 477, 0, 280, -165, 1.04, 100, 1, 577, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [85, 0, 0, 23, -8, 0.985, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [87, 4, 0, 1000, -100, 1.015, 100, 1, 104, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [89, 607, 0, 300, -210, 1.005, 100, 1, 707, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [90, 0, 0, 300, -300, 0.985, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [91, 0, 0, 100, -100, 0.98, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [92, 0, 0, 9, -3, 0.99, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [99, 0, 0, 100, -100, 1.01, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [100, 252, 0, 155, -50, 1.017, 100, 1, 352, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [103, 40, 0, 40, -15, 1.01, 100, 1, 140, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [104, 0, 0, 23, -8, 0.971, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [105, 0, 0, 23, -8, 0.965, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [107, 0, 0, 200, -200, 0.952, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [110, 0, 0, 23, -8, 0.973, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [111, 36, 0, 1000, -100, 0.98, 100, 1, 136, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [112, 0, 0, 1000, -100, 0.975, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [113, 0, 0, 200, -100, 0.993, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [116, 0, 0, 1000, -1000, 1.005, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
    ])

    ## branch data
    # fbus, tbus, r, x, b, rateA, rateB, rateC, ratio, angle, status, angmin, angmax
    ppc["branch"] = array([
        [1, 2, 0.0303, 0.0999, 0.0254, 9900, 0, 0, 0, 0, 1, -360, 360],
        [1, 3, 0.0129, 0.0424, 0.01082, 9900, 0, 0, 0, 0, 1, -360, 360],
        [4, 5, 0.00176, 0.00798, 0.0021, 9900, 0, 0, 0, 0, 1, -360, 360],
        [3, 5, 0.0241, 0.108, 0.0284, 9900, 0, 0, 0, 0, 1, -360, 360],
        [5, 6, 0.0119, 0.054, 0.01426, 9900, 0, 0, 0, 0, 1, -360, 360],
        [6, 7, 0.00459, 0.0208, 0.0055, 9900, 0, 0, 0, 0, 1, -360, 360],
        [8, 9, 0.00244, 0.0305, 1.162, 9900, 0, 0, 0, 0, 1, -360, 360],
        [8, 5, 0, 0.0267, 0, 9900, 0, 0, 0.985, 0, 1, -360, 360],
        [9, 10, 0.00258, 0.0322, 1.23, 9900, 0, 0, 0, 0, 1, -360, 360],
        [4, 11, 0.0209, 0.0688, 0.01748, 9900, 0, 0, 0, 0, 1, -360, 360],
        [5, 11, 0.0203, 0.0682, 0.01738, 9900, 0, 0, 0, 0, 1, -360, 360],
        [11, 12, 0.00595, 0.0196, 0.00502, 9900, 0, 0, 0, 0, 1, -360, 360],
        [2, 12, 0.0187, 0.0616, 0.01572, 9900, 0, 0, 0, 0, 1, -360, 360],
        [3, 12, 0.0484, 0.16, 0.0406, 9900, 0, 0, 0, 0, 1, -360, 360],
        [7, 12, 0.00862, 0.034, 0.00874, 9900, 0, 0, 0, 0, 1, -360, 360],
        [11, 13, 0.02225, 0.0731, 0.01876, 9900, 0, 0, 0, 0, 1, -360, 360],
        [12, 14, 0.0215, 0.0707, 0.01816, 9900, 0, 0, 0, 0, 1, -360, 360],
        [13, 15, 0.0744, 0.2444, 0.06268, 9900, 0, 0, 0, 0, 1, -360, 360],
        [14, 15, 0.0595, 0.195, 0.0502, 9900, 0, 0, 0, 0, 1, -360, 360],
        [12, 16, 0.0212, 0.0834, 0.0214, 9900, 0, 0, 0, 0, 1, -360, 360],
        [15, 17, 0.0132, 0.0437, 0.0444, 9900, 0, 0, 0, 0, 1, -360, 360],
        [16, 17, 0.0454, 0.1801, 0.0466, 9900, 0, 0, 0, 0, 1, -360, 360],
        [17, 18, 0.0123, 0.0505, 0.01298, 9900, 0, 0, 0, 0, 1, -360, 360],
        [18, 19, 0.01119, 0.0493, 0.01142, 9900, 0, 0, 0, 0, 1, -360, 360],
        [19, 20, 0.0252, 0.117, 0.0298, 9900, 0, 0, 0, 0, 1, -360, 360],
        [15, 19, 0.012, 0.0394, 0.0101, 9900, 0, 0, 0, 0, 1, -360, 360],
        [20, 21, 0.0183, 0.0849, 0.0216, 9900, 0, 0, 0, 0, 1, -360, 360],
        [21, 22, 0.0209, 0.097, 0.0246, 9900, 0, 0, 0, 0, 1, -360, 360],
        [22, 23, 0.0342, 0.159, 0.0404, 9900, 0, 0, 0, 0, 1, -360, 360],
        [23, 24, 0.0135, 0.0492, 0.0498, 9900, 0, 0, 0, 0, 1, -360, 360],
        [23, 25, 0.0156, 0.08, 0.0864, 9900, 0, 0, 0, 0, 1, -360, 360],
        [26, 25, 0, 0.0382, 0, 9900, 0, 0, 0.96, 0, 1, -360, 360],
        [25, 27, 0.0318, 0.163, 0.1764, 9900, 0, 0, 0, 0, 1, -360, 360],
        [27, 28, 0.01913, 0.0855, 0.0216, 9900, 0, 0, 0, 0, 1, -360, 360],
        [28, 29, 0.0237, 0.0943, 0.0238, 9900, 0, 0, 0, 0, 1, -360, 360],
        [30, 17, 0, 0.0388, 0, 9900, 0, 0, 0.96, 0, 1, -360, 360],
        [8, 30, 0.00431, 0.0504, 0.514, 9900, 0, 0, 0, 0, 1, -360, 360],
        [26, 30, 0.00799, 0.086, 0.908, 9900, 0, 0, 0, 0, 1, -360, 360],
        [17, 31, 0.0474, 0.1563, 0.0399, 9900, 0, 0, 0, 0, 1, -360, 360],
        [29, 31, 0.0108, 0.0331, 0.0083, 9900, 0, 0, 0, 0, 1, -360, 360],
        [23, 32, 0.0317, 0.1153, 0.1173, 9900, 0, 0, 0, 0, 1, -360, 360],
        [31, 32, 0.0298, 0.0985, 0.0251, 9900, 0, 0, 0, 0, 1, -360, 360],
        [27, 32, 0.0229, 0.0755, 0.01926, 9900, 0, 0, 0, 0, 1, -360, 360],
        [15, 33, 0.038, 0.1244, 0.03194, 9900, 0, 0, 0, 0, 1, -360, 360],
        [19, 34, 0.0752, 0.247, 0.0632, 9900, 0, 0, 0, 0, 1, -360, 360],
        [35, 36, 0.00224, 0.0102, 0.00268, 9900, 0, 0, 0, 0, 1, -360, 360],
        [35, 37, 0.011, 0.0497, 0.01318, 9900, 0, 0, 0, 0, 1, -360, 360],
        [33, 37, 0.0415, 0.142, 0.0366, 9900, 0, 0, 0, 0, 1, -360, 360],
        [34, 36, 0.00871, 0.0268, 0.00568, 9900, 0, 0, 0, 0, 1, -360, 360],
        [34, 37, 0.00256, 0.0094, 0.00984, 9900, 0, 0, 0, 0, 1, -360, 360],
        [38, 37, 0, 0.0375, 0, 9900, 0, 0, 0.935, 0, 1, -360, 360],
        [37, 39, 0.0321, 0.106, 0.027, 9900, 0, 0, 0, 0, 1, -360, 360],
        [37, 40, 0.0593, 0.168, 0.042, 9900, 0, 0, 0, 0, 1, -360, 360],
        [30, 38, 0.00464, 0.054, 0.422, 9900, 0, 0, 0, 0, 1, -360, 360],
        [39, 40, 0.0184, 0.0605, 0.01552, 9900, 0, 0, 0, 0, 1, -360, 360],
        [40, 41, 0.0145, 0.0487, 0.01222, 9900, 0, 0, 0, 0, 1, -360, 360],
        [40, 42, 0.0555, 0.183, 0.0466, 9900, 0, 0, 0, 0, 1, -360, 360],
        [41, 42, 0.041, 0.135, 0.0344, 9900, 0, 0, 0, 0, 1, -360, 360],
        [43, 44, 0.0608, 0.2454, 0.06068, 9900, 0, 0, 0, 0, 1, -360, 360],
        [34, 43, 0.0413, 0.1681, 0.04226, 9900, 0, 0, 0, 0, 1, -360, 360],
        [44, 45, 0.0224, 0.0901, 0.0224, 9900, 0, 0, 0, 0, 1, -360, 360],
        [45, 46, 0.04, 0.1356, 0.0332, 9900, 0, 0, 0, 0, 1, -360, 360],
        [46, 47, 0.038, 0.127, 0.0316, 9900, 0, 0, 0, 0, 1, -360, 360],
        [46, 48, 0.0601, 0.189, 0.0472, 9900, 0, 0, 0, 0, 1, -360, 360],
        [47, 49, 0.0191, 0.0625, 0.01604, 9900, 0, 0, 0, 0, 1, -360, 360],
        [42, 49, 0.0715, 0.323, 0.086, 9900, 0, 0, 0, 0, 1, -360, 360],
        [42, 49, 0.0715, 0.323, 0.086, 9900, 0, 0, 0, 0, 1, -360, 360],
        [45, 49, 0.0684, 0.186, 0.0444, 9900, 0, 0, 0, 0, 1, -360, 360],
        [48, 49, 0.0179, 0.0505, 0.01258, 9900, 0, 0, 0, 0, 1, -360, 360],
        [49, 50, 0.0267, 0.0752, 0.01874, 9900, 0, 0, 0, 0, 1, -360, 360],
        [49, 51, 0.0486, 0.137, 0.0342, 9900, 0, 0, 0, 0, 1, -360, 360],
        [51, 52, 0.0203, 0.0588, 0.01396, 9900, 0, 0, 0, 0, 1, -360, 360],
        [52, 53, 0.0405, 0.1635, 0.04058, 9900, 0, 0, 0, 0, 1, -360, 360],
        [53, 54, 0.0263, 0.122, 0.031, 9900, 0, 0, 0, 0, 1, -360, 360],
        [49, 54, 0.073, 0.289, 0.0738, 9900, 0, 0, 0, 0, 1, -360, 360],
        [49, 54, 0.0869, 0.291, 0.073, 9900, 0, 0, 0, 0, 1, -360, 360],
        [54, 55, 0.0169, 0.0707, 0.0202, 9900, 0, 0, 0, 0, 1, -360, 360],
        [54, 56, 0.00275, 0.00955, 0.00732, 9900, 0, 0, 0, 0, 1, -360, 360],
        [55, 56, 0.00488, 0.0151, 0.00374, 9900, 0, 0, 0, 0, 1, -360, 360],
        [56, 57, 0.0343, 0.0966, 0.0242, 9900, 0, 0, 0, 0, 1, -360, 360],
        [50, 57, 0.0474, 0.134, 0.0332, 9900, 0, 0, 0, 0, 1, -360, 360],
        [56, 58, 0.0343, 0.0966, 0.0242, 9900, 0, 0, 0, 0, 1, -360, 360],
        [51, 58, 0.0255, 0.0719, 0.01788, 9900, 0, 0, 0, 0, 1, -360, 360],
        [54, 59, 0.0503, 0.2293, 0.0598, 9900, 0, 0, 0, 0, 1, -360, 360],
        [56, 59, 0.0825, 0.251, 0.0569, 9900, 0, 0, 0, 0, 1, -360, 360],
        [56, 59, 0.0803, 0.239, 0.0536, 9900, 0, 0, 0, 0, 1, -360, 360],
        [55, 59, 0.04739, 0.2158, 0.05646, 9900, 0, 0, 0, 0, 1, -360, 360],
        [59, 60, 0.0317, 0.145, 0.0376, 9900, 0, 0, 0, 0, 1, -360, 360],
        [59, 61, 0.0328, 0.15, 0.0388, 9900, 0, 0, 0, 0, 1, -360, 360],
        [60, 61, 0.00264, 0.0135, 0.01456, 9900, 0, 0, 0, 0, 1, -360, 360],
        [60, 62, 0.0123, 0.0561, 0.01468, 9900, 0, 0, 0, 0, 1, -360, 360],
        [61, 62, 0.00824, 0.0376, 0.0098, 9900, 0, 0, 0, 0, 1, -360, 360],
        [63, 59, 0, 0.0386, 0, 9900, 0, 0, 0.96, 0, 1, -360, 360],
        [63, 64, 0.00172, 0.02, 0.216, 9900, 0, 0, 0, 0, 1, -360, 360],
        [64, 61, 0, 0.0268, 0, 9900, 0, 0, 0.985, 0, 1, -360, 360],
        [38, 65, 0.00901, 0.0986, 1.046, 9900, 0, 0, 0, 0, 1, -360, 360],
        [64, 65, 0.00269, 0.0302, 0.38, 9900, 0, 0, 0, 0, 1, -360, 360],
        [49, 66, 0.018, 0.0919, 0.0248, 9900, 0, 0, 0, 0, 1, -360, 360],
        [49, 66, 0.018, 0.0919, 0.0248, 9900, 0, 0, 0, 0, 1, -360, 360],
        [62, 66, 0.0482, 0.218, 0.0578, 9900, 0, 0, 0, 0, 1, -360, 360],
        [62, 67, 0.0258, 0.117, 0.031, 9900, 0, 0, 0, 0, 1, -360, 360],
        [65, 66, 0, 0.037, 0, 9900, 0, 0, 0.935, 0, 1, -360, 360],
        [66, 67, 0.0224, 0.1015, 0.02682, 9900, 0, 0, 0, 0, 1, -360, 360],
        [65, 68, 0.00138, 0.016, 0.638, 9900, 0, 0, 0, 0, 1, -360, 360],
        [47, 69, 0.0844, 0.2778, 0.07092, 9900, 0, 0, 0, 0, 1, -360, 360],
        [49, 69, 0.0985, 0.324, 0.0828, 9900, 0, 0, 0, 0, 1, -360, 360],
        [68, 69, 0, 0.037, 0, 9900, 0, 0, 0.935, 0, 1, -360, 360],
        [69, 70, 0.03, 0.127, 0.122, 9900, 0, 0, 0, 0, 1, -360, 360],
        [24, 70, 0.00221, 0.4115, 0.10198, 9900, 0, 0, 0, 0, 1, -360, 360],
        [70, 71, 0.00882, 0.0355, 0.00878, 9900, 0, 0, 0, 0, 1, -360, 360],
        [24, 72, 0.0488, 0.196, 0.0488, 9900, 0, 0, 0, 0, 1, -360, 360],
        [71, 72, 0.0446, 0.18, 0.04444, 9900, 0, 0, 0, 0, 1, -360, 360],
        [71, 73, 0.00866, 0.0454, 0.01178, 9900, 0, 0, 0, 0, 1, -360, 360],
        [70, 74, 0.0401, 0.1323, 0.03368, 9900, 0, 0, 0, 0, 1, -360, 360],
        [70, 75, 0.0428, 0.141, 0.036, 9900, 0, 0, 0, 0, 1, -360, 360],
        [69, 75, 0.0405, 0.122, 0.124, 9900, 0, 0, 0, 0, 1, -360, 360],
        [74, 75, 0.0123, 0.0406, 0.01034, 9900, 0, 0, 0, 0, 1, -360, 360],
        [76, 77, 0.0444, 0.148, 0.0368, 9900, 0, 0, 0, 0, 1, -360, 360],
        [69, 77, 0.0309, 0.101, 0.1038, 9900, 0, 0, 0, 0, 1, -360, 360],
        [75, 77, 0.0601, 0.1999, 0.04978, 9900, 0, 0, 0, 0, 1, -360, 360],
        [77, 78, 0.00376, 0.0124, 0.01264, 9900, 0, 0, 0, 0, 1, -360, 360],
        [78, 79, 0.00546, 0.0244, 0.00648, 9900, 0, 0, 0, 0, 1, -360, 360],
        [77, 80, 0.017, 0.0485, 0.0472, 9900, 0, 0, 0, 0, 1, -360, 360],
        [77, 80, 0.0294, 0.105, 0.0228, 9900, 0, 0, 0, 0, 1, -360, 360],
        [79, 80, 0.0156, 0.0704, 0.0187, 9900, 0, 0, 0, 0, 1, -360, 360],
        [68, 81, 0.00175, 0.0202, 0.808, 9900, 0, 0, 0, 0, 1, -360, 360],
        [81, 80, 0, 0.037, 0, 9900, 0, 0, 0.935, 0, 1, -360, 360],
        [77, 82, 0.0298, 0.0853, 0.08174, 9900, 0, 0, 0, 0, 1, -360, 360],
        [82, 83, 0.0112, 0.03665, 0.03796, 9900, 0, 0, 0, 0, 1, -360, 360],
        [83, 84, 0.0625, 0.132, 0.0258, 9900, 0, 0, 0, 0, 1, -360, 360],
        [83, 85, 0.043, 0.148, 0.0348, 9900, 0, 0, 0, 0, 1, -360, 360],
        [84, 85, 0.0302, 0.0641, 0.01234, 9900, 0, 0, 0, 0, 1, -360, 360],
        [85, 86, 0.035, 0.123, 0.0276, 9900, 0, 0, 0, 0, 1, -360, 360],
        [86, 87, 0.02828, 0.2074, 0.0445, 9900, 0, 0, 0, 0, 1, -360, 360],
        [85, 88, 0.02, 0.102, 0.0276, 9900, 0, 0, 0, 0, 1, -360, 360],
        [85, 89, 0.0239, 0.173, 0.047, 9900, 0, 0, 0, 0, 1, -360, 360],
        [88, 89, 0.0139, 0.0712, 0.01934, 9900, 0, 0, 0, 0, 1, -360, 360],
        [89, 90, 0.0518, 0.188, 0.0528, 9900, 0, 0, 0, 0, 1, -360, 360],
        [89, 90, 0.0238, 0.0997, 0.106, 9900, 0, 0, 0, 0, 1, -360, 360],
        [90, 91, 0.0254, 0.0836, 0.0214, 9900, 0, 0, 0, 0, 1, -360, 360],
        [89, 92, 0.0099, 0.0505, 0.0548, 9900, 0, 0, 0, 0, 1, -360, 360],
        [89, 92, 0.0393, 0.1581, 0.0414, 9900, 0, 0, 0, 0, 1, -360, 360],
        [91, 92, 0.0387, 0.1272, 0.03268, 9900, 0, 0, 0, 0, 1, -360, 360],
        [92, 93, 0.0258, 0.0848, 0.0218, 9900, 0, 0, 0, 0, 1, -360, 360],
        [92, 94, 0.0481, 0.158, 0.0406, 9900, 0, 0, 0, 0, 1, -360, 360],
        [93, 94, 0.0223, 0.0732, 0.01876, 9900, 0, 0, 0, 0, 1, -360, 360],
        [94, 95, 0.0132, 0.0434, 0.0111, 9900, 0, 0, 0, 0, 1, -360, 360],
        [80, 96, 0.0356, 0.182, 0.0494, 9900, 0, 0, 0, 0, 1, -360, 360],
        [82, 96, 0.0162, 0.053, 0.0544, 9900, 0, 0, 0, 0, 1, -360, 360],
        [94, 96, 0.0269, 0.0869, 0.023, 9900, 0, 0, 0, 0, 1, -360, 360],
        [80, 97, 0.0183, 0.0934, 0.0254, 9900, 0, 0, 0, 0, 1, -360, 360],
        [80, 98, 0.0238, 0.108, 0.0286, 9900, 0, 0, 0, 0, 1, -360, 360],
        [80, 99, 0.0454, 0.206, 0.0546, 9900, 0, 0, 0, 0, 1, -360, 360],
        [92, 100, 0.0648, 0.295, 0.0472, 9900, 0, 0, 0, 0, 1, -360, 360],
        [94, 100, 0.0178, 0.058, 0.0604, 9900, 0, 0, 0, 0, 1, -360, 360],
        [95, 96, 0.0171, 0.0547, 0.01474, 9900, 0, 0, 0, 0, 1, -360, 360],
        [96, 97, 0.0173, 0.0885, 0.024, 9900, 0, 0, 0, 0, 1, -360, 360],
        [98, 100, 0.0397, 0.179, 0.0476, 9900, 0, 0, 0, 0, 1, -360, 360],
        [99, 100, 0.018, 0.0813, 0.0216, 9900, 0, 0, 0, 0, 1, -360, 360],
        [100, 101, 0.0277, 0.1262, 0.0328, 9900, 0, 0, 0, 0, 1, -360, 360],
        [92, 102, 0.0123, 0.0559, 0.01464, 9900, 0, 0, 0, 0, 1, -360, 360],
        [101, 102, 0.0246, 0.112, 0.0294, 9900, 0, 0, 0, 0, 1, -360, 360],
        [100, 103, 0.016, 0.0525, 0.0536, 9900, 0, 0, 0, 0, 1, -360, 360],
        [100, 104, 0.0451, 0.204, 0.0541, 9900, 0, 0, 0, 0, 1, -360, 360],
        [103, 104, 0.0466, 0.1584, 0.0407, 9900, 0, 0, 0, 0, 1, -360, 360],
        [103, 105, 0.0535, 0.1625, 0.0408, 9900, 0, 0, 0, 0, 1, -360, 360],
        [100, 106, 0.0605, 0.229, 0.062, 9900, 0, 0, 0, 0, 1, -360, 360],
        [104, 105, 0.00994, 0.0378, 0.00986, 9900, 0, 0, 0, 0, 1, -360, 360],
        [105, 106, 0.014, 0.0547, 0.01434, 9900, 0, 0, 0, 0, 1, -360, 360],
        [105, 107, 0.053, 0.183, 0.0472, 9900, 0, 0, 0, 0, 1, -360, 360],
        [105, 108, 0.0261, 0.0703, 0.01844, 9900, 0, 0, 0, 0, 1, -360, 360],
        [106, 107, 0.053, 0.183, 0.0472, 9900, 0, 0, 0, 0, 1, -360, 360],
        [108, 109, 0.0105, 0.0288, 0.0076, 9900, 0, 0, 0, 0, 1, -360, 360],
        [103, 110, 0.03906, 0.1813, 0.0461, 9900, 0, 0, 0, 0, 1, -360, 360],
        [109, 110, 0.0278, 0.0762, 0.0202, 9900, 0, 0, 0, 0, 1, -360, 360],
        [110, 111, 0.022, 0.0755, 0.02, 9900, 0, 0, 0, 0, 1, -360, 360],
        [110, 112, 0.0247, 0.064, 0.062, 9900, 0, 0, 0, 0, 1, -360, 360],
        [17, 113, 0.00913, 0.0301, 0.00768, 9900, 0, 0, 0, 0, 1, -360, 360],
        [32, 113, 0.0615, 0.203, 0.0518, 9900, 0, 0, 0, 0, 1, -360, 360],
        [32, 114, 0.0135, 0.0612, 0.01628, 9900, 0, 0, 0, 0, 1, -360, 360],
        [27, 115, 0.0164, 0.0741, 0.01972, 9900, 0, 0, 0, 0, 1, -360, 360],
        [114, 115, 0.0023, 0.0104, 0.00276, 9900, 0, 0, 0, 0, 1, -360, 360],
        [68, 116, 0.00034, 0.00405, 0.164, 9900, 0, 0, 0, 0, 1, -360, 360],
        [12, 117, 0.0329, 0.14, 0.0358, 9900, 0, 0, 0, 0, 1, -360, 360],
        [75, 118, 0.0145, 0.0481, 0.01198, 9900, 0, 0, 0, 0, 1, -360, 360],
        [76, 118, 0.0164, 0.0544, 0.01356, 9900, 0, 0, 0, 0, 1, -360, 360]
    ])

    ##-----  OPF Data  -----##
    ## generator cost data
    # 1 startup shutdown n x1 y1 ... xn yn
    # 2 startup shutdown n c(n-1) ... c0
    # All rows here use model 2 (polynomial cost) with 3 coefficients.
    ppc["gencost"] = array([
        [2, 0, 0, 3, 0.01, 40, 0],
        [2, 0, 0, 3, 0.01, 40, 0],
        [2, 0, 0, 3, 0.01, 40, 0],
        [2, 0, 0, 3, 0.01, 40, 0],
        [2, 0, 0, 3, 0.0222222, 20, 0],
        [2, 0, 0, 3, 0.117647, 20, 0],
        [2, 0, 0, 3, 0.01, 40, 0],
        [2, 0, 0, 3, 0.01, 40, 0],
        [2, 0, 0, 3, 0.01, 40, 0],
        [2, 0, 0, 3, 0.01, 40, 0],
        [2, 0, 0, 3, 0.0454545, 20, 0],
        [2, 0, 0, 3, 0.0318471, 20, 0],
        [2, 0, 0, 3, 0.01, 40, 0],
        [2, 0, 0, 3, 1.42857, 20, 0],
        [2, 0, 0, 3, 0.01, 40, 0],
        [2, 0, 0, 3, 0.01, 40, 0],
        [2, 0, 0, 3, 0.01, 40, 0],
        [2, 0, 0, 3, 0.01, 40, 0],
        [2, 0, 0, 3, 0.01, 40, 0],
        [2, 0, 0, 3, 0.526316, 20, 0],
        [2, 0, 0, 3, 0.0490196, 20, 0],
        [2, 0, 0, 3, 0.208333, 20, 0],
        [2, 0, 0, 3, 0.01, 40, 0],
        [2, 0, 0, 3, 0.01, 40, 0],
        [2, 0, 0, 3, 0.0645161, 20, 0],
        [2, 0, 0, 3, 0.0625, 20, 0],
        [2, 0, 0, 3, 0.01, 40, 0],
        [2, 0, 0, 3, 0.0255754, 20, 0],
        [2, 0, 0, 3, 0.0255102, 20, 0],
        [2, 0, 0, 3, 0.0193648, 20, 0],
        [2, 0, 0, 3, 0.01, 40, 0],
        [2, 0, 0, 3, 0.01, 40, 0],
        [2, 0, 0, 3, 0.01, 40, 0],
        [2, 0, 0, 3, 0.01, 40, 0],
        [2, 0, 0, 3, 0.01, 40, 0],
        [2, 0, 0, 3, 0.01, 40, 0],
        [2, 0, 0, 3, 0.0209644, 20, 0],
        [2, 0, 0, 3, 0.01, 40, 0],
        [2, 0, 0, 3, 2.5, 20, 0],
        [2, 0, 0, 3, 0.0164745, 20, 0],
        [2, 0, 0, 3, 0.01, 40, 0],
        [2, 0, 0, 3, 0.01, 40, 0],
        [2, 0, 0, 3, 0.01, 40, 0],
        [2, 0, 0, 3, 0.01, 40, 0],
        [2, 0, 0, 3, 0.0396825, 20, 0],
        [2, 0, 0, 3, 0.25, 20, 0],
        [2, 0, 0, 3, 0.01, 40, 0],
        [2, 0, 0, 3, 0.01, 40, 0],
        [2, 0, 0, 3, 0.01, 40, 0],
        [2, 0, 0, 3, 0.01, 40, 0],
        [2, 0, 0, 3, 0.277778, 20, 0],
        [2, 0, 0, 3, 0.01, 40, 0],
        [2, 0, 0, 3, 0.01, 40, 0],
        [2, 0, 0, 3, 0.01, 40, 0]
    ])

    return ppc

Example 44

Project: PYPOWER
Source File: opf_args.py
View license
def opf_args(*args):
    """Parses and initializes OPF input arguments.

    Returns the full set of initialized OPF input arguments, filling in
    default values for missing arguments. See Examples below for the
    possible calling syntax options.

    Input arguments options::

        opf_args(ppc)
        opf_args(ppc, ppopt)
        opf_args(ppc, userfcn, ppopt)
        opf_args(ppc, A, l, u)
        opf_args(ppc, A, l, u, ppopt)
        opf_args(ppc, A, l, u, ppopt, N, fparm, H, Cw)
        opf_args(ppc, A, l, u, ppopt, N, fparm, H, Cw, z0, zl, zu)

        opf_args(baseMVA, bus, gen, branch, areas, gencost)
        opf_args(baseMVA, bus, gen, branch, areas, gencost, ppopt)
        opf_args(baseMVA, bus, gen, branch, areas, gencost, userfcn, ppopt)
        opf_args(baseMVA, bus, gen, branch, areas, gencost, A, l, u)
        opf_args(baseMVA, bus, gen, branch, areas, gencost, A, l, u, ppopt)
        opf_args(baseMVA, bus, gen, branch, areas, gencost, A, l, u, ...
                                    ppopt, N, fparm, H, Cw)
        opf_args(baseMVA, bus, gen, branch, areas, gencost, A, l, u, ...
                                    ppopt, N, fparm, H, Cw, z0, zl, zu)

    The data for the problem can be specified in one of three ways:
      1. a string (ppc) containing the file name of a PYPOWER case
      which defines the data matrices baseMVA, bus, gen, branch, and
      gencost (areas is not used at all, it is only included for
      backward compatibility of the API).
      2. a dict (ppc) containing the data matrices as fields.
      3. the individual data matrices themselves.

    The optional user parameters for user constraints (C{A, l, u}), user costs
    (C{N, fparm, H, Cw}), user variable initializer (z0), and user variable
    limits (C{zl, zu}) can also be specified as fields in a case dict,
    either passed in directly or defined in a case file referenced by name.

    When specified, C{A, l, u} represent additional linear constraints on the
    optimization variables, C{l <= A*[x z] <= u}. If the user specifies an C{A}
    matrix that has more columns than the number of "C{x}" (OPF) variables,
    then there are extra linearly constrained "C{z}" variables. For an
    explanation of the formulation used and instructions for forming the
    C{A} matrix, see the MATPOWER manual.

    A generalized cost on all variables can be applied if input arguments
    C{N}, C{fparm}, C{H} and C{Cw} are specified.  First, a linear
    transformation of the optimization variables is defined by means of
    C{r = N * [x z]}. Then, to each element of r a function is applied as
    encoded in the C{fparm} matrix (see Matpower manual). If the resulting
    vector is named C{w}, then C{H} and C{Cw} define a quadratic cost on
    C{w}: C{(1/2)*w'*H*w + Cw * w}.
    C{H} and C{N} should be sparse matrices and C{H} should also be symmetric.

    The optional C{ppopt} vector specifies PYPOWER options. See L{ppoption}
    for details and default values.

    @author: Ray Zimmerman (PSERC Cornell)
    @author: Carlos E. Murillo-Sanchez (PSERC Cornell & Universidad
    Autonoma de Manizales)
    """
#    nargin = len([arg for arg in [baseMVA, bus, gen, branch, areas, gencost,
#                                  Au, lbu, ubu, ppopt, N, fparm, H, Cw,
#                                  z0, zl, zu] if arg is not None])
    nargin = len(args)

    userfcn = array([])
    ## passing filename or dict
    if isinstance(args[0], basestring) or isinstance(args[0], dict):
        # ----opf( baseMVA,     bus,   gen, branch, areas, gencost,    Au, lbu,  ubu, ppopt,  N, fparm, H, Cw, z0, zl, zu)
        # 12  opf(casefile,      Au,   lbu,    ubu, ppopt,       N, fparm,    H,  Cw,    z0, zl,    zu)
        # 9   opf(casefile,      Au,   lbu,    ubu, ppopt,       N, fparm,    H,  Cw)
        # 5   opf(casefile,      Au,   lbu,    ubu, ppopt)
        # 4   opf(casefile,      Au,   lbu,    ubu)
        # 3   opf(casefile, userfcn, ppopt)
        # 2   opf(casefile,   ppopt)
        # 1   opf(casefile)
        if nargin in [1, 2, 3, 4, 5, 9, 12]:
            casefile = args[0]
            # NOTE: the cascades below deliberately unpack into the 17-arg
            # positional names and then shift values to the names that match
            # the shorter calling syntax (MATLAB-style argument shifting).
            if nargin == 12:
                baseMVA, bus, gen, branch, areas, gencost, Au, lbu,  ubu, ppopt,  N, fparm = args
                zu    = fparm
                zl    = N
                z0    = ppopt
                Cw    = ubu
                H     = lbu
                fparm = Au
                N     = gencost
                ppopt = areas
                ubu   = branch
                lbu   = gen
                Au    = bus
            elif nargin == 9:
                baseMVA, bus, gen, branch, areas, gencost, Au, lbu, ubu = args
                zu    = array([])
                zl    = array([])
                z0    = array([])
                Cw    = ubu
                H     = lbu
                fparm = Au
                N     = gencost
                ppopt = areas
                ubu   = branch
                lbu   = gen
                Au    = bus
            elif nargin == 5:
                baseMVA, bus, gen, branch, areas = args
                zu    = array([])
                zl    = array([])
                z0    = array([])
                Cw    = array([])
                H     = None
                fparm = array([])
                N     = None
                ppopt = areas
                ubu   = branch
                lbu   = gen
                Au    = bus
            elif nargin == 4:
                baseMVA, bus, gen, branch = args
                zu    = array([])
                zl    = array([])
                z0    = array([])
                Cw    = array([])
                H     = None
                fparm = array([])
                N     = None
                ppopt = ppoption()
                ubu   = branch
                lbu   = gen
                Au    = bus
            elif nargin == 3:
                baseMVA, bus, gen = args
                userfcn = bus
                zu    = array([])
                zl    = array([])
                z0    = array([])
                Cw    = array([])
                H     = None
                fparm = array([])
                N     = None
                ppopt = gen
                ubu   = array([])
                lbu   = array([])
                Au    = None
            elif nargin == 2:
                baseMVA, bus = args
                zu    = array([])
                zl    = array([])
                z0    = array([])
                Cw    = array([])
                H     = None
                fparm = array([])
                N     = None
                ppopt = bus
                ubu   = array([])
                lbu   = array([])
                Au    = None
            elif nargin == 1:
                zu    = array([])
                zl    = array([])
                z0    = array([])
                Cw    = array([])
                H     = None
                fparm = array([])
                N     = None
                ppopt = ppoption()
                ubu   = array([])
                lbu   = array([])
                Au    = None
        else:
            stderr.write('opf_args: Incorrect input arg order, number or type\n')

        ppc = loadcase(casefile)
        baseMVA, bus, gen, branch, gencost = \
            ppc['baseMVA'], ppc['bus'], ppc['gen'], ppc['branch'], ppc['gencost']
        if 'areas' in ppc:
            areas = ppc['areas']
        else:
            areas = array([])
        ## explicit arguments take precedence over fields of the case dict
        if Au is None and 'A' in ppc:
            Au, lbu, ubu = ppc["A"], ppc["l"], ppc["u"]
        if N is None and 'N' in ppc:  ## these two must go together
            N, Cw = ppc["N"], ppc["Cw"]
        if H is None and 'H' in ppc:  ## will default to zeros
            H = ppc["H"]
        if (fparm is None or len(fparm) == 0) and 'fparm' in ppc:  ## will default to [1 0 0 1]
            fparm = ppc["fparm"]
        if (z0 is None or len(z0) == 0) and 'z0' in ppc:
            z0 = ppc["z0"]
        if (zl is None or len(zl) == 0) and 'zl' in ppc:
            zl = ppc["zl"]
        if (zu is None or len(zu) == 0) and 'zu' in ppc:
            zu = ppc["zu"]
        if (userfcn is None or len(userfcn) == 0) and 'userfcn' in ppc:
            userfcn = ppc['userfcn']
    else: ## passing individual data matrices
        # ----opf(baseMVA, bus, gen, branch, areas, gencost,      Au, lbu, ubu, ppopt, N, fparm, H, Cw, z0, zl, zu)
        # 17  opf(baseMVA, bus, gen, branch, areas, gencost,      Au, lbu, ubu, ppopt, N, fparm, H, Cw, z0, zl, zu)
        # 14  opf(baseMVA, bus, gen, branch, areas, gencost,      Au, lbu, ubu, ppopt, N, fparm, H, Cw)
        # 10  opf(baseMVA, bus, gen, branch, areas, gencost,      Au, lbu, ubu, ppopt)
        # 9   opf(baseMVA, bus, gen, branch, areas, gencost,      Au, lbu, ubu)
        # 8   opf(baseMVA, bus, gen, branch, areas, gencost, userfcn, ppopt)
        # 7   opf(baseMVA, bus, gen, branch, areas, gencost, ppopt)
        # 6   opf(baseMVA, bus, gen, branch, areas, gencost)
        if nargin in [6, 7, 8, 9, 10, 14, 17]:
            if nargin == 17:
                baseMVA, bus, gen, branch, areas, gencost, Au, lbu, ubu, ppopt,  N, fparm, H, Cw, z0, zl, zu = args
            elif nargin == 14:
                baseMVA, bus, gen, branch, areas, gencost, Au, lbu, ubu, ppopt,  N, fparm, H, Cw = args
                zu = array([])
                zl = array([])
                z0 = array([])
            elif nargin == 10:
                baseMVA, bus, gen, branch, areas, gencost, Au, lbu, ubu, ppopt = args
                zu = array([])
                zl = array([])
                z0 = array([])
                Cw = array([])
                H = None
                fparm = array([])
                N = None
            elif nargin == 9:
                baseMVA, bus, gen, branch, areas, gencost, Au, lbu, ubu = args
                zu = array([])
                zl = array([])
                z0 = array([])
                Cw = array([])
                H = None
                fparm = array([])
                N = None
                ppopt = ppoption()
            elif nargin == 8:
                baseMVA, bus, gen, branch, areas, gencost, userfcn, ppopt = args
                zu = array([])
                zl = array([])
                z0 = array([])
                Cw = array([])
                H = None
                fparm = array([])
                N = None
                ubu = array([])
                lbu = array([])
                Au = None
            elif nargin == 7:
                baseMVA, bus, gen, branch, areas, gencost, ppopt = args
                zu = array([])
                zl = array([])
                z0 = array([])
                Cw = array([])
                H = None
                fparm = array([])
                N = None
                ubu = array([])
                lbu = array([])
                Au = None
            elif nargin == 6:
                baseMVA, bus, gen, branch, areas, gencost = args
                zu = array([])
                zl = array([])
                z0 = array([])
                Cw = array([])
                H = None
                fparm = array([])
                N = None
                ppopt = ppoption()
                ubu = array([])
                lbu = array([])
                Au = None
        else:
            stderr.write('opf_args: Incorrect input arg order, number or type\n')

    ## consistency checks on the generalized cost parameters
    if N is not None:
        nw = N.shape[0]
    else:
        nw = 0

    if nw:
        if Cw.shape[0] != nw:
            stderr.write('opf_args.m: dimension mismatch between N and Cw in '
                         'generalized cost parameters\n')
        if len(fparm) > 0 and fparm.shape[0] != nw:
            stderr.write('opf_args.m: dimension mismatch between N and fparm '
                         'in generalized cost parameters\n')
        ## BUG FIX: was "H.shape[0] != nw | H.shape[0] != nw", which Python
        ## parses as the chained comparison H.shape[0] != (nw | H.shape[0]) != nw
        ## (comparisons bind looser than "|") and also never checked the column
        ## dimension.  H must be nw x nw.
        if (H is not None) and (H.shape[0] != nw or H.shape[1] != nw):
            stderr.write('opf_args.m: dimension mismatch between N and H in '
                         'generalized cost parameters\n')
        if Au is not None:
            if Au.shape[0] > 0 and N.shape[1] != Au.shape[1]:
                stderr.write('opf_args.m: A and N must have the same number '
                             'of columns\n')
        ## make sure N and H are sparse
        if not issparse(N):
            stderr.write('opf_args.m: N must be sparse in generalized cost '
                         'parameters\n')
        if not issparse(H):
            stderr.write('opf_args.m: H must be sparse in generalized cost parameters\n')

    if Au is not None and not issparse(Au):
        stderr.write('opf_args.m: Au must be sparse\n')
    ## use identity, not equality, for the None check ("ppopt == None" would
    ## broadcast element-wise if ppopt were ever a numpy array)
    if ppopt is None or len(ppopt) == 0:
        ppopt = ppoption()

    return baseMVA, bus, gen, branch, gencost, Au, lbu, ubu, \
        ppopt, N, fparm, H, Cw, z0, zl, zu, userfcn, areas

Example 45

Project: PYPOWER
Source File: pips.py
View license
def pips(f_fcn, x0=None, A=None, l=None, u=None, xmin=None, xmax=None,
         gh_fcn=None, hess_fcn=None, opt=None):
    """Primal-dual interior point method for NLP (nonlinear programming).
    Minimize a function F(X) beginning from a starting point M{x0}, subject to
    optional linear and nonlinear constraints and variable bounds::

            min f(x)
             x

    subject to::

            g(x) = 0            (nonlinear equalities)
            h(x) <= 0           (nonlinear inequalities)
            l <= A*x <= u       (linear constraints)
            xmin <= x <= xmax   (variable bounds)

    Note: The calling syntax is almost identical to that of FMINCON from
    MathWorks' Optimization Toolbox. The main difference is that the linear
    constraints are specified with C{A}, C{L}, C{U} instead of C{A}, C{B},
    C{Aeq}, C{Beq}. The functions for evaluating the objective function,
    constraints and Hessian are identical.

    Example from U{http://en.wikipedia.org/wiki/Nonlinear_programming}:
        >>> from numpy import array, r_, float64, dot
        >>> from scipy.sparse import csr_matrix
        >>> def f2(x):
        ...     f = -x[0] * x[1] - x[1] * x[2]
        ...     df = -r_[x[1], x[0] + x[2], x[1]]
        ...     # actually not used since 'hess_fcn' is provided
        ...     d2f = -array([[0, 1, 0], [1, 0, 1], [0, 1, 0]], float64)
        ...     return f, df, d2f
        >>> def gh2(x):
        ...     h = dot(array([[1, -1, 1],
        ...                    [1,  1, 1]]), x**2) + array([-2.0, -10.0])
        ...     dh = 2 * csr_matrix(array([[ x[0], x[0]],
        ...                                [-x[1], x[1]],
        ...                                [ x[2], x[2]]]))
        ...     g = array([])
        ...     dg = None
        ...     return h, g, dh, dg
        >>> def hess2(x, lam, cost_mult=1):
        ...     mu = lam["ineqnonlin"]
        ...     a = r_[dot(2 * array([1, 1]), mu), -1, 0]
        ...     b = r_[-1, dot(2 * array([-1, 1]), mu),-1]
        ...     c = r_[0, -1, dot(2 * array([1, 1]), mu)]
        ...     Lxx = csr_matrix(array([a, b, c]))
        ...     return Lxx
        >>> x0 = array([1, 1, 0], float64)
        >>> solution = pips(f2, x0, gh_fcn=gh2, hess_fcn=hess2)
        >>> round(solution["f"], 11) == -7.07106725919
        True
        >>> solution["output"]["iterations"]
        8

    Ported by Richard Lincoln from the MATLAB Interior Point Solver (MIPS)
    (v1.9) by Ray Zimmerman.  MIPS is distributed as part of the MATPOWER
    project, developed at the Power System Engineering Research Center (PSERC) (PSERC),
    Cornell. See U{http://www.pserc.cornell.edu/matpower/} for more info.
    MIPS was ported by Ray Zimmerman from C code written by H. Wang for his
    PhD dissertation:
      - "On the Computation and Application of Multi-period
        Security-Constrained Optimal Power Flow for Real-time
        Electricity Market Operations", Cornell University, May 2007.

    See also:
      - H. Wang, C. E. Murillo-Sanchez, R. D. Zimmerman, R. J. Thomas,
        "On Computational Issues of Market-Based Optimal Power Flow",
        IEEE Transactions on Power Systems, Vol. 22, No. 3, Aug. 2007,
        pp. 1185-1193.

    All parameters are optional except C{f_fcn} and C{x0}.
    @param f_fcn: Function that evaluates the objective function, its gradients
                  and Hessian for a given value of M{x}. If there are
                  nonlinear constraints, the Hessian information is provided
                  by the 'hess_fcn' argument and is not required here.
    @type f_fcn: callable
    @param x0: Starting value of optimization vector M{x}.
    @type x0: array
    @param A: Optional linear constraints.
    @type A: csr_matrix
    @param l: Optional linear constraints. Default values are M{-Inf}.
    @type l: array
    @param u: Optional linear constraints. Default values are M{Inf}.
    @type u: array
    @param xmin: Optional lower bounds on the M{x} variables, defaults are
                 M{-Inf}.
    @type xmin: array
    @param xmax: Optional upper bounds on the M{x} variables, defaults are
                 M{Inf}.
    @type xmax: array
    @param gh_fcn: Function that evaluates the optional nonlinear constraints
                   and their gradients for a given value of M{x}.
    @type gh_fcn: callable
    @param hess_fcn: Handle to function that computes the Hessian of the
                     Lagrangian for given values of M{x}, M{lambda} and M{mu},
                     where M{lambda} and M{mu} are the multipliers on the
                     equality and inequality constraints, M{g} and M{h},
                     respectively.
    @type hess_fcn: callable
    @param opt: optional options dictionary with the following keys, all of
                which are also optional (default values shown in parentheses)
                  - C{verbose} (False) - Controls level of progress output
                    displayed
                  - C{feastol} (1e-6) - termination tolerance for feasibility
                    condition
                  - C{gradtol} (1e-6) - termination tolerance for gradient
                    condition
                  - C{comptol} (1e-6) - termination tolerance for
                    complementarity condition
                  - C{costtol} (1e-6) - termination tolerance for cost
                    condition
                  - C{max_it} (150) - maximum number of iterations
                  - C{step_control} (False) - set to True to enable step-size
                    control
                  - C{max_red} (20) - maximum number of step-size reductions if
                    step-control is on
                  - C{cost_mult} (1.0) - cost multiplier used to scale the
                    objective function for improved conditioning. Note: This
                    value is also passed as the 3rd argument to the Hessian
                    evaluation function so that it can appropriately scale the
                    objective function term in the Hessian of the Lagrangian.
    @type opt: dict

    @rtype: dict
    @return: The solution dictionary has the following keys:
               - C{x} - solution vector
               - C{f} - final objective function value
               - C{converged} - exit status
                   - True = first order optimality conditions satisfied
                   - False = maximum number of iterations reached
                   - None = numerically failed
               - C{output} - output dictionary with keys:
                   - C{iterations} - number of iterations performed
                   - C{hist} - list of arrays with trajectories of the
                     following: feascond, gradcond, compcond, costcond, gamma,
                     stepsize, obj, alphap, alphad
                   - C{message} - exit message
               - C{lmbda} - dictionary containing the Langrange and Kuhn-Tucker
                 multipliers on the constraints, with keys:
                   - C{eqnonlin} - nonlinear equality constraints
                   - C{ineqnonlin} - nonlinear inequality constraints
                   - C{mu_l} - lower (left-hand) limit on linear constraints
                   - C{mu_u} - upper (right-hand) limit on linear constraints
                   - C{lower} - lower bound on optimization variables
                   - C{upper} - upper bound on optimization variables

    @see: U{http://www.pserc.cornell.edu/matpower/}

    @author: Ray Zimmerman (PSERC Cornell)
    """
    if isinstance(f_fcn, dict):  ## problem dict
        p = f_fcn
        f_fcn = p['f_fcn']
        x0 = p['x0']
        if 'opt' in p: opt = p['opt']
        if 'hess_fcn' in p: hess_fcn = p['hess_fcn']
        if 'gh_fcn' in p: gh_fcn = p['gh_fcn']
        if 'xmax' in p: xmax = p['xmax']
        if 'xmin' in p: xmin = p['xmin']
        if 'u' in p: u = p['u']
        if 'l' in p: l = p['l']
        if 'A' in p: A = p['A']

    nx = x0.shape[0]                        # number of variables
    nA = A.shape[0] if A is not None else 0 # number of original linear constr

    # default argument values
    if l is None or len(l) == 0: l = -Inf * ones(nA)
    if u is None or len(u) == 0: u =  Inf * ones(nA)
    if xmin is None or len(xmin) == 0: xmin = -Inf * ones(x0.shape[0])
    if xmax is None or len(xmax) == 0: xmax =  Inf * ones(x0.shape[0])
    if gh_fcn is None:
        nonlinear = False
        gn = array([])
        hn = array([])
    else:
        nonlinear = True

    if opt is None: opt = {}
    # options
    if "feastol" not in opt:
        opt["feastol"] = 1e-06
    if "gradtol" not in opt:
        opt["gradtol"] = 1e-06
    if "comptol" not in opt:
        opt["comptol"] = 1e-06
    if "costtol" not in opt:
        opt["costtol"] = 1e-06
    if "max_it" not in opt:
        opt["max_it"] = 150
    if "max_red" not in opt:
        opt["max_red"] = 20
    if "step_control" not in opt:
        opt["step_control"] = False
    if "cost_mult" not in opt:
        opt["cost_mult"] = 1
    if "verbose" not in opt:
        opt["verbose"] = 0

    # initialize history
    hist = []

    # constants
    xi = 0.99995
    sigma = 0.1
    z0 = 1
    alpha_min = 1e-8
    rho_min = 0.95
    rho_max = 1.05
    mu_threshold = 1e-5

    # initialize
    i = 0                       # iteration counter
    converged = False           # flag
    eflag = False               # exit flag

    # add var limits to linear constraints
    eyex = eye(nx, nx, format="csr")
    AA = eyex if A is None else vstack([eyex, A], "csr")
    ll = r_[xmin, l]
    uu = r_[xmax, u]

    # split up linear constraints
    ieq = find( absolute(uu - ll) <= EPS )
    igt = find( (uu >=  1e10) & (ll > -1e10) )
    ilt = find( (ll <= -1e10) & (uu <  1e10) )
    ibx = find( (absolute(uu - ll) > EPS) & (uu < 1e10) & (ll > -1e10) )
    # zero-sized sparse matrices unsupported
    Ae = AA[ieq, :] if len(ieq) else None
    if len(ilt) or len(igt) or len(ibx):
        idxs = [(1, ilt), (-1, igt), (1, ibx), (-1, ibx)]
        Ai = vstack([sig * AA[idx, :] for sig, idx in idxs if len(idx)], 'csr')
    else:
        Ai = None
    be = uu[ieq]
    bi = r_[uu[ilt], -ll[igt], uu[ibx], -ll[ibx]]

    # evaluate cost f(x0) and constraints g(x0), h(x0)
    x = x0
    f, df = f_fcn(x)                 # cost
    f = f * opt["cost_mult"]
    df = df * opt["cost_mult"]
    if nonlinear:
        hn, gn, dhn, dgn = gh_fcn(x)        # nonlinear constraints
        h = hn if Ai is None else r_[hn, Ai * x - bi] # inequality constraints
        g = gn if Ae is None else r_[gn, Ae * x - be] # equality constraints

        if (dhn is None) and (Ai is None):
            dh = None
        elif dhn is None:
            dh = Ai.T
        elif Ai is None:
            dh = dhn
        else:
            dh = hstack([dhn, Ai.T])

        if (dgn is None) and (Ae is None):
            dg = None
        elif dgn is None:
            dg = Ae.T
        elif Ae is None:
            dg = dgn
        else:
            dg = hstack([dgn, Ae.T])
    else:
        h = -bi if Ai is None else Ai * x - bi        # inequality constraints
        g = -be if Ae is None else Ae * x - be        # equality constraints
        dh = None if Ai is None else Ai.T     # 1st derivative of inequalities
        dg = None if Ae is None else Ae.T     # 1st derivative of equalities

    # some dimensions
    neq = g.shape[0]           # number of equality constraints
    niq = h.shape[0]           # number of inequality constraints
    neqnln = gn.shape[0]       # number of nonlinear equality constraints
    niqnln = hn.shape[0]       # number of nonlinear inequality constraints
    nlt = len(ilt)             # number of upper bounded linear inequalities
    ngt = len(igt)             # number of lower bounded linear inequalities
    nbx = len(ibx)             # number of doubly bounded linear inequalities

    # initialize gamma, lam, mu, z, e
    gamma = 1                  # barrier coefficient
    lam = zeros(neq)
    z = z0 * ones(niq)
    mu = z0 * ones(niq)
    k = find(h < -z0)
    z[k] = -h[k]
    k = find((gamma / z) > z0)
    mu[k] = gamma / z[k]
    e = ones(niq)

    # check tolerance
    f0 = f
    if opt["step_control"]:
        L = f + dot(lam, g) + dot(mu, h + z) - gamma * sum(log(z))

    Lx = df.copy()
    Lx = Lx + dg * lam if dg is not None else Lx
    Lx = Lx + dh * mu  if dh is not None else Lx

    maxh = zeros(1) if len(h) == 0 else max(h)

    gnorm = norm(g, Inf) if len(g) else 0.0
    lam_norm = norm(lam, Inf) if len(lam) else 0.0
    mu_norm = norm(mu, Inf) if len(mu) else 0.0
    znorm = norm(z, Inf) if len(z) else 0.0
    feascond = \
        max([gnorm, maxh]) / (1 + max([norm(x, Inf), znorm]))
    gradcond = \
        norm(Lx, Inf) / (1 + max([lam_norm, mu_norm]))
    compcond = dot(z, mu) / (1 + norm(x, Inf))
    costcond = absolute(f - f0) / (1 + absolute(f0))

    # save history
    hist.append({'feascond': feascond, 'gradcond': gradcond,
        'compcond': compcond, 'costcond': costcond, 'gamma': gamma,
        'stepsize': 0, 'obj': f / opt["cost_mult"], 'alphap': 0, 'alphad': 0})

    if opt["verbose"]:
        s = '-sc' if opt["step_control"] else ''
        v = pipsver('all')
        print('Python Interior Point Solver - PIPS%s, Version %s, %s' %
                    (s, v['Version'], v['Date']))
        if opt['verbose'] > 1:
            print(" it    objective   step size   feascond     gradcond     "
                  "compcond     costcond  ")
            print("----  ------------ --------- ------------ ------------ "
                  "------------ ------------")
            print("%3d  %12.8g %10s %12g %12g %12g %12g" %
                (i, (f / opt["cost_mult"]), "",
                 feascond, gradcond, compcond, costcond))

    if feascond < opt["feastol"] and gradcond < opt["gradtol"] and \
        compcond < opt["comptol"] and costcond < opt["costtol"]:
        converged = True
        if opt["verbose"]:
            print("Converged!")

    # do Newton iterations
    while (not converged) and (i < opt["max_it"]):
        # update iteration counter
        i += 1

        # compute update step
        lmbda = {"eqnonlin": lam[range(neqnln)],
                 "ineqnonlin": mu[range(niqnln)]}
        if nonlinear:
            if hess_fcn is None:
                print("pips: Hessian evaluation via finite differences "
                      "not yet implemented.\nPlease provide "
                      "your own hessian evaluation function.")
            Lxx = hess_fcn(x, lmbda, opt["cost_mult"])
        else:
            _, _, d2f = f_fcn(x, True)      # cost
            Lxx = d2f * opt["cost_mult"]
        rz = range(len(z))
        zinvdiag = sparse((1.0 / z, (rz, rz))) if len(z) else None
        rmu = range(len(mu))
        mudiag = sparse((mu, (rmu, rmu))) if len(mu) else None
        dh_zinv = None if dh is None else dh * zinvdiag
        M = Lxx if dh is None else Lxx + dh_zinv * mudiag * dh.T
        N = Lx if dh is None else Lx + dh_zinv * (mudiag * h + gamma * e)

        Ab = sparse(M) if dg is None else vstack([
            hstack([M, dg]),
            hstack([dg.T, sparse((neq, neq))])
        ])
        bb = r_[-N, -g]

        dxdlam = spsolve(Ab.tocsr(), bb)

        if any(isnan(dxdlam)):
            if opt["verbose"]:
                print('\nNumerically Failed\n')
            eflag = -1
            break

        dx = dxdlam[:nx]
        dlam = dxdlam[nx:nx + neq]
        dz = -h - z if dh is None else -h - z - dh.T * dx
        dmu = -mu if dh is None else -mu + zinvdiag * (gamma * e - mudiag * dz)

        # optional step-size control
        sc = False
        if opt["step_control"]:
            x1 = x + dx

            # evaluate cost, constraints, derivatives at x1
            f1, df1 = f_fcn(x1)          # cost
            f1 = f1 * opt["cost_mult"]
            df1 = df1 * opt["cost_mult"]
            if nonlinear:
                hn1, gn1, dhn1, dgn1 = gh_fcn(x1) # nonlinear constraints

                h1 = hn1 if Ai is None else r_[hn1, Ai * x1 - bi] # ieq constraints
                g1 = gn1 if Ae is None else r_[gn1, Ae * x1 - be] # eq constraints

                # 1st der of ieq
                if (dhn1 is None) and (Ai is None):
                    dh1 = None
                elif dhn1 is None:
                    dh1 = Ai.T
                elif Ai is None:
                    dh1 = dhn1
                else:
                    dh1 = hstack([dhn1, Ai.T])

                # 1st der of eqs
                if (dgn1 is None) and (Ae is None):
                    dg1 = None
                elif dgn is None:
                    dg1 = Ae.T
                elif Ae is None:
                    dg1 = dgn1
                else:
                    dg1 = hstack([dgn1, Ae.T])
            else:
                h1 = -bi if Ai is None else Ai * x1 - bi    # inequality constraints
                g1 = -be if Ae is None else Ae * x1 - be    # equality constraints

                dh1 = dh                       ## 1st derivative of inequalities
                dg1 = dg                       ## 1st derivative of equalities

            # check tolerance
            Lx1 = df1
            Lx1 = Lx1 + dg1 * lam if dg1 is not None else Lx1
            Lx1 = Lx1 + dh1 * mu  if dh1 is not None else Lx1

            maxh1 = zeros(1) if len(h1) == 0 else max(h1)

            g1norm = norm(g1, Inf) if len(g1) else 0.0
            lam1_norm = norm(lam, Inf) if len(lam) else 0.0
            mu1_norm = norm(mu, Inf) if len(mu) else 0.0
            z1norm = norm(z, Inf) if len(z) else 0.0

            feascond1 = max([ g1norm, maxh1 ]) / \
                (1 + max([ norm(x1, Inf), z1norm ]))
            gradcond1 = norm(Lx1, Inf) / (1 + max([ lam1_norm, mu1_norm ]))

            if (feascond1 > feascond) and (gradcond1 > gradcond):
                sc = True
        if sc:
            alpha = 1.0
            for j in range(opt["max_red"]):
                dx1 = alpha * dx
                x1 = x + dx1
                f1, _ = f_fcn(x1)             # cost
                f1 = f1 * opt["cost_mult"]
                if nonlinear:
                    hn1, gn1, _, _ = gh_fcn(x1)              # nonlinear constraints
                    h1 = hn1 if Ai is None else r_[hn1, Ai * x1 - bi]         # inequality constraints
                    g1 = gn1 if Ae is None else r_[gn1, Ae * x1 - be]         # equality constraints
                else:
                    h1 = -bi if Ai is None else Ai * x1 - bi    # inequality constraints
                    g1 = -be if Ae is None else Ae * x1 - be    # equality constraints

                L1 = f1 + dot(lam, g1) + dot(mu, h1 + z) - gamma * sum(log(z))

                if opt["verbose"] > 2:
                    print("   %3d            %10.5f" % (-j, norm(dx1)))

                rho = (L1 - L) / (dot(Lx, dx1) + 0.5 * dot(dx1, Lxx * dx1))

                if (rho > rho_min) and (rho < rho_max):
                    break
                else:
                    alpha = alpha / 2.0
            dx = alpha * dx
            dz = alpha * dz
            dlam = alpha * dlam
            dmu = alpha * dmu

        # do the update
        k = find(dz < 0.0)
        alphap = min([xi * min(z[k] / -dz[k]), 1]) if len(k) else 1.0
        k = find(dmu < 0.0)
        alphad = min([xi * min(mu[k] / -dmu[k]), 1]) if len(k) else 1.0
        x = x + alphap * dx
        z = z + alphap * dz
        lam = lam + alphad * dlam
        mu = mu + alphad * dmu
        if niq > 0:
            gamma = sigma * dot(z, mu) / niq

        # evaluate cost, constraints, derivatives
        f, df = f_fcn(x)             # cost
        f = f * opt["cost_mult"]
        df = df * opt["cost_mult"]
        if nonlinear:
            hn, gn, dhn, dgn = gh_fcn(x)                   # nln constraints
#            g = gn if Ai is None else r_[gn, Ai * x - bi] # ieq constraints
#            h = hn if Ae is None else r_[hn, Ae * x - be] # eq constraints
            h = hn if Ai is None else r_[hn, Ai * x - bi] # ieq constr
            g = gn if Ae is None else r_[gn, Ae * x - be]  # eq constr

            if (dhn is None) and (Ai is None):
                dh = None
            elif dhn is None:
                dh = Ai.T
            elif Ai is None:
                dh = dhn
            else:
                dh = hstack([dhn, Ai.T])

            if (dgn is None) and (Ae is None):
                dg = None
            elif dgn is None:
                dg = Ae.T
            elif Ae is None:
                dg = dgn
            else:
                dg = hstack([dgn, Ae.T])
        else:
            h = -bi if Ai is None else Ai * x - bi    # inequality constraints
            g = -be if Ae is None else Ae * x - be    # equality constraints
            # 1st derivatives are constant, still dh = Ai.T, dg = Ae.T

        Lx = df
        Lx = Lx + dg * lam if dg is not None else Lx
        Lx = Lx + dh * mu  if dh is not None else Lx

        if len(h) == 0:
            maxh = zeros(1)
        else:
            maxh = max(h)

        gnorm = norm(g, Inf) if len(g) else 0.0
        lam_norm = norm(lam, Inf) if len(lam) else 0.0
        mu_norm = norm(mu, Inf) if len(mu) else 0.0
        znorm = norm(z, Inf) if len(z) else 0.0
        feascond = \
            max([gnorm, maxh]) / (1 + max([norm(x, Inf), znorm]))
        gradcond = \
            norm(Lx, Inf) / (1 + max([lam_norm, mu_norm]))
        compcond = dot(z, mu) / (1 + norm(x, Inf))
        costcond = float(absolute(f - f0) / (1 + absolute(f0)))

        hist.append({'feascond': feascond, 'gradcond': gradcond,
            'compcond': compcond, 'costcond': costcond, 'gamma': gamma,
            'stepsize': norm(dx), 'obj': f / opt["cost_mult"],
            'alphap': alphap, 'alphad': alphad})

        if opt["verbose"] > 1:
            print("%3d  %12.8g %10.5g %12g %12g %12g %12g" %
                (i, (f / opt["cost_mult"]), norm(dx), feascond, gradcond,
                 compcond, costcond))

        if feascond < opt["feastol"] and gradcond < opt["gradtol"] and \
            compcond < opt["comptol"] and costcond < opt["costtol"]:
            converged = True
            if opt["verbose"]:
                print("Converged!")
        else:
            if any(isnan(x)) or (alphap < alpha_min) or \
                (alphad < alpha_min) or (gamma < EPS) or (gamma > 1.0 / EPS):
                if opt["verbose"]:
                    print("Numerically failed.")
                eflag = -1
                break
            f0 = f

            if opt["step_control"]:
                L = f + dot(lam, g) + dot(mu, (h + z)) - gamma * sum(log(z))

    if opt["verbose"]:
        if not converged:
            print("Did not converge in %d iterations." % i)

    # package results
    if eflag != -1:
        eflag = converged

    if eflag == 0:
        message = 'Did not converge'
    elif eflag == 1:
        message = 'Converged'
    elif eflag == -1:
        message = 'Numerically failed'
    else:
        raise

    output = {"iterations": i, "hist": hist, "message": message}

    # zero out multipliers on non-binding constraints
    mu[find( (h < -opt["feastol"]) & (mu < mu_threshold) )] = 0.0

    # un-scale cost and prices
    f = f / opt["cost_mult"]
    lam = lam / opt["cost_mult"]
    mu = mu / opt["cost_mult"]

    # re-package multipliers into struct
    lam_lin = lam[neqnln:neq]           # lambda for linear constraints
    mu_lin = mu[niqnln:niq]             # mu for linear constraints
    kl = find(lam_lin < 0.0)     # lower bound binding
    ku = find(lam_lin > 0.0)     # upper bound binding

    mu_l = zeros(nx + nA)
    mu_l[ieq[kl]] = -lam_lin[kl]
    mu_l[igt] = mu_lin[nlt:nlt + ngt]
    mu_l[ibx] = mu_lin[nlt + ngt + nbx:nlt + ngt + nbx + nbx]

    mu_u = zeros(nx + nA)
    mu_u[ieq[ku]] = lam_lin[ku]
    mu_u[ilt] = mu_lin[:nlt]
    mu_u[ibx] = mu_lin[nlt + ngt:nlt + ngt + nbx]

    lmbda = {'mu_l': mu_l[nx:], 'mu_u': mu_u[nx:],
             'lower': mu_l[:nx], 'upper': mu_u[:nx]}

    if niqnln > 0:
        lmbda['ineqnonlin'] = mu[:niqnln]
    if neqnln > 0:
        lmbda['eqnonlin'] = lam[:neqnln]

#    lmbda = {"eqnonlin": lam[:neqnln], 'ineqnonlin': mu[:niqnln],
#             "mu_l": mu_l[nx:], "mu_u": mu_u[nx:],
#             "lower": mu_l[:nx], "upper": mu_u[:nx]}

    solution =  {"x": x, "f": f, "eflag": converged,
                 "output": output, "lmbda": lmbda}

    return solution

Example 46

Project: PYPOWER
Source File: qps_mosek.py
View license
def qps_mosek(H, c=None, A=None, l=None, u=None, xmin=None, xmax=None,
              x0=None, opt=None):
    """Quadratic Program Solver based on MOSEK.

    A wrapper function providing a PYPOWER standardized interface for using
    MOSEKOPT to solve the following QP (quadratic programming) problem::

        min 1/2 x'*H*x + c'*x
         x

    subject to::

        l <= A*x <= u       (linear constraints)
        xmin <= x <= xmax   (variable bounds)

    Inputs (all optional except C{H}, C{C}, C{A} and C{L}):
        - C{H} : matrix (possibly sparse) of quadratic cost coefficients
        - C{C} : vector of linear cost coefficients
        - C{A, l, u} : define the optional linear constraints. Default
        values for the elements of L and U are -Inf and Inf, respectively.
        - xmin, xmax : optional lower and upper bounds on the
        C{x} variables, defaults are -Inf and Inf, respectively.
        - C{x0} : optional starting value of optimization vector C{x}
        - C{opt} : optional options structure with the following fields,
        all of which are also optional (default values shown in parentheses)
            - C{verbose} (0) - controls level of progress output displayed
                - 0 = no progress output
                - 1 = some progress output
                - 2 = verbose progress output
            - C{max_it} (0) - maximum number of iterations allowed
                - 0 = use algorithm default
            - C{mosek_opt} - options struct for MOSEK, values in
            C{verbose} and C{max_it} override these options
        - C{problem} : The inputs can alternatively be supplied in a single
        C{problem} struct with fields corresponding to the input arguments
        described above: C{H, c, A, l, u, xmin, xmax, x0, opt}

    Outputs:
        - C{x} : solution vector
        - C{f} : final objective function value
        - C{exitflag} : exit flag
              - 1 = success
              - 0 = terminated at maximum number of iterations
              - -1 = primal or dual infeasible
              < 0 = the negative of the MOSEK return code
        - C{output} : output dict with the following fields:
            - C{r} - MOSEK return code
            - C{res} - MOSEK result dict
        - C{lmbda} : dict containing the Lagrange and Kuhn-Tucker
        multipliers on the constraints, with fields:
            - C{mu_l} - lower (left-hand) limit on linear constraints
            - C{mu_u} - upper (right-hand) limit on linear constraints
            - C{lower} - lower bound on optimization variables
            - C{upper} - upper bound on optimization variables

    @author: Ray Zimmerman (PSERC Cornell)
    """
    ##----- input argument handling  -----
    ## gather inputs
    if isinstance(H, dict):       ## problem struct
        p = H
    else:                         ## individual args
        p = {'H': H, 'c': c, 'A': A, 'l': l, 'u': u}
        if xmin is not None:
            p['xmin'] = xmin
        if xmax is not None:
            p['xmax'] = xmax
        if x0 is not None:
            p['x0'] = x0
        if opt is not None:
            p['opt'] = opt

    def _nonempty(key):
        """True if p[key] exists and holds a non-empty vector/matrix."""
        v = p.get(key)
        if v is None:
            return False
        try:
            return len(v) > 0
        except TypeError:          # scipy sparse matrices define no len()
            return v.shape[0] > 0

    ## define nx, set default values for H and c
    ## NOTE: the original code used bitwise &/| on comparison results, which
    ## Python parses as e.g. ('A' not in p) | (len(...) == 0 & ...) with the
    ## wrong precedence (and no short-circuiting) -- rewritten with and/or.
    ## Also the LP test was inverted: len(p['H']) (truthy when NON-empty)
    ## instead of len(p['H']) == 0.
    if not _nonempty('H') or not any(any(p['H'])):
        ## no quadratic term: this is an LP; infer nx from A or the bounds
        if not _nonempty('A') and not _nonempty('xmin') and \
                not _nonempty('xmax'):
            stderr.write('qps_mosek: LP problem must include constraints or variable bounds\n')
        else:
            if _nonempty('A'):
                nx = shape(p['A'])[1]
            elif _nonempty('xmin'):
                nx = len(p['xmin'])
            else:    # if isfield(p, 'xmax') && ~isempty(p.xmax)
                nx = len(p['xmax'])
        p['H'] = sparse((nx, nx))
        qp = 0
    else:
        nx = shape(p['H'])[0]
        qp = 1

    if not _nonempty('c'):
        p['c'] = zeros(nx)

    if not _nonempty('x0'):
        p['x0'] = zeros(nx)

    ## default options
    if 'opt' not in p or p['opt'] is None:
        p['opt'] = {}

    if 'verbose' in p['opt']:
        verbose = p['opt']['verbose']
    else:
        verbose = 0

    if 'max_it' in p['opt']:
        max_it = p['opt']['max_it']
    else:
        max_it = 0

    if 'mosek_opt' in p['opt']:
        mosek_opt = mosek_options(p['opt']['mosek_opt'])
    else:
        mosek_opt = mosek_options()

    if max_it:
        mosek_opt['MSK_IPAR_INTPNT_MAX_ITERATIONS'] = max_it

    if qp:
        mosek_opt['MSK_IPAR_OPTIMIZER'] = 0   ## default solver only for QP

    ## set up problem struct for MOSEK
    prob = {}
    prob['c'] = p['c']
    if qp:
        prob['qosubi'], prob['qosubj'], prob['qoval'] = find(tril(sparse(p['H'])))

    if _nonempty('A'):
        prob['a'] = sparse(p['A'])

    ## NOTE: the original tested len(p['A']) for the l/u bounds; test the
    ## bound vectors themselves.
    if _nonempty('l'):
        prob['blc'] = p['l']

    if _nonempty('u'):
        prob['buc'] = p['u']

    if _nonempty('xmin'):
        prob['blx'] = p['xmin']

    if _nonempty('xmax'):
        prob['bux'] = p['xmax']

    ## A is not allowed to be empty
    if 'a' not in prob or prob['a'].shape[0] == 0:
        unconstrained = True
        ## dummy 1 x nx constraint row with a single unit entry, made
        ## non-binding by infinite bounds
        prob['a'] = sparse(([1.0], ([0], [0])), (1, nx))
        ## was prob.blc / prob.buc: attribute access on a dict -> AttributeError
        prob['blc'] = -Inf
        prob['buc'] = Inf
    else:
        unconstrained = False

    ##-----  run optimization  -----
    if verbose:
        methods = [
            'default',
            'interior point',
            '<default>',
            '<default>',
            'primal simplex',
            'dual simplex',
            'primal dual simplex',
            'automatic simplex',
            '<default>',
            '<default>',
            'concurrent'
        ]
        ## use the qp flag decided above; the original re-tested the raw H
        ## argument, which is a dict when a problem struct was passed
        lpqp = 'QP' if qp else 'LP'

        ## version detection: the MATLAB original regex-parsed the banner
        ## printed by mosekopt, e.g.
        ##   MOSEK Version 6.0.0.93 (Build date: 2010-10-26 13:03:27)
        ## The previous translation, re.compile(eval('mosekopt'), pat), was a
        ## TypeError; this port does not capture the banner text.
        vn = '<unknown>'  # TODO(review): query MOSEK for its version string

        print('MOSEK Version %s -- %s %s solver\n' %
              (vn, methods[mosek_opt['MSK_IPAR_OPTIMIZER'] + 1], lpqp))

    cmd = 'minimize echo(%d)' % verbose
    r, res = mosekopt(cmd, prob, mosek_opt)

    ##-----  repackage results  -----
    if 'sol' in res:
        ## was res['sol.bas'] / res['sol.itr'] -- flat keys never exist;
        ## the solutions are nested dicts
        if 'bas' in res['sol']:
            sol = res['sol']['bas']
        else:
            sol = res['sol']['itr']
        x = sol['xx']
    else:
        sol = array([])
        x = array([])

    ##-----  process return codes  -----
    if 'symbcon' in res:
        sc = res['symbcon']
    else:
        r2, res2 = mosekopt('symbcon echo(0)')
        sc = res2['symbcon']

    eflag = -r
    msg = ''
    ## symbolic constants: use dict access consistently (the original mixed
    ## sc.MSK_RES_OK attribute access with sc['...'] lookups)
    if r == sc['MSK_RES_OK']:
        if len(sol) > 0:
#            if sol['solsta'] == sc.MSK_SOL_STA_OPTIMAL:
            if sol['solsta'] == 'OPTIMAL':
                msg = 'The solution is optimal.'
                eflag = 1
            else:
                eflag = -1
#                if sol['prosta'] == sc['MSK_PRO_STA_PRIM_INFEAS']:
                if sol['prosta'] == 'PRIMAL_INFEASIBLE':
                    msg = 'The problem is primal infeasible.'
#                elif sol['prosta'] == sc['MSK_PRO_STA_DUAL_INFEAS']:
                elif sol['prosta'] == 'DUAL_INFEASIBLE':
                    msg = 'The problem is dual infeasible.'
                else:
                    msg = sol['solsta']

    elif r == sc['MSK_RES_TRM_MAX_ITERATIONS']:
        eflag = 0
        msg = 'The optimizer terminated at the maximum number of iterations.'
    else:
        if 'rmsg' in res and 'rcodestr' in res:
            msg = '%s : %s' % (res['rcodestr'], res['rmsg'])
        else:
            msg = 'MOSEK return code = %d' % r

    ## always alert user if license is expired
    ## (was len(msg) < 0, which can never be true)
    if (verbose or r == 1001) and len(msg) > 0:
        stdout.write('%s\n' % msg)

    ##-----  repackage results  -----
    if r == 0:
        f = p['c'].T * x
        ## add the quadratic term only for QPs; p['H'] is a len()-less
        ## sparse placeholder in the LP case
        if qp:
            f = 0.5 * x.T * p['H'] * x + f
    else:
        f = array([])

    output = {}
    output['r'] = r
    output['res'] = res

    if 'sol' in res:
        lmbda = {}
        lmbda['lower'] = sol['slx']
        lmbda['upper'] = sol['sux']
        lmbda['mu_l']  = sol['slc']
        lmbda['mu_u']  = sol['suc']
        if unconstrained:
            lmbda['mu_l']  = array([])
            lmbda['mu_u']  = array([])
    else:
        lmbda = array([])

    return x, f, eflag, output, lmbda

Example 47

Project: PYPOWER
Source File: savecase.py
View license
def savecase(fname, ppc, comment=None, version='2'):
    """Saves a PYPOWER case file, given a filename and the data.

    Writes a PYPOWER case file, given a filename and data dict. The C{fname}
    parameter is the name of the file to be created or overwritten. Returns
    the filename, with extension added if necessary. The optional C{comment}
    argument is either string (single line comment) or a list of strings which
    are inserted as comments. When using a PYPOWER case dict, if the
    optional C{version} argument is '1' it will modify the data matrices to
    version 1 format before saving.

    @author: Carlos E. Murillo-Sanchez (PSERC Cornell & Universidad
    Autonoma de Manizales)
    @author: Ray Zimmerman (PSERC Cornell)
    """
    ppc_ver = ppc["version"] = version
    baseMVA, bus, gen, branch = \
        ppc["baseMVA"], ppc["bus"], ppc["gen"], ppc["branch"]
    areas = ppc["areas"] if "areas" in ppc else None
    gencost = ppc["gencost"] if "gencost" in ppc else None

    ## modifications for version 1 format
    if ppc_ver == "1":
        raise NotImplementedError
#        ## remove extra columns of gen
#        if gen.shape[1] >= MU_QMIN:
#            gen = c_[gen[:, :PMIN], gen[:, MU_PMAX:MU_QMIN]]
#        else:
#            gen = gen[:, :PMIN]
#        ## use the version 1 values for column names
#        shift = MU_PMAX - PMIN - 1
#        tmp = array([MU_PMAX, MU_PMIN, MU_QMAX, MU_QMIN]) - shift
#        MU_PMAX, MU_PMIN, MU_QMAX, MU_QMIN = tmp
#
#        ## remove extra columns of branch
#        if branch.shape[1] >= MU_ST:
#            branch = c_[branch[:, :BR_STATUS], branch[:, PF:MU_ST]]
#        elif branch.shape[1] >= QT:
#            branch = c_[branch[:, :BR_STATUS], branch[:, PF:QT]]
#        else:
#            branch = branch[:, :BR_STATUS]
#        ## use the version 1 values for column names
#        shift = PF - BR_STATUS - 1
#        tmp = array([PF, QF, PT, QT, MU_SF, MU_ST]) - shift
#        PF, QF, PT, QT, MU_SF, MU_ST = tmp

    ## verify valid filename
    l = len(fname)
    rootname = ""
    if l > 2:
        if fname[-3:] == ".py":
            rootname = fname[:-3]
            extension = ".py"
        elif l > 4:
            if fname[-4:] == ".mat":
                rootname = fname[:-4]
                extension = ".mat"

    if not rootname:
        rootname = fname
        extension = ".py"
        fname = rootname + extension

    indent = '    '  # four spaces
    indent2 = indent + indent

    ## open and write the file
    if extension == ".mat":     ## MAT-file
        ppc_mat = {}
        ppc_mat['version'] = ppc_ver
        ppc_mat['baseMVA'] = baseMVA
        ppc_keys = ['bus', 'gen', 'branch']
        # Assign non-scalar values as NumPy arrays
        for key in ppc_keys:
            ppc_mat[key] = array(ppc[key])
        if 'areas' in ppc:
            ppc_mat['areas'] = array(ppc['areas'])
        if 'gencost' in ppc:
            ppc_mat['gencost'] = array(ppc['gencost'])
        if "A" in ppc and len(ppc["A"]) > 0:
            ppc_mat["A"] = array(ppc["A"])
            if "l" in ppc and len(ppc["l"]) > 0:
                ppc_mat["l"] = array(ppc["l"])
            if "u" in ppc and len(ppc["u"]) > 0:
                ppc_mat["u"] = array(ppc["u"])
        if "N" in ppc and len(ppc["N"]) > 0:
            ppc_mat["N"] = array(ppc["N"])
            if "H" in ppc and len(ppc["H"]) > 0:
                ppc_mat["H"] = array(ppc["H"])
            if "fparm" in ppc and len(ppc["fparm"]) > 0:
                ppc_mat["fparm"] = array(ppc["fparm"])
            ppc_mat["Cw"] = array(ppc["Cw"])
        if 'z0' in ppc or 'zl' in ppc or 'zu' in ppc:
            if 'z0' in ppc and len(ppc['z0']) > 0:
                ppc_mat['z0'] = array(ppc['z0'])
            if 'zl' in ppc and len(ppc['zl']) > 0:
                ppc_mat['zl'] = array(ppc['zl'])
            if 'zu' in ppc and len(ppc['zu']) > 0:
                ppc_mat['zu'] = array(ppc['zu'])
        if 'userfcn' in ppc and len(ppc['userfcn']) > 0:
            ppc_mat['userfcn'] = array(ppc['userfcn'])
        elif 'userfcn' in ppc:
            ppc_mat['userfcn'] = ppc['userfcn']
        for key in ['x', 'f']:
            if key in ppc:
                ppc_mat[key] = ppc[key]
        for key in ['lin', 'order', 'nln', 'var', 'raw', 'mu']:
            if key in ppc:
                ppc_mat[key] = array(ppc[key])

        savemat(fname, ppc_mat)
    else:                       ## Python file
        try:
            ## was open(fname, writemode): 'writemode' is undefined -> NameError
            fd = open(fname, "w")
        except Exception as detail:
            stderr.write("savecase: %s.\n" % detail)
            return fname

        ## function header, etc.
        if ppc_ver == "1":
            raise NotImplementedError
#            if (areas != None) and (gencost != None) and (len(gencost) > 0):
#                fd.write('function [baseMVA, bus, gen, branch, areas, gencost] = %s\n' % rootname)
#            else:
#                fd.write('function [baseMVA, bus, gen, branch] = %s\n' % rootname)
#            prefix = ''
        else:
            fd.write('def %s():\n' % basename(rootname))
            prefix = 'ppc'
        if comment:
            if isinstance(comment, basestring):
                fd.write('#%s\n' % comment)
            elif isinstance(comment, list):
                for c in comment:
                    fd.write('#%s\n' % c)
        fd.write('\n%s## PYPOWER Case Format : Version %s\n' % (indent, ppc_ver))
        if ppc_ver != "1":
            fd.write("%sppc = {'version': '%s'}\n" % (indent, ppc_ver))
        fd.write('\n%s##-----  Power Flow Data  -----##\n' % indent)
        fd.write('%s## system MVA base\n' % indent)
        fd.write("%s%s['baseMVA'] = %.9g\n" % (indent, prefix, baseMVA))

        ## bus data
        ncols = bus.shape[1]
        fd.write('\n%s## bus data\n' % indent)
        fd.write('%s# bus_i type Pd Qd Gs Bs area Vm Va baseKV zone Vmax Vmin' % indent)
        if ncols >= MU_VMIN + 1:             ## opf SOLVED, save with lambda's & mu's
            fd.write('lam_P lam_Q mu_Vmax mu_Vmin')
        fd.write("\n%s%s['bus'] = array([\n" % (indent, prefix))
        if ncols < MU_VMIN + 1:              ## opf NOT SOLVED, save without lambda's & mu's
            for i in range(bus.shape[0]):
                fd.write('%s[%d, %d, %.9g, %.9g, %.9g, %.9g, %d, %.9g, %.9g, %.9g, %d, %.9g, %.9g],\n' % ((indent2,) + tuple(bus[i, :VMIN + 1])))
        else:                            ## opf SOLVED, save with lambda's & mu's
            for i in range(bus.shape[0]):
                fd.write('%s[%d, %d, %.9g, %.9g, %.9g, %.9g, %d, %.9g, %.9g, %.9g, %d, %.9g, %.9g, %.4f, %.4f, %.4f, %.4f],\n' % ((indent2,) + tuple(bus[i, :MU_VMIN + 1])))
        fd.write('%s])\n' % indent)

        ## generator data
        ncols = gen.shape[1]
        fd.write('\n%s## generator data\n' % indent)
        fd.write('%s# bus Pg Qg Qmax Qmin Vg mBase status Pmax Pmin' % indent)
        if ppc_ver != "1":
            fd.write(' Pc1 Pc2 Qc1min Qc1max Qc2min Qc2max ramp_agc ramp_10 ramp_30 ramp_q apf')
        if ncols >= MU_QMIN + 1:             # opf SOLVED, save with mu's
            fd.write(' mu_Pmax mu_Pmin mu_Qmax mu_Qmin')
        fd.write("\n%s%s['gen'] = array([\n" % (indent, prefix))
        if ncols < MU_QMIN + 1:              ## opf NOT SOLVED, save without mu's
            if ppc_ver == "1":
                for i in range(gen.shape[0]):
                    fd.write('%s[%d, %.9g, %.9g, %.9g, %.9g, %.9g, %.9g, %d, %.9g, %.9g],\n' % ((indent2,) + tuple(gen[i, :PMIN + 1])))
            else:
                for i in range(gen.shape[0]):
                    fd.write('%s[%d, %.9g, %.9g, %.9g, %.9g, %.9g, %.9g, %d, %.9g, %.9g, %.9g, %.9g, %.9g, %.9g, %.9g, %.9g, %.9g, %.9g, %.9g, %.9g, %.9g],\n' % ((indent2,) + tuple(gen[i, :APF + 1])))
        else:
            if ppc_ver == "1":
                for i in range(gen.shape[0]):
                    fd.write('%s[%d, %.9g, %.9g, %.9g, %.9g, %.9g, %.9g, %d, %.9g, %.9g, %.4f, %.4f, %.4f, %.4f],\n' % ((indent2,) + tuple(gen[i, :MU_QMIN + 1])))
            else:
                for i in range(gen.shape[0]):
                    fd.write('%s[%d, %.9g, %.9g, %.9g, %.9g, %.9g, %.9g, %d, %.9g, %.9g, %.9g, %.9g, %.9g, %.9g, %.9g, %.9g, %.9g, %.9g, %.9g, %.9g, %.9g, %.4f, %.4f, %.4f, %.4f],\n' % ((indent2,) + tuple(gen[i, :MU_QMIN + 1])))
        fd.write('%s])\n' % indent)

        ## branch data
        ncols = branch.shape[1]
        fd.write('\n%s## branch data\n' % indent)
        fd.write('%s# fbus tbus r x b rateA rateB rateC ratio angle status' % indent)
        if ppc_ver != "1":
            fd.write(' angmin angmax')
        if ncols >= QT + 1:                  ## power flow SOLVED, save with line flows
            fd.write(' Pf Qf Pt Qt')
        if ncols >= MU_ST + 1:               ## opf SOLVED, save with mu's
            fd.write(' mu_Sf mu_St')
            if ppc_ver != "1":
                fd.write(' mu_angmin mu_angmax')
        fd.write('\n%s%s[\'branch\'] = array([\n' % (indent, prefix))
        if ncols < QT + 1:                   ## power flow NOT SOLVED, save without line flows or mu's
            if ppc_ver == "1":
                for i in range(branch.shape[0]):
                    fd.write('%s[%d, %d, %.9g, %.9g, %.9g, %.9g, %.9g, %.9g, %.9g, %.9g, %d],\n' % ((indent2,) + tuple(branch[i, :BR_STATUS + 1])))
            else:
                for i in range(branch.shape[0]):
                    fd.write('%s[%d, %d, %.9g, %.9g, %.9g, %.9g, %.9g, %.9g, %.9g, %.9g, %d, %.9g, %.9g],\n' % ((indent2,) + tuple(branch[i, :ANGMAX + 1])))
        elif ncols < MU_ST + 1:            ## power flow SOLVED, save with line flows but without mu's
            if ppc_ver == "1":
                for i in range(branch.shape[0]):
                    fd.write('%s[%d, %d, %.9g, %.9g, %.9g, %.9g, %.9g, %.9g, %.9g, %.9g, %d, %.4f, %.4f, %.4f, %.4f],\n' % ((indent2,) + tuple(branch[i, :QT + 1])))
            else:
                for i in range(branch.shape[0]):
                    fd.write('%s[%d, %d, %.9g, %.9g, %.9g, %.9g, %.9g, %.9g, %.9g, %.9g, %d, %.9g, %.9g, %.4f, %.4f, %.4f, %.4f],\n' % ((indent2,) + tuple(branch[i, :QT + 1])))
        else:                            ## opf SOLVED, save with lineflows & mu's
            if ppc_ver == "1":
                for i in range(branch.shape[0]):
                    fd.write('%s[%d, %d, %.9g, %.9g, %.9g, %.9g, %.9g, %.9g, %.9g, %.9g, %d, %.4f, %.4f, %.4f, %.4f, %.4f, %.4f],\n' % ((indent2,) + tuple(branch[i, :MU_ST + 1])))
            else:
                for i in range(branch.shape[0]):
                    fd.write('%s[%d, %d, %.9g, %.9g, %.9g, %.9g, %.9g, %.9g, %.9g, %.9g, %d, %.9g, %.9g, %.4f, %.4f, %.4f, %.4f, %.4f, %.4f, %.4f, %.4f],\n' % ((indent2,) + tuple(branch[i, :MU_ANGMAX + 1])))
        fd.write('%s])\n' % indent)

        ## OPF data
        if (areas is not None) and (len(areas) > 0) or (gencost is not None) and (len(gencost) > 0):
            fd.write('\n%s##-----  OPF Data  -----##' % indent)
        if (areas is not None) and (len(areas) > 0):
            ## area data
            fd.write('\n%s## area data\n' % indent)
            fd.write('%s# area refbus\n' % indent)
            fd.write("%s%s['areas'] = array([\n" % (indent, prefix))
            if len(areas) > 0:
                for i in range(areas.shape[0]):
                    fd.write('%s[%d, %d],\n' % ((indent2,) + tuple(areas[i, :PRICE_REF_BUS + 1])))
            fd.write('%s])\n' % indent)
        if gencost is not None and len(gencost) > 0:
            ## generator cost data
            fd.write('\n%s## generator cost data\n' % indent)
            fd.write('%s# 1 startup shutdown n x1 y1 ... xn yn\n' % indent)
            fd.write('%s# 2 startup shutdown n c(n-1) ... c0\n' % indent)
            fd.write('%s%s[\'gencost\'] = array([\n' % (indent, prefix))
            ## was len(gencost > 0): len of the elementwise comparison array
            if len(gencost) > 0:
                if any(gencost[:, MODEL] == PW_LINEAR):
                    n1 = 2 * max(gencost[gencost[:, MODEL] == PW_LINEAR,  NCOST])
                else:
                    n1 = 0
                if any(gencost[:, MODEL] == POLYNOMIAL):
                    n2 =     max(gencost[gencost[:, MODEL] == POLYNOMIAL, NCOST])
                else:
                    n2 = 0
                n = int( max([n1, n2]) )
                if gencost.shape[1] < n + 4:
                    stderr.write('savecase: gencost data claims it has more columns than it does\n')
                template = '%s[%d, %.9g, %.9g, %d'
                for i in range(n):
                    template = template + ', %.9g'
                template = template + '],\n'
                for i in range(gencost.shape[0]):
                    fd.write(template % ((indent2,) + tuple(gencost[i])))
            fd.write('%s])\n' % indent)

        ## generalized OPF user data
        if ("A" in ppc) and (len(ppc["A"]) > 0) or ("N" in ppc) and (len(ppc["N"]) > 0):
            fd.write('\n%s##-----  Generalized OPF User Data  -----##' % indent)

        ## user constraints
        if ("A" in ppc) and (len(ppc["A"]) > 0):
            ## A
            fd.write('\n%s## user constraints\n' % indent)
            print_sparse(fd, prefix + "['A']", ppc["A"])
            if ("l" in ppc) and (len(ppc["l"]) > 0) and ("u" in ppc) and (len(ppc["u"]) > 0):
                fd.write('%slu = array([\n' % indent)
                for i in range(len(ppc["l"])):
                    fd.write('%s[%.9g, %.9g],\n' % (indent2, ppc["l"][i], ppc["u"][i]))
                fd.write('%s])\n' % indent)
                fd.write("%s%s['l'] = lu[:, 0]\n" % (indent, prefix))
                fd.write("%s%s['u'] = lu[:, 1]\n\n" % (indent, prefix))
            elif ("l" in ppc) and (len(ppc["l"]) > 0):
                fd.write("%s%s['l'] = array([\n" % (indent, prefix))
                ## was range(len(l)): 'l' is the filename length here, not the bound vector
                for i in range(len(ppc["l"])):
                    fd.write('%s[%.9g],\n' % (indent2, ppc["l"][i]))
                fd.write('%s])\n\n' % indent)
            elif ("u" in ppc) and (len(ppc["u"]) > 0):
                fd.write("%s%s['u'] = array([\n" % (indent, prefix))
                ## was range(len(l)): must iterate over ppc["u"]
                for i in range(len(ppc["u"])):
                    fd.write('%s[%.9g],\n' % (indent2, ppc["u"][i]))
                fd.write('%s])\n\n' % indent)

        ## user costs
        if ("N" in ppc) and (len(ppc["N"]) > 0):
            fd.write('\n%s## user costs\n' % indent)
            print_sparse(fd, prefix + "['N']", ppc["N"])
            if ("H" in ppc) and (len(ppc["H"]) > 0):
                print_sparse(fd, prefix + "['H']", ppc["H"])
            if ("fparm" in ppc) and (len(ppc["fparm"]) > 0):
                fd.write("%sCw_fparm = array([\n" % indent)
                ## was range(ppc["Cw"]) over the array itself, and
                ## tuple(ppc["Cw"][i]) on a scalar entry
                for i in range(len(ppc["Cw"])):
                    fd.write('%s[%.9g, %d, %.9g, %.9g, %.9g],\n' % ((indent2, ppc["Cw"][i]) + tuple(ppc["fparm"][i, :])))
                fd.write('%s])\n' % indent)
                fd.write('%s%s[\'Cw\']    = Cw_fparm[:, 0]\n' % (indent, prefix))
                fd.write("%s%s['fparm'] = Cw_fparm[:, 1:5]\n" % (indent, prefix))
            else:
                fd.write("%s%s['Cw'] = array([\n" % (indent, prefix))
                for i in range(len(ppc["Cw"])):
                    fd.write('%s[%.9g],\n' % (indent2, ppc["Cw"][i]))
                fd.write('%s])\n' % indent)

        ## user vars
        if ('z0' in ppc) or ('zl' in ppc) or ('zu' in ppc):
            fd.write('\n%s## user vars\n' % indent)
            if ('z0' in ppc) and (len(ppc['z0']) > 0):
                fd.write('%s%s["z0"] = array([\n' % (indent, prefix))
                for i in range(len(ppc['z0'])):
                    ## was ppc["z0"] (whole array) instead of element i
                    fd.write('%s[%.9g],\n' % (indent2, ppc["z0"][i]))
                fd.write('%s])\n' % indent)
            if ('zl' in ppc) and (len(ppc['zl']) > 0):
                ## header used indent2 where the z0/zu siblings use indent
                fd.write('%s%s["zl"] = array([\n' % (indent, prefix))
                for i in range(len(ppc['zl'])):
                    fd.write('%s[%.9g],\n' % (indent2, ppc["zl"][i]))
                fd.write('%s])\n' % indent)
            if ('zu' in ppc) and (len(ppc['zu']) > 0):
                fd.write('%s%s["zu"] = array([\n' % (indent, prefix))
                for i in range(len(ppc['zu'])):
                    fd.write('%s[%.9g],\n' % (indent2, ppc["zu"][i]))
                fd.write('%s])\n' % indent)

        ## execute userfcn callbacks for 'savecase' stage
        if 'userfcn' in ppc:
            run_userfcn(ppc["userfcn"], 'savecase', ppc, fd, prefix)

        fd.write('\n%sreturn ppc\n' % indent)

        ## close file
        fd.close()

    return fname

Example 48

Project: PYPOWER
Source File: t_auction_pips.py
View license
def t_auction_pips(quiet=False):
    """Tests for code in auction.py, using PIPS solver.

    Drives the smart-market ``runmkt`` through every ``auction_type``
    (0-8) for a series of marginal-offer/bid scenarios and checks the
    cleared quantities (``cq``) and prices (``cp``) against known values.

    @param quiet: if True, suppress test harness output
    @author: Ray Zimmerman (PSERC Cornell)
    """
    n_tests = 183

    t_begin(n_tests, quiet)

    try:
        from pypower.extras.smartmarket import runmkt
    except ImportError:
        t_skip(n_tests, 'smartmarket code not available')
        return

    ## ppoption() is a factory; it must be CALLED to get a fresh options
    ## dict (the original code bound the function object itself).
    ppopt = ppoption()
    ppopt['OPF_VIOLATION'] = 1e-7
    ppopt['PDIPM_GRADTOL'] = 1e-6
    ppopt['PDIPM_COMPTOL'] = 1e-7
    ppopt['PDIPM_COSTTOL'] = 5e-9
    ppopt['OPF_ALG'] = 560
    ppopt['OUT_ALL_LIM'] = 1
    ppopt['OUT_BRANCH'] = 0
    ppopt['OUT_SYS_SUM'] = 0
    ppopt['OUT_ALL'] = 0
    ppopt['VERBOSE'] = 0

    ## offer/bid quantities: rows 0-5 are generators, rows 6-8 are
    ## dispatchable loads; columns are price/quantity blocks
    q = array([
        [12, 24, 24],
        [12, 24, 24],
        [12, 24, 24],
        [12, 24, 24],
        [12, 24, 24],
        [12, 24, 24],
        [10, 10, 10],
        [10, 10, 10],
        [10, 10, 10],
    ])

    ##-----  one offer block marginal @ $50  -----
    p = array([
        [20, 50, 60],
        [20, 40, 70],
        [20, 42, 80],
        [20, 44, 90],
        [20, 46, 75],
        [20, 48, 60],
        [100, 70, 60],
        [100, 50, 20],
        [100, 60, 50]
    ])

    t = 'one marginal offer @ $50, auction_type = 5'
    MVAbase, cq, cp, bus, gen, gencost, branch, f, dispatch, success, et = \
        runmkt('t_auction_case', q, p, 1150, 100, [], [], ppopt)
    cq5 = cq.copy()
    cp5 = cp.copy()
    i2e = bus.bus_i
    e2i = sparse((max(i2e), 1))
    e2i[i2e] = range(bus.size())
    G = find( isload(gen) == False )   ## real generators
    L = find( isload(gen) )   ## dispatchable loads
    Gbus = e2i[gen.gen_bus[G]]
    Lbus = e2i[gen.gen_bus[L]]
    ## Qfudge converts load P prices to total (P+Q) prices via the Q/P ratio
    Qfudge = zeros(p.shape)
    Qfudge[L, :] = \
        diag(gen.Qg[L] / gen.Pg[L] * bus.lam_Q[Lbus]) * ones(p[L, :].shape)

    t_is( cq[G[0], 1:3], [23.32, 0], 2, t )
    t_is( cp[G[0], :], 50, 4, t )
    t_is( cq[L[1], 0:2], [10, 0], 2, t )
    t_is( cp[L[1], :], 54.0312, 4, t )
    t_is( cp[G, 0], bus.lam_P[Gbus], 8, [t, ' : gen prices'] )
    t_is( cp[L, 0], bus.lam_P[Lbus] + Qfudge[L, 0], 8, [t, ' : load prices'] )

    ## scaling ratios: last accepted offer / first rejected offer /
    ## last accepted bid / first rejected bid, each over the nodal price
    ## (bracket indexing; the original retained MATLAB paren-call syntax)
    lao_X = p[G[0], 1] / bus.lam_P[Gbus[0]]
    fro_X = p[G[5], 2] / bus.lam_P[Gbus[5]]
    lab_X = p[L[2], 1] / (bus.lam_P[Lbus[2]] + Qfudge[L[2], 0])
    frb_X = p[L[1], 1] / (bus.lam_P[Lbus[1]] + Qfudge[L[1], 0])

    t_is( lao_X, 1, 4, 'lao_X')
    t_is( fro_X, 1.1324, 4, 'fro_X')
    t_is( lab_X, 1.0787, 4, 'lab_X')
    t_is( frb_X, 0.9254, 4, 'frb_X')

    t = 'one marginal offer @ $50, auction_type = 1'
    MVAbase, cq, cp, bus, gen, gencost, branch, f, dispatch, success, et = \
        runmkt('t_auction_case', q, p, 1110, 100, [], [], ppopt)
    cp1 = cp.copy()
    t_is( cq, cq5, 8, [t, ' : quantities'] )
    t_is( cp, cp5, 6, [t, ' : prices'] )

    t = 'one marginal offer @ $50, auction_type = 2'
    MVAbase, cq, cp, bus, gen, gencost, branch, f, dispatch, success, et = \
        runmkt('t_auction_case', q, p, 1120, 100, [], [], ppopt)
    cp2 = cp.copy()
    t_is( cq, cq5, 8, [t, ' : quantities'] )
    t_is( cp[G, :], cp5[G, :] * fro_X, 8, [t, ' : gen prices'] )
    t_is( cp[L[0:1], :], cp5[L[0:1], :] * fro_X, 8, [t, ' : load 1,2 prices'] )
    t_is( cp[L[2], :], 60, 5, [t, ' : load 3 price'] )   ## clipped by accepted bid

    t = 'one marginal offer @ $50, auction_type = 3'
    MVAbase, cq, cp, bus, gen, gencost, branch, f, dispatch, success, et = \
        runmkt('t_auction_case', q, p, 1130, 100, [], [], ppopt)
    cp3 = cp.copy()
    t_is( cq, cq5, 8, [t, ' : quantities'] )
    t_is( cp, cp5 * lab_X, 8, [t, ' : prices'] )

    t = 'one marginal offer @ $50, auction_type = 4'
    MVAbase, cq, cp, bus, gen, gencost, branch, f, dispatch, success, et = \
        runmkt('t_auction_case', q, p, 1140, 100, [], [], ppopt)
    t_is( cq, cq5, 8, [t, ' : quantities'] )
    t_is( cp[G[0], :], p[G[0], 1], 8, [t, ' : gen 1 price'] )
    t_is( cp[G[1:6], :], cp5[G[1:6], :] * frb_X, 8, [t, ' : gen 2-6 prices'] )
    t_is( cp[L, :], cp5[L, :] * frb_X, 8, [t, ' : load prices'] )

    t = 'one marginal offer @ $50, auction_type = 6'
    MVAbase, cq, cp, bus, gen, gencost, branch, f, dispatch, success, et = \
        runmkt('t_auction_case', q, p, 1160, 100, [], [], ppopt)
    t_is( cq, cq5, 8, [t, ' : quantities'] )
    t_is( cp, cp3, 8, [t, ' : prices'] )
    p2 = p.copy()
    p2[L, :] = array([
        [100, 100, 100],
        [100,   0,   0],
        [100, 100,   0]
    ])
    MVAbase, cq, cp, bus, gen, gencost, branch, f, dispatch, success, et = \
        runmkt('t_auction_case', q, p2, 1160, 100, [], [], ppopt)
    t_is( cq, cq5, 5, [t, ' : quantities'] )
    t_is( cp[G, :], cp5[G, :] * fro_X, 4, [t, ' : gen prices'] )
    t_is( cp[L, :], cp5[L, :] * fro_X, 4, [t, ' : load prices'] ) ## load 3 not clipped as in FRO

    t = 'one marginal offer @ $50, auction_type = 7'
    MVAbase, cq, cp, bus, gen, gencost, branch, f, dispatch, success, et = \
        runmkt('t_auction_case', q, p, 1170, 100, [], [], ppopt)
    t_is( cq, cq5, 8, [t, ' : quantities'] )
    t_is( cp, cp5 * (lao_X + lab_X) / 2, 8, [t, ' : prices'] )
    t_is( cp, (cp1 + cp3) / 2, 8, [t, ' : prices'] )

    t = 'one marginal offer @ $50, auction_type = 8'
    MVAbase, cq, cp, bus, gen, gencost, branch, f, dispatch, success, et = \
        runmkt('t_auction_case', q, p, 1180, 100, [], [], ppopt)
    t_is( cq, cq5, 8, [t, ' : quantities'] )
    t_is( cp[G, :], cp1[G, :], 8, [t, ' : gen prices'] )
    t_is( cp[L, :], cp3[L, :], 8, [t, ' : load prices'] )

    t = 'one marginal offer @ $50, auction_type = 0'
    MVAbase, cq, cp, bus, gen, gencost, branch, f, dispatch, success, et = \
        runmkt('t_auction_case', q, p, 1100, 100, [], [], ppopt)
    t_is( cq, cq5, 8, [t, ' : quantities'] )
    t_is( cp, p, 8, [t, ' : prices'] )


    ##-----  one bid block marginal @ $55  -----
    p[L[1], 1] = 55
    t = 'one marginal bid @ $55, auction_type = 5'
    MVAbase, cq, cp, bus, gen, gencost, branch, f, dispatch, success, et = \
        runmkt('t_auction_case', q, p, 1150, 100, [], [], ppopt)
    cq5 = cq.copy()
    cp5 = cp.copy()
    Qfudge =  zeros(p.shape)
    Qfudge[L, :] = diag(gen.Qg[L] / gen.Pg[L] * bus.lam_Q[Lbus]) * ones(p[L, :].shape)

    t_is( cq[G[0], 1:3], [24, 0], 2, t )
    t_is( cp[G[0], :], 50.016, 3, t )
    t_is( cq[L[1], 0:2], [10, 0.63], 2, t )
    t_is( cp[L[1], :], 55, 4, t )
    t_is( cp[G, 0], bus.lam_P[Gbus], 8, [t, ' : gen prices'] )
    t_is( cp[L, 0], bus.lam_P[Lbus] + Qfudge[L, 0], 8, [t, ' : load prices'] )

    lao_X = p[G[0], 1] / bus.lam_P[Gbus[0]]
    fro_X = p[G[5], 2] / bus.lam_P[Gbus[5]]
    lab_X = p[L[1], 1] / (bus.lam_P[Lbus[1]] + Qfudge[L[1], 0])
    frb_X = p[L[2], 2] / (bus.lam_P[Lbus[2]] + Qfudge[L[2], 0])

    t_is( lao_X, 0.9997, 4, 'lao_X')
    t_is( fro_X, 1.1111, 4, 'fro_X')
    t_is( lab_X, 1, 4, 'lab_X')
    t_is( frb_X, 0.8960, 4, 'frb_X')

    t = 'one marginal bid @ $55, auction_type = 1'
    MVAbase, cq, cp, bus, gen, gencost, branch, f, dispatch, success, et = \
        runmkt('t_auction_case', q, p, 1110, 100, [], [], ppopt)
    cp1 = cp.copy()
    t_is( cq, cq5, 8, [t, ' : quantities'] )
    t_is( cp, cp5 * lao_X, 8, [t, ' : prices'] )

    t = 'one marginal bid @ $55, auction_type = 2'
    MVAbase, cq, cp, bus, gen, gencost, branch, f, dispatch, success, et = \
        runmkt('t_auction_case', q, p, 1120, 100, [], [], ppopt)
    cp2 = cp.copy()
    t_is( cq, cq5, 8, [t, ' : quantities'] )
    t_is( cp[G, :], cp5[G, :] * fro_X, 8, [t, ' : gen prices'] )
    t_is( cp[L[0], :], cp5[L[0], :] * fro_X, 8, [t, ' : load 1 price'] )
    t_is( cp[L[1], :], 55, 5, [t, ' : load 2 price'] )
    t_is( cp[L[2], :], 60, 5, [t, ' : load 3 price'] )

    t = 'one marginal bid @ $55, auction_type = 3'
    MVAbase, cq, cp, bus, gen, gencost, branch, f, dispatch, success, et = \
        runmkt('t_auction_case', q, p, 1130, 100, [], [], ppopt)
    cp3 = cp.copy()
    t_is( cq, cq5, 8, [t, ' : quantities'] )
    t_is( cp, cp5, 7, [t, ' : prices'] )

    t = 'one marginal bid @ $55, auction_type = 4'
    MVAbase, cq, cp, bus, gen, gencost, branch, f, dispatch, success, et = \
        runmkt('t_auction_case', q, p, 1140, 100, [], [], ppopt)
    cp4 = cp.copy()
    t_is( cq, cq5, 8, [t, ' : quantities'] )
    t_is( cp[G[0], :], 50, 5, [t, ' : gen 1 price'] )
    t_is( cp[G[1:6], :], cp5[G[1:6], :] * frb_X, 8, [t, ' : gen 2-6 prices'] )
    t_is( cp[L, :], cp5[L, :] * frb_X, 8, [t, ' : load prices'] )

    t = 'one marginal bid @ $55, auction_type = 6'
    MVAbase, cq, cp, bus, gen, gencost, branch, f, dispatch, success, et = \
        runmkt('t_auction_case', q, p, 1160, 100, [], [], ppopt)
    t_is( cq, cq5, 8, [t, ' : quantities'] )
    t_is( cp, cp1, 8, [t, ' : prices'] )

    p2 = p.copy()
    p2[G, :] = array([
        [0, 0, 100],
        [0, 0, 100],
        [0, 0, 100],
        [0, 0, 100],
        [0, 0, 100],
        [0, 0, 100]
    ])
    MVAbase, cq, cp, bus, gen, gencost, branch, f, dispatch, success, et = \
        runmkt('t_auction_case', q, p2, 1160, 100, [], [], ppopt)
    t_is( cq, cq5, 3, [t, ' : quantities'] )
    t_is( cp[G, :], cp5[G, :] * frb_X, 3, [t, ' : gen prices'] )  ## gen 1, not clipped this time
    t_is( cp[L, :], cp4[L, :], 3, [t, ' : load prices'] )

    t = 'one marginal bid @ $55, auction_type = 7'
    MVAbase, cq, cp, bus, gen, gencost, branch, f, dispatch, success, et = \
        runmkt('t_auction_case', q, p, 1170, 100, [], [], ppopt)
    t_is( cq, cq5, 8, [t, ' : quantities'] )
    t_is( cp, cp5 * (lao_X + lab_X) / 2, 8, [t, ' : prices'] )
    t_is( cp, (cp1 + cp3) / 2, 8, [t, ' : prices'] )

    t = 'one marginal bid @ $55, auction_type = 8'
    MVAbase, cq, cp, bus, gen, gencost, branch, f, dispatch, success, et = \
        runmkt('t_auction_case', q, p, 1180, 100, [], [], ppopt)
    t_is( cq, cq5, 8, [t, ' : quantities'] )
    t_is( cp[G, :], cp1[G, :], 8, [t, ' : gen prices'] )
    t_is( cp[L, :], cp3[L, :], 8, [t, ' : load prices'] )

    t = 'one marginal bid @ $55, auction_type = 0'
    MVAbase, cq, cp, bus, gen, gencost, branch, f, dispatch, success, et = \
        runmkt('t_auction_case', q, p, 1100, 100, [], [], ppopt)
    t_is( cq, cq5, 8, [t, ' : quantities'] )
    t_is( cp, p, 8, [t, ' : prices'] )


    ##-----  one bid block marginal @ $54.50 and one offer block marginal @ $50  -----
    p[L[1], 1] = 54.5
    t = 'marginal offer @ $50, bid @ $54.50, auction_type = 5'
    MVAbase, cq, cp, bus, gen, gencost, branch, f, dispatch, success, et = \
        runmkt('t_auction_case', q, p, 1150, 100, [], [], ppopt)
    cq5 = cq.copy()
    cp5 = cp.copy()
    Qfudge =  zeros(p.shape)
    Qfudge[L, :] = diag(gen.Qg[L] / gen.Pg[L] * bus.lam_Q[Lbus]) * ones(p[L, :].shape)

    t_is( cq[G[0], 1:3], [23.74, 0], 2, t )
    t_is( cp[G[0], :], 50, 4, t )
    t_is( cq[L[1], 0:2], [10, 0.39], 2, t )
    t_is( cp[L[1], :], 54.5, 4, t )
    t_is( cp[G, 0], bus.lam_P[Gbus], 8, [t, ' : gen prices'] )
    t_is( cp[L, 0], bus.lam_P[Lbus] + Qfudge[L, 0], 8, [t, ' : load prices'] )

    lao_X = p[G[0], 1] / bus.lam_P[Gbus[0]]
    fro_X = p[G[5], 2] / bus.lam_P[Gbus[5]]
    lab_X = p[L[1], 1] / (bus.lam_P[Lbus[1]] + Qfudge[L[1], 0])
    frb_X = p[L[2], 2] / (bus.lam_P[Lbus[2]] + Qfudge[L[2], 0])

    t_is( lao_X, 1, 4, 'lao_X')
    t_is( fro_X, 1.1221, 4, 'fro_X')
    t_is( lab_X, 1, 4, 'lab_X')
    t_is( frb_X, 0.8976, 4, 'frb_X')

    t = 'marginal offer @ $50, bid @ $54.50, auction_type = 1'
    MVAbase, cq, cp, bus, gen, gencost, branch, f, dispatch, success, et = \
        runmkt('t_auction_case', q, p, 1110, 100, [], [], ppopt)
    cp1 = cp.copy()
    t_is( cq, cq5, 8, [t, ' : quantities'] )
    t_is( cp, cp5, 4, [t, ' : prices'] )

    t = 'marginal offer @ $50, bid @ $54.50, auction_type = 2'
    MVAbase, cq, cp, bus, gen, gencost, branch, f, dispatch, success, et = \
        runmkt('t_auction_case', q, p, 1120, 100, [], [], ppopt)
    cp2 = cp.copy()
    t_is( cq, cq5, 8, [t, ' : quantities'] )
    t_is( cp[G, :], cp5[G, :] * fro_X, 5, [t, ' : gen prices'] )
    t_is( cp[L[0], :], cp5[L[0], :] * fro_X, 5, [t, ' : load 1 price'] )
    t_is( cp[L[1], :], 54.5, 5, [t, ' : load 2 price'] )
    t_is( cp[L[2], :], 60, 5, [t, ' : load 3 price'] )

    t = 'marginal offer @ $50, bid @ $54.50, auction_type = 3'
    MVAbase, cq, cp, bus, gen, gencost, branch, f, dispatch, success, et = \
        runmkt('t_auction_case', q, p, 1130, 100, [], [], ppopt)
    cp3 = cp.copy()
    t_is( cq, cq5, 8, [t, ' : quantities'] )
    t_is( cp, cp5, 6, [t, ' : prices'] )

    t = 'marginal offer @ $50, bid @ $54.50, auction_type = 4'
    MVAbase, cq, cp, bus, gen, gencost, branch, f, dispatch, success, et = \
        runmkt('t_auction_case', q, p, 1140, 100, [], [], ppopt)
    cp4 = cp.copy()
    t_is( cq, cq5, 8, [t, ' : quantities'] )
    t_is( cp[G[0], :], 50, 5, [t, ' : gen 1 price'] )
    t_is( cp[G[1:5], :], cp5[G[1:5], :] * frb_X, 8, [t, ' : gen 2-5 prices'] )
    t_is( cp[G[5], :], 48, 5, [t, ' : gen 6 price'] )
    t_is( cp[L, :], cp5[L, :] * frb_X, 8, [t, ' : load prices'] )

    t = 'marginal offer @ $50, bid @ $54.50, auction_type = 6'
    MVAbase, cq, cp, bus, gen, gencost, branch, f, dispatch, success, et = \
        runmkt('t_auction_case', q, p, 1160, 100, [], [], ppopt)
    t_is( cq, cq5, 8, [t, ' : quantities'] )
    t_is( cp, cp5, 4, [t, ' : prices'] )

    t = 'marginal offer @ $50, bid @ $54.50, auction_type = 7'
    MVAbase, cq, cp, bus, gen, gencost, branch, f, dispatch, success, et = \
        runmkt('t_auction_case', q, p, 1170, 100, [], [], ppopt)
    t_is( cq, cq5, 8, [t, ' : quantities'] )
    t_is( cp, cp5, 4, [t, ' : prices'] )

    t = 'marginal offer @ $50, bid @ $54.50, auction_type = 8'
    MVAbase, cq, cp, bus, gen, gencost, branch, f, dispatch, success, et = \
        runmkt('t_auction_case', q, p, 1180, 100, [], [], ppopt)
    t_is( cq, cq5, 8, [t, ' : quantities'] )
    t_is( cp, cp5, 4, [t, ' : prices'] )

    t = 'marginal offer @ $50, bid @ $54.50, auction_type = 0'
    MVAbase, cq, cp, bus, gen, gencost, branch, f, dispatch, success, et = \
        runmkt('t_auction_case', q, p, 1100, 100, [], [], ppopt)
    t_is( cq, cq5, 8, [t, ' : quantities'] )
    t_is( cp, p, 8, [t, ' : prices'] )


    ##-----  gen 1 at Pmin, load 3 block 2 marginal @ $60  -----
    t = 'gen 1 @ Pmin, marginal bid @ $60, auction_type = 5'
    p[L[1], 1] = 50     ## undo previous change
    p2 = p.copy()
    p2[G[0], 1:3] = [65, 65]
    MVAbase, cq, cp, bus, gen, gencost, branch, f, dispatch, success, et = \
        runmkt('t_auction_case', q, p2, 1150, 100, [], [], ppopt)
    Qfudge =  zeros(p.shape)
    Qfudge[L, :] = diag(gen.Qg[L] / gen.Pg[L] * bus.lam_Q[Lbus]) * ones(p[L, :].shape)

    t_is( cp[G[0], :], 65, 4, [t, ' : gen 1 price'] )
    t_is( cp[G[1], :], 54.2974, 4, [t, ' : gen 2 price'] )
    cq5 = cq.copy()
    cp5 = cp.copy()
    cp_lam = cp5.copy()
    cp_lam[0, :] = bus.lam_P[Gbus[0]]  ## unclipped

    lao_X = p2[G[5], 1] / bus.lam_P[Gbus[5]]
    fro_X = p2[G[5], 2] / bus.lam_P[Gbus[5]]
    lab_X = p2[L[2], 1] / (bus.lam_P[Lbus[2]] + Qfudge[L[2], 0])
    frb_X = p2[L[1], 1] / (bus.lam_P[Lbus[1]] + Qfudge[L[1], 0])

    t_is( lao_X, 0.8389, 4, 'lao_X')
    t_is( fro_X, 1.0487, 4, 'fro_X')
    t_is( lab_X, 1, 4, 'lab_X')
    t_is( frb_X, 0.8569, 4, 'frb_X')

    t = 'gen 1 @ Pmin, marginal bid @ $60, auction_type = 1'
    MVAbase, cq, cp, bus, gen, gencost, branch, f, dispatch, success, et = \
        runmkt('t_auction_case', q, p2, 1110, 100, [], [], ppopt)
    cp1 = cp.copy()
    t_is( cq, cq5, 8, [t, ' : quantities'] )
    t_is( cp[G[0], :], 65, 8, [t, ' : gen 1 price'] )
    t_is( cp[G[1:6], :], cp_lam[G[1:6], :] * lao_X, 8, [t, ' : gen 2-6 prices'] )
    t_is( cp[L, :], cp_lam[L, :] * lao_X, 8, [t, ' : load prices'] )

    t = 'gen 1 @ Pmin, marginal bid @ $60, auction_type = 2'
    MVAbase, cq, cp, bus, gen, gencost, branch, f, dispatch, success, et = \
        runmkt('t_auction_case', q, p2, 1120, 100, [], [], ppopt)
    t_is( cq, cq5, 8, [t, ' : quantities'] )
    t_is( cp[G[0], :], 65, 8, [t, ' : gen 1 price'] )
    t_is( cp[G[1:6], :], cp_lam[G[1:6], :] * fro_X, 8, [t, ' : gen 2-6 prices'] )
    t_is( cp[L[0:2], :], cp_lam[L[0:2], :] * fro_X, 8, [t, ' : load 1-2 prices'] )
    t_is( cp[L[2], :], 60, 8, [t, ' : load 3 price'] )

    t = 'gen 1 @ Pmin, marginal bid @ $60, auction_type = 3'
    MVAbase, cq, cp, bus, gen, gencost, branch, f, dispatch, success, et = \
        runmkt('t_auction_case', q, p2, 1130, 100, [], [], ppopt)
    cp3 = cp.copy()
    t_is( cq, cq5, 8, [t, ' : quantities'] )
    t_is( cp[G[0], :], 65, 8, [t, ' : gen 1 price'] )
    t_is( cp[G[1:6], :], cp_lam[G[1:6], :], 6, [t, ' : gen 2-6 prices'] )
    t_is( cp[L, :], cp_lam[L, :], 6, [t, ' : load prices'] )

    t = 'gen 1 @ Pmin, marginal bid @ $60, auction_type = 4'
    MVAbase, cq, cp, bus, gen, gencost, branch, f, dispatch, success, et = \
        runmkt('t_auction_case', q, p2, 1140, 100, [], [], ppopt)
    cp4 = cp.copy()
    t_is( cq, cq5, 8, [t, ' : quantities'] )
    t_is( cp[G[0], :], 65, 5, [t, ' : gen 1 price'] )
    t_is( cp[G[1:6], :], cp5[G[1:6], :] * frb_X, 8, [t, ' : gen 2-6 prices'] )
    t_is( cp[L, :], cp5[L, :] * frb_X, 8, [t, ' : load prices'] )

    t = 'gen 1 @ Pmin, marginal bid @ $60, auction_type = 6'
    MVAbase, cq, cp, bus, gen, gencost, branch, f, dispatch, success, et = \
        runmkt('t_auction_case', q, p2, 1160, 100, [], [], ppopt)
    t_is( cq, cq5, 8, [t, ' : quantities'] )
    t_is( cp, cp4, 8, [t, ' : prices'] )

    t = 'gen 1 @ Pmin, marginal bid @ $60, auction_type = 7'
    MVAbase, cq, cp, bus, gen, gencost, branch, f, dispatch, success, et = \
        runmkt('t_auction_case', q, p2, 1170, 100, [], [], ppopt)
    t_is( cq, cq5, 8, [t, ' : quantities'] )
    t_is( cp[G[0], :], 65, 4, [t, ' : gen 1 price'] )
    t_is( cp[G[1:6], :], cp_lam[G[1:6], :] * (lao_X + lab_X) / 2, 8, [t, ' : gen 2-6 prices'] )
    t_is( cp[L, :], cp_lam[L, :] * (lao_X + lab_X) / 2, 8, [t, ' : load prices'] )
    t_is( cp, (cp1 + cp3) / 2, 8, [t, ' : prices'] )

    t = 'gen 1 @ Pmin, marginal bid @ $60, auction_type = 8'
    MVAbase, cq, cp, bus, gen, gencost, branch, f, dispatch, success, et = \
        runmkt('t_auction_case', q, p2, 1180, 100, [], [], ppopt)
    t_is( cq, cq5, 8, [t, ' : quantities'] )
    t_is( cp[G, :], cp1[G, :], 8, [t, ' : prices'] )
    t_is( cp[L, :], cp3[L, :], 8, [t, ' : prices'] )

    t = 'gen 1 @ Pmin, marginal bid @ $60, auction_type = 0'
    MVAbase, cq, cp, bus, gen, gencost, branch, f, dispatch, success, et = \
        runmkt('t_auction_case', q, p2, 1100, 100, [], [], ppopt)
    t_is( cq, cq5, 8, [t, ' : quantities'] )
    t_is( cp, p2, 8, [t, ' : prices'] )


    ##-----  gen 1 at Pmin, gen 6 block 3 marginal @ $60  -----
    t = 'gen 1 @ Pmin, marginal offer @ $60, auction_type = 5'
    p2[L, :] = array([
        [100, 100, 100],
        [100,   0,   0],
        [100, 100,   0]
    ])
    MVAbase, cq, cp, bus, gen, gencost, branch, f, dispatch, success, et = \
        runmkt('t_auction_case', q, p2, 1150, 100, [], [], ppopt)
    Qfudge =  zeros(p.shape)
    Qfudge[L, :] = diag(gen.Qg[L] / gen.Pg[L] * bus.lam_Q[Lbus]) * ones(p[L, :].shape)

    t_is( cp[G[0], :], 65, 4, [t, ' : gen 1 price'] )
    t_is( cp[G[1], :], 57.1612, 4, [t, ' : gen 2 price'] )
    cq5 = cq.copy()
    cp5 = cp.copy()
    cp_lam = cp5.copy()
    cp_lam[0, :] = bus.lam_P[Gbus[0]]  ## unclipped

    lao_X = p2[G[5], 2] / bus.lam_P[Gbus[5]]
    fro_X = p2[G[0], 2] / bus.lam_P[Gbus[0]]
    lab_X = p2[L[2], 1] / (bus.lam_P[Lbus[2]] + Qfudge[L[2], 0])
    frb_X = p2[L[1], 1] / (bus.lam_P[Lbus[1]] + Qfudge[L[1], 0])

    t_is( lao_X, 1, 4, 'lao_X')
    t_is( fro_X, 1.1425, 4, 'fro_X')
    t_is( lab_X, 1.5813, 4, 'lab_X')
    t_is( frb_X, 0, 4, 'frb_X')

    t = 'gen 1 @ Pmin, marginal offer @ $60, auction_type = 1'
    MVAbase, cq, cp, bus, gen, gencost, branch, f, dispatch, success, et = \
        runmkt('t_auction_case', q, p2, 1110, 100, [], [], ppopt)
    cp1 = cp.copy()
    t_is( cq, cq5, 8, [t, ' : quantities'] )
    t_is( cp, cp5, 6, [t, ' : prices'] )

    t = 'gen 1 @ Pmin, marginal offer @ $60, auction_type = 2'
    MVAbase, cq, cp, bus, gen, gencost, branch, f, dispatch, success, et = \
        runmkt('t_auction_case', q, p2, 1120, 100, [], [], ppopt)
    t_is( cq, cq5, 8, [t, ' : quantities'] )
    t_is( cp, cp_lam * fro_X, 8, [t, ' : prices'] )

    t = 'gen 1 @ Pmin, marginal offer @ $60, auction_type = 3'
    MVAbase, cq, cp, bus, gen, gencost, branch, f, dispatch, success, et = \
        runmkt('t_auction_case', q, p2, 1130, 100, [], [], ppopt)
    cp3 = cp.copy()
    t_is( cq, cq5, 8, [t, ' : quantities'] )
    t_is( cp, cp_lam * lab_X, 8, [t, ' : prices'] )

    t = 'gen 1 @ Pmin, marginal offer @ $60, auction_type = 4'
    MVAbase, cq, cp, bus, gen, gencost, branch, f, dispatch, success, et = \
        runmkt('t_auction_case', q, p2, 1140, 100, [], [], ppopt)
    t_is( cq, cq5, 8, [t, ' : quantities'] )
    ## expected gen prices: gen 1 clipped at 65, gens 2-6 at their offers
    ## (the scraped original had the six values fused into one number)
    t_is( cp[G, 0], [65, 40, 42, 44, 46, 60], 4, [t, ' : gen prices'] )
    t_is( cp[L, :], cp_lam[L, :] * frb_X, 8, [t, ' : prices'] )

    t = 'gen 1 @ Pmin, marginal offer @ $60, auction_type = 6'
    MVAbase, cq, cp, bus, gen, gencost, branch, f, dispatch, success, et = \
        runmkt('t_auction_case', q, p2, 1160, 100, [], [], ppopt)
    t_is( cq, cq5, 8, [t, ' : quantities'] )
    t_is( cp, cp_lam * fro_X, 8, [t, ' : prices'] )

    t = 'gen 1 @ Pmin, marginal offer @ $60, auction_type = 7'
    MVAbase, cq, cp, bus, gen, gencost, branch, f, dispatch, success, et = \
        runmkt('t_auction_case', q, p2, 1170, 100, [], [], ppopt)
    t_is( cq, cq5, 8, [t, ' : quantities'] )
    t_is( cp, cp_lam * (lao_X + lab_X) / 2, 8, [t, ' : prices'] )
    t_is( cp, (cp_lam + cp3) / 2, 7, [t, ' : prices'] )

    t = 'gen 1 @ Pmin, marginal offer @ $60, auction_type = 8'
    MVAbase, cq, cp, bus, gen, gencost, branch, f, dispatch, success, et = \
        runmkt('t_auction_case', q, p2, 1180, 100, [], [], ppopt)
    t_is( cq, cq5, 8, [t, ' : quantities'] )
    t_is( cp[G, :], cp5[G, :], 7, [t, ' : prices'] )
    t_is( cp[L, :], cp3[L, :], 8, [t, ' : prices'] )

    t = 'gen 1 @ Pmin, marginal offer @ $60, auction_type = 0'
    MVAbase, cq, cp, bus, gen, gencost, branch, f, dispatch, success, et = \
        runmkt('t_auction_case', q, p2, 1100, 100, [], [], ppopt)
    t_is( cq, cq5, 8, [t, ' : quantities'] )
    t_is( cp, p2, 8, [t, ' : prices'] )


    ##-----  gen 2 decommitted, one offer block marginal @ $60  -----
    p[G[1], :] = p[G[1], :] + 100

    t = 'price of decommited gen, auction_type = 5'
    MVAbase, cq, cp, bus, gen, gencost, branch, f, dispatch, success, et = \
        runmkt('t_auction_case', q, p, 1150, 200, [], [], ppopt)
    cp5 = cp.copy()
    Qfudge =  zeros(p.shape)
    Qfudge[L, :] = diag(gen.Qg[L] / gen.Pg[L] * bus.lam_Q[Lbus]) * ones(p[L, :].shape)
    t_is(sum(cq[1, :]), 0, 8, t)
    t_is(cp[1, 0], 59.194, 3, t)

    # Xo = p[0:6, :] / (diag(bus.lam_P[Gbus]) * ones(p[G, :].shape))
    # ao = (cq[0:6, :] != 0)
    # ro = (cq[0:6, :] == 0)
    # Xb = p[6:9, :] / (diag(bus.lam_P[Lbus] + gen.Qg[L] / gen.Pg[L] * bus.lam_Q[Lbus]) * ones(p[L, :].shape))
    # ab = (cq[6:9, :] != 0)
    # rb = (cq[6:9, :] == 0)
    # aXo = ao * Xo
    # rXo = ro * Xo
    # aXb = ab * Xb
    # rXb = rb * Xb

    lao_X = p[G[5], 2] / bus.lam_P[Gbus[5]]
    fro_X = p[G[0], 2] / bus.lam_P[Gbus[0]]
    lab_X = p[L[0], 1] / (bus.lam_P[Lbus[0]] + Qfudge[L[0], 0])
    frb_X = p[L[0], 2] / (bus.lam_P[Lbus[0]] + Qfudge[L[0], 0])

    t_is( lao_X, 1, 4, 'lao_X')
    t_is( fro_X, 1.0212, 4, 'fro_X')
    t_is( lab_X, 1.1649, 4, 'lab_X')
    t_is( frb_X, 0.9985, 4, 'frb_X')

    t = 'price of decommited gen, auction_type = 1'
    MVAbase, cq, cp, bus, gen, gencost, branch, f, dispatch, success, et = \
        runmkt('t_auction_case', q, p, 1110, 200, [], [], ppopt)
    t_is(cp[1, 0], 59.194, 3, t)

    t = 'price of decommited gen, auction_type = 2'
    MVAbase, cq, cp, bus, gen, gencost, branch, f, dispatch, success, et = \
        runmkt('t_auction_case', q, p, 1120, 200, [], [], ppopt)
    t_is(cp[1, 0], cp5[1, 0] * fro_X, 3, t)

    t = 'price of decommited gen, auction_type = 3'
    MVAbase, cq, cp, bus, gen, gencost, branch, f, dispatch, success, et = \
        runmkt('t_auction_case', q, p, 1130, 200, [], [], ppopt)
    t_is(cp[1, 0], cp5[1, 0] * lab_X, 3, t)

    t = 'price of decommited gen, auction_type = 4'
    MVAbase, cq, cp, bus, gen, gencost, branch, f, dispatch, success, et = \
        runmkt('t_auction_case', q, p, 1140, 200, [], [], ppopt)
    t_is(cp[1, 0], cp5[1, 0] * frb_X, 3, t)

    t = 'price of decommited gen, auction_type = 6'
    MVAbase, cq, cp, bus, gen, gencost, branch, f, dispatch, success, et = \
        runmkt('t_auction_case', q, p, 1160, 200, [], [], ppopt)
    t_is(cp[1, 0], cp5[1, 0] * fro_X, 3, t)

    t = 'price of decommited gen, auction_type = 7'
    MVAbase, cq, cp, bus, gen, gencost, branch, f, dispatch, success, et = \
        runmkt('t_auction_case', q, p, 1170, 200, [], [], ppopt)
    t_is(cp[1, 0], cp5[1, 0] * (lao_X + lab_X) / 2, 3, t)

    t = 'price of decommited gen, auction_type = 0'
    MVAbase, cq, cp, bus, gen, gencost, branch, f, dispatch, success, et = \
        runmkt('t_auction_case', q, p, 1100, 200, [], [], ppopt)
    t_is(cp[1, 0], 120, 3, t)

    t = 'single block, marginal offer @ $50, auction_type = 5'
    q = array([
        [60],
        [36],
        [36],
        [36],
        [36],
        [36],
        [30],
        [10],
        [20]
    ])

    p = array([
        [50],
        [40],
        [42],
        [44],
        [46],
        [48],
        [100],
        [100],
        [100]
    ])

    MVAbase, cq, cp, bus, gen, gencost, branch, f, dispatch, success, et = \
        runmkt('t_auction_case', q, p, 1150, 100, [], [], ppopt)
    t_is( cq[G[0]], 35.32, 2, t )
    t_is( cq[G[1:6]], q[G[1:6]], 8, [t, ' : gen qtys'] )
    t_is( cp[G[0]], 50, 4, t )
    t_is( cq[L], q[L], 8, [t, ' : load qtys'] )
    t_is( cp[L[1], :], 54.03, 2, t )
    t_is( cp[G], bus.lam_P[Gbus], 8, [t, ' : gen prices'] )
    Qfudge =  zeros(p.shape)
    Qfudge[L, :] = diag(gen.Qg[L] / gen.Pg[L] * bus.lam_Q[Lbus]) * ones(p[L, :].shape)
    t_is( cp[L], bus.lam_P[Lbus] + Qfudge[L, 0], 8, [t, ' : load prices'] )

    t_end()

Example 49

Project: PYPOWER
Source File: t_off2case.py
View license
def t_off2case(quiet=False):
    """Tests for code in C{off2case}.

    @author: Ray Zimmerman (PSERC Cornell)
    """
    n_tests = 35

    t_begin(n_tests, quiet)

    ## generator data
    #    bus Pg Qg Qmax Qmin Vg mBase status Pmax Pmin Pc1 Pc2 Qc1min Qc1max Qc2min Qc2max ramp_agc ramp_10 ramp_30 ramp_q apf
    gen0 = array([
        [1,   10,   0,  60, -15, 1, 100, 1, 60, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [2,   10,   0,  60, -15, 1, 100, 1, 60, 12, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [7,  -30, -15,   0, -15, 1, 100, 1, 0, -30, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [13,  10,   0,  60, -15, 1, 100, 1, 60, 12, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [30, -30, 7.5, 7.5,   0, 1, 100, 1, 0, -30, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
    ], float)
    ## generator cost data
    #    1    startup    shutdown    n    x1    y1    ...    xn    yn
    #    2    startup    shutdown    n    c(n-1)    ...    c0
    gencost0 = array([
        [1, 0,   0, 4,   0, 0,  12, 240,   36, 1200, 60, 2400],
        [1, 100, 0, 4,   0, 0,  12, 240,   36, 1200, 60, 2400],
        [1, 0,   0, 4, -30, 0, -20, 1000, -10, 2000,  0, 3000],
        [1, 0,   0, 4,   0, 0,  12, 240,   36, 1200, 60, 2400],
        [1, 0,  50, 4, -30, 0, -20, 1000, -10, 2000,  0, 3000]
    ], float)

    try:
        from pypower.extras.smartmarket import off2case
    except ImportError:
        t_skip(n_tests, 'smartmarket code not available')
        return

    t = 'isload()'
    t_is(isload(gen0), array([0, 0, 1, 0, 1], bool), 8, t)

    G = find( ~isload(gen0) )
    L = find(  isload(gen0) )
    nGL = len(G) + len(L)

    t = 'P offers only';
    offers = {'P': {}}
    offers['P']['qty'] = array([[25], [26], [27]], float)
    offers['P']['prc'] = array([[10], [50], [100]], float)
    gen, gencost = off2case(gen0, gencost0, offers)

    gen1 = gen0.copy()
    gen1[G, PMAX] = offers['P']['qty'].flatten()
    gen1[L, GEN_STATUS] = 0
    t_is( gen, gen1, 8, [t, ' - gen'] )

    gencost1 = gencost0.copy()
    gencost1[ix_(G, range(NCOST, NCOST + 9))] = c_[array([
        [2, 0, 0, 25,  250],
        [2, 0, 0, 26, 1300],
        [2, 0, 0, 27, 2700],
    ]), zeros((3, 4))]

    t_is( gencost, gencost1, 8, [t, ' - gencost'] )

    offers['P']['qty'] = array([[25], [26], [0], [27],  [0]], float)
    offers['P']['prc'] = array([[10], [50], [0], [100], [0]], float)
    gen, gencost = off2case(gen0, gencost0, offers)
    t_is( gen, gen1, 8, [t, ' (all rows in offer) - gen'] )
    t_is( gencost, gencost1, 8, [t, ' (all rows in offer) - gencost'] )

    t = 'P offers only (GEN_STATUS=0 for 0 qty offer)';
    offers['P']['qty'] = array([ [0], [26],  [27]], float)
    offers['P']['prc'] = array([[10], [50], [100]], float)
    gen, gencost = off2case(gen0, gencost0, offers)

    gen1 = gen0.copy()
    gen1[G[1:3], PMAX] = offers['P']['qty'].flatten()[1:3]
    gen1[G[0], GEN_STATUS] = 0
    gen1[L, GEN_STATUS] = 0
    t_is( gen, gen1, 8, [t, ' - gen'] )

    gencost1 = gencost0.copy()
    gencost1[ix_(G[1:3], range(NCOST, NCOST + 9))] = c_[array([
        [2, 0, 0, 26, 1300],
        [2, 0, 0, 27, 2700]
    ]), zeros((2, 4))]

    t_is( gencost, gencost1, 8, [t, ' - gencost'] )

    t = 'P offers, lim[\'P\'][\'max_offer\']';
    offers['P']['qty'] = array([[25], [26], [27]], float)
    offers['P']['prc'] = array([[10], [50], [100]], float)
    lim = {'P': {'max_offer': 75}}
    gen, gencost = off2case(gen0, gencost0, offers, lim=lim)

    gen1 = gen0.copy()
    gen1[G[:2], PMAX] = offers['P']['qty'].flatten()[:2, :]
    gen1[r_[G[2], L], GEN_STATUS] = 0
    t_is( gen, gen1, 8, [t, ' - gen'] )

    gencost1 = gencost0.copy()
    gencost1[ix_(G[:2], range(NCOST, NCOST + 9))] = c_[array([
        [2, 0, 0, 25,  250],
        [2, 0, 0, 26, 1300]
    ]), zeros((2, 4))]
    t_is( gencost, gencost1, 8, [t, ' - gencost'] )

    t = 'P offers & P bids';
    bids = {'P': {'qty': array([ [20], [28]], float),
                  'prc': array([[100], [10]], float)}}
    gen, gencost = off2case(gen0, gencost0, offers, bids)

    gen1 = gen0.copy()
    gen1[G, PMAX] = offers['P']['qty']
    gen1[ix_(L, [PMIN, QMIN, QMAX])] = array([
        [-20, -10, 0],
        [-28,   0, 7]
    ])
    t_is( gen, gen1, 8, [t, ' - gen'] )

    gencost1 = gencost0[:, :8].copy()
    gencost1[ix_(G, range(NCOST, NCOST + 4))] = array([
        [2, 0, 0, 25,  250],
        [2, 0, 0, 26, 1300],
        [2, 0, 0, 27, 2700]
    ])
    gencost1[ix_(L, range(NCOST, NCOST + 4))] = array([
        [2, -20, -2000, 0, 0],
        [2, -28,  -280, 0, 0]
    ])
    t_is( gencost, gencost1, 8, [t, ' - gencost'] )

    t = 'P offers & P bids (all rows in bid)';
    bids['P']['qty'] = array([[0], [0],  [20], [0], [28]], float)
    bids['P']['prc'] = array([[0], [0], [100], [0], [10]], float)
    gen, gencost = off2case(gen0, gencost0, offers, bids)

    t_is( gen, gen1, 8, [t, ' - gen'] )
    t_is( gencost, gencost1, 8, [t, ' - gencost'] )

    t = 'P offers & P bids (GEN_STATUS=0 for 0 qty bid)';
    bids['P']['qty'] = array([  [0], [28]], float)
    bids['P']['prc'] = array([[100], [10]], float)
    gen, gencost = off2case(gen0, gencost0, offers, bids)

    gen1 = gen0.copy()
    gen1[G, PMAX] = offers['P']['qty']
    gen1[L[0], GEN_STATUS] = 0
    gen1[L[1], [PMIN, QMIN, QMAX]] = array([-28, 0, 7])
    t_is( gen, gen1, 8, [t, ' - gen'] )

    gencost1 = gencost0.copy()
    gencost1[ix_(G, range(NCOST, NCOST + 9))] = c_[array([
        [2, 0, 0, 25, 250],
        [2, 0, 0, 26, 1300],
        [2, 0, 0, 27, 2700]
    ]), zeros((3, 4))]
    gencost1[L[1], NCOST:NCOST + 8] = c_[array([
        [2, -28, -280, 0, 0]
    ]), zeros((1, 4))]
    t_is( gencost, gencost1, 8, [t, ' - gencost'] )

    t = 'P offers & P bids (1 gen with both)';
    gen2 = gen0.copy()
    gen2[1, PMIN] = -5
    bids['P']['qty'] = array([[0],  [3],  [20], [0], [28]], float)
    bids['P']['prc'] = array([[0], [50], [100], [0], [10]], float)
    gen, gencost = off2case(gen2, gencost0, offers, bids)

    gen1 = gen2.copy()
    gen1[G, PMAX] = offers['P']['qty']
    gen1[1, PMIN] = -sum( bids['P']['qty'][1, :] )
    gen1[ix_(L, [PMIN, QMIN, QMAX])] = array([
        [-20, -10, 0],
        [-28,   0, 7]
    ])
    t_is( gen, gen1, 8, [t, ' - gen'] )

    gencost1 = gencost0[:, :10].copy()
    gencost1[ix_(G, range(NCOST, NCOST + 7))] = array([
        [2,  0,    0, 25,  250,  0,    0],
        [3, -3, -150,  0,    0, 26, 1300],
        [2,  0,    0, 27, 2700,  0,    0]
    ])
    gencost1[ix_(L, range(NCOST, NCOST + 7))] = c_[array([
        [2, -20, -2000, 0, 0],
        [2, -28,  -280, 0, 0]
    ]), zeros((2, 2))]
    t_is( gencost, gencost1, 8, [t, ' - gencost'] )

    t = 'P offers & P bids, lim[\'P\'][\'max_offer\']/[\'min_bid\']'
    bids['P']['qty'] = array([[20],  [28]], float)
    bids['P']['prc'] = array([[100], [10]], float)
    lim['P']['min_bid'] = 50.0
    gen, gencost = off2case(gen0, gencost0, offers, bids, lim)

    gen1 = gen0.copy()
    gen1[G[:2], PMAX] = offers['P']['qty'][:2, :]
    gen1[r_[G[2], L[1]], GEN_STATUS] = 0
    gen1[L[0], [PMIN, QMIN, QMAX]] = array([-20, -10, 0])
    t_is( gen, gen1, 8, [t, ' - gen'] )

    gencost1 = gencost0.copy()
    gencost1[ix_(G[:2], range(NCOST, NCOST + 9))] = c_[array([
        [2, 0, 0, 25,  250],
        [2, 0, 0, 26, 1300]
    ]), zeros((2, 4))]
    gencost1[L[0], NCOST:NCOST + 9] = array([2, -20, -2000, 0, 0, 0, 0, 0, 0])
    t_is( gencost, gencost1, 8, [t, ' - gencost'] )

    t = 'P offers & P bids, lim[\'P\'][\'max_offer\']/[\'min_bid\'], multi-block'
    offers['P']['qty'] = array([[10,  40], [20, 30], [25, 25]], float)
    offers['P']['prc'] = array([[10, 100], [25, 65], [50, 90]], float)
    bids['P']['qty'] = array([[ 20, 10], [12, 18]], float)
    bids['P']['prc'] = array([[100, 60], [70, 10]], float)
    gen, gencost = off2case(gen0, gencost0, offers, bids, lim)

    gen1 = gen0.copy()
    gen1[G, PMAX] = array([10, 50, 25])
    gen1[ix_(L, [PMIN, QMIN, QMAX])] = array([
        [-30, -15, 0],
        [-12,   0, 3]
    ])
    t_is( gen, gen1, 8, [t, ' - gen'] )

    gencost1 = gencost0[:, :10].copy()
    gencost1[ix_(G, range(NCOST, NCOST + 7))] = array([
        [2, 0, 0, 10,  100, 0,     0],
        [3, 0, 0, 20,  500, 50, 2450],
        [2, 0, 0, 25, 1250, 0,     0]
    ])
    gencost1[ix_(L, range(NCOST, NCOST + 7))] = array([
        [3, -30, -2600, -20, -2000, 0, 0],
        [2, -12,  -840,   0,     0, 0, 0]
    ])
    t_is( gencost, gencost1, 8, [t, ' - gencost'] )

    ##-----  reactive  -----
    ## generator cost data
    #    1    startup    shutdown    n    x1    y1    ...    xn    yn
    #    2    startup    shutdown    n    c(n-1)    ...    c0
    gencost0 = array([
        [1,   0,  0, 4,   0,    0,  12,  240,  36, 1200, 60, 2400],
        [1, 100,  0, 4,   0,    0,  12,  240,  36, 1200, 60, 2400],
        [1,   0,  0, 4, -30,    0, -20, 1000, -10, 2000,  0, 3000],
        [1,   0,  0, 4,   0,    0,  12,  240,  36, 1200, 60, 2400],
        [1,   0, 50, 4, -30,    0, -20, 1000, -10, 2000,  0, 3000],
        [1,   0,  0, 4, -15, -150,   0,    0,  30,  150, 60,  450],
        [1, 100,  0, 2,   0,    0,   0,    0,   0,    0,  0,    0],
        [1,   0,  0, 3, -20,  -15, -10,  -10,   0,    0,  0,    0],
        [1,   0,  0, 3,   0,    0,  40,   80,  60,  180,  0,    0],
        [1,   0, 50, 2,   0,    0,   0,    0,   0,    0,  0,    0]
    ], float)

    t = 'PQ offers only';
    offers['P']['qty'] = array([[25], [26],  [27]], float)
    offers['P']['prc'] = array([[10], [50], [100]], float)
    offers['Q']['qty'] = array([[10], [20],  [30]], float)
    offers['Q']['prc'] = array([[10],  [5],   [1]], float)
    gen, gencost = off2case(gen0, gencost0, offers)

    gen1 = gen0.copy()
    gen1[G, PMAX] = offers['P']['qty']
    gen1[G, QMAX] = offers['Q']['qty']
    gen1[G, QMIN] = 0
    gen1[L, GEN_STATUS] = 0
    t_is( gen, gen1, 8, [t, ' - gen'] )

    gencost1 = gencost0.copy()
    gencost1[ix_(G, range(NCOST, NCOST + 9))] = c_[array([
        [2, 0, 0, 25,  250],
        [2, 0, 0, 26, 1300],
        [2, 0, 0, 27, 2700]
    ]), zeros((3, 4))]
    gencost1[ix_(G + nGL - 1, range(NCOST, NCOST + 9))] = c_[array([
        [2, 0, 0, 10, 100],
        [2, 0, 0, 20, 100],
        [2, 0, 0, 30,  30]
    ]), zeros((3, 4))]

    t_is( gencost, gencost1, 8, [t, ' - gencost'] )

    t = 'PQ offers & PQ bids, lim.P/Q.max_offer/min_bid, multi-block';
    offers['P']['qty'] = array([[10,  40], [20, 30], [25, 25]], float)
    offers['P']['prc'] = array([[10, 100], [25, 65], [50, 90]], float)
    bids['P']['qty'] = array([[ 20, 10], [12, 18]], float)
    bids['P']['prc'] = array([[100, 60], [70, 10]], float)
    offers['Q']['qty'] = array([[ 5,  5], [10, 10], [15, 15]], float)
    offers['Q']['prc'] = array([[10, 20], [ 5, 60], [ 1, 10]], float)
    bids['Q']['qty'] = array([ 15, 10, 15,  15,  0], float)
    bids['Q']['prc'] = array([-10,  0,  5, -20, 10], float)
    lim['Q']['max_offer'] = 50.0
    lim['Q']['min_bid'] = -15.0
    gen, gencost = off2case(gen0, gencost0, offers, bids, lim)

    gen1 = gen0.copy()
    gen1[:, [GEN_STATUS, PMIN, PMAX, QMIN, QMAX]] = array([
        [1,  10, 10, -15,  10],
        [1,  12, 50, -10,  10],
        [1, -10,  0,  -5,   0],
        [1,  12, 25,   0,  30],
        [0, -30,  0,   0, 7.5]
    ])
    t_is( gen, gen1, 8, [t, ' - gen'] )

    gencost1 = gencost0[:, :12].copy()
    gencost1[:, NCOST - 1:NCOST + 9] = array([
        [2,   0,     0,  10,   100,   0,    0,  0,    0],
        [3,   0,     0,  20,   500,  50, 2450,  0,    0],
        [3, -30, -2600, -20, -2000,   0,    0,  0,    0],
        [2,   0,     0,  25,  1250,   0,    0,  0,    0],
        [4, -30,     0, -20,  1000, -10, 2000,  0, 3000],
        [4, -15,   150,   0,     0,   5,   50, 10,  150],
        [3, -10,     0,   0,     0,  10,   50,  0,    0],
        [2, -15,   -75,   0,     0,   0,    0,  0,    0],
        [3,   0,     0,  15,    15,  30,  165,  0,    0],
        [2,   0,     0,   0,     0,   0,    0,  0,    0]
    ])
    t_is( gencost, gencost1, 8, [t, ' - gencost'] )

    t = 'PQ offers & PQ bids, for gen, no P, no shutdown';
    gen2 = gen0.copy()
    gen2[0, PMIN] = 0
    offers['P']['qty'] = array([[0, 40], [20, 30], [25, 25]], float)
    gen, gencost = off2case(gen2, gencost0, offers, bids, lim)

    gen1[0, [PMIN, PMAX, QMIN, QMAX]] = array([0, 0, -15, 10])
    t_is( gen, gen1, 8, [t, ' - gen'] )

    gencost1[0, NCOST:NCOST + 9] = gencost0[0, NCOST:NCOST + 9]
    t_is( gencost, gencost1, 8, [t, ' - gencost'] )

    t = 'PQ offers & PQ bids, for gen, no Q, no shutdown';
    offers['P']['qty'] = array([[10, 40], [20, 30], [25, 25]], float)
    offers['Q']['qty'] = array([[ 5,  5], [ 0, 10], [15, 15]], float)
    bids['Q']['qty'] = array([15, 0, 15, 15, 0], float)
    gen, gencost = off2case(gen0, gencost0, offers, bids, lim)

    gen1[0, [PMIN, PMAX, QMIN, QMAX]] = array([10, 10, -15, 10])    ## restore original
    gen1[1, [PMIN, PMAX, QMIN, QMAX]] = array([12, 50,   0,  0])
    t_is( gen, gen1, 8, [t, ' - gen'] )

    gencost1[ix_([0, 1, 6], range(NCOST, NCOST + 9))] = array([
        [2, 0, 0, 10, 100,  0,    0, 0, 0],
        [3, 0, 0, 20, 500, 50, 2450, 0, 0],
        [2, 0, 0,  0,   0,  0,    0, 0, 0]
    ])
    t_is( gencost, gencost1, 8, [t, ' - gencost'] )

    t = 'PQ offers & PQ bids, lim.P/Q.max_offer/min_bid, multi-block';
    offers['P']['qty'] = array([[10,  40], [20, 30], [25, 25]], float)
    offers['P']['prc'] = array([[10, 100], [25, 65], [50, 90]], float)
    bids['P']['qty'] = array([[10,   0], [12, 18]], float)
    bids['P']['prc'] = array([[100, 60], [70, 10]], float)
    offers['Q']['qty'] = array([[5, 5], [10, 10], [15, 15]], float)
    offers['Q']['prc'] = array([[10, 20], [5, 60], [1, 10]], float)
    bids['Q']['qty'] = array([15, 10, 10, 15, 0], float)
    bids['Q']['prc'] = array([-10, 0, 5, -20, 10], float)
    lim['Q']['max_offer'] = 50.0
    lim['Q']['min_bid'] = -15.0
    gen, gencost = off2case(gen0, gencost0, offers, bids, lim)

    gen1 = gen0.copy()
    gen1[:, [GEN_STATUS, PMIN, PMAX, QMIN, QMAX]] = array([
        [1,  10, 10, -15, 10],
        [1,  12, 50, -10, 10],
        [1, -10,  0,  -5,  0],
        [1,  12, 25,   0, 30],
        [0, -30,  0,   0,  7.5]
    ])
    t_is( gen, gen1, 8, [t, ' - gen'] )

    gencost1 = gencost0[:, :12].copy()
    gencost1[:, NCOST:NCOST + 9] = array([
        [2,   0,     0,  10,  100,   0,    0,  0,    0],
        [3,   0,     0,  20,  500,  50, 2450,  0,    0],
        [2, -10, -1000,   0,    0,   0,    0,  0,    0],
        [2,   0,     0,  25, 1250,   0,    0,  0,    0],
        [4, -30,     0, -20, 1000, -10, 2000,  0, 3000],
        [4, -15,   150,   0,    0,   5,   50, 10,  150],
        [3, -10,     0,   0,    0,  10,   50,  0,    0],
        [2, -10,   -50,   0,    0,   0,    0,  0,    0],
        [3,   0,     0,  15,   15,  30,  165,  0,    0],
        [2,   0,     0,   0,    0,   0,    0,  0,    0]
    ])
    t_is( gencost, gencost1, 8, [t, ' - gencost'] )

    t = 'PQ offers & PQ bids, zero Q load w/P bid, shutdown bugfix';
    gen1 = gen0.copy()
    gen1[4, [QG, QMIN, QMAX]] = 0
    gen, gencost = off2case(gen1, gencost0, offers, bids, lim)

    gen1[:, [PMIN, PMAX, QMIN, QMAX]] = array([
        [ 10, 10, -15, 10],
        [ 12, 50, -10, 10],
        [-10,  0,  -5,  0],
        [ 12, 25,   0, 30],
        [-12,  0,   0,  0]
    ])
    t_is( gen, gen1, 8, [t, ' - gen'] )

    gencost1 = gencost0[:, :12].copy()
    gencost1[:, NCOST - 1:NCOST + 9] = array([
        [2,   0,     0, 10,  100,  0,    0,  0,   0],
        [3,   0,     0, 20,  500, 50, 2450,  0,   0],
        [2, -10, -1000,  0,    0,  0,    0,  0,   0],
        [2,   0,     0, 25, 1250,  0,    0,  0,   0],
        [2, -12,  -840,  0,    0,  0,    0,  0,   0],
        [4, -15,   150,  0,    0,  5,   50, 10, 150],
        [3, -10,     0,  0,    0, 10,   50,  0,   0],
        [2, -10,   -50,  0,    0,  0,    0,  0,   0],
        [3,   0,     0, 15,   15, 30,  165,  0,   0],
        [2,   0,     0,  0,    0,  0,    0,  0,   0]
    ])
    t_is( gencost, gencost1, 8, [t, ' - gencost'] )

    t = 'PQ offers & PQ bids, non-zero Q load w/no P bid, shutdown bugfix';
    offers['P']['qty'] = array([[10,  40], [20, 30], [25, 25]], float)
    offers['P']['prc'] = array([[10, 100], [25, 65], [50, 90]], float)
    bids['P']['qty'] = array([[0, 10], [12, 18]], float)
    bids['P']['prc'] = array([[100, 40], [70, 10]], float)
    offers['Q']['qty'] = array([[ 5,  5], [10, 10], [15, 15]], float)
    offers['Q']['prc'] = array([[10, 20], [ 5, 60], [ 1, 10]], float)
    bids['Q']['qty'] = array([ 15, 10, 15,  15,  0], float)
    bids['Q']['prc'] = array([-10,  0,  5, -20, 10], float)
    lim['Q']['max_offer'] = 50.0
    lim['Q']['min_bid'] = -15.0
    gen, gencost = off2case(gen0, gencost0, offers, bids, lim)

    gen1 = gen0.copy()
    gen1[:, [GEN_STATUS, PMIN, PMAX, QMIN, QMAX]] = array([
        [1,  10, 10, -15, 10],
        [1,  12, 50, -10, 10],
        [0, -30,  0, -15,  0],
        [1,  12, 25,   0, 30],
        [0, -30,  0,   0, 7.5]
    ])
    t_is( gen, gen1, 8, [t, ' - gen'] )

    gencost1 = gencost0[:, :12].copy()
    gencost1[:, NCOST - 1:NCOST + 9] = array([
        [2,   0,   0,  10,  100,   0,    0,  0,    0],
        [3,   0,   0,  20,  500,  50, 2450,  0,    0],
        [4, -30,   0, -20, 1000, -10, 2000,  0, 3000],
        [2,   0,   0,  25, 1250,   0,    0,  0,    0],
        [4, -30,   0, -20, 1000, -10, 2000,  0, 3000],
        [4, -15, 150,   0,    0,   5,   50, 10,  150],
        [3, -10,   0,   0,    0,  10,   50,  0,    0],
        [3, -20, -15, -10,  -10,   0,    0,  0,    0],
        [3,   0,   0,  15,   15,  30,  165,  0,    0],
        [2,   0,   0,   0,    0,   0,    0,  0,    0]
    ])
    t_is( gencost, gencost1, 8, [t, ' - gencost'] )

    t_end()

Example 50

Project: PYPOWER
Source File: t_opf_ipopt.py
View license
def t_opf_ipopt(quiet=False):
    """Tests for IPOPT-based AC optimal power flow.

    Runs a sequence of OPF scenarios on the 9-bus test case and compares
    bus, generator and branch results (data, dispatch, multipliers) against
    pre-solved reference solutions loaded from MAT-files:

      - basic AC OPF
      - automatic conversion of single-block piecewise-linear costs
      - active-power (P) line flow limits
      - quadratic gen costs moved to generalized user-defined costs
      - extra linear user constraints and costs (z variable)
      - generator P-Q capability curves
      - branch angle-difference limits (enforced and ignored)

    quiet : if True, suppress per-test progress output (passed to t_begin).

    NOTE(review): assumes the reference MAT-files (soln9_opf*.mat) and the
    t_case9_opf* case files are present next to this test module — the test
    cannot run without them.

    @author: Ray Zimmerman (PSERC Cornell)
    """
    ## total number of t_ok/t_is assertions issued below; t_begin/t_end
    ## verify this count, so keep it in sync when editing the test body
    num_tests = 101

    t_begin(num_tests, quiet)

    tdir = dirname(__file__)
    casefile = join(tdir, 't_case9_opf')
    verbose = 0#not quiet

    t0 = 'IPOPT : '
    ## tight solver tolerances so results match the stored solutions to
    ## the digit counts asserted below
    ppopt = ppoption(OPF_VIOLATION=1e-6, PDIPM_GRADTOL=1e-8,
                   PDIPM_COMPTOL=1e-8, PDIPM_COSTTOL=1e-9)
    ## OPF_ALG=580 selects the IPOPT-based OPF solver — presumably; confirm
    ## against the ppoption algorithm-code table
    ppopt = ppoption(ppopt, OUT_ALL=0, VERBOSE=verbose, OPF_ALG=580)

    ## set up indices
    ## column-index groups used to slice bus/gen/branch matrices when
    ## comparing against the reference solutions
    ib_data     = r_[arange(BUS_AREA + 1), arange(BASE_KV, VMIN + 1)]
    ib_voltage  = arange(VM, VA + 1)
    ib_lam      = arange(LAM_P, LAM_Q + 1)
    ib_mu       = arange(MU_VMAX, MU_VMIN + 1)
    ig_data     = r_[[GEN_BUS, QMAX, QMIN], arange(MBASE, APF + 1)]
    ig_disp     = array([PG, QG, VG])
    ig_mu       = arange(MU_PMAX, MU_QMIN + 1)
    ibr_data    = arange(ANGMAX + 1)
    ibr_flow    = arange(PF, QT + 1)
    ibr_mu      = array([MU_SF, MU_ST])
    ibr_angmu   = array([MU_ANGMIN, MU_ANGMAX])

    ## get solved AC power flow case from MAT-file
    soln9_opf = loadmat(join(tdir, 'soln9_opf.mat'), struct_as_record=True)
    ## defines bus_soln, gen_soln, branch_soln, f_soln
    bus_soln = soln9_opf['bus_soln']
    gen_soln = soln9_opf['gen_soln']
    branch_soln = soln9_opf['branch_soln']
    f_soln = soln9_opf['f_soln'][0]

    ## run OPF
    t = t0
    r = runopf(casefile, ppopt)
    bus, gen, branch, f, success = \
            r['bus'], r['gen'], r['branch'], r['f'], r['success']
    t_ok(success, [t, 'success'])
    t_is(f, f_soln, 3, [t, 'f'])
    t_is(   bus[:, ib_data   ],    bus_soln[:, ib_data   ], 10, [t, 'bus data'])
    t_is(   bus[:, ib_voltage],    bus_soln[:, ib_voltage],  3, [t, 'bus voltage'])
    t_is(   bus[:, ib_lam    ],    bus_soln[:, ib_lam    ],  3, [t, 'bus lambda'])
    t_is(   bus[:, ib_mu     ],    bus_soln[:, ib_mu     ],  2, [t, 'bus mu'])
    t_is(   gen[:, ig_data   ],    gen_soln[:, ig_data   ], 10, [t, 'gen data'])
    t_is(   gen[:, ig_disp   ],    gen_soln[:, ig_disp   ],  3, [t, 'gen dispatch'])
    t_is(   gen[:, ig_mu     ],    gen_soln[:, ig_mu     ],  3, [t, 'gen mu'])
    t_is(branch[:, ibr_data  ], branch_soln[:, ibr_data  ], 10, [t, 'branch data'])
    t_is(branch[:, ibr_flow  ], branch_soln[:, ibr_flow  ],  3, [t, 'branch flow'])
    t_is(branch[:, ibr_mu    ], branch_soln[:, ibr_mu    ],  2, [t, 'branch mu'])

    ## run with automatic conversion of single-block pwl to linear costs
    t = ''.join([t0, '(single-block PWL) : '])
    ppc = loadcase(casefile)
    ## force gen 3's cost to a single PWL block (n=2 points) to exercise
    ## the automatic pwl-to-linear conversion path
    ppc['gencost'][2, NCOST] = 2
    r = runopf(ppc, ppopt)
    bus, gen, branch, f, success = \
            r['bus'], r['gen'], r['branch'], r['f'], r['success']
    t_ok(success, [t, 'success'])
    t_is(f, f_soln, 3, [t, 'f'])
    t_is(   bus[:, ib_data   ],    bus_soln[:, ib_data   ], 10, [t, 'bus data'])
    t_is(   bus[:, ib_voltage],    bus_soln[:, ib_voltage],  3, [t, 'bus voltage'])
    t_is(   bus[:, ib_lam    ],    bus_soln[:, ib_lam    ],  3, [t, 'bus lambda'])
    t_is(   bus[:, ib_mu     ],    bus_soln[:, ib_mu     ],  2, [t, 'bus mu'])
    t_is(   gen[:, ig_data   ],    gen_soln[:, ig_data   ], 10, [t, 'gen data'])
    t_is(   gen[:, ig_disp   ],    gen_soln[:, ig_disp   ],  3, [t, 'gen dispatch'])
    t_is(   gen[:, ig_mu     ],    gen_soln[:, ig_mu     ],  3, [t, 'gen mu'])
    t_is(branch[:, ibr_data  ], branch_soln[:, ibr_data  ], 10, [t, 'branch data'])
    t_is(branch[:, ibr_flow  ], branch_soln[:, ibr_flow  ],  3, [t, 'branch flow'])
    t_is(branch[:, ibr_mu    ], branch_soln[:, ibr_mu    ],  2, [t, 'branch mu'])
    ## reconstruct the raw optimization vector [Va; Vm; Pg; Qg; 0; y]
    ## (the 0 fills the slot of the converted single-block cost variable)
    xr = r_[r['var']['val']['Va'], r['var']['val']['Vm'], r['var']['val']['Pg'],
            r['var']['val']['Qg'], 0, r['var']['val']['y']]
    t_is(r['x'], xr, 8, [t, 'check on raw x returned from OPF'])

    ## get solved AC power flow case from MAT-file
    soln9_opf_Plim = loadmat(join(tdir, 'soln9_opf_Plim.mat'), struct_as_record=True)
    ## defines bus_soln, gen_soln, branch_soln, f_soln
    bus_soln = soln9_opf_Plim['bus_soln']
    gen_soln = soln9_opf_Plim['gen_soln']
    branch_soln = soln9_opf_Plim['branch_soln']
    f_soln = soln9_opf_Plim['f_soln'][0]

    ## run OPF with active power line limits
    t = ''.join([t0, '(P line lim) : '])
    ppopt1 = ppoption(ppopt, OPF_FLOW_LIM=1)
    r = runopf(casefile, ppopt1)
    bus, gen, branch, f, success = \
            r['bus'], r['gen'], r['branch'], r['f'], r['success']
    t_ok(success, [t, 'success'])
    t_is(f, f_soln, 3, [t, 'f'])
    t_is(   bus[:, ib_data   ],    bus_soln[:, ib_data   ], 10, [t, 'bus data'])
    t_is(   bus[:, ib_voltage],    bus_soln[:, ib_voltage],  3, [t, 'bus voltage'])
    t_is(   bus[:, ib_lam    ],    bus_soln[:, ib_lam    ],  3, [t, 'bus lambda'])
    t_is(   bus[:, ib_mu     ],    bus_soln[:, ib_mu     ],  2, [t, 'bus mu'])
    t_is(   gen[:, ig_data   ],    gen_soln[:, ig_data   ], 10, [t, 'gen data'])
    t_is(   gen[:, ig_disp   ],    gen_soln[:, ig_disp   ],  3, [t, 'gen dispatch'])
    t_is(   gen[:, ig_mu     ],    gen_soln[:, ig_mu     ],  3, [t, 'gen mu'])
    t_is(branch[:, ibr_data  ], branch_soln[:, ibr_data  ], 10, [t, 'branch data'])
    t_is(branch[:, ibr_flow  ], branch_soln[:, ibr_flow  ],  3, [t, 'branch flow'])
    t_is(branch[:, ibr_mu    ], branch_soln[:, ibr_mu    ],  2, [t, 'branch mu'])

    ##-----  test OPF with quadratic gen costs moved to generalized costs  -----
    ppc = loadcase(casefile)
    ppc['gencost'] = array([
        [2,   1500, 0,   3,   0.11,    5,   0],
        [2,   2000, 0,   3,   0.085,   1.2, 0],
        [2,   3000, 0,   3,   0.1225,  1,   0]
    ])
    ## baseline: solve with the quadratic costs in gencost, then re-solve
    ## below with the same costs expressed as generalized (N, H, Cw) costs
    ## and require identical results
    r = runopf(ppc, ppopt)
    bus_soln, gen_soln, branch_soln, f_soln, success = \
            r['bus'], r['gen'], r['branch'], r['f'], r['success']
    branch_soln = branch_soln[:, :MU_ST + 1]

    A = None
    l = array([])
    u = array([])
    nb = ppc['bus'].shape[0]      # number of buses
    ng = ppc['gen'].shape[0]      # number of gens
    ## variable-block offsets within the OPF optimization vector x
    thbas = 0;                thend    = thbas + nb
    vbas     = thend;     vend     = vbas + nb
    pgbas    = vend;      pgend    = pgbas + ng
#    qgbas    = pgend;     qgend    = qgbas + ng
    nxyz = 2 * nb + 2 * ng
    ## N picks out the Pg variables (scaled to MW via baseMVA) as the
    ## generalized-cost inputs w
    N = sparse((ppc['baseMVA'] * ones(ng), (arange(ng), arange(pgbas, pgend))), (ng, nxyz))
    fparm = ones((ng, 1)) * array([[1, 0, 0, 1]])
    ix = argsort(ppc['gen'][:, 0])
    H = 2 * spdiags(ppc['gencost'][ix, 4], 0, ng, ng, 'csr')
    Cw = ppc['gencost'][ix, 5]
    ## zero out the polynomial coefficients so the only cost left is the
    ## generalized one
    ppc['gencost'][:, 4:7] = 0

    ## run OPF with quadratic gen costs moved to generalized costs
    t = ''.join([t0, 'w/quadratic generalized gen cost : '])
    r = opf(ppc, A, l, u, ppopt, N, fparm, H, Cw)
    f, bus, gen, branch, success = \
            r['f'], r['bus'], r['gen'], r['branch'], r['success']
    t_ok(success, [t, 'success'])
    t_is(f, f_soln, 3, [t, 'f'])
    t_is(   bus[:, ib_data   ],    bus_soln[:, ib_data   ], 10, [t, 'bus data'])
    t_is(   bus[:, ib_voltage],    bus_soln[:, ib_voltage],  3, [t, 'bus voltage'])
    t_is(   bus[:, ib_lam    ],    bus_soln[:, ib_lam    ],  3, [t, 'bus lambda'])
    t_is(   bus[:, ib_mu     ],    bus_soln[:, ib_mu     ],  2, [t, 'bus mu'])
    t_is(   gen[:, ig_data   ],    gen_soln[:, ig_data   ], 10, [t, 'gen data'])
    t_is(   gen[:, ig_disp   ],    gen_soln[:, ig_disp   ],  3, [t, 'gen dispatch'])
    t_is(   gen[:, ig_mu     ],    gen_soln[:, ig_mu     ],  3, [t, 'gen mu'])
    t_is(branch[:, ibr_data  ], branch_soln[:, ibr_data  ], 10, [t, 'branch data'])
    t_is(branch[:, ibr_flow  ], branch_soln[:, ibr_flow  ],  3, [t, 'branch flow'])
    t_is(branch[:, ibr_mu    ], branch_soln[:, ibr_mu    ],  2, [t, 'branch mu'])
    t_is(r['cost']['usr'], f, 12, [t, 'user cost'])

    ##-----  run OPF with extra linear user constraints & costs  -----
    ## single new z variable constrained to be greater than or equal to
    ## deviation from 1 pu voltage at bus 1, linear cost on this z
    ## get solved AC power flow case from MAT-file
    soln9_opf_extras1 = loadmat(join(tdir, 'soln9_opf_extras1.mat'), struct_as_record=True)
    ## defines bus_soln, gen_soln, branch_soln, f_soln
    bus_soln = soln9_opf_extras1['bus_soln']
    gen_soln = soln9_opf_extras1['gen_soln']
    branch_soln = soln9_opf_extras1['branch_soln']
    f_soln = soln9_opf_extras1['f_soln'][0]

    ## two rows encoding -Vm1 + z >= -1 and Vm1 + z >= 1, i.e. z >= |Vm1 - 1|
    ## NOTE(review): column 9 is presumably the Vm index of bus 1 and 24 the
    ## new z variable slot in a 25-element x — confirm against variable layout
    row = [0, 0, 1, 1]
    col = [9, 24, 9, 24]
    A = sparse(([-1, 1, 1, 1], (row, col)), (2, 25))
    u = array([Inf, Inf])
    l = array([-1, 1])

    N = sparse(([1], ([0], [24])), (1, 25))    ## new z variable only
    fparm = array([[1, 0, 0, 1]])              ## w = r = z
    H = sparse((1, 1))                ## no quadratic term
    Cw = array([100.0])

    t = ''.join([t0, 'w/extra constraints & costs 1 : '])
    r = opf(casefile, A, l, u, ppopt, N, fparm, H, Cw)
    f, bus, gen, branch, success = \
            r['f'], r['bus'], r['gen'], r['branch'], r['success']
    t_ok(success, [t, 'success'])
    t_is(f, f_soln, 3, [t, 'f'])
    t_is(   bus[:, ib_data   ],    bus_soln[:, ib_data   ], 10, [t, 'bus data'])
    t_is(   bus[:, ib_voltage],    bus_soln[:, ib_voltage],  3, [t, 'bus voltage'])
    t_is(   bus[:, ib_lam    ],    bus_soln[:, ib_lam    ],  3, [t, 'bus lambda'])
    t_is(   bus[:, ib_mu     ],    bus_soln[:, ib_mu     ],  2, [t, 'bus mu'])
    t_is(   gen[:, ig_data   ],    gen_soln[:, ig_data   ], 10, [t, 'gen data'])
    t_is(   gen[:, ig_disp   ],    gen_soln[:, ig_disp   ],  3, [t, 'gen dispatch'])
    t_is(   gen[:, ig_mu     ],    gen_soln[:, ig_mu     ],  3, [t, 'gen mu'])
    t_is(branch[:, ibr_data  ], branch_soln[:, ibr_data  ], 10, [t, 'branch data'])
    t_is(branch[:, ibr_flow  ], branch_soln[:, ibr_flow  ],  3, [t, 'branch flow'])
    t_is(branch[:, ibr_mu    ], branch_soln[:, ibr_mu    ],  2, [t, 'branch mu'])
    t_is(r['var']['val']['z'], 0.025419, 6, [t, 'user variable'])
    t_is(r['cost']['usr'], 2.5419, 4, [t, 'user cost'])

    ##-----  test OPF with capability curves  -----
    ppc = loadcase(join(tdir, 't_case9_opfv2'))
    ## remove angle diff limits
    ppc['branch'][0, ANGMAX] =  360
    ppc['branch'][8, ANGMIN] = -360

    ## get solved AC power flow case from MAT-file
    soln9_opf_PQcap = loadmat(join(tdir, 'soln9_opf_PQcap.mat'), struct_as_record=True)
    ## defines bus_soln, gen_soln, branch_soln, f_soln
    bus_soln = soln9_opf_PQcap['bus_soln']
    gen_soln = soln9_opf_PQcap['gen_soln']
    branch_soln = soln9_opf_PQcap['branch_soln']
    f_soln = soln9_opf_PQcap['f_soln'][0]

    ## run OPF with capability curves
    t = ''.join([t0, 'w/capability curves : '])
    r = runopf(ppc, ppopt)
    bus, gen, branch, f, success = \
            r['bus'], r['gen'], r['branch'], r['f'], r['success']
    t_ok(success, [t, 'success'])
    t_is(f, f_soln, 3, [t, 'f'])
    t_is(   bus[:, ib_data   ],    bus_soln[:, ib_data   ], 10, [t, 'bus data'])
    t_is(   bus[:, ib_voltage],    bus_soln[:, ib_voltage],  3, [t, 'bus voltage'])
    t_is(   bus[:, ib_lam    ],    bus_soln[:, ib_lam    ],  3, [t, 'bus lambda'])
    t_is(   bus[:, ib_mu     ],    bus_soln[:, ib_mu     ],  2, [t, 'bus mu'])
    t_is(   gen[:, ig_data   ],    gen_soln[:, ig_data   ], 10, [t, 'gen data'])
    t_is(   gen[:, ig_disp   ],    gen_soln[:, ig_disp   ],  3, [t, 'gen dispatch'])
    t_is(   gen[:, ig_mu     ],    gen_soln[:, ig_mu     ],  3, [t, 'gen mu'])
    t_is(branch[:, ibr_data  ], branch_soln[:, ibr_data  ], 10, [t, 'branch data'])
    t_is(branch[:, ibr_flow  ], branch_soln[:, ibr_flow  ],  3, [t, 'branch flow'])
    t_is(branch[:, ibr_mu    ], branch_soln[:, ibr_mu    ],  2, [t, 'branch mu'])

    ##-----  test OPF with angle difference limits  -----
    ppc = loadcase(join(tdir, 't_case9_opfv2'))
    ## remove capability curves
    ppc['gen'][ix_(arange(1, 3),
                   [PC1, PC2, QC1MIN, QC1MAX, QC2MIN, QC2MAX])] = zeros((2, 6))

    ## get solved AC power flow case from MAT-file
    soln9_opf_ang = loadmat(join(tdir, 'soln9_opf_ang.mat'), struct_as_record=True)
    ## defines bus_soln, gen_soln, branch_soln, f_soln
    bus_soln = soln9_opf_ang['bus_soln']
    gen_soln = soln9_opf_ang['gen_soln']
    branch_soln = soln9_opf_ang['branch_soln']
    f_soln = soln9_opf_ang['f_soln'][0]

    ## run OPF with angle difference limits
    t = ''.join([t0, 'w/angle difference limits : '])
    r = runopf(ppc, ppopt)
    bus, gen, branch, f, success = \
            r['bus'], r['gen'], r['branch'], r['f'], r['success']
    t_ok(success, [t, 'success'])
    t_is(f, f_soln, 3, [t, 'f'])
    t_is(   bus[:, ib_data   ],    bus_soln[:, ib_data   ], 10, [t, 'bus data'])
    t_is(   bus[:, ib_voltage],    bus_soln[:, ib_voltage],  3, [t, 'bus voltage'])
    t_is(   bus[:, ib_lam    ],    bus_soln[:, ib_lam    ],  3, [t, 'bus lambda'])
    t_is(   bus[:, ib_mu     ],    bus_soln[:, ib_mu     ],  1, [t, 'bus mu'])
    t_is(   gen[:, ig_data   ],    gen_soln[:, ig_data   ], 10, [t, 'gen data'])
    t_is(   gen[:, ig_disp   ],    gen_soln[:, ig_disp   ],  3, [t, 'gen dispatch'])
    t_is(   gen[:, ig_mu     ],    gen_soln[:, ig_mu     ],  3, [t, 'gen mu'])
    t_is(branch[:, ibr_data  ], branch_soln[:, ibr_data  ], 10, [t, 'branch data'])
    t_is(branch[:, ibr_flow  ], branch_soln[:, ibr_flow  ],  3, [t, 'branch flow'])
    t_is(branch[:, ibr_mu    ], branch_soln[:, ibr_mu    ],  2, [t, 'branch mu'])
    t_is(branch[:, ibr_angmu ], branch_soln[:, ibr_angmu ],  2, [t, 'branch angle mu'])

    ##-----  test OPF with ignored angle difference limits  -----
    ## get solved AC power flow case from MAT-file
    soln9_opf = loadmat(join(tdir, 'soln9_opf.mat'), struct_as_record=True)
    ## defines bus_soln, gen_soln, branch_soln, f_soln
    bus_soln = soln9_opf['bus_soln']
    gen_soln = soln9_opf['gen_soln']
    branch_soln = soln9_opf['branch_soln']
    f_soln = soln9_opf['f_soln'][0]

    ## run OPF with ignored angle difference limits
    t = ''.join([t0, 'w/ignored angle difference limits : '])
    ppopt1 = ppoption(ppopt, OPF_IGNORE_ANG_LIM=1)
    r = runopf(ppc, ppopt1)
    bus, gen, branch, f, success = \
            r['bus'], r['gen'], r['branch'], r['f'], r['success']
    ## ang limits are not in this solution data, so let's remove them
    branch[0, ANGMAX] =  360
    branch[8, ANGMIN] = -360
    t_ok(success, [t, 'success'])
    t_is(f, f_soln, 3, [t, 'f'])
    t_is(   bus[:, ib_data   ],    bus_soln[:, ib_data   ], 10, [t, 'bus data'])
    t_is(   bus[:, ib_voltage],    bus_soln[:, ib_voltage],  3, [t, 'bus voltage'])
    t_is(   bus[:, ib_lam    ],    bus_soln[:, ib_lam    ],  3, [t, 'bus lambda'])
    t_is(   bus[:, ib_mu     ],    bus_soln[:, ib_mu     ],  2, [t, 'bus mu'])
    t_is(   gen[:, ig_data   ],    gen_soln[:, ig_data   ], 10, [t, 'gen data'])
    t_is(   gen[:, ig_disp   ],    gen_soln[:, ig_disp   ],  3, [t, 'gen dispatch'])
    t_is(   gen[:, ig_mu     ],    gen_soln[:, ig_mu     ],  3, [t, 'gen mu'])
    t_is(branch[:, ibr_data  ], branch_soln[:, ibr_data  ], 10, [t, 'branch data'])
    t_is(branch[:, ibr_flow  ], branch_soln[:, ibr_flow  ],  3, [t, 'branch flow'])
    t_is(branch[:, ibr_mu    ], branch_soln[:, ibr_mu    ],  2, [t, 'branch mu'])

    t_end()