numpy.zeros

Here are examples of the Python API numpy.zeros, taken from open source projects. Each example shows how a real project allocates and fills zero-initialized arrays.
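
For quick reference, numpy.zeros(shape, dtype=float, order='C') returns a new array of the given shape and dtype, filled with zeros. A minimal sketch of the call patterns that recur in the examples below (variable names are illustrative):

import numpy

a = numpy.zeros(5)                             # 1-D array of five float64 zeros
b = numpy.zeros((3, 4))                        # 2-D array; the shape is given as a tuple
c = numpy.zeros((2, 2), dtype=complex)         # any NumPy dtype can be requested
d = numpy.zeros(shape=b.shape, dtype=b.dtype)  # mirror another array's shape and dtype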

162 Examples

Example 1

Project: pyAudioAnalysis Source File: audioSegmentation.py
def speakerDiarization(fileName, numOfSpeakers, mtSize=2.0, mtStep=0.2, stWin=0.05, LDAdim=35, PLOT=False):
    '''
    ARGUMENTS:
        - fileName:        the name of the WAV file to be analyzed
        - numOfSpeakers:   the number of speakers (clusters) in the recording (<=0 for unknown)
        - mtSize (opt)     mid-term window size
        - mtStep (opt)     mid-term window step
        - stWin  (opt)     short-term window size
        - LDAdim (opt)     LDA dimension (0 for no LDA)
        - PLOT     (opt)   0 for not plotting the results, 1 for plotting
    '''
    [Fs, x] = audioBasicIO.readAudioFile(fileName)
    x = audioBasicIO.stereo2mono(x)
    Duration = len(x) / Fs

    [Classifier1, MEAN1, STD1, classNames1, mtWin1, mtStep1, stWin1, stStep1, computeBEAT1] = aT.loadKNNModel(os.path.join("data","knnSpeakerAll"))
    [Classifier2, MEAN2, STD2, classNames2, mtWin2, mtStep2, stWin2, stStep2, computeBEAT2] = aT.loadKNNModel(os.path.join("data","knnSpeakerFemaleMale"))

    [MidTermFeatures, ShortTermFeatures] = aF.mtFeatureExtraction(x, Fs, mtSize * Fs, mtStep * Fs, round(Fs * stWin), round(Fs*stWin * 0.5))

    MidTermFeatures2 = numpy.zeros((MidTermFeatures.shape[0] + len(classNames1) + len(classNames2), MidTermFeatures.shape[1]))

    for i in range(MidTermFeatures.shape[1]):
        curF1 = (MidTermFeatures[:, i] - MEAN1) / STD1
        curF2 = (MidTermFeatures[:, i] - MEAN2) / STD2
        [Result, P1] = aT.classifierWrapper(Classifier1, "knn", curF1)
        [Result, P2] = aT.classifierWrapper(Classifier2, "knn", curF2)
        MidTermFeatures2[0:MidTermFeatures.shape[0], i] = MidTermFeatures[:, i]
        MidTermFeatures2[MidTermFeatures.shape[0]:MidTermFeatures.shape[0]+len(classNames1), i] = P1 + 0.0001
        MidTermFeatures2[MidTermFeatures.shape[0] + len(classNames1)::, i] = P2 + 0.0001

    MidTermFeatures = MidTermFeatures2    # TODO
    # SELECT FEATURES:
    #iFeaturesSelect = [8,9,10,11,12,13,14,15,16,17,18,19,20];                                                                                         # SET 0A
    #iFeaturesSelect = [8,9,10,11,12,13,14,15,16,17,18,19,20, 99,100];                                                                                 # SET 0B
    #iFeaturesSelect = [8,9,10,11,12,13,14,15,16,17,18,19,20, 68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,
    #   97,98, 99,100];     # SET 0C

    iFeaturesSelect = [8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53]                           # SET 1A
    #iFeaturesSelect = [8,9,10,11,12,13,14,15,16,17,18,19,20,41,42,43,44,45,46,47,48,49,50,51,52,53, 99,100];                                          # SET 1B
    #iFeaturesSelect = [8,9,10,11,12,13,14,15,16,17,18,19,20,41,42,43,44,45,46,47,48,49,50,51,52,53, 68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98, 99,100];     # SET 1C

    #iFeaturesSelect = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53];             # SET 2A
    #iFeaturesSelect = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53, 99,100];     # SET 2B
    #iFeaturesSelect = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53, 68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98, 99,100];     # SET 2C

    #iFeaturesSelect = range(100);                                                                                                    # SET 3
    #MidTermFeatures += numpy.random.rand(MidTermFeatures.shape[0], MidTermFeatures.shape[1]) * 0.000000010

    MidTermFeatures = MidTermFeatures[iFeaturesSelect, :]

    (MidTermFeaturesNorm, MEAN, STD) = aT.normalizeFeatures([MidTermFeatures.T])
    MidTermFeaturesNorm = MidTermFeaturesNorm[0].T
    numOfWindows = MidTermFeatures.shape[1]

    # remove outliers:
    DistancesAll = numpy.sum(distance.squareform(distance.pdist(MidTermFeaturesNorm.T)), axis=0)
    MDistancesAll = numpy.mean(DistancesAll)
    iNonOutLiers = numpy.nonzero(DistancesAll < 1.2 * MDistancesAll)[0]

    # TODO: Combine energy threshold for outlier removal:
    #EnergyMin = numpy.min(MidTermFeatures[1,:])
    #EnergyMean = numpy.mean(MidTermFeatures[1,:])
    #Thres = (1.5*EnergyMin + 0.5*EnergyMean) / 2.0
    #iNonOutLiers = numpy.nonzero(MidTermFeatures[1,:] > Thres)[0]
    #print iNonOutLiers

    perOutLier = (100.0 * (numOfWindows - iNonOutLiers.shape[0])) / numOfWindows
    MidTermFeaturesNormOr = MidTermFeaturesNorm
    MidTermFeaturesNorm = MidTermFeaturesNorm[:, iNonOutLiers]

    # LDA dimensionality reduction:
    if LDAdim > 0:
        #[mtFeaturesToReduce, _] = aF.mtFeatureExtraction(x, Fs, mtSize * Fs, stWin * Fs, round(Fs*stWin), round(Fs*stWin));
        # extract mid-term features with minimum step:
        mtWinRatio = int(round(mtSize / stWin))
        mtStepRatio = int(round(stWin / stWin))
        mtFeaturesToReduce = []
        numOfFeatures = len(ShortTermFeatures)
        numOfStatistics = 2
        #for i in range(numOfStatistics * numOfFeatures + 1):
        for i in range(numOfStatistics * numOfFeatures):
            mtFeaturesToReduce.append([])

        for i in range(numOfFeatures):        # for each of the short-term features:
            curPos = 0
            N = len(ShortTermFeatures[i])
            while (curPos < N):
                N1 = curPos
                N2 = curPos + mtWinRatio
                if N2 > N:
                    N2 = N
                curStFeatures = ShortTermFeatures[i][N1:N2]
                mtFeaturesToReduce[i].append(numpy.mean(curStFeatures))
                mtFeaturesToReduce[i+numOfFeatures].append(numpy.std(curStFeatures))
                curPos += mtStepRatio
        mtFeaturesToReduce = numpy.array(mtFeaturesToReduce)
        mtFeaturesToReduce2 = numpy.zeros((mtFeaturesToReduce.shape[0] + len(classNames1) + len(classNames2), mtFeaturesToReduce.shape[1]))
        for i in range(mtFeaturesToReduce.shape[1]):
            curF1 = (mtFeaturesToReduce[:, i] - MEAN1) / STD1
            curF2 = (mtFeaturesToReduce[:, i] - MEAN2) / STD2
            [Result, P1] = aT.classifierWrapper(Classifier1, "knn", curF1)
            [Result, P2] = aT.classifierWrapper(Classifier2, "knn", curF2)
            mtFeaturesToReduce2[0:mtFeaturesToReduce.shape[0], i] = mtFeaturesToReduce[:, i]
            mtFeaturesToReduce2[mtFeaturesToReduce.shape[0]:mtFeaturesToReduce.shape[0] + len(classNames1), i] = P1 + 0.0001
            mtFeaturesToReduce2[mtFeaturesToReduce.shape[0]+len(classNames1)::, i] = P2 + 0.0001
        mtFeaturesToReduce = mtFeaturesToReduce2
        mtFeaturesToReduce = mtFeaturesToReduce[iFeaturesSelect, :]
        #mtFeaturesToReduce += numpy.random.rand(mtFeaturesToReduce.shape[0], mtFeaturesToReduce.shape[1]) * 0.0000010
        (mtFeaturesToReduce, MEAN, STD) = aT.normalizeFeatures([mtFeaturesToReduce.T])
        mtFeaturesToReduce = mtFeaturesToReduce[0].T
        #DistancesAll = numpy.sum(distance.squareform(distance.pdist(mtFeaturesToReduce.T)), axis=0)
        #MDistancesAll = numpy.mean(DistancesAll)
        #iNonOutLiers2 = numpy.nonzero(DistancesAll < 3.0*MDistancesAll)[0]
        #mtFeaturesToReduce = mtFeaturesToReduce[:, iNonOutLiers2]
        Labels = numpy.zeros((mtFeaturesToReduce.shape[1], ));
        LDAstep = 1.0
        LDAstepRatio = LDAstep / stWin
        #print LDAstep, LDAstepRatio
        for i in range(Labels.shape[0]):
            Labels[i] = int(i*stWin/LDAstepRatio);        
        clf = sklearn.discriminant_analysis.LinearDiscriminantAnalysis(n_components=LDAdim)
        clf.fit(mtFeaturesToReduce.T, Labels)
        MidTermFeaturesNorm = (clf.transform(MidTermFeaturesNorm.T)).T

    if numOfSpeakers <= 0:
        sRange = range(2, 10)
    else:
        sRange = [numOfSpeakers]
    clsAll = []
    silAll = []
    centersAll = []
    
    for iSpeakers in sRange:        
        k_means = sklearn.cluster.KMeans(n_clusters = iSpeakers)
        k_means.fit(MidTermFeaturesNorm.T)
        cls = k_means.labels_        
        means = k_means.cluster_centers_

        # Y = distance.squareform(distance.pdist(MidTermFeaturesNorm.T))
        clsAll.append(cls)
        centersAll.append(means)
        silA = []; silB = []
        for c in range(iSpeakers):                                # for each speaker (i.e. for each extracted cluster)
            clusterPerCent = numpy.nonzero(cls==c)[0].shape[0] / float(len(cls))
            if clusterPerCent < 0.020:
                silA.append(0.0)
                silB.append(0.0)
            else:
                MidTermFeaturesNormTemp = MidTermFeaturesNorm[:,cls==c]            # get subset of feature vectors
                Yt = distance.pdist(MidTermFeaturesNormTemp.T)                # compute average distance between samples that belong to the cluster (a values)
                silA.append(numpy.mean(Yt)*clusterPerCent)
                silBs = []
                for c2 in range(iSpeakers):                        # compute distances from samples of other clusters
                    if c2!=c:
                        clusterPerCent2 = numpy.nonzero(cls==c2)[0].shape[0] / float(len(cls))
                        MidTermFeaturesNormTemp2 = MidTermFeaturesNorm[:,cls==c2]
                        Yt = distance.cdist(MidTermFeaturesNormTemp.T, MidTermFeaturesNormTemp2.T)
                        silBs.append(numpy.mean(Yt)*(clusterPerCent+clusterPerCent2)/2.0)
                silBs = numpy.array(silBs)                            
                silB.append(min(silBs))                            # ... and keep the minimum value (i.e. the distance from the "nearest" cluster)
        silA = numpy.array(silA); 
        silB = numpy.array(silB); 
        sil = []
        for c in range(iSpeakers):                                # for each cluster (speaker)
            sil.append( ( silB[c] - silA[c]) / (max(silB[c],  silA[c])+0.00001)  )        # compute silhouette

    silAll.append(numpy.mean(sil))                                # keep the AVERAGE SILHOUETTE

    #silAll = silAll * (1.0/(numpy.power(numpy.array(sRange),0.5)))
    imax = numpy.argmax(silAll)                                    # position of the maximum silhouette value
    nSpeakersFinal = sRange[imax]                                    # optimal number of clusters

    # generate the final set of cluster labels
    # (important: need to retrieve the outlier windows: this is achieved by giving them the value of their nearest non-outlier window)
    cls = numpy.zeros((numOfWindows,))
    for i in range(numOfWindows):
        j = numpy.argmin(numpy.abs(i-iNonOutLiers))        
        cls[i] = clsAll[imax][j]
        
    # Post-process method 1: hmm smoothing
    for i in range(1):
        startprob, transmat, means, cov = trainHMM_computeStatistics(MidTermFeaturesNormOr, cls)
        hmm = hmmlearn.hmm.GaussianHMM(startprob.shape[0], "diag")            # hmm training        
        hmm.startprob_ = startprob
        hmm.transmat_ = transmat            
        hmm.means_ = means; hmm.covars_ = cov
        cls = hmm.predict(MidTermFeaturesNormOr.T)                    
    
    # Post-process method 2: median filtering:
    cls = scipy.signal.medfilt(cls, 13)
    cls = scipy.signal.medfilt(cls, 11)

    sil = silAll[imax]                                        # final silhouette
    classNames = ["speaker{0:d}".format(c) for c in range(nSpeakersFinal)];


    # load ground-truth if available
    gtFile = fileName.replace('.wav', '.segments');                            # path of the corresponding annotation file
    if os.path.isfile(gtFile):                                    # if ground truth exists
        [segStart, segEnd, segLabels] = readSegmentGT(gtFile)                    # read GT data
        flagsGT, classNamesGT = segs2flags(segStart, segEnd, segLabels, mtStep)            # convert to flags

    if PLOT:
        fig = plt.figure()    
        if numOfSpeakers>0:
            ax1 = fig.add_subplot(111)
        else:
            ax1 = fig.add_subplot(211)
        ax1.set_yticks(numpy.array(range(len(classNames))))
        ax1.axis((0, Duration, -1, len(classNames)))
        ax1.set_yticklabels(classNames)
        ax1.plot(numpy.array(range(len(cls)))*mtStep+mtStep/2.0, cls)

    if os.path.isfile(gtFile):
        if PLOT:
            ax1.plot(numpy.array(range(len(flagsGT)))*mtStep+mtStep/2.0, flagsGT, 'r')
        purityClusterMean, puritySpeakerMean = evaluateSpeakerDiarization(cls, flagsGT)
        print "{0:.1f}\t{1:.1f}".format(100*purityClusterMean, 100*puritySpeakerMean)
        if PLOT:
            plt.title("Cluster purity: {0:.1f}% - Speaker purity: {1:.1f}%".format(100*purityClusterMean, 100*puritySpeakerMean) )
    if PLOT:
        plt.xlabel("time (seconds)")
        #print sRange, silAll    
        if numOfSpeakers<=0:
            plt.subplot(212)
            plt.plot(sRange, silAll)
            plt.xlabel("number of clusters");
            plt.ylabel("average clustering's sillouette");
        plt.show()
    return cls
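
In this example, numpy.zeros preallocates three buffers that are filled in place: the augmented feature matrix MidTermFeatures2 (the original features plus per-window classifier posteriors), the LDA label vector Labels, and the final per-window cluster labels cls. A minimal sketch of the column-wise augmentation pattern, with illustrative sizes and a constant standing in for the real classifier posteriors:

import numpy

features = numpy.random.rand(10, 100)              # 10 features x 100 mid-term windows (illustrative)
n_extra = 3                                        # e.g. number of appended class posteriors
augmented = numpy.zeros((features.shape[0] + n_extra, features.shape[1]))
for i in range(features.shape[1]):                 # fill one column (one window) at a time
    augmented[:features.shape[0], i] = features[:, i]
    augmented[features.shape[0]:, i] = 1.0 / n_extra + 0.0001   # stand-in for the posteriors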

Example 2

Project: spinalcordtoolbox Source File: plot_snr_and_tracts_std.py
def main():
    results_folder = param_default.results_folder
    methods_to_display = param_default.methods_to_display

    # Parameters for debug mode
    if param_default.debug:
        print '\n*** WARNING: DEBUG MODE ON ***\n'
        results_folder = "/Users/slevy_local/spinalcordtoolbox/dev/atlas/validate_atlas/results_20150210_200iter"#"C:/cygwin64/home/Simon_2/data_methods_comparison"
        path_sct = '/Users/slevy_local/spinalcordtoolbox' #'C:/cygwin64/home/Simon_2/spinalcordtoolbox'
        methods_to_display = 'bin,wa,wath,ml,map'
    else:
        status, path_sct = commands.getstatusoutput('echo $SCT_DIR')

        # Check input parameters
        try:
            opts, args = getopt.getopt(sys.argv[1:], 'i:m:')  # define flags
        except getopt.GetoptError as err:  # check if the arguments are defined
            print str(err)  # error
            # usage() # display usage
        # if not opts:
        #     print 'Please enter the path to the result folder. Exit program.'
        #     sys.exit(1)
        #     # usage()
        for opt, arg in opts:  # explore flags
            if opt in '-i':
                results_folder = arg
            if opt in '-m':
                methods_to_display = arg

    # Append path that contains scripts, to be able to load modules
    sys.path.append(path_sct + '/scripts')
    import sct_utils as sct

    sct.printv("Working directory: " + os.getcwd())

    results_folder_noise = results_folder + '/noise'
    results_folder_tracts = results_folder + '/tracts'

    sct.printv('\n\nData will be extracted from folder ' + results_folder_noise + ' and ' + results_folder_tracts + '.', 'warning')
    sct.printv('\t\tCheck existence...')
    sct.check_folder_exist(results_folder_noise)
    sct.check_folder_exist(results_folder_tracts)


    # Extract methods to display
    methods_to_display = methods_to_display.strip().split(',')

    # Extract file names of the results files
    fname_results_noise = glob.glob(results_folder_noise + '/*.txt')
    fname_results_tracts = glob.glob(results_folder_tracts + '/*.txt')
    fname_results = fname_results_noise + fname_results_tracts
    # Remove duplicates (due to the two folders)
    # for i_fname in range(0, len(fname_results)):
    #     for j_fname in range(0, len(fname_results)):
    #         if (i_fname != j_fname) & (os.path.basename(fname_results[i_fname]) == os.path.basename(fname_results[j_fname])):
    #             fname_results.remove(fname_results[j_fname])
    file_results = []
    for fname in fname_results:
        file_results.append(os.path.basename(fname))
    for file in file_results:
        if file_results.count(file) > 1:
            ind = file_results.index(file)
            fname_results.remove(fname_results[ind])
            file_results.remove(file)

    nb_results_file = len(fname_results)

    # 1st dim: SNR, 2nd dim: tract std, 3rd dim: mean abs error, 4th dim: std abs error
    # result_array = numpy.empty((nb_results_file, nb_results_file, 3), dtype=object)
    # SNR
    snr = numpy.zeros((nb_results_file))
    # Tracts std
    tracts_std = numpy.zeros((nb_results_file))
    # CSF value
    csf_values = numpy.zeros((nb_results_file))
    # methods' name
    methods_name = []  #numpy.empty((nb_results_file, nb_method), dtype=object)
    # labels
    error_per_label = []
    std_per_label = []
    labels_id = []
    # median
    median_results = numpy.zeros((nb_results_file, 5))
    # median std across bootstraps
    median_std = numpy.zeros((nb_results_file, 5))
    # min
    min_results = numpy.zeros((nb_results_file, 5))
    # max
    max_results = numpy.zeros((nb_results_file, 5))

    #
    for i_file in range(0, nb_results_file):

        # Open file
        f = open(fname_results[i_file])  # open file
        # Extract all lines in .txt file
        lines = [line for line in f.readlines() if line.strip()]

        # extract SNR
        # find all index of lines containing the string "sigma noise"
        ind_line_noise = [lines.index(line_noise) for line_noise in lines if "sigma noise" in line_noise]
        if len(ind_line_noise) != 1:
            sct.printv("ERROR: number of lines including \"sigma noise\" is different from 1. Exit program.", 'error')
            sys.exit(1)
        else:
            # result_array[:, i_file, i_file] = int(''.join(c for c in lines[ind_line_noise[0]] if c.isdigit()))
            snr[i_file] = int(''.join(c for c in lines[ind_line_noise[0]] if c.isdigit()))

        # extract tract std
        ind_line_tract_std = [lines.index(line_tract_std) for line_tract_std in lines if
                              "range tracts" in line_tract_std]
        if len(ind_line_tract_std) != 1:
            sct.printv("ERROR: number of lines including \"range tracts\" is different from 1. Exit program.", 'error')
            sys.exit(1)
        else:
            # result_array[i_file, i_file, :] = int(''.join(c for c in lines[ind_line_tract_std[0]].split(':')[1] if c.isdigit()))
            # regex = re.compile(''('(.*)':)  # re.I makes the match case-insensitive
            # match = regex.search(lines[ind_line_tract_std[0]])
            # result_array[:, i_file, :, :] = match.group(1)  # group 1 corresponds to '.*'
            tracts_std[i_file] = int(''.join(c for c in lines[ind_line_tract_std[0]].split(':')[1] if c.isdigit()))

        # extract CSF value
        ind_line_csf_value = [lines.index(line_csf_value) for line_csf_value in lines if
                              "# value CSF" in line_csf_value]
        if len(ind_line_csf_value) != 1:
            sct.printv("ERROR: number of lines including \"range tracts\" is different from 1. Exit program.", 'error')
            sys.exit(1)
        else:
            # result_array[i_file, i_file, :] = int(''.join(c for c in lines[ind_line_tract_std[0]].split(':')[1] if c.isdigit()))
            # regex = re.compile(''('(.*)':)  # re.I makes the match case-insensitive
            # match = regex.search(lines[ind_line_tract_std[0]])
            # result_array[:, i_file, :, :] = match.group(1)  # group 1 corresponds to '.*'
            csf_values[i_file] = int(''.join(c for c in lines[ind_line_csf_value[0]].split(':')[1] if c.isdigit()))


        # extract method name
        ind_line_label = [lines.index(line_label) for line_label in lines if "Label" in line_label]
        if len(ind_line_label) != 1:
            sct.printv("ERROR: number of lines including \"Label\" is different from 1. Exit program.", 'error')
            sys.exit(1)
        else:
            # methods_name[i_file, :] = numpy.array(lines[ind_line_label[0]].strip().split(',')[1:])
            methods_name.append(lines[ind_line_label[0]].strip().replace(' ', '').split(',')[1:])

        # extract median
        ind_line_median = [lines.index(line_median) for line_median in lines if "median" in line_median]
        if len(ind_line_median) != 1:
            sct.printv("WARNING: number of lines including \"median\" is different from 1. Exit program.", 'warning')
            # sys.exit(1)
        else:
            median = lines[ind_line_median[0]].strip().split(',')[1:]
            # result_array[i_file, i_file, 0] = [float(m.split('(')[0]) for m in median]
            median_results[i_file, :] = numpy.array([float(m.split('(')[0]) for m in median])
            median_std[i_file, :] = numpy.array([float(m.split('(')[1][:-1]) for m in median])

        # extract min
        ind_line_min = [lines.index(line_min) for line_min in lines if "min," in line_min]
        if len(ind_line_min) != 1:
            sct.printv("WARNING: number of lines including \"min\" is different from 1. Exit program.", 'warning')
            # sys.exit(1)
        else:
            min = lines[ind_line_min[0]].strip().split(',')[1:]
            # result_array[i_file, i_file, 1] = [float(m.split('(')[0]) for m in min]
            min_results[i_file, :] = numpy.array([float(m.split('(')[0]) for m in min])

        # extract max
        ind_line_max = [lines.index(line_max) for line_max in lines if "max" in line_max]
        if len(ind_line_max) != 1:
            sct.printv("WARNING: number of lines including \"max\" is different from 1. Exit program.", 'warning')
            # sys.exit(1)
        else:
            max = lines[ind_line_max[0]].strip().split(',')[1:]
            # result_array[i_file, i_file, 1] = [float(m.split('(')[0]) for m in max]
            max_results[i_file, :] = numpy.array([float(m.split('(')[0]) for m in max])

        # extract error for each label
        error_per_label_for_file_i = []
        std_per_label_for_file_i = []
        labels_id_for_file_i = []
        # Due to two different kinds of file structure, the index of the last label line must be adapted
        if not ind_line_median:
            ind_line_median = [len(lines) + 1]
        for i_line in range(ind_line_label[0] + 1, ind_line_median[0] - 1):
            line_label_i = lines[i_line].strip().split(',')
            error_per_label_for_file_i.append([float(error.strip().split('(')[0]) for error in line_label_i[1:]])
            std_per_label_for_file_i.append([float(error.strip().split('(')[1][:-1]) for error in line_label_i[1:]])
            labels_id_for_file_i.append(line_label_i[0])
        error_per_label.append(error_per_label_for_file_i)
        std_per_label.append(std_per_label_for_file_i)
        labels_id.append(labels_id_for_file_i)

        # close file
        f.close()

    # check if all the files in the result folder were generated with the same number of methods
    if not all(x == methods_name[0] for x in methods_name):
        sct.printv(
            'ERROR: Not all the generated files in folder ' + results_folder + ' were generated with the same number of methods. Exit program.',
            'error')
        sys.exit(1)
    # check if all the files in the result folder were generated with the same labels
    if not all(x == labels_id[0] for x in labels_id):
        sct.printv(
            'ERROR: Not all the generated files in folder ' + results_folder + ' were generated with the same labels. Exit program.',
            'error')
        sys.exit(1)

    # convert the list "error_per_label" into a numpy array to ease further manipulations
    error_per_label = numpy.array(error_per_label)
    std_per_label = numpy.array(std_per_label)
    # compute different stats
    abs_error_per_labels = numpy.absolute(error_per_label)
    max_abs_error_per_meth = numpy.amax(abs_error_per_labels, axis=1)
    min_abs_error_per_meth = numpy.amin(abs_error_per_labels, axis=1)
    mean_abs_error_per_meth = numpy.mean(abs_error_per_labels, axis=1)
    std_abs_error_per_meth = numpy.std(abs_error_per_labels, axis=1)

    nb_method = len(methods_to_display)

    sct.printv('Noise std of the ' + str(nb_results_file) + ' generated files:')
    print snr
    print '----------------------------------------------------------------------------------------------------------------'
    sct.printv('Tracts std of the ' + str(nb_results_file) + ' generated files:')
    print tracts_std
    print '----------------------------------------------------------------------------------------------------------------'
    sct.printv('CSF value of the ' + str(nb_results_file) + ' generated files:')
    print csf_values
    print '----------------------------------------------------------------------------------------------------------------'
    sct.printv('Methods used to generate results for the ' + str(nb_results_file) + ' generated files:')
    print methods_name
    print '----------------------------------------------------------------------------------------------------------------'
    sct.printv(
        'Median obtained with each method (in columns) for the ' + str(nb_results_file) + ' generated files (in rows):')
    print median_results
    print '----------------------------------------------------------------------------------------------------------------'
    sct.printv('Minimum obtained with each method (in columns) for the ' + str(
        nb_results_file) + ' generated files (in rows):')
    print min_results
    print '----------------------------------------------------------------------------------------------------------------'
    sct.printv('Maximum obtained with each method (in columns) for the ' + str(
        nb_results_file) + ' generated files (in rows):')
    print max_results
    print '----------------------------------------------------------------------------------------------------------------'
    sct.printv('Labels\' ID (in columns) for the ' + str(nb_results_file) + ' generated files (in rows):')
    print labels_id
    print '----------------------------------------------------------------------------------------------------------------'
    sct.printv(
        'Errors obtained with each method (in columns) for the ' + str(nb_results_file) + ' generated files (in rows):')
    print error_per_label


    # ******************************************* START PLOTTING HERE **********************************************

    # # plot A (NOT GOOD)
    # fig0 = plt.figure(0)
    # fig0.suptitle('Absolute error within all tracts as a function of noise std')
    #
    # fig0_ax = fig0.add_subplot(111)
    # fig0_ax.grid(True)
    # fig0_ax.set_xlabel('Noise std')
    # fig0_ax.set_ylabel('Absolute error')
    # colors = plt.get_cmap('jet')(np.linspace(0, 1.0, nb_method))
    #
    # for meth, color in zip(methods_name[0], colors):
    #     if meth != 'mlwa':
    #         i_meth = methods_name[0].index(meth)
    #
    #         # median
    #         plt.plot(snr[ind_tracts_std_10], median_results[ind_tracts_std_10, i_meth][0], label='median '+meth, color=color, marker='o', linestyle='None', markersize=5.0, linewidth=2.0)
    #         # min
    #         plt.plot(snr[ind_tracts_std_10], min_results[ind_tracts_std_10, i_meth][0], label='min '+meth, color=color, marker='_', linestyle='None', markersize=10.0, linewidth=20.0)
    #         # max
    #         plt.plot(snr[ind_tracts_std_10], max_results[ind_tracts_std_10, i_meth][0], label='max '+meth, color=color, marker='+', linestyle='None', markersize=10.0, linewidth=20.0)
    #
    # handles, labels = fig0_ax.get_legend_handles_labels()
    # fig0_ax.legend(handles, labels, loc='best', handler_map={Line2D: HandlerLine2D(numpoints=1)}, fontsize=16)
    #
    #
    # # plot B (NOT GOOD)
    # fig1 = plt.figure(1)
    # fig1.suptitle('Absolute error within all tracts as a function of tract std')
    #
    # fig1_ax = fig1.add_subplot(111)
    # fig1_ax.grid(True)
    # fig1_ax.set_xlabel('Tract std (percentage of the true value in tract)')
    # fig1_ax.set_ylabel('Absolute error')
    # colors = plt.get_cmap('jet')(np.linspace(0, 1.0, nb_method))
    #
    # for meth, color in zip(methods_name[0], colors):
    #     if meth != 'mlwa':
    #         i_meth = methods_name[0].index(meth)
    #
    #         # median
    #         plt.plot(tracts_std[ind_snr_10], median_results[ind_snr_10, i_meth][0], label='median '+meth, color=color, marker='o', linestyle='None', markersize=5.0, linewidth=2.0)
    #         # min
    #         plt.plot(tracts_std[ind_snr_10], min_results[ind_snr_10, i_meth][0], label='min '+meth, color=color, marker='_', linestyle='None', markersize=10.0, linewidth=20.0)
    #         # max
    #         plt.plot(tracts_std[ind_snr_10], max_results[ind_snr_10, i_meth][0], label='max '+meth, color=color, marker='+', linestyle='None', markersize=10.0, linewidth=20.0)
    #
    # handles, labels = fig1_ax.get_legend_handles_labels()
    # fig1_ax.legend(handles, labels, loc='best', handler_map={Line2D: HandlerLine2D(numpoints=1)}, fontsize=16)

    # Plot A
    ind_tracts_std_10 = numpy.where((tracts_std == 10) & (snr != 50))  # indexes where TRACTS STD=10
    ind_ind_snr_sort_tracts_std_10 = numpy.argsort(snr[ind_tracts_std_10])  # indexes of indexes where TRACTS STD=10 sorted according to SNR values (in ascending order)
    ind_snr_sort_tracts_std_10 = ind_tracts_std_10[0][ind_ind_snr_sort_tracts_std_10]  # indexes where TRACTS STD=10 sorted according to SNR values (in ascending order)

    # fig2 = plt.figure(2)
    # ind_fig2 = numpy.arange(len(snr[ind_snr_sort_tracts_std_10])) * 1.2
    # width = 1.0 / (nb_method + 1)
    # plt.ylabel('Error (%)')
    # plt.xlabel('Noise std')
    # plt.title('Error within all tracts as a function of noise std')
    # plt.xticks(ind_fig2 + 0.5, snr[ind_snr_sort_tracts_std_10])
    # plt.gca().set_xlim([-width / (nb_method + 1), numpy.max(ind_fig2) + 1])
    # plt.gca().yaxis.grid(True)
    #
    # colors = plt.get_cmap('jet')(np.linspace(0, 1.0, nb_method))
    # bar_plots = []
    # for meth, color in zip(methods_name[0], colors):
    #     i_meth = methods_name[0].index(meth)
    #
    #     plot_i = plt.bar(ind_fig2 + i_meth * width + (float(i_meth) * width) / (nb_method + 1),
    #                      max_results[ind_snr_sort_tracts_std_10, i_meth] - min_results[
    #                          ind_snr_sort_tracts_std_10, i_meth], width,
    #                      min_results[ind_snr_sort_tracts_std_10, i_meth], edgecolor=color, color='white', linewidth=3)
    #     plt.plot(ind_fig2 + i_meth * width + width / 2 + (float(i_meth) * width) / (nb_method + 1),
    #              median_results[ind_snr_sort_tracts_std_10, i_meth], color=color, marker='_', linestyle='None',
    #              markersize=200 * width, markeredgewidth=3)
    #     bar_plots.append(plot_i[0])
    #
    # plt.legend(bar_plots, methods_name[0], bbox_to_anchor=(1.01, 1), loc=2, borderaxespad=0.)

    # Plot B
    ind_snr_10 = numpy.where((snr == 10) & (tracts_std != 50))  # indexes where SNR=10
    ind_ind_tracts_std_sort_snr_10 = numpy.argsort(tracts_std[ind_snr_10])  # indexes of indexes where SNR=10 sorted according to tracts_std values (in ascending order)
    ind_tracts_std_sort_snr_10 = ind_snr_10[0][ind_ind_tracts_std_sort_snr_10]  # indexes where SNR=10 sorted according to tracts_std values (in ascending order)

    # fig3 = plt.figure(3)
    # ind_fig3 = numpy.arange(len(tracts_std[ind_tracts_std_sort_snr_10])) * 1.2
    # width = 1.0 / (nb_method + 1)
    # plt.ylabel('Error (%)')
    # plt.xlabel('Tracts std (in percentage of the mean value of the tracts)')
    # plt.title('Error within all tracts as a function of tracts std')
    # plt.xticks(ind_fig3 + 0.5, tracts_std[ind_tracts_std_sort_snr_10])
    # plt.gca().set_xlim([-width / (nb_method + 1), numpy.max(ind_fig3) + 1])
    # plt.gca().yaxis.grid(True)
    #
    # colors = plt.get_cmap('jet')(np.linspace(0, 1.0, nb_method))
    # bar_plots = []
    # for meth, color in zip(methods_name[0], colors):
    #     i_meth = methods_name[0].index(meth)
    #
    #     plot_i = plt.bar(ind_fig3 + i_meth * width + (float(i_meth) * width) / (nb_method + 1),
    #                      max_results[ind_tracts_std_sort_snr_10, i_meth] - min_results[
    #                          ind_tracts_std_sort_snr_10, i_meth], width,
    #                      min_results[ind_tracts_std_sort_snr_10, i_meth], edgecolor=color, color='white', linewidth=3)
    #     plt.plot(ind_fig2 + i_meth * width + width / 2 + (float(i_meth) * width) / (nb_method + 1),
    #              median_results[ind_tracts_std_sort_snr_10, i_meth], color=color, marker='_', linestyle='None',
    #              markersize=200 * width, markeredgewidth=3)
    #     bar_plots.append(plot_i[0])
    #
    # plt.legend(bar_plots, methods_name[0], bbox_to_anchor=(1.01, 1), loc=2, borderaxespad=0.)


    # # Plot A -- v2: Absolute error (min, max, mean)
    #
    # fig4 = plt.figure(4)
    # ind_fig4 = numpy.arange(len(snr[ind_snr_sort_tracts_std_10])) * 1.2
    # width = 1.0 / (nb_method + 1)
    # plt.ylabel('Absolute error (%)')
    # plt.xlabel('Noise std')
    # plt.title('Absolute error within all tracts as a function of noise std')
    # plt.xticks(ind_fig4 + 0.5, snr[ind_snr_sort_tracts_std_10])
    # plt.gca().set_xlim([-width / (nb_method + 1), numpy.max(ind_fig4) + 1])
    # plt.gca().yaxis.grid(True)
    #
    # colors = plt.get_cmap('jet')(np.linspace(0, 1.0, nb_method))
    # bar_plots = []
    # for meth, color in zip(methods_name[0], colors):
    #     i_meth = methods_name[0].index(meth)
    #
    #     plot_i = plt.bar(ind_fig4 + i_meth * width + (float(i_meth) * width) / (nb_method + 1),
    #                      max_abs_error_per_meth[ind_snr_sort_tracts_std_10, i_meth] - min_abs_error_per_meth[
    #                          ind_snr_sort_tracts_std_10, i_meth], width,
    #                      min_abs_error_per_meth[ind_snr_sort_tracts_std_10, i_meth], edgecolor=color, color='white',
    #                      linewidth=3)
    #     plt.errorbar(ind_fig2 + i_meth * width + width / 2 + (float(i_meth) * width) / (nb_method + 1),
    #                  mean_abs_error_per_meth[ind_snr_sort_tracts_std_10, i_meth],
    #                  std_abs_error_per_meth[ind_snr_sort_tracts_std_10, i_meth], color=color, marker='_',
    #                  linestyle='None', markersize=200 * width, markeredgewidth=3)
    #     bar_plots.append(plot_i[0])
    #
    # plt.legend(bar_plots, methods_name[0], bbox_to_anchor=(1.01, 1), loc=2, borderaxespad=0.)
    #
    # # Plot B -- v2: Absolute error (min, max, mean)
    # fig5 = plt.figure(5)
    # ind_fig5 = numpy.arange(len(tracts_std[ind_tracts_std_sort_snr_10])) * 1.2
    # width = 1.0 / (nb_method + 1)
    # plt.ylabel('Absolute error (%)')
    # plt.xlabel('Tracts std (in percentage of the mean value of the tracts)')
    # plt.title('Absolute error within all tracts as a function of tracts std')
    # plt.xticks(ind_fig5 + 0.5, tracts_std[ind_tracts_std_sort_snr_10])
    # plt.gca().set_xlim([-width / (nb_method + 1), numpy.max(ind_fig5) + 1])
    # plt.gca().yaxis.grid(True)
    #
    # colors = plt.get_cmap('jet')(np.linspace(0, 1.0, nb_method))
    # bar_plots = []
    # for meth, color in zip(methods_name[0], colors):
    #     i_meth = methods_name[0].index(meth)
    #
    #     plot_i = plt.bar(ind_fig5 + i_meth * width + (float(i_meth) * width) / (nb_method + 1),
    #                      max_abs_error_per_meth[ind_tracts_std_sort_snr_10, i_meth] - min_abs_error_per_meth[
    #                          ind_tracts_std_sort_snr_10, i_meth], width,
    #                      min_abs_error_per_meth[ind_tracts_std_sort_snr_10, i_meth], edgecolor=color, color='white',
    #                      linewidth=3)
    #     plt.errorbar(ind_fig2 + i_meth * width + width / 2 + (float(i_meth) * width) / (nb_method + 1),
    #                  mean_abs_error_per_meth[ind_tracts_std_sort_snr_10, i_meth],
    #                  std_abs_error_per_meth[ind_tracts_std_sort_snr_10, i_meth], color=color, marker='_',
    #                  linestyle='None', markersize=200 * width, markeredgewidth=3)
    #     bar_plots.append(plot_i[0])
    #
    # plt.legend(bar_plots, methods_name[0], bbox_to_anchor=(1.01, 1), loc=2, borderaxespad=0.)

    matplotlib.rcParams.update({'font.size': 45, 'font.family': 'trebuchet'})
    plt.rcParams['xtick.major.pad'] = '9'
    plt.rcParams['ytick.major.pad'] = '15'

    # Plot A -- v3: Box plots absolute error
    fig6 = plt.figure(6, figsize=(30, 16))
    width = 1.0 / (nb_method + 1)
    ind_fig6 = numpy.arange(len(snr[ind_snr_sort_tracts_std_10])) * (1.0 + width)
    plt.ylabel('Absolute error (%)\n', fontsize=55)
    plt.xlabel('Noise STD (% of true WM value)', fontsize=55)
    plt.title('Absolute error within all tracts as a function of noise std\n', fontsize=65)

    # colors = plt.get_cmap('jet')(np.linspace(0, 1.0, nb_method))
    colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
    box_plots = []
    for meth, color in zip(methods_to_display, colors):
        i_meth = methods_name[0].index(meth)
        i_meth_to_display = methods_to_display.index(meth)

        boxprops = dict(linewidth=4, color=color)
        flierprops = dict(color=color, markeredgewidth=0.7, markersize=15, marker='.')
        whiskerprops = dict(color=color, linewidth=3)
        capprops = dict(color=color, linewidth=3)
        medianprops = dict(linewidth=4, color=color)
        meanpointprops = dict(marker='D', markeredgecolor='black', markerfacecolor='firebrick')
        meanlineprops = dict(linestyle='--', linewidth=2.5, color='purple')
        plot_i = plt.boxplot(numpy.transpose(abs_error_per_labels[ind_snr_sort_tracts_std_10, :, i_meth]),
                             positions=ind_fig6 + i_meth_to_display * width + (float(i_meth_to_display) * width) / (
                                 nb_method + 1), widths=width, boxprops=boxprops, medianprops=medianprops,
                             flierprops=flierprops, whiskerprops=whiskerprops, capprops=capprops)
        # plt.errorbar(ind_fig2+i_meth*width+width/2+(float(i_meth)*width)/(nb_method+1), mean_abs_error_per_meth[ind_snr_sort_tracts_std_10, i_meth], std_abs_error_per_meth[ind_snr_sort_tracts_std_10, i_meth], color=color, marker='_', linestyle='None', markersize=200*width, markeredgewidth=3)
        box_plots.append(plot_i['boxes'][0])

    # add alternating vertical background colored bars
    for i_xtick in range(0, len(ind_fig6), 2):
        plt.axvspan(ind_fig6[i_xtick] - width - width / 4, ind_fig6[i_xtick] + (nb_method + 1) * width - width / 4, facecolor='grey', alpha=0.1)

    # plt.legend(box_plots, methods_to_display, bbox_to_anchor=(1.01, 1), loc=2, borderaxespad=0.)
    # plt.legend(box_plots, methods_to_display, loc='best')
    # convert xtick labels into integers
    xtick_labels = [int(xtick) for xtick in snr[ind_snr_sort_tracts_std_10]]
    plt.xticks(ind_fig6 + (numpy.floor(nb_method / 2)) * width * (1.0 + 1.0 / (nb_method + 1)), xtick_labels)
    plt.gca().set_xlim([-width, numpy.max(ind_fig6) + (nb_method + 0.5) * width])
    plt.gca().set_ylim([0, 18])
    plt.gca().yaxis.set_major_locator(plt.MultipleLocator(2))
    plt.gca().yaxis.set_minor_locator(plt.MultipleLocator(0.5))
    plt.grid(b=True, axis='y', which='both', alpha=0.5)
    plt.subplots_adjust(left=0.1)


    plt.savefig(param_default.fname_folder_to_save_fig+'/absolute_error_vs_noise_std_Tracts_std_fixed_to_10.pdf', format='PDF')


    # Plot B -- v3: Box plots absolute error
    fig7 = plt.figure(7, figsize=(30, 16))
    width = 1.0 / (nb_method + 1)
    ind_fig7 = numpy.arange(len(tracts_std[ind_tracts_std_sort_snr_10])) * (1.0 + width)
    plt.ylabel('Absolute error (%)\n', fontsize=55)
    plt.xlabel('Tracts STD (% of true WM value)', fontsize=55)
    plt.title('Absolute error within all tracts as a function of tracts std\n', fontsize=65)

    # colors = plt.get_cmap('jet')(np.linspace(0, 1.0, nb_method))
    colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
    box_plots = []
    for meth, color in zip(methods_to_display, colors):
        i_meth = methods_name[0].index(meth)
        i_meth_to_display = methods_to_display.index(meth)

        boxprops = dict(linewidth=4, color=color)
        flierprops = dict(color=color, markeredgewidth=0.7, markersize=15, marker='.')
        whiskerprops = dict(color=color, linewidth=3)
        capprops = dict(color=color, linewidth=3)
        medianprops = dict(linewidth=4, color=color)
        meanpointprops = dict(marker='D', markeredgecolor='black', markerfacecolor='firebrick')
        meanlineprops = dict(linestyle='--', linewidth=2.5, color='purple')
        plot_i = plt.boxplot(numpy.transpose(abs_error_per_labels[ind_tracts_std_sort_snr_10, :, i_meth]),
                             positions=ind_fig7 + i_meth_to_display * width + (float(i_meth_to_display) * width) / (
                                 nb_method + 1), widths=width, boxprops=boxprops, medianprops=medianprops,
                             flierprops=flierprops, whiskerprops=whiskerprops, capprops=capprops)
        # plt.errorbar(ind_fig2+i_meth*width+width/2+(float(i_meth)*width)/(nb_method+1), mean_abs_error_per_meth[ind_tracts_std_sort_snr_10, i_meth], std_abs_error_per_meth[ind_tracts_std_sort_snr_10, i_meth], color=color, marker='_', linestyle='None', markersize=200*width, markeredgewidth=3)
        box_plots.append(plot_i['boxes'][0])

    # add alternating vertical background colored bars
    for i_xtick in range(0, len(ind_fig7), 2):
        plt.axvspan(ind_fig7[i_xtick] - width - width / 4, ind_fig7[i_xtick] + (nb_method + 1) * width - width / 4, facecolor='grey', alpha=0.1)


    # plt.legend(box_plots, methods_to_display, bbox_to_anchor=(1.01, 1), loc=2, borderaxespad=0.)
    # plt.legend(box_plots, methods_to_display, loc='best')
    # convert xtick labels into integers
    xtick_labels = [int(xtick) for xtick in tracts_std[ind_tracts_std_sort_snr_10]]
    plt.xticks(ind_fig7 + (numpy.floor(nb_method / 2)) * width * (1.0 + 1.0 / (nb_method + 1)), xtick_labels)
    plt.gca().set_xlim([-width, numpy.max(ind_fig7) + (nb_method + 0.5) * width])
    plt.gca().set_ylim([0, 18])
    plt.gca().yaxis.set_major_locator(plt.MultipleLocator(2))
    plt.gca().yaxis.set_minor_locator(plt.MultipleLocator(0.5))
    plt.grid(b=True, axis='y', which='both', alpha=0.5)
    plt.subplots_adjust(left=0.1)

    plt.savefig(param_default.fname_folder_to_save_fig+'/absolute_error_vs_tracts_std_Noise_std_fixed_to_10.pdf', format='PDF')


    plt.show(block=False)
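
Here numpy.zeros sizes the per-file result arrays up front: snr, tracts_std and csf_values as vectors of length nb_results_file, and median_results, median_std, min_results and max_results as nb_results_file x 5 matrices, which the parsing loop then fills one row per result file. A minimal sketch of that preallocate-then-fill pattern, with hypothetical file names and constants standing in for the real text parsing:

import numpy

fnames = ['a.txt', 'b.txt', 'c.txt']               # hypothetical result files
n_methods = 5
snr = numpy.zeros(len(fnames))                     # one scalar per file
median_results = numpy.zeros((len(fnames), n_methods))  # one row of method medians per file
for i, fname in enumerate(fnames):
    snr[i] = 10                                    # would be parsed from the "sigma noise" line
    median_results[i, :] = numpy.arange(n_methods) # would be parsed from the "median" line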

Example 3

Project: pyscf Source File: kccsd.py
def update_amps(cc, t1, t2, eris, max_memory=2000):
    time0 = time.clock(), time.time()
    log = logger.Logger(cc.stdout, cc.verbose)
    nkpts, nocc, nvir = t1.shape
    #nov = nocc*nvir
    fock = eris.fock
    #t1new = numpy.zeros_like(t1)
    #t2new = numpy.zeros_like(t2)

    fov = fock[:,:nocc,nocc:].copy()
    foo = fock[:,:nocc,:nocc].copy()
    fvv = fock[:,nocc:,nocc:].copy()

    #mo_e = eris.fock.diagonal()
    #eia = mo_e[:nocc,None] - mo_e[None,nocc:]
    #eijab = lib.direct_sum('ia,jb->ijab',eia,eia)

    tau = imdk.make_tau(cc,t2,t1,t1)

    ### From eom-cc hackathon code ###
    Fvv = imdk.cc_Fvv(cc,t1,t2,eris)
    Foo = imdk.cc_Foo(cc,t1,t2,eris)
    Fov = imdk.cc_Fov(cc,t1,t2,eris)
    Woooo = imdk.cc_Woooo(cc,t1,t2,eris)
    Wvvvv = imdk.cc_Wvvvv(cc,t1,t2,eris)
    Wovvo = imdk.cc_Wovvo(cc,t1,t2,eris)

    # Move energy terms to the other side
    Fvv -= fvv
    Foo -= foo

    # Get the momentum conservation array
    # Note: chemist's notation for momentum conserving t2(ki,kj,ka,kb), even though
    # integrals are in physics notation
    kconserv = tools.get_kconserv(cc._scf.cell, cc.kpts)

    eris_ovvo = numpy.zeros(shape=(nkpts,nkpts,nkpts,nocc,nvir,nvir,nocc), dtype=t2.dtype)
    eris_oovo = numpy.zeros(shape=(nkpts,nkpts,nkpts,nocc,nocc,nvir,nocc), dtype=t2.dtype)
    eris_vvvo = numpy.zeros(shape=(nkpts,nkpts,nkpts,nvir,nvir,nvir,nocc), dtype=t2.dtype)
    for km in range(nkpts):
        for kb in range(nkpts):
            for ke in range(nkpts):
                kj = kconserv[km,ke,kb]
                # <mb||je> -> -<mb||ej>
                eris_ovvo[km,kb,ke] = -eris.ovov[km,kb,kj].transpose(0,1,3,2)
                # <mn||je> -> -<mn||ej>
                # let kb = kn as a dummy variable
                eris_oovo[km,kb,ke] = -eris.ooov[km,kb,kj].transpose(0,1,3,2)
                # <ma||be> -> - <be||am>*
                # let kj = ka as a dummy variable
                kj = kconserv[km,ke,kb]
                eris_vvvo[ke,kj,kb] = -eris.ovvv[km,kb,ke].transpose(2,3,1,0).conj()

    # T1 equation
    t1new = numpy.zeros(shape=t1.shape, dtype=t1.dtype)
    for ka in range(nkpts):
        ki = ka
        # TODO: Does this fov need a conj()? Usually zero w/ canonical HF.
        t1new[ka] += fov[ka,:,:]
        t1new[ka] +=  einsum('ie,ae->ia',t1[ka],Fvv[ka])
        t1new[ka] += -einsum('ma,mi->ia',t1[ka],Foo[ka])
        for km in range(nkpts):
            t1new[ka] +=  einsum('imae,me->ia',t2[ka,km,ka],Fov[km])
            t1new[ka] += -einsum('nf,naif->ia',t1[km],eris.ovov[km,ka,ki])
            for kn in range(nkpts):
                ke = kconserv[km,ki,kn]
                t1new[ka] += -0.5*einsum('imef,maef->ia',t2[ki,km,ke],eris.ovvv[km,ka,ke])
                t1new[ka] += -0.5*einsum('mnae,nmei->ia',t2[km,kn,ka],eris_oovo[kn,km,ke])

    # T2 equation
    # For conj(), see Hirata and Bartlett, Eq. (36)
    t2new = eris.oovv.copy().conj()

    for ki in range(nkpts):
      for kj in range(nkpts):
        for ka in range(nkpts):
            # Chemist's notation for momentum conserving t2(ki,kj,ka,kb)
            kb = kconserv[ki,ka,kj]

            Ftmp = Fvv[kb] - 0.5*einsum('mb,me->be',t1[kb],Fov[kb])
            tmp = einsum('ijae,be->ijab',t2[ki,kj,ka],Ftmp)
            t2new[ki,kj,ka] += tmp

            Ftmp = Fvv[ka] - 0.5*einsum('ma,me->ae',t1[ka],Fov[ka])
            tmp = einsum('ijbe,ae->ijab',t2[ki,kj,kb],Ftmp)
            t2new[ki,kj,ka] -= tmp

            #t2new[ki,kj,kb] -= tmp.transpose(0,1,3,2)

            Ftmp = Foo[kj] + 0.5*einsum('je,me->mj',t1[kj],Fov[kj])
            tmp = einsum('imab,mj->ijab',t2[ki,kj,ka],Ftmp)
            t2new[ki,kj,ka] -= tmp

            Ftmp = Foo[ki] + 0.5*einsum('ie,me->mi',t1[ki],Fov[ki])
            tmp = einsum('jmab,mi->ijab',t2[kj,ki,ka],Ftmp)
            t2new[ki,kj,ka] += tmp

            #t2new[kj,ki,ka] += tmp.transpose(1,0,2,3)

            for km in range(nkpts):
                # Wminj
                #     - km - kn + ka + kb = 0
                # =>  kn = ka - km + kb
                kn = kconserv[ka,km,kb]
                t2new[ki,kj,ka] += 0.5*einsum('mnab,mnij->ijab',tau[km,kn,ka],Woooo[km,kn,ki])
                ke = km
                t2new[ki,kj,ka] += 0.5*einsum('ijef,abef->ijab',tau[ki,kj,ke],Wvvvv[ka,kb,ke])

                # Wmbej
                #     - km - kb + ke + kj = 0
                #  => ke = km - kj + kb
                ke = kconserv[km,kj,kb]
                tmp = einsum('imae,mbej->ijab',t2[ki,km,ka],Wovvo[km,kb,ke])
                #     - km - kb + ke + kj = 0
                # =>  ke = km - kj + kb
                #
                # t[i,e] => ki = ke
                # t[m,a] => km = ka
                if km == ka and ke == ki:
                    tmp -= einsum('ie,ma,mbej->ijab',t1[ki],t1[km],eris_ovvo[km,kb,ke])
                t2new[ki,kj,ka] += tmp
                t2new[ki,kj,kb] -= tmp.transpose(0,1,3,2)
                t2new[kj,ki,ka] -= tmp.transpose(1,0,2,3)
                t2new[kj,ki,kb] += tmp.transpose(1,0,3,2)

            ke = ki
            tmp = einsum('ie,abej->ijab',t1[ki],eris_vvvo[ka,kb,ke])
            t2new[ki,kj,ka] += tmp
            # P(ij) term
            ke = kj
            tmp = einsum('je,abei->ijab',t1[kj],eris_vvvo[ka,kb,ke])
            t2new[ki,kj,ka] -= tmp

            km = ka
            tmp = einsum('ma,mbij->ijab',t1[ka],eris.ovoo[km,kb,ki])
            t2new[ki,kj,ka] -= tmp
            # P(ab) term
            km = kb
            tmp = einsum('mb,maij->ijab',t1[kb],eris.ovoo[km,ka,ki])
            t2new[ki,kj,ka] += tmp

    eia = numpy.zeros(shape=t1new.shape, dtype=t1new.dtype)
    for ki in range(nkpts):
        for i in range(nocc):
            for a in range(nvir):
                eia[ki,i,a] = foo[ki,i,i] - fvv[ki,a,a]
        t1new[ki] /= eia[ki]

    eijab = numpy.zeros(shape=t2new.shape, dtype=t2new.dtype)
    kconserv = tools.get_kconserv(cc._scf.cell, cc.kpts)
    for ki in range(nkpts):
        for kj in range(nkpts):
            for ka in range(nkpts):
                kb = kconserv[ki,ka,kj]
                for i in range(nocc):
                    for a in range(nvir):
                        for j in range(nocc):
                            for b in range(nvir):
                                eijab[ki,kj,ka,i,j,a,b] = ( foo[ki,i,i] + foo[kj,j,j]
                                                          - fvv[ka,a,a] - fvv[kb,b,b] )
                t2new[ki,kj,ka] /= eijab[ki,kj,ka]

    time0 = log.timer_debug1('update t1 t2', *time0)

    return t1new, t2new
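
A detail worth noting in this example: the scratch tensors are allocated with explicit shape= and dtype= keywords (e.g. numpy.zeros(shape=t1.shape, dtype=t1.dtype)) so they inherit the dtype of the amplitudes, which is generally complex in k-point calculations; allocating with the default float64 would drop the imaginary parts when complex values are assigned in. When the template array already exists, numpy.zeros_like gives the same result more compactly:

import numpy

t1 = numpy.ones((2, 4, 6), dtype=complex)            # illustrative amplitude tensor
t1new = numpy.zeros(shape=t1.shape, dtype=t1.dtype)  # the pattern used in the example above
t1new_alt = numpy.zeros_like(t1)                     # same shape and dtype in one call
assert t1new.dtype == t1new_alt.dtype == numpy.complex128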

Example 4

Project: pyspace Source File: time_series.py
    def get_data(self, run_nr, split_nr, train_test):
        """ Return the train or test data for the given split in the given run.
        
        **Parameters**
          
          :run_nr: The number of the run whose data should be loaded.
          
          :split_nr: The number of the split whose data should be loaded.
          
          :train_test: "train" if the training data should be loaded.
                       "test" if the test data should be loaded.
    
        """
        # Do lazy loading of the time series objects.
        if isinstance(self.data[(run_nr, split_nr, train_test)], basestring):
            self._log("Lazy loading of %s time series windows from input "
                      "collection for run %s, split %s." % (train_test, run_nr, 
                                                            split_nr))
            s_format = self.meta_data["storage_format"]
            if type(s_format) == list:
                f_format = s_format[0]
            else:
                f_format = s_format
            if f_format == "pickle":
                # Load the time series from a pickled file
                f = open(self.data[(run_nr, split_nr, train_test)], 'r')
                try:
                    self.data[(run_nr, split_nr, train_test)] = cPickle.load(f)
                except ImportError:
                    # code for backward compatibility
                    # redirection of old path
                    f.seek(0)
                    self._log("Loading deprecated data. Please transfer it " +
                              "to new format.",level=logging.WARNING)
                    from pySPACE.resources.data_types import time_series
                    sys.modules['abri_dp.types.time_series'] = time_series
                    self.data[(run_nr, split_nr, train_test)] = cPickle.load(f)
                    del sys.modules['abri_dp.types.time_series']
                f.close()
            elif f_format in ["mat", "matlab", "MATLAB"]:
                from scipy.io import loadmat
                from pySPACE.resources.data_types.time_series import TimeSeries
                ts_fname = self.data[(run_nr, split_nr, train_test)]
                dataset = loadmat(ts_fname)
                channel_names = [name.strip() for name in dataset['channel_names']]
                sf = dataset["sampling_frequency"][0][0]
                self.data[(run_nr, split_nr, train_test)] = []
                # assume third axis to be trial axis
                if "channelXtime" in s_format:
                    for i in range(dataset["data"].shape[2]):
                        self.data[(run_nr, split_nr, train_test)].append(\
                            (TimeSeries(dataset["data"][:,:,i].T, channel_names,
                                        sf), dataset["labels"][i].strip()))
                else:
                    for i in range(dataset["data"].shape[2]):
                        self.data[(run_nr, split_nr, train_test)].append(\
                            (TimeSeries(dataset["data"][:,:,i], channel_names,
                                        sf), dataset["labels"][i].strip()))                    
            elif f_format.startswith("bci_comp"):
                from scipy.io import loadmat
                from pySPACE.resources.data_types.time_series import TimeSeries
                if self.comp_number == "2":
                    if self.comp_set == "4":
                        ts_fname = self.data[(run_nr, split_nr, train_test)]
                        d = loadmat(ts_fname)
                        channel_names = [name[0].astype('|S3') for name in \
                                                                   d["clab"][0]]
                        if train_test == "train":
                            self.data[(run_nr, split_nr, train_test)] = []
                            input_d = d["x_train"]
                            input_l = d["y_train"][0]
                            for i in range(input_d.shape[2]):
                                self.data[(run_nr, split_nr, 
                                           train_test)].append(\
                                            (TimeSeries(input_d[:,:,i],
                                                 channel_names, float(self.sf)), 
                                        "Left" if input_l[i] == 0 else "Right"))
                        else:
                            label_fname = glob.glob(os.path.join(
                                          os.path.dirname(ts_fname),"*.txt"))[0]
                            input_d = d["x_test"]
                            input_l = open(label_fname,'r')
                            self.data[(run_nr, split_nr, train_test)] = []
                            for i in range(input_d.shape[2]):
                                label = int(input_l.readline())
                                self.data[(run_nr, split_nr, 
                                           train_test)].append(\
                                            (TimeSeries(input_d[:,:,i],
                                                 channel_names, float(self.sf)), 
                                             "Left" if label == 0 else "Right"))
                elif self.comp_number == "3":
                    if self.comp_set == "2":
                        data = loadmat(self.data[(run_nr, split_nr, train_test)])
                        signal = data['Signal']
                        flashing = data['Flashing']
                        stimulus_code = data['StimulusCode']
                        stimulus_type = data['StimulusType']
                
                        window = 240
                        Fs = 240
                        channels = 64
                        epochs = signal.shape[0]
                        self.data[(run_nr, split_nr, train_test)] = []
                        self.start_offset_ms = 1000.0
                        self.end_offset_ms = 1000.0
                        
                        start_offset = int(self.start_offset_ms * Fs / 1000.0)
                        end_offset = int(self.end_offset_ms * Fs / 1000.0)
                        whole_len = start_offset + end_offset + window
                        responses = numpy.zeros((12, 15, whole_len, channels))
                        for epoch in range(epochs):
                            rowcolcnt = numpy.ones(12, dtype=int)
                            for n in range(1, signal.shape[1]):
                                if flashing[epoch, n] == 0 and flashing[epoch, n-1] == 1:
                                    rowcol = int(stimulus_code[epoch, n-1])
                                    if n - 24 - start_offset < 0:
                                        temp = signal[epoch, 0:n+window+end_offset-24, :]
                                        temp = numpy.vstack((numpy.zeros((whole_len - temp.shape[0], temp.shape[1])), temp))
                                    elif n + window + end_offset - 24 > signal.shape[1]:
                                        temp = signal[epoch, n-24-start_offset:signal.shape[1], :]
                                        temp = numpy.vstack((temp, numpy.zeros((whole_len - temp.shape[0], temp.shape[1]))))
                                    else:
                                        temp = signal[epoch, n-24-start_offset:n+window+end_offset-24, :]
                                    responses[rowcol-1, rowcolcnt[rowcol-1]-1, :, :] = temp
                                    rowcolcnt[rowcol-1] += 1
                
                            avgresp=numpy.mean(responses,1)
                
                            targets = stimulus_code[epoch,:]*stimulus_type[epoch,:]
                            target_rowcol = []
                            for value in targets:
                                if value not in target_rowcol:
                                    target_rowcol.append(value)
                
                            target_rowcol.sort()
                
                            for i in range(avgresp.shape[0]):
                                temp = avgresp[i,:,:]
                                data = TimeSeries(input_array = temp,
                                                  channel_names = range(64), 
                                                  sampling_frequency = window)
                                if i == target_rowcol[1]-1 or i == target_rowcol[2]-1:
                                    self.data[(run_nr, split_nr, train_test)].append((data,"Target"))
                                else:
                                    self.data[(run_nr, split_nr, train_test)].append((data,"Standard"))                 
        if self.stream_mode and not self.data[(run_nr, split_nr, train_test)] == []:
            # Create a connection to the TimeSeriesClient and return an iterator
            # that passes all received data through the windower.
            self.reader = TimeSeriesClient(self.data[(run_nr, split_nr, train_test)], blocksize=100)

            # Creates a windower that splits the training data into windows
            # based in the window definitions provided
            # and assigns correct labels to these windows
            self.reader.set_window_defs(self.window_definition)
            self.reader.connect()
            self.marker_windower = MarkerWindower(
                self.reader, self.window_definition,
                nullmarker_stride_ms=self.nullmarker_stride_ms,
                no_overlap=self.no_overlap,
                data_consistency_check=self.data_consistency_check)
            return self.marker_windower
        else:
            return self.data[(run_nr, split_nr, train_test)]
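
The numpy.zeros calls above do two jobs: preallocating the fixed-shape responses buffer, and zero-padding epochs whose windows run past either end of the recording. A minimal sketch of that padding pattern (all names and sizes are hypothetical, not from the project):

import numpy

def padded_window(signal, start, length):
    # Extract signal[start:start+length, :]; any part of the window that
    # falls outside the recording stays zero (hypothetical helper).
    n_samples, n_channels = signal.shape
    out = numpy.zeros((length, n_channels))
    lo = max(start, 0)
    hi = min(start + length, n_samples)
    if lo < hi:
        out[lo - start:hi - start, :] = signal[lo:hi, :]
    return out

# A 10-sample, 2-channel recording; the window starts 2 samples before the
# recording, so the first two rows of the result remain zero.
signal = numpy.arange(20.0).reshape(10, 2)
print(padded_window(signal, -2, 5))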

Example 5

Project: RMG-Py Source File: orcaparser.py
Function: extract
    def extract(self, inputfile, line):
        """Extract information from the file object inputfile."""

        if line[0:15] == "Number of atoms":

            natom = int(line.split()[-1])
            if hasattr(self, "natom"):
                # I wonder whether this code will ever be executed.
                assert self.natom == natom
            else:
                self.natom = natom

        if line[1:13] == "Total Charge":
#get charge and multiplicity info
            self.charge = int(line.split()[-1])
            line = inputfile.next()
            self.mult = int(line.split()[-1])

        if line[25:50] == "Geometry Optimization Run":
#get geotarget info
            line = inputfile.next()
            while line[0:23] != "Convergence Tolerances:":
                line = inputfile.next()

            self.geotargets = numpy.zeros((5,), "d")
            for i in range(5):
                line = inputfile.next()
                self.geotargets[i] = float(line.split()[-2])

        # Read in scfvalues.
        if line[:14] == "SCF ITERATIONS":
            if not hasattr(self, "scfvalues"):
                self.scfvalues = []
            dashes = inputfile.next()
            line = inputfile.next().split()
            assert line[1] == "Energy"
            assert line[2] == "Delta-E"
            assert line[3] == "Max-DP"
            self.scfvalues.append([])
            while line != []:
                if line[0].isdigit():
                    energy = float(line[1])
                    deltaE = float(line[2])
                    maxDP = float(line[3])
                    rmsDP = float(line[4])
                    self.scfvalues[-1].append([deltaE, maxDP, rmsDP])
                line = inputfile.next().split()

        # Read in values for last SCF iteration and scftargets.
        if line[:15] == "SCF CONVERGENCE":
            if not hasattr(self, "scfvalues"):
                self.scfvalues = []
            if not hasattr(self, "scftargets"):
                self.scftargets = []
            dashes = inputfile.next()
            blank = inputfile.next()
            line = inputfile.next()
            assert line[:29].strip() == "Last Energy change"
            deltaE_value = float(line[33:46])
            deltaE_target = float(line[60:72])
            line = inputfile.next()
            assert line[:29].strip() == "Last MAX-Density change"
            maxDP_value = float(line[33:46])
            maxDP_target = float(line[60:72])
            line = inputfile.next()
            assert line[:29].strip() == "Last RMS-Density change"
            rmsDP_value = float(line[33:46])
            rmsDP_target = float(line[60:72])
            line = inputfile.next()
            assert line[:29].strip() == "Last DIIS Error"
            self.scfvalues[-1].append([deltaE_value,maxDP_value,rmsDP_value])
            self.scftargets.append([deltaE_target,maxDP_target,rmsDP_target])                    

        # Read in SCF energy, at least in SP calculation.
        if line[:16] == "TOTAL SCF ENERGY":
            if not hasattr(self, "scfenergies"):
                self.scfenergies = []
            dashes = inputfile.next()
            blank = inputfile.next()
            line = inputfile.next()
            if line[:12] == "Total Energy":
                energy = float(line[50:67])
                self.scfenergies.append(energy)

        if line[33:53] == "Geometry convergence":
#get geometry convergence criteria
            if not hasattr(self, "geovalues"):
                self.geovalues = [ ]
            
            newlist = []
            headers = inputfile.next()
            dashes = inputfile.next()
            
            #check if energy change is present (steps > 1)
            line = inputfile.next()
            if line.find("Energy change") > 0:
                newlist.append(float(line.split()[2]))
                line = inputfile.next()
            else:
                newlist.append(0.0)

            #get rest of info
            for i in range(4):
                newlist.append(float(line.split()[2]))
                line = inputfile.next()
            
            self.geovalues.append(newlist)

        if line[0:21] == "CARTESIAN COORDINATES" and not hasattr(self, "atomcoords"):
#if not an optimization, determine structure used
            dashes = inputfile.next()
            
            atomnos = []
            atomcoords = []
            line = inputfile.next()
            while len(line) > 1:
                broken = line.split()
                atomnos.append(self.table.number[broken[0]])
                atomcoords.append(map(float, broken[1:4]))
                line = inputfile.next()

            self.atomcoords = [atomcoords]
            if not hasattr(self, "atomnos"):
                self.atomnos = atomnos
                self.natom = len(atomnos)
                
        if line[26:53] == "GEOMETRY OPTIMIZATION CYCLE":
#parse geometry coords
            stars = inputfile.next()
            dashes = inputfile.next()
            text = inputfile.next()
            dashes = inputfile.next()
           
            if not hasattr(self,"atomcoords"):
                self.atomcoords = []

            atomnos = []
            atomcoords = []
            for i in range(self.natom):
                line = inputfile.next()
                broken = line.split()
                atomnos.append(self.table.number[broken[0]])
                atomcoords.append(map(float, broken[1:4]))
            
            self.atomcoords.append(atomcoords)
            if not hasattr(self, "atomnos"):
                self.atomnos = numpy.array(atomnos,'i')

        if line[21:68] == "FINAL ENERGY EVALUATION AT THE STATIONARY POINT":
            text = inputfile.next()
            broken = text.split()
            assert int(broken[2]) == len(self.atomcoords)
            stars = inputfile.next()
            dashes = inputfile.next()
            text = inputfile.next()
            dashes = inputfile.next()

            atomcoords = []
            for i in range(self.natom):
                line = inputfile.next()
                broken = line.split()
                atomcoords.append(map(float, broken[1:4]))

            self.atomcoords.append(atomcoords)

        if line[0:16] == "ORBITAL ENERGIES":
#parse orbital energy information
            dashes = inputfile.next()
            text = inputfile.next()
            text = inputfile.next()

            self.moenergies = [[]]
            self.homos = [[0]]

            line = inputfile.next()
            while len(line) > 20: #restricted calcs are terminated by ------
                info = line.split()
                self.moenergies[0].append(float(info[3]))
                if float(info[1]) > 0.00: #might be 1 or 2, depending on restricted-ness
                    self.homos[0] = int(info[0])
                line = inputfile.next()

            line = inputfile.next()

            #handle beta orbitals
            if line[17:35] == "SPIN DOWN ORBITALS":
                text = inputfile.next()

                self.moenergies.append([])
                self.homos.append(0)

                line = inputfile.next()
                while len(line) > 20: #actually terminated by ------
                    info = line.split()
                    self.moenergies[1].append(float(info[3]))
                    if float(info[1]) == 1.00:
                        self.homos[1] = int(info[0])
                    line = inputfile.next()

        if line[1:32] == "# of contracted basis functions":
            self.nbasis = int(line.split()[-1])

        if line[0:14] == "OVERLAP MATRIX":
#parse the overlap matrix
            dashes = inputfile.next()

            self.aooverlaps = numpy.zeros( (self.nbasis, self.nbasis), "d")
            for i in range(0, self.nbasis, 6):
                header = inputfile.next()
                size = len(header.split())

                for j in range(self.nbasis):
                    line = inputfile.next()
                    broken = line.split()
                    self.aooverlaps[j, i:i+size] = map(float, broken[1:size+1])

        # Molecular orbital coefficients.
        # This is also where atombasis is parsed.
        if line[0:18] == "MOLECULAR ORBITALS":

            dashes = inputfile.next()

            mocoeffs = [ numpy.zeros((self.nbasis, self.nbasis), "d") ]
            self.aonames = []
            self.atombasis = []
            for n in range(self.natom):
                self.atombasis.append([])

            for spin in range(len(self.moenergies)):

                if spin == 1:
                    blank = inputfile.next()
                    mocoeffs.append(numpy.zeros((self.nbasis, self.nbasis), "d"))

                for i in range(0, self.nbasis, 6):
                    numbers = inputfile.next()
                    energies = inputfile.next()
                    occs = inputfile.next()
                    dashes = inputfile.next()
                    broken = dashes.split()
                    size = len(broken)

                    for j in range(self.nbasis):
                        line = inputfile.next()
                        broken = line.split()

                        #only need this on the first time through
                        if spin == 0 and i == 0:
                            atomname = line[3:5].split()[0]
                            num = int(line[0:3])
                            orbital = broken[1].upper()
                            
                            self.aonames.append("%s%i_%s"%(atomname, num+1, orbital))
                            self.atombasis[num].append(j)

                        temp = []
                        vals = line[16:-1] #-1 to remove the last blank space
                        for k in range(0, len(vals), 10):
                            temp.append(float(vals[k:k+10]))
                        mocoeffs[spin][i:i+size, j] = temp

            self.mocoeffs = mocoeffs

        if line[0:18] == "TD-DFT/TDA EXCITED":
            sym = "Triplet" # Could be singlets or triplets
            if line.find("SINGLETS") >= 0:
                sym = "Singlet"
                self.etsecs = []
                self.etenergies = []
                self.etsyms = []
            lookup = {'a':0, 'b':1}
            line = inputfile.next()
            while line.find("STATE") < 0:
                line = inputfile.next()
            # Contains STATE or is blank
            while line.find("STATE") >= 0:
                broken = line.split()
                self.etenergies.append(float(broken[-2]))
                self.etsyms.append(sym)
                line = inputfile.next()
                sec = []
                # Contains SEC or is blank
                while line.strip():
                    start = line[0:8].strip()
                    start = (int(start[:-1]), lookup[start[-1]])
                    end = line[10:17].strip()
                    end = (int(end[:-1]), lookup[end[-1]])
                    contrib = float(line[35:47].strip())
                    sec.append([start, end, contrib])
                    line = inputfile.next()
                self.etsecs.append(sec)
                line = inputfile.next()

        if line[25:44] == "ABSORPTION SPECTRUM":
            minus = inputfile.next()
            header = inputfile.next()
            header = inputfile.next()
            minus = inputfile.next()
            self.etoscs = []
            for x in self.etsyms:                
                osc = inputfile.next().split()[3]
                if osc == "spin": # "spin forbidden"    
                    osc = 0
                else:
                    osc = float(osc)
                self.etoscs.append(osc)
                
        if line[0:23] == "VIBRATIONAL FREQUENCIES":
#parse the vibrational frequencies
            dashes = inputfile.next()
            blank = inputfile.next()

            self.vibfreqs = numpy.zeros((3 * self.natom,),"d")

            for i in range(3 * self.natom):
                line = inputfile.next()
                self.vibfreqs[i] = float(line.split()[1])

        if line[0:11] == "IR SPECTRUM":
#parse ir intensities
            dashes = inputfile.next()
            blank = inputfile.next()
            header = inputfile.next()
            dashes = inputfile.next()

            self.vibirs = numpy.zeros((3 * self.natom,),"d")

            line = inputfile.next()
            while len(line) > 2:
                num = int(line[0:4])
                self.vibirs[num] = float(line.split()[2])
                line = inputfile.next()

        if line[0:14] == "RAMAN SPECTRUM":
#parse Raman intensities
            dashes = inputfile.next()
            blank = inputfile.next()
            header = inputfile.next()
            dashes = inputfile.next()

            self.vibramans = numpy.zeros((3 * self.natom,),"d")

            line = inputfile.next()
            while len(line) > 2:
                num = int(line[0:4])
                self.vibramans[num] = float(line.split()[2])
                line = inputfile.next()
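
A recurring numpy.zeros pattern in this parser: preallocate a square double-precision array ("d" is the float64 type code) and fill it a few columns at a time, because ORCA prints matrices in column blocks. A stripped-down sketch of that fill loop (the lines input is a hypothetical stand-in for the file):

import numpy

nbasis = 3
# Hypothetical column-blocked matrix section: a header of column indices,
# then one line per basis function with a row label and the block's values.
lines = iter([
    "0 1 2",
    "0  1.0 0.1 0.2",
    "1  0.1 1.0 0.3",
    "2  0.2 0.3 1.0",
])

aooverlaps = numpy.zeros((nbasis, nbasis), "d")
for i in range(0, nbasis, 6):          # blocks of up to 6 columns
    header = next(lines)
    size = len(header.split())
    for j in range(nbasis):
        broken = next(lines).split()
        aooverlaps[j, i:i+size] = list(map(float, broken[1:size+1]))
print(aooverlaps)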

Example 6

Project: spinalcordtoolbox Source File: plot_map.py
def main():

    results_folder = param_default.results_folder
    methods_to_display = param_default.methods_to_display

    # Parameters for debug mode
    if param_default.debug:
        print '\n*** WARNING: DEBUG MODE ON ***\n'
        results_folder = "/Users/slevy_local/spinalcordtoolbox/dev/atlas/validate_atlas/results_20150210_200iter/map" #"C:/cygwin64/home/Simon_2/data_map"
        path_sct = '/Users/slevy_local/spinalcordtoolbox' #'C:/cygwin64/home/Simon_2/spinalcordtoolbox'
        methods_to_display = 'map'
    else:
        status, path_sct = commands.getstatusoutput('echo $SCT_DIR')

        # Check input parameters
        try:
            opts, args = getopt.getopt(sys.argv[1:], 'i:m:')  # define flags
        except getopt.GetoptError as err:  # check if the arguments are defined
            print str(err)  # error
            # usage() # display usage
        # if not opts:
        #     print 'Please enter the path to the result folder. Exit program.'
        #     sys.exit(1)
        #     # usage()
        for opt, arg in opts:  # explore flags
            if opt in '-i':
                results_folder = arg
            if opt in '-m':
                methods_to_display = arg

    # Append path that contains scripts, to be able to load modules
    sys.path.append(path_sct + '/scripts')
    import sct_utils as sct

    sct.printv("Working directory: "+os.getcwd())

    sct.printv('\n\nData will be extracted from folder '+results_folder+' .', 'warning')
    sct.printv('\t\tCheck existence...')
    sct.check_folder_exist(results_folder)

    # Extract methods to display
    methods_to_display = methods_to_display.strip().split(',')

    fname_results = glob.glob(results_folder + '/*.txt')

    nb_results_file = len(fname_results)

    # 1st dim: SNR, 2nd dim: tract std, 3rd dim: mean abs error, 4th dim: std abs error
    # result_array = numpy.empty((nb_results_file, nb_results_file, 3), dtype=object)
    # SNR
    snr = numpy.zeros((nb_results_file))
    # Tracts std
    tracts_std = numpy.zeros((nb_results_file))
    # methods' name
    methods_name = [] #numpy.empty((nb_results_file, nb_method), dtype=object)
    # labels
    error_per_label = []
    std_per_label = []
    labels_id = []
    # median
    median_results = numpy.zeros((nb_results_file, 6))
    # median std across bootstraps
    median_std = numpy.zeros((nb_results_file, 6))
    # min
    min_results = numpy.zeros((nb_results_file, 6))
    # max
    max_results = numpy.zeros((nb_results_file, 6))

    # Extract variance within labels and variance of noise
    map_var_params = numpy.zeros((nb_results_file, 2))
    for i_file in range(0, nb_results_file):

        fname = fname_results[i_file].strip()
        ind_start, ind_end = fname.index('results_map')+11, fname.index('_all.txt')
        var = fname[ind_start:ind_end]
        map_var_params[i_file, 0] = float(var.split(",")[0])
        map_var_params[i_file, 1] = float(var.split(",")[1])


    # Read each file and extract data
    for i_file in range(0, nb_results_file):

        # Open file
        f = open(fname_results[i_file])  # open file
        # Extract all lines in .txt file
        lines = [line for line in f.readlines() if line.strip()]

        # extract SNR
        # find all index of lines containing the string "sigma noise"
        ind_line_noise = [lines.index(line_noise) for line_noise in lines if "sigma noise" in line_noise]
        if len(ind_line_noise) != 1:
            sct.printv("ERROR: number of lines including \"sigma noise\" is different from 1. Exit program.", 'error')
            sys.exit(1)
        else:
            # result_array[:, i_file, i_file] = int(''.join(c for c in lines[ind_line_noise[0]] if c.isdigit()))
            snr[i_file] = int(''.join(c for c in lines[ind_line_noise[0]] if c.isdigit()))

        # extract tract std
        ind_line_tract_std = [lines.index(line_tract_std) for line_tract_std in lines if "range tracts" in line_tract_std]
        if len(ind_line_tract_std) != 1:
            sct.printv("ERROR: number of lines including \"range tracts\" is different from 1. Exit program.", 'error')
            sys.exit(1)
        else:
            # result_array[i_file, i_file, :] = int(''.join(c for c in lines[ind_line_tract_std[0]].split(':')[1] if c.isdigit()))
            # regex = re.compile(''('(.*)':)  # re.I makes the match case-insensitive
            # match = regex.search(lines[ind_line_tract_std[0]])
            # result_array[:, i_file, :, :] = match.group(1)  # group 1 corresponds to '.*'
            tracts_std[i_file] = int(''.join(c for c in lines[ind_line_tract_std[0]].split(':')[1] if c.isdigit()))


        # extract method name
        ind_line_label = [lines.index(line_label) for line_label in lines if "Label" in line_label]
        if len(ind_line_label) != 1:
            sct.printv("ERROR: number of lines including \"Label\" is different from 1. Exit program.", 'error')
            sys.exit(1)
        else:
            # methods_name[i_file, :] = numpy.array(lines[ind_line_label[0]].strip().split(',')[1:])
            methods_name.append(lines[ind_line_label[0]].strip().replace(' ', '').split(',')[1:])

        # extract median
        ind_line_median = [lines.index(line_median) for line_median in lines if "median" in line_median]
        if len(ind_line_median) != 1:
            sct.printv("WARNING: number of lines including \"median\" is different from 1. Exit program.", 'warning')
            # sys.exit(1)
        else:
            median = lines[ind_line_median[0]].strip().split(',')[1:]
            # result_array[i_file, i_file, 0] = [float(m.split('(')[0]) for m in median]
            median_results[i_file, :] = numpy.array([float(m.split('(')[0]) for m in median])
            median_std[i_file, :] = numpy.array([float(m.split('(')[1][:-1]) for m in median])

        # extract min
        ind_line_min = [lines.index(line_min) for line_min in lines if "min," in line_min]
        if len(ind_line_min) != 1:
            sct.printv("WARNING: number of lines including \"min\" is different from 1. Exit program.", 'warning')
            # sys.exit(1)
        else:
            min = lines[ind_line_min[0]].strip().split(',')[1:]
            # result_array[i_file, i_file, 1] = [float(m.split('(')[0]) for m in min]
            min_results[i_file, :] = numpy.array([float(m.split('(')[0]) for m in min])

        # extract max
        ind_line_max = [lines.index(line_max) for line_max in lines if "max" in line_max]
        if len(ind_line_max) != 1:
            sct.printv("WARNING: number of lines including \"max\" is different from 1. Exit program.", 'warning')
            # sys.exit(1)
        else:
            max = lines[ind_line_max[0]].strip().split(',')[1:]
            # result_array[i_file, i_file, 1] = [float(m.split('(')[0]) for m in max]
            max_results[i_file, :] = numpy.array([float(m.split('(')[0]) for m in max])

        # extract error for each label
        error_per_label_for_file_i = []
        std_per_label_for_file_i = []
        labels_id_for_file_i = []
        # Due to 2 different kind of file structure, the number of the last label line must be adapted
        if not ind_line_median:
            ind_line_median = [len(lines)+1]
        for i_line in range(ind_line_label[0]+1, ind_line_median[0]-1):
            line_label_i = lines[i_line].strip().split(',')
            error_per_label_for_file_i.append([float(error.strip().split('(')[0]) for error in line_label_i[1:]])
            std_per_label_for_file_i.append([float(error.strip().split('(')[1][:-1]) for error in line_label_i[1:]])
            labels_id_for_file_i.append(line_label_i[0])
        error_per_label.append(error_per_label_for_file_i)
        std_per_label.append(std_per_label_for_file_i)
        labels_id.append(labels_id_for_file_i)

        # close file
        f.close()

    # check if all the files in the result folder were generated with the same number of methods
    if not all(x == methods_name[0] for x in methods_name):
        sct.printv('ERROR: All the generated files in folder '+results_folder+' have not been generated with the same number of methods. Exit program.', 'error')
        sys.exit(1)
    # check if all the files in the result folder were generated with the same labels
    if not all(x == labels_id[0] for x in labels_id):
        sct.printv('ERROR: All the generated files in folder '+results_folder+' have not been generated with the same labels. Exit program.', 'error')
        sys.exit(1)

    # convert the list "error_per_label" into a numpy array to ease further manipulations
    error_per_label = numpy.array(error_per_label)
    std_per_label = numpy.array(std_per_label)
    # compute different stats
    abs_error_per_labels = numpy.absolute(error_per_label)
    max_abs_error_per_meth = numpy.amax(abs_error_per_labels, axis=1)
    min_abs_error_per_meth = numpy.amin(abs_error_per_labels, axis=1)
    mean_abs_error_per_meth = numpy.mean(abs_error_per_labels, axis=1)
    std_abs_error_per_meth = numpy.std(abs_error_per_labels, axis=1)


    sct.printv('Noise std of the '+str(nb_results_file)+' generated files:')
    print snr
    print '----------------------------------------------------------------------------------------------------------------'
    sct.printv('Tracts std of the '+str(nb_results_file)+' generated files:')
    print tracts_std
    print '----------------------------------------------------------------------------------------------------------------'
    sct.printv('Methods used to generate results for the '+str(nb_results_file)+' generated files:')
    print methods_name
    print '----------------------------------------------------------------------------------------------------------------'
    sct.printv('Median obtained with each method (in columns) for the '+str(nb_results_file)+' generated files (in rows):')
    print median_results
    print '----------------------------------------------------------------------------------------------------------------'
    sct.printv('Minimum obtained with each method (in columns) for the '+str(nb_results_file)+' generated files (in rows):')
    print min_results
    print '----------------------------------------------------------------------------------------------------------------'
    sct.printv('Maximum obtained with each method (in columns) for the '+str(nb_results_file)+' generated files (in rows):')
    print max_results
    print '----------------------------------------------------------------------------------------------------------------'
    sct.printv('Labels\' ID (in columns) for the '+str(nb_results_file)+' generated files (in rows):')
    print labels_id
    print '----------------------------------------------------------------------------------------------------------------'
    sct.printv('Errors obtained with each method (in columns) for the '+str(nb_results_file)+' generated files (in rows):')
    print error_per_label


    # ************************** START PLOTTING HERE **************************
    matplotlib.rcParams.update({'font.size': 45, 'font.family': 'Trebuchet'})
    plt.rcParams['xtick.major.pad'] = '9'
    plt.rcParams['ytick.major.pad'] = '15'
    # matplotlib.rcParams['legend.handlelength'] = 0


    # find indexes of files to be plotted
    ind_var_noise20 = numpy.where(map_var_params[:, 1] == 20)  # indexes where noise variance = 20
    ind_ind_var_label_sort_var_noise20 = numpy.argsort(map_var_params[ind_var_noise20, 0])  # indexes of indexes where noise variance=20 sorted according to values of variance within labels (in ascending order)
    ind_var_label_sort_var_noise20 = ind_var_noise20[0][ind_ind_var_label_sort_var_noise20][0]  # indexes where noise variance=20 sorted according to values of variance within labels (in ascending order)


    ind_var_label20 = numpy.where(map_var_params[:, 0] == 20)  # indexes where variance within labels = 20
    ind_ind_var_noise_sort_var_label20 = numpy.argsort(map_var_params[ind_var_label20, 1])  # indexes of indexes where label variance=20 sorted according to values of noise variance (in ascending order)
    ind_var_noise_sort_var_label20 = ind_var_label20[0][ind_ind_var_noise_sort_var_label20][0]  # indexes where variance within labels=20 sorted according to values of noise variance (in ascending order)

    plt.close('all')

    # Errorbar plot
    plt.figure()
    plt.ylabel('Mean absolute error (%)', fontsize=55)
    plt.xlabel('Variance within labels (in percentage of the mean)', fontsize=55)
    plt.title('Sensitivity of the method \"MAP\" to the variance within labels and to the SNR\n', fontsize=65)

    plt.errorbar(map_var_params[ind_var_label_sort_var_noise20, 0], mean_abs_error_per_meth[ind_var_label_sort_var_noise20, 0], std_abs_error_per_meth[ind_var_label_sort_var_noise20, 0], color='blue', marker='o', linestyle='--', markersize=8, elinewidth=2, capthick=2, capsize=10)
    plt.errorbar(map_var_params[ind_var_noise_sort_var_label20, 1], mean_abs_error_per_meth[ind_var_noise_sort_var_label20, 0], std_abs_error_per_meth[ind_var_noise_sort_var_label20, 0], color='red', marker='o', linestyle='--', markersize=8, elinewidth=2, capthick=2, capsize=10)

    # plt.legend(plots, methods_to_display, bbox_to_anchor=(1.01, 1), loc=2, borderaxespad=0., handler_map={Line2D: HandlerLine2D(numpoints=1)})
    plt.legend(['noise variance = 20', 'variance within labels = 20% of the mean'], loc='best', handler_map={Line2D: HandlerLine2D(numpoints=1)})
    plt.gca().set_xlim([numpy.min(map_var_params[ind_var_label_sort_var_noise20, 0]) - 1, numpy.max(map_var_params[ind_var_label_sort_var_noise20, 0]) + 1])
    plt.grid(b=True, axis='both')
    # plt.gca().yaxis.set_major_locator(plt.MultipleLocator(2.5))


    # Box-and-whisker plots
    nb_box = 2
    plt.figure(figsize=(30, 15))
    width = 1.0 / (nb_box + 1)
    ind_fig = numpy.arange(len(map_var_params[ind_var_label_sort_var_noise20, 0])) * (1.0 + width)
    plt.ylabel('Absolute error (%)\n', fontsize=55)
    plt.xlabel('Variance', fontsize=55)
    plt.title('Sensitivity of the method \"MAP\" to the variance within labels and to the SNR\n', fontsize=65)

    # colors = plt.get_cmap('jet')(np.linspace(0, 1.0, nb_box))
    colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
    box_plots = []


    boxprops = dict(linewidth=6, color='b')
    flierprops = dict(markeredgewidth=0.7, markersize=15, marker='.', color='b')
    whiskerprops = dict(linewidth=5, color='b')
    capprops = dict(linewidth=5, color='b')
    medianprops = dict(linewidth=6, color='b')
    meanpointprops = dict(marker='D', markeredgecolor='black', markerfacecolor='firebrick')
    meanlineprops = dict(linestyle='--', linewidth=2.5)
    plot_constant_noise_var = plt.boxplot(numpy.transpose(abs_error_per_labels[ind_var_label_sort_var_noise20, :, 0]), positions=ind_fig, widths=width, boxprops=boxprops, medianprops=medianprops, flierprops=flierprops, whiskerprops=whiskerprops, capprops=capprops)
    box_plots.append(plot_constant_noise_var['boxes'][0])

    boxprops = dict(linewidth=6, color='r')
    flierprops = dict(markeredgewidth=0.7, markersize=15, marker='.', color='r')
    whiskerprops = dict(linewidth=5, color='r')
    capprops = dict(linewidth=5, color='r')
    medianprops = dict(linewidth=6, color='r')
    meanpointprops = dict(marker='D', markeredgecolor='black', markerfacecolor='firebrick')
    meanlineprops = dict(linestyle='--', linewidth=2.5)
    plot_constant_label_var = plt.boxplot(numpy.transpose(abs_error_per_labels[ind_var_noise_sort_var_label20, :, 0]), positions=ind_fig + width + width / (nb_box + 1), widths=width, boxprops=boxprops, medianprops=medianprops, flierprops=flierprops, whiskerprops=whiskerprops, capprops=capprops)
    box_plots.append(plot_constant_label_var['boxes'][0])

    # add alternated vertical background colored bars
    for i_xtick in range(0, len(ind_fig), 2):
        plt.axvspan(ind_fig[i_xtick] - width - width / 4, ind_fig[i_xtick] + (nb_box+1) * width - width / 4, facecolor='grey', alpha=0.1)


    # plt.legend(box_plots, methods_to_display, bbox_to_anchor=(1.01, 1), loc=2, borderaxespad=0.)
    # leg = plt.legend(box_plots, [r'$\mathrm{\mathsf{noise\ variance\ =\ 20\ voxels^2}}$', r'$\mathrm{\mathsf{variance\ within\ labels\ =\ 20\%\ of\ the\ mean\ value}}$'], loc=1, handletextpad=-2)
    # color_legend_texts(leg)
    # convert xtick labels into int
    xtick_labels = [int(xtick) for xtick in map_var_params[ind_var_label_sort_var_noise20, 0]]
    plt.xticks(ind_fig + (numpy.floor(nb_box / 2)) * (width/2) * (1.0 + 1.0 / (nb_box + 1)), xtick_labels)
    plt.gca().set_xlim([-width, numpy.max(ind_fig) + (nb_box + 0.5) * width])
    plt.gca().yaxis.set_major_locator(plt.MultipleLocator(1.0))
    plt.gca().yaxis.set_minor_locator(plt.MultipleLocator(0.25))
    plt.grid(b=True, axis='y', which='both')

    plt.savefig(param_default.fname_folder_to_save_fig+'/absolute_error_as_a_function_of_MAP_parameters.pdf', format='PDF')


    plt.show(block=False)
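
In this script numpy.zeros mostly preallocates one row of statistics per result file before the parsing loop fills them in. A minimal sketch of that pattern (the file names are hypothetical; only the file-name parsing step is reproduced):

import numpy

fname_results = ["results_map20,20_all.txt", "results_map5,20_all.txt"]
nb_results_file = len(fname_results)

snr = numpy.zeros((nb_results_file))                 # one scalar per file
median_results = numpy.zeros((nb_results_file, 6))   # one row of 6 methods per file
map_var_params = numpy.zeros((nb_results_file, 2))   # (label variance, noise variance)

for i_file, fname in enumerate(fname_results):
    # As in the script, the two MAP parameters are read out of the file name;
    # snr and median_results would be filled the same way while parsing each file.
    ind_start, ind_end = fname.index('results_map') + 11, fname.index('_all.txt')
    var = fname[ind_start:ind_end]
    map_var_params[i_file, 0] = float(var.split(",")[0])
    map_var_params[i_file, 1] = float(var.split(",")[1])
print(map_var_params)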

Example 7

Project: pyscf Source File: mc1step.py
def gen_g_hop(casscf, mo, u, casdm1, casdm2, eris):
    ncas = casscf.ncas
    ncore = casscf.ncore
    nocc = ncas + ncore
    nmo = mo.shape[1]

    dm1 = numpy.zeros((nmo,nmo))
    idx = numpy.arange(ncore)
    dm1[idx,idx] = 2
    dm1[ncore:nocc,ncore:nocc] = casdm1

    # part5
    jkcaa = numpy.empty((nocc,ncas))
    # part2, part3
    vhf_a = numpy.empty((nmo,nmo))
    # part1 ~ (J + 2K)
    dm2tmp = casdm2.transpose(1,2,0,3) + casdm2.transpose(0,2,1,3)
    dm2tmp = dm2tmp.reshape(ncas**2,-1)
    hdm2 = numpy.empty((nmo,ncas,nmo,ncas))
    g_dm2 = numpy.empty((nmo,ncas))
    for i in range(nmo):
        jbuf = eris.ppaa[i]
        kbuf = eris.papa[i]
        if i < nocc:
            jkcaa[i] = numpy.einsum('ik,ik->i', 6*kbuf[:,i]-2*jbuf[i], casdm1)
        vhf_a[i] =(numpy.einsum('quv,uv->q', jbuf, casdm1)
                 - numpy.einsum('uqv,uv->q', kbuf, casdm1) * .5)
        jtmp = lib.dot(jbuf.reshape(nmo,-1), casdm2.reshape(ncas*ncas,-1))
        jtmp = jtmp.reshape(nmo,ncas,ncas)
        ktmp = lib.dot(kbuf.transpose(1,0,2).reshape(nmo,-1), dm2tmp)
        hdm2[i] = (ktmp.reshape(nmo,ncas,ncas)+jtmp).transpose(1,0,2)
        g_dm2[i] = numpy.einsum('uuv->v', jtmp[ncore:nocc])
    jbuf = kbuf = jtmp = ktmp = dm2tmp = None
    vhf_ca = eris.vhf_c + vhf_a
    h1e_mo = reduce(numpy.dot, (mo.T, casscf.get_hcore(), mo))

    ################# gradient #################
    g = numpy.zeros_like(h1e_mo)
    g[:,:ncore] = (h1e_mo[:,:ncore] + vhf_ca[:,:ncore]) * 2
    g[:,ncore:nocc] = numpy.dot(h1e_mo[:,ncore:nocc]+eris.vhf_c[:,ncore:nocc],casdm1)
    g[:,ncore:nocc] += g_dm2

    def gdep1(u):
        dt = u - numpy.eye(u.shape[0])
        mo1 = numpy.dot(mo, dt)
        g = numpy.dot(h1e_mo, dt)
        g = h1e_mo + g + g.T
        g[:,nocc:] = 0
        mo_core = mo[:,:ncore]
        mo_cas = mo[:,ncore:nocc]
        dm_core = numpy.dot(mo_core, mo1[:,:ncore].T) * 2
        dm_core = dm_core + dm_core.T
        dm_cas = reduce(numpy.dot, (mo_cas, casdm1, mo1[:,ncore:nocc].T))
        dm_cas = dm_cas + dm_cas.T
        vj, vk = casscf._scf.get_jk(casscf.mol, (dm_core,dm_cas)) # first order response only
        vhfc = numpy.dot(eris.vhf_c, dt)
        vhfc = (vhfc + vhfc.T + eris.vhf_c
                + reduce(numpy.dot, (mo.T, vj[0]-vk[0]*.5, mo)))
        vhfa = numpy.dot(vhf_a, dt)
        vhfa = (vhfa + vhfa.T + vhf_a
                + reduce(numpy.dot, (mo.T, vj[1]-vk[1]*.5, mo)))
        g[:,:ncore] += vhfc[:,:ncore]+vhfa[:,:ncore]
        g[:,:ncore] *= 2
        g[:,ncore:nocc] = numpy.dot(g[:,ncore:nocc]+vhfc[:,ncore:nocc], casdm1)

        g[:,ncore:nocc] += numpy.einsum('purv,rv->pu', hdm2, dt[:,ncore:nocc])
        g[:,ncore:nocc] += numpy.dot(u.T, g_dm2)
        return g

    def gdep4(u):
        mo1 = numpy.dot(mo, u)
        g = numpy.zeros_like(h1e_mo)
        g[:,:nocc] = reduce(numpy.dot, (u.T, h1e_mo, u[:,:nocc]))
        dm_core0 = reduce(numpy.dot, (mo[:,:ncore], mo[:,:ncore].T)) * 2
        dm_core1 = reduce(numpy.dot, (mo1[:,:ncore], mo1[:,:ncore].T)) * 2
        dm_cas0  = reduce(numpy.dot, (mo[:,ncore:nocc], casdm1, mo[:,ncore:nocc].T))
        dm_cas1  = reduce(numpy.dot, (mo1[:,ncore:nocc], casdm1, mo1[:,ncore:nocc].T))
        vj, vk = casscf._scf.get_jk(casscf.mol, (dm_core1-dm_core0, dm_cas1-dm_cas0))
        vhfc1 =(reduce(numpy.dot, (mo1.T, vj[0]-vk[0]*.5, mo1[:,:nocc]))
              + reduce(numpy.dot, (u.T, eris.vhf_c, u[:,:nocc])))
        vhfa1 =(reduce(numpy.dot, (mo1.T, vj[1]-vk[1]*.5, mo1[:,:nocc]))
              + reduce(numpy.dot, (u.T, vhf_a, u[:,:nocc])))
        g[:,:ncore] += vhfc1[:,:ncore] + vhfa1[:,:ncore]
        g[:,:ncore] *= 2
        g[:,ncore:nocc] = numpy.dot(g[:,ncore:nocc]+vhfc1[:,ncore:nocc], casdm1)

        if hasattr(eris, '_paaa'):
            paaa = eris._paaa
        else:
            paaa = casscf._exact_paaa(mo, u)
        g[:,ncore:nocc] += numpy.einsum('puvw,tuvw->pt', paaa, casdm2)
        return g

    def gorb_update(u, dep4=False):
        if dep4:
            g = gdep4(u)
        else:  # DEP1/first order T-expansion
            g = gdep1(u)
        return casscf.pack_uniq_var(g-g.T)

    ############## hessian, diagonal ###########

    # part7
    h_diag = numpy.einsum('ii,jj->ij', h1e_mo, dm1) - h1e_mo * dm1
    h_diag = h_diag + h_diag.T

    # part8
    g_diag = g.diagonal()
    h_diag -= g_diag + g_diag.reshape(-1,1)
    idx = numpy.arange(nmo)
    h_diag[idx,idx] += g_diag * 2

    # part2, part3
    v_diag = vhf_ca.diagonal() # (pr|kl) * E(sq,lk)
    h_diag[:,:ncore] += v_diag.reshape(-1,1) * 2
    h_diag[:ncore] += v_diag * 2
    idx = numpy.arange(ncore)
    h_diag[idx,idx] -= v_diag[:ncore] * 4
    # V_{pr} E_{sq}
    tmp = numpy.einsum('ii,jj->ij', eris.vhf_c, casdm1)
    h_diag[:,ncore:nocc] += tmp
    h_diag[ncore:nocc,:] += tmp.T
    tmp = -eris.vhf_c[ncore:nocc,ncore:nocc] * casdm1
    h_diag[ncore:nocc,ncore:nocc] += tmp + tmp.T

    # part4
    # -2(pr|sq) + 4(pq|sr) + 4(pq|rs) - 2(ps|rq)
    tmp = 6 * eris.k_pc - 2 * eris.j_pc
    h_diag[ncore:,:ncore] += tmp[ncore:]
    h_diag[:ncore,ncore:] += tmp[ncore:].T

    # part5 and part6 diag
    # -(qr|kp) E_s^k  p in core, sk in active
    h_diag[:nocc,ncore:nocc] -= jkcaa
    h_diag[ncore:nocc,:nocc] -= jkcaa.T

    v_diag = numpy.einsum('ijij->ij', hdm2)
    h_diag[ncore:nocc,:] += v_diag.T
    h_diag[:,ncore:nocc] += v_diag

# Does this term contribute to internal rotation?
#    h_diag[ncore:nocc,ncore:nocc] -= v_diag[:,ncore:nocc]*2

    g_orb = casscf.pack_uniq_var(g-g.T)
    h_diag = casscf.pack_uniq_var(h_diag)

    def h_op(x):
        x1 = casscf.unpack_uniq_var(x)

        # part7
        # (-h_{sp} R_{rs} gamma_{rq} - h_{rq} R_{pq} gamma_{sp})/2 + (pr<->qs)
        x2 = reduce(lib.dot, (h1e_mo, x1, dm1))
        # part8
        # (g_{ps}\delta_{qr}R_rs + g_{qr}\delta_{ps}) * R_pq)/2 + (pr<->qs)
        x2 -= numpy.dot(g.T, x1)
        # part2
        # (-2Vhf_{sp}\delta_{qr}R_pq - 2Vhf_{qr}\delta_{sp}R_rs)/2 + (pr<->qs)
        x2[:ncore] += reduce(numpy.dot, (x1[:ncore,ncore:], vhf_ca[ncore:])) * 2
        # part3
        # (-Vhf_{sp}gamma_{qr}R_{pq} - Vhf_{qr}gamma_{sp}R_{rs})/2 + (pr<->qs)
        x2[ncore:nocc] += reduce(numpy.dot, (casdm1, x1[ncore:nocc], eris.vhf_c))
        # part1
        x2[:,ncore:nocc] += numpy.einsum('purv,rv->pu', hdm2, x1[:,ncore:nocc])

        if ncore > 0:
            # part4, part5, part6
# Due to x1_rs [4(pq|sr) + 4(pq|rs) - 2(pr|sq) - 2(ps|rq)] for r>s p>q,
#    == -x1_sr [4(pq|sr) + 4(pq|rs) - 2(pr|sq) - 2(ps|rq)] for r>s p>q,
# x2[:,:ncore] += H * x1[:,:ncore] => (because x1=-x1.T) =>
# x2[:,:ncore] += -H' * x1[:ncore] => (because x2-x2.T) =>
# x2[:ncore] += H' * x1[:ncore]
            va, vc = casscf.update_jk_in_ah(mo, x1, casdm1, eris)
            x2[ncore:nocc] += va
            x2[:ncore,ncore:] += vc

        # (pr<->qs)
        x2 = x2 - x2.T
        return casscf.pack_uniq_var(x2)

    return g_orb, gorb_update, h_op, h_diag
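
The starting point above is a one-particle density matrix assembled with numpy.zeros: index arrays put the doubly occupied core orbitals on the diagonal, and the active block is copied in. That construction in isolation (the sizes and casdm1 values are hypothetical):

import numpy

ncore, ncas, nmo = 2, 2, 6
nocc = ncore + ncas
casdm1 = numpy.array([[1.5, 0.1],
                      [0.1, 0.5]])        # hypothetical active-space 1-RDM

dm1 = numpy.zeros((nmo, nmo))
idx = numpy.arange(ncore)
dm1[idx, idx] = 2                         # core orbitals: occupation 2
dm1[ncore:nocc, ncore:nocc] = casdm1      # active block from the CAS density
print(numpy.trace(dm1))                   # 2*ncore + trace(casdm1) = 6.0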

Example 8

Project: pygbe Source File: gmres.py
def gmres_mgs(surf_array, field_array, X, b, param, ind0, timing, kernel):
    """
    GMRES solver.

    Arguments
    ----------
    surf_array : array, contains the surface classes of each region on the
                        surface.
    field_array: array, contains the Field classes of each region on the surface.
    X          : array, initial guess.
    b          : array, right hand side.
    param      : class, parameters related to the surface.
    ind0       : class, it contains the indices related to the treecode
                        computation.
    timing     : class, it contains timing information for different parts of
                        the code.
    kernel     : pycuda source module.

    Returns
    --------
    X          : array, an updated guess to the solution.
    iteration  : int, number of outer iterations for convergence

    References
    ----------
    .. [1] Yousef Saad, "Iterative Methods for Sparse Linear Systems,
       Second Edition", SIAM, pp. 151-172, pp. 272-275, 2003
       http://www-users.cs.umn.edu/~saad/books.html
    .. [2] C. T. Kelley, http://www4.ncsu.edu/~ctk/matlab_roots.html
    """

    # Defining xtype as dtype of the problem, to decide which BLAS functions
    # import.
    xtype = upcast(X.dtype, b.dtype)

    # Get fast access to underlying BLAS routines
    # dotc is the conjugate dot, dotu does no conjugation

    [lartg] = get_lapack_funcs(['lartg'], [X] )
    if numpy.iscomplexobj(numpy.zeros((1,), dtype=xtype)):
        [axpy, dotu, dotc, scal] =\
            get_blas_funcs(['axpy', 'dotu', 'dotc', 'scal'], [X])
    else:
        # real type
        [axpy, dotu, dotc, scal] =\
            get_blas_funcs(['axpy', 'dot', 'dot', 'scal'], [X])

    # Make full use of direct access to BLAS by defining own norm
    def norm(z):
        return numpy.sqrt(numpy.real(dotc(z, z)))

    # Defining dimension
    dimen = len(X)

    max_iter = param.max_iter
    R = param.restart
    tol = param.tol

    # Set number of outer and inner iterations
    max_outer = max_iter

    if R > dimen:
        warn('Setting number of inner iterations (restart) to maximum '
             'allowed, which is A.shape[0]')
        R = dimen

    max_inner = R

    # Prep for method
    aux = gmres_dot(X, surf_array, field_array, ind0, param, timing, kernel)
    r = b - aux

    normr = norm(r)

    # Check initial guess ( scaling by b, if b != 0, must account for
    # case when norm(b) is very small)
    normb = norm(b)
    if normb == 0.0:
        normb = 1.0
    if normr < tol*normb:
        return X

    iteration = 0

    # Here start the GMRES
    for outer in range(max_outer):

        # Preallocate for Givens Rotations, Hessenberg matrix and Krylov Space
        # Space required is O(dimen*max_inner).
        # NOTE:  We are dealing with row-major matrices, so we traverse in a
        #        row-major fashion,
        #        i.e., H and V's transpose is what we store.

        Q = []  # Initializing Givens Rotations
        # Upper Hessenberg matrix, which is then
        # converted to upper triangular with Givens Rotations

        H = numpy.zeros((max_inner+1, max_inner+1), dtype=xtype)
        V = numpy.zeros((max_inner+1, dimen), dtype=xtype)  # Krylov space

        # vs store the pointers to each column of V.
        # This saves a considerable amount of time.
        vs = []

        # v = r/normr
        V[0, :] = scal(1.0/normr, r)  # scal wrapper of dscal --> x = a*x
        vs.append(V[0, :])

        #Saving initial residual to be used to calculate the rel_resid
        if iteration==0:
            res_0 = normb

        #RHS vector in the Krylov space
        g = numpy.zeros((dimen, ), dtype=xtype)
        g[0] = normr

        for inner in range(max_inner):
            #New search direction
            v = V[inner+1, :]
            v[:] = gmres_dot(vs[-1], surf_array, field_array, ind0, param,
                             timing, kernel)
            vs.append(v)

            #Modified Gram Schmidt
            for k in range(inner+1):
                vk = vs[k]
                alpha = dotc(vk, v)
                H[inner, k] = alpha
                v[:] = axpy(vk, v, dimen, -alpha)  # y := a*x + y
                #axpy is a wrapper for daxpy (blas function)

            normv = norm(v)
            H[inner, inner+1] = normv


            #Check for breakdown
            if H[inner, inner+1] != 0.0:
                v[:] = scal(1.0/H[inner, inner+1], v)

            #Apply Givens rotations to H
            if inner > 0:
                apply_givens(Q, H[inner, :], inner)

            #Calculate and apply next complex-valued Givens rotations

            #If max_inner = dimen, we don't need to calculate, this
            #is unnecessary for the last inner iteration when inner = dimen -1

            if inner != dimen - 1:
                if H[inner, inner+1] != 0:
                    #lartg is a lapack function that computes the parameters
                    #for a Givens rotation
                    [c, s, _] = lartg(H[inner, inner], H[inner, inner+1])
                    Qblock = numpy.array([[c, s], [-numpy.conjugate(s),c]], dtype=xtype)
                    Q.append(Qblock)

                    #Apply Givens Rotations to RHS for the linear system in
                    # the krylov space.
                    g[inner:inner+2] = scipy.dot(Qblock, g[inner:inner+2])

                    #Apply Givens rotations to H
                    H[inner, inner] = dotu(Qblock[0,:], H[inner, inner:inner+2])
                    H[inner, inner+1] = 0.0

            iteration += 1

            if inner < max_inner-1:
                normr = abs(g[inner+1])
                rel_resid = normr/res_0

                if rel_resid < tol:
                    break

            if iteration%1==0:
                print('Iteration: {}, relative residual: {}'.format(iteration,rel_resid))

            if (inner + 1 == R):
                print('Residual: {}. Restart...'.format(rel_resid))

        # end inner loop, back to outer loop

        # Find best update to X in Krylov Space V.  Solve inner X inner system.
        y = scipy.linalg.solve(H[0:inner+1, 0:inner+1].T, g[0:inner+1])
        update = numpy.ravel(scipy.mat(V[:inner+1, :]).T * y.reshape(-1,1))
        X = X + update
        aux = gmres_dot(X, surf_array, field_array, ind0, param, timing, kernel)
        r = b - aux

        normr = norm(r)
        rel_resid = normr/res_0

        # test for convergence
        if rel_resid < tol:
            print('GMRES solve')
            print('Converged after %i iterations to a residual of %s'%(iteration,rel_resid))
            print('Time weight vector: {}'.format(timing.time_mass))
            print('Time sort         : {}'.format(timing.time_sort))
            print('Time data transfer: {}'.format(timing.time_trans))
            print('Time P2M          : {}'.format(timing.time_P2M))
            print('Time M2M          : {}'.format(timing.time_M2M))
            print('Time M2P          : {}'.format(timing.time_M2P))
            print('Time P2P          : {}'.format(timing.time_P2P))
            print('\tTime analy: {}'.format(timing.time_an))

            return X, iteration

    #end outer loop

    return X, iteration
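
Two numpy.zeros details carry this solver: the Hessenberg matrix H and Krylov basis V are preallocated once per restart cycle in the problem's upcast dtype, and a throwaway numpy.zeros((1,), dtype=xtype) probes whether that dtype is complex so the matching BLAS routines get loaded. A minimal sketch (numpy.promote_types stands in for upcast; the sizes are hypothetical):

import numpy

X = numpy.ones(4)
b = numpy.ones(4) * 1j                    # hypothetical complex right-hand side
xtype = numpy.promote_types(X.dtype, b.dtype)

# Probe the dtype the way the solver does: a tiny zeros array of that dtype.
is_complex = numpy.iscomplexobj(numpy.zeros((1,), dtype=xtype))
print(is_complex)                         # True -> use dotc/dotu wrappers

max_inner, dimen = 3, len(X)
H = numpy.zeros((max_inner + 1, max_inner + 1), dtype=xtype)  # Hessenberg (stored transposed)
V = numpy.zeros((max_inner + 1, dimen), dtype=xtype)          # Krylov basis, one vector per row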

Example 9

Project: InfVocLDA Source File: nchar.py
Function: init
    def __init__(self,
                 n,
                 train,
                 smoothing=1e9,
                 #lagrangian_parameter=1.,
                 #estimator=None,
                 maximum_length=20,
                 minimum_length=3,
                 char_set=string.lowercase + string.punctuation + string.digits,
                 #char_set=string.lowercase,
                 patch_char='#'):
        """
        Creates an nchar language model to capture patterns in n consecutive
        characters of the training text.  An estimator smooths the
        probabilities derived from the text and may allow generation of
        nchars not seen during training.

        @param n: the order of the language model (nchar size)
        @type n: C{int}
        @param train: the training text
        @type train: C{list} of C{string}
        @param estimator: a function for generating a probability distribution
        @type estimator: a function that takes a C{ConditionalFreqDist} and
              returns a C{ConditionalProbDist}
        """

        self._smoothing = smoothing;
        #self.lagrangian_parameter = lagrangian_parameter;

        self._n = n

        self._maximum_length = maximum_length;
        self._minimum_length = minimum_length;
        self._char_set = char_set;
        
        #estimator = lambda fdist, bins: nltk.probability.WittenBellProbDist(fdist, len(char_set));
        estimator = lambda fdist, bins: nltk.probability.LidstoneProbDist(fdist, self._smoothing, len(self._char_set)+1);
        #estimator = lambda fdist, bins: nltk.probability.LidstoneProbDist(fdist, 1e-9, len(self._char_set));
        #estimator = lambda fdist, bins: nltk.probability.GoodTuringProbDist(fdist, len(self._char_set));
        #estimator = lambda fdist, bins: nltk.probability.SimpleGoodTuringProbDist(fdist, len(self._char_set));

        cfd = ConditionalFreqDist()
        self._ngrams = set()
        self._patch_char = patch_char;
        self._prefix = (self._patch_char,) * (n - 1)
        
        length = nltk.probability.FreqDist();
        word_freq_dist = nltk.probability.FreqDist();
        char_list = [];
        for word in train:
            word = word.strip().lower();
            if len(word)<self._minimum_length or len(word)>self._maximum_length:
                continue;
            length.inc(len(word));
            word_freq_dist.inc(word, 1);
            char_list.extend(self._prefix);
            char_list.extend([char for char in word if char in self._char_set]);
        self._length = nltk.probability.WittenBellProbDist(length, length.B()+1);
        #self._length = nltk.probability.WittenBellProbDist(length, self._maximum_length);
        
        #context_freq_dist = nltk.probability.FreqDist();
        #for nchar in ingrams(chain(self._prefix, train), n):
        for nchar in ngrams(char_list, n):
            self._ngrams.add(nchar)
            context = tuple(nchar[:-1])
            #context_freq_dist.inc(context);
            token = nchar[-1]
            cfd[context].inc(token)
        #self._context = nltk.probability.WittenBellProbDist(context_freq_dist, len(self._char_set)**(n-1)+1);

        '''
        if n==3:
            cond = 0;
            for x in self._char_set:
                for y in self._char_set:
                    print (x, y), context_freq_dist[(x, y)], self._context.prob((x, y));
                    cond += self._context.prob((x, y));
            print 'cond is', cond
        '''
        
        #self._model = ConditionalProbDist(cfd, estimator, len(cfd));
        #print self._char_set;
        self._model = ConditionalProbDist(cfd, estimator, len(self._char_set) ** (n - 1));

        #========== ========== ========== ========== ========== ========== ========== ========== ========== ========== ========== ========== ========== ==========
        '''
        consonant_freq_dist = nltk.probability.FreqDist();
        consonant_condition_freq_dist = nltk.probability.ConditionalFreqDist();
        for word in train:
            #word = re.sub(r'aeiou', ' ', word);
            word = word[0] + re.sub('aeiouy', ' ', word[1:]);
            
            consonant_list = word.split();
            #consonant_list = ['#', '#'] + consonant_list;
            for temp in consonant_list:
                consonant_freq_dist.inc(temp, 1);
                
        consonant_freq_dist.plot()
        '''
        #========== ========== ========== ========== ========== ========== ========== ========== ========== ========== ========== ========== ========== ==========        
        word_prob_dist = nltk.probability.MLEProbDist(word_freq_dist);

        word_model_empirical_frequency = numpy.zeros((1, self._maximum_length - self._minimum_length + 1)) + 1e-300;
        word_model_square = numpy.zeros((1, self._maximum_length - self._minimum_length + 1)) + 1e-300;
        
        #word_model_empirical_frequency_old = numpy.zeros((1, self._maximum_length - self._minimum_length + 1));
        #word_model_square_old = numpy.zeros((1, self._maximum_length - self._minimum_length + 1));
        
        total_outcomes = 0;
        for x in xrange(self._minimum_length, self._maximum_length+1):
            total_outcomes += len(self._char_set) ** x;

        for word in word_freq_dist.keys():
            word_model_empirical_frequency[0, len(word)-self._minimum_length] += word_prob_dist.prob(word) * self.probability_without_length(word);
            #word_model_empirical_frequency[0, len(word)-self._minimum_length] += 1.0/total_outcomes * self.probability_without_length(word);
            word_model_square[0, len(word)-self._minimum_length] += self.probability_without_length(word) ** 2;
            
            #word_model_empirical_frequency_old[0, len(word)-self._minimum_length] += word_prob_dist.prob(word) * self.probability_without_length(word);
            #word_model_square_old[0, len(word)-self._minimum_length] += self.probability_without_length(word) ** 2;
        
        #print "alpha is", 2 * (1-numpy.sum(word_model_empirical_frequency / word_model_square))/numpy.sum(1.0/word_model_square)
        #print word_model_empirical_frequency, word_model_square

        #sum_word_model_square_inverse = numpy.sum(1.0 / word_model_square);
        #sum_word_model_empirical_frequency_over_word_model_square = numpy.sum(word_model_empirical_frequency / word_model_square);
        #self._multinomial_length = (word_model_empirical_frequency * sum_word_model_square_inverse - sum_word_model_empirical_frequency_over_word_model_square + 1) / (word_model_square * sum_word_model_square_inverse);
        #print sum_word_model_square_inverse, sum_word_model_empirical_frequency_over_word_model_square;
        #print self._multinomial_length, numpy.sum(self._multinomial_length);
            
        if True:
            lagrangian_parameter = 2 * (1-numpy.sum(word_model_empirical_frequency / word_model_square))/numpy.sum(1.0/word_model_square)
        else:
            lagrangian_parameter = 1.;
        #print "lagrangian parameter is", lagrangian_parameter
        self._multinomial_length = (word_model_empirical_frequency - lagrangian_parameter / 2) / word_model_square;
        self._multinomial_length /= numpy.sum(self._multinomial_length);
        
        #print self._multinomial_length, numpy.sum(self._multinomial_length);
        assert numpy.all(self._multinomial_length>=0), self._multinomial_length;

        # recursively construct the lower-order models
        if n > 1:
            self._backoff = NcharModel(n-1, train, self._smoothing, maximum_length,
                 minimum_length, self._char_set, self._patch_char);
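
Note the numpy.zeros(...) + 1e-300 idiom above: the per-length accumulators are floored at a tiny positive value so the later division by word_model_square can never hit an exactly zero bin. In isolation (the word list is hypothetical):

import numpy

minimum_length, maximum_length = 3, 20
n_bins = maximum_length - minimum_length + 1

# Floor the accumulators so empty length bins never cause division by zero.
word_model_empirical_frequency = numpy.zeros((1, n_bins)) + 1e-300
word_model_square = numpy.zeros((1, n_bins)) + 1e-300

for word, prob in [("cat", 0.5), ("house", 0.3)]:
    word_model_square[0, len(word) - minimum_length] += prob ** 2

ratio = word_model_empirical_frequency / word_model_square  # safe everywhere
print(ratio.shape)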

Example 10

Project: mlat-server Source File: kalman.py
    @profile.trackcpu
    def update(self, position_time, measurements, altitude, altitude_error,
               leastsquares_position, leastsquares_cov, distinct, dof):
        """Update the filter given a new set of observations.

        position_time:         the time of these measurements, UTC seconds
        measurements:          a list of (receiver, timestamp, variance) tuples
        altitude:              reported altitude in meters, or None
        altitude_error:        reported altitude error in meters, or None
        leastsquares_position: the ECEF position computed by the least-squares
                               solver
        leastsquares_cov:      the covariance of leastsquares_position
        distinct:              the number of distinct receivers
        dof:                   the number of degrees of freedom in the solution
        """

        if self._acquiring and dof < self.min_acquiring_dof:
            # don't trust this result until we have converged
            return False

        if self._mean is None:
            # acquire an initial position
            glogger.info("{icao:06X} acquiring.".format(icao=self.icao))
            self.last_update = position_time
            self.set_initial_state(leastsquares_position, leastsquares_cov)
            return False

        if dof < self.min_tracking_dof:
            # don't use this one
            return False

        # update filter
        zero_pr = measurements[0][1] * constants.Cair
        positions = [measurements[0][0].position]

        n = len(measurements)

        if altitude is None:
            obs_fn = self.observation_function_without_altitude
            obs = numpy.zeros(n-1)
            obs_var = numpy.zeros(n-1)

            for i in range(1, n):
                receiver, timestamp, variance = measurements[i]
                positions.append(receiver.position)
                obs[i-1] = timestamp * constants.Cair - zero_pr
                obs_var[i-1] = (variance + measurements[0][2]) * constants.Cair**2
        else:
            obs_fn = self.observation_function_with_altitude
            obs = numpy.zeros(n)
            obs_var = numpy.zeros(n)

            obs[0] = altitude
            obs_var[0] = altitude_error**2

            for i in range(1, n):
                receiver, timestamp, variance = measurements[i]
                positions.append(receiver.position)
                obs[i] = timestamp * constants.Cair - zero_pr
                obs_var[i] = (variance + measurements[0][2]) * constants.Cair**2

        obs_covar = numpy.diag(obs_var)

        dt = position_time - self.last_update
        if dt < 0:
            return False

        try:
            trans_covar = self.transition_covariance(dt)
            transition_function = functools.partial(self.transition_function,
                                                    dt=dt)
            observation_function = functools.partial(obs_fn,
                                                     positions=positions)

            #
            # This is extracted from pykalman's
            # AdditiveUnscentedFilter.filter_update() because we want to access
            # the intermediate (prediction) result to decide whether to accept
            # this observation or not.
            #

            # make sigma points
            moments_state = pykalman.unscented.Moments(self._mean, self._cov)
            points_state = pykalman.unscented.moments2points(moments_state)

            # Predict.
            (_, moments_pred) = (
                pykalman.unscented.unscented_filter_predict(
                    transition_function=transition_function,
                    points_state=points_state,
                    sigma_transition=trans_covar
                )
            )
            points_pred = pykalman.unscented.moments2points(moments_pred)

            # Decide whether this is an outlier:
            # Get the predicted filter state mean and covariance
            # as an observation:
            (obs_points_pred, obs_moments_pred) = (
                pykalman.unscented.unscented_transform(
                    points_pred, observation_function,
                    sigma_noise=obs_covar
                )
            )

            # Find the Mahalanobis distance between the predicted observation
            # and our new observation, using the predicted observation's
            # covariance as our expected distribution.
            innovation = obs - obs_moments_pred.mean
            vi = numpy.linalg.inv(obs_moments_pred.covariance)
            md = math.sqrt(numpy.dot(numpy.dot(innovation.T, vi), innovation))

            # If the Mahalanobis distance is very large this observation is an
            # outlier
            if md > self.outlier_mahalanobis_distance:
                glogger.info("{icao:06X} outlier: md={md:.1f}".format(
                    icao=self.icao,
                    md=md))

                self._outliers += 1
                if self._outliers < 3 or (position_time - self.last_update) < 15.0:
                    # don't use this one
                    return False
                glogger.info("{icao:06X} reset due to outliers.".format(icao=self.icao))
                self._reset()
                return False

            self._outliers = 0

            # correct filter state using the current observation
            (self._mean, self._cov) = (
                pykalman.unscented.unscented_filter_correct(
                    observation_function=observation_function,
                    moments_pred=moments_pred,
                    points_pred=points_pred,
                    observation=obs,
                    sigma_observation=obs_covar
                )
            )

            self.last_update = position_time
            self._update_derived()

            # converged enough to start reporting?
            if ((self._acquiring and
                 self.position_error < self.min_acquiring_position_error and
                 self.velocity_error < self.min_acquiring_velocity_error)):
                glogger.info("{icao:06X} acquired.".format(icao=self.icao))
                self._acquiring = False
            elif (not self._acquiring and
                  (self.position_error > self.max_tracking_position_error or
                   self.velocity_error > self.max_tracking_velocity_error)):
                glogger.info("{icao:06X} tracking lost".format(icao=self.icao))
                self._acquiring = True

            self.valid = not self._acquiring
            return self.valid

        except Exception:
            glogger.exception("Kalman filter update failed. " +
                              "dt={dt} obs={obs} obs_covar={obs_covar} mean={mean} covar={covar}".format(
                                  dt=dt,
                                  obs=obs,
                                  obs_covar=obs_covar,
                                  mean=self._mean,
                                  covar=self._cov))
            self._reset()
            return False
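
The two numpy.zeros calls above size the observation vector to the number of usable measurements before the loop fills it in. A minimal sketch of that fill pattern for the no-altitude branch; the measurement tuples and the Cair value are invented here (mlat-server takes Cair from its constants module).

import numpy

Cair = 299792458.0 / 1.0003  # stand-in for constants.Cair (assumed value)

# (receiver_position, timestamp, variance); the first tuple is the reference
measurements = [((0.0, 0.0, 0.0), 1.000000, 1e-12),
                ((1.0, 2.0, 3.0), 1.000004, 2e-12),
                ((4.0, 5.0, 6.0), 1.000007, 1e-12)]

n = len(measurements)
zero_pr = measurements[0][1] * Cair

obs = numpy.zeros(n - 1)      # pseudorange differences vs. the reference receiver
obs_var = numpy.zeros(n - 1)
for i in range(1, n):
    _, timestamp, variance = measurements[i]
    obs[i - 1] = timestamp * Cair - zero_pr
    obs_var[i - 1] = (variance + measurements[0][2]) * Cair ** 2

obs_covar = numpy.diag(obs_var)   # independent errors -> diagonal covariance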

Example 11

Project: dolo Source File: time_iteration.py
def time_iteration(model, initial_guess=None, with_complementarities=True,
                        verbose=True, grid={}, output_type='dr',
                        maxit=1000, inner_maxit=10, tol=1e-6, hook=None) :

    '''
    Finds a global solution for ``model`` using backward time-iteration.

    This algorithm iterates on the residuals of the arbitrage equations.

    Parameters
    ----------
    model : NumericModel
        "dtmscc" model to be solved
    verbose : boolean
        if True, display iterations
    initial_guess : decision rule
        initial guess for the decision rule
    with_complementarities : boolean (True)
        if False, complementarity conditions are ignored
    grid: grid options
    maxit: maximum number of iterations
    inner_maxit: maximum number of iteration for inner solver
    tol: tolerance criterion for successive approximations

    Returns
    -------
    decision rule :
        approximated solution
    '''

    assert(model.model_type == 'dtmscc')

    def vprint(t):
        if verbose:
            print(t)

    [P, Q] = model.markov_chain

    n_ms = P.shape[0]   # number of markov states
    n_mv = P.shape[1] # number of markov variables

    x0 = model.calibration['controls']
    parms = model.calibration['parameters']
    n_x = len(x0)
    n_s = len(model.symbols['states'])

    approx = model.get_grid(**grid)
    a = approx.a
    b = approx.b
    orders = approx.orders
    interp_type = approx.interpolation # unused

    from dolo.numeric.decision_rules_markov import MarkovDecisionRule

    mdr = MarkovDecisionRule(n_ms, a, b, orders)

    grid = mdr.grid
    N = grid.shape[0]


    controls_0 = numpy.zeros((n_ms, N, n_x))

    if initial_guess is None:
        controls_0[:,:,:] = x0[None,None,:]
    else:
        for i_m in range(n_ms):
            m = P[i_m,:][None,:]
            controls_0[i_m,:,:] = initial_guess(i_m, grid)

    f = model.functions['arbitrage']
    g = model.functions['transition']

    if 'controls_lb' in model.functions and with_complementarities==True:
        lb_fun = model.functions['controls_lb']
        ub_fun = model.functions['controls_ub']
        lb = numpy.zeros_like(controls_0)*numpy.nan
        ub = numpy.zeros_like(controls_0)*numpy.nan
        for i_m in range(n_ms):
            m = P[i_m,:][None,:]
            p = parms[None,:]
            m = numpy.repeat(m, N, axis=0)
            p = numpy.repeat(p, N, axis=0)

            lb[i_m,:,:] = lb_fun(m, grid, p)
            ub[i_m,:,:] = ub_fun(m, grid, p)

    else:
        with_complementarities = False


    # mdr.set_values(controls)

    sh_c = controls_0.shape

    controls_0 = controls_0.reshape( (-1,n_x) )


    from dolo.numeric.optimize.newton import newton, SerialDifferentiableFunction
    from dolo.numeric.optimize.ncpsolve import ncpsolve

    err = 10
    it = 0

    if with_complementarities:
        vprint("Solving WITH complementarities.")
        lb = lb.reshape((-1,n_x))
        ub = ub.reshape((-1,n_x))


    if verbose:
        headline = '|{0:^4} | {1:10} | {2:8} | {3:8} | {4:3} |'.format( 'N',' Error', 'Gain','Time',  'nit' )
        stars = '-'*len(headline)
        print(stars)
        print(headline)
        print(stars)

    import time
    t1 = time.time()

    err_0 = numpy.nan

    verbit = (verbose == 'full')

    while err>tol and it<maxit:

        it += 1

        t_start = time.time()

        mdr.set_values(controls_0.reshape(sh_c))

        fn = lambda x: residuals(f, g, grid, x.reshape(sh_c), mdr, P, Q, parms).reshape((-1,n_x))
        dfn = SerialDifferentiableFunction(fn)


        if hook:
            hook()

        if with_complementarities:
            [controls,nit] = ncpsolve(dfn, lb, ub, controls_0, verbose=verbit, maxit=inner_maxit)
        else:
            [controls, nit] = newton(dfn, controls_0, verbose=verbit, maxit=inner_maxit)

        err = abs(controls-controls_0).max()

        err_SA = err/err_0
        err_0 = err

        controls_0 = controls

        t_finish = time.time()
        elapsed = t_finish - t_start

        if verbose:
            print('|{0:4} | {1:10.3e} | {2:8.3f} | {3:8.3f} | {4:3} |'.format( it, err, err_SA, elapsed, nit  ))

    controls_0 = controls.reshape(sh_c)

    t2 = time.time()

    if verbose:
        print(stars)
        print("Elapsed: {} seconds.".format(t2-t1))
        print(stars)


    if output_type == 'dr':
        return mdr
    elif output_type == 'controls':
        return controls_0
    else:
        raise Exception("Unsupported ouput type {}.".format(output_type))

Example 12

Project: nupic Source File: KNNClassifier.py
Function: learn
  def learn(self, inputPattern, inputCategory, partitionId=None, isSparse=0,
            rowID=None):
    """Train the classifier to associate specified input pattern with a
    particular category.

    @param inputPattern (list) The pattern to be assigned a category. If
        isSparse is 0, this should be a dense array (both ON and OFF bits
        present). Otherwise, if isSparse > 0, this should be a list of the
        indices of the non-zero bits in sorted order

    @param inputCategory (int) The category to be associated to the training
        pattern

    @param partitionId (int) partitionID allows you to associate an id with each
        input vector. It can be used to associate input patterns stored in the
        classifier with an external id. This can be useful for debugging or
        visualizing. Another use case is to ignore vectors with a specific id
        during inference (see description of infer() for details). There can be
        at most one partitionId per stored pattern (i.e. if two patterns are
        within distThreshold, only the first partitionId will be stored). This
        is an optional parameter.

    @param isSparse (int) If 0, the input pattern is a dense representation. If
        isSparse > 0, the input pattern is a list of non-zero indices and
        isSparse is the length of the dense representation

    @param rowID (int) UNKNOWN

    @return The number of patterns currently stored in the classifier
    """
    if self.verbosity >= 1:
      print "%s learn:" % g_debugPrefix
      print "  category:", int(inputCategory)
      print "  active inputs:", _labeledInput(inputPattern,
                                              cellsPerCol=self.cellsPerCol)

    if isSparse > 0:
      assert all(inputPattern[i] <= inputPattern[i+1]
                 for i in xrange(len(inputPattern)-1)), \
                     "Sparse inputPattern must be sorted."
      assert all(bit < isSparse for bit in inputPattern), \
        ("Sparse inputPattern must not index outside the dense "
         "representation's bounds.")

    if rowID is None:
      rowID = self._iterationIdx

    # Dense vectors
    if not self.useSparseMemory:

      # Not supported
      assert self.cellsPerCol == 0, "not implemented for dense vectors"

      # If the input was given in sparse form, convert it to dense
      if isSparse > 0:
        denseInput = numpy.zeros(isSparse)
        denseInput[inputPattern] = 1.0
        inputPattern = denseInput

      if self._specificIndexTraining and not self._nextTrainingIndices:
        # Specific index mode without any index provided - skip training
        return self._numPatterns

      if self._Memory is None:
        # Initialize memory with 100 rows and numPatterns = 0
        inputWidth = len(inputPattern)
        self._Memory = numpy.zeros((100,inputWidth))
        self._numPatterns = 0
        self._M = self._Memory[:self._numPatterns]

      addRow = True

      if self._vt is not None:
        # Compute projection
        inputPattern = numpy.dot(self._vt, inputPattern - self._mean)

      if self.distThreshold > 0:
        # Check if input is too close to an existing input to be accepted
        dist = self._calcDistance(inputPattern)
        minDist = dist.min()
        addRow = (minDist >= self.distThreshold)

      if addRow:
        self._protoSizes = None     # need to re-compute
        if self._numPatterns == self._Memory.shape[0]:
          # Double the size of the memory
          self._doubleMemoryNumRows()

        if not self._specificIndexTraining:
          # Normal learning - append the new input vector
          self._Memory[self._numPatterns] = inputPattern
          self._numPatterns += 1
          self._categoryList.append(int(inputCategory))
        else:
          # Specific index training mode - insert vector in specified slot
          vectorIndex = self._nextTrainingIndices.pop(0)
          while vectorIndex >= self._Memory.shape[0]:
            self._doubleMemoryNumRows()
          self._Memory[vectorIndex] = inputPattern
          self._numPatterns = max(self._numPatterns, vectorIndex + 1)
          if vectorIndex >= len(self._categoryList):
            self._categoryList += [-1] * (vectorIndex -
                                          len(self._categoryList) + 1)
          self._categoryList[vectorIndex] = int(inputCategory)

        # Set _M to the "active" part of _Memory
        self._M = self._Memory[0:self._numPatterns]

        self._addPartitionId(self._numPatterns-1, partitionId)

    # Sparse vectors
    else:

      # If the input was given in sparse form, convert it to dense if necessary
      if isSparse > 0 and (self._vt is not None or self.distThreshold > 0 \
              or self.numSVDDims is not None or self.numSVDSamples is not None \
              or self.numWinners > 0):
          denseInput = numpy.zeros(isSparse)
          denseInput[inputPattern] = 1.0
          inputPattern = denseInput
          isSparse = 0

      # Get the input width
      if isSparse > 0:
        inputWidth = isSparse
      else:
        inputWidth = len(inputPattern)

      # Allocate storage if this is the first training vector
      if self._Memory is None:
        self._Memory = NearestNeighbor(0, inputWidth)

      # Support SVD if it is on
      if self._vt is not None:
        inputPattern = numpy.dot(self._vt, inputPattern - self._mean)

      # Threshold the input, zeroing out entries that are too close to 0.
      #  This is only done if we are given a dense input.
      if isSparse == 0:
        thresholdedInput = self._sparsifyVector(inputPattern, True)
      addRow = True

      # If given the layout of the cells, then turn on the logic that stores
      # only the start cell for bursting columns.
      if self.cellsPerCol >= 1:
        burstingCols = thresholdedInput.reshape(-1,
                                  self.cellsPerCol).min(axis=1).nonzero()[0]
        for col in burstingCols:
          thresholdedInput[(col * self.cellsPerCol) + 1 :
                           (col * self.cellsPerCol) + self.cellsPerCol] = 0


      # Don't learn entries that are too close to existing entries.
      if self._Memory.nRows() > 0:
        dist = None
        # if this vector is a perfect match for one we already learned, then
        #  replace the category - it may have changed with online learning on.
        if self.replaceDuplicates:
          dist = self._calcDistance(thresholdedInput, distanceNorm=1)
          if dist.min() == 0:
            rowIdx = dist.argmin()
            self._categoryList[rowIdx] = int(inputCategory)
            if self.fixedCapacity:
              self._categoryRecencyList[rowIdx] = rowID
            addRow = False

        # Don't add this vector if it matches closely with another we already
        #  added
        if self.distThreshold > 0:
          if dist is None or self.distanceNorm != 1:
            dist = self._calcDistance(thresholdedInput)
          minDist = dist.min()
          addRow = (minDist >= self.distThreshold)
          if not addRow:
            if self.fixedCapacity:
              rowIdx = dist.argmin()
              self._categoryRecencyList[rowIdx] = rowID


      # If sparsity is too low, we do not want to add this vector
      if addRow and self.minSparsity > 0.0:
        if isSparse==0:
          sparsity = ( float(len(thresholdedInput.nonzero()[0])) /
                       len(thresholdedInput) )
        else:
          sparsity = float(len(inputPattern)) / isSparse
        if sparsity < self.minSparsity:
          addRow = False

      # Add the new sparse vector to our storage
      if addRow:
        self._protoSizes = None     # need to re-compute
        if isSparse == 0:
          self._Memory.addRow(thresholdedInput)
        else:
          self._Memory.addRowNZ(inputPattern, [1]*len(inputPattern))
        self._numPatterns += 1
        self._categoryList.append(int(inputCategory))
        self._addPartitionId(self._numPatterns-1, partitionId)
        if self.fixedCapacity:
          self._categoryRecencyList.append(rowID)
          if self._numPatterns > self.maxStoredPatterns and \
            self.maxStoredPatterns > 0:
            leastRecentlyUsedPattern = numpy.argmin(self._categoryRecencyList)
            self._Memory.deleteRow(leastRecentlyUsedPattern)
            self._categoryList.pop(leastRecentlyUsedPattern)
            self._categoryRecencyList.pop(leastRecentlyUsedPattern)
            self._numPatterns -= 1



    if self.numSVDDims is not None and self.numSVDSamples is not None \
          and self._numPatterns == self.numSVDSamples:
        self.computeSVD()

    return self._numPatterns
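
Both branches above densify a sparse pattern the same way: numpy.zeros allocates the dense width, then fancy indexing switches the listed bits on. Isolated, with made-up values:

import numpy

isSparse = 10                 # width of the dense representation
inputPattern = [1, 4, 7]      # sorted indices of the ON bits

denseInput = numpy.zeros(isSparse)
denseInput[inputPattern] = 1.0   # set all ON bits in one indexing operation

assert list(denseInput) == [0., 1., 0., 0., 1., 0., 0., 1., 0., 0.]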

Example 13

Project: spinalcordtoolbox Source File: plot_abs_error_vs_csf_values.py
def main():
    results_folder = param_default.results_folder
    methods_to_display = param_default.methods_to_display

    # Parameters for debug mode
    if param_default.debug:
        print '\n*** WARNING: DEBUG MODE ON ***\n'
        results_folder = "/Users/slevy_local/spinalcordtoolbox/dev/atlas/validate_atlas/results_20150210_200iter"#"C:/cygwin64/home/Simon_2/data_methods_comparison"
        path_sct = '/Users/slevy_local/spinalcordtoolbox' #'C:/cygwin64/home/Simon_2/spinalcordtoolbox'
        methods_to_display = 'bin,wa,wath,ml,map'
    else:
        status, path_sct = commands.getstatusoutput('echo $SCT_DIR')

        # Check input parameters
        try:
            opts, args = getopt.getopt(sys.argv[1:], 'i:m:')  # define flags
        except getopt.GetoptError as err:  # check if the arguments are defined
            print str(err)  # error
            # usage() # display usage
        # if not opts:
        #     print 'Please enter the path to the result folder. Exit program.'
        #     sys.exit(1)
        #     # usage()
        for opt, arg in opts:  # explore flags
            if opt == '-i':
                results_folder = arg
            if opt == '-m':
                methods_to_display = arg

    # Append path that contains scripts, to be able to load modules
    sys.path.append(path_sct + '/scripts')
    import sct_utils as sct

    sct.printv("Working directory: " + os.getcwd())

    results_folder_csf = results_folder + '/csf'

    sct.printv('\n\nData will be extracted from folder ' + results_folder_csf + '.', 'warning')
    sct.printv('\t\tCheck existence...')
    sct.check_folder_exist(results_folder_csf)

    # Extract methods to display
    methods_to_display = methods_to_display.strip().split(',')

    # Extract file names of the results files
    fname_results_csf = glob.glob(results_folder_csf + '/*.txt')
    # Remove duplicates (due to the two folders)
    # for i_fname in range(0, len(fname_results)):
    #     for j_fname in range(0, len(fname_results)):
    #         if (i_fname != j_fname) & (os.path.basename(fname_results[i_fname]) == os.path.basename(fname_results[j_fname])):
    #             fname_results.remove(fname_results[j_fname])
    file_results = []
    for fname in fname_results_csf:
        file_results.append(os.path.basename(fname))
    for file in file_results:
        if file_results.count(file) > 1:
            ind = file_results.index(file)
            fname_results_csf.remove(fname_results_csf[ind])
            file_results.remove(file)

    nb_results_file = len(fname_results_csf)

    # 1st dim: SNR, 2nd dim: tract std, 3rd dim: mean abs error, 4th dim: std abs error
    # result_array = numpy.empty((nb_results_file, nb_results_file, 3), dtype=object)
    # SNR
    snr = numpy.zeros((nb_results_file))
    # Tracts std
    tracts_std = numpy.zeros((nb_results_file))
    # CSF value
    csf_values = numpy.zeros((nb_results_file))
    # methods' name
    methods_name = []  #numpy.empty((nb_results_file, nb_method), dtype=object)
    # labels
    error_per_label = []
    std_per_label = []
    labels_id = []
    # median
    median_results = numpy.zeros((nb_results_file, 5))
    # median std across bootstraps
    median_std = numpy.zeros((nb_results_file, 5))
    # min
    min_results = numpy.zeros((nb_results_file, 5))
    # max
    max_results = numpy.zeros((nb_results_file, 5))

    #
    for i_file in range(0, nb_results_file):

        # Open file
        f = open(fname_results_csf[i_file])  # open file
        # Extract all lines in .txt file
        lines = [line for line in f.readlines() if line.strip()]

        # extract SNR
        # find all index of lines containing the string "sigma noise"
        ind_line_noise = [lines.index(line_noise) for line_noise in lines if "sigma noise" in line_noise]
        if len(ind_line_noise) != 1:
            sct.printv("ERROR: number of lines including \"sigma noise\" is different from 1. Exit program.", 'error')
            sys.exit(1)
        else:
            # result_array[:, i_file, i_file] = int(''.join(c for c in lines[ind_line_noise[0]] if c.isdigit()))
            snr[i_file] = int(''.join(c for c in lines[ind_line_noise[0]] if c.isdigit()))

        # extract tract std
        ind_line_tract_std = [lines.index(line_tract_std) for line_tract_std in lines if
                              "range tracts" in line_tract_std]
        if len(ind_line_tract_std) != 1:
            sct.printv("ERROR: number of lines including \"range tracts\" is different from 1. Exit program.", 'error')
            sys.exit(1)
        else:
            # result_array[i_file, i_file, :] = int(''.join(c for c in lines[ind_line_tract_std[0]].split(':')[1] if c.isdigit()))
            # regex = re.compile(''('(.*)':)  # re.I makes the match case-insensitive
            # match = regex.search(lines[ind_line_tract_std[0]])
            # result_array[:, i_file, :, :] = match.group(1)  # group 1 corresponds to '.*'
            tracts_std[i_file] = int(''.join(c for c in lines[ind_line_tract_std[0]].split(':')[1] if c.isdigit()))

        # extract CSF value
        ind_line_csf_value = [lines.index(line_csf_value) for line_csf_value in lines if
                              "# value CSF" in line_csf_value]
        if len(ind_line_csf_value) != 1:
            sct.printv("ERROR: number of lines including \"range tracts\" is different from 1. Exit program.", 'error')
            sys.exit(1)
        else:
            # result_array[i_file, i_file, :] = int(''.join(c for c in lines[ind_line_tract_std[0]].split(':')[1] if c.isdigit()))
            # regex = re.compile(''('(.*)':)  # re.I makes the match case-insensitive
            # match = regex.search(lines[ind_line_tract_std[0]])
            # result_array[:, i_file, :, :] = match.group(1)  # group 1 corresponds to '.*'
            csf_values[i_file] = int(''.join(c for c in lines[ind_line_csf_value[0]].split(':')[1] if c.isdigit()))


        # extract method name
        ind_line_label = [lines.index(line_label) for line_label in lines if "Label" in line_label]
        if len(ind_line_label) != 1:
            sct.printv("ERROR: number of lines including \"Label\" is different from 1. Exit program.", 'error')
            sys.exit(1)
        else:
            # methods_name[i_file, :] = numpy.array(lines[ind_line_label[0]].strip().split(',')[1:])
            methods_name.append(lines[ind_line_label[0]].strip().replace(' ', '').split(',')[1:])

        # extract median
        ind_line_median = [lines.index(line_median) for line_median in lines if "median" in line_median]
        if len(ind_line_median) != 1:
            sct.printv("WARNING: number of lines including \"median\" is different from 1. Exit program.", 'warning')
            # sys.exit(1)
        else:
            median = lines[ind_line_median[0]].strip().split(',')[1:]
            # result_array[i_file, i_file, 0] = [float(m.split('(')[0]) for m in median]
            median_results[i_file, :] = numpy.array([float(m.split('(')[0]) for m in median])
            median_std[i_file, :] = numpy.array([float(m.split('(')[1][:-1]) for m in median])

        # extract min
        ind_line_min = [lines.index(line_min) for line_min in lines if "min," in line_min]
        if len(ind_line_min) != 1:
            sct.printv("WARNING: number of lines including \"min\" is different from 1. Exit program.", 'warning')
            # sys.exit(1)
        else:
            min = lines[ind_line_min[0]].strip().split(',')[1:]
            # result_array[i_file, i_file, 1] = [float(m.split('(')[0]) for m in min]
            min_results[i_file, :] = numpy.array([float(m.split('(')[0]) for m in min])

        # extract max
        ind_line_max = [lines.index(line_max) for line_max in lines if "max" in line_max]
        if len(ind_line_max) != 1:
            sct.printv("WARNING: number of lines including \"max\" is different from 1. Exit program.", 'warning')
            # sys.exit(1)
        else:
            max = lines[ind_line_max[0]].strip().split(',')[1:]
            # result_array[i_file, i_file, 1] = [float(m.split('(')[0]) for m in max]
            max_results[i_file, :] = numpy.array([float(m.split('(')[0]) for m in max])

        # extract error for each label
        error_per_label_for_file_i = []
        std_per_label_for_file_i = []
        labels_id_for_file_i = []
        # Due to two different kinds of file structure, the index of the last label line must be adapted
        if not ind_line_median:
            ind_line_median = [len(lines) + 1]
        for i_line in range(ind_line_label[0] + 1, ind_line_median[0] - 1):
            line_label_i = lines[i_line].strip().split(',')
            error_per_label_for_file_i.append([float(error.strip().split('(')[0]) for error in line_label_i[1:]])
            std_per_label_for_file_i.append([float(error.strip().split('(')[1][:-1]) for error in line_label_i[1:]])
            labels_id_for_file_i.append(line_label_i[0])
        error_per_label.append(error_per_label_for_file_i)
        std_per_label.append(std_per_label_for_file_i)
        labels_id.append(labels_id_for_file_i)

        # close file
        f.close()

    # check if all the files in the result folder were generated with the same number of methods
    if not all(x == methods_name[0] for x in methods_name):
        sct.printv(
            'ERROR: All the generated files in folder ' + results_folder + ' have not been generated with the same number of methods. Exit program.',
            'error')
        sys.exit(1)
    # check if all the files in the result folder were generated with the same labels
    if not all(x == labels_id[0] for x in labels_id):
        sct.printv(
            'ERROR: All the generated files in folder ' + results_folder + ' have not been generated with the same labels. Exit program.',
            'error')
        sys.exit(1)

    # convert the list "error_per_label" into a numpy array to ease further manipulations
    error_per_label = numpy.array(error_per_label)
    std_per_label = numpy.array(std_per_label)
    # compute different stats
    abs_error_per_labels = numpy.absolute(error_per_label)
    max_abs_error_per_meth = numpy.amax(abs_error_per_labels, axis=1)
    min_abs_error_per_meth = numpy.amin(abs_error_per_labels, axis=1)
    mean_abs_error_per_meth = numpy.mean(abs_error_per_labels, axis=1)
    std_abs_error_per_meth = numpy.std(abs_error_per_labels, axis=1)

    nb_method = len(methods_to_display)

    sct.printv('Noise std of the ' + str(nb_results_file) + ' generated files:')
    print snr
    print '----------------------------------------------------------------------------------------------------------------'
    sct.printv('Tracts std of the ' + str(nb_results_file) + ' generated files:')
    print tracts_std
    print '----------------------------------------------------------------------------------------------------------------'
    sct.printv('CSF value of the ' + str(nb_results_file) + ' generated files:')
    print csf_values
    print '----------------------------------------------------------------------------------------------------------------'
    sct.printv('Methods used to generate results for the ' + str(nb_results_file) + ' generated files:')
    print methods_name
    print '----------------------------------------------------------------------------------------------------------------'
    sct.printv(
        'Median obtained with each method (in columns) for the ' + str(nb_results_file) + ' generated files (in rows):')
    print median_results
    print '----------------------------------------------------------------------------------------------------------------'
    sct.printv('Minimum obtained with each method (in columns) for the ' + str(
        nb_results_file) + ' generated files (in rows):')
    print min_results
    print '----------------------------------------------------------------------------------------------------------------'
    sct.printv('Maximum obtained with each method (in columns) for the ' + str(
        nb_results_file) + ' generated files (in rows):')
    print max_results
    print '----------------------------------------------------------------------------------------------------------------'
    sct.printv('Labels\' ID (in columns) for the ' + str(nb_results_file) + ' generated files (in rows):')
    print labels_id
    print '----------------------------------------------------------------------------------------------------------------'
    sct.printv(
        'Errors obtained with each method (in columns) for the ' + str(nb_results_file) + ' generated files (in rows):')
    print error_per_label


    # ******************************************* START PLOTTING HERE **********************************************

    ind_files_csf_sort = numpy.argsort(csf_values)

    matplotlib.rcParams.update({'font.size': 45, 'font.family': 'trebuchet'})
    plt.rcParams['xtick.major.pad'] = '9'
    plt.rcParams['ytick.major.pad'] = '15'

    plt.figure(figsize=(30, 16))
    width = 1.0 / (nb_method + 1)
    ind_fig = numpy.arange(len(ind_files_csf_sort)) * (1.0 + width)
    plt.ylabel('Absolute error (%)\n', fontsize=55)
    plt.xlabel('CSF values (% of true WM value)', fontsize=55)
    plt.title('Absolute error within all tracts as a function of CSF values\n', fontsize=65)

    # colors = plt.get_cmap('jet')(np.linspace(0, 1.0, nb_method))
    colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
    box_plots = []
    for meth, color in zip(methods_to_display, colors):
        i_meth = methods_name[0].index(meth)
        i_meth_to_display = methods_to_display.index(meth)

        boxprops = dict(linewidth=4, color=color)
        flierprops = dict(color=color, markeredgewidth=0.7, markersize=15, marker='.')
        whiskerprops = dict(color=color, linewidth=3)
        capprops = dict(color=color, linewidth=3)
        medianprops = dict(linewidth=4, color=color)
        meanpointprops = dict(marker='D', markeredgecolor='black', markerfacecolor='firebrick')
        meanlineprops = dict(linestyle='--', linewidth=2.5, color='purple')
        plot_i = plt.boxplot(numpy.transpose(abs_error_per_labels[ind_files_csf_sort, :, i_meth]), positions=ind_fig + i_meth_to_display * width + (float(i_meth_to_display) * width) / (nb_method + 1), widths=width, boxprops=boxprops, medianprops=medianprops, flierprops=flierprops, whiskerprops=whiskerprops, capprops=capprops)
        # plt.errorbar(ind_fig2+i_meth*width+width/2+(float(i_meth)*width)/(nb_method+1), mean_abs_error_per_meth[ind_snr_sort_tracts_std_10, i_meth], std_abs_error_per_meth[ind_snr_sort_tracts_std_10, i_meth], color=color, marker='_', linestyle='None', markersize=200*width, markeredgewidth=3)
        box_plots.append(plot_i['boxes'][0])

    # add alternated vertical background colored bars
    for i_xtick in range(0, len(ind_fig), 2):
        plt.axvspan(ind_fig[i_xtick] - width - width / 4, ind_fig[i_xtick] + (nb_method + 1) * width - width / 4, facecolor='grey', alpha=0.1)

    # plt.legend(box_plots, methods_to_display, bbox_to_anchor=(1.01, 1), loc=2, borderaxespad=0.)
    # plt.legend(box_plots, methods_to_display, loc='best', fontsize=22)
    # convert xtick labels into integers
    xtick_labels = [int(xtick) for xtick in csf_values[ind_files_csf_sort]]
    plt.xticks(ind_fig + (numpy.floor(nb_method / 2)) * width * (1.0 + 1.0 / (nb_method + 1)), xtick_labels)
    plt.gca().set_xlim([-width, numpy.max(ind_fig) + (nb_method + 0.5) * width])
    plt.gca().set_ylim([0, 18])
    plt.gca().yaxis.set_major_locator(plt.MultipleLocator(2.0))
    plt.gca().yaxis.set_minor_locator(plt.MultipleLocator(0.5))
    plt.grid(b=True, axis='y', which='both', alpha=0.5)
    plt.subplots_adjust(left=0.1)


    plt.savefig(param_default.fname_folder_to_save_fig+'/absolute_error_vs_csf_values.pdf', format='PDF')

    plt.show(block=False)
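
Each statistic above gets its own numpy.zeros buffer sized by the number of result files, so the parsing loop can fill row i_file as each file is read. A stripped-down sketch of that preallocate-then-fill pattern; the parsed values are invented and only two of the buffers are shown:

import numpy

nb_results_file = 3
nb_method = 5

snr = numpy.zeros(nb_results_file)
median_results = numpy.zeros((nb_results_file, nb_method))

for i_file in range(nb_results_file):
    # stand-ins for values parsed out of the i-th results file
    snr[i_file] = 10 * (i_file + 1)
    median_results[i_file, :] = numpy.linspace(0.0, 1.0, nb_method) + i_file

print(snr)
print(median_results)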

Example 14

Project: nupic.research Source File: union_pooling_trained_tm.py
def experiment1():
  paramDir = 'params/1024_baseline/5_trainingPasses.yaml'
  outputDir = 'results/'
  params = yaml.safe_load(open(paramDir, 'r'))
  options = {'plotVerbosity': 2, 'consoleVerbosity': 2}
  plotVerbosity = 2
  consoleVerbosity = 1


  print "Running SDR overlap experiment...\n"
  print "Params dir: {0}".format(paramDir)
  print "Output dir: {0}\n".format(outputDir)

  # Dimensionality of sequence patterns
  patternDimensionality = params["patternDimensionality"]

  # Cardinality (ON / true bits) of sequence patterns
  patternCardinality = params["patternCardinality"]

  # TODO If this parameter is to be supported, the sequence generation code
  # below must change
  # Number of unique patterns from which sequences are built
  # patternAlphabetSize = params["patternAlphabetSize"]

  # Length of sequences shown to network
  sequenceLength = params["sequenceLength"]

  # Number of sequences used. Sequences may share common elements.
  numberOfSequences = params["numberOfSequences"]

  # Number of sequence passes for training the TM. Zero => no training.
  trainingPasses = params["trainingPasses"]

  tmParamOverrides = params["temporalMemoryParams"]
  upParamOverrides = params["unionPoolerParams"]

  # Generate a sequence list and an associated labeled list (both containing a
  # set of sequences separated by None)
  start = time.time()
  print "\nGenerating sequences..."
  patternAlphabetSize = sequenceLength * numberOfSequences
  patternMachine = PatternMachine(patternDimensionality, patternCardinality,
                                  patternAlphabetSize)
  sequenceMachine = SequenceMachine(patternMachine)

  numbers = sequenceMachine.generateNumbers(numberOfSequences, sequenceLength)
  generatedSequences = sequenceMachine.generateFromNumbers(numbers)
  sequenceLabels = [str(numbers[i + i*sequenceLength: i + (i+1)*sequenceLength])
                    for i in xrange(numberOfSequences)]
  labeledSequences = []
  for label in sequenceLabels:
    for _ in xrange(sequenceLength):
      labeledSequences.append(label)
    labeledSequences.append(None)

  # Set up the Temporal Memory and Union Pooler network
  print "\nCreating network..."
  experiment = UnionTemporalPoolerExperiment(tmParamOverrides, upParamOverrides)

  # Train only the Temporal Memory on the generated sequences
  if trainingPasses > 0:

    print "\nTraining Temporal Memory..."
    if consoleVerbosity > 0:
      print "\nPass\tBursting Columns Mean\tStdDev\tMax"

    for i in xrange(trainingPasses):
      experiment.runNetworkOnSequences(generatedSequences,
                                       labeledSequences,
                                       tmLearn=True,
                                       upLearn=None,
                                       verbosity=consoleVerbosity,
                                       progressInterval=_SHOW_PROGRESS_INTERVAL)

      if consoleVerbosity > 0:
        stats = experiment.getBurstingColumnsStats()
        print "{0}\t{1}\t{2}\t{3}".format(i, stats[0], stats[1], stats[2])

      # Reset the TM monitor mixin's records accrued during this training pass
      # experiment.tm.mmClearHistory()

    print
    print MonitorMixinBase.mmPrettyPrintMetrics(
      experiment.tm.mmGetDefaultMetrics())
    print


  experiment.tm.mmClearHistory()
  experiment.up.mmClearHistory()


  print "\nRunning test phase..."

  inputSequences = generatedSequences
  inputCategories = labeledSequences
  tmLearn = True
  upLearn = False
  classifierLearn = False
  currentTime = time.time()

  experiment.tm.reset()
  experiment.up.reset()

  poolingActivationTrace = numpy.zeros((experiment.up._numColumns, 1))
  activeCellsTrace = numpy.zeros((experiment.up._numColumns, 1))
  activeSPTrace = numpy.zeros((experiment.up._numColumns, 1))

  for _ in xrange(trainingPasses):
    experiment.tm.reset()
    for i in xrange(len(inputSequences)):
      sensorPattern = inputSequences[i]
      inputCategory = inputCategories[i]
      if sensorPattern is None:
        pass
      else:
        experiment.tm.compute(sensorPattern,
                        learn=tmLearn,
                        sequenceLabel=inputCategory)

        if upLearn is not None:
          activeCells, predActiveCells, burstingCols = experiment.getUnionTemporalPoolerInput()
          experiment.up.compute(activeCells,
                          predActiveCells,
                          learn=upLearn,
                          sequenceLabel=inputCategory)

          currentPoolingActivation = experiment.up._poolingActivation.reshape((experiment.up._numColumns, 1))
          poolingActivationTrace = numpy.concatenate((poolingActivationTrace, currentPoolingActivation), 1)

          currentUnionSDR = numpy.zeros((experiment.up._numColumns, 1))
          currentUnionSDR[experiment.up._unionSDR] = 1
          activeCellsTrace = numpy.concatenate((activeCellsTrace, currentUnionSDR), 1)

          currentSPSDR = numpy.zeros((experiment.up._numColumns, 1))
          currentSPSDR[experiment.up._activeCells] = 1
          activeSPTrace = numpy.concatenate((activeSPTrace, currentSPSDR), 1)

    print "\nPass\tBursting Columns Mean\tStdDev\tMax"
    stats = experiment.getBurstingColumnsStats()
    print "{0}\t{1}\t{2}\t{3}".format(0, stats[0], stats[1], stats[2])
    print
    print MonitorMixinBase.mmPrettyPrintMetrics(\
        experiment.tm.mmGetDefaultMetrics() + experiment.up.mmGetDefaultMetrics())
    print
    experiment.tm.mmClearHistory()


  # estimate fraction of shared bits across adjacent time point
  unionSDRshared = experiment.up._mmComputeUnionSDRdiff()

  bitLifeList = experiment.up._mmComputeBitLifeStats()
  bitLife = numpy.array(bitLifeList)


  # Plot SP outputs, UP persistence and UP outputs in testing phase
  def showSequenceStartLine(ax, trainingPasses, sequenceLength):
    for i in xrange(trainingPasses):
      ax.vlines(i*sequenceLength, 0, 100, linestyles='--')

  plt.figure()
  ncolShow = 100
  f, (ax1, ax2, ax3) = plt.subplots(nrows=1,ncols=3)
  ax1.imshow(activeSPTrace[1:ncolShow,:], cmap=cm.Greys,interpolation="nearest",aspect='auto')
  showSequenceStartLine(ax1, trainingPasses, sequenceLength)
  ax1.set_title('SP SDR')
  ax1.set_ylabel('Columns')
  ax2.imshow(poolingActivationTrace[1:100,:], cmap=cm.Greys, interpolation="nearest",aspect='auto')
  showSequenceStartLine(ax2, trainingPasses, sequenceLength)
  ax2.set_title('Persistence')
  ax3.imshow(activeCellsTrace[1:ncolShow,:], cmap=cm.Greys, interpolation="nearest",aspect='auto')
  showSequenceStartLine(ax3, trainingPasses, sequenceLength)
  plt.title('Union SDR')

  ax2.set_xlabel('Time (steps)')

  pp = PdfPages('results/UnionPoolingOnLearnedTM_Experiment1.pdf')
  pp.savefig()
  pp.close()


  f, (ax1, ax2, ax3) = plt.subplots(nrows=3,ncols=1)
  ax1.plot((sum(activeCellsTrace))/experiment.up._numColumns*100)
  ax1.set_ylabel('Union SDR size (%)')
  ax1.set_xlabel('Time (steps)')
  ax1.set_ylim(0,25)

  ax2.plot(unionSDRshared)
  ax2.set_ylabel('Shared Bits')
  ax2.set_xlabel('Time (steps)')

  ax3.hist(bitLife)
  ax3.set_xlabel('Life duration for each bit')
  pp = PdfPages('results/UnionSDRproperty_Experiment1.pdf')
  pp.savefig()
  pp.close()
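
The traces above start as a single numpy.zeros((numColumns, 1)) column and grow one column per time step via numpy.concatenate along axis 1, which is why the plots slice off column 0 (the seed). The same mechanic in isolation, with a fake SDR:

import numpy

numColumns = 4
activeCellsTrace = numpy.zeros((numColumns, 1))   # seed column, dropped when plotting

for step in range(3):
    currentUnionSDR = numpy.zeros((numColumns, 1))
    currentUnionSDR[[0, step % numColumns]] = 1   # invented active cells for this step
    activeCellsTrace = numpy.concatenate((activeCellsTrace, currentUnionSDR), 1)

assert activeCellsTrace.shape == (numColumns, 4)  # 1 seed + 3 recorded steps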

Example 15

Project: fatiando Source File: wavefd.py
def elastic_psv(mu, lamb, density, area, dt, iterations, sources,
                stations=None, snapshot=None, padding=50, taper=0.002,
                xz2ps=False):
    """
    Simulate P and SV waves using the Parsimonious Staggered Grid (PSG) finite
    differences scheme of Luo and Schuster (1990).

    This is an iterator. It yields panels of $u_x$ and $u_z$ displacements
    and a list of arrays with recorded displacements in a time series.
    Parameter *snapshot* controls how often the iterator yields. The default
    is only at the end, so only the final panel and full time series are
    yielded.

    Uses absorbing boundary conditions (Gaussian taper) in the lower, left and
    right boundaries. The top implements the free-surface boundary condition
    of Vidale and Clayton (1986).

    Parameters:

    * mu : 2D-array (shape = *shape*)
        The :math:`\mu` Lame parameter at all the grid nodes
    * lamb : 2D-array (shape = *shape*)
        The :math:`\lambda` Lame parameter at all the grid nodes
    * density : 2D-array (shape = *shape*)
        The value of the density at all the grid nodes
    * area : [xmin, xmax, zmin, zmax]
        The x, z limits of the simulation area, e.g., the shallowest point is
        at zmin, the deepest at zmax.
    * dt : float
        The time interval between iterations
    * iterations : int
        Number of time steps to take
    * sources : [xsources, zsources] : lists
        A lists of the sources of waves for the particle movement in the x and
        z directions
        (see :class:`~fatiando.seismic.wavefd.MexHatSource` for an example
        source)
    * stations : None or list
        If not None, then a list of [x, z] pairs with the x and z coordinates
        of the recording stations. These are physical coordinates, not the
        indexes!
    * snapshot : None or int
        If not None, than yield a snapshot of the displacements at every
        *snapshot* iterations.
    * padding : int
        Number of grid nodes to use for the absorbing boundary region
    * taper : float
        The intensity of the Gaussian taper function used for the absorbing
        boundary conditions
    * xz2ps : True or False
        If True, will yield P and S wave panels instead of ux, uz. See
        :func:`~fatiando.seismic.wavefd.xz2ps`.

    Yields:

    * [t, ux, uz, xseismograms, zseismograms]
        The current iteration, the particle displacements in the x and z
        directions, lists of arrays containing the displacements recorded at
        each station until the current iteration.

    References:

    Vidale, J. E., and R. W. Clayton (1986), A stable free-surface boundary
    condition for two-dimensional elastic finite-difference wave simulation,
    Geophysics, 51(12), 2247-2249.

    """
    if not (mu.shape == lamb.shape == density.shape):
        raise ValueError('Density, lambda, and mu grids should have the same shape')
    x1, x2, z1, z2 = area
    nz, nx = mu.shape
    dz, dx = (z2 - z1) / (nz - 1), (x2 - x1) / (nx - 1)
    xsources, zsources = sources
    # Get the index of the closest point to the stations and start the
    # seismograms
    if stations is not None:
        stations = [[int(round((z - z1) / dz)), int(round((x - x1) / dx))]
                    for x, z in stations]
        xseismograms = [numpy.zeros(iterations) for i in xrange(len(stations))]
        zseismograms = [numpy.zeros(iterations) for i in xrange(len(stations))]
    else:
        stations, xseismograms, zseismograms = [], [], []
    # Add padding to have an absorbing region to simulate an infinite medium
    pad = int(padding)
    nx += 2 * pad
    nz += pad
    mu_pad = _add_pad(mu, pad, (nz, nx))
    lamb_pad = _add_pad(lamb, pad, (nz, nx))
    dens_pad = _add_pad(density, pad, (nz, nx))
    # Pre-compute the matrices required for the free-surface boundary
    dzdx = dz / dx
    identity = scipy.sparse.identity(nx)
    B = scipy.sparse.eye(nx, nx, k=1) - scipy.sparse.eye(nx, nx, k=-1)
    gamma = scipy.sparse.spdiags(lamb_pad[0] / (lamb_pad[0] + 2 * mu_pad[0]),
                                 [0], nx, nx)
    Mx1 = identity - 0.0625 * (dzdx ** 2) * B * gamma * B
    Mx2 = identity + 0.0625 * (dzdx ** 2) * B * gamma * B
    Mx3 = 0.5 * dzdx * B
    Mz1 = identity - 0.0625 * (dzdx ** 2) * gamma * B * B
    Mz2 = identity + 0.0625 * (dzdx ** 2) * gamma * B * B
    Mz3 = 0.5 * dzdx * gamma * B
    # Compute and yield the initial solutions
    ux = numpy.zeros((2, nz, nx), dtype=numpy.float)
    uz = numpy.zeros((2, nz, nx), dtype=numpy.float)
    if xz2ps:
        p, s = numpy.empty_like(mu), numpy.empty_like(mu)
    for src in xsources:
        i, j = src.indexes()
        ux[1, i, j + pad] += (dt ** 2 / density[i, j]) * src(0)
    for src in zsources:
        i, j = src.indexes()
        uz[1, i, j + pad] += (dt ** 2 / density[i, j]) * src(0)
    # Update seismograms
    for station, xseis, zseis in zip(stations, xseismograms, zseismograms):
        i, j = station
        xseis[0] = ux[1, i, j + pad]
        zseis[0] = uz[1, i, j + pad]
    if snapshot is not None:
        if xz2ps:
            _xz2ps(ux[1, :-pad, pad:-pad], uz[1, :-pad, pad:-pad], p, s,
                   p.shape[1], p.shape[0], dx, dz)
            yield [0, p, s, xseismograms, zseismograms]
        else:
            yield [0, ux[1, :-pad, pad:-pad], uz[1, :-pad, pad:-pad],
                   xseismograms, zseismograms]
    for iteration in xrange(1, iterations):
        t, tm1 = iteration % 2, (iteration + 1) % 2
        tp1 = tm1
        _step_elastic_psv(ux, uz, tp1, t, tm1, 1, nx - 1,  1, nz - 1, dt, dx,
                          dz, mu_pad, lamb_pad, dens_pad)
        _apply_damping(ux[t], nx, nz, pad, taper)
        _apply_damping(uz[t], nx, nz, pad, taper)
        # Free-surface boundary conditions
        ux[tp1, 0, :] = scipy.sparse.linalg.spsolve(
            Mx1, Mx2*ux[tp1, 1, :] + Mx3*uz[tp1, 1, :])
        uz[tp1, 0, :] = scipy.sparse.linalg.spsolve(
            Mz1, Mz2*uz[tp1, 1, :] + Mz3*ux[tp1, 1, :])
        _nonreflexive_psv_boundary_conditions(ux, uz, tp1, t, tm1, nx, nz, dt,
                                              dx, dz, mu_pad, lamb_pad,
                                              dens_pad)
        _apply_damping(ux[tp1], nx, nz, pad, taper)
        _apply_damping(uz[tp1], nx, nz, pad, taper)
        for src in xsources:
            i, j = src.indexes()
            ux[tp1, i, j + pad] += (dt**2 / density[i, j])*src(iteration*dt)
        for src in zsources:
            i, j = src.indexes()
            uz[tp1, i, j +
                pad] += (dt ** 2 / density[i, j]) * src(iteration * dt)
        for station, xseis, zseis in zip(stations, xseismograms, zseismograms):
            i, j = station
            xseis[iteration] = ux[tp1, i, j + pad]
            zseis[iteration] = uz[tp1, i, j + pad]
        if snapshot is not None and iteration % snapshot == 0:
            if xz2ps:
                _xz2ps(ux[tp1, :-pad, pad:-pad], uz[tp1, :-pad, pad:-pad], p,
                       s, p.shape[1], p.shape[0], dx, dz)
                yield [iteration, p, s, xseismograms, zseismograms]
            else:
                yield [iteration, ux[tp1, :-pad, pad:-pad],
                       uz[tp1, :-pad, pad:-pad], xseismograms, zseismograms]
    if xz2ps:
        _xz2ps(ux[tp1, :-pad, pad:-pad], uz[tp1, :-pad, pad:-pad], p,
               s, p.shape[1], p.shape[0], dx, dz)
        yield [iteration, p, s, xseismograms, zseismograms]
    else:
        yield [iteration, ux[tp1, :-pad, pad:-pad], uz[tp1, :-pad, pad:-pad],
               xseismograms, zseismograms]
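
numpy.zeros((2, nz, nx)) keeps just two time levels per field; each iteration the index pair (t, tm1) flips parity and the new level tp1 overwrites the slot of the oldest one. A minimal sketch of that buffer rotation, with a toy smoothing update standing in for the actual PSG finite differences:

import numpy

nz, nx, iterations = 1, 8, 5
u = numpy.zeros((2, nz, nx))
u[1, 0, nx // 2] = 1.0            # initial impulse

for iteration in range(1, iterations):
    t, tm1 = iteration % 2, (iteration + 1) % 2
    tp1 = tm1                     # new level reuses the oldest slot
    # toy update only; the real scheme is _step_elastic_psv
    u[tp1, :, 1:-1] = 0.5 * (u[t, :, :-2] + u[t, :, 2:])

print(u[tp1])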

Example 16

Project: pyscf Source File: ccsd_grad_incore.py
def IX_intermediates(mycc, t1, t2, l1, l2, eris=None, d1=None, d2=None):
    if eris is None:
# Note eris are in Chemist's notation
        eris = ccsd._ERIS(mycc)
    if d1 is None:
        d1 = ccsd_rdm.gamma1_intermediates(mycc, t1, t2, l1, l2)
    if d2 is None:
        d2 = ccsd_rdm.gamma2_incore(mycc, t1, t2, l1, l2)
    doo, dov, dvo, dvv = d1
    dovov, dvvvv, doooo, doovv, dovvo, dvvov, dovvv, dooov = d2

    log = logger.Logger(mycc.stdout, mycc.verbose)
    nocc, nvir = t1.shape
    nov = nocc * nvir

# Note Ioo, Ivv are not hermitian
    Ioo = numpy.zeros((nocc,nocc))
    Ivv = numpy.zeros((nvir,nvir))
    Ivo = numpy.zeros((nvir,nocc))
    Xvo = numpy.zeros((nvir,nocc))

    eris_oooo = _cp(eris.oooo)
    eris_ooov = _cp(eris.ooov)
    d_oooo = _cp(doooo)
    d_oooo = _cp(d_oooo + d_oooo.transpose(1,0,2,3))
    #:Ioo += numpy.einsum('jmlk,imlk->ij', d_oooo, eris_oooo) * 2
    Ioo += lib.dot(eris_oooo.reshape(nocc,-1), d_oooo.reshape(nocc,-1).T, 2)
    d_oooo = _cp(d_oooo.transpose(0,2,3,1))
    #:Xvo += numpy.einsum('iljk,ljka->ai', d_oooo, eris_ooov) * 2
    Xvo += lib.dot(eris_ooov.reshape(-1,nvir).T, d_oooo.reshape(nocc,-1).T, 2)
    Xvo +=(numpy.einsum('kj,kjia->ai', doo, eris_ooov) * 4
         - numpy.einsum('kj,ikja->ai', doo+doo.T, eris_ooov))
    eris_oooo = eris_ooov = d_oooo = None

    d_ooov = _cp(dooov)
    eris_oooo = _cp(eris.oooo)
    eris_ooov = _cp(eris.ooov)
    #:Ivv += numpy.einsum('ijkb,ijka->ab', d_ooov, eris_ooov)
    #:Ivo += numpy.einsum('jlka,jlki->ai', d_ooov, eris_oooo)
    Ivv += lib.dot(eris_ooov.reshape(-1,nvir).T, d_ooov.reshape(-1,nvir))
    Ivo += lib.dot(d_ooov.reshape(-1,nvir).T, eris_oooo.reshape(-1,nocc))
    #:Ioo += numpy.einsum('klja,klia->ij', d_ooov, eris_ooov)
    #:Xvo += numpy.einsum('kjib,kjba->ai', d_ooov, eris.oovv)
    eris_oovv = _cp(eris.oovv)
    tmp = _cp(d_ooov.transpose(0,1,3,2).reshape(-1,nocc))
    tmpooov = _cp(eris_ooov.transpose(0,1,3,2))
    Ioo += lib.dot(tmpooov.reshape(-1,nocc).T, tmp)
    Xvo += lib.dot(eris_oovv.reshape(-1,nvir).T, tmp)
    eris_oooo = tmp = None

    d_ooov = d_ooov + d_ooov.transpose(1,0,2,3)
    eris_ovov = _cp(eris.ovov)
    #:Ioo += numpy.einsum('jlka,ilka->ij', d_ooov, eris_ooov)
    #:Xvo += numpy.einsum('ijkb,kbja->ai', d_ooov, eris.ovov)
    Ioo += lib.dot(eris_ooov.reshape(nocc,-1), d_ooov.reshape(nocc,-1).T)
    Xvo += lib.dot(eris_ovov.reshape(-1,nvir).T,
                   _cp(d_ooov.transpose(0,2,3,1).reshape(nocc,-1)).T)
    d_ooov = None

    #:Ioo += numpy.einsum('kjba,kiba->ij', d_oovv, eris.oovv)
    #:Ivv += numpy.einsum('ijcb,ijca->ab', d_oovv, eris.oovv)
    #:Ivo += numpy.einsum('kjba,kjib->ai', d_oovv, eris.ooov)
    d_oovv = _cp(doovv + doovv.transpose(1,0,3,2))
    for i in range(nocc):
        Ioo += lib.dot(eris_oovv[i].reshape(nocc, -1), d_oovv[i].reshape(nocc,-1).T)
    Ivv += lib.dot(eris_oovv.reshape(-1,nvir).T, d_oovv.reshape(-1,nvir))
    Ivo += lib.dot(d_oovv.reshape(-1,nvir).T, tmpooov.reshape(-1,nocc))
    d_oovv = _ccsd.precontract(d_oovv.reshape(-1,nvir,nvir)).reshape(nocc,nocc,-1)
    eris_ooov = tmpooov = None

    blksize = 4
    d_ovov = numpy.empty((nocc,nvir,nocc,nvir))
    for p0, p1 in prange(0, nocc, blksize):
        d_ovov[p0:p1] = _cp(dovov[p0:p1])
        d_ovvo = _cp(dovvo[p0:p1])
        for i in range(p0,p1):
            d_ovov[i] += d_ovvo[i-p0].transpose(0,2,1)
    d_ovvo = None
    #:d_ovov = d_ovov + d_ovov.transpose(2,3,0,1)
    lib.transpose_sum(d_ovov.reshape(nov,nov), inplace=True)
    #:Ivo += numpy.einsum('jbka,jbki->ai', d_ovov, eris.ovoo)
    Ivo += lib.dot(d_ovov.reshape(-1,nvir).T, _cp(eris.ovoo).reshape(-1,nocc))
    #:Ioo += numpy.einsum('jakb,iakb->ij', d_ovov, eris.ovov)
    #:Ivv += numpy.einsum('jcib,jcia->ab', d_ovov, eris.ovov)
    Ioo += lib.dot(eris_ovov.reshape(nocc,-1), d_ovov.reshape(nocc,-1).T)
    Ivv += lib.dot(eris_ovov.reshape(-1,nvir).T, d_ovov.reshape(-1,nvir))

    nvir_pair = nvir * (nvir+1) // 2
    bufe_ovvv = numpy.empty((blksize,nvir,nvir,nvir))
    bufc_ovvv = numpy.empty((blksize,nvir,nvir_pair))
    bufc_ovvv.data = bufe_ovvv.data
    c_vvvo = numpy.empty((nvir_pair,nvir,nocc))
    for p0, p1 in prange(0, nocc, blksize):
        d_ovvv = numpy.empty((p1-p0,nvir,nvir,nvir))
        #:Ivo += numpy.einsum('jadc,jidc->ai', d_ovvv, eris_oovv)
        for i in range(p1-p0):
            lib.dot(dovvv[p0+i].reshape(nvir,-1),
                    eris_oovv[p0+i].reshape(nocc,-1).T, 1, Ivo, 1)

        c_ovvv = bufc_ovvv[:p1-p0]
        # tril part of (d_ovvv + d_ovvv.transpose(0,1,3,2))
        _ccsd.precontract(dovvv[p0:p1].reshape(-1,nvir,nvir), out=c_ovvv)
        for i0, i1, in prange(0, nvir_pair, BLKSIZE):
            for j0, j1 in prange(0, nvir, BLKSIZE//(p1-p0)+1):
                c_vvvo[i0:i1,j0:j1,p0:p1] = c_ovvv[:,j0:j1,i0:i1].transpose(2,1,0)
        eris_ovx = _cp(eris.ovvv[p0:p1])
        #:Xvo += numpy.einsum('jibc,jabc->ai', d_oovv, eris_ovvv)
        #:Ivv += numpy.einsum('ibdc,iadc->ab', d_ovvv, eris_ovvv)
        for i in range(p1-p0):
            lib.dot(eris_ovx[i].reshape(nvir,-1),
                    d_oovv[p0+i].reshape(nocc,-1).T, 1, Xvo, 1)
            lib.dot(eris_ovx[i].reshape(nvir,-1),
                    c_ovvv[i].reshape(nvir,-1).T, 1, Ivv, 1)

        eris_ovvv = bufe_ovvv[:p1-p0]
        _ccsd.unpack_tril(eris_ovx.reshape(-1,nvir_pair),
                          out=eris_ovvv.reshape(-1,nvir**2))
        eris_ovx = None
        #:Xvo += numpy.einsum('icjb,acjb->ai', d_ovov, eris_vvov)
        d_ovvo = _cp(d_ovov[p0:p1].transpose(0,1,3,2))
        lib.dot(eris_ovvv.reshape(-1,nvir).T, d_ovvo.reshape(-1,nocc), 1, Xvo, 1)

        e_ovvo, d_ovvo = d_ovvo, None
        for i in range(p1-p0):
            d_ovvv[i] = _ccsd.sum021(dovvv[p0+i])
            e_ovvo[i] = eris_ovov[p0+i].transpose(0,2,1)
        #:Ivo += numpy.einsum('jcab,jcib->ai', d_ovvv, eris_ovov)
        #:Ivv += numpy.einsum('icdb,icda->ab', d_ovvv, eris_ovvv)
        lib.dot(d_ovvv.reshape(-1,nvir).T,
                e_ovvo[:p1-p0].reshape(-1,nocc), 1, Ivo, 1)
        lib.dot(eris_ovvv.reshape(-1,nvir).T, d_ovvv.reshape(-1,nvir), 1, Ivv, 1)

        Xvo[:,p0:p1] +=(numpy.einsum('cb,iacb->ai', dvv, eris_ovvv) * 4
                      - numpy.einsum('cb,icba->ai', dvv+dvv.T, eris_ovvv))
    d_oovv = d_ovvv = bufc_ovvv = bufe_ovvv = None
    eris_ovov = eris_ovvv = eris_oovv = e_ovvo = None

    eris_ovvv = _cp(eris.ovvv)
    bufe_vvvo = numpy.empty((blksize*nvir,nvir,nocc))
    bufe_vvvv = numpy.empty((blksize*nvir,nvir,nvir))
    bufd_vvvv = numpy.empty((blksize*nvir,nvir,nvir))
    for p0, p1 in prange(0, nvir, blksize):
        off0 = p0*(p0+1)//2
        off1 = p1*(p1+1)//2
        d_vvvv = _cp(dvvvv[off0:off1]) * 4
        for i in range(p0, p1):
            d_vvvv[i*(i+1)//2+i-off0] *= .5
        d_vvvv = _ccsd.unpack_tril(d_vvvv, out=bufd_vvvv[:off1-off0])
        eris_vvvv = _ccsd.unpack_tril(eris.vvvv[off0:off1], out=bufe_vvvv[:off1-off0])
        #:Ivv += numpy.einsum('decb,deca->ab', d_vvvv, eris_vvvv) * 2
        #:Xvo += numpy.einsum('icdb,acdb->ai', d_ovvv, eris_vvvv)
        lib.dot(eris_vvvv.reshape(-1,nvir).T, d_vvvv.reshape(-1,nvir), 2, Ivv, 1)
        d_vvvo = _cp(c_vvvo[off0:off1])
        lib.dot(eris_vvvv.reshape(-1,nvir).T, d_vvvo.reshape(-1,nocc), 1, Xvo, 1)

        #:Ioo += numpy.einsum('abjc,abci->ij', d_vvov, eris_vvvo)
        #:Ivo += numpy.einsum('dbca,dbci->ai', d_vvvv, eris_vvvo) * 2
        eris_vvvo = bufe_vvvo[:off1-off0]
        for i0, i1 in prange(off0, off1, BLKSIZE):
            for j0, j1 in prange(0, nvir, BLKSIZE//nocc+1):
                eris_vvvo[i0-off0:i1-off0,j0:j1,:] = eris_ovvv[:,j0:j1,i0:i1].transpose(2,1,0)
        lib.dot(eris_vvvo.reshape(-1,nocc).T, d_vvvo.reshape(-1,nocc), 1, Ioo, 1)
        lib.dot(d_vvvv.reshape(-1,nvir).T, eris_vvvo.reshape(-1,nocc), 2, Ivo, 1)

    Ioo *= -1
    Ivv *= -1
    Ivo *= -1
    Xvo += Ivo
    return Ioo, Ivv, Ivo, Xvo
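
The #: comments above record each contraction as a numpy.einsum expression, while the actual work is done by pyscf's lib.dot on reshaped views, accumulating into matrices preallocated with numpy.zeros. Below is a minimal sketch of that einsum-to-matrix-product equivalence in plain NumPy, with numpy.dot standing in for lib.dot and arbitrary small dimensions chosen for illustration:

import numpy

nocc, nvir = 4, 6
d_ovov = numpy.random.rand(nocc, nvir, nocc, nvir)
eris_ovov = numpy.random.rand(nocc, nvir, nocc, nvir)

Ioo = numpy.zeros((nocc, nocc))  # accumulators, as in the example
Ivv = numpy.zeros((nvir, nvir))

# 'jakb,iakb->ij' is a matrix product over the flattened trailing axes
Ioo += numpy.dot(eris_ovov.reshape(nocc, -1), d_ovov.reshape(nocc, -1).T)
# 'jcib,jcia->ab' is a matrix product over the flattened leading axes
Ivv += numpy.dot(eris_ovov.reshape(-1, nvir).T, d_ovov.reshape(-1, nvir))

assert numpy.allclose(Ioo, numpy.einsum('jakb,iakb->ij', d_ovov, eris_ovov))
assert numpy.allclose(Ivv, numpy.einsum('jcib,jcia->ab', d_ovov, eris_ovov))

In the example itself, lib.dot(a, b, alpha, c, beta) folds the += into the BLAS call by accumulating into c in place, which is why Ioo, Ivv, Ivo and Xvo are passed as output arguments.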

Example 17

Project: pyscf Source File: ccsd_grad.py
def IX_intermediates(mycc, t1, t2, l1, l2, eris=None, d1=None, d2=None,
                     max_memory=2000):
    if eris is None:
# Note eris are in Chemist's notation
        eris = ccsd._ERIS(mycc)
    if d1 is None:
        d1 = ccsd_rdm.gamma1_intermediates(mycc, t1, t2, l1, l2, max_memory)
    doo, dov, dvo, dvv = d1
    if d2 is None:
        _d2tmpfile = tempfile.NamedTemporaryFile()
        fd2intermediate = h5py.File(_d2tmpfile.name, 'w')
        ccsd_rdm.gamma2_outcore(mycc, t1, t2, l1, l2, fd2intermediate, max_memory)
        dovov = fd2intermediate['dovov']
        dvvvv = fd2intermediate['dvvvv']
        doooo = fd2intermediate['doooo']
        doovv = fd2intermediate['doovv']
        dovvo = fd2intermediate['dovvo']
        dovvv = fd2intermediate['dovvv']
        dooov = fd2intermediate['dooov']
    else:
        dovov, dvvvv, doooo, doovv, dovvo, dvvov, dovvv, dooov = d2

    log = logger.Logger(mycc.stdout, mycc.verbose)
    nocc, nvir = t1.shape
    nov = nocc * nvir
    nvir_pair = nvir * (nvir+1) //2
    _tmpfile = tempfile.NamedTemporaryFile()
    fswap = h5py.File(_tmpfile.name, 'w')
    fswap.create_group('e_vvov')
    fswap.create_group('c_vvov')

# Note Ioo, Ivv are not hermitian
    Ioo = numpy.zeros((nocc,nocc))
    Ivv = numpy.zeros((nvir,nvir))
    Ivo = numpy.zeros((nvir,nocc))
    Xvo = numpy.zeros((nvir,nocc))

    eris_oooo = _cp(eris.oooo)
    eris_ooov = _cp(eris.ooov)
    d_oooo = _cp(doooo)
    d_oooo = _cp(d_oooo + d_oooo.transpose(1,0,2,3))
    #:Ioo += numpy.einsum('jmlk,imlk->ij', d_oooo, eris_oooo) * 2
    Ioo += lib.dot(eris_oooo.reshape(nocc,-1), d_oooo.reshape(nocc,-1).T, 2)
    d_oooo = _cp(d_oooo.transpose(0,2,3,1))
    #:Xvo += numpy.einsum('iljk,ljka->ai', d_oooo, eris_ooov) * 2
    Xvo += lib.dot(eris_ooov.reshape(-1,nvir).T, d_oooo.reshape(nocc,-1).T, 2)
    Xvo += (numpy.einsum('kj,kjia->ai', doo, eris_ooov) * 4
          - numpy.einsum('kj,ikja->ai', doo+doo.T, eris_ooov))
    eris_oooo = eris_ooov = d_oooo = None

    d_ovov = numpy.empty((nocc,nvir,nocc,nvir))
    blksize = 8
    for p0, p1 in prange(0, nocc, blksize):
        d_ovov[p0:p1] = _cp(dovov[p0:p1])
        d_ovvo = _cp(dovvo[p0:p1])
        for i in range(p0,p1):
            d_ovov[i] += d_ovvo[i-p0].transpose(0,2,1)
    d_ovvo = None
    d_ovov = lib.transpose_sum(d_ovov.reshape(nov,nov)).reshape(nocc,nvir,nocc,nvir)
    #:Ivo += numpy.einsum('jbka,jbki->ai', d_ovov, eris.ovoo)
    Ivo += lib.dot(d_ovov.reshape(-1,nvir).T, _cp(eris.ovoo).reshape(-1,nocc))
    eris_ovov = _cp(eris.ovov)
    #:Ioo += numpy.einsum('jakb,iakb->ij', d_ovov, eris.ovov)
    #:Ivv += numpy.einsum('jcib,jcia->ab', d_ovov, eris.ovov)
    Ioo += lib.dot(eris_ovov.reshape(nocc,-1), d_ovov.reshape(nocc,-1).T)
    Ivv += lib.dot(eris_ovov.reshape(-1,nvir).T, d_ovov.reshape(-1,nvir))
    eris_ovov = None
    fswap['dovvo'] = d_ovov.transpose(0,1,3,2)
    d_ovov = None

    max_memory1 = max_memory - lib.current_memory()[0]
    unit = max(nvir**3*2.5, nvir**3*2+nocc*nvir**2)
    blksize = max(ccsd.BLKMIN, int(max_memory1*1e6/8/unit))
    iobuflen = int(256e6/8/(blksize*nvir))
    log.debug1('IX_intermediates pass 1: block size = %d, nocc = %d in %d blocks',
               blksize, nocc, int((nocc+blksize-1)/blksize))
    for istep, (p0, p1) in enumerate(prange(0, nocc, blksize)):
        d_ooov = _cp(dooov[p0:p1])
        eris_oooo = _cp(eris.oooo[p0:p1])
        eris_ooov = _cp(eris.ooov[p0:p1])
        #:Ivv += numpy.einsum('ijkb,ijka->ab', d_ooov, eris_ooov)
        #:Ivo += numpy.einsum('jlka,jlki->ai', d_ooov, eris_oooo)
        Ivv += lib.dot(eris_ooov.reshape(-1,nvir).T, d_ooov.reshape(-1,nvir))
        Ivo += lib.dot(d_ooov.reshape(-1,nvir).T, eris_oooo.reshape(-1,nocc))
        #:Ioo += numpy.einsum('klja,klia->ij', d_ooov, eris_ooov)
        #:Xvo += numpy.einsum('kjib,kjba->ai', d_ooov, eris.oovv)
        eris_oovv = _cp(eris.oovv[p0:p1])
        tmp = _cp(d_ooov.transpose(0,1,3,2).reshape(-1,nocc))
        Ioo += lib.dot(_cp(eris_ooov.transpose(0,1,3,2).reshape(-1,nocc)).T, tmp)
        Xvo += lib.dot(eris_oovv.reshape(-1,nvir).T, tmp)
        eris_oooo = tmp = None

        d_ooov = d_ooov + dooov[:,p0:p1].transpose(1,0,2,3)
        eris_ovov = _cp(eris.ovov[p0:p1])
        #:Ioo += numpy.einsum('ljka,lika->ij', d_ooov, eris_ooov)
        #:Xvo += numpy.einsum('jikb,jakb->ai', d_ooov, eris_ovov)
        for i in range(p1-p0):
            lib.dot(eris_ooov[i].reshape(nocc,-1),
                    d_ooov[i].reshape(nocc,-1).T, 1, Ioo, 1)
            lib.dot(eris_ovov[i].reshape(nvir,-1),
                    d_ooov[i].reshape(nocc,-1).T, 1, Xvo, 1)
        d_ooov = None

        #:Ioo += numpy.einsum('kjba,kiba->ij', d_oovv, eris.oovv)
        #:Ivv += numpy.einsum('ijcb,ijca->ab', d_oovv, eris.oovv)
        #:Ivo += numpy.einsum('kjba,kjib->ai', d_oovv, eris.ooov)
        d_oovv = _cp(doovv[p0:p1]) + doovv[:,p0:p1].transpose(1,0,3,2)
        for i in range(p1-p0):
            Ioo += lib.dot(eris_oovv[i].reshape(nocc, -1), d_oovv[i].reshape(nocc,-1).T)
        Ivv += lib.dot(eris_oovv.reshape(-1,nvir).T, d_oovv.reshape(-1,nvir))
        Ivo += lib.dot(d_oovv.reshape(-1,nvir).T,
                       _cp(eris_ooov.transpose(0,1,3,2).reshape(-1,nocc)))
        eris_ooov = None
        d_oovv = _ccsd.precontract(d_oovv.reshape(-1,nvir,nvir)).reshape(p1-p0,nocc,-1)

        d_ovvv = numpy.empty((p1-p0,nvir,nvir,nvir))
        ao2mo.outcore._load_from_h5g(dovvv, p0*nvir, p1*nvir,
                                     d_ovvv.reshape(-1,nvir**2))
        #:Ivo += numpy.einsum('jadc,jidc->ai', d_ovvv, eris_oovv)
        for i in range(p1-p0):
            Ivo += lib.dot(d_ovvv[i].reshape(nvir,-1), eris_oovv[i].reshape(nocc,-1).T)
        eris_oovv = None

        # tril part of (d_ovvv + d_ovvv.transpose(0,1,3,2))
        c_ovvv = _ccsd.precontract(d_ovvv.reshape(-1,nvir,nvir))
        ao2mo.outcore._transpose_to_h5g(fswap, 'c_vvov/%d'%istep, c_ovvv, iobuflen)
        c_ovvv = c_ovvv.reshape(-1,nvir,nvir_pair)
        eris_ovx = _cp(eris.ovvv[p0:p1])
        ao2mo.outcore._transpose_to_h5g(fswap, 'e_vvov/%d'%istep,
                                        eris_ovx.reshape(-1,nvir_pair), iobuflen)
        #:Xvo += numpy.einsum('jibc,jabc->ai', d_oovv, eris_ovvv)
        #:Ivv += numpy.einsum('ibdc,iadc->ab', d_ovvv, eris_ovvv)
        for i in range(p1-p0):
            lib.dot(eris_ovx[i].reshape(nvir,-1),
                    d_oovv[i].reshape(nocc,-1).T, 1, Xvo, 1)
            lib.dot(eris_ovx[i].reshape(nvir,-1),
                    c_ovvv[i].reshape(nvir,-1).T, 1, Ivv, 1)
        c_ovvv = d_oovv = None

        eris_ovvo = numpy.empty((p1-p0,nvir,nvir,nocc))
        for i in range(p1-p0):
            d_ovvv[i] = _ccsd.sum021(d_ovvv[i])
            eris_ovvo[i] = eris_ovov[i].transpose(0,2,1)
        #:Ivo += numpy.einsum('abjc,ibjc->ai', d_ovvv, eris_ovov)
        Ivo += lib.dot(d_ovvv.reshape(-1,nvir).T, eris_ovvo.reshape(-1,nocc))
        eris_ovvo = eris_ovov = None

        eris_ovvv = _ccsd.unpack_tril(eris_ovx.reshape(-1,nvir_pair))
        eris_ovx = None
        eris_ovvv = eris_ovvv.reshape(p1-p0,nvir,nvir,nvir)
        #:Ivv += numpy.einsum('icdb,icda->ab', d_ovvv, eris_ovvv)
        #:Xvo += numpy.einsum('jibc,jabc->ai', d_oovv, eris_ovvv)
        Ivv += lib.dot(eris_ovvv.reshape(-1,nvir).T, d_ovvv.reshape(-1,nvir))
        Xvo[:,p0:p1] += (numpy.einsum('cb,iacb->ai', dvv, eris_ovvv) * 4
                       - numpy.einsum('cb,icba->ai', dvv+dvv.T, eris_ovvv))

        d_ovvo = _cp(fswap['dovvo'][p0:p1])
        #:Xvo += numpy.einsum('jbic,jbca->ai', d_ovov, eris_ovvv)
        lib.dot(eris_ovvv.reshape(-1,nvir).T, d_ovvo.reshape(-1,nocc), 1, Xvo, 1)

        d_ovvv = d_ovvo = eris_ovvv = None

    max_memory1 = max_memory - lib.current_memory()[0]
    unit = nocc*nvir**2 + nvir**3*2.5
    blksize = max(ccsd.BLKMIN, int(max_memory1*1e6/8/unit))
    log.debug1('IX_intermediates pass 2: block size = %d, nocc = %d in %d blocks',
               blksize, nocc, int((nocc+blksize-1)/blksize))
    for p0, p1 in prange(0, nvir, blksize):
        off0 = p0*(p0+1)//2
        off1 = p1*(p1+1)//2
        d_vvvv = _cp(dvvvv[off0:off1]) * 4
        for i in range(p0, p1):
            d_vvvv[i*(i+1)//2+i-off0] *= .5
        d_vvvv = _ccsd.unpack_tril(d_vvvv)
        eris_vvvv = _ccsd.unpack_tril(_cp(eris.vvvv[off0:off1]))
        #:Ivv += numpy.einsum('decb,deca->ab', d_vvvv, eris_vvvv) * 2
        #:Xvo += numpy.einsum('dbic,dbca->ai', d_vvov, eris_vvvv)
        lib.dot(eris_vvvv.reshape(-1,nvir).T, d_vvvv.reshape(-1,nvir), 2, Ivv, 1)
        #:d_vvvv = _cp(d_vvvv + d_vvvv.transpose(0,1,3,2))
        d_vvov = numpy.empty((off1-off0,nocc,nvir))
        ao2mo.outcore._load_from_h5g(fswap['c_vvov'], off0, off1, d_vvov.reshape(-1,nov))
        d_vvvo = _cp(d_vvov.transpose(0,2,1))
        lib.dot(eris_vvvv.reshape(-1,nvir).T, d_vvvo.reshape(-1,nocc), 1, Xvo, 1)
        d_vvov = eris_vvvv = None

        eris_vvov = numpy.empty((off1-off0,nocc,nvir))
        ao2mo.outcore._load_from_h5g(fswap['e_vvov'], off0, off1,
                                     eris_vvov.reshape(-1,nov))
        eris_vvvo = _cp(eris_vvov.transpose(0,2,1))
        #:Ioo += numpy.einsum('abjc,abci->ij', d_vvov, eris_vvvo)
        #:Ivo += numpy.einsum('dbca,dbci->ai', d_vvvv, eris_vvvo) * 2
        lib.dot(d_vvvv.reshape(-1,nvir).T, eris_vvvo.reshape(-1,nocc), 2, Ivo, 1)
        lib.dot(eris_vvvo.reshape(-1,nocc).T, d_vvvo.reshape(-1,nocc), 1, Ioo, 1)
        eris_vvov = eris_vvvo = d_vvvv = None

    del fswap['e_vvov']
    del fswap['c_vvov']
    del fswap['dovvo']
    fswap.close()
    _tmpfile = None

    if d2 is None:
        for key in list(fd2intermediate.keys()):
            del fd2intermediate[key]
        fd2intermediate.close()
        _d2tmpfile = None

    Ioo *= -1
    Ivv *= -1
    Ivo *= -1
    Xvo += Ivo
    return Ioo, Ivv, Ivo, Xvo
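
Both passes bound their memory use by looping prange blocks over HDF5-backed intermediates, pulling one slice at a time into NumPy arrays and accumulating into small numpy.zeros matrices. Here is a minimal, self-contained sketch of that out-of-core blocking pattern, with hypothetical sizes and an h5py scratch file as in the example:

import tempfile
import h5py
import numpy

def prange(start, end, step):
    # yield (p0, p1) block boundaries, as in the examples above
    for p0 in range(start, end, step):
        yield p0, min(p0 + step, end)

nocc, nvir, blksize = 8, 20, 3
with tempfile.NamedTemporaryFile() as tmp:
    with h5py.File(tmp.name, 'w') as fswap:
        fswap['d_ovov'] = numpy.random.rand(nocc, nvir, nocc, nvir)
        eris_ovov = numpy.random.rand(nocc, nvir, nocc, nvir)

        Ioo = numpy.zeros((nocc, nocc))   # accumulator, filled block by block
        for p0, p1 in prange(0, nocc, blksize):
            blk = fswap['d_ovov'][p0:p1]  # only this slice is in memory
            Ioo[:, p0:p1] += numpy.dot(eris_ovov.reshape(nocc, -1),
                                       blk.reshape(p1 - p0, -1).T)

        # blocked result agrees with the all-in-memory contraction
        assert numpy.allclose(
            Ioo, numpy.einsum('iakb,jakb->ij', eris_ovov, fswap['d_ovov'][:]))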

Example 18

Project: sed_eval Source File: sound_event.py
Function: evaluate
    def evaluate(self, reference_event_list, estimated_event_list):
        """Evaluate file pair (reference and estimated)

        Parameters
        ----------

        reference_event_list : event list
            Reference event list

        estimated_event_list : event list
            Estimated event list

        Returns
        -------
            Nothing

        """
        self.evaluated_length += util.max_event_offset(reference_event_list)
        self.evaluated_files += 1

        # Overall metrics

        # Total number of detected and reference events
        Nsys = len(estimated_event_list)
        Nref = len(reference_event_list)

        sys_correct = numpy.zeros(Nsys, dtype=bool)
        ref_correct = numpy.zeros(Nref, dtype=bool)

        # Number of correctly detected events
        for j in range(0, len(reference_event_list)):
            for i in range(0, len(estimated_event_list)):
                if not sys_correct[i]:  # skip already matched events
                    label_condition = reference_event_list[j]['event_label'] == estimated_event_list[i]['event_label']

                    if self.evaluate_onset:
                        onset_condition = self.validate_onset(reference_event=reference_event_list[j],
                                                              estimated_event=estimated_event_list[i],
                                                              t_collar=self.t_collar)
                    else:
                        onset_condition = True

                    if self.evaluate_offset:
                        offset_condition = self.validate_offset(reference_event=reference_event_list[j],
                                                                estimated_event=estimated_event_list[i],
                                                                t_collar=self.t_collar,
                                                                percentage_of_length=self.percentage_of_length)
                    else:
                        offset_condition = True

                    if label_condition and onset_condition and offset_condition:
                        ref_correct[j] = True
                        sys_correct[i] = True
                        break

        Ntp = numpy.sum(sys_correct)

        # invert the boolean masks; numpy.negative is not defined for bool arrays
        ref_leftover = numpy.nonzero(numpy.logical_not(ref_correct))[0]
        sys_leftover = numpy.nonzero(numpy.logical_not(sys_correct))[0]

        # Substitutions
        Nsubs = 0
        sys_counted = numpy.zeros(Nsys, dtype=bool)
        for j in ref_leftover:
            for i in sys_leftover:
                if not sys_counted[i]:
                    if self.evaluate_onset:
                        onset_condition = self.validate_onset(reference_event=reference_event_list[j],
                                                              estimated_event=estimated_event_list[i],
                                                              t_collar=self.t_collar)
                    else:
                        onset_condition = True

                    if self.evaluate_offset:
                        offset_condition = self.validate_offset(reference_event=reference_event_list[j],
                                                                estimated_event=estimated_event_list[i],
                                                                t_collar=self.t_collar,
                                                                percentage_of_length=self.percentage_of_length)
                    else:
                        offset_condition = True

                    if onset_condition and offset_condition:
                        sys_counted[i] = True
                        Nsubs += 1
                        break

        Nfp = Nsys - Ntp - Nsubs
        Nfn = Nref - Ntp - Nsubs

        self.overall['Nref'] += Nref
        self.overall['Nsys'] += Nsys
        self.overall['Ntp'] += Ntp
        self.overall['Nsubs'] += Nsubs
        self.overall['Nfp'] += Nfp
        self.overall['Nfn'] += Nfn

        # Class-wise metrics
        for class_id, class_label in enumerate(self.event_label_list):
            Nref = 0.0
            Nsys = 0.0
            Ntp = 0.0

            # Count event frequencies in the ground truth
            for i in range(0, len(reference_event_list)):
                if reference_event_list[i]['event_label'] == class_label:
                    Nref += 1

            # Count event frequencies in the system output
            for i in range(0, len(estimated_event_list)):
                if estimated_event_list[i]['event_label'] == class_label:
                    Nsys += 1

            sys_counted = numpy.zeros(len(estimated_event_list), dtype=bool)
            for j in range(0, len(reference_event_list)):
                if reference_event_list[j]['event_label'] == class_label:
                    for i in range(0, len(estimated_event_list)):
                        if estimated_event_list[i]['event_label'] == class_label and not sys_counted[i]:
                            if self.evaluate_onset:
                                onset_condition = self.validate_onset(reference_event=reference_event_list[j],
                                                                      estimated_event=estimated_event_list[i],
                                                                      t_collar=self.t_collar)
                            else:
                                onset_condition = True

                            if self.evaluate_offset:
                                offset_condition = self.validate_offset(reference_event=reference_event_list[j],
                                                                        estimated_event=estimated_event_list[i],
                                                                        t_collar=self.t_collar,
                                                                        percentage_of_length=self.percentage_of_length)
                            else:
                                offset_condition = True

                            if onset_condition and offset_condition:
                                sys_counted[i] = True
                                Ntp += 1
                                break

            Nfp = Nsys - Ntp
            Nfn = Nref - Ntp

            self.class_wise[class_label]['Nref'] += Nref
            self.class_wise[class_label]['Nsys'] += Nsys

            self.class_wise[class_label]['Ntp'] += Ntp
            self.class_wise[class_label]['Nfp'] += Nfp
            self.class_wise[class_label]['Nfn'] += Nfn
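
The numpy.zeros(..., dtype=bool) arrays act as "already matched" flags so that each estimated event can pair with at most one reference event during the greedy matching. A stripped-down sketch of that bookkeeping, with hypothetical events carrying only an event_label key and matched on label alone (no onset/offset collar checks):

import numpy

reference = [{'event_label': 'dog'}, {'event_label': 'car'}]
estimated = [{'event_label': 'car'}, {'event_label': 'cat'}]

sys_correct = numpy.zeros(len(estimated), dtype=bool)
ref_correct = numpy.zeros(len(reference), dtype=bool)

for j, ref in enumerate(reference):
    for i, est in enumerate(estimated):
        if not sys_correct[i] and ref['event_label'] == est['event_label']:
            ref_correct[j] = sys_correct[i] = True
            break  # each estimated event matches at most one reference event

Ntp = int(numpy.sum(sys_correct))  # true positives
ref_leftover = numpy.nonzero(numpy.logical_not(ref_correct))[0]
sys_leftover = numpy.nonzero(numpy.logical_not(sys_correct))[0]
print(Ntp, ref_leftover, sys_leftover)  # 1 [0] [1]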

Example 19

Project: Printrun Source File: actors.py
    def load_data(self, model_data, callback=None):
        t_start = time.time()
        self.gcode = model_data

        self.count_travel_indices = count_travel_indices = [0]
        self.count_print_indices = count_print_indices = [0]
        self.count_print_vertices = count_print_vertices = [0]

        # Some trivial computations, but that's mostly for documentation :)
        # Not like 10 multiplications are going to cost much time vs what's
        # about to happen :)

        # Max number of values which can be generated per gline
        # to store coordinates/colors/normals.
        # Nicely enough we have 3 per kind of thing for all kinds.
        coordspervertex = 3
        verticesperline = 8
        coordsperline = coordspervertex * verticesperline
        coords_count = lambda nlines: nlines * coordsperline

        travelverticesperline = 2
        travelcoordsperline = coordspervertex * travelverticesperline
        travel_coords_count = lambda nlines: nlines * travelcoordsperline

        trianglesperface = 2
        facesperbox = 4
        trianglesperbox = trianglesperface * facesperbox
        verticespertriangle = 3
        indicesperbox = verticespertriangle * trianglesperbox
        boxperline = 2
        indicesperline = indicesperbox * boxperline
        indices_count = lambda nlines: nlines * indicesperline

        nlines = len(model_data)
        ntravelcoords = travel_coords_count(nlines)
        ncoords = coords_count(nlines)
        nindices = indices_count(nlines)
        travel_vertices = self.travels = numpy.zeros(ntravelcoords, dtype = GLfloat)
        travel_vertex_k = 0
        vertices = self.vertices = numpy.zeros(ncoords, dtype = GLfloat)
        vertex_k = 0
        colors = self.colors = numpy.zeros(ncoords, dtype = GLfloat)
        color_k = 0
        normals = self.normals = numpy.zeros(ncoords, dtype = GLfloat)
        normal_k = 0
        indices = self.indices = numpy.zeros(nindices, dtype = GLuint)
        index_k = 0
        self.layer_idxs_map = {}
        self.layer_stops = [0]

        prev_is_extruding = False
        prev_move_normal_x = None
        prev_move_normal_y = None
        prev_move_angle = None

        prev_pos = (0, 0, 0)
        layer_idx = 0

        self.printed_until = 0
        self.only_current = False

        twopi = 2 * math.pi

        processed_lines = 0

        while layer_idx < len(model_data.all_layers):
            with self.lock:
                nlines = len(model_data)
                remaining_lines = nlines - processed_lines
                # Only reallocate memory which might be needed, not memory
                # for everything
                ntravelcoords = coords_count(remaining_lines) + travel_vertex_k
                ncoords = coords_count(remaining_lines) + vertex_k
                nindices = indices_count(remaining_lines) + index_k
                if ncoords > vertices.size:
                    self.travels.resize(ntravelcoords, refcheck = False)
                    self.vertices.resize(ncoords, refcheck = False)
                    self.colors.resize(ncoords, refcheck = False)
                    self.normals.resize(ncoords, refcheck = False)
                    self.indices.resize(nindices, refcheck = False)
                layer = model_data.all_layers[layer_idx]
                has_movement = False
                for gline_idx, gline in enumerate(layer):
                    if not gline.is_move:
                        continue
                    if gline.x is None and gline.y is None and gline.z is None:
                        continue
                    has_movement = True
                    current_pos = (gline.current_x, gline.current_y, gline.current_z)
                    if not gline.extruding:
                        travel_vertices[travel_vertex_k] = prev_pos[0]
                        travel_vertices[travel_vertex_k + 1] = prev_pos[1]
                        travel_vertices[travel_vertex_k + 2] = prev_pos[2]
                        travel_vertices[travel_vertex_k + 3] = current_pos[0]
                        travel_vertices[travel_vertex_k + 4] = current_pos[1]
                        travel_vertices[travel_vertex_k + 5] = current_pos[2]
                        travel_vertex_k += 6
                        prev_is_extruding = False
                    else:
                        gline_color = self.movement_color(gline)

                        next_move = get_next_move(model_data, layer_idx, gline_idx)
                        next_is_extruding = (next_move.extruding
                                             if next_move is not None else False)

                        delta_x = current_pos[0] - prev_pos[0]
                        delta_y = current_pos[1] - prev_pos[1]
                        norm = delta_x * delta_x + delta_y * delta_y
                        if norm == 0:  # Don't draw anything if this move is Z+E only
                            continue
                        norm = math.sqrt(norm)
                        move_normal_x = - delta_y / norm
                        move_normal_y = delta_x / norm
                        move_angle = math.atan2(delta_y, delta_x)

                        # FIXME: compute these dynamically
                        path_halfwidth = self.path_halfwidth * 1.2
                        path_halfheight = self.path_halfheight * 1.2

                        new_indices = []
                        new_vertices = []
                        new_normals = []
                        if prev_is_extruding:
                            # Store previous vertices indices
                            prev_id = vertex_k / 3 - 4
                            avg_move_normal_x = (prev_move_normal_x + move_normal_x) / 2
                            avg_move_normal_y = (prev_move_normal_y + move_normal_y) / 2
                            norm = avg_move_normal_x * avg_move_normal_x + avg_move_normal_y * avg_move_normal_y
                            if norm == 0:
                                avg_move_normal_x = move_normal_x
                                avg_move_normal_y = move_normal_y
                            else:
                                norm = math.sqrt(norm)
                                avg_move_normal_x /= norm
                                avg_move_normal_y /= norm
                            delta_angle = move_angle - prev_move_angle
                            delta_angle = (delta_angle + twopi) % twopi
                            fact = abs(math.cos(delta_angle / 2))
                            # If move is turning too much, avoid creating a big peak
                            # by adding an intermediate box
                            if fact < 0.5:
                                # FIXME: It looks like there's some heavy code duplication here...
                                hw = path_halfwidth
                                p1x = prev_pos[0] - hw * prev_move_normal_x
                                p2x = prev_pos[0] + hw * prev_move_normal_x
                                p1y = prev_pos[1] - hw * prev_move_normal_y
                                p2y = prev_pos[1] + hw * prev_move_normal_y
                                new_vertices.extend((prev_pos[0], prev_pos[1], prev_pos[2] + path_halfheight))
                                new_vertices.extend((p1x, p1y, prev_pos[2]))
                                new_vertices.extend((prev_pos[0], prev_pos[1], prev_pos[2] - path_halfheight))
                                new_vertices.extend((p2x, p2y, prev_pos[2]))
                                new_normals.extend((0, 0, 1))
                                new_normals.extend((-prev_move_normal_x, -prev_move_normal_y, 0))
                                new_normals.extend((0, 0, -1))
                                new_normals.extend((prev_move_normal_x, prev_move_normal_y, 0))
                                first = vertex_k / 3
                                # Link to previous
                                new_indices += triangulate_box(prev_id, prev_id + 1,
                                                               prev_id + 2, prev_id + 3,
                                                               first, first + 1,
                                                               first + 2, first + 3)
                                p1x = prev_pos[0] - hw * move_normal_x
                                p2x = prev_pos[0] + hw * move_normal_x
                                p1y = prev_pos[1] - hw * move_normal_y
                                p2y = prev_pos[1] + hw * move_normal_y
                                new_vertices.extend((prev_pos[0], prev_pos[1], prev_pos[2] + path_halfheight))
                                new_vertices.extend((p1x, p1y, prev_pos[2]))
                                new_vertices.extend((prev_pos[0], prev_pos[1], prev_pos[2] - path_halfheight))
                                new_vertices.extend((p2x, p2y, prev_pos[2]))
                                new_normals.extend((0, 0, 1))
                                new_normals.extend((-move_normal_x, -move_normal_y, 0))
                                new_normals.extend((0, 0, -1))
                                new_normals.extend((move_normal_x, move_normal_y, 0))
                                prev_id += 4
                                first += 4
                                # Link to previous
                                new_indices += triangulate_box(prev_id, prev_id + 1,
                                                               prev_id + 2, prev_id + 3,
                                                               first, first + 1,
                                                               first + 2, first + 3)
                            else:
                                hw = path_halfwidth / fact
                                # Compute vertices
                                p1x = prev_pos[0] - hw * avg_move_normal_x
                                p2x = prev_pos[0] + hw * avg_move_normal_x
                                p1y = prev_pos[1] - hw * avg_move_normal_y
                                p2y = prev_pos[1] + hw * avg_move_normal_y
                                new_vertices.extend((prev_pos[0], prev_pos[1], prev_pos[2] + path_halfheight))
                                new_vertices.extend((p1x, p1y, prev_pos[2]))
                                new_vertices.extend((prev_pos[0], prev_pos[1], prev_pos[2] - path_halfheight))
                                new_vertices.extend((p2x, p2y, prev_pos[2]))
                                new_normals.extend((0, 0, 1))
                                new_normals.extend((-avg_move_normal_x, -avg_move_normal_y, 0))
                                new_normals.extend((0, 0, -1))
                                new_normals.extend((avg_move_normal_x, avg_move_normal_y, 0))
                                first = vertex_k / 3
                                # Link to previous
                                new_indices += triangulate_box(prev_id, prev_id + 1,
                                                               prev_id + 2, prev_id + 3,
                                                               first, first + 1,
                                                               first + 2, first + 3)
                        else:
                            # Compute vertices normal to the current move and cap it
                            p1x = prev_pos[0] - path_halfwidth * move_normal_x
                            p2x = prev_pos[0] + path_halfwidth * move_normal_x
                            p1y = prev_pos[1] - path_halfwidth * move_normal_y
                            p2y = prev_pos[1] + path_halfwidth * move_normal_y
                            new_vertices.extend((prev_pos[0], prev_pos[1], prev_pos[2] + path_halfheight))
                            new_vertices.extend((p1x, p1y, prev_pos[2]))
                            new_vertices.extend((prev_pos[0], prev_pos[1], prev_pos[2] - path_halfheight))
                            new_vertices.extend((p2x, p2y, prev_pos[2]))
                            new_normals.extend((0, 0, 1))
                            new_normals.extend((-move_normal_x, -move_normal_y, 0))
                            new_normals.extend((0, 0, -1))
                            new_normals.extend((move_normal_x, move_normal_y, 0))
                            first = vertex_k / 3
                            new_indices = triangulate_rectangle(first, first + 1,
                                                                first + 2, first + 3)

                        if not next_is_extruding:
                            # Compute caps and link everything
                            p1x = current_pos[0] - path_halfwidth * move_normal_x
                            p2x = current_pos[0] + path_halfwidth * move_normal_x
                            p1y = current_pos[1] - path_halfwidth * move_normal_y
                            p2y = current_pos[1] + path_halfwidth * move_normal_y
                            new_vertices.extend((current_pos[0], current_pos[1], current_pos[2] + path_halfheight))
                            new_vertices.extend((p1x, p1y, current_pos[2]))
                            new_vertices.extend((current_pos[0], current_pos[1], current_pos[2] - path_halfheight))
                            new_vertices.extend((p2x, p2y, current_pos[2]))
                            new_normals.extend((0, 0, 1))
                            new_normals.extend((-move_normal_x, -move_normal_y, 0))
                            new_normals.extend((0, 0, -1))
                            new_normals.extend((move_normal_x, move_normal_y, 0))
                            end_first = vertex_k / 3 + len(new_vertices) / 3 - 4
                            new_indices += triangulate_rectangle(end_first + 3, end_first + 2,
                                                                 end_first + 1, end_first)
                            new_indices += triangulate_box(first, first + 1,
                                                           first + 2, first + 3,
                                                           end_first, end_first + 1,
                                                           end_first + 2, end_first + 3)

                        for new_i, item in enumerate(new_indices):
                            indices[index_k + new_i] = item
                        index_k += len(new_indices)
                        for new_i, item in enumerate(new_vertices):
                            vertices[vertex_k + new_i] = item
                        vertex_k += len(new_vertices)
                        for new_i, item in enumerate(new_normals):
                            normals[normal_k + new_i] = item
                        normal_k += len(new_normals)
                        new_colors = list(gline_color)[:-1] * (len(new_vertices) / 3)
                        for new_i, item in enumerate(new_colors):
                            colors[color_k + new_i] = item
                        color_k += len(new_colors)

                        prev_is_extruding = True
                        prev_move_normal_x = move_normal_x
                        prev_move_normal_y = move_normal_y
                        prev_move_angle = move_angle

                    prev_pos = current_pos
                    count_travel_indices.append(travel_vertex_k / 3)
                    count_print_indices.append(index_k)
                    count_print_vertices.append(vertex_k / 3)
                    gline.gcview_end_vertex = len(count_print_indices) - 1

                if has_movement:
                    self.layer_stops.append(len(count_print_indices) - 1)
                    self.layer_idxs_map[layer_idx] = len(self.layer_stops) - 1
                    self.max_layers = len(self.layer_stops) - 1
                    self.num_layers_to_draw = self.max_layers + 1
                    self.initialized = False
                    self.loaded = True

            processed_lines += len(layer)

            if callback:
                callback(layer_idx + 1)

            yield layer_idx
            layer_idx += 1

        with self.lock:
            self.dims = ((model_data.xmin, model_data.xmax, model_data.width),
                         (model_data.ymin, model_data.ymax, model_data.depth),
                         (model_data.zmin, model_data.zmax, model_data.height))

            self.travels.resize(travel_vertex_k, refcheck = False)
            self.vertices.resize(vertex_k, refcheck = False)
            self.colors.resize(color_k, refcheck = False)
            self.normals.resize(normal_k, refcheck = False)
            self.indices.resize(index_k, refcheck = False)

            self.layer_stops = array.array('L', self.layer_stops)
            self.count_travel_indices = array.array('L', count_travel_indices)
            self.count_print_indices = array.array('L', count_print_indices)
            self.count_print_vertices = array.array('L', count_print_vertices)

            self.max_layers = len(self.layer_stops) - 1
            self.num_layers_to_draw = self.max_layers + 1
            self.initialized = False
            self.loaded = True
            self.fully_loaded = True

        t_end = time.time()

        logging.debug(_('Initialized 3D visualization in %.2f seconds') % (t_end - t_start))
        logging.debug(_('Vertex count: %d') % ((len(self.vertices) + len(self.travels)) / 3))
        yield None
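
The loader preallocates worst-case flat buffers with numpy.zeros, writes into them through running cursors (vertex_k, index_k, ...), and finally shrinks them in place with resize(..., refcheck=False). A minimal sketch of that preallocate/cursor/shrink pattern:

import numpy

max_vertices = 1000                        # worst-case estimate
vertices = numpy.zeros(max_vertices * 3, dtype=numpy.float32)
vertex_k = 0                               # running write cursor

for x, y, z in [(0.0, 0.0, 0.0), (1.0, 0.0, 0.5), (1.0, 1.0, 0.5)]:
    vertices[vertex_k:vertex_k + 3] = (x, y, z)
    vertex_k += 3

# Shrink to what was actually written; refcheck=False allows the in-place
# resize even while other names (e.g. an attribute) reference the array.
vertices.resize(vertex_k, refcheck=False)
print(vertices.reshape(-1, 3))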

Example 20

Project: spinalcordtoolbox Source File: plot_abs_error_vs_fractional_volume.py
def main():
    results_folder = param_default.results_folder
    methods_to_display = param_default.methods_to_display
    noise_std_to_display = param_default.noise_std_to_display
    tracts_std_to_display = param_default.tracts_std_to_display
    csf_value_to_display = param_default.csf_value_to_display
    nb_RL_labels = param_default.nb_RL_labels

    # Parameters for debug mode
    if param_default.debug:
        print '\n*** WARNING: DEBUG MODE ON ***\n'
        results_folder = "/Users/slevy_local/spinalcordtoolbox/dev/atlas/validate_atlas/results_20150210_200iter"#"C:/cygwin64/home/Simon_2/data_methods_comparison"
        path_sct = '/Users/slevy_local/spinalcordtoolbox' #'C:/cygwin64/home/Simon_2/spinalcordtoolbox'
    else:
        status, path_sct = commands.getstatusoutput('echo $SCT_DIR')

        # Check input parameters
        try:
            opts, args = getopt.getopt(sys.argv[1:], 'i:m:')  # define flags
        except getopt.GetoptError as err:  # check if the arguments are defined
            print str(err)  # error
            # usage() # display usage
        # if not opts:
        #     print 'Please enter the path to the result folder. Exit program.'
        #     sys.exit(1)
        #     # usage()
        for opt, arg in opts:  # explore flags
            if opt in '-i':
                results_folder = arg
            if opt in '-m':
                methods_to_display = arg

    # Append path that contains scripts, to be able to load modules
    sys.path.append(path_sct + '/scripts')
    import sct_utils as sct
    import isct_get_fractional_volume

    sct.printv("Working directory: " + os.getcwd())

    results_folder_noise = results_folder + '/noise'
    results_folder_tracts = results_folder + '/tracts'
    results_folder_csf = results_folder + '/csf'

    sct.printv('\n\nData will be extracted from folder ' + results_folder_noise + ' , ' + results_folder_tracts + ' and ' + results_folder_csf + '.', 'warning')
    sct.printv('\t\tCheck existence...')
    sct.check_folder_exist(results_folder_noise)
    sct.check_folder_exist(results_folder_tracts)
    sct.check_folder_exist(results_folder_csf)

    # Extract methods to display
    methods_to_display = methods_to_display.strip().split(',')

    # Extract file names of the results files
    fname_results_noise = glob.glob(results_folder_noise + '/*.txt')
    fname_results_tracts = glob.glob(results_folder_tracts + '/*.txt')
    fname_results_csf = glob.glob(results_folder_csf + '/*.txt')
    fname_results = fname_results_noise + fname_results_tracts + fname_results_csf
    # Remove duplicates (due to the two folders)
    # for i_fname in range(0, len(fname_results)):
    #     for j_fname in range(0, len(fname_results)):
    #         if (i_fname != j_fname) & (os.path.basename(fname_results[i_fname]) == os.path.basename(fname_results[j_fname])):
    #             fname_results.remove(fname_results[j_fname])
    file_results = []
    for fname in fname_results:
        file_results.append(os.path.basename(fname))
    for file in file_results:
        if file_results.count(file) > 1:
            ind = file_results.index(file)
            fname_results.remove(fname_results[ind])
            file_results.remove(file)

    nb_results_file = len(fname_results)

    # 1st dim: SNR, 2nd dim: tract std, 3rd dim: mean abs error, 4th dim: std abs error
    # result_array = numpy.empty((nb_results_file, nb_results_file, 3), dtype=object)
    # SNR
    snr = numpy.zeros((nb_results_file))
    # Tracts std
    tracts_std = numpy.zeros((nb_results_file))
    # CSF value
    csf_values = numpy.zeros((nb_results_file))
    # methods' name
    methods_name = []  #numpy.empty((nb_results_file, nb_method), dtype=object)
    # labels
    error_per_label = []
    std_per_label = []
    labels_id = []
    # median
    median_results = numpy.zeros((nb_results_file, 5))
    # median std across bootstraps
    median_std = numpy.zeros((nb_results_file, 5))
    # min
    min_results = numpy.zeros((nb_results_file, 5))
    # max
    max_results = numpy.zeros((nb_results_file, 5))

    #
    for i_file in range(0, nb_results_file):

        # Open file
        f = open(fname_results[i_file])  # open file
        # Extract all lines in .txt file
        lines = [line for line in f.readlines() if line.strip()]

        # extract SNR
        # find all index of lines containing the string "sigma noise"
        ind_line_noise = [lines.index(line_noise) for line_noise in lines if "sigma noise" in line_noise]
        if len(ind_line_noise) != 1:
            sct.printv("ERROR: number of lines including \"sigma noise\" is different from 1. Exit program.", 'error')
            sys.exit(1)
        else:
            # result_array[:, i_file, i_file] = int(''.join(c for c in lines[ind_line_noise[0]] if c.isdigit()))
            snr[i_file] = int(''.join(c for c in lines[ind_line_noise[0]] if c.isdigit()))

        # extract tract std
        ind_line_tract_std = [lines.index(line_tract_std) for line_tract_std in lines if
                              "range tracts" in line_tract_std]
        if len(ind_line_tract_std) != 1:
            sct.printv("ERROR: number of lines including \"range tracts\" is different from 1. Exit program.", 'error')
            sys.exit(1)
        else:
            # result_array[i_file, i_file, :] = int(''.join(c for c in lines[ind_line_tract_std[0]].split(':')[1] if c.isdigit()))
            # regex = re.compile(''('(.*)':)  # re.I makes the match case-insensitive
            # match = regex.search(lines[ind_line_tract_std[0]])
            # result_array[:, i_file, :, :] = match.group(1)  # group 1 corresponds to '.*'
            tracts_std[i_file] = int(''.join(c for c in lines[ind_line_tract_std[0]].split(':')[1] if c.isdigit()))

        # extract CSF value
        ind_line_csf_value = [lines.index(line_csf_value) for line_csf_value in lines if
                              "# value CSF" in line_csf_value]
        if len(ind_line_csf_value) != 1:
            sct.printv("ERROR: number of lines including \"range tracts\" is different from 1. Exit program.", 'error')
            sys.exit(1)
        else:
            # result_array[i_file, i_file, :] = int(''.join(c for c in lines[ind_line_tract_std[0]].split(':')[1] if c.isdigit()))
            # regex = re.compile(''('(.*)':)  # re.I makes the match case-insensitive
            # match = regex.search(lines[ind_line_tract_std[0]])
            # result_array[:, i_file, :, :] = match.group(1)  # group 1 corresponds to '.*'
            csf_values[i_file] = int(''.join(c for c in lines[ind_line_csf_value[0]].split(':')[1] if c.isdigit()))


        # extract method name
        ind_line_label = [lines.index(line_label) for line_label in lines if "Label" in line_label]
        if len(ind_line_label) != 1:
            sct.printv("ERROR: number of lines including \"Label\" is different from 1. Exit program.", 'error')
            sys.exit(1)
        else:
            # methods_name[i_file, :] = numpy.array(lines[ind_line_label[0]].strip().split(',')[1:])
            methods_name.append(lines[ind_line_label[0]].strip().replace(' ', '').split(',')[1:])

        # extract median
        ind_line_median = [lines.index(line_median) for line_median in lines if "median" in line_median]
        if len(ind_line_median) != 1:
            sct.printv("WARNING: number of lines including \"median\" is different from 1. Exit program.", 'warning')
            # sys.exit(1)
        else:
            median = lines[ind_line_median[0]].strip().split(',')[1:]
            # result_array[i_file, i_file, 0] = [float(m.split('(')[0]) for m in median]
            median_results[i_file, :] = numpy.array([float(m.split('(')[0]) for m in median])
            median_std[i_file, :] = numpy.array([float(m.split('(')[1][:-1]) for m in median])

        # extract min
        ind_line_min = [lines.index(line_min) for line_min in lines if "min," in line_min]
        if len(ind_line_min) != 1:
            sct.printv("WARNING: number of lines including \"min\" is different from 1. Exit program.", 'warning')
            # sys.exit(1)
        else:
            min_values = lines[ind_line_min[0]].strip().split(',')[1:]  # avoid shadowing builtin min
            # result_array[i_file, i_file, 1] = [float(m.split('(')[0]) for m in min_values]
            min_results[i_file, :] = numpy.array([float(m.split('(')[0]) for m in min_values])

        # extract max
        ind_line_max = [lines.index(line_max) for line_max in lines if "max" in line_max]
        if len(ind_line_max) != 1:
            sct.printv("WARNING: number of lines including \"max\" is different from 1. Exit program.", 'warning')
            # sys.exit(1)
        else:
            max_values = lines[ind_line_max[0]].strip().split(',')[1:]  # avoid shadowing builtin max
            # result_array[i_file, i_file, 1] = [float(m.split('(')[0]) for m in max_values]
            max_results[i_file, :] = numpy.array([float(m.split('(')[0]) for m in max_values])

        # extract error for each label
        error_per_label_for_file_i = []
        std_per_label_for_file_i = []
        labels_id_for_file_i = []
        # Due to 2 different kind of file structure, the number of the last label line must be adapted
        if not ind_line_median:
            ind_line_median = [len(lines) + 1]
        for i_line in range(ind_line_label[0] + 1, ind_line_median[0] - 1):
            line_label_i = lines[i_line].strip().split(',')
            error_per_label_for_file_i.append([float(error.strip().split('(')[0]) for error in line_label_i[1:]])
            std_per_label_for_file_i.append([float(error.strip().split('(')[1][:-1]) for error in line_label_i[1:]])
            labels_id_for_file_i.append(int(line_label_i[0]))
        error_per_label.append(error_per_label_for_file_i)
        std_per_label.append(std_per_label_for_file_i)
        labels_id.append(labels_id_for_file_i)

        # close file
        f.close()

    # check if all the files in the result folder were generated with the same number of methods
    if not all(x == methods_name[0] for x in methods_name):
        sct.printv(
            'ERROR: All the generated files in folder ' + results_folder + ' have not been generated with the same number of methods. Exit program.',
            'error')
        sys.exit(1)
    # check if all the files in the result folder were generated with the same labels
    if not all(x == labels_id[0] for x in labels_id):
        sct.printv(
            'ERROR: All the generated files in folder ' + results_folder + ' have not been generated with the same labels. Exit program.',
            'error')
        sys.exit(1)

    # convert the list "error_per_label" into a numpy array to ease further manipulations
    error_per_label = numpy.array(error_per_label)
    std_per_label = numpy.array(std_per_label)
    # compute different stats
    abs_error_per_labels = numpy.absolute(error_per_label)
    max_abs_error_per_meth = numpy.amax(abs_error_per_labels, axis=1)
    min_abs_error_per_meth = numpy.amin(abs_error_per_labels, axis=1)
    mean_abs_error_per_meth = numpy.mean(abs_error_per_labels, axis=1)
    std_abs_error_per_meth = numpy.std(abs_error_per_labels, axis=1)

    # average error and std across sides
    meanRL_abs_error_per_labels = numpy.zeros((error_per_label.shape[0], nb_RL_labels, error_per_label.shape[2]))
    meanRL_std_abs_error_per_labels = numpy.zeros((std_per_label.shape[0], nb_RL_labels, std_per_label.shape[2]))
    for i_file in range(0, nb_results_file):
        for i_meth in range(0, len(methods_name[i_file])):
            for i_label in range(0, nb_RL_labels):
                # find indexes of corresponding labels
                ind_ID_first_side = labels_id[i_file].index(i_label)
                ind_ID_other_side = labels_id[i_file].index(i_label + nb_RL_labels)
                # compute mean across 2 sides
                meanRL_abs_error_per_labels[i_file, i_label, i_meth] = float(error_per_label[i_file, ind_ID_first_side, i_meth] + error_per_label[i_file, ind_ID_other_side, i_meth]) / 2
                meanRL_std_abs_error_per_labels[i_file, i_label, i_meth] = float(std_per_label[i_file, ind_ID_first_side, i_meth] + std_per_label[i_file, ind_ID_other_side, i_meth]) / 2

    nb_method = len(methods_to_display)

    sct.printv('Noise std of the ' + str(nb_results_file) + ' generated files:')
    print snr
    print '----------------------------------------------------------------------------------------------------------------'
    sct.printv('Tracts std of the ' + str(nb_results_file) + ' generated files:')
    print tracts_std
    print '----------------------------------------------------------------------------------------------------------------'
    sct.printv('CSF value of the ' + str(nb_results_file) + ' generated files:')
    print csf_values
    print '----------------------------------------------------------------------------------------------------------------'
    sct.printv('Methods used to generate results for the ' + str(nb_results_file) + ' generated files:')
    print methods_name
    print '----------------------------------------------------------------------------------------------------------------'
    sct.printv('Median obtained with each method (in colons) for the ' + str(nb_results_file) + ' generated files (in lines):')
    print median_results
    print '----------------------------------------------------------------------------------------------------------------'
    sct.printv('Minimum obtained with each method (in colons) for the ' + str(
        nb_results_file) + ' generated files (in lines):')
    print min_results
    print '----------------------------------------------------------------------------------------------------------------'
    sct.printv('Maximum obtained with each method (in colons) for the ' + str(
        nb_results_file) + ' generated files (in lines):')
    print max_results
    print '----------------------------------------------------------------------------------------------------------------'
    sct.printv('Labels\' ID (in colons) for the ' + str(nb_results_file) + ' generated files (in lines):')
    print labels_id
    print '----------------------------------------------------------------------------------------------------------------'
    sct.printv('Errors obtained with each method (in colons) for the ' + str(nb_results_file) + ' generated files (in lines):')
    print error_per_label
    print '----------------------------------------------------------------------------------------------------------------'
    sct.printv('Mean errors across both sides obtained with each method (in colons) for the ' + str(nb_results_file) + ' generated files (in lines):')
    print meanRL_abs_error_per_labels


    # Compute fractional volume per label
    labels_id_FV, labels_name_FV, fract_vol_per_lab, labels_name_FV_RL_gathered, fract_vol_per_lab_RL_gathered = isct_get_fractional_volume.get_fractional_volume_per_label('./cropped_atlas/', 'info_label.txt')
    # # Get the number of voxels including at least one tract
    # nb_voxels_in_WM = isct_get_fractional_volume.get_nb_voxel_in_WM('./cropped_atlas/', 'info_label.txt')
    # normalize by the number of voxels in WM and express it as a percentage
    fract_vol_norm = numpy.divide(fract_vol_per_lab_RL_gathered, numpy.sum(fract_vol_per_lab_RL_gathered)/100)

    # NOT NECESSARY NOW WE AVERAGE ACROSS BOTH SIDES (which orders the labels)
    # # check if the order of the labels returned by the function computing the fractional volumes is the same (which should be the case)
    # if labels_id_FV != labels_id[0]:
    #     sct.printv('\n\nERROR: the labels IDs returned by the function \'i_sct_get_fractional_volume\' are different from the labels IDs of the results files\n\n', 'error')

    # # Remove labels #30 and #31
    # labels_id_FV_29, labels_name_FV_29, fract_vol_per_lab_29 = labels_id_FV[:-2], labels_name_FV[:-2], fract_vol_per_lab[:-2]

    # indexes of labels sort according to the fractional volume
    ind_labels_sort = numpy.argsort(fract_vol_norm)

    # Find index of the file generated with noise variance = 10 and tracts std = 10
    ind_file_to_display = numpy.where((snr == noise_std_to_display) & (tracts_std == tracts_std_to_display) & (csf_values == csf_value_to_display))

    # sort arrays in this order
    meanRL_abs_error_per_labels_sort = meanRL_abs_error_per_labels[ind_file_to_display[0], ind_labels_sort, :]
    meanRL_std_abs_error_per_labels_sort = meanRL_std_abs_error_per_labels[ind_file_to_display[0], ind_labels_sort, :]
    labels_name_sort = numpy.array(labels_name_FV_RL_gathered)[ind_labels_sort]

    # ******************************************* START PLOTTING HERE **********************************************

    # stringColor = Color()
    matplotlib.rcParams.update({'font.size': 50, 'font.family': 'trebuchet'})
    # plt.rcParams['xtick.major.pad'] = '11'
    plt.rcParams['ytick.major.pad'] = '15'

    fig = plt.figure(figsize=(60, 37))
    width = 1.0 / (nb_method + 1)
    ind_fig = numpy.arange(len(labels_name_sort)) * (1.0 + width)
    plt.ylabel('Absolute error (%)\n', fontsize=65)
    plt.xlabel('Fractional volume (% of the total number of voxels in WM)', fontsize=65)
    plt.title('Absolute error per tract as a function of their fractional volume\n\n', fontsize=30)
    plt.suptitle('(Noise std='+str(snr[ind_file_to_display[0]][0])+', Tracts std='+str(tracts_std[ind_file_to_display[0]][0])+', CSF value='+str(csf_values[ind_file_to_display[0]][0])+')', fontsize=30)

    # colors = plt.get_cmap('jet')(np.linspace(0, 1.0, nb_method))
    colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
    markers = ['o', 's', '^', 'D']
    errorbar_plots = []
    for meth, color, marker in zip(methods_to_display, colors, markers):
        i_meth = methods_name[0].index(meth)
        i_meth_to_display = methods_to_display.index(meth)

        plot_i = plt.errorbar(ind_fig + i_meth_to_display * width, meanRL_abs_error_per_labels_sort[:, i_meth], meanRL_std_abs_error_per_labels_sort[:, i_meth], color=color, marker=marker, markersize=35, lw=7, elinewidth=1, capthick=5, capsize=10)
        # plot_i = plt.boxplot(numpy.transpose(abs_error_per_labels[ind_files_csf_sort, :, i_meth]), positions=ind_fig + i_meth_to_display * width + (float(i_meth_to_display) * width) / (nb_method + 1), widths=width, boxprops=boxprops, medianprops=medianprops, flierprops=flierprops, whiskerprops=whiskerprops, capprops=capprops)
        errorbar_plots.append(plot_i)

    # add alternated vertical background colored bars
    for i_xtick in range(0, len(ind_fig), 2):
        plt.axvspan(ind_fig[i_xtick] - width - width / 2, ind_fig[i_xtick] + (nb_method + 1) * width - width / 2, facecolor='grey', alpha=0.1)

    # concatenate value of fractional volume to labels'name
    xtick_labels = [labels_name_sort[i_lab]+'\n'+r'$\bf{['+str(round(fract_vol_norm[ind_labels_sort][i_lab], 2))+']}$' for i_lab in range(0, len(labels_name_sort))]
    ind_lemniscus = numpy.where(labels_name_sort == 'spinal lemniscus (spinothalamic and spinoreticular tracts)')[0][0]
    xtick_labels[ind_lemniscus] = 'spinal lemniscus\n'+r'$\bf{['+str(round(fract_vol_norm[ind_labels_sort][ind_lemniscus], 2))+']}$'

    # plt.legend(box_plots, methods_to_display, bbox_to_anchor=(1.01, 1), loc=2, borderaxespad=0.)
    plt.legend(errorbar_plots, methods_to_display, loc=1, fontsize=50, numpoints=1)
    plt.xticks(ind_fig + (numpy.floor(float(nb_method-1)/2)) * width, xtick_labels, fontsize=45)
    # Tweak spacing to prevent clipping of tick-labels
    plt.subplots_adjust(bottom=0, top=0.95, right=0.96)
    plt.gca().set_xlim([-width, numpy.max(ind_fig) + (nb_method + 0.5) * width])
    plt.gca().set_ylim([0, 17])
    plt.gca().yaxis.set_major_locator(plt.MultipleLocator(1.0))
    plt.gca().yaxis.set_minor_locator(plt.MultipleLocator(0.5))
    plt.grid(b=True, axis='y', which='both')
    fig.autofmt_xdate()

    plt.savefig(param_default.fname_folder_to_save_fig+'/absolute_error_vs_fractional_volume.pdf', format='PDF')

    plt.show(block=False)
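
Each metric above is one row of an array preallocated with numpy.zeros((nb_results_file, ...)) and filled while the text report is parsed line by line. A small sketch of that parse-into-preallocated-rows idea, with hypothetical in-memory file contents standing in for the results .txt files:

import numpy

files = {
    'a.txt': "sigma noise: 5\nmedian, 1.0(0.1), 2.0(0.2)\n",
    'b.txt': "sigma noise: 10\nmedian, 1.5(0.3), 2.5(0.4)\n",
}

nb_results_file = len(files)
snr = numpy.zeros(nb_results_file)
median_results = numpy.zeros((nb_results_file, 2))

for i_file, text in enumerate(files.values()):
    for line in text.splitlines():
        if 'sigma noise' in line:
            snr[i_file] = int(''.join(c for c in line if c.isdigit()))
        elif line.startswith('median'):
            fields = line.strip().split(',')[1:]
            median_results[i_file, :] = [float(m.split('(')[0]) for m in fields]

print(snr)             # [ 5. 10.]
print(median_results)  # [[1.  2. ] [1.5 2.5]]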

Example 21

Project: nupic.research Source File: union_pooling_tm_learning.py
def experiment2():
  paramDir = 'params/1024_baseline/5_trainingPasses.yaml'
  outputDir = 'results/'
  params = yaml.safe_load(open(paramDir, 'r'))
  options = {'plotVerbosity': 2, 'consoleVerbosity': 2}
  plotVerbosity = 2
  consoleVerbosity = 1


  print "Running SDR overlap experiment...\n"
  print "Params dir: {0}".format(paramDir)
  print "Output dir: {0}\n".format(outputDir)

  # Dimensionality of sequence patterns
  patternDimensionality = params["patternDimensionality"]

  # Cardinality (ON / true bits) of sequence patterns
  patternCardinality = params["patternCardinality"]

  # TODO If this parameter is to be supported, the sequence generation code
  # below must change
  # Number of unique patterns from which sequences are built
  # patternAlphabetSize = params["patternAlphabetSize"]

  # Length of sequences shown to network
  sequenceLength = params["sequenceLength"]

  # Number of sequences used. Sequences may share common elements.
  numberOfSequences = params["numberOfSequences"]

  # Number of sequence passes for training the TM. Zero => no training.
  trainingPasses = params["trainingPasses"]

  tmParamOverrides = params["temporalMemoryParams"]
  upParamOverrides = params["unionPoolerParams"]

  # Generate a sequence list and an associated labeled list (both containing a
  # set of sequences separated by None)
  start = time.time()
  print "\nGenerating sequences..."
  patternAlphabetSize = sequenceLength * numberOfSequences
  patternMachine = PatternMachine(patternDimensionality, patternCardinality,
                                  patternAlphabetSize)
  sequenceMachine = SequenceMachine(patternMachine)

  numbers = sequenceMachine.generateNumbers(numberOfSequences, sequenceLength)
  generatedSequences = sequenceMachine.generateFromNumbers(numbers)
  sequenceLabels = [str(numbers[i + i*sequenceLength: i + (i+1)*sequenceLength])
                    for i in xrange(numberOfSequences)]
  labeledSequences = []
  for label in sequenceLabels:
    for _ in xrange(sequenceLength):
      labeledSequences.append(label)
    labeledSequences.append(None)

  # Set up the Temporal Memory and Union Pooler network
  print "\nCreating network..."
  experiment = UnionTemporalPoolerExperiment(tmParamOverrides, upParamOverrides)

  # Train only the Temporal Memory on the generated sequences
  # if trainingPasses > 0:
  #
  #   print "\nTraining Temporal Memory..."
  #   if consoleVerbosity > 0:
  #     print "\nPass\tBursting Columns Mean\tStdDev\tMax"
  #
  #   for i in xrange(trainingPasses):
  #     experiment.runNetworkOnSequences(generatedSequences,
  #                                      labeledSequences,
  #                                      tmLearn=True,
  #                                      upLearn=None,
  #                                      verbosity=consoleVerbosity,
  #                                      progressInterval=_SHOW_PROGRESS_INTERVAL)
  #
  #     if consoleVerbosity > 0:
  #       stats = experiment.getBurstingColumnsStats()
  #       print "{0}\t{1}\t{2}\t{3}".format(i, stats[0], stats[1], stats[2])
  #
  #     # Reset the TM monitor mixin's records accrued during this training pass
  #     # experiment.tm.mmClearHistory()
  #
  #   print
  #   print MonitorMixinBase.mmPrettyPrintMetrics(
  #     experiment.tm.mmGetDefaultMetrics())
  #   print
  #
  #   if plotVerbosity >= 2:
  #     plotNetworkState(experiment, plotVerbosity, trainingPasses, phase="Training")
  #
  # experiment.tm.mmClearHistory()
  # experiment.up.mmClearHistory()


  print "\nRunning test phase..."

  inputSequences = generatedSequences
  inputCategories = labeledSequences
  tmLearn = True
  upLearn = False
  classifierLearn = False
  currentTime = time.time()

  experiment.tm.reset()
  experiment.up.reset()

  poolingActivationTrace = numpy.zeros((experiment.up._numColumns, 1))
  activeCellsTrace = numpy.zeros((experiment.up._numColumns, 1))
  activeSPTrace = numpy.zeros((experiment.up._numColumns, 1))

  for _ in xrange(trainingPasses):
    for i in xrange(len(inputSequences)):
      sensorPattern = inputSequences[i]
      inputCategory = inputCategories[i]
      if sensorPattern is None:
        pass
      else:
        experiment.tm.compute(sensorPattern,
                        learn=tmLearn,
                        sequenceLabel=inputCategory)

        if upLearn is not None:
          activeCells, predActiveCells, burstingCols, = experiment.getUnionTemporalPoolerInput()
          experiment.up.compute(activeCells,
                          predActiveCells,
                          learn=upLearn,
                          sequenceLabel=inputCategory)

          currentPoolingActivation = experiment.up._poolingActivation.reshape((experiment.up._numColumns, 1))
          poolingActivationTrace = numpy.concatenate((poolingActivationTrace, currentPoolingActivation), 1)

          currentUnionSDR = numpy.zeros((experiment.up._numColumns, 1))
          currentUnionSDR[experiment.up._unionSDR] = 1
          activeCellsTrace = numpy.concatenate((activeCellsTrace, currentUnionSDR), 1)

          currentSPSDR = numpy.zeros((experiment.up._numColumns, 1))
          currentSPSDR[experiment.up._activeCells] = 1
          activeSPTrace = numpy.concatenate((activeSPTrace, currentSPSDR), 1)

    print "\nPass\tBursting Columns Mean\tStdDev\tMax"
    stats = experiment.getBurstingColumnsStats()
    print "{0}\t{1}\t{2}\t{3}".format(0, stats[0], stats[1], stats[2])
    print
    print MonitorMixinBase.mmPrettyPrintMetrics(\
        experiment.tm.mmGetDefaultMetrics() + experiment.up.mmGetDefaultMetrics())
    print
    experiment.tm.mmClearHistory()


  # estimate fraction of shared bits across adjacent time point
  unionSDRshared = experiment.up._mmComputeUnionSDRdiff()

  bitLifeList = experiment.up._mmComputeBitLifeStats()
  bitLife = numpy.array(bitLifeList)


  # Plot SP outputs, UP persistence and UP outputs in testing phase
  def showSequenceStartLine(ax, trainingPasses, sequenceLength):
    for i in xrange(trainingPasses):
      ax.vlines(i*sequenceLength, 0, 100, linestyles='--')

  plt.figure()
  ncolShow = 100
  f, (ax1, ax2, ax3) = plt.subplots(nrows=1,ncols=3)
  ax1.imshow(activeSPTrace[1:ncolShow,:], cmap=cm.Greys,interpolation="nearest",aspect='auto')
  showSequenceStartLine(ax1, trainingPasses, sequenceLength)
  ax1.set_title('SP SDR')
  ax1.set_ylabel('Columns')
  ax2.imshow(poolingActivationTrace[1:100,:], cmap=cm.Greys, interpolation="nearest",aspect='auto')
  showSequenceStartLine(ax2, trainingPasses, sequenceLength)
  ax2.set_title('Persistence')
  ax3.imshow(activeCellsTrace[1:ncolShow,:], cmap=cm.Greys, interpolation="nearest",aspect='auto')
  showSequenceStartLine(ax3, trainingPasses, sequenceLength)
  ax3.set_title('Union SDR')

  ax2.set_xlabel('Time (steps)')

  pp = PdfPages('results/UnionPoolingDuringTMlearning_Experiment2.pdf')
  pp.savefig()
  pp.close()

  f, (ax1, ax2, ax3) = plt.subplots(nrows=3,ncols=1)
  ax1.plot((sum(activeCellsTrace))/experiment.up._numColumns*100)
  ax1.set_ylabel('Union SDR size (%)')
  ax1.set_xlabel('Time (steps)')
  ax1.set_ylim(0,25)

  ax2.plot(unionSDRshared)
  ax2.set_ylabel('Shared Bits')
  ax2.set_xlabel('Time (steps)')

  ax3.hist(bitLife)
  ax3.set_xlabel('Life duration for each bit')
  pp = PdfPages('results/UnionSDRproperty_Experiment2.pdf')
  pp.savefig()
  pp.close()
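
Each trace above starts as a single `numpy.zeros` column and grows with `numpy.concatenate` at every time step, which copies the whole array each time. For long runs it is usually cheaper to preallocate the full trace with `numpy.zeros` and assign into columns. A hedged sketch of both variants with made-up sizes:

import numpy

numColumns, numSteps = 1024, 200

# pattern used in the example: grow one column at a time (quadratic copying)
trace = numpy.zeros((numColumns, 1))
for _ in range(numSteps):
    col = numpy.random.rand(numColumns, 1)
    trace = numpy.concatenate((trace, col), 1)

# usually faster: preallocate once, then assign into columns in place
trace2 = numpy.zeros((numColumns, numSteps))
for t in range(numSteps):
    trace2[:, t] = numpy.random.rand(numColumns)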

Example 22

Project: NearPy Source File: example2.py
def example2():

    # Dimension of feature space
    DIM = 100

    # Number of data points (don't use too many because of the exact search)
    POINTS = 20000

    ##########################################################

    print('Performing indexing with HashPermutations...')
    t0 = time.time()

    # Create permutations meta-hash
    permutations = HashPermutations('permut')

    # Create binary hash as child hash
    rbp_perm = RandomBinaryProjections('rbp_perm', 14)
    rbp_conf = {'num_permutation':50,'beam_size':10,'num_neighbour':100}

    # Add rbp as child hash of permutations hash
    permutations.add_child_hash(rbp_perm, rbp_conf)

    # Create engine
    engine_perm = Engine(DIM, lshashes=[permutations], distance=CosineDistance())

    # First index some random vectors
    matrix = numpy.zeros((POINTS,DIM))
    for i in range(POINTS):
        v = numpy.random.randn(DIM)
        matrix[i] = v
        engine_perm.store_vector(v)

    # Then update permuted index
    permutations.build_permuted_index()

    t1 = time.time()
    print('Indexing took %f seconds' % (t1-t0))

    # Get random query vector
    query = numpy.random.randn(DIM)

    # Do random query on the HashPermutations engine
    print('\nNeighbour distances with HashPermutations:')
    print('  -> Candidate count is %d' % engine_perm.candidate_count(query))
    results = engine_perm.neighbours(query)
    dists = [x[2] for x in results]
    print(dists)

    # Real neighbours
    print('\nReal neighbour distances:')
    query = query.reshape((DIM))
    dists = CosineDistance().distance(matrix, query)
    dists = dists.reshape((-1,))
    dists = sorted(dists)
    print(dists[:10])

    ##########################################################

    print('\nPerforming indexing with HashPermutationMapper...')
    t0 = time.time()

    # Create permutations meta-hash
    permutations2 = HashPermutationMapper('permut2')

    # Create binary hash as child hash
    rbp_perm2 = RandomBinaryProjections('rbp_perm2', 14)

    # Add rbp as child hash of permutations hash
    permutations2.add_child_hash(rbp_perm2)

    # Create engine
    engine_perm2 = Engine(DIM, lshashes=[permutations2], distance=CosineDistance())

    # First index some random vectors
    matrix = numpy.zeros((POINTS,DIM))
    for i in range(POINTS):
        v = numpy.random.randn(DIM)
        matrix[i] = v
        engine_perm2.store_vector(v)

    t1 = time.time()
    print('Indexing took %f seconds' % (t1-t0))

    # Get random query vector
    query = numpy.random.randn(DIM)

    # Do random query on the HashPermutationMapper engine
    print('\nNeighbour distances with HashPermutationMapper:')
    print('  -> Candidate count is %d' % engine_perm2.candidate_count(query))
    results = engine_perm2.neighbours(query)
    dists = [x[2] for x in results]
    print(dists)

    # Real neighbours
    print('\nReal neighbour distances:')
    query = query.reshape((DIM))
    dists = CosineDistance().distance(matrix,query)
    dists = dists.reshape((-1,))
    dists = sorted(dists)
    print(dists[:10])

    ##########################################################

    print('\nPerforming indexing with multiple binary hashes...')
    t0 = time.time()

    hashes = []
    for k in range(20):
        hashes.append(RandomBinaryProjections('rbp_%d' % k, 10))

    # Create engine
    engine_rbps = Engine(DIM, lshashes=hashes, distance=CosineDistance())

    # First index some random vectors
    matrix = numpy.zeros((POINTS,DIM))
    for i in range(POINTS):
        v = numpy.random.randn(DIM)
        matrix[i] = v
        engine_rbps.store_vector(v)

    t1 = time.time()
    print('Indexing took %f seconds' % (t1-t0))

    # Get random query vector
    query = numpy.random.randn(DIM)

    # Do random query on the engine with multiple binary hashes
    print('\nNeighbour distances with multiple binary hashes:')
    print('  -> Candidate count is %d' % engine_rbps.candidate_count(query))
    results = engine_rbps.neighbours(query)
    dists = [x[2] for x in results]
    print(dists)

    # Real neighbours
    print('\nReal neighbour distances:')
    query = query.reshape((DIM))
    dists = CosineDistance().distance(matrix,query)
    dists = dists.reshape((-1,))
    dists = sorted(dists)
    print(dists[:10])
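
The "real neighbour" checks above score the query against every row of the `numpy.zeros`-preallocated matrix. A standalone sketch of that exact search with plain numpy, taking cosine distance as one minus cosine similarity (which may differ in detail from NearPy's `CosineDistance`):

import numpy

POINTS, DIM = 1000, 100
matrix = numpy.zeros((POINTS, DIM))
for i in range(POINTS):
    matrix[i] = numpy.random.randn(DIM)

query = numpy.random.randn(DIM)

# cosine distance = 1 - cos(angle) between each stored row and the query
norms = numpy.linalg.norm(matrix, axis=1) * numpy.linalg.norm(query)
dists = 1.0 - matrix.dot(query) / norms
print(sorted(dists)[:10])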

Example 23

Project: pylon Source File: pips.py
def pips(f_fcn, x0, A=None, l=None, u=None, xmin=None, xmax=None,
         gh_fcn=None, hess_fcn=None, opt=None):
    """Primal-dual interior point method for NLP (non-linear programming).
    Minimize a function F(X) beginning from a starting point M{x0}, subject to
    optional linear and non-linear constraints and variable bounds::

            min f(x)
             x

    subject to::

            g(x) = 0            (non-linear equalities)
            h(x) <= 0           (non-linear inequalities)
            l <= A*x <= u       (linear constraints)
            xmin <= x <= xmax   (variable bounds)

    Note: The calling syntax is almost identical to that of FMINCON from
    MathWorks' Optimization Toolbox. The main difference is that the linear
    constraints are specified with C{A}, C{L}, C{U} instead of C{A}, C{B},
    C{Aeq}, C{Beq}. The functions for evaluating the objective function,
    constraints and Hessian are identical.

    Example from U{http://en.wikipedia.org/wiki/Nonlinear_programming}:
        >>> from numpy import array, r_, float64, dot
        >>> from scipy.sparse import csr_matrix
        >>> def f2(x):
        ...     f = -x[0] * x[1] - x[1] * x[2]
        ...     df = -r_[x[1], x[0] + x[2], x[1]]
        ...     # actually not used since 'hess_fcn' is provided
        ...     d2f = -array([[0, 1, 0], [1, 0, 1], [0, 1, 0]], float64)
        ...     return f, df, d2f
        >>> def gh2(x):
        ...     h = dot(array([[1, -1, 1],
        ...                    [1,  1, 1]]), x**2) + array([-2.0, -10.0])
        ...     dh = 2 * csr_matrix(array([[ x[0], x[0]],
        ...                                [-x[1], x[1]],
        ...                                [ x[2], x[2]]]))
        ...     g = array([])
        ...     dg = None
        ...     return h, g, dh, dg
        >>> def hess2(x, lam):
        ...     mu = lam["ineqnonlin"]
        ...     a = r_[dot(2 * array([1, 1]), mu), -1, 0]
        ...     b = r_[-1, dot(2 * array([-1, 1]),mu),-1]
        ...     c = r_[0, -1, dot(2 * array([1, 1]),mu)]
        ...     Lxx = csr_matrix(array([a, b, c]))
        ...     return Lxx
        >>> x0 = array([1, 1, 0], float64)
        >>> solution = pips(f2, x0, gh_fcn=gh2, hess_fcn=hess2)
        >>> round(solution["f"], 11) == -7.07106725919
        True
        >>> solution["output"]["iterations"]
        8

    Ported by Richard Lincoln from the MATLAB Interior Point Solver (MIPS)
    (v1.9) by Ray Zimmerman.  MIPS is distributed as part of the MATPOWER
    project, developed at the Power System Engineering Research Center (PSERC),
    Cornell. See U{http://www.pserc.cornell.edu/matpower/} for more info.
    MIPS was ported by Ray Zimmerman from C code written by H. Wang for his
    PhD dissertation:
      - "On the Computation and Application of Multi-period
        Security-Constrained Optimal Power Flow for Real-time
        Electricity Market Operations", Cornell University, May 2007.

    See also:
      - H. Wang, C. E. Murillo-Sanchez, R. D. Zimmerman, R. J. Thomas,
        "On Computational Issues of Market-Based Optimal Power Flow",
        IEEE Transactions on Power Systems, Vol. 22, No. 3, Aug. 2007,
        pp. 1185-1193.

    All parameters are optional except C{f_fcn} and C{x0}.
    @param f_fcn: Function that evaluates the objective function, its gradients
                  and Hessian for a given value of M{x}. If there are
                  non-linear constraints, the Hessian information is provided
                  by the 'hess_fcn' argument and is not required here.
    @type f_fcn: callable
    @param x0: Starting value of optimization vector M{x}.
    @type x0: array
    @param A: Optional linear constraints.
    @type A: csr_matrix
    @param l: Optional linear constraints. Default values are M{-Inf}.
    @type l: array
    @param u: Optional linear constraints. Default values are M{Inf}.
    @type u: array
    @param xmin: Optional lower bounds on the M{x} variables, defaults are
                 M{-Inf}.
    @type xmin: array
    @param xmax: Optional upper bounds on the M{x} variables, defaults are
                 M{Inf}.
    @type xmax: array
    @param gh_fcn: Function that evaluates the optional non-linear constraints
                   and their gradients for a given value of M{x}.
    @type gh_fcn: callable
    @param hess_fcn: Handle to function that computes the Hessian of the
                     Lagrangian for given values of M{x}, M{lambda} and M{mu},
                     where M{lambda} and M{mu} are the multipliers on the
                     equality and inequality constraints, M{g} and M{h},
                     respectively.
    @type hess_fcn: callable
    @param opt: optional options dictionary with the following keys, all of
                which are also optional (default values shown in parentheses)
                  - C{verbose} (False) - Controls level of progress output
                    displayed
                  - C{feastol} (1e-6) - termination tolerance for feasibility
                    condition
                  - C{gradtol} (1e-6) - termination tolerance for gradient
                    condition
                  - C{comptol} (1e-6) - termination tolerance for
                    complementarity condition
                  - C{costtol} (1e-6) - termination tolerance for cost
                    condition
                  - C{max_it} (150) - maximum number of iterations
                  - C{step_control} (False) - set to True to enable step-size
                    control
                  - C{max_red} (20) - maximum number of step-size reductions if
                    step-control is on
                  - C{cost_mult} (1.0) - cost multiplier used to scale the
                    objective function for improved conditioning. Note: The
                    same value must also be passed to the Hessian evaluation
                    function so that it can appropriately scale the objective
                    function term in the Hessian of the Lagrangian.
    @type opt: dict

    @rtype: dict
    @return: The solution dictionary has the following keys:
               - C{x} - solution vector
               - C{f} - final objective function value
               - C{converged} - exit status
                   - True = first order optimality conditions satisfied
                   - False = maximum number of iterations reached
                   - None = numerically failed
               - C{output} - output dictionary with keys:
                   - C{iterations} - number of iterations performed
                   - C{hist} - dictionary of arrays with trajectories of the
                     following: feascond, gradcond, compcond, costcond, gamma,
                     stepsize, obj, alphap, alphad
                   - C{message} - exit message
               - C{lmbda} - dictionary containing the Lagrange and Kuhn-Tucker
                 multipliers on the constraints, with keys:
                   - C{eqnonlin} - non-linear equality constraints
                   - C{ineqnonlin} - non-linear inequality constraints
                   - C{mu_l} - lower (left-hand) limit on linear constraints
                   - C{mu_u} - upper (right-hand) limit on linear constraints
                   - C{lower} - lower bound on optimization variables
                   - C{upper} - upper bound on optimization variables

    @license: Apache License version 2.0
    """
    nx = x0.shape[0]                        # number of variables
    nA = A.shape[0] if A is not None else 0 # number of original linear constr

    # default argument values
#    l = array([]) if A is None else l
#    u = array([]) if A is None else u
    l = -Inf * ones(nA) if l is None else l
    u =  Inf * ones(nA) if u is None else u
    xmin = -Inf * ones(x0.shape[0]) if xmin is None else xmin
    xmax =  Inf * ones(x0.shape[0]) if xmax is None else xmax
    if gh_fcn is None:
        nonlinear = False
        gn = array([])
        hn = array([])
    else:
        nonlinear = True

    opt = {} if opt is None else opt
    # options
    if not opt.has_key("feastol"):
        opt["feastol"] = 1e-06
    if not opt.has_key("gradtol"):
        opt["gradtol"] = 1e-06
    if not opt.has_key("comptol"):
        opt["comptol"] = 1e-06
    if not opt.has_key("costtol"):
        opt["costtol"] = 1e-06
    if not opt.has_key("max_it"):
        opt["max_it"] = 150
    if not opt.has_key("max_red"):
        opt["max_red"] = 20
    if not opt.has_key("step_control"):
        opt["step_control"] = False
    if not opt.has_key("cost_mult"):
        opt["cost_mult"] = 1
    if not opt.has_key("verbose"):
        opt["verbose"] = False

    # initialize history
    hist = {}

    # constants
    xi = 0.99995
    sigma = 0.1
    z0 = 1
    alpha_min = 1e-8
#    rho_min = 0.95
#    rho_max = 1.05
    mu_threshold = 1e-5

    # initialize
    i = 0                       # iteration counter
    converged = False           # flag
    eflag = False               # exit flag

    # add var limits to linear constraints
    eyex = eye(nx, nx, format="csr")
    AA = eyex if A is None else vstack([eyex, A], "csr")
    ll = r_[xmin, l]
    uu = r_[xmax, u]

    # split up linear constraints
    ieq = flatnonzero( absolute(uu - ll) <= EPS )
    igt = flatnonzero( (uu >=  1e10) & (ll > -1e10) )
    ilt = flatnonzero( (ll <= -1e10) & (uu <  1e10) )
    ibx = flatnonzero( (absolute(uu - ll) > EPS) & (uu < 1e10) & (ll > -1e10) )
    # zero-sized sparse matrices unsupported
    Ae = AA[ieq, :] if len(ieq) else None
    if len(ilt) or len(igt) or len(ibx):
        idxs = [(1, ilt), (-1, igt), (1, ibx), (-1, ibx)]
        Ai = vstack([sig * AA[idx, :] for sig, idx in idxs if len(idx)])
    else:
        Ai = None
    be = uu[ieq]
    bi = r_[uu[ilt], -ll[igt], uu[ibx], -ll[ibx]]

    # evaluate cost f(x0) and constraints g(x0), h(x0)
    x = x0
    f, df, _ = f_fcn(x)                 # cost
    f = f * opt["cost_mult"]
    df = df * opt["cost_mult"]
    if nonlinear:
        hn, gn, dhn, dgn = gh_fcn(x)        # non-linear constraints
        h = hn if Ai is None else r_[hn, Ai * x - bi] # inequality constraints
        g = gn if Ae is None else r_[gn, Ae * x - be] # equality constraints

        if (dhn is None) and (Ai is None):
            dh = None
        elif dhn is None:
            dh = Ai.T
        elif Ae is None:
            dh = dhn
        else:
            dh = hstack([dhn, Ai.T])

        if (dgn is None) and (Ae is None):
            dg = None
        elif dgn is None:
            dg = Ae.T
        elif Ae is None:
            dg = dgn
        else:
            dg = hstack([dgn, Ae.T])
    else:
        h = -bi if Ai is None else Ai * x - bi        # inequality constraints
        g = -be if Ae is None else Ae * x - be        # equality constraints
        dh = None if Ai is None else Ai.T     # 1st derivative of inequalities
        dg = None if Ae is None else Ae.T     # 1st derivative of equalities

    # some dimensions
    neq = g.shape[0]           # number of equality constraints
    niq = h.shape[0]           # number of inequality constraints
    neqnln = gn.shape[0]       # number of non-linear equality constraints
    niqnln = hn.shape[0]       # number of non-linear inequality constraints
    nlt = len(ilt)             # number of upper bounded linear inequalities
    ngt = len(igt)             # number of lower bounded linear inequalities
    nbx = len(ibx)             # number of doubly bounded linear inequalities

    # initialize gamma, lam, mu, z, e
    gamma = 1                  # barrier coefficient
    lam = zeros(neq)
    z = z0 * ones(niq)
    mu = z0 * ones(niq)
    k = flatnonzero(h < -z0)
    z[k] = -h[k]
    k = flatnonzero((gamma / z) > z0)
    mu[k] = gamma / z[k]
    e = ones(niq)

    # check tolerance
    f0 = f
#    if opt["step_control"]:
#        L = f + lam.T * g + mu.T * (h + z) - gamma * sum(log(z))

    Lx = df
    Lx = Lx + dg * lam if dg is not None else Lx
    Lx = Lx + dh * mu  if dh is not None else Lx

    gnorm = norm(g, Inf) if len(g) else 0.0
    lam_norm = norm(lam, Inf) if len(lam) else 0.0
    mu_norm = norm(mu, Inf) if len(mu) else 0.0
    feascond = \
        max([gnorm, max(h)]) / (1 + max([norm(x, Inf), norm(z, Inf)]))
    gradcond = \
        norm(Lx, Inf) / (1 + max([lam_norm, mu_norm]))
    compcond = dot(z, mu) / (1 + norm(x, Inf))
    costcond = absolute(f - f0) / (1 + absolute(f0))

    # save history
    hist[i] = {'feascond': feascond, 'gradcond': gradcond,
        'compcond': compcond, 'costcond': costcond, 'gamma': gamma,
        'stepsize': 0, 'obj': f / opt["cost_mult"], 'alphap': 0, 'alphad': 0}

    if opt["verbose"]:
#        s = '-sc' if opt["step_control"] else ''
#        version, date = '1.0b2', '24-Mar-2010'
#        print 'Python Interior Point Solver - PIPS%s, Version %s, %s' % \
#                    (s, version, date)
        print " it    objective   step size   feascond     gradcond     " \
              "compcond     costcond  "
        print "----  ------------ --------- ------------ ------------ " \
              "------------ ------------"
        print "%3d  %12.8g %10s %12g %12g %12g %12g" % \
            (i, (f / opt["cost_mult"]), "",
             feascond, gradcond, compcond, costcond)

    if feascond < opt["feastol"] and gradcond < opt["gradtol"] and \
        compcond < opt["comptol"] and costcond < opt["costtol"]:
        converged = True
        if opt["verbose"]:
            print "Converged!"

    # do Newton iterations
    while (not converged and i < opt["max_it"]):
        # update iteration counter
        i += 1

        # compute update step
        lmbda = {"eqnonlin": lam[range(neqnln)],
                 "ineqnonlin": mu[range(niqnln)]}
        if nonlinear:
            if hess_fcn is None:
                print "pips: Hessian evaluation via finite differences " \
                      "not yet implemented.\nPlease provide " \
                      "your own hessian evaluation function."
            Lxx = hess_fcn(x, lmbda)
        else:
            _, _, d2f = f_fcn(x)      # cost
            Lxx = d2f * opt["cost_mult"]
        rz = range(len(z))
        zinvdiag = csr_matrix((1.0 / z, (rz, rz))) if len(z) else None
        rmu = range(len(mu))
        mudiag = csr_matrix((mu, (rmu, rmu))) if len(mu) else None
        dh_zinv = None if dh is None else dh * zinvdiag
        M = Lxx if dh is None else Lxx + dh_zinv * mudiag * dh.T
        N = Lx if dh is None else Lx + dh_zinv * (mudiag * h + gamma * e)

        Ab = M if dg is None else vstack([
            hstack([M, dg]),
            hstack([dg.T, csr_matrix((neq, neq))])
        ])
        bb = r_[-N, -g]

        dxdlam = spsolve(Ab.tocsr(), bb)

        dx = dxdlam[:nx]
        dlam = dxdlam[nx:nx + neq]
        dz = -h - z if dh is None else -h - z - dh.T * dx
        dmu = -mu if dh is None else -mu + zinvdiag * (gamma * e - mudiag * dz)

        # optional step-size control
#        sc = False
        if opt["step_control"]:
            raise NotImplementedError
#            x1 = x + dx
#
#            # evaluate cost, constraints, derivatives at x1
#            f1, df1 = ipm_f(x1)          # cost
#            f1 = f1 * opt["cost_mult"]
#            df1 = df1 * opt["cost_mult"]
#            gn1, hn1, dgn1, dhn1 = ipm_gh(x1) # non-linear constraints
#            g1 = gn1 if Ai is None else r_[gn1, Ai * x1 - bi] # ieq constraints
#            h1 = hn1 if Ae is None else r_[hn1, Ae * x1 - be] # eq constraints
#            dg1 = dgn1 if Ai is None else r_[dgn1, Ai.T]      # 1st der of ieq
#            dh1 = dhn1 if Ae is None else r_[dhn1, Ae.T]      # 1st der of eqs
#
#            # check tolerance
#            Lx1 = df1 + dh1 * lam + dg1 * mu
#            feascond1 = max([ norm(h1, Inf), max(g1) ]) / \
#                (1 + max([ norm(x1, Inf), norm(z, Inf) ]))
#            gradcond1 = norm(Lx1, Inf) / \
#                (1 + max([ norm(lam, Inf), norm(mu, Inf) ]))
#
#            if feascond1 > feascond and gradcond1 > gradcond:
#                sc = True
#        if sc:
#            alpha = 1.0
#            for j in range(opt["max_red"]):
#                dx1 = alpha * dx
#                x1 = x + dx1
#                f1 = ipm_f(x1)             # cost
#                f1 = f1 * opt["cost_mult"]
#                gn1, hn1 = ipm_gh(x1)              # non-linear constraints
#                g1 = r_[gn1, Ai * x1 - bi]         # inequality constraints
#                h1 = r_[hn1, Ae * x1 - be]         # equality constraints
#                L1 = f1 + lam.H * h1 + mu.H * (g1 + z) - gamma * sum(log(z))
#                if opt["verbose"]:
#                    logger.info("\n   %3d            %10.f" % (-j, norm(dx1)))
#                rho = (L1 - L) / (Lx.H * dx1 + 0.5 * dx1.H * Lxx * dx1)
#                if rho > rho_min and rho < rho_max:
#                    break
#                else:
#                    alpha = alpha / 2.0
#            dx = alpha * dx
#            dz = alpha * dz
#            dlam = alpha * dlam
#            dmu = alpha * dmu

        # do the update
        k = flatnonzero(dz < 0.0)
        alphap = min([xi * min(z[k] / -dz[k]), 1]) if len(k) else 1.0
        k = flatnonzero(dmu < 0.0)
        alphad = min([xi * min(mu[k] / -dmu[k]), 1]) if len(k) else 1.0
        x = x + alphap * dx
        z = z + alphap * dz
        lam = lam + alphad * dlam
        mu = mu + alphad * dmu
        if niq > 0:
            gamma = sigma * dot(z, mu) / niq

        # evaluate cost, constraints, derivatives
        f, df, _ = f_fcn(x)             # cost
        f = f * opt["cost_mult"]
        df = df * opt["cost_mult"]
        if nonlinear:
            hn, gn, dhn, dgn = gh_fcn(x)                   # nln constraints
#            g = gn if Ai is None else r_[gn, Ai * x - bi] # ieq constraints
#            h = hn if Ae is None else r_[hn, Ae * x - be] # eq constraints
            h = hn if Ai is None else r_[hn, Ai * x - bi] # ieq constr
            g = gn if Ae is None else r_[gn, Ae * x - be]  # eq constr

            if (dhn is None) and (Ai is None):
                dh = None
            elif dhn is None:
                dh = Ai.T
            elif Ae is None:
                dh = dhn
            else:
                dh = hstack([dhn, Ai.T])

            if (dgn is None) and (Ae is None):
                dg = None
            elif dgn is None:
                dg = Ae.T
            elif Ae is None:
                dg = dgn
            else:
                dg = hstack([dgn, Ae.T])
        else:
            h = -bi if Ai is None else Ai * x - bi    # inequality constraints
            g = -be if Ae is None else Ae * x - be    # equality constraints
            # 1st derivatives are constant, still dh = Ai.T, dg = Ae.T

        Lx = df
        Lx = Lx + dg * lam if dg is not None else Lx
        Lx = Lx + dh * mu  if dh is not None else Lx

        gnorm = norm(g, Inf) if len(g) else 0.0
        lam_norm = norm(lam, Inf) if len(lam) else 0.0
        mu_norm = norm(mu, Inf) if len(mu) else 0.0
        feascond = \
            max([gnorm, max(h)]) / (1+max([norm(x, Inf), norm(z, Inf)]))
        gradcond = \
            norm(Lx, Inf) / (1 + max([lam_norm, mu_norm]))
        compcond = dot(z, mu) / (1 + norm(x, Inf))
        costcond = float(absolute(f - f0) / (1 + absolute(f0)))

        hist[i] = {'feascond': feascond, 'gradcond': gradcond,
            'compcond': compcond, 'costcond': costcond, 'gamma': gamma,
            'stepsize': norm(dx), 'obj': f / opt["cost_mult"],
            'alphap': alphap, 'alphad': alphad}

        if opt["verbose"]:
            print "%3d  %12.8g %10.5g %12g %12g %12g %12g" % \
                (i, (f / opt["cost_mult"]), norm(dx), feascond, gradcond,
                 compcond, costcond)

        if feascond < opt["feastol"] and gradcond < opt["gradtol"] and \
            compcond < opt["comptol"] and costcond < opt["costtol"]:
            converged = True
            if opt["verbose"]:
                print "Converged!"
        else:
            if any(isnan(x)) or (alphap < alpha_min) or \
                (alphad < alpha_min) or (gamma < EPS) or (gamma > 1.0 / EPS):
                if opt["verbose"]:
                    print "Numerically failed."
                eflag = -1
                break
            f0 = f

#            if opt["step_control"]:
#                L = f + dot(lam, g) + dot(mu * (h + z)) - gamma * sum(log(z))

    if opt["verbose"]:
        if not converged:
            print "Did not converge in %d iterations." % i

    # package results
    if eflag != -1:
        eflag = converged

    if eflag == 0:
        message = 'Did not converge'
    elif eflag == 1:
        message = 'Converged'
    elif eflag == -1:
        message = 'Numerically failed'
    else:
        raise ValueError("unexpected exit flag: %s" % eflag)

    output = {"iterations": i, "history": hist, "message": message}

    # zero out multipliers on non-binding constraints
    mu[flatnonzero( (h < -opt["feastol"]) & (mu < mu_threshold) )] = 0.0

    # un-scale cost and prices
    f = f / opt["cost_mult"]
    lam = lam / opt["cost_mult"]
    mu = mu / opt["cost_mult"]

    # re-package multipliers into struct
    lam_lin = lam[neqnln:neq]           # lambda for linear constraints
    mu_lin = mu[niqnln:niq]             # mu for linear constraints
    kl = flatnonzero(lam_lin < 0.0)     # lower bound binding
    ku = flatnonzero(lam_lin > 0.0)     # upper bound binding

    mu_l = zeros(nx + nA)
    mu_l[ieq[kl]] = -lam_lin[kl]
    mu_l[igt] = mu_lin[nlt:nlt + ngt]
    mu_l[ibx] = mu_lin[nlt + ngt + nbx:nlt + ngt + nbx + nbx]

    mu_u = zeros(nx + nA)
    mu_u[ieq[ku]] = lam_lin[ku]
    mu_u[ilt] = mu_lin[:nlt]
    mu_u[ibx] = mu_lin[nlt + ngt:nlt + ngt + nbx]

    lmbda = {'mu_l': mu_l[nx:], 'mu_u': mu_u[nx:],
             'lower': mu_l[:nx], 'upper': mu_u[:nx]}

    if niqnln > 0:
        lmbda['ineqnonlin'] = mu[:niqnln]
    if neqnln > 0:
        lmbda['eqnonlin'] = lam[:neqnln]

#    lmbda = {"eqnonlin": lam[:neqnln], 'ineqnonlin': mu[:niqnln],
#             "mu_l": mu_l[nx:], "mu_u": mu_u[nx:],
#             "lower": mu_l[:nx], "upper": mu_u[:nx]}

    solution =  {"x": x, "f": f, "converged": converged,
                 "lmbda": lmbda, "output": output}

    return solution
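
Before iterating, pips folds the variable bounds into the linear constraint set and classifies each row of l <= A*x <= u as an equality, an upper-bounded, a lower-bounded, or a doubly-bounded inequality. A standalone sketch of just that classification step, with illustrative bounds (EPS stands in for machine precision, as in the source):

from numpy import r_, ones, absolute, flatnonzero, finfo

EPS = finfo(float).eps

ll = r_[-1e20, 0.0, 0.0, -5.0]   # illustrative lower bounds
uu = r_[ 3.0, 1e20, 0.0,  5.0]   # illustrative upper bounds

ieq = flatnonzero(absolute(uu - ll) <= EPS)        # equalities
igt = flatnonzero((uu >=  1e10) & (ll > -1e10))    # lower bound only
ilt = flatnonzero((ll <= -1e10) & (uu <  1e10))    # upper bound only
ibx = flatnonzero((absolute(uu - ll) > EPS) &
                  (uu < 1e10) & (ll > -1e10))      # bounded on both sides
print(ieq, igt, ilt, ibx)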

Example 24

Project: pystruct Source File: one_slack_ssvm.py
Function: fit
    def fit(self, X, Y, constraints=None, warm_start=False, initialize=True):
        """Learn parameters using cutting plane method.

        Parameters
        ----------
        X : iterable
            Training instances. Contains the structured input objects.
            No requirement on the particular form of entries of X is made.

        Y : iterable
            Training labels. Contains the structured labels for inputs in X.
            Needs to have the same length as X.

        constraints : ignored

        warm_start : bool, default=False
            Whether we are warm-starting from a previous fit.

        initialize : boolean, default=True
            Whether to initialize the model for the data.
            Leave this True unless you really know what you are doing.
        """
        if self.verbose:
            print("Training 1-slack dual structural SVM")
        cvxopt.solvers.options['show_progress'] = self.verbose > 3
        if initialize:
            self.model.initialize(X, Y)

        # parse cache_tol parameter
        if self.cache_tol is None or self.cache_tol == 'auto':
            self.cache_tol_ = self.tol
        else:
            self.cache_tol_ = self.cache_tol

        if not warm_start:
            self.w = np.zeros(self.model.size_joint_feature)
            constraints = []
            self.objective_curve_, self.primal_objective_curve_ = [], []
            self.cached_constraint_ = []
            self.alphas = []  # dual solutions
            # append constraint given by ground truth to make our life easier
            constraints.append((np.zeros(self.model.size_joint_feature), 0))
            self.alphas.append([self.C])
            self.inference_cache_ = None
            self.timestamps_ = [time()]
        elif warm_start == "soft":
            self.w = np.zeros(self.model.size_joint_feature)
            constraints = []
            self.alphas = []  # dual solutions
            # append constraint given by ground truth to make our life easier
            constraints.append((np.zeros(self.model.size_joint_feature), 0))
            self.alphas.append([self.C])

        else:
            constraints = self.constraints_

        self.last_slack_ = -1

        # get the joint_feature of the ground truth
        if getattr(self.model, 'rescale_C', False):
            joint_feature_gt = self.model.batch_joint_feature(X, Y, Y)
        else:
            joint_feature_gt = self.model.batch_joint_feature(X, Y)

        try:
            # catch ctrl+c to stop training

            for iteration in range(self.max_iter):
                # main loop
                cached_constraint = False
                if self.verbose > 0:
                    print("iteration %d" % iteration)
                if self.verbose > 2:
                    print(self)
                try:
                    Y_hat, djoint_feature, loss_mean = self._constraint_from_cache(
                        X, Y, joint_feature_gt, constraints)
                    cached_constraint = True
                except NoConstraint:
                    try:
                        Y_hat, djoint_feature, loss_mean = self._find_new_constraint(
                            X, Y, joint_feature_gt, constraints)
                        self._update_cache(X, Y, Y_hat)
                    except NoConstraint:
                        if self.verbose:
                            print("no additional constraints")
                        if (self.switch_to is not None
                                and self.model.inference_method !=
                                self.switch_to):
                            if self.verbose:
                                print("Switching to %s inference" %
                                      str(self.switch_to))
                            self.model.inference_method_ = \
                                self.model.inference_method
                            self.model.inference_method = self.switch_to
                            continue
                        else:
                            break

                self.timestamps_.append(time() - self.timestamps_[0])
                self._compute_training_loss(X, Y, iteration)
                constraints.append((djoint_feature, loss_mean))

                # compute primal objective
                last_slack = -np.dot(self.w, djoint_feature) + loss_mean
                primal_objective = (self.C * len(X)
                                    * max(last_slack, 0)
                                    + np.sum(self.w ** 2) / 2)
                self.primal_objective_curve_.append(primal_objective)
                self.cached_constraint_.append(cached_constraint)

                objective = self._solve_1_slack_qp(constraints,
                                                   n_samples=len(X))

                # update cache tolerance if cache_tol is auto:
                if self.cache_tol == "auto" and not cached_constraint:
                    self.cache_tol_ = (primal_objective - objective) / 4

                self.last_slack_ = np.max([(-np.dot(self.w, djoint_feature) + loss_mean)
                                           for djoint_feature, loss_mean in constraints])
                self.last_slack_ = max(self.last_slack_, 0)

                if self.verbose > 0:
                    # the cutting plane objective can also be computed as
                    # self.C * len(X) * self.last_slack_ + np.sum(self.w**2)/2
                    print("cutting plane objective: %f, primal objective %f"
                          % (objective, primal_objective))
                # we only do this here because we didn't add the gt to the
                # constraints, which makes the dual behave a bit oddly
                self.objective_curve_.append(objective)
                self.constraints_ = constraints
                if self.logger is not None:
                    self.logger(self, iteration)

                if self.verbose > 5:
                    print(self.w)
        except KeyboardInterrupt:
            pass
        if self.verbose and self.n_jobs == 1:
            print("calls to inference: %d" % self.model.inference_calls)
        # compute final objective:
        self.timestamps_.append(time() - self.timestamps_[0])
        primal_objective = self._objective(X, Y)
        self.primal_objective_curve_.append(primal_objective)
        self.objective_curve_.append(objective)
        self.cached_constraint_.append(False)

        if self.logger is not None:
            self.logger(self, 'final')

        if self.verbose > 0:
            print("final primal objective: %f gap: %f"
                  % (primal_objective, primal_objective - objective))

        return self
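
The bookkeeping around `last_slack_` above reduces to: for each stored constraint (djoint_feature, loss_mean) the slack is max(loss_mean - w . djoint_feature, 0), and the current slack is the maximum over all constraints. A tiny numpy sketch of that computation with illustrative shapes:

import numpy as np

size_joint_feature = 5
w = np.zeros(size_joint_feature)                      # weights after a cold start
constraints = [(np.zeros(size_joint_feature), 0.0),   # ground-truth constraint
               (np.random.rand(size_joint_feature), 0.3)]

last_slack = max(max(loss_mean - np.dot(w, djoint_feature)
                     for djoint_feature, loss_mean in constraints), 0)
print(last_slack)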

Example 25

Project: PYPOWER Source File: ipoptopf_solver.py
def ipoptopf_solver(om, ppopt):
    """Solves AC optimal power flow using IPOPT.

    Inputs are an OPF model object and a PYPOWER options vector.

    Outputs are a C{results} dict, C{success} flag and C{raw} output dict.

    C{results} is a PYPOWER case dict (ppc) with the usual C{baseMVA}, C{bus}
    C{branch}, C{gen}, C{gencost} fields, along with the following additional
    fields:
        - C{order}      see 'help ext2int' for details of this field
        - C{x}          final value of optimization variables (internal order)
        - C{f}          final objective function value
        - C{mu}         shadow prices on ...
            - C{var}
                - C{l}  lower bounds on variables
                - C{u}  upper bounds on variables
            - C{nln}
                - C{l}  lower bounds on nonlinear constraints
                - C{u}  upper bounds on nonlinear constraints
            - C{lin}
                - C{l}  lower bounds on linear constraints
                - C{u}  upper bounds on linear constraints

    C{success} is C{True} if solver converged successfully, C{False} otherwise

    C{raw} is a raw output dict in form returned by MINOS
        - C{xr}     final value of optimization variables
        - C{pimul}  constraint multipliers
        - C{info}   solver specific termination code
        - C{output} solver specific output information

    @see: L{opf}, L{pips}

    @author: Ray Zimmerman (PSERC Cornell)
    @author: Carlos E. Murillo-Sanchez (PSERC Cornell & Universidad
    Autonoma de Manizales)
    """
    import pyipopt

    ## unpack data
    ppc = om.get_ppc()
    baseMVA, bus, gen, branch, gencost = \
        ppc['baseMVA'], ppc['bus'], ppc['gen'], ppc['branch'], ppc['gencost']
    vv, _, nn, _ = om.get_idx()

    ## problem dimensions
    nb = shape(bus)[0]          ## number of buses
    ng = shape(gen)[0]          ## number of gens
    nl = shape(branch)[0]       ## number of branches
    ny = om.getN('var', 'y')    ## number of piece-wise linear costs

    ## linear constraints
    A, l, u = om.linear_constraints()

    ## bounds on optimization vars
    _, xmin, xmax = om.getv()

    ## build admittance matrices
    Ybus, Yf, Yt = makeYbus(baseMVA, bus, branch)

    ## try to select an interior initial point
    ll = xmin.copy(); uu = xmax.copy()
    ll[xmin == -Inf] = -2e19   ## replace Inf with numerical proxies
    uu[xmax ==  Inf] =  2e19
    x0 = (ll + uu) / 2
    Varefs = bus[bus[:, BUS_TYPE] == REF, VA] * (pi / 180)
    x0[vv['i1']['Va']:vv['iN']['Va']] = Varefs[0]  ## angles set to first reference angle
    if ny > 0:
        ipwl = find(gencost[:, MODEL] == PW_LINEAR)
#        PQ = r_[gen[:, PMAX], gen[:, QMAX]]
#        c = totcost(gencost[ipwl, :], PQ[ipwl])
        ## largest y-value in CCV data
        c = gencost.flatten('F')[sub2ind(shape(gencost), ipwl, NCOST + 2 * gencost[ipwl, NCOST])]
        x0[vv['i1']['y']:vv['iN']['y']] = max(c) + 0.1 * abs(max(c))
#        x0[vv['i1']['y']:vv['iN']['y']) = c + 0.1 * abs(c)

    ## find branches with flow limits
    il = find((branch[:, RATE_A] != 0) & (branch[:, RATE_A] < 1e10))
    nl2 = len(il)           ## number of constrained lines

    ##-----  run opf  -----
    ## build Jacobian and Hessian structure
    if A is not None and issparse(A):
        nA = A.shape[0]                ## number of original linear constraints
    else:
        nA = 0
    nx = len(x0)
    f = branch[:, F_BUS]                           ## list of "from" buses
    t = branch[:, T_BUS]                           ## list of "to" buses
    Cf = sparse((ones(nl), (arange(nl), f)), (nl, nb))      ## connection matrix for line & from buses
    Ct = sparse((ones(nl), (arange(nl), t)), (nl, nb))      ## connection matrix for line & to buses
    Cl = Cf + Ct
    Cb = Cl.T * Cl + speye(nb, nb)
    Cl2 = Cl[il, :]
    Cg = sparse((ones(ng), (gen[:, GEN_BUS], arange(ng))), (nb, ng))
    nz = nx - 2 * (nb + ng)
    nxtra = nx - 2 * nb
    if nz > 0:
        Js = vstack([
            hstack([Cb,      Cb,      Cg,              sparse((nb, ng)),   sparse((nb,  nz))]),
            hstack([Cb,      Cb,      sparse((nb, ng)),   Cg,              sparse((nb,  nz))]),
            hstack([Cl2,     Cl2,     sparse((nl2, 2 * ng)),               sparse((nl2, nz))]),
            hstack([Cl2,     Cl2,     sparse((nl2, 2 * ng)),               sparse((nl2, nz))])
        ], 'coo')
    else:
        Js = vstack([
            hstack([Cb,      Cb,      Cg,              sparse((nb, ng))]),
            hstack([Cb,      Cb,      sparse((nb, ng)),   Cg,          ]),
            hstack([Cl2,     Cl2,     sparse((nl2, 2 * ng)),           ]),
            hstack([Cl2,     Cl2,     sparse((nl2, 2 * ng)),           ])
        ], 'coo')

    if A is not None and issparse(A):
        Js = vstack([Js, A], 'coo')

    f, _, d2f = opf_costfcn(x0, om, True)
    Hs = tril(d2f + vstack([
        hstack([Cb,  Cb,  sparse((nb, nxtra))]),
        hstack([Cb,  Cb,  sparse((nb, nxtra))]),
        sparse((nxtra, nx))
    ]), format='coo')

    ## set options struct for IPOPT
#    options = {}
#    options['ipopt'] = ipopt_options([], ppopt)

    ## extra data to pass to functions
    userdata = {
        'om':       om,
        'Ybus':     Ybus,
        'Yf':       Yf[il, :],
        'Yt':       Yt[il, :],
        'ppopt':    ppopt,
        'il':       il,
        'A':        A,
        'nA':       nA,
        'neqnln':   2 * nb,
        'niqnln':   2 * nl2,
        'Js':       Js,
        'Hs':       Hs
    }

    ## check Jacobian and Hessian structure
    #xr                  = rand(x0.shape)
    #lmbda               = rand( 2 * nb + 2 * nl2)
    #Js1 = eval_jac_g(x, flag, userdata) #(xr, options.auxdata)
    #Hs1  = eval_h(xr, 1, lmbda, userdata)
    #i1, j1, s = find(Js)
    #i2, j2, s = find(Js1)
    #if (len(i1) != len(i2)) | (norm(i1 - i2) != 0) | (norm(j1 - j2) != 0):
    #    raise ValueError, "something's wrong with the Jacobian structure"
    #
    #i1, j1, s = find(Hs)
    #i2, j2, s = find(Hs1)
    #if (len(i1) != len(i2)) | (norm(i1 - i2) != 0) | (norm(j1 - j2) != 0):
    #    raise ValueError, "something's wrong with the Hessian structure"

    ## define variable and constraint bounds
    # n is the number of variables
    n = x0.shape[0]
    # xl is the lower bound of x as bounded constraints
    xl = xmin
    # xu is the upper bound of x as bounded constraints
    xu = xmax

    neqnln = 2 * nb
    niqnln = 2 * nl2

    # number of constraints
    m = neqnln + niqnln + nA
    # lower bound of constraint
    gl = r_[zeros(neqnln), -Inf * ones(niqnln), l]
    # upper bound of constraints
    gu = r_[zeros(neqnln),       zeros(niqnln), u]

    # number of nonzeros in Jacobi matrix
    nnzj = Js.nnz
    # number of non-zeros in Hessian matrix, you can set it to 0
    nnzh = Hs.nnz

    eval_hessian = True
    if eval_hessian:
        hessian = lambda x, lagrange, obj_factor, flag, user_data=None: \
                eval_h(x, lagrange, obj_factor, flag, userdata)

        nlp = pyipopt.create(n, xl, xu, m, gl, gu, nnzj, nnzh,
                             eval_f, eval_grad_f, eval_g, eval_jac_g, hessian)
    else:
        nnzh = 0
        nlp = pyipopt.create(n, xl, xu, m, gl, gu, nnzj, nnzh,
                             eval_f, eval_grad_f, eval_g, eval_jac_g)

    nlp.int_option('print_level', 5)
    nlp.num_option('tol', 1.0000e-12)
    nlp.int_option('max_iter', 250)
    nlp.num_option('dual_inf_tol', 0.10000)
    nlp.num_option('constr_viol_tol', 1.0000e-06)
    nlp.num_option('compl_inf_tol', 1.0000e-05)
    nlp.num_option('acceptable_tol', 1.0000e-08)
    nlp.num_option('acceptable_constr_viol_tol', 1.0000e-04)
    nlp.num_option('acceptable_compl_inf_tol', 0.0010000)
    nlp.str_option('mu_strategy', 'adaptive')

    ## IPOPT reports progress through a callback; use a mutable container so
    ## the assignment inside the callback updates the enclosing scope (a bare
    ## "iter = iter_count" would only bind a local name)
    iters = [0]
    def intermediate_callback(algmod, iter_count, obj_value, inf_pr, inf_du,
            mu, d_norm, regularization_size, alpha_du, alpha_pr, ls_trials,
            user_data=None):
        iters[0] = iter_count
        return True

    nlp.set_intermediate_callback(intermediate_callback)

    ## run the optimization
    # returns final solution x, upper and lower bound for multiplier, final
    # objective function obj and the return status of ipopt
    x, zl, zu, obj, status, zg = nlp.solve(x0, m, userdata)

    info = {'x': x, 'zl': zl, 'zu': zu, 'obj': obj, 'status': status, 'lmbda': zg}

    nlp.close()

    success = (status == 0) | (status == 1)

    output = {'iterations': iters[0]}

    f, _ = opf_costfcn(x, om)

    ## update solution data
    Va = x[vv['i1']['Va']:vv['iN']['Va']]
    Vm = x[vv['i1']['Vm']:vv['iN']['Vm']]
    Pg = x[vv['i1']['Pg']:vv['iN']['Pg']]
    Qg = x[vv['i1']['Qg']:vv['iN']['Qg']]
    V = Vm * exp(1j * Va)

    ##-----  calculate return values  -----
    ## update voltages & generator outputs
    bus[:, VA] = Va * 180 / pi
    bus[:, VM] = Vm
    gen[:, PG] = Pg * baseMVA
    gen[:, QG] = Qg * baseMVA
    gen[:, VG] = Vm[gen[:, GEN_BUS].astype(int)]

    ## compute branch flows
    f_br = branch[:, F_BUS].astype(int)
    t_br = branch[:, T_BUS].astype(int)
    Sf = V[f_br] * conj(Yf * V)  ## cplx pwr at "from" bus, p.u.
    St = V[t_br] * conj(Yt * V)  ## cplx pwr at "to" bus, p.u.
    branch[:, PF] = Sf.real * baseMVA
    branch[:, QF] = Sf.imag * baseMVA
    branch[:, PT] = St.real * baseMVA
    branch[:, QT] = St.imag * baseMVA

    ## line constraint is actually on square of limit
    ## so we must fix multipliers
    muSf = zeros(nl)
    muSt = zeros(nl)
    if len(il) > 0:
        muSf[il] = 2 * info['lmbda'][2 * nb +       arange(nl2)] * branch[il, RATE_A] / baseMVA
        muSt[il] = 2 * info['lmbda'][2 * nb + nl2 + arange(nl2)] * branch[il, RATE_A] / baseMVA

    ## update Lagrange multipliers
    bus[:, MU_VMAX]  = info['zu'][vv['i1']['Vm']:vv['iN']['Vm']]
    bus[:, MU_VMIN]  = info['zl'][vv['i1']['Vm']:vv['iN']['Vm']]
    gen[:, MU_PMAX]  = info['zu'][vv['i1']['Pg']:vv['iN']['Pg']] / baseMVA
    gen[:, MU_PMIN]  = info['zl'][vv['i1']['Pg']:vv['iN']['Pg']] / baseMVA
    gen[:, MU_QMAX]  = info['zu'][vv['i1']['Qg']:vv['iN']['Qg']] / baseMVA
    gen[:, MU_QMIN]  = info['zl'][vv['i1']['Qg']:vv['iN']['Qg']] / baseMVA
    bus[:, LAM_P]    = info['lmbda'][nn['i1']['Pmis']:nn['iN']['Pmis']] / baseMVA
    bus[:, LAM_Q]    = info['lmbda'][nn['i1']['Qmis']:nn['iN']['Qmis']] / baseMVA
    branch[:, MU_SF] = muSf / baseMVA
    branch[:, MU_ST] = muSt / baseMVA

    ## package up results
    nlnN = om.getN('nln')

    ## extract multipliers for nonlinear constraints
    kl = find(info['lmbda'][:2 * nb] < 0)
    ku = find(info['lmbda'][:2 * nb] > 0)
    nl_mu_l = zeros(nlnN)
    nl_mu_u = r_[zeros(2 * nb), muSf, muSt]
    nl_mu_l[kl] = -info['lmbda'][kl]
    nl_mu_u[ku] =  info['lmbda'][ku]

    ## extract multipliers for linear constraints
    lam_lin = info['lmbda'][2 * nb + 2 * nl2 + arange(nA)]   ## lmbda for linear constraints
    kl = find(lam_lin < 0)                     ## lower bound binding
    ku = find(lam_lin > 0)                     ## upper bound binding
    mu_l = zeros(nA)
    mu_l[kl] = -lam_lin[kl]
    mu_u = zeros(nA)
    mu_u[ku] = lam_lin[ku]

    mu = {
      'var': {'l': info['zl'], 'u': info['zu']},
      'nln': {'l': nl_mu_l, 'u': nl_mu_u},
      'lin': {'l': mu_l, 'u': mu_u}
    }

    results = ppc
    results['bus'], results['branch'], results['gen'], \
        results['om'], results['x'], results['mu'], results['f'] = \
            bus, branch, gen, om, x, mu, f

    pimul = r_[
        results['mu']['nln']['l'] - results['mu']['nln']['u'],
        results['mu']['lin']['l'] - results['mu']['lin']['u'],
        -ones(ny > 0),
        results['mu']['var']['l'] - results['mu']['var']['u']
    ]
    raw = {'xr': x, 'pimul': pimul, 'info': info['status'], 'output': output}

    return results, success, raw
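
The "fix multipliers" step above applies the chain rule: IPOPT enforces the flow limit as |S|^2 <= rate^2, so the multiplier it returns is with respect to the squared constraint, and the multiplier on |S| <= rate is 2 * rate * lambda (divided by baseMVA to stay per-unit). A sketch of that conversion in isolation, with illustrative array sizes:

from numpy import zeros, arange, array

nb, nl, nl2 = 3, 5, 2                 # buses, branches, flow-limited branches
il = array([1, 3])                    # indices of the flow-limited branches
baseMVA = 100.0
rate_a = array([0.0, 250.0, 0.0, 300.0, 0.0])   # MVA ratings (0 = unlimited)
lmbda = zeros(2 * nb + 2 * nl2) + 0.01          # stand-in IPOPT multipliers

muSf = zeros(nl)
# d(|S|^2)/d|S| = 2|S| = 2*rate at the binding limit, hence the factor of 2
muSf[il] = 2 * lmbda[2 * nb + arange(nl2)] * rate_a[il] / baseMVA
print(muSf)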

Example 26

Project: PYPOWER Source File: pips.py
def pips(f_fcn, x0=None, A=None, l=None, u=None, xmin=None, xmax=None,
         gh_fcn=None, hess_fcn=None, opt=None):
    """Primal-dual interior point method for NLP (nonlinear programming).
    Minimize a function F(X) beginning from a starting point M{x0}, subject to
    optional linear and nonlinear constraints and variable bounds::

            min f(x)
             x

    subject to::

            g(x) = 0            (nonlinear equalities)
            h(x) <= 0           (nonlinear inequalities)
            l <= A*x <= u       (linear constraints)
            xmin <= x <= xmax   (variable bounds)

    Note: The calling syntax is almost identical to that of FMINCON from
    MathWorks' Optimization Toolbox. The main difference is that the linear
    constraints are specified with C{A}, C{L}, C{U} instead of C{A}, C{B},
    C{Aeq}, C{Beq}. The functions for evaluating the objective function,
    constraints and Hessian are identical.

    Example from U{http://en.wikipedia.org/wiki/Nonlinear_programming}:
        >>> from numpy import array, r_, float64, dot
        >>> from scipy.sparse import csr_matrix
        >>> def f2(x):
        ...     f = -x[0] * x[1] - x[1] * x[2]
        ...     df = -r_[x[1], x[0] + x[2], x[1]]
        ...     # actually not used since 'hess_fcn' is provided
        ...     d2f = -array([[0, 1, 0], [1, 0, 1], [0, 1, 0]], float64)
        ...     return f, df, d2f
        >>> def gh2(x):
        ...     h = dot(array([[1, -1, 1],
        ...                    [1,  1, 1]]), x**2) + array([-2.0, -10.0])
        ...     dh = 2 * csr_matrix(array([[ x[0], x[0]],
        ...                                [-x[1], x[1]],
        ...                                [ x[2], x[2]]]))
        ...     g = array([])
        ...     dg = None
        ...     return h, g, dh, dg
        >>> def hess2(x, lam, cost_mult=1):
        ...     mu = lam["ineqnonlin"]
        ...     a = r_[dot(2 * array([1, 1]), mu), -1, 0]
        ...     b = r_[-1, dot(2 * array([-1, 1]), mu),-1]
        ...     c = r_[0, -1, dot(2 * array([1, 1]), mu)]
        ...     Lxx = csr_matrix(array([a, b, c]))
        ...     return Lxx
        >>> x0 = array([1, 1, 0], float64)
        >>> solution = pips(f2, x0, gh_fcn=gh2, hess_fcn=hess2)
        >>> round(solution["f"], 11) == -7.07106725919
        True
        >>> solution["output"]["iterations"]
        8

    Ported by Richard Lincoln from the MATLAB Interior Point Solver (MIPS)
    (v1.9) by Ray Zimmerman.  MIPS is distributed as part of the MATPOWER
    project, developed at the Power System Engineering Research Center (PSERC),
    Cornell. See U{http://www.pserc.cornell.edu/matpower/} for more info.
    MIPS was ported by Ray Zimmerman from C code written by H. Wang for his
    PhD dissertation:
      - "On the Computation and Application of Multi-period
        Security-Constrained Optimal Power Flow for Real-time
        Electricity Market Operations", Cornell University, May 2007.

    See also:
      - H. Wang, C. E. Murillo-Sanchez, R. D. Zimmerman, R. J. Thomas,
        "On Computational Issues of Market-Based Optimal Power Flow",
        IEEE Transactions on Power Systems, Vol. 22, No. 3, Aug. 2007,
        pp. 1185-1193.

    All parameters are optional except C{f_fcn} and C{x0}.
    @param f_fcn: Function that evaluates the objective function, its gradients
                  and Hessian for a given value of M{x}. If there are
                  nonlinear constraints, the Hessian information is provided
                  by the 'hess_fcn' argument and is not required here.
    @type f_fcn: callable
    @param x0: Starting value of optimization vector M{x}.
    @type x0: array
    @param A: Optional linear constraints.
    @type A: csr_matrix
    @param l: Optional linear constraints. Default values are M{-Inf}.
    @type l: array
    @param u: Optional linear constraints. Default values are M{Inf}.
    @type u: array
    @param xmin: Optional lower bounds on the M{x} variables, defaults are
                 M{-Inf}.
    @type xmin: array
    @param xmax: Optional upper bounds on the M{x} variables, defaults are
                 M{Inf}.
    @type xmax: array
    @param gh_fcn: Function that evaluates the optional nonlinear constraints
                   and their gradients for a given value of M{x}.
    @type gh_fcn: callable
    @param hess_fcn: Handle to function that computes the Hessian of the
                     Lagrangian for given values of M{x}, M{lambda} and M{mu},
                     where M{lambda} and M{mu} are the multipliers on the
                     equality and inequality constraints, M{g} and M{h},
                     respectively.
    @type hess_fcn: callable
    @param opt: optional options dictionary with the following keys, all of
                which are also optional (default values shown in parentheses)
                  - C{verbose} (False) - Controls level of progress output
                    displayed
                  - C{feastol} (1e-6) - termination tolerance for feasibility
                    condition
                  - C{gradtol} (1e-6) - termination tolerance for gradient
                    condition
                  - C{comptol} (1e-6) - termination tolerance for
                    complementarity condition
                  - C{costtol} (1e-6) - termination tolerance for cost
                    condition
                  - C{max_it} (150) - maximum number of iterations
                  - C{step_control} (False) - set to True to enable step-size
                    control
                  - C{max_red} (20) - maximum number of step-size reductions if
                    step-control is on
                  - C{cost_mult} (1.0) - cost multiplier used to scale the
                    objective function for improved conditioning. Note: This
                    value is also passed as the 3rd argument to the Hessian
                    evaluation function so that it can appropriately scale the
                    objective function term in the Hessian of the Lagrangian.
    @type opt: dict

    @rtype: dict
    @return: The solution dictionary has the following keys:
               - C{x} - solution vector
               - C{f} - final objective function value
               - C{converged} - exit status
                   - True = first order optimality conditions satisfied
                   - False = maximum number of iterations reached
                   - None = numerically failed
               - C{output} - output dictionary with keys:
                   - C{iterations} - number of iterations performed
                   - C{hist} - list of arrays with trajectories of the
                     following: feascond, gradcond, compcond, costcond, gamma,
                     stepsize, obj, alphap, alphad
                   - C{message} - exit message
               - C{lmbda} - dictionary containing the Lagrange and Kuhn-Tucker
                 multipliers on the constraints, with keys:
                   - C{eqnonlin} - nonlinear equality constraints
                   - C{ineqnonlin} - nonlinear inequality constraints
                   - C{mu_l} - lower (left-hand) limit on linear constraints
                   - C{mu_u} - upper (right-hand) limit on linear constraints
                   - C{lower} - lower bound on optimization variables
                   - C{upper} - upper bound on optimization variables

    @see: U{http://www.pserc.cornell.edu/matpower/}

    @author: Ray Zimmerman (PSERC Cornell)
    """
    if isinstance(f_fcn, dict):  ## problem dict
        p = f_fcn
        f_fcn = p['f_fcn']
        x0 = p['x0']
        if 'opt' in p: opt = p['opt']
        if 'hess_fcn' in p: hess_fcn = p['hess_fcn']
        if 'gh_fcn' in p: gh_fcn = p['gh_fcn']
        if 'xmax' in p: xmax = p['xmax']
        if 'xmin' in p: xmin = p['xmin']
        if 'u' in p: u = p['u']
        if 'l' in p: l = p['l']
        if 'A' in p: A = p['A']

    nx = x0.shape[0]                        # number of variables
    nA = A.shape[0] if A is not None else 0 # number of original linear constr

    # default argument values
    if l is None or len(l) == 0: l = -Inf * ones(nA)
    if u is None or len(u) == 0: u =  Inf * ones(nA)
    if xmin is None or len(xmin) == 0: xmin = -Inf * ones(nx)
    if xmax is None or len(xmax) == 0: xmax =  Inf * ones(nx)
    if gh_fcn is None:
        nonlinear = False
        gn = array([])
        hn = array([])
    else:
        nonlinear = True

    if opt is None: opt = {}
    # options
    if "feastol" not in opt:
        opt["feastol"] = 1e-06
    if "gradtol" not in opt:
        opt["gradtol"] = 1e-06
    if "comptol" not in opt:
        opt["comptol"] = 1e-06
    if "costtol" not in opt:
        opt["costtol"] = 1e-06
    if "max_it" not in opt:
        opt["max_it"] = 150
    if "max_red" not in opt:
        opt["max_red"] = 20
    if "step_control" not in opt:
        opt["step_control"] = False
    if "cost_mult" not in opt:
        opt["cost_mult"] = 1
    if "verbose" not in opt:
        opt["verbose"] = 0

    # initialize history
    hist = []

    # constants
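    # xi: fraction-to-the-boundary damping factor; sigma: centering parameter
    # for the barrier update; z0: initial value for slacks and multipliers;
    # alpha_min: smallest step length before declaring numerical failure;
    # rho_min/rho_max: step-control acceptance window; mu_threshold: cutoff
    # below which multipliers on non-binding constraints are zeroed out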
    xi = 0.99995
    sigma = 0.1
    z0 = 1
    alpha_min = 1e-8
    rho_min = 0.95
    rho_max = 1.05
    mu_threshold = 1e-5

    # initialize
    i = 0                       # iteration counter
    converged = False           # flag
    eflag = False               # exit flag

    # add var limits to linear constraints
    eyex = eye(nx, nx, format="csr")
    AA = eyex if A is None else vstack([eyex, A], "csr")
    ll = r_[xmin, l]
    uu = r_[xmax, u]

    # split up linear constraints
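    #   ieq: equalities (u == l), igt: lower-bounded only, ilt: upper-bounded
    #   only, ibx: doubly bounded (1e10 stands in for infinity here)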
    ieq = find( absolute(uu - ll) <= EPS )
    igt = find( (uu >=  1e10) & (ll > -1e10) )
    ilt = find( (ll <= -1e10) & (uu <  1e10) )
    ibx = find( (absolute(uu - ll) > EPS) & (uu < 1e10) & (ll > -1e10) )
    # zero-sized sparse matrices unsupported
    Ae = AA[ieq, :] if len(ieq) else None
    if len(ilt) or len(igt) or len(ibx):
        idxs = [(1, ilt), (-1, igt), (1, ibx), (-1, ibx)]
        Ai = vstack([sig * AA[idx, :] for sig, idx in idxs if len(idx)], 'csr')
    else:
        Ai = None
    be = uu[ieq]
    bi = r_[uu[ilt], -ll[igt], uu[ibx], -ll[ibx]]

    # evaluate cost f(x0) and constraints g(x0), h(x0)
    x = x0
    f, df = f_fcn(x)                 # cost
    f = f * opt["cost_mult"]
    df = df * opt["cost_mult"]
    if nonlinear:
        hn, gn, dhn, dgn = gh_fcn(x)        # nonlinear constraints
        h = hn if Ai is None else r_[hn, Ai * x - bi] # inequality constraints
        g = gn if Ae is None else r_[gn, Ae * x - be] # equality constraints

        if (dhn is None) and (Ai is None):
            dh = None
        elif dhn is None:
            dh = Ai.T
        elif Ai is None:
            dh = dhn
        else:
            dh = hstack([dhn, Ai.T])

        if (dgn is None) and (Ae is None):
            dg = None
        elif dgn is None:
            dg = Ae.T
        elif Ae is None:
            dg = dgn
        else:
            dg = hstack([dgn, Ae.T])
    else:
        h = -bi if Ai is None else Ai * x - bi        # inequality constraints
        g = -be if Ae is None else Ae * x - be        # equality constraints
        dh = None if Ai is None else Ai.T     # 1st derivative of inequalities
        dg = None if Ae is None else Ae.T     # 1st derivative of equalities

    # some dimensions
    neq = g.shape[0]           # number of equality constraints
    niq = h.shape[0]           # number of inequality constraints
    neqnln = gn.shape[0]       # number of nonlinear equality constraints
    niqnln = hn.shape[0]       # number of nonlinear inequality constraints
    nlt = len(ilt)             # number of upper bounded linear inequalities
    ngt = len(igt)             # number of lower bounded linear inequalities
    nbx = len(ibx)             # number of doubly bounded linear inequalities

    # initialize gamma, lam, mu, z, e
    gamma = 1                  # barrier coefficient
    lam = zeros(neq)
    z = z0 * ones(niq)
    mu = z0 * ones(niq)
    k = find(h < -z0)
    z[k] = -h[k]
    k = find((gamma / z) > z0)
    mu[k] = gamma / z[k]
    e = ones(niq)

    # check tolerance
    f0 = f
    if opt["step_control"]:
        L = f + dot(lam, g) + dot(mu, h + z) - gamma * sum(log(z))

    Lx = df.copy()
    Lx = Lx + dg * lam if dg is not None else Lx
    Lx = Lx + dh * mu  if dh is not None else Lx

    maxh = zeros(1) if len(h) == 0 else max(h)

    gnorm = norm(g, Inf) if len(g) else 0.0
    lam_norm = norm(lam, Inf) if len(lam) else 0.0
    mu_norm = norm(mu, Inf) if len(mu) else 0.0
    znorm = norm(z, Inf) if len(z) else 0.0
    feascond = \
        max([gnorm, maxh]) / (1 + max([norm(x, Inf), znorm]))
    gradcond = \
        norm(Lx, Inf) / (1 + max([lam_norm, mu_norm]))
    compcond = dot(z, mu) / (1 + norm(x, Inf))
    costcond = absolute(f - f0) / (1 + absolute(f0))

    # save history
    hist.append({'feascond': feascond, 'gradcond': gradcond,
        'compcond': compcond, 'costcond': costcond, 'gamma': gamma,
        'stepsize': 0, 'obj': f / opt["cost_mult"], 'alphap': 0, 'alphad': 0})

    if opt["verbose"]:
        s = '-sc' if opt["step_control"] else ''
        v = pipsver('all')
        print('Python Interior Point Solver - PIPS%s, Version %s, %s' %
                    (s, v['Version'], v['Date']))
        if opt['verbose'] > 1:
            print(" it    objective   step size   feascond     gradcond     "
                  "compcond     costcond  ")
            print("----  ------------ --------- ------------ ------------ "
                  "------------ ------------")
            print("%3d  %12.8g %10s %12g %12g %12g %12g" %
                (i, (f / opt["cost_mult"]), "",
                 feascond, gradcond, compcond, costcond))

    if feascond < opt["feastol"] and gradcond < opt["gradtol"] and \
        compcond < opt["comptol"] and costcond < opt["costtol"]:
        converged = True
        if opt["verbose"]:
            print("Converged!")

    # do Newton iterations
    while (not converged) and (i < opt["max_it"]):
        # update iteration counter
        i += 1

        # compute update step
        lmbda = {"eqnonlin": lam[:neqnln], "ineqnonlin": mu[:niqnln]}
        if nonlinear:
            if hess_fcn is None:
                raise ValueError("pips: Hessian evaluation via finite "
                                 "differences not yet implemented. Please "
                                 "provide your own Hessian evaluation "
                                 "function.")
            Lxx = hess_fcn(x, lmbda, opt["cost_mult"])
        else:
            _, _, d2f = f_fcn(x, True)      # cost
            Lxx = d2f * opt["cost_mult"]
        rz = range(len(z))
        zinvdiag = sparse((1.0 / z, (rz, rz))) if len(z) else None
        rmu = range(len(mu))
        mudiag = sparse((mu, (rmu, rmu))) if len(mu) else None
        dh_zinv = None if dh is None else dh * zinvdiag
        M = Lxx if dh is None else Lxx + dh_zinv * mudiag * dh.T
        N = Lx if dh is None else Lx + dh_zinv * (mudiag * h + gamma * e)
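        # M and N define the reduced KKT system: eliminating dz and dmu from
        # the Newton equations leaves a smaller system in [dx; dlam] only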

        Ab = sparse(M) if dg is None else vstack([
            hstack([M, dg]),
            hstack([dg.T, sparse((neq, neq))])
        ])
        bb = r_[-N, -g]

        dxdlam = spsolve(Ab.tocsr(), bb)

        if any(isnan(dxdlam)):
            if opt["verbose"]:
                print('\nNumerically Failed\n')
            eflag = -1
            break

        dx = dxdlam[:nx]
        dlam = dxdlam[nx:nx + neq]
        dz = -h - z if dh is None else -h - z - dh.T * dx
        dmu = -mu if dh is None else -mu + zinvdiag * (gamma * e - mudiag * dz)

        # optional step-size control
        sc = False
        if opt["step_control"]:
            x1 = x + dx

            # evaluate cost, constraints, derivatives at x1
            f1, df1 = f_fcn(x1)          # cost
            f1 = f1 * opt["cost_mult"]
            df1 = df1 * opt["cost_mult"]
            if nonlinear:
                hn1, gn1, dhn1, dgn1 = gh_fcn(x1) # nonlinear constraints

                h1 = hn1 if Ai is None else r_[hn1, Ai * x1 - bi] # ieq constraints
                g1 = gn1 if Ae is None else r_[gn1, Ae * x1 - be] # eq constraints

                # 1st der of ieq
                if (dhn1 is None) and (Ai is None):
                    dh1 = None
                elif dhn1 is None:
                    dh1 = Ai.T
                elif Ai is None:
                    dh1 = dhn1
                else:
                    dh1 = hstack([dhn1, Ai.T])

                # 1st der of eqs
                if (dgn1 is None) and (Ae is None):
                    dg1 = None
                elif dgn1 is None:
                    dg1 = Ae.T
                elif Ae is None:
                    dg1 = dgn1
                else:
                    dg1 = hstack([dgn1, Ae.T])
            else:
                h1 = -bi if Ai is None else Ai * x1 - bi    # inequality constraints
                g1 = -be if Ae is None else Ae * x1 - be    # equality constraints

                dh1 = dh                       ## 1st derivative of inequalities
                dg1 = dg                       ## 1st derivative of equalities

            # check tolerance
            Lx1 = df1
            Lx1 = Lx1 + dg1 * lam if dg1 is not None else Lx1
            Lx1 = Lx1 + dh1 * mu  if dh1 is not None else Lx1

            maxh1 = zeros(1) if len(h1) == 0 else max(h1)

            g1norm = norm(g1, Inf) if len(g1) else 0.0
            lam1_norm = norm(lam, Inf) if len(lam) else 0.0
            mu1_norm = norm(mu, Inf) if len(mu) else 0.0
            z1norm = norm(z, Inf) if len(z) else 0.0

            feascond1 = max([ g1norm, maxh1 ]) / \
                (1 + max([ norm(x1, Inf), z1norm ]))
            gradcond1 = norm(Lx1, Inf) / (1 + max([ lam1_norm, mu1_norm ]))

            if (feascond1 > feascond) and (gradcond1 > gradcond):
                sc = True
        if sc:
            alpha = 1.0
            for j in range(opt["max_red"]):
                dx1 = alpha * dx
                x1 = x + dx1
                f1, _ = f_fcn(x1)             # cost
                f1 = f1 * opt["cost_mult"]
                if nonlinear:
                    hn1, gn1, _, _ = gh_fcn(x1)              # nonlinear constraints
                    h1 = hn1 if Ai is None else r_[hn1, Ai * x1 - bi]         # inequality constraints
                    g1 = gn1 if Ae is None else r_[gn1, Ae * x1 - be]         # equality constraints
                else:
                    h1 = -bi if Ai is None else Ai * x1 - bi    # inequality constraints
                    g1 = -be if Ae is None else Ae * x1 - be    # equality constraints

                L1 = f1 + dot(lam, g1) + dot(mu, h1 + z) - gamma * sum(log(z))

                if opt["verbose"] > 2:
                    print("   %3d            %10.5f" % (-j, norm(dx1)))

                rho = (L1 - L) / (dot(Lx, dx1) + 0.5 * dot(dx1, Lxx * dx1))

                if (rho > rho_min) and (rho < rho_max):
                    break
                else:
                    alpha = alpha / 2.0
            dx = alpha * dx
            dz = alpha * dz
            dlam = alpha * dlam
            dmu = alpha * dmu

        # do the update
        k = find(dz < 0.0)
        alphap = min([xi * min(z[k] / -dz[k]), 1]) if len(k) else 1.0
        k = find(dmu < 0.0)
        alphad = min([xi * min(mu[k] / -dmu[k]), 1]) if len(k) else 1.0
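        # alphap and alphad are fraction-to-the-boundary step lengths; the
        # xi factor keeps the slacks z and multipliers mu strictly positive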
        x = x + alphap * dx
        z = z + alphap * dz
        lam = lam + alphad * dlam
        mu = mu + alphad * dmu
        if niq > 0:
            gamma = sigma * dot(z, mu) / niq

        # evaluate cost, constraints, derivatives
        f, df = f_fcn(x)             # cost
        f = f * opt["cost_mult"]
        df = df * opt["cost_mult"]
        if nonlinear:
            hn, gn, dhn, dgn = gh_fcn(x)                   # nln constraints
            h = hn if Ai is None else r_[hn, Ai * x - bi]  # ieq constraints
            g = gn if Ae is None else r_[gn, Ae * x - be]  # eq constraints

            if (dhn is None) and (Ai is None):
                dh = None
            elif dhn is None:
                dh = Ai.T
            elif Ai is None:
                dh = dhn
            else:
                dh = hstack([dhn, Ai.T])

            if (dgn is None) and (Ae is None):
                dg = None
            elif dgn is None:
                dg = Ae.T
            elif Ae is None:
                dg = dgn
            else:
                dg = hstack([dgn, Ae.T])
        else:
            h = -bi if Ai is None else Ai * x - bi    # inequality constraints
            g = -be if Ae is None else Ae * x - be    # equality constraints
            # 1st derivatives are constant, still dh = Ai.T, dg = Ae.T

        Lx = df
        Lx = Lx + dg * lam if dg is not None else Lx
        Lx = Lx + dh * mu  if dh is not None else Lx

        if len(h) == 0:
            maxh = zeros(1)
        else:
            maxh = max(h)

        gnorm = norm(g, Inf) if len(g) else 0.0
        lam_norm = norm(lam, Inf) if len(lam) else 0.0
        mu_norm = norm(mu, Inf) if len(mu) else 0.0
        znorm = norm(z, Inf) if len(z) else 0.0
        feascond = \
            max([gnorm, maxh]) / (1 + max([norm(x, Inf), znorm]))
        gradcond = \
            norm(Lx, Inf) / (1 + max([lam_norm, mu_norm]))
        compcond = dot(z, mu) / (1 + norm(x, Inf))
        costcond = float(absolute(f - f0) / (1 + absolute(f0)))

        hist.append({'feascond': feascond, 'gradcond': gradcond,
            'compcond': compcond, 'costcond': costcond, 'gamma': gamma,
            'stepsize': norm(dx), 'obj': f / opt["cost_mult"],
            'alphap': alphap, 'alphad': alphad})

        if opt["verbose"] > 1:
            print("%3d  %12.8g %10.5g %12g %12g %12g %12g" %
                (i, (f / opt["cost_mult"]), norm(dx), feascond, gradcond,
                 compcond, costcond))

        if feascond < opt["feastol"] and gradcond < opt["gradtol"] and \
            compcond < opt["comptol"] and costcond < opt["costtol"]:
            converged = True
            if opt["verbose"]:
                print("Converged!")
        else:
            if any(isnan(x)) or (alphap < alpha_min) or \
                (alphad < alpha_min) or (gamma < EPS) or (gamma > 1.0 / EPS):
                if opt["verbose"]:
                    print("Numerically failed.")
                eflag = -1
                break
            f0 = f

            if opt["step_control"]:
                L = f + dot(lam, g) + dot(mu, (h + z)) - gamma * sum(log(z))

    if opt["verbose"]:
        if not converged:
            print("Did not converge in %d iterations." % i)

    # package results
    if eflag != -1:
        eflag = converged

    if eflag == 0:
        message = 'Did not converge'
    elif eflag == 1:
        message = 'Converged'
    elif eflag == -1:
        message = 'Numerically failed'
    else:
        raise ValueError("unexpected exit flag: %r" % eflag)

    output = {"iterations": i, "hist": hist, "message": message}

    # zero out multipliers on non-binding constraints
    mu[find( (h < -opt["feastol"]) & (mu < mu_threshold) )] = 0.0

    # un-scale cost and prices
    f = f / opt["cost_mult"]
    lam = lam / opt["cost_mult"]
    mu = mu / opt["cost_mult"]

    # re-package multipliers into struct
    lam_lin = lam[neqnln:neq]           # lambda for linear constraints
    mu_lin = mu[niqnln:niq]             # mu for linear constraints
    kl = find(lam_lin < 0.0)     # lower bound binding
    ku = find(lam_lin > 0.0)     # upper bound binding

    mu_l = zeros(nx + nA)
    mu_l[ieq[kl]] = -lam_lin[kl]
    mu_l[igt] = mu_lin[nlt:nlt + ngt]
    mu_l[ibx] = mu_lin[nlt + ngt + nbx:nlt + ngt + nbx + nbx]

    mu_u = zeros(nx + nA)
    mu_u[ieq[ku]] = lam_lin[ku]
    mu_u[ilt] = mu_lin[:nlt]
    mu_u[ibx] = mu_lin[nlt + ngt:nlt + ngt + nbx]

    lmbda = {'mu_l': mu_l[nx:], 'mu_u': mu_u[nx:],
             'lower': mu_l[:nx], 'upper': mu_u[:nx]}

    if niqnln > 0:
        lmbda['ineqnonlin'] = mu[:niqnln]
    if neqnln > 0:
        lmbda['eqnonlin'] = lam[:neqnln]

    solution = {"x": x, "f": f, "eflag": converged,
                "output": output, "lmbda": lmbda}

    return solution
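
A further minimal sketch (not from the PYPOWER sources; f3 and its return_hessian flag are illustrative) of calling pips with box constraints only. When gh_fcn is omitted, pips requests the Hessian directly from f_fcn by passing a second positional argument:

from numpy import array, dot, float64
from scipy.sparse import csr_matrix

def f3(x, return_hessian=False):
    # convex quadratic 0.5 * x'Hx - c'x, minimized at x = [1, 3] with f = -10
    H = array([[2.0, 0.0], [0.0, 2.0]])
    c = array([2.0, 6.0])
    f = 0.5 * dot(x, dot(H, x)) - dot(c, x)
    df = dot(H, x) - c
    if not return_hessian:
        return f, df
    return f, df, csr_matrix(H)

sol = pips(f3, array([0.0, 0.0], float64),
           xmin=array([0.0, 0.0]), xmax=array([10.0, 10.0]))
print(sol["f"], sol["x"], sol["output"]["iterations"])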

Example 27

Project: pysb Source File: scipyode.py
Function: __init__
    def __init__(self, model, tspan=None, initials=None, param_values=None,
                 verbose=False, **kwargs):
        super(ScipyOdeSimulator, self).__init__(model,
                                                tspan=tspan,
                                                initials=initials,
                                                param_values=param_values,
                                                verbose=verbose,
                                                **kwargs)
        # We'll need to know if we're using the Jacobian when we get to run()
        self._use_analytic_jacobian = kwargs.get('use_analytic_jacobian',
                                                 False)
        self.cleanup = kwargs.get('cleanup', True)
        integrator = kwargs.get('integrator', 'vode')
        # Generate the equations for the model
        pysb.bng.generate_equations(self._model, self.cleanup, self.verbose)

        def _eqn_substitutions(eqns):
            """String substitutions on the sympy C code for the ODE RHS and
            Jacobian functions to use appropriate terms for variables and
            parameters."""
            # Substitute expanded parameter formulas for any named expressions
            for e in self._model.expressions:
                eqns = re.sub(r'\b(%s)\b' % e.name, '(' + sympy.ccode(
                    e.expand_expr()) + ')', eqns)

            # Substitute sums of observable species that could've been added
            # by expressions
            for obs in self._model.observables:
                obs_string = ''
                for i in range(len(obs.coefficients)):
                    if i > 0:
                        obs_string += "+"
                    if obs.coefficients[i] > 1:
                        obs_string += str(obs.coefficients[i]) + "*"
                    obs_string += "__s" + str(obs.species[i])
                if len(obs.coefficients) > 1:
                    obs_string = '(' + obs_string + ')'
                eqns = re.sub(r'\b(%s)\b' % obs.name, obs_string, eqns)

            # Substitute 'y[i]' for 'si'
            eqns = re.sub(r'\b__s(\d+)\b',
                          lambda m: 'y[%s]' % (int(m.group(1))),
                          eqns)

            # Substitute 'p[i]' for any named parameters
            for i, p in enumerate(self._model.parameters):
                eqns = re.sub(r'\b(%s)\b' % p.name, 'p[%d]' % i, eqns)
            return eqns

        # ODE RHS -----------------------------------------------
        # Prepare the string representations of the RHS equations
        code_eqs = '\n'.join(['ydot[%d] = %s;' %
                              (i, sympy.ccode(self._model.odes[i]))
                              for i in range(len(self._model.odes))])
        code_eqs = _eqn_substitutions(code_eqs)

        self._test_inline()

        # If we can't use weave.inline to run the C code, compile it as Python
        # code instead for use with exec. Note: C code with array indexing,
        # basic math operations, and pow() just happens to also be valid
        # Python. If the equations ever have more complex things in them, this
        # might fail.
        if not self._use_inline:
            code_eqs_py = compile(code_eqs, '<%s odes>' % self._model.name,
                                  'exec')
        else:
            for arr_name in ('ydot', 'y', 'p'):
                macro = arr_name.upper() + '1'
                code_eqs = re.sub(r'\b%s\[(\d+)\]' % arr_name,
                                  '%s(\\1)' % macro, code_eqs)

        def rhs(t, y, p):
            ydot = self.ydot
            # note that the evaluated code sets ydot as a side effect
            if self._use_inline:
                weave_inline(code_eqs, ['ydot', 't', 'y', 'p'])
            else:
                _exec(code_eqs_py, locals())
            return ydot

        # JACOBIAN -----------------------------------------------
        # We'll keep the code for putting together the matrix in Sympy
        # in case we want to do manipulations of the matrix later (e.g., to
        # put together the sensitivity matrix)
        jac_fn = None
        if self._use_analytic_jacobian:
            species_names = ['__s%d' % i for i in
                             range(len(self._model.species))]
            jac_matrix = []
            # Rows of jac_matrix are by equation f_i:
            # [[df1/x1, df1/x2, ..., df1/xn],
            #  [   ...                     ],
            #  [dfn/x1, dfn/x2, ..., dfn/xn],
            for eqn in self._model.odes:
                # Derivatives for f_i...
                jac_row = []
                for species_name in species_names:
                    # ... with respect to s_j
                    d = sympy.diff(eqn, species_name)
                    jac_row.append(d)
                jac_matrix.append(jac_row)

            # Next, prepare the stringified Jacobian equations
            jac_eqs_list = []
            for i, row in enumerate(jac_matrix):
                for j, entry in enumerate(row):
                    # Skip zero entries in the Jacobian
                    if entry == 0:
                        continue
                    jac_eq_str = 'jac[%d, %d] = %s;' % (
                    i, j, sympy.ccode(entry))
                    jac_eqs_list.append(jac_eq_str)
            jac_eqs = _eqn_substitutions('\n'.join(jac_eqs_list))

            # Try to inline the Jacobian if possible (as above for RHS)
            if not self._use_inline:
                jac_eqs_py = compile(jac_eqs,
                                     '<%s jacobian>' % self._model.name, 'exec')
            else:
                # Substitute array refs with calls to the JAC2 macro for inline
                jac_eqs = re.sub(r'\bjac\[(\d+), (\d+)\]',
                                 r'JAC2(\1, \2)', jac_eqs)
                # Substitute calls to the Y1 and P1 macros
                for arr_name in ('y', 'p'):
                    macro = arr_name.upper() + '1'
                    jac_eqs = re.sub(r'\b%s\[(\d+)\]' % arr_name,
                                     '%s(\\1)' % macro, jac_eqs)

            def jacobian(t, y, p):
                jac = self.jac
                # note that the evaluated code sets jac as a side effect
                if self._use_inline:
                    weave_inline(jac_eqs, ['jac', 't', 'y', 'p'])
                else:
                    _exec(jac_eqs_py, locals())
                return jac

            # Preallocate the matrix that jacobian() fills in (and returns)
            # as a side effect of executing the compiled Jacobian code
            self.jac = np.zeros(
                (len(self._model.odes), len(self._model.species)))
            jac_fn = jacobian

        # build integrator options list from our defaults and any kwargs
        # passed to this function
        options = {}
        if self.default_integrator_options.get(integrator):
            options.update(
                self.default_integrator_options[integrator])  # default options

        options.update(kwargs.get('integrator_options', {}))  # overwrite
        # defaults
        self.opts = options
        self.ydot = np.ndarray(len(self._model.species))

        # Integrator
        if integrator == 'lsoda':
            # lsoda is accessed via scipy.integrate.odeint which, as a
            # function, requires that we pass its args at the point of call.
            # Thus we need to stash stuff like the rhs and jacobian functions
            # in self so we can pass them in later.
            self.integrator = integrator
            # lsoda's rhs and jacobian function arguments are in a different
            # order to other integrators, so we define these shims that swizzle
            # the argument order appropriately.
            self.func = lambda t, y, p: rhs(y, t, p)
            if jac_fn is None:
                self.jac_fn = None
            else:
                self.jac_fn = lambda t, y, p: jac_fn(y, t, p)
        else:
            # The scipy.integrate.ode integrators on the other hand are object
            # oriented and hold the functions and such internally. Once we set
            # up the integrator object we only need to retain a reference to it
            # and can forget about the other bits.
            self.integrator = scipy.integrate.ode(rhs, jac=jac_fn)
            with warnings.catch_warnings():
                warnings.filterwarnings('error', 'No integrator name match')
                self.integrator.set_integrator(integrator, **options)
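
For context on the argument-order shims above, a standalone sketch (not pysb code; the toy decay ODE is illustrative): scipy.integrate.ode calls its right-hand side as f(t, y, ...), while scipy.integrate.odeint, which backs the 'lsoda' path here, calls it as f(y, t, ...).

import numpy as np
from scipy.integrate import ode, odeint

def rhs_ty(t, y):                 # order expected by scipy.integrate.ode
    return -y

def rhs_yt(y, t):                 # order expected by scipy.integrate.odeint
    return rhs_ty(t, y)           # same argument swizzle as self.func above

solver = ode(rhs_ty).set_integrator('vode')
solver.set_initial_value(np.array([1.0]), 0.0)
y_end = solver.integrate(1.0)     # approximately exp(-1)

traj = odeint(rhs_yt, np.array([1.0]), np.linspace(0.0, 1.0, 11))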

Example 28

Project: RLScore Source File: space_efficient_greedy_rls.py
    def solve_tradeoff(self, regparam):
        """Trains RLS with the given value of the regularization parameter
        
        @param regparam: value of the regularization parameter
        @type regparam: float
        """
        
        self.regparam = regparam
        X = self.X
        Y = self.Y
        
        if not hasattr(self, "bias"):
            self.bias = 0.
        
        tsize = self.size
        fsize = X.shape[0]
        assert X.shape[1] == tsize
        self.A = np.mat(np.zeros((fsize, Y.shape[1])))
        
        rp = regparam
        rpinv = 1. / rp
        
        if 'subsetsize' not in self.resource_pool:
            raise Exception("Parameter 'subsetsize' must be given.")
        desiredfcount = int(self.resource_pool['subsetsize'])
        if fsize < desiredfcount:
            raise Exception('The overall number of features ' + str(fsize) + ' is smaller than the desired number ' + str(desiredfcount) + ' of features to be selected.')
        
        # Bias
        bias_slice = np.sqrt(self.bias) * np.mat(np.ones((1, X.shape[1]), dtype=np.float64))
        cv = bias_slice
        ca = rpinv * (1. / (1. + cv * rpinv * cv.T)) * (cv * rpinv)
        
        self.dualvec = rpinv * Y - cv.T * rpinv * (1. / (1. + cv * rpinv * cv.T)) * (cv * rpinv * Y)
        
        diagG = []
        for i in range(tsize):
            diagGi = rpinv - cv.T[i, 0] * ca[0, i]
            diagG.append(diagGi)
        diagG = np.mat(diagG).T
        
        U, S, VT = la.svd(cv, full_matrices = False)
        U, S, VT = np.mat(U), np.mat(S), np.mat(VT)
        Omega = 1. / (S * S + rp) - rpinv
        
        self.selected = []
        
        blocksize = 1000
        blocks = []
        blockcount = 0
        while True:
            startind = blockcount * blocksize
            if (blockcount + 1) * blocksize < fsize:
                print(blockcount, fsize, (blockcount + 1) * blocksize)
                endind = (blockcount + 1) * blocksize
                blocks.append(range(startind, endind))
                blockcount += 1
            else:
                blocks.append(range(startind, fsize))
                blockcount += 1
                break
        
        
        currentfcount = 0
        self.performances = []
        while currentfcount < desiredfcount:
            
            if self.measure is not None:
                self.bestlooperf = None
            else:
                self.bestlooperf = float('inf')
            
            
            looperf = np.mat(np.zeros((1, fsize)))
            
            for blockind in range(blockcount):
                
                block = blocks[blockind]
                
                tempmatrix = np.mat(np.zeros((tsize, len(block))))
                temp2 = np.mat(np.zeros((tsize, len(block))))
                
                X_block = X[block]
                GXT_block = VT.T * np.multiply(Omega.T, (VT * X_block.T)) + rpinv * X_block.T
                
                np.multiply(X_block.T, GXT_block, tempmatrix)
                XGXTdiag = np.sum(tempmatrix, axis=0)
                
                XGXTdiag = 1. / (1. + XGXTdiag)
                np.multiply(GXT_block, XGXTdiag, tempmatrix)
                
                tempvec1 = np.multiply((X_block * self.dualvec).T, XGXTdiag)
                np.multiply(GXT_block, tempvec1, temp2)
                np.subtract(self.dualvec, temp2, temp2)
                
                np.multiply(tempmatrix, GXT_block, tempmatrix)
                np.subtract(diagG, tempmatrix, tempmatrix)
                np.divide(1, tempmatrix, tempmatrix)
                np.multiply(tempmatrix, temp2, tempmatrix)
                
                
                if self.measure is not None:
                    np.subtract(Y, tempmatrix, tempmatrix)
                    np.multiply(temp2, 0, temp2)
                    np.add(temp2, Y, temp2)
                    looperf_block = self.measure.multiTaskPerformance(temp2, tempmatrix)
                    looperf_block = np.mat(looperf_block)
                else:
                    np.multiply(tempmatrix, tempmatrix, tempmatrix)
                    looperf_block = np.sum(tempmatrix, axis=0)
                looperf[:, block] = looperf_block
                
            if self.measure is not None:
                if self.measure.isErrorMeasure():
                    looperf[0, self.selected] = float('inf')
                    bestcind = np.argmin(looperf)
                    self.bestlooperf = np.amin(looperf)
                else:
                    looperf[0, self.selected] = - float('inf')
                    bestcind = np.argmax(looperf)
                    self.bestlooperf = np.amax(looperf)
            else:
                looperf[0, self.selected] = float('inf')
                bestcind = np.argmin(looperf)
                self.bestlooperf = np.amin(looperf)
                
            self.looperf = looperf
            
            self.performances.append(self.bestlooperf)
            cv = X[bestcind]
            GXT_bci = VT.T * np.multiply(Omega.T, (VT * cv.T)) + rpinv * cv.T
            ca = GXT_bci * (1. / (1. + cv * GXT_bci))
            self.dualvec = self.dualvec - ca * (cv * self.dualvec)
            diagG = diagG - np.multiply(ca, GXT_bci)
            self.selected.append(bestcind)
            X_sel = X[self.selected]
            if isinstance(X_sel, sp.base.spmatrix):
                X_sel = X_sel.todense()
            U, S, VT = la.svd(np.vstack([X_sel, bias_slice]), full_matrices = False)
            U, S, VT = np.mat(U), np.mat(S), np.mat(VT)
            Omega = 1. / (np.multiply(S, S) + rp) - rpinv
            currentfcount += 1
            
            #Linear predictor with bias
            self.A[self.selected] = X[self.selected] * self.dualvec
            self.b = bias_slice * self.dualvec
            
            self.callback()
        self.finished()
        self.A[self.selected] = X[self.selected] * self.dualvec
        self.b = bias_slice * self.dualvec
        self.results['selected_features'] = self.selected
        self.results['GreedyRLS_LOO_performances'] = self.performances
        self.predictor = predictor.LinearPredictor(self.A, self.b)
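
A stripped-down sketch (independent of RLScore; score_in_blocks and the squared-norm scorer are illustrative stand-ins for the LOO computation above) of the blocking pattern used in solve_tradeoff: candidates are scored in fixed-size blocks, with results gathered into a buffer preallocated via numpy.zeros.

import numpy as np

def score_in_blocks(X, score_block, blocksize=1000):
    # apply score_block to row-blocks of X, one score per row; blocking
    # bounds the size of any temporary arrays score_block allocates
    fsize = X.shape[0]
    scores = np.zeros(fsize)
    for start in range(0, fsize, blocksize):
        stop = min(start + blocksize, fsize)
        scores[start:stop] = score_block(X[start:stop])
    return scores

X = np.random.rand(2500, 40)
scores = score_in_blocks(X, lambda B: (B ** 2).sum(axis=1))
best = int(np.argmin(scores))    # index of the best-scoring candidate row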

Example 29

Project: PYPOWER Source File: printpf.py
def printpf(baseMVA, bus=None, gen=None, branch=None, f=None, success=None,
            et=None, fd=None, ppopt=None):
    """Prints power flow results.

    Prints power flow and optimal power flow results to C{fd} (a file
    descriptor which defaults to C{stdout}), with the details of what
    gets printed controlled by the optional C{ppopt} argument, which is a
    PYPOWER options vector (see L{ppoption} for details).

    The data can either be supplied in a single C{results} dict, or
    in the individual arguments: C{baseMVA}, C{bus}, C{gen}, C{branch}, C{f},
    C{success} and C{et}, where C{f} is the OPF objective function value,
    C{success} is C{True} if the solution converged and C{False} otherwise,
    and C{et} is the elapsed time for the computation in seconds. If C{f} is
    given, it is assumed that the output is from an OPF run, otherwise it is
    assumed to be a simple power flow run.

    Examples::
        ppopt = ppoption(OUT_GEN=1, OUT_BUS=0, OUT_BRANCH=0)
        fd = open(fname, 'w+')
        results = runopf(ppc)
        printpf(results)
        printpf(results, fd)
        printpf(results, fd, ppopt)
        printpf(baseMVA, bus, gen, branch, f, success, et)
        printpf(baseMVA, bus, gen, branch, f, success, et, fd)
        printpf(baseMVA, bus, gen, branch, f, success, et, fd, ppopt)
        fd.close()

    @author: Ray Zimmerman (PSERC Cornell)
    """
    ##----- initialization -----
    ## default arguments
    if isinstance(baseMVA, dict):
        have_results_struct = 1
        results = baseMVA
        if gen is None:
            ppopt = ppoption()   ## use default options
        else:
            ppopt = gen
        if (ppopt['OUT_ALL'] == 0):
            return     ## nothin' to see here, bail out now
        if bus is None:
            fd = stdout         ## print to stdout by default
        else:
            fd = bus
        baseMVA, bus, gen, branch, success, et = \
            results["baseMVA"], results["bus"], results["gen"], \
            results["branch"], results["success"], results["et"]
        if 'f' in results:
            f = results["f"]
        else:
            f = None
    else:
        have_results_struct = 0
        if ppopt is None:
            ppopt = ppoption()   ## use default options
        if fd is None:
            fd = stdout         ## print to stdout by default
        if ppopt['OUT_ALL'] == 0:
            return     ## nothin' to see here, bail out now

    isOPF = f is not None    ## FALSE -> only simple PF data, TRUE -> OPF data

    ## options
    isDC            = ppopt['PF_DC']        ## use DC formulation?
    OUT_ALL         = ppopt['OUT_ALL']
    OUT_ANY         = OUT_ALL == 1     ## set to true if any pretty output is to be generated
    OUT_SYS_SUM     = (OUT_ALL == 1) or ((OUT_ALL == -1) and ppopt['OUT_SYS_SUM'])
    OUT_AREA_SUM    = (OUT_ALL == 1) or ((OUT_ALL == -1) and ppopt['OUT_AREA_SUM'])
    OUT_BUS         = (OUT_ALL == 1) or ((OUT_ALL == -1) and ppopt['OUT_BUS'])
    OUT_BRANCH      = (OUT_ALL == 1) or ((OUT_ALL == -1) and ppopt['OUT_BRANCH'])
    OUT_GEN         = (OUT_ALL == 1) or ((OUT_ALL == -1) and ppopt['OUT_GEN'])
    OUT_ANY         = OUT_ANY | ((OUT_ALL == -1) and
                        (OUT_SYS_SUM or OUT_AREA_SUM or OUT_BUS or
                         OUT_BRANCH or OUT_GEN))

    if OUT_ALL == -1:
        OUT_ALL_LIM = ppopt['OUT_ALL_LIM']
    elif OUT_ALL == 1:
        OUT_ALL_LIM = 2
    else:
        OUT_ALL_LIM = 0

    OUT_ANY         = OUT_ANY or (OUT_ALL_LIM >= 1)
    if OUT_ALL_LIM == -1:
        OUT_V_LIM       = ppopt['OUT_V_LIM']
        OUT_LINE_LIM    = ppopt['OUT_LINE_LIM']
        OUT_PG_LIM      = ppopt['OUT_PG_LIM']
        OUT_QG_LIM      = ppopt['OUT_QG_LIM']
    else:
        OUT_V_LIM       = OUT_ALL_LIM
        OUT_LINE_LIM    = OUT_ALL_LIM
        OUT_PG_LIM      = OUT_ALL_LIM
        OUT_QG_LIM      = OUT_ALL_LIM

    OUT_ANY         = OUT_ANY or ((OUT_ALL_LIM == -1) and (OUT_V_LIM or OUT_LINE_LIM or OUT_PG_LIM or OUT_QG_LIM))
    ptol = 1e-4        ## tolerance for displaying shadow prices

    ## create map of external bus numbers to bus indices
    i2e = bus[:, BUS_I].astype(int)
    e2i = zeros(max(i2e) + 1, int)
    e2i[i2e] = arange(bus.shape[0])
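    ## e.g. with external bus numbers i2e = [10, 20, 30] (illustrative), e2i
    ## has length 31, e2i[20] == 1, and unused slots stay 0 from zeros()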

    ## sizes of things
    nb = bus.shape[0]      ## number of buses
    nl = branch.shape[0]   ## number of branches
    ng = gen.shape[0]      ## number of generators

    ## zero out some data to make printout consistent for DC case
    if isDC:
        bus[:, r_[QD, BS]]          = zeros((nb, 2))
        gen[:, r_[QG, QMAX, QMIN]]  = zeros((ng, 3))
        branch[:, r_[BR_R, BR_B]]   = zeros((nl, 2))

    ## parameters
    ties = find(bus[e2i[branch[:, F_BUS].astype(int)], BUS_AREA] !=
                   bus[e2i[branch[:, T_BUS].astype(int)], BUS_AREA])
                            ## area inter-ties
    tap = ones(nl)                           ## default tap ratio = 1 for lines
    xfmr = find(branch[:, TAP])           ## indices of transformers
    tap[xfmr] = branch[xfmr, TAP]            ## include transformer tap ratios
    tap = tap * exp(1j * pi / 180 * branch[:, SHIFT]) ## add phase shifters
    nzld = find((bus[:, PD] != 0.0) | (bus[:, QD] != 0.0))
    sorted_areas = sort(bus[:, BUS_AREA])
    ## area numbers (first element of each run in the sorted list)
    s_areas = sorted_areas[r_[0, find(diff(sorted_areas)) + 1]]
    nzsh = find((bus[:, GS] != 0.0) | (bus[:, BS] != 0.0))
    allg = find( ~isload(gen) )
    ong  = find( (gen[:, GEN_STATUS] > 0) & ~isload(gen) )
    onld = find( (gen[:, GEN_STATUS] > 0) &  isload(gen) )
    V = bus[:, VM] * exp(1j * pi / 180 * bus[:, VA])
    out = find(branch[:, BR_STATUS] == 0)        ## out-of-service branches
    nout = len(out)
    if isDC:
        loss = zeros(nl)
    else:
        loss = baseMVA * abs(V[e2i[ branch[:, F_BUS].astype(int) ]] / tap -
                             V[e2i[ branch[:, T_BUS].astype(int) ]])**2 / \
                    (branch[:, BR_R] - 1j * branch[:, BR_X])

    fchg = abs(V[e2i[ branch[:, F_BUS].astype(int) ]] / tap)**2 * branch[:, BR_B] * baseMVA / 2
    tchg = abs(V[e2i[ branch[:, T_BUS].astype(int) ]]      )**2 * branch[:, BR_B] * baseMVA / 2
    loss[out] = zeros(nout)
    fchg[out] = zeros(nout)
    tchg[out] = zeros(nout)

    ##----- print the stuff -----
    if OUT_ANY:
        ## convergence & elapsed time
        if success:
            fd.write('\nConverged in %.2f seconds' % et)
        else:
            fd.write('\nDid not converge (%.2f seconds)\n' % et)

        ## objective function value
        if isOPF:
            fd.write('\nObjective Function Value = %.2f $/hr' % f)

    if OUT_SYS_SUM:
        fd.write('\n================================================================================')
        fd.write('\n|     System Summary                                                           |')
        fd.write('\n================================================================================')
        fd.write('\n\nHow many?                How much?              P (MW)            Q (MVAr)')
        fd.write('\n---------------------    -------------------  -------------  -----------------')
        fd.write('\nBuses         %6d     Total Gen Capacity   %7.1f       %7.1f to %.1f' % (nb, sum(gen[allg, PMAX]), sum(gen[allg, QMIN]), sum(gen[allg, QMAX])))
        fd.write('\nGenerators     %5d     On-line Capacity     %7.1f       %7.1f to %.1f' % (len(allg), sum(gen[ong, PMAX]), sum(gen[ong, QMIN]), sum(gen[ong, QMAX])))
        fd.write('\nCommitted Gens %5d     Generation (actual)  %7.1f           %7.1f' % (len(ong), sum(gen[ong, PG]), sum(gen[ong, QG])))
        fd.write('\nLoads          %5d     Load                 %7.1f           %7.1f' % (len(nzld)+len(onld), sum(bus[nzld, PD])-sum(gen[onld, PG]), sum(bus[nzld, QD])-sum(gen[onld, QG])))
        fd.write('\n  Fixed        %5d       Fixed              %7.1f           %7.1f' % (len(nzld), sum(bus[nzld, PD]), sum(bus[nzld, QD])))
        fd.write('\n  Dispatchable %5d       Dispatchable       %7.1f of %-7.1f%7.1f' % (len(onld), -sum(gen[onld, PG]), -sum(gen[onld, PMIN]), -sum(gen[onld, QG])))
        fd.write('\nShunts         %5d     Shunt (inj)          %7.1f           %7.1f' % (len(nzsh),
            -sum(bus[nzsh, VM]**2 * bus[nzsh, GS]), sum(bus[nzsh, VM]**2 * bus[nzsh, BS]) ))
        fd.write('\nBranches       %5d     Losses (I^2 * Z)     %8.2f          %8.2f' % (nl, sum(loss.real), sum(loss.imag) ))
        fd.write('\nTransformers   %5d     Branch Charging (inj)     -            %7.1f' % (len(xfmr), sum(fchg) + sum(tchg) ))
        fd.write('\nInter-ties     %5d     Total Inter-tie Flow %7.1f           %7.1f' % (len(ties), sum(abs(branch[ties, PF]-branch[ties, PT])) / 2, sum(abs(branch[ties, QF]-branch[ties, QT])) / 2))
        fd.write('\nAreas          %5d' % len(s_areas))
        fd.write('\n')
        fd.write('\n                          Minimum                      Maximum')
        fd.write('\n                 -------------------------  --------------------------------')
        minv = min(bus[:, VM])
        mini = argmin(bus[:, VM])
        maxv = max(bus[:, VM])
        maxi = argmax(bus[:, VM])
        fd.write('\nVoltage Magnitude %7.3f p.u. @ bus %-4d     %7.3f p.u. @ bus %-4d' % (minv, bus[mini, BUS_I], maxv, bus[maxi, BUS_I]))
        minv = min(bus[:, VA])
        mini = argmin(bus[:, VA])
        maxv = max(bus[:, VA])
        maxi = argmax(bus[:, VA])
        fd.write('\nVoltage Angle   %8.2f deg   @ bus %-4d   %8.2f deg   @ bus %-4d' % (minv, bus[mini, BUS_I], maxv, bus[maxi, BUS_I]))
        if not isDC:
            maxv = max(loss.real)
            maxi = argmax(loss.real)
            fd.write('\nP Losses (I^2*R)             -              %8.2f MW    @ line %d-%d' % (maxv, branch[maxi, F_BUS], branch[maxi, T_BUS]))
            maxv = max(loss.imag)
            maxi = argmax(loss.imag)
            fd.write('\nQ Losses (I^2*X)             -              %8.2f MVAr  @ line %d-%d' % (maxv, branch[maxi, F_BUS], branch[maxi, T_BUS]))
        if isOPF:
            minv = min(bus[:, LAM_P])
            mini = argmin(bus[:, LAM_P])
            maxv = max(bus[:, LAM_P])
            maxi = argmax(bus[:, LAM_P])
            fd.write('\nLambda P        %8.2f $/MWh @ bus %-4d   %8.2f $/MWh @ bus %-4d' % (minv, bus[mini, BUS_I], maxv, bus[maxi, BUS_I]))
            minv = min(bus[:, LAM_Q])
            mini = argmin(bus[:, LAM_Q])
            maxv = max(bus[:, LAM_Q])
            maxi = argmax(bus[:, LAM_Q])
            fd.write('\nLambda Q        %8.2f $/MWh @ bus %-4d   %8.2f $/MWh @ bus %-4d' % (minv, bus[mini, BUS_I], maxv, bus[maxi, BUS_I]))
        fd.write('\n')

    if OUT_AREA_SUM:
        fd.write('\n================================================================================')
        fd.write('\n|     Area Summary                                                             |')
        fd.write('\n================================================================================')
        fd.write('\nArea  # of      # of Gens        # of Loads         # of    # of   # of   # of')
        fd.write('\n Num  Buses   Total  Online   Total  Fixed  Disp    Shunt   Brchs  Xfmrs   Ties')
        fd.write('\n----  -----   -----  ------   -----  -----  -----   -----   -----  -----  -----')
        for i in range(len(s_areas)):
            a = s_areas[i]
            ib = find(bus[:, BUS_AREA] == a)
            ig = find((bus[e2i[gen[:, GEN_BUS].astype(int)], BUS_AREA] == a) & ~isload(gen))
            igon = find((bus[e2i[gen[:, GEN_BUS].astype(int)], BUS_AREA] == a) & (gen[:, GEN_STATUS] > 0) & ~isload(gen))
            ildon = find((bus[e2i[gen[:, GEN_BUS].astype(int)], BUS_AREA] == a) & (gen[:, GEN_STATUS] > 0) & isload(gen))
            inzld = find((bus[:, BUS_AREA] == a) & logical_or(bus[:, PD], bus[:, QD]))
            inzsh = find((bus[:, BUS_AREA] == a) & logical_or(bus[:, GS], bus[:, BS]))
            ibrch = find((bus[e2i[branch[:, F_BUS].astype(int)], BUS_AREA] == a) & (bus[e2i[branch[:, T_BUS].astype(int)], BUS_AREA] == a))
            in_tie = find((bus[e2i[branch[:, F_BUS].astype(int)], BUS_AREA] == a) & (bus[e2i[branch[:, T_BUS].astype(int)], BUS_AREA] != a))
            out_tie = find((bus[e2i[branch[:, F_BUS].astype(int)], BUS_AREA] != a) & (bus[e2i[branch[:, T_BUS].astype(int)], BUS_AREA] == a))
            if not any(xfmr + 1):
                nxfmr = 0
            else:
                nxfmr = len(find((bus[e2i[branch[xfmr, F_BUS].astype(int)], BUS_AREA] == a) & (bus[e2i[branch[xfmr, T_BUS].astype(int)], BUS_AREA] == a)))
            fd.write('\n%3d  %6d   %5d  %5d   %5d  %5d  %5d   %5d   %5d  %5d  %5d' %
                (a, len(ib), len(ig), len(igon), \
                len(inzld)+len(ildon), len(inzld), len(ildon), \
                len(inzsh), len(ibrch), nxfmr, len(in_tie)+len(out_tie)))

        fd.write('\n----  -----   -----  ------   -----  -----  -----   -----   -----  -----  -----')
        fd.write('\nTot: %6d   %5d  %5d   %5d  %5d  %5d   %5d   %5d  %5d  %5d' %
            (nb, len(allg), len(ong), len(nzld)+len(onld),
            len(nzld), len(onld), len(nzsh), nl, len(xfmr), len(ties)))
        fd.write('\n')
        fd.write('\nArea      Total Gen Capacity           On-line Gen Capacity         Generation')
        fd.write('\n Num     MW           MVAr            MW           MVAr             MW    MVAr')
        fd.write('\n----   ------  ------------------   ------  ------------------    ------  ------')
        for i in range(len(s_areas)):
            a = s_areas[i]
            ig = find((bus[e2i[gen[:, GEN_BUS].astype(int)], BUS_AREA] == a) & ~isload(gen))
            igon = find((bus[e2i[gen[:, GEN_BUS].astype(int)], BUS_AREA] == a) & (gen[:, GEN_STATUS] > 0) & ~isload(gen))
            fd.write('\n%3d   %7.1f  %7.1f to %-7.1f  %7.1f  %7.1f to %-7.1f   %7.1f %7.1f' %
                (a, sum(gen[ig, PMAX]), sum(gen[ig, QMIN]), sum(gen[ig, QMAX]),
                sum(gen[igon, PMAX]), sum(gen[igon, QMIN]), sum(gen[igon, QMAX]),
                sum(gen[igon, PG]), sum(gen[igon, QG]) ))

        fd.write('\n----   ------  ------------------   ------  ------------------    ------  ------')
        fd.write('\nTot:  %7.1f  %7.1f to %-7.1f  %7.1f  %7.1f to %-7.1f   %7.1f %7.1f' %
                (sum(gen[allg, PMAX]), sum(gen[allg, QMIN]), sum(gen[allg, QMAX]),
                sum(gen[ong, PMAX]), sum(gen[ong, QMIN]), sum(gen[ong, QMAX]),
                sum(gen[ong, PG]), sum(gen[ong, QG]) ))
        fd.write('\n')
        fd.write('\nArea    Disp Load Cap       Disp Load         Fixed Load        Total Load')
        fd.write('\n Num      MW     MVAr       MW     MVAr       MW     MVAr       MW     MVAr')
        fd.write('\n----    ------  ------    ------  ------    ------  ------    ------  ------')
        Qlim = (gen[:, QMIN] == 0) * gen[:, QMAX] + (gen[:, QMAX] == 0) * gen[:, QMIN]
        for i in range(len(s_areas)):
            a = s_areas[i]
            ildon = find((bus[e2i[gen[:, GEN_BUS].astype(int)], BUS_AREA] == a) & (gen[:, GEN_STATUS] > 0) & isload(gen))
            inzld = find((bus[:, BUS_AREA] == a) & logical_or(bus[:, PD], bus[:, QD]))
            fd.write('\n%3d    %7.1f %7.1f   %7.1f %7.1f   %7.1f %7.1f   %7.1f %7.1f' %
                (a, -sum(gen[ildon, PMIN]),
                -sum(Qlim[ildon]),
                -sum(gen[ildon, PG]), -sum(gen[ildon, QG]),
                sum(bus[inzld, PD]), sum(bus[inzld, QD]),
                -sum(gen[ildon, PG]) + sum(bus[inzld, PD]),
                -sum(gen[ildon, QG]) + sum(bus[inzld, QD]) ))

        fd.write('\n----    ------  ------    ------  ------    ------  ------    ------  ------')
        fd.write('\nTot:   %7.1f %7.1f   %7.1f %7.1f   %7.1f %7.1f   %7.1f %7.1f' %
                (-sum(gen[onld, PMIN]),
                -sum(Qlim[onld]),
                -sum(gen[onld, PG]), -sum(gen[onld, QG]),
                sum(bus[nzld, PD]), sum(bus[nzld, QD]),
                -sum(gen[onld, PG]) + sum(bus[nzld, PD]),
                -sum(gen[onld, QG]) + sum(bus[nzld, QD])) )
        fd.write('\n')
        fd.write('\nArea      Shunt Inj        Branch      Series Losses      Net Export')
        fd.write('\n Num      MW     MVAr     Charging      MW     MVAr       MW     MVAr')
        fd.write('\n----    ------  ------    --------    ------  ------    ------  ------')
        for i in range(len(s_areas)):
            a = s_areas[i]
            inzsh   = find((bus[:, BUS_AREA] == a) & logical_or(bus[:, GS], bus[:, BS]))
            ibrch   = find((bus[e2i[branch[:, F_BUS].astype(int)], BUS_AREA] == a) & (bus[e2i[branch[:, T_BUS].astype(int)], BUS_AREA] == a) & branch[:, BR_STATUS].astype(bool))
            in_tie  = find((bus[e2i[branch[:, F_BUS].astype(int)], BUS_AREA] != a) & (bus[e2i[branch[:, T_BUS].astype(int)], BUS_AREA] == a) & branch[:, BR_STATUS].astype(bool))
            out_tie = find((bus[e2i[branch[:, F_BUS].astype(int)], BUS_AREA] == a) & (bus[e2i[branch[:, T_BUS].astype(int)], BUS_AREA] != a) & branch[:, BR_STATUS].astype(bool))
            fd.write('\n%3d    %7.1f %7.1f    %7.1f    %7.2f %7.2f   %7.1f %7.1f' %
                (a, -sum(bus[inzsh, VM]**2 * bus[inzsh, GS]),
                 sum(bus[inzsh, VM]**2 * bus[inzsh, BS]),
                 sum(fchg[ibrch]) + sum(tchg[ibrch]) + sum(fchg[out_tie]) + sum(tchg[in_tie]),
                 sum(real(loss[ibrch])) + sum(real(loss[r_[in_tie, out_tie]])) / 2,
                 sum(imag(loss[ibrch])) + sum(imag(loss[r_[in_tie, out_tie]])) / 2,
                 sum(branch[in_tie, PT])+sum(branch[out_tie, PF]) - sum(real(loss[r_[in_tie, out_tie]])) / 2,
                 sum(branch[in_tie, QT])+sum(branch[out_tie, QF]) - sum(imag(loss[r_[in_tie, out_tie]])) / 2  ))

        fd.write('\n----    ------  ------    --------    ------  ------    ------  ------')
        fd.write('\nTot:   %7.1f %7.1f    %7.1f    %7.2f %7.2f       -       -' %
            (-sum(bus[nzsh, VM]**2 * bus[nzsh, GS]),
             sum(bus[nzsh, VM]**2 * bus[nzsh, BS]),
             sum(fchg) + sum(tchg), sum(real(loss)), sum(imag(loss)) ))
        fd.write('\n')

    ## generator data
    if OUT_GEN:
        if isOPF:
            genlamP = bus[e2i[gen[:, GEN_BUS].astype(int)], LAM_P]
            genlamQ = bus[e2i[gen[:, GEN_BUS].astype(int)], LAM_Q]

        fd.write('\n================================================================================')
        fd.write('\n|     Generator Data                                                           |')
        fd.write('\n================================================================================')
        fd.write('\n Gen   Bus   Status     Pg        Qg   ')
        if isOPF: fd.write('   Lambda ($/MVA-hr)')
        fd.write('\n  #     #              (MW)     (MVAr) ')
        if isOPF: fd.write('     P         Q    ')
        fd.write('\n----  -----  ------  --------  --------')
        if isOPF: fd.write('  --------  --------')
        for k in range(len(ong)):
            i = ong[k]
            fd.write('\n%3d %6d     %2d ' % (i, gen[i, GEN_BUS], gen[i, GEN_STATUS]))
            if (gen[i, GEN_STATUS] > 0) & logical_or(gen[i, PG], gen[i, QG]):
                fd.write('%10.2f%10.2f' % (gen[i, PG], gen[i, QG]))
            else:
                fd.write('       -         -  ')
            if isOPF: fd.write('%10.2f%10.2f' % (genlamP[i], genlamQ[i]))

        fd.write('\n                     --------  --------')
        fd.write('\n            Total: %9.2f%10.2f' % (sum(gen[ong, PG]), sum(gen[ong, QG])))
        fd.write('\n')
        if any(onld + 1):
            fd.write('\n================================================================================')
            fd.write('\n|     Dispatchable Load Data                                                   |')
            fd.write('\n================================================================================')
            fd.write('\n Gen   Bus   Status     Pd        Qd   ')
            if isOPF: fd.write('   Lambda ($/MVA-hr)')
            fd.write('\n  #     #              (MW)     (MVAr) ')
            if isOPF: fd.write('     P         Q    ')
            fd.write('\n----  -----  ------  --------  --------')
            if isOPF: fd.write('  --------  --------')
            for k in range(len(onld)):
                i = onld[k]
                fd.write('\n%3d %6d     %2d ' % (i, gen[i, GEN_BUS], gen[i, GEN_STATUS]))
                if (gen[i, GEN_STATUS] > 0) & logical_or(gen[i, PG], gen[i, QG]):
                    fd.write('%10.2f%10.2f' % (-gen[i, PG], -gen[i, QG]))
                else:
                    fd.write('       -         -  ')

                if isOPF: fd.write('%10.2f%10.2f' % (genlamP[i], genlamQ[i]))
            fd.write('\n                     --------  --------')
            fd.write('\n            Total: %9.2f%10.2f' % (-sum(gen[onld, PG]), -sum(gen[onld, QG])))
            fd.write('\n')

    ## bus data
    if OUT_BUS:
        fd.write('\n================================================================================')
        fd.write('\n|     Bus Data                                                                 |')
        fd.write('\n================================================================================')
        fd.write('\n Bus      Voltage          Generation             Load        ')
        if isOPF: fd.write('  Lambda($/MVA-hr)')
        fd.write('\n  #   Mag(pu) Ang(deg)   P (MW)   Q (MVAr)   P (MW)   Q (MVAr)')
        if isOPF: fd.write('     P        Q   ')
        fd.write('\n----- ------- --------  --------  --------  --------  --------')
        if isOPF: fd.write('  -------  -------')
        for i in range(nb):
            fd.write('\n%5d%7.3f%9.3f' % tuple(bus[i, [BUS_I, VM, VA]]))
            if bus[i, BUS_TYPE] == REF:
                fd.write('*')
            else:
                fd.write(' ')
            g  = find((gen[:, GEN_STATUS] > 0) & (gen[:, GEN_BUS] == bus[i, BUS_I]) &
                        ~isload(gen))
            ld = find((gen[:, GEN_STATUS] > 0) & (gen[:, GEN_BUS] == bus[i, BUS_I]) &
                        isload(gen))
            if any(g + 1):
                fd.write('%9.2f%10.2f' % (sum(gen[g, PG]), sum(gen[g, QG])))
            else:
                fd.write('      -         -  ')

            if logical_or(bus[i, PD], bus[i, QD]) | any(ld + 1):
                if any(ld + 1):
                    fd.write('%10.2f*%9.2f*' % (bus[i, PD] - sum(gen[ld, PG]),
                                                bus[i, QD] - sum(gen[ld, QG])))
                else:
                    fd.write('%10.2f%10.2f ' % tuple(bus[i, [PD, QD]]))
            else:
                fd.write('       -         -   ')
            if isOPF:
                fd.write('%9.3f' % bus[i, LAM_P])
                if abs(bus[i, LAM_Q]) > ptol:
                    fd.write('%8.3f' % bus[i, LAM_Q])
                else:
                    fd.write('     -')
        fd.write('\n                        --------  --------  --------  --------')
        fd.write('\n               Total: %9.2f %9.2f %9.2f %9.2f' %
            (sum(gen[ong, PG]), sum(gen[ong, QG]),
             sum(bus[nzld, PD]) - sum(gen[onld, PG]),
             sum(bus[nzld, QD]) - sum(gen[onld, QG])))
        fd.write('\n')

    ## branch data
    if OUT_BRANCH:
        fd.write('\n================================================================================')
        fd.write('\n|     Branch Data                                                              |')
        fd.write('\n================================================================================')
        fd.write('\nBrnch   From   To    From Bus Injection   To Bus Injection     Loss (I^2 * Z)  ')
        fd.write('\n  #     Bus    Bus    P (MW)   Q (MVAr)   P (MW)   Q (MVAr)   P (MW)   Q (MVAr)')
        fd.write('\n-----  -----  -----  --------  --------  --------  --------  --------  --------')
        for i in range(nl):
            fd.write('\n%4d%7d%7d%10.2f%10.2f%10.2f%10.2f%10.3f%10.2f' %
                (i, branch[i, F_BUS], branch[i, T_BUS],
                     branch[i, PF], branch[i, QF], branch[i, PT], branch[i, QT],
                     loss[i].real, loss[i].imag))
        fd.write('\n                                                             --------  --------')
        fd.write('\n                                                    Total:%10.3f%10.2f' %
                (sum(real(loss)), sum(imag(loss))))
        fd.write('\n')

    ##-----  constraint data  -----
    if isOPF:
        ctol = ppopt['OPF_VIOLATION']   ## constraint violation tolerance
        ## voltage constraints
        if (not isDC) & ((OUT_V_LIM == 2) |
                         ((OUT_V_LIM == 1) &
                          (any(bus[:, VM] < bus[:, VMIN] + ctol) |
                           any(bus[:, VM] > bus[:, VMAX] - ctol) |
                           any(bus[:, MU_VMIN] > ptol) |
                           any(bus[:, MU_VMAX] > ptol)))):
            fd.write('\n================================================================================')
            fd.write('\n|     Voltage Constraints                                                      |')
            fd.write('\n================================================================================')
            fd.write('\nBus #  Vmin mu    Vmin    |V|   Vmax    Vmax mu')
            fd.write('\n-----  --------   -----  -----  -----   --------')
            for i in range(nb):
                if (OUT_V_LIM == 2) | ((OUT_V_LIM == 1) &
                             ((bus[i, VM] < bus[i, VMIN] + ctol) |
                              (bus[i, VM] > bus[i, VMAX] - ctol) |
                              (bus[i, MU_VMIN] > ptol) |
                              (bus[i, MU_VMAX] > ptol))):
                    fd.write('\n%5d' % bus[i, BUS_I])
                    if ((bus[i, VM] < bus[i, VMIN] + ctol) |
                            (bus[i, MU_VMIN] > ptol)):
                        fd.write('%10.3f' % bus[i, MU_VMIN])
                    else:
                        fd.write('      -   ')

                    fd.write('%8.3f%7.3f%7.3f' % tuple(bus[i, [VMIN, VM, VMAX]]))
                    if (bus[i, VM] > bus[i, VMAX] - ctol) | (bus[i, MU_VMAX] > ptol):
                        fd.write('%10.3f' % bus[i, MU_VMAX])
                    else:
                        fd.write('      -    ')
            fd.write('\n')

        ## generator P constraints
        if (OUT_PG_LIM == 2) | \
                ((OUT_PG_LIM == 1) & (any(gen[ong, PG] < gen[ong, PMIN] + ctol) |
                                      any(gen[ong, PG] > gen[ong, PMAX] - ctol) |
                                      any(gen[ong, MU_PMIN] > ptol) |
                                      any(gen[ong, MU_PMAX] > ptol))) | \
                ((not isDC) & ((OUT_QG_LIM == 2) |
                ((OUT_QG_LIM == 1) & (any(gen[ong, QG] < gen[ong, QMIN] + ctol) |
                                      any(gen[ong, QG] > gen[ong, QMAX] - ctol) |
                                      any(gen[ong, MU_QMIN] > ptol) |
                                      any(gen[ong, MU_QMAX] > ptol))))):
            fd.write('\n================================================================================')
            fd.write('\n|     Generation Constraints                                                   |')
            fd.write('\n================================================================================')

        if (OUT_PG_LIM == 2) | ((OUT_PG_LIM == 1) &
                                 (any(gen[ong, PG] < gen[ong, PMIN] + ctol) |
                                  any(gen[ong, PG] > gen[ong, PMAX] - ctol) |
                                  any(gen[ong, MU_PMIN] > ptol) |
                                  any(gen[ong, MU_PMAX] > ptol))):
            fd.write('\n Gen   Bus                Active Power Limits')
            fd.write('\n  #     #    Pmin mu    Pmin       Pg       Pmax    Pmax mu')
            fd.write('\n----  -----  -------  --------  --------  --------  -------')
            for k in range(len(ong)):
                i = ong[k]
                if (OUT_PG_LIM == 2) | ((OUT_PG_LIM == 1) &
                            ((gen[i, PG] < gen[i, PMIN] + ctol) |
                             (gen[i, PG] > gen[i, PMAX] - ctol) |
                             (gen[i, MU_PMIN] > ptol) | (gen[i, MU_PMAX] > ptol))):
                    fd.write('\n%4d%6d ' % (i, gen[i, GEN_BUS]))
                    if (gen[i, PG] < gen[i, PMIN] + ctol) | (gen[i, MU_PMIN] > ptol):
                        fd.write('%8.3f' % gen[i, MU_PMIN])
                    else:
                        fd.write('     -  ')
                    if gen[i, PG]:
                        fd.write('%10.2f%10.2f%10.2f' % tuple(gen[i, [PMIN, PG, PMAX]]))
                    else:
                        fd.write('%10.2f       -  %10.2f' % tuple(gen[i, [PMIN, PMAX]]))
                    if (gen[i, PG] > gen[i, PMAX] - ctol) | (gen[i, MU_PMAX] > ptol):
                        fd.write('%9.3f' % gen[i, MU_PMAX])
                    else:
                        fd.write('      -  ')
            fd.write('\n')

        ## generator Q constraints
        if (not isDC) & ((OUT_QG_LIM == 2) | ((OUT_QG_LIM == 1) &
                                 (any(gen[ong, QG] < gen[ong, QMIN] + ctol) |
                                  any(gen[ong, QG] > gen[ong, QMAX] - ctol) |
                                  any(gen[ong, MU_QMIN] > ptol) |
                                  any(gen[ong, MU_QMAX] > ptol)))):
            fd.write('\nGen  Bus              Reactive Power Limits')
            fd.write('\n #    #   Qmin mu    Qmin       Qg       Qmax    Qmax mu')
            fd.write('\n---  ---  -------  --------  --------  --------  -------')
            for k in range(len(ong)):
                i = ong[k]
                if (OUT_QG_LIM == 2) | ((OUT_QG_LIM == 1) &
                            ((gen[i, QG] < gen[i, QMIN] + ctol) |
                             (gen[i, QG] > gen[i, QMAX] - ctol) |
                             (gen[i, MU_QMIN] > ptol) |
                             (gen[i, MU_QMAX] > ptol))):
                    fd.write('\n%3d%5d' % (i, gen[i, GEN_BUS]))
                    if (gen[i, QG] < gen[i, QMIN] + ctol) | (gen[i, MU_QMIN] > ptol):
                        fd.write('%8.3f' % gen[i, MU_QMIN])
                    else:
                        fd.write('     -  ')
                    if gen[i, QG]:
                        fd.write('%10.2f%10.2f%10.2f' % tuple(gen[i, [QMIN, QG, QMAX]]))
                    else:
                        fd.write('%10.2f       -  %10.2f' % tuple(gen[i, [QMIN, QMAX]]))

                    if (gen[i, QG] > gen[i, QMAX] - ctol) | (gen[i, MU_QMAX] > ptol):
                        fd.write('%9.3f' % gen[i, MU_QMAX])
                    else:
                        fd.write('      -  ')
            fd.write('\n')

        ## dispatchable load P constraints
        if (OUT_PG_LIM == 2) | (OUT_QG_LIM == 2) | \
                ((OUT_PG_LIM == 1) & (any(gen[onld, PG] < gen[onld, PMIN] + ctol) |
                                      any(gen[onld, PG] > gen[onld, PMAX] - ctol) |
                                      any(gen[onld, MU_PMIN] > ptol) |
                                      any(gen[onld, MU_PMAX] > ptol))) | \
                ((OUT_QG_LIM == 1) & (any(gen[onld, QG] < gen[onld, QMIN] + ctol) |
                                      any(gen[onld, QG] > gen[onld, QMAX] - ctol) |
                                      any(gen[onld, MU_QMIN] > ptol) |
                                      any(gen[onld, MU_QMAX] > ptol))):
            fd.write('\n================================================================================')
            fd.write('\n|     Dispatchable Load Constraints                                            |')
            fd.write('\n================================================================================')
        if (OUT_PG_LIM == 2) | ((OUT_PG_LIM == 1) &
                                 (any(gen[onld, PG] < gen[onld, PMIN] + ctol) |
                                  any(gen[onld, PG] > gen[onld, PMAX] - ctol) |
                                  any(gen[onld, MU_PMIN] > ptol) |
                                  any(gen[onld, MU_PMAX] > ptol))):
            fd.write('\nGen  Bus               Active Power Limits')
            fd.write('\n #    #   Pmin mu    Pmin       Pg       Pmax    Pmax mu')
            fd.write('\n---  ---  -------  --------  --------  --------  -------')
            for k in range(len(onld)):
                i = onld[k]
                if (OUT_PG_LIM == 2) | ((OUT_PG_LIM == 1) &
                            ((gen[i, PG] < gen[i, PMIN] + ctol) |
                             (gen[i, PG] > gen[i, PMAX] - ctol) |
                             (gen[i, MU_PMIN] > ptol) |
                             (gen[i, MU_PMAX] > ptol))):
                    fd.write('\n%3d%5d' % (i, gen[i, GEN_BUS]))
                    if (gen[i, PG] < gen[i, PMIN] + ctol) | (gen[i, MU_PMIN] > ptol):
                        fd.write('%8.3f' % gen[i, MU_PMIN])
                    else:
                        fd.write('     -  ')
                    if gen[i, PG]:
                        fd.write('%10.2f%10.2f%10.2f' % tuple(gen[i, [PMIN, PG, PMAX]]))
                    else:
                        fd.write('%10.2f       -  %10.2f' % tuple(gen[i, [PMIN, PMAX]]))

                    if (gen[i, PG] > gen[i, PMAX] - ctol) | (gen[i, MU_PMAX] > ptol):
                        fd.write('%9.3f' % gen[i, MU_PMAX])
                    else:
                        fd.write('      -  ')
            fd.write('\n')

        ## dispatchable load Q constraints
        if (not isDC) & ((OUT_QG_LIM == 2) | ((OUT_QG_LIM == 1) &
                                 (any(gen[onld, QG] < gen[onld, QMIN] + ctol) |
                                  any(gen[onld, QG] > gen[onld, QMAX] - ctol) |
                                  any(gen[onld, MU_QMIN] > ptol) |
                                  any(gen[onld, MU_QMAX] > ptol)))):
            fd.write('\nGen  Bus              Reactive Power Limits')
            fd.write('\n #    #   Qmin mu    Qmin       Qg       Qmax    Qmax mu')
            fd.write('\n---  ---  -------  --------  --------  --------  -------')
            for k in range(len(onld)):
                i = onld[k]
                if (OUT_QG_LIM == 2) | ((OUT_QG_LIM == 1) &
                            ((gen[i, QG] < gen[i, QMIN] + ctol) |
                             (gen[i, QG] > gen[i, QMAX] - ctol) |
                             (gen[i, MU_QMIN] > ptol) |
                             (gen[i, MU_QMAX] > ptol))):
                    fd.write('\n%3d%5d' % (i, gen[i, GEN_BUS]))
                    if (gen[i, QG] < gen[i, QMIN] + ctol) | (gen[i, MU_QMIN] > ptol):
                        fd.write('%8.3f' % gen[i, MU_QMIN])
                    else:
                        fd.write('     -  ')

                    if gen[i, QG]:
                        fd.write('%10.2f%10.2f%10.2f' % tuple(gen[i, [QMIN, QG, QMAX]]))
                    else:
                        fd.write('%10.2f       -  %10.2f' % tuple(gen[i, [QMIN, QMAX]]))

                    if (gen[i, QG] > gen[i, QMAX] - ctol) | (gen[i, MU_QMAX] > ptol):
                        fd.write('%9.3f' % gen[i, MU_QMAX])
                    else:
                        fd.write('      -  ')
            fd.write('\n')

        ## line flow constraints
        if (ppopt['OPF_FLOW_LIM'] == 1) | isDC:  ## P limit
            Ff = branch[:, PF]
            Ft = branch[:, PT]
            strg = '\n  #     Bus    Pf  mu     Pf      |Pmax|      Pt      Pt  mu   Bus'
        elif ppopt['OPF_FLOW_LIM'] == 2:   ## |I| limit
            Ff = abs( (branch[:, PF] + 1j * branch[:, QF]) / V[e2i[branch[:, F_BUS].astype(int)]] )
            Ft = abs( (branch[:, PT] + 1j * branch[:, QT]) / V[e2i[branch[:, T_BUS].astype(int)]] )
            strg = '\n  #     Bus   |If| mu    |If|     |Imax|     |It|    |It| mu   Bus'
        else:                ## |S| limit
            Ff = abs(branch[:, PF] + 1j * branch[:, QF])
            Ft = abs(branch[:, PT] + 1j * branch[:, QT])
            strg = '\n  #     Bus   |Sf| mu    |Sf|     |Smax|     |St|    |St| mu   Bus'

        if (OUT_LINE_LIM == 2) | ((OUT_LINE_LIM == 1) &
                            (any((branch[:, RATE_A] != 0) & (abs(Ff) > branch[:, RATE_A] - ctol)) |
                             any((branch[:, RATE_A] != 0) & (abs(Ft) > branch[:, RATE_A] - ctol)) |
                             any(branch[:, MU_SF] > ptol) |
                             any(branch[:, MU_ST] > ptol))):
            fd.write('\n================================================================================')
            fd.write('\n|     Branch Flow Constraints                                                  |')
            fd.write('\n================================================================================')
            fd.write('\nBrnch   From     "From" End        Limit       "To" End        To')
            fd.write(strg)
            fd.write('\n-----  -----  -------  --------  --------  --------  -------  -----')
            for i in range(nl):
                if (OUT_LINE_LIM == 2) | ((OUT_LINE_LIM == 1) &
                       (((branch[i, RATE_A] != 0) & (abs(Ff[i]) > branch[i, RATE_A] - ctol)) |
                        ((branch[i, RATE_A] != 0) & (abs(Ft[i]) > branch[i, RATE_A] - ctol)) |
                        (branch[i, MU_SF] > ptol) | (branch[i, MU_ST] > ptol))):
                    fd.write('\n%4d%7d' % (i, branch[i, F_BUS]))
                    if (Ff[i] > branch[i, RATE_A] - ctol) | (branch[i, MU_SF] > ptol):
                        fd.write('%10.3f' % branch[i, MU_SF])
                    else:
                        fd.write('      -   ')

                    fd.write('%9.2f%10.2f%10.2f' %
                        (Ff[i], branch[i, RATE_A], Ft[i]))
                    if (Ft[i] > branch[i, RATE_A] - ctol) | (branch[i, MU_ST] > ptol):
                        fd.write('%10.3f' % branch[i, MU_ST])
                    else:
                        fd.write('      -   ')
                    fd.write('%6d' % branch[i, T_BUS])
            fd.write('\n')

    ## execute userfcn callbacks for 'printpf' stage
    if have_results_struct and 'userfcn' in results:
        if not isOPF:  ## turn off option for all constraints if it isn't an OPF
            ppopt = ppoption(ppopt, 'OUT_ALL_LIM', 0)
        run_userfcn(results["userfcn"], 'printpf', results, fd, ppopt)
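
For orientation, this report writer is normally driven from a solved case rather than called with hand-built matrices; the runpf listing further down does exactly that with printpf(results, stdout, ppopt). A minimal sketch (the 'case9' case name and the option value are illustrative):

from sys import stdout

from pypower.api import runpf, ppoption, printpf

ppopt = ppoption(OUT_ALL=1)               # request the full report
results, success = runpf('case9', ppopt)  # solve a small bundled case
printpf(results, stdout, ppopt)           # emits the sections shown above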

Example 30

Project: PYPOWER Source File: qps_mosek.py
def qps_mosek(H, c=None, A=None, l=None, u=None, xmin=None, xmax=None,
              x0=None, opt=None):
    """Quadratic Program Solver based on MOSEK.

    A wrapper function providing a PYPOWER standardized interface for using
    MOSEKOPT to solve the following QP (quadratic programming) problem::

        min 1/2 x'*H*x + c'*x
         x

    subject to::

        l <= A*x <= u       (linear constraints)
        xmin <= x <= xmax   (variable bounds)

    Inputs (all optional except C{H}, C{C}, C{A} and C{L}):
        - C{H} : matrix (possibly sparse) of quadratic cost coefficients
        - C{C} : vector of linear cost coefficients
        - C{A, l, u} : define the optional linear constraints. Default
        values for the elements of L and U are -Inf and Inf, respectively.
        - xmin, xmax : optional lower and upper bounds on the
        C{x} variables, defaults are -Inf and Inf, respectively.
        - C{x0} : optional starting value of optimization vector C{x}
        - C{opt} : optional options structure with the following fields,
        all of which are also optional (default values shown in parentheses)
            - C{verbose} (0) - controls level of progress output displayed
                - 0 = no progress output
                - 1 = some progress output
                - 2 = verbose progress output
            - C{max_it} (0) - maximum number of iterations allowed
                - 0 = use algorithm default
            - C{mosek_opt} - options struct for MOSEK, values in
            C{verbose} and C{max_it} override these options
        - C{problem} : The inputs can alternatively be supplied in a single
        C{problem} struct with fields corresponding to the input arguments
        described above: C{H, c, A, l, u, xmin, xmax, x0, opt}

    Outputs:
        - C{x} : solution vector
        - C{f} : final objective function value
        - C{exitflag} : exit flag
              - 1 = success
              - 0 = terminated at maximum number of iterations
              - -1 = primal or dual infeasible
              - < 0 = the negative of the MOSEK return code
        - C{output} : output dict with the following fields:
            - C{r} - MOSEK return code
            - C{res} - MOSEK result dict
        - C{lmbda} : dict containing the Lagrange and Kuhn-Tucker
        multipliers on the constraints, with fields:
            - C{mu_l} - lower (left-hand) limit on linear constraints
            - C{mu_u} - upper (right-hand) limit on linear constraints
            - C{lower} - lower bound on optimization variables
            - C{upper} - upper bound on optimization variables

    @author: Ray Zimmerman (PSERC Cornell)
    """
    ##----- input argument handling  -----
    ## gather inputs
    if isinstance(H, dict):       ## problem struct
        p = H
    else:                                ## individual args
        p = {'H': H, 'c': c, 'A': A, 'l': l, 'u': u}
        if xmin is not None:
            p['xmin'] = xmin
        if xmax is not None:
            p['xmax'] = xmax
        if x0 is not None:
            p['x0'] = x0
        if opt is not None:
            p['opt'] = opt

    ## define nx, set default values for H and c
    if 'H' not in p or len(p['H']) == 0 or not any(any(p['H'])):
        if ('A' not in p or len(p['A']) == 0) and \
                ('xmin' not in p or len(p['xmin']) == 0) and \
                ('xmax' not in p or len(p['xmax']) == 0):
            stderr.write('qps_mosek: LP problem must include constraints or variable bounds\n')
        else:
            if 'A' in p and len(p['A']) > 0:
                nx = shape(p['A'])[1]
            elif 'xmin' in p and len(p['xmin']) > 0:
                nx = len(p['xmin'])
            else:    # if isfield(p, 'xmax') && ~isempty(p.xmax)
                nx = len(p['xmax'])
        p['H'] = sparse((nx, nx))
        qp = 0
    else:
        nx = shape(p['H'])[0]
        qp = 1

    if 'c' not in p or len(p['c']) == 0:
        p['c'] = zeros(nx)

    if 'x0' not in p or len(p['x0']) == 0:
        p['x0'] = zeros(nx)

    ## default options
    if 'opt' not in p:
        p['opt'] = []

    if 'verbose' in p['opt']:
        verbose = p['opt']['verbose']
    else:
        verbose = 0

    if 'max_it' in p['opt']:
        max_it = p['opt']['max_it']
    else:
        max_it = 0

    if 'mosek_opt' in p['opt']:
        mosek_opt = mosek_options(p['opt']['mosek_opt'])
    else:
        mosek_opt = mosek_options()

    if max_it:
        mosek_opt['MSK_IPAR_INTPNT_MAX_ITERATIONS'] = max_it

    if qp:
        mosek_opt['MSK_IPAR_OPTIMIZER'] = 0   ## default solver only for QP

    ## set up problem struct for MOSEK
    prob = {}
    prob['c'] = p['c']
    if qp:
        prob['qosubi'], prob['qosubj'], prob['qoval'] = find(tril(sparse(p['H'])))

    if 'A' in p and len(p['A']) > 0:
        prob['a'] = sparse(p['A'])

    if 'l' in p and len(p['l']) > 0:
        prob['blc'] = p['l']

    if 'u' in p and len(p['u']) > 0:
        prob['buc'] = p['u']

    if 'xmin' in p and len(p['xmin']) > 0:
        prob['blx'] = p['xmin']

    if 'xmax' in p and len(p['xmax']) > 0:
        prob['bux'] = p['xmax']

    ## A is not allowed to be empty
    if 'a' not in prob or len(prob['a']) == 0:
        unconstrained = True
        prob['a'] = sparse(([1.0], ([0], [0])), (1, nx))   ## single dummy row
        prob['blc'] = -Inf
        prob['buc'] =  Inf
    else:
        unconstrained = False

    ##-----  run optimization  -----
    if verbose:
        methods = [
            'default',
            'interior point',
            '<default>',
            '<default>',
            'primal simplex',
            'dual simplex',
            'primal dual simplex',
            'automatic simplex',
            '<default>',
            '<default>',
            'concurrent'
        ]
        if not qp:    ## p['H'] is empty or all zeros (see flag set above)
            lpqp = 'LP'
        else:
            lpqp = 'QP'

        # (this code is also in mpver.m)
        # MOSEK Version 6.0.0.93 (Build date: 2010-10-26 13:03:27)
        # MOSEK Version 6.0.0.106 (Build date: 2011-3-17 10:46:54)
#        pat = 'Version (\.*\d)+.*Build date: (\d\d\d\d-\d\d-\d\d)';
        pat = r'Version (\.*\d)+.*Build date: (\d+-\d+-\d+)'
        s, e, tE, m, t = re.compile(eval('mosekopt'), pat)
        if len(t) == 0:
            vn = '<unknown>'
        else:
            vn = t[0][0]

        print('MOSEK Version %s -- %s %s solver\n' %
              (vn, methods[mosek_opt['MSK_IPAR_OPTIMIZER'] + 1], lpqp))

    cmd = 'minimize echo(%d)' % verbose
    r, res = mosekopt(cmd, prob, mosek_opt)

    ##-----  repackage results  -----
    if 'sol' in res:
        if 'bas' in res['sol']:
            sol = res['sol']['bas']
        else:
            sol = res['sol']['itr']
        x = sol['xx']
    else:
        sol = array([])
        x = array([])

    ##-----  process return codes  -----
    if 'symbcon' in res:
        sc = res['symbcon']
    else:
        r2, res2 = mosekopt('symbcon echo(0)')
        sc = res2['symbcon']

    eflag = -r
    msg = ''
    if r == sc['MSK_RES_OK']:
        if len(sol) > 0:
#            if sol['solsta'] == sc.MSK_SOL_STA_OPTIMAL:
            if sol['solsta'] == 'OPTIMAL':
                msg = 'The solution is optimal.'
                eflag = 1
            else:
                eflag = -1
#                if sol['prosta'] == sc['MSK_PRO_STA_PRIM_INFEAS']:
                if sol['prosta'] == 'PRIMAL_INFEASIBLE':
                    msg = 'The problem is primal infeasible.'
#                elif sol['prosta'] == sc['MSK_PRO_STA_DUAL_INFEAS']:
                elif sol['prosta'] == 'DUAL_INFEASIBLE':
                    msg = 'The problem is dual infeasible.'
                else:
                    msg = sol['solsta']

    elif r == sc['MSK_RES_TRM_MAX_ITERATIONS']:
        eflag = 0
        msg = 'The optimizer terminated at the maximum number of iterations.'
    else:
        if 'rmsg' in res and 'rcodestr' in res:
            msg = '%s : %s' % (res['rcodestr'], res['rmsg'])
        else:
            msg = 'MOSEK return code = %d' % r

    ## always alert user if license is expired
    if (verbose or r == 1001) and len(msg) > 0:
        stdout.write('%s\n' % msg)

    ##-----  repackage results  -----
    if r == 0:
        f = p['c'].dot(x)
        if qp:
            f = 0.5 * x.dot(p['H'] * x) + f
    else:
        f = array([])

    output = {}
    output['r'] = r
    output['res'] = res

    if 'sol' in res:
        lmbda = {}
        lmbda['lower'] = sol['slx']
        lmbda['upper'] = sol['sux']
        lmbda['mu_l']  = sol['slc']
        lmbda['mu_u']  = sol['suc']
        if unconstrained:
            lmbda['mu_l']  = array([])
            lmbda['mu_u']  = array([])
    else:
        lmbda = array([])

    return x, f, eflag, output, lmbda
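
A minimal usage sketch for this wrapper (it presumes the MOSEK Python bindings are installed and importable; the QP data below are illustrative, with numpy.zeros supplying the lower bounds and the starting point):

from numpy import array, zeros

from pypower.qps_mosek import qps_mosek

# minimize 1/2 x'*H*x + c'*x  subject to  x1 + x2 = 1 and x >= 0
H = array([[2.0, 0.0],
           [0.0, 2.0]])
c = array([-2.0, -6.0])
A = array([[1.0, 1.0]])
l = array([1.0])
u = array([1.0])
xmin = zeros(2)    # lower variable bounds
x0 = zeros(2)      # starting point
x, f, eflag, output, lmbda = qps_mosek(H, c, A, l, u, xmin, None, x0)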

Example 31

Project: pyresample Source File: kd_tree.py
def get_sample_from_neighbour_info(resample_type, output_shape, data,
                                   valid_input_index, valid_output_index,
                                   index_array, distance_array=None,
                                   weight_funcs=None, fill_value=0,
                                   with_uncert=False):
    """Resamples swath based on neighbour info

    Parameters
    ----------
    resample_type : {'nn', 'custom'}
        'nn': Use nearest neighbour resampling
        'custom': Resample based on weight_funcs
    output_shape : (int, int)
        Shape of output as (rows, cols)
    data : numpy array
        Source data
    valid_input_index : numpy array
        valid_input_index from get_neighbour_info
    valid_output_index : numpy array
        valid_output_index from get_neighbour_info
    index_array : numpy array
        index_array from get_neighbour_info
    distance_array : numpy array, optional
        distance_array from get_neighbour_info
        Not needed for 'nn' resample type
    weight_funcs : list of function objects or function object, optional       
        List of weight functions f(dist) to use for the weighting 
        of each channel 1 to k.
        If only one channel is resampled weight_funcs is
        a single function object.
        Must be supplied when using 'custom' resample type
    fill_value : int or None, optional
        Set undetermined pixels to this value.
        If fill_value is None a masked array is returned 
        with undetermined pixels masked

    Returns
    -------
    result : numpy array 
        Source data resampled to target geometry
    """

    if data.ndim > 2 and data.shape[0] * data.shape[1] == valid_input_index.size:
        data = data.reshape(data.shape[0] * data.shape[1], data.shape[2])
    elif data.shape[0] != valid_input_index.size:
        data = data.ravel()

    if valid_input_index.size != data.shape[0]:
        raise ValueError('Mismatch between geometry and dataset')

    is_multi_channel = (data.ndim > 1)
    valid_input_size = valid_input_index.sum()
    valid_output_size = valid_output_index.sum()

    # Handle empty result set
    if valid_input_size == 0 or valid_output_size == 0:
        if is_multi_channel:
            output_shape = list(output_shape)
            output_shape.append(data.shape[1])

        if fill_value is None:
            # Use masked array for fill values
            return np.ma.array(np.zeros(output_shape, data.dtype),
                               mask=np.ones(output_shape, dtype=bool))
        else:
            # Return fill values for all pixels
            return np.ones(output_shape, dtype=data.dtype) * fill_value

    # Get size of output and reduced input
    input_size = valid_input_size
    if len(output_shape) > 1:
        output_size = output_shape[0] * output_shape[1]
    else:
        output_size = output_shape[0]

    # Check validity of input
    if not isinstance(data, np.ndarray):
        raise TypeError('data must be numpy array')
    elif valid_input_index.ndim != 1:
        raise TypeError('valid_index must be one dimensional array')
    elif data.shape[0] != valid_input_index.size:
        raise TypeError('Not the same number of datapoints in '
                        'valid_input_index and data')

    valid_types = ('nn', 'custom')
    if resample_type not in valid_types:
        raise TypeError('Invalid resampling type: %s' % resample_type)

    if resample_type == 'custom' and weight_funcs is None:
        raise ValueError('weight_funcs must be supplied when using '
                         'custom resampling')

    if not isinstance(fill_value, (int, float)) and fill_value is not None:    # Python 3: 'long' folded into int
        raise TypeError('fill_value must be number or None')

    if index_array.ndim == 1:
        neighbours = 1
    else:
        neighbours = index_array.shape[1]
        if resample_type == 'nn':
            raise ValueError('index_array contains more neighbours than '
                             'just the nearest')

    # Reduce data
    new_data = data[valid_input_index]

    # Nearest neighbour resampling should conserve data type
    # Get data type
    conserve_input_data_type = False
    if resample_type == 'nn':
        conserve_input_data_type = True
        input_data_type = new_data.dtype

    # Handle masked array input
    is_masked_data = False
    if np.ma.is_masked(new_data):
        # Add the mask as channels to the dataset
        is_masked_data = True
        new_data = np.column_stack((new_data.data, new_data.mask))

    if new_data.ndim > 1:  # Multiple channels or masked input
        output_shape = list(output_shape)
        output_shape.append(new_data.shape[1])

    # Prepare weight_funcs argument for handling masked data
    if weight_funcs is not None and is_masked_data:
        if is_multi_channel:
            weight_funcs = weight_funcs * 2
        else:
            weight_funcs = (weight_funcs,) * 2

    # Handle request for masking instead of using fill values
    use_masked_fill_value = False
    if fill_value is None:
        use_masked_fill_value = True
        fill_value = _get_fill_mask_value(new_data.dtype)

    # Resample based on kd-tree query result
    if resample_type == 'nn' or neighbours == 1:
        # Get nearest neighbour using array indexing
        index_mask = (index_array == input_size)
        new_index_array = np.where(index_mask, 0, index_array)
        result = new_data[new_index_array]
        result[index_mask] = fill_value
    else:
        # Calculate result using weighting.
        # Note: the code below has low readability in order
        #       to avoid looping over numpy arrays

        # Get neighbours and masks of valid indices
        ch_neighbour_list = []
        index_mask_list = []
        for i in range(neighbours):  # Iterate over number of neighbours
            # Make working copy neighbour index and
            # set out of bounds indices to zero
            index_ni = index_array[:, i].copy()
            index_mask_ni = (index_ni == input_size)
            index_ni[index_mask_ni] = 0

            # Get channel data for the corresponding indices
            ch_ni = new_data[index_ni]
            ch_neighbour_list.append(ch_ni)
            index_mask_list.append(index_mask_ni)

        # Calculate weights
        weight_list = []
        for i in range(neighbours):  # Iterate over number of neighbours
            # Make working copy of neighbour distances and
            # set out of bounds distance to 1 in order to avoid numerical Inf
            distance = distance_array[:, i].copy()
            distance[index_mask_list[i]] = 1

            if new_data.ndim > 1:  # More than one channel in data set.
                # Calculate weights for each channel
                weights = []
                num_weights = valid_output_index.sum()
                num_channels = new_data.shape[1]
                for j in range(num_channels):
                    calc_weight = weight_funcs[j](distance)
                    # Turn a scalar weight into a numpy array
                    # (no effect if calc_weight already is an array)
                    expanded_calc_weight = np.ones(num_weights) * calc_weight
                    weights.append(expanded_calc_weight)

                # Collect weights for all channels for neighbour number
                weight_list.append(np.column_stack(weights))
            else:  # Only one channel
                weights = weight_funcs(distance)
                weight_list.append(weights)

        result = 0
        norm = 0
        count = 0
        norm_sqr = 0
        stddev = 0

        # Calculate result
        for i in range(neighbours):  # Iterate over number of neighbours
            # Find invalid indices to be masked off from the calculation
            if new_data.ndim > 1:  # More than one channel in data set.
                inv_index_mask = np.expand_dims(
                    np.invert(index_mask_list[i]), axis=1)
            else:  # Only one channel
                inv_index_mask = np.invert(index_mask_list[i])

            # Aggregate result and norm
            weights_tmp = inv_index_mask * weight_list[i]
            result += weights_tmp * ch_neighbour_list[i]
            norm += weights_tmp

        # Normalize result and set fillvalue
        result_valid_index = (norm > 0)
        result[result_valid_index] /= norm[result_valid_index]

        if with_uncert:  # Calculate uncertainties
            # 2. pass to calculate standard deviation
            for i in range(neighbours):  # Iterate over number of neighbours
                # Find invalid indices to be masked off from the calculation
                if new_data.ndim > 1:  # More than one channel in data set.
                    inv_index_mask = np.expand_dims(
                        np.invert(index_mask_list[i]), axis=1)
                else:  # Only one channel
                    inv_index_mask = np.invert(index_mask_list[i])

                # Aggregate stddev information
                weights_tmp = inv_index_mask * weight_list[i]
                count += inv_index_mask
                norm_sqr += weights_tmp ** 2
                values = inv_index_mask * ch_neighbour_list[i]
                stddev += weights_tmp * (values - result) ** 2

            # Calculate final stddev
            new_valid_index = (count > 1)
            v1 = norm[new_valid_index]
            v2 = norm_sqr[new_valid_index]
            stddev[new_valid_index] = np.sqrt(
                (v1 / (v1 ** 2 - v2)) * stddev[new_valid_index])
            stddev[~new_valid_index] = np.nan

        # Add fill values
        result[np.invert(result_valid_index)] = fill_value

    # Create full result
    if new_data.ndim > 1:  # More than one channel
        output_raw_shape = ((output_size, new_data.shape[1]))
    else:  # One channel
        output_raw_shape = output_size

    full_result = np.ones(output_raw_shape) * fill_value
    full_result[valid_output_index] = result
    result = full_result

    if with_uncert:  # Add fill values for uncertainty
        full_stddev = np.ones(output_raw_shape) * np.nan
        full_count = np.zeros(output_raw_shape)
        full_stddev[valid_output_index] = stddev
        full_count[valid_output_index] = count
        stddev = full_stddev
        count = full_count

        stddev = stddev.reshape(output_shape)
        count = count.reshape(output_shape)

        if is_masked_data:  # Ignore uncert computation of masks
            stddev = _remask_data(stddev, is_to_be_masked=False)
            count = _remask_data(count, is_to_be_masked=False)

        # Set masks for invalid stddev
        stddev = np.ma.array(stddev, mask=np.isnan(stddev))

    # Reshape resampled data to correct shape
    result = result.reshape(output_shape)

    # Remap mask channels to create masked output
    if is_masked_data:
        result = _remask_data(result)

    # Create masking of fill values
    if use_masked_fill_value:
        result = np.ma.masked_equal(result, fill_value)

    # Set output data type to input data type if relevant
    if conserve_input_data_type:
        result = result.astype(input_data_type)

    if with_uncert:
        if np.ma.isMA(result):
            stddev = np.ma.array(stddev, mask=(result.mask | stddev.mask))
            count = np.ma.array(count, mask=result.mask)
        return result, stddev, count
    else:
        return result
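
A short sketch of the two-step workflow this function belongs to: query the kd-tree once with get_neighbour_info, then resample any number of datasets on the same geometry. The toy swath and grid below are illustrative; numpy.zeros supplies the swath longitudes:

import numpy as np

from pyresample import geometry, kd_tree

lons = np.zeros(100)                        # swath along the Greenwich meridian
lats = np.linspace(50.0, 60.0, 100)
data = np.arange(100, dtype=np.float64)
swath_def = geometry.SwathDefinition(lons=lons, lats=lats)

area_def = geometry.AreaDefinition(         # coarse 10x10 lat/lon target grid
    'grid', 'Toy grid', 'grid',
    {'proj': 'longlat', 'datum': 'WGS84'},
    10, 10, (-5.0, 49.0, 5.0, 61.0))

valid_in, valid_out, index_array, _ = kd_tree.get_neighbour_info(
    swath_def, area_def, radius_of_influence=50000, neighbours=1)
result = kd_tree.get_sample_from_neighbour_info(
    'nn', area_def.shape, data, valid_in, valid_out, index_array,
    fill_value=None)                        # None -> masked array output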

Example 32

Project: PYPOWER Source File: runpf.py
def runpf(casedata=None, ppopt=None, fname='', solvedcase=''):
    """Runs a power flow.

    Runs a power flow [full AC Newton's method by default] and optionally
    returns the solved values in the data matrices, a flag which is C{True} if
    the algorithm was successful in finding a solution, and the elapsed
    time in seconds. All input arguments are optional. If C{casename} is
    provided it specifies the name of the input data file or dict
    containing the power flow data. The default value is 'case9'.

    If C{ppopt} is provided, it overrides the default PYPOWER options
    vector and can be used to specify the solution algorithm and output
    options among other things. If the 3rd argument is given the pretty
    printed output will be appended to the file whose name is given in
    C{fname}. If C{solvedcase} is specified the solved case will be written
    to a case file in PYPOWER format with the specified name. If C{solvedcase}
    ends with '.mat' it saves the case as a MAT-file otherwise it saves it
    as a Python-file.

    If the C{ENFORCE_Q_LIMS} option is set to C{True} [default is false] then
    if any generator reactive power limit is violated after running the AC
    power flow, the corresponding bus is converted to a PQ bus, with Qg at
    the limit, and the case is re-run. The voltage magnitude at the bus
    will deviate from the specified value in order to satisfy the reactive
    power limit. If the reference bus is converted to PQ, the first
    remaining PV bus will be used as the slack bus for the next iteration.
    This may result in the real power output at this generator being
    slightly off from the specified values.

    Enforcing of generator Q limits inspired by contributions from Mu Lin,
    Lincoln University, New Zealand (1/14/05).

    @author: Ray Zimmerman (PSERC Cornell)
    """
    ## default arguments
    if casedata is None:
        casedata = join(dirname(__file__), 'case9')
    ppopt = ppoption(ppopt)

    ## options
    verbose = ppopt["VERBOSE"]
    qlim = ppopt["ENFORCE_Q_LIMS"]  ## enforce Q limits on gens?
    dc = ppopt["PF_DC"]             ## use DC formulation?

    ## read data
    ppc = loadcase(casedata)

    ## add zero columns to branch for flows if needed
    if ppc["branch"].shape[1] < QT:
        ppc["branch"] = c_[ppc["branch"],
                           zeros((ppc["branch"].shape[0],
                                  QT - ppc["branch"].shape[1] + 1))]

    ## convert to internal indexing
    ppc = ext2int(ppc)
    baseMVA, bus, gen, branch = \
        ppc["baseMVA"], ppc["bus"], ppc["gen"], ppc["branch"]

    ## get bus index lists of each type of bus
    ref, pv, pq = bustypes(bus, gen)

    ## generator info
    on = find(gen[:, GEN_STATUS] > 0)      ## which generators are on?
    gbus = gen[on, GEN_BUS].astype(int)    ## what buses are they at?

    ##-----  run the power flow  -----
    t0 = time()
    if verbose > 0:
        v = ppver('all')
        stdout.write('PYPOWER Version %s, %s' % (v["Version"], v["Date"]))

    if dc:                               # DC formulation
        if verbose:
            stdout.write(' -- DC Power Flow\n')

        ## initial state
        Va0 = bus[:, VA] * (pi / 180)

        ## build B matrices and phase shift injections
        B, Bf, Pbusinj, Pfinj = makeBdc(baseMVA, bus, branch)

        ## compute complex bus power injections [generation - load]
        ## adjusted for phase shifters and real shunts
        Pbus = makeSbus(baseMVA, bus, gen).real - Pbusinj - bus[:, GS] / baseMVA

        ## "run" the power flow
        Va = dcpf(B, Pbus, Va0, ref, pv, pq)

        ## update data matrices with solution
        branch[:, [QF, QT]] = zeros((branch.shape[0], 2))
        branch[:, PF] = (Bf * Va + Pfinj) * baseMVA
        branch[:, PT] = -branch[:, PF]
        bus[:, VM] = ones(bus.shape[0])
        bus[:, VA] = Va * (180 / pi)
        ## update Pg for slack generator (1st gen at ref bus)
        ## (note: other gens at ref bus are accounted for in Pbus)
        ##      Pg = Pinj + Pload + Gs
        ##      newPg = oldPg + newPinj - oldPinj
        refgen = zeros(len(ref), dtype=int)
        for k in range(len(ref)):
            temp = find(gbus == ref[k])
            refgen[k] = on[temp[0]]
        gen[refgen, PG] = gen[refgen, PG] + (B[ref, :] * Va - Pbus[ref]) * baseMVA

        success = 1
    else:                                ## AC formulation
        alg = ppopt['PF_ALG']
        if verbose > 0:
            if alg == 1:
                solver = 'Newton'
            elif alg == 2:
                solver = 'fast-decoupled, XB'
            elif alg == 3:
                solver = 'fast-decoupled, BX'
            elif alg == 4:
                solver = 'Gauss-Seidel'
            else:
                solver = 'unknown'
            print(' -- AC Power Flow (%s)\n' % solver)

        ## initial state
        # V0    = ones(bus.shape[0])            ## flat start
        V0  = bus[:, VM] * exp(1j * pi/180 * bus[:, VA])
        V0[gbus] = gen[on, VG] / abs(V0[gbus]) * V0[gbus]

        if qlim:
            ref0 = ref                         ## save index and angle of
            Varef0 = bus[ref0, VA]             ##   original reference bus(es)
            limited = []                       ## list of indices of gens @ Q lims
            fixedQg = zeros(gen.shape[0])      ## Qg of gens at Q limits

        repeat = True
        while repeat:
            ## build admittance matrices
            Ybus, Yf, Yt = makeYbus(baseMVA, bus, branch)

            ## compute complex bus power injections [generation - load]
            Sbus = makeSbus(baseMVA, bus, gen)

            ## run the power flow
            alg = ppopt["PF_ALG"]
            if alg == 1:
                V, success, _ = newtonpf(Ybus, Sbus, V0, ref, pv, pq, ppopt)
            elif alg == 2 or alg == 3:
                Bp, Bpp = makeB(baseMVA, bus, branch, alg)
                V, success, _ = fdpf(Ybus, Sbus, V0, Bp, Bpp, ref, pv, pq, ppopt)
            elif alg == 4:
                V, success, _ = gausspf(Ybus, Sbus, V0, ref, pv, pq, ppopt)
            else:
                stderr.write("Only Newton's method, fast-decoupled, and "
                             'Gauss-Seidel power flow algorithms are '
                             'currently implemented.\n')

            ## update data matrices with solution
            bus, gen, branch = pfsoln(baseMVA, bus, gen, branch, Ybus, Yf, Yt, V, ref, pv, pq)

            if qlim:             ## enforce generator Q limits
                ## find gens with violated Q constraints
                gen_status = gen[:, GEN_STATUS] > 0
                qg_max_lim = gen[:, QG] > gen[:, QMAX]
                qg_min_lim = gen[:, QG] < gen[:, QMIN]
                
                mx = find( gen_status & qg_max_lim )
                mn = find( gen_status & qg_min_lim )
                
                if len(mx) > 0 or len(mn) > 0:  ## we have some Q limit violations
                    # No PV generators
                    if len(pv) == 0:
                        if verbose:
                            if len(mx) > 0:
                                print('Gen %d [only one left] exceeds upper Q limit : INFEASIBLE PROBLEM\n' % (mx[0] + 1))
                            else:
                                print('Gen %d [only one left] exceeds lower Q limit : INFEASIBLE PROBLEM\n' % (mn[0] + 1))

                        success = 0
                        break

                    ## one at a time?
                    if qlim == 2:    ## fix largest violation, ignore the rest
                        k = argmax(r_[gen[mx, QG] - gen[mx, QMAX],
                                      gen[mn, QMIN] - gen[mn, QG]])
                        if k >= len(mx):   ## k indexes into the mn part
                            mn = mn[k - len(mx)]
                            mx = []
                        else:
                            mx = mx[k]
                            mn = []

                    if verbose and len(mx) > 0:
                        for i in range(len(mx)):
                            print('Gen ' + str(mx[i] + 1) + ' at upper Q limit, converting to PQ bus\n')

                    if verbose and len(mn) > 0:
                        for i in range(len(mn)):
                            print('Gen ' + str(mn[i] + 1) + ' at lower Q limit, converting to PQ bus\n')

                    ## save corresponding limit values
                    fixedQg[mx] = gen[mx, QMAX]
                    fixedQg[mn] = gen[mn, QMIN]
                    mx = r_[mx, mn].astype(int)

                    ## convert to PQ bus
                    gen[mx, QG] = fixedQg[mx]      ## set Qg to binding 
                    for i in range(len(mx)):            ## [one at a time, since they may be at same bus]
                        gen[mx[i], GEN_STATUS] = 0        ## temporarily turn off gen,
                        bi = int(gen[mx[i], GEN_BUS])   ## adjust load accordingly,
                        bus[bi, [PD, QD]] = (bus[bi, [PD, QD]] - gen[mx[i], [PG, QG]])
                    
                    if len(ref) > 1 and any(bus[gen[mx, GEN_BUS], BUS_TYPE] == REF):
                        raise ValueError('Sorry, PYPOWER cannot enforce Q '
                                         'limits for slack buses in systems '
                                         'with multiple slacks.')
                    
                    bus[gen[mx, GEN_BUS].astype(int), BUS_TYPE] = PQ   ## & set bus type to PQ

                    ## update bus index lists of each type of bus
                    ref_temp = ref
                    ref, pv, pq = bustypes(bus, gen)
                    if verbose and ref != ref_temp:
                        print('Bus %d is new slack bus\n' % ref)

                    limited = r_[limited, mx].astype(int)
                else:
                    repeat = 0 ## no more generator Q limits violated
            else:
                repeat = 0     ## don't enforce generator Q limits, once is enough

        if qlim and len(limited) > 0:
            ## restore injections from limited gens [those at Q limits]
            gen[limited, QG] = fixedQg[limited]    ## restore Qg value,
            for i in range(len(limited)):               ## [one at a time, since they may be at same bus]
                bi = int(gen[limited[i], GEN_BUS])      ## re-adjust load,
                bus[bi, [PD, QD]] = bus[bi, [PD, QD]] + gen[limited[i], [PG, QG]]
                gen[limited[i], GEN_STATUS] = 1           ## and turn gen back on
            
            if ref != ref0:
                ## adjust voltage angles to make original ref bus correct
                bus[:, VA] = bus[:, VA] - bus[ref0, VA] + Varef0

    ppc["et"] = time() - t0
    ppc["success"] = success

    ##-----  output results  -----
    ## convert back to original bus numbering & print results
    ppc["bus"], ppc["gen"], ppc["branch"] = bus, gen, branch
    results = int2ext(ppc)

    ## zero out result fields of out-of-service gens & branches
    if len(results["order"]["gen"]["status"]["off"]) > 0:
        results["gen"][ix_(results["order"]["gen"]["status"]["off"], [PG, QG])] = 0

    if len(results["order"]["branch"]["status"]["off"]) > 0:
        results["branch"][ix_(results["order"]["branch"]["status"]["off"], [PF, QF, PT, QT])] = 0

    if fname:
        fd = None
        try:
            fd = open(fname, "a")
        except Exception as detail:
            stderr.write("Error opening %s: %s.\n" % (fname, detail))
        finally:
            if fd is not None:
                printpf(results, fd, ppopt)
                fd.close()
    else:
        printpf(results, stdout, ppopt)

    ## save solved case
    if solvedcase:
        savecase(solvedcase, results)

    return results, success
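
A usage sketch for the driver above (case9 ships with PYPOWER; VM is the voltage-magnitude column index from pypower.idx_bus):

from pypower.api import case9, ppoption, runpf
from pypower.idx_bus import VM

# DC power flow on the bundled 9-bus case, with printing suppressed.
ppopt = ppoption(PF_DC=1, VERBOSE=0, OUT_ALL=0)
results, success = runpf(case9(), ppopt)
print(success, results['bus'][:, VM])   # a DC solution fixes all |V| at 1.0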

Example 33

Project: RoBO Source File: fabolas.py
    def run(self, num_iterations=10, X=None, Y=None, C=None):
        """
        Runs the main Bayesian optimization loop

        Parameters
        ----------
        num_iterations : int, optional
            Specifies the number of iterations.
        X : (N, D) numpy array, optional
            Initial points where BO starts from.
        Y : (N, 1) numpy array, optional
            The function values of the initial points. Make sure the number of
            points is the same.
        C : (N, 1) numpy array, optional
            The costs of the initial points. Make sure the number of
            points is the same.

        Returns
        -------
        incumbent : (1, D) numpy array
            The estimated optimum that was found after the specified number of
            iterations.
        """
        self.time_start = time.time()

        if X is None and Y is None and C is None:
            self.time_func_eval = np.zeros([1])
            self.time_overhead = np.zeros([1])
            self.X = np.zeros([1, self.task.n_dims])
            self.Y = np.zeros([1, 1])
            self.C = np.zeros([1, 1])

            init = extrapolative_initial_design(self.task,
                                                N=self.init_points)

            for i, x in enumerate(init):
                x = x[np.newaxis, :]
                logger.info("Evaluate: %s" % x)

                start_time = time.time()

                y, c = self.task.evaluate(x)

                # Transform cost to log scale
                c = np.log(c)

                if i == 0:
                    self.X[i] = x[0, :]
                    self.Y[i] = y[0, :]
                    self.C[i] = c[0, :]
                    self.time_func_eval[i] = time.time() - start_time
                    self.time_overhead[i] = 0.0
                else:
                    self.X = np.append(self.X, x, axis=0)
                    self.Y = np.append(self.Y, y, axis=0)
                    self.C = np.append(self.C, c, axis=0)

                    time_feval = np.array([time.time() - start_time])
                    self.time_func_eval = np.append(self.time_func_eval,
                                                    time_feval, axis=0)
                    self.time_overhead = np.append(self.time_overhead,
                                                   np.array([0]), axis=0)

                logger.info("Configuration achieved a"
                            "performance of %f and %f costs in %f seconds" %
                            (self.Y[i], self.C[i], self.time_func_eval[i]))

                # Use best point seen so far as incumbent
                best_idx = np.argmin(self.Y)
                # Copy because we are going to change the system size to smax
                self.incumbent = np.copy(self.X[best_idx])
                self.incumbent_value = self.Y[best_idx]
                bounds_subspace = self.task.X_upper[self.task.is_env == 1]
                self.incumbent[self.task.is_env == 1] = bounds_subspace

                self.incumbent = self.incumbent[np.newaxis, :]
                self.incumbent_value = self.incumbent_value[np.newaxis, :]

                self.incumbents.append(self.incumbent)
                self.incumbent_values.append(self.incumbent_value)
                self.runtime.append(time.time() - self.time_start)

                if self.save_dir is not None and (i) % self.num_save == 0:
                    self.save_iteration(i, costs=self.C[-1],
                                        hyperparameters=None,
                                        acquisition_value=0)

        else:
            self.X = X
            self.Y = Y
            self.C = C
            self.time_func_eval = np.zeros([self.X.shape[0]])
            self.time_overhead = np.zeros([self.X.shape[0]])

        for it in range(0, num_iterations):
            logger.info("Start iteration %d ... ", it)
            # Choose a new configuration
            start_time = time.time()
            if it % self.train_intervall == 0:
                do_optimize = True
            else:
                do_optimize = False
            new_x = self.choose_next(self.X, self.Y, self.C, do_optimize)

            # Estimate the current incumbent from the posterior
            # over the configuration space
            start_time_inc = time.time()
            startpoints = init_random_uniform(self.task.X_lower,
                                              self.task.X_upper,
                                              self.n_restarts)
            self.incumbent, self.incumbent_value = \
                self.estimator.estimate_incumbent(startpoints)

            self.incumbents.append(self.incumbent)
            self.incumbent_values.append(self.incumbent_value)

            logger.info("New incumbent %s found in %f seconds"
                        " with predicted performance %f",
                        str(self.incumbent), time.time() - start_time_inc,
                        self.incumbent_value)

            # Compute the time we needed to pick a new point
            time_overhead = time.time() - start_time
            self.time_overhead = np.append(self.time_overhead,
                                           np.array([time_overhead]))
            logger.info("Optimization overhead was "
                            "%f seconds" % (self.time_overhead[-1]))

            # Evaluate the configuration
            logger.info("Evaluate candidate %s" % (str(new_x)))
            start_time = time.time()
            new_y, new_cost = self.task.evaluate(new_x)
            time_func_eval = time.time() - start_time

            # We model the log costs
            new_cost = np.log(new_cost)

            self.time_func_eval = np.append(self.time_func_eval,
                                            np.array([time_func_eval]))

            logger.info("Configuration achieved a performance "
                    "of %f in %s seconds" % (new_y[0, 0], new_cost[0]))

            # Add the new observations to the data
            self.X = np.append(self.X, new_x, axis=0)
            self.Y = np.append(self.Y, new_y, axis=0)
            self.C = np.append(self.C, new_cost, axis=0)

            self.runtime.append(time.time() - self.time_start)

            if self.save_dir is not None and (it + self.init_points) % self.num_save == 0:
                hypers = self.model.hypers

                self.save_iteration(it + self.init_points, costs=self.C[-1],
                                hyperparameters=hypers,
                                acquisition_value=self.acquisition_func(new_x))

        logger.info("Return %s as incuembent" % (str(self.incuembent)))
        return self.incuembent
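
The bookkeeping pattern used above, seeding (1, D) buffers with numpy.zeros and growing them with np.append, can be isolated into a self-contained sketch (all names here are illustrative, not RoBO's API):

import numpy as np

n_dims = 3
X = np.zeros([1, n_dims])     # placeholder row, overwritten by the first point
Y = np.zeros([1, 1])

def evaluate(x):
    # stand-in objective for illustration
    return np.array([[float(np.sum(x ** 2))]])

for i, x in enumerate(np.random.rand(5, n_dims)):
    x = x[np.newaxis, :]
    y = evaluate(x)
    if i == 0:
        X[0], Y[0] = x[0], y[0]            # fill the preallocated first row
    else:
        X = np.append(X, x, axis=0)        # then grow row by row
        Y = np.append(Y, y, axis=0)

best_idx = np.argmin(Y)
incumbent = np.copy(X[best_idx])           # best point seen so far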

Example 34

Project: PYPOWER Source File: t_hessian.py
def t_hessian(quiet=False):
    """Numerical tests of 2nd derivative code.

    @author: Ray Zimmerman (PSERC Cornell)
    """
    t_begin(44, quiet)

    ## run powerflow to get solved case
    ppopt = ppoption(VERBOSE=0, OUT_ALL=0)
    results, _ = runpf(case30(), ppopt)
    baseMVA, bus, gen, branch = \
        results['baseMVA'], results['bus'], results['gen'], results['branch']

    ## switch to internal bus numbering and build admittance matrices
    _, bus, gen, branch = ext2int1(bus, gen, branch)
    Ybus, Yf, Yt = makeYbus(baseMVA, bus, branch)
    Vm = bus[:, VM]
    Va = bus[:, VA] * (pi / 180)
    V = Vm * exp(1j * Va)
    f = branch[:, F_BUS].astype(int)       ## list of "from" buses
    t = branch[:, T_BUS].astype(int)       ## list of "to" buses
    nl = len(f)
    nb = len(V)
    Cf = sparse((ones(nl), (range(nl), f)), (nl, nb))  ## connection matrix for line & from buses
    Ct = sparse((ones(nl), (range(nl), t)), (nl, nb))  ## connection matrix for line & to buses
    pert = 1e-8

    ##-----  check d2Sbus_dV2 code  -----
    t = ' - d2Sbus_dV2 (complex power injections)'
    lam = 10 * random.rand(nb)
    num_Haa = zeros((nb, nb), complex)
    num_Hav = zeros((nb, nb), complex)
    num_Hva = zeros((nb, nb), complex)
    num_Hvv = zeros((nb, nb), complex)
    dSbus_dVm, dSbus_dVa = dSbus_dV(Ybus, V)
    Haa, Hav, Hva, Hvv = d2Sbus_dV2(Ybus, V, lam)
    for i in range(nb):
        Vap = V.copy()
        Vap[i] = Vm[i] * exp(1j * (Va[i] + pert))
        dSbus_dVm_ap, dSbus_dVa_ap = dSbus_dV(Ybus, Vap)
        num_Haa[:, i] = (dSbus_dVa_ap - dSbus_dVa).T * lam / pert
        num_Hva[:, i] = (dSbus_dVm_ap - dSbus_dVm).T * lam / pert

        Vmp = V.copy()
        Vmp[i] = (Vm[i] + pert) * exp(1j * Va[i])
        dSbus_dVm_mp, dSbus_dVa_mp = dSbus_dV(Ybus, Vmp)
        num_Hav[:, i] = (dSbus_dVa_mp - dSbus_dVa).T * lam / pert
        num_Hvv[:, i] = (dSbus_dVm_mp - dSbus_dVm).T * lam / pert

    t_is(Haa.todense(), num_Haa, 4, ['Haa', t])
    t_is(Hav.todense(), num_Hav, 4, ['Hav', t])
    t_is(Hva.todense(), num_Hva, 4, ['Hva', t])
    t_is(Hvv.todense(), num_Hvv, 4, ['Hvv', t])

    ##-----  check d2Sbr_dV2 code  -----
    t = ' - d2Sbr_dV2 (complex power flows)'
    lam = 10 * random.rand(nl)
    # lam = [1 zeros(nl-1, 1)]
    num_Gfaa = zeros((nb, nb), complex)
    num_Gfav = zeros((nb, nb), complex)
    num_Gfva = zeros((nb, nb), complex)
    num_Gfvv = zeros((nb, nb), complex)
    num_Gtaa = zeros((nb, nb), complex)
    num_Gtav = zeros((nb, nb), complex)
    num_Gtva = zeros((nb, nb), complex)
    num_Gtvv = zeros((nb, nb), complex)
    dSf_dVa, dSf_dVm, dSt_dVa, dSt_dVm, _, _ = dSbr_dV(branch, Yf, Yt, V)
    Gfaa, Gfav, Gfva, Gfvv = d2Sbr_dV2(Cf, Yf, V, lam)
    Gtaa, Gtav, Gtva, Gtvv = d2Sbr_dV2(Ct, Yt, V, lam)
    for i in range(nb):
        Vap = V.copy()
        Vap[i] = Vm[i] * exp(1j * (Va[i] + pert))
        dSf_dVa_ap, dSf_dVm_ap, dSt_dVa_ap, dSt_dVm_ap, Sf_ap, St_ap = \
            dSbr_dV(branch, Yf, Yt, Vap)
        num_Gfaa[:, i] = (dSf_dVa_ap - dSf_dVa).T * lam / pert
        num_Gfva[:, i] = (dSf_dVm_ap - dSf_dVm).T * lam / pert
        num_Gtaa[:, i] = (dSt_dVa_ap - dSt_dVa).T * lam / pert
        num_Gtva[:, i] = (dSt_dVm_ap - dSt_dVm).T * lam / pert

        Vmp = V.copy()
        Vmp[i] = (Vm[i] + pert) * exp(1j * Va[i])
        dSf_dVa_mp, dSf_dVm_mp, dSt_dVa_mp, dSt_dVm_mp, Sf_mp, St_mp = \
            dSbr_dV(branch, Yf, Yt, Vmp)
        num_Gfav[:, i] = (dSf_dVa_mp - dSf_dVa).T * lam / pert
        num_Gfvv[:, i] = (dSf_dVm_mp - dSf_dVm).T * lam / pert
        num_Gtav[:, i] = (dSt_dVa_mp - dSt_dVa).T * lam / pert
        num_Gtvv[:, i] = (dSt_dVm_mp - dSt_dVm).T * lam / pert

    t_is(Gfaa.todense(), num_Gfaa, 4, ['Gfaa', t])
    t_is(Gfav.todense(), num_Gfav, 4, ['Gfav', t])
    t_is(Gfva.todense(), num_Gfva, 4, ['Gfva', t])
    t_is(Gfvv.todense(), num_Gfvv, 4, ['Gfvv', t])

    t_is(Gtaa.todense(), num_Gtaa, 4, ['Gtaa', t])
    t_is(Gtav.todense(), num_Gtav, 4, ['Gtav', t])
    t_is(Gtva.todense(), num_Gtva, 4, ['Gtva', t])
    t_is(Gtvv.todense(), num_Gtvv, 4, ['Gtvv', t])

    ##-----  check d2Ibr_dV2 code  -----
    t = ' - d2Ibr_dV2 (complex currents)'
    lam = 10 * random.rand(nl)
    # lam = [1, zeros(nl-1)]
    num_Gfaa = zeros((nb, nb), complex)
    num_Gfav = zeros((nb, nb), complex)
    num_Gfva = zeros((nb, nb), complex)
    num_Gfvv = zeros((nb, nb), complex)
    num_Gtaa = zeros((nb, nb), complex)
    num_Gtav = zeros((nb, nb), complex)
    num_Gtva = zeros((nb, nb), complex)
    num_Gtvv = zeros((nb, nb), complex)
    dIf_dVa, dIf_dVm, dIt_dVa, dIt_dVm, _, _ = dIbr_dV(branch, Yf, Yt, V)
    Gfaa, Gfav, Gfva, Gfvv = d2Ibr_dV2(Yf, V, lam)

    Gtaa, Gtav, Gtva, Gtvv = d2Ibr_dV2(Yt, V, lam)
    for i in range(nb):
        Vap = V.copy()
        Vap[i] = Vm[i] * exp(1j * (Va[i] + pert))
        dIf_dVa_ap, dIf_dVm_ap, dIt_dVa_ap, dIt_dVm_ap, If_ap, It_ap = \
            dIbr_dV(branch, Yf, Yt, Vap)
        num_Gfaa[:, i] = (dIf_dVa_ap - dIf_dVa).T * lam / pert
        num_Gfva[:, i] = (dIf_dVm_ap - dIf_dVm).T * lam / pert
        num_Gtaa[:, i] = (dIt_dVa_ap - dIt_dVa).T * lam / pert
        num_Gtva[:, i] = (dIt_dVm_ap - dIt_dVm).T * lam / pert

        Vmp = V.copy()
        Vmp[i] = (Vm[i] + pert) * exp(1j * Va[i])
        dIf_dVa_mp, dIf_dVm_mp, dIt_dVa_mp, dIt_dVm_mp, If_mp, It_mp = \
            dIbr_dV(branch, Yf, Yt, Vmp)
        num_Gfav[:, i] = (dIf_dVa_mp - dIf_dVa).T * lam / pert
        num_Gfvv[:, i] = (dIf_dVm_mp - dIf_dVm).T * lam / pert
        num_Gtav[:, i] = (dIt_dVa_mp - dIt_dVa).T * lam / pert
        num_Gtvv[:, i] = (dIt_dVm_mp - dIt_dVm).T * lam / pert

    t_is(Gfaa.todense(), num_Gfaa, 4, ['Gfaa', t])
    t_is(Gfav.todense(), num_Gfav, 4, ['Gfav', t])
    t_is(Gfva.todense(), num_Gfva, 4, ['Gfva', t])
    t_is(Gfvv.todense(), num_Gfvv, 4, ['Gfvv', t])

    t_is(Gtaa.todense(), num_Gtaa, 4, ['Gtaa', t])
    t_is(Gtav.todense(), num_Gtav, 4, ['Gtav', t])
    t_is(Gtva.todense(), num_Gtva, 4, ['Gtva', t])
    t_is(Gtvv.todense(), num_Gtvv, 4, ['Gtvv', t])

    ##-----  check d2ASbr_dV2 code  -----
    t = ' - d2ASbr_dV2 (squared apparent power flows)'
    lam = 10 * random.rand(nl)
    # lam = [1 zeros(nl-1, 1)]
    num_Gfaa = zeros((nb, nb), complex)
    num_Gfav = zeros((nb, nb), complex)
    num_Gfva = zeros((nb, nb), complex)
    num_Gfvv = zeros((nb, nb), complex)
    num_Gtaa = zeros((nb, nb), complex)
    num_Gtav = zeros((nb, nb), complex)
    num_Gtva = zeros((nb, nb), complex)
    num_Gtvv = zeros((nb, nb), complex)
    dSf_dVa, dSf_dVm, dSt_dVa, dSt_dVm, Sf, St = dSbr_dV(branch, Yf, Yt, V)
    dAf_dVa, dAf_dVm, dAt_dVa, dAt_dVm = \
                            dAbr_dV(dSf_dVa, dSf_dVm, dSt_dVa, dSt_dVm, Sf, St)
    Gfaa, Gfav, Gfva, Gfvv = d2ASbr_dV2(dSf_dVa, dSf_dVm, Sf, Cf, Yf, V, lam)
    Gtaa, Gtav, Gtva, Gtvv = d2ASbr_dV2(dSt_dVa, dSt_dVm, St, Ct, Yt, V, lam)
    for i in range(nb):
        Vap = V.copy()
        Vap[i] = Vm[i] * exp(1j * (Va[i] + pert))
        dSf_dVa_ap, dSf_dVm_ap, dSt_dVa_ap, dSt_dVm_ap, Sf_ap, St_ap = \
            dSbr_dV(branch, Yf, Yt, Vap)
        dAf_dVa_ap, dAf_dVm_ap, dAt_dVa_ap, dAt_dVm_ap = \
            dAbr_dV(dSf_dVa_ap, dSf_dVm_ap, dSt_dVa_ap, dSt_dVm_ap, Sf_ap, St_ap)
        num_Gfaa[:, i] = (dAf_dVa_ap - dAf_dVa).T * lam / pert
        num_Gfva[:, i] = (dAf_dVm_ap - dAf_dVm).T * lam / pert
        num_Gtaa[:, i] = (dAt_dVa_ap - dAt_dVa).T * lam / pert
        num_Gtva[:, i] = (dAt_dVm_ap - dAt_dVm).T * lam / pert

        Vmp = V.copy()
        Vmp[i] = (Vm[i] + pert) * exp(1j * Va[i])
        dSf_dVa_mp, dSf_dVm_mp, dSt_dVa_mp, dSt_dVm_mp, Sf_mp, St_mp = \
            dSbr_dV(branch, Yf, Yt, Vmp)
        dAf_dVa_mp, dAf_dVm_mp, dAt_dVa_mp, dAt_dVm_mp = \
            dAbr_dV(dSf_dVa_mp, dSf_dVm_mp, dSt_dVa_mp, dSt_dVm_mp, Sf_mp, St_mp)
        num_Gfav[:, i] = (dAf_dVa_mp - dAf_dVa).T * lam / pert
        num_Gfvv[:, i] = (dAf_dVm_mp - dAf_dVm).T * lam / pert
        num_Gtav[:, i] = (dAt_dVa_mp - dAt_dVa).T * lam / pert
        num_Gtvv[:, i] = (dAt_dVm_mp - dAt_dVm).T * lam / pert

    t_is(Gfaa.todense(), num_Gfaa, 2, ['Gfaa', t])
    t_is(Gfav.todense(), num_Gfav, 2, ['Gfav', t])
    t_is(Gfva.todense(), num_Gfva, 2, ['Gfva', t])
    t_is(Gfvv.todense(), num_Gfvv, 2, ['Gfvv', t])

    t_is(Gtaa.todense(), num_Gtaa, 2, ['Gtaa', t])
    t_is(Gtav.todense(), num_Gtav, 2, ['Gtav', t])
    t_is(Gtva.todense(), num_Gtva, 2, ['Gtva', t])
    t_is(Gtvv.todense(), num_Gtvv, 2, ['Gtvv', t])

    ##-----  check d2ASbr_dV2 code  -----
    t = ' - d2ASbr_dV2 (squared real power flows)'
    lam = 10 * random.rand(nl)
    # lam = [1 zeros(nl-1, 1)]
    num_Gfaa = zeros((nb, nb), complex)
    num_Gfav = zeros((nb, nb), complex)
    num_Gfva = zeros((nb, nb), complex)
    num_Gfvv = zeros((nb, nb), complex)
    num_Gtaa = zeros((nb, nb), complex)
    num_Gtav = zeros((nb, nb), complex)
    num_Gtva = zeros((nb, nb), complex)
    num_Gtvv = zeros((nb, nb), complex)
    dSf_dVa, dSf_dVm, dSt_dVa, dSt_dVm, Sf, St = dSbr_dV(branch, Yf, Yt, V)
    dAf_dVa, dAf_dVm, dAt_dVa, dAt_dVm = \
           dAbr_dV(dSf_dVa.real, dSf_dVm.real, dSt_dVa.real, dSt_dVm.real, Sf.real, St.real)
    Gfaa, Gfav, Gfva, Gfvv = d2ASbr_dV2(dSf_dVa.real, dSf_dVm.real, Sf.real, Cf, Yf, V, lam)
    Gtaa, Gtav, Gtva, Gtvv = d2ASbr_dV2(dSt_dVa.real, dSt_dVm.real, St.real, Ct, Yt, V, lam)
    for i in range(nb):
        Vap = V.copy()
        Vap[i] = Vm[i] * exp(1j * (Va[i] + pert))
        dSf_dVa_ap, dSf_dVm_ap, dSt_dVa_ap, dSt_dVm_ap, Sf_ap, St_ap = \
            dSbr_dV(branch, Yf, Yt, Vap)
        dAf_dVa_ap, dAf_dVm_ap, dAt_dVa_ap, dAt_dVm_ap = \
            dAbr_dV(dSf_dVa_ap.real, dSf_dVm_ap.real, dSt_dVa_ap.real, dSt_dVm_ap.real, Sf_ap.real, St_ap.real)
        num_Gfaa[:, i] = (dAf_dVa_ap - dAf_dVa).T * lam / pert
        num_Gfva[:, i] = (dAf_dVm_ap - dAf_dVm).T * lam / pert
        num_Gtaa[:, i] = (dAt_dVa_ap - dAt_dVa).T * lam / pert
        num_Gtva[:, i] = (dAt_dVm_ap - dAt_dVm).T * lam / pert

        Vmp = V.copy()
        Vmp[i] = (Vm[i] + pert) * exp(1j * Va[i])
        dSf_dVa_mp, dSf_dVm_mp, dSt_dVa_mp, dSt_dVm_mp, Sf_mp, St_mp = \
            dSbr_dV(branch, Yf, Yt, Vmp)
        dAf_dVa_mp, dAf_dVm_mp, dAt_dVa_mp, dAt_dVm_mp = \
            dAbr_dV(dSf_dVa_mp.real, dSf_dVm_mp.real, dSt_dVa_mp.real, dSt_dVm_mp.real, Sf_mp.real, St_mp.real)
        num_Gfav[:, i] = (dAf_dVa_mp - dAf_dVa).T * lam / pert
        num_Gfvv[:, i] = (dAf_dVm_mp - dAf_dVm).T * lam / pert
        num_Gtav[:, i] = (dAt_dVa_mp - dAt_dVa).T * lam / pert
        num_Gtvv[:, i] = (dAt_dVm_mp - dAt_dVm).T * lam / pert

    t_is(Gfaa.todense(), num_Gfaa, 2, ['Gfaa', t])
    t_is(Gfav.todense(), num_Gfav, 2, ['Gfav', t])
    t_is(Gfva.todense(), num_Gfva, 2, ['Gfva', t])
    t_is(Gfvv.todense(), num_Gfvv, 2, ['Gfvv', t])

    t_is(Gtaa.todense(), num_Gtaa, 2, ['Gtaa', t])
    t_is(Gtav.todense(), num_Gtav, 2, ['Gtav', t])
    t_is(Gtva.todense(), num_Gtva, 2, ['Gtva', t])
    t_is(Gtvv.todense(), num_Gtvv, 2, ['Gtvv', t])

    ##-----  check d2AIbr_dV2 code  -----
    t = ' - d2AIbr_dV2 (squared current magnitudes)'
    lam = 10 * random.rand(nl)
    # lam = [1 zeros(nl-1, 1)]
    num_Gfaa = zeros((nb, nb), complex)
    num_Gfav = zeros((nb, nb), complex)
    num_Gfva = zeros((nb, nb), complex)
    num_Gfvv = zeros((nb, nb), complex)
    num_Gtaa = zeros((nb, nb), complex)
    num_Gtav = zeros((nb, nb), complex)
    num_Gtva = zeros((nb, nb), complex)
    num_Gtvv = zeros((nb, nb), complex)
    dIf_dVa, dIf_dVm, dIt_dVa, dIt_dVm, If, It = dIbr_dV(branch, Yf, Yt, V)
    dAf_dVa, dAf_dVm, dAt_dVa, dAt_dVm = \
                            dAbr_dV(dIf_dVa, dIf_dVm, dIt_dVa, dIt_dVm, If, It)
    Gfaa, Gfav, Gfva, Gfvv = d2AIbr_dV2(dIf_dVa, dIf_dVm, If, Yf, V, lam)
    Gtaa, Gtav, Gtva, Gtvv = d2AIbr_dV2(dIt_dVa, dIt_dVm, It, Yt, V, lam)
    for i in range(nb):
        Vap = V.copy()
        Vap[i] = Vm[i] * exp(1j * (Va[i] + pert))
        dIf_dVa_ap, dIf_dVm_ap, dIt_dVa_ap, dIt_dVm_ap, If_ap, It_ap = \
            dIbr_dV(branch, Yf, Yt, Vap)
        dAf_dVa_ap, dAf_dVm_ap, dAt_dVa_ap, dAt_dVm_ap = \
            dAbr_dV(dIf_dVa_ap, dIf_dVm_ap, dIt_dVa_ap, dIt_dVm_ap, If_ap, It_ap)
        num_Gfaa[:, i] = (dAf_dVa_ap - dAf_dVa).T * lam / pert
        num_Gfva[:, i] = (dAf_dVm_ap - dAf_dVm).T * lam / pert
        num_Gtaa[:, i] = (dAt_dVa_ap - dAt_dVa).T * lam / pert
        num_Gtva[:, i] = (dAt_dVm_ap - dAt_dVm).T * lam / pert

        Vmp = V.copy()
        Vmp[i] = (Vm[i] + pert) * exp(1j * Va[i])
        dIf_dVa_mp, dIf_dVm_mp, dIt_dVa_mp, dIt_dVm_mp, If_mp, It_mp = \
            dIbr_dV(branch, Yf, Yt, Vmp)
        dAf_dVa_mp, dAf_dVm_mp, dAt_dVa_mp, dAt_dVm_mp = \
            dAbr_dV(dIf_dVa_mp, dIf_dVm_mp, dIt_dVa_mp, dIt_dVm_mp, If_mp, It_mp)
        num_Gfav[:, i] = (dAf_dVa_mp - dAf_dVa).T * lam / pert
        num_Gfvv[:, i] = (dAf_dVm_mp - dAf_dVm).T * lam / pert
        num_Gtav[:, i] = (dAt_dVa_mp - dAt_dVa).T * lam / pert
        num_Gtvv[:, i] = (dAt_dVm_mp - dAt_dVm).T * lam / pert

    t_is(Gfaa.todense(), num_Gfaa, 3, ['Gfaa', t])
    t_is(Gfav.todense(), num_Gfav, 3, ['Gfav', t])
    t_is(Gfva.todense(), num_Gfva, 3, ['Gfva', t])
    t_is(Gfvv.todense(), num_Gfvv, 2, ['Gfvv', t])

    t_is(Gtaa.todense(), num_Gtaa, 3, ['Gtaa', t])
    t_is(Gtav.todense(), num_Gtav, 3, ['Gtav', t])
    t_is(Gtva.todense(), num_Gtva, 3, ['Gtva', t])
    t_is(Gtvv.todense(), num_Gtvv, 2, ['Gtvv', t])

    t_end()
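
Every check in t_hessian repeats the same recipe: preallocate an (nb, nb) complex matrix with numpy.zeros, perturb one voltage coordinate at a time, and fill the matching column with a finite-difference quotient. A stripped-down, hedged sketch of that recipe for a generic vector-valued function g (illustrative only, not part of PYPOWER):

from numpy import zeros, random

def numerical_jacobian(g, x, pert=1e-8):
    """Forward-difference Jacobian of g at x, built column by column."""
    n = len(x)
    J = zeros((n, n), complex)         # preallocate, as the tests above do
    g0 = g(x)
    for i in range(n):
        xp = x.copy()
        xp[i] += pert                  # perturb a single coordinate
        J[:, i] = (g(xp) - g0) / pert  # one column per perturbation
    return J

x0 = random.rand(4).astype(complex)
J = numerical_jacobian(lambda v: v ** 2, x0)   # approx. diag(2 * x0)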

Example 35

Project: qutip Source File: countstat.py
def countstat_current_noise(L, c_ops, wlist=None, rhoss=None, J_ops=None, 
                            sparse=True, method='direct'):
    """
    Compute the cross-current noise spectrum for a list of collapse operators
    `c_ops` corresponding to monitored currents, given the system
    Liouvillian `L`. The current collapse operators `c_ops` should be part
    of the dissipative processes in `L`, but the `c_ops` given here need not
    be all of the collapse operators contributing to dissipation in the
    Liouvillian. Optionally, the steady-state density matrix `rhoss` and the
    current superoperators `J_ops` corresponding to the current collapse
    operators `c_ops` can also be specified. If either `rhoss` or `J_ops` is
    omitted, it will be computed internally. `wlist` is an optional list of
    frequencies at which to evaluate the noise spectrum.

    Note:
    A direct solution using dense matrices is robust for small systems, where
    sparse matrix methods can fail. For larger systems the sparse solver
    combined with the direct method is recommended, as it avoids explicit
    calculation of the pseudo-inverse, as described on page 67 of "Electrons
    in nanostructures", C. Flindt, PhD Thesis, available online:
    http://orbit.dtu.dk/fedora/objects/orbit:82314/datastreams/file_4732600/content
    
    Parameters
    ----------

    L : :class:`qutip.Qobj`
        Qobj representing the system Liouvillian.

    c_ops : array / list
        List of current collapse operators.

    rhoss : :class:`qutip.Qobj` (optional)
        The steady-state density matrix corresponding to the system
        Liouvillian `L`.

    wlist : array / list (optional)
        List of frequencies at which to evaluate the noise spectrum (if none
        are given, it is evaluated at zero frequency).

    J_ops : array / list (optional)
        List of current superoperators.

    sparse : bool
        Flag that indicates whether to use sparse or dense matrix methods
        when computing the pseudo-inverse. Sparse solvers can fail for some
        small systems; for larger systems they are recommended.

    Returns
    -------
    I, S : tuple of arrays
        The currents `I` corresponding to each current collapse operator in
        `c_ops` (or, equivalently, each current superoperator in `J_ops`) and
        the zero-frequency cross-current correlation `S`.
    """

    if rhoss is None:
        rhoss = steadystate(L, c_ops)

    if J_ops is None:
        J_ops = [sprepost(c, c.dag()) for c in c_ops]

    N = len(J_ops)
    I = np.zeros(N)

    if wlist is None:
        S = np.zeros((N, N, 1))
        wlist = [0.]
    else:
        S = np.zeros((N, N, len(wlist)))

    if not sparse:
        rhoss_vec = mat2vec(rhoss.full()).ravel()
        for k, w in enumerate(wlist):
            R = pseudo_inverse(L, rhoss=rhoss, w=w, sparse=sparse,
                               method=method)
            for i, Ji in enumerate(J_ops):
                for j, Jj in enumerate(J_ops):
                    if i == j:
                        I[i] = expect_rho_vec(Ji.data, rhoss_vec, 1)
                        S[i, j, k] = I[i]
                    S[i, j, k] -= expect_rho_vec((Ji * R * Jj
                                                  + Jj * R * Ji).data,
                                                 rhoss_vec, 1)
    else:
        if method == "direct":
            N = np.prod(L.dims[0][0])

            rhoss_vec = operator_to_vector(rhoss)

            tr_op = tensor([identity(n) for n in L.dims[0][0]])
            tr_op_vec = operator_to_vector(tr_op)

            Pop = sp.kron(rhoss_vec.data, tr_op_vec.data.T, format='csr')
            Iop = sp.eye(N * N, N * N, format='csr')
            Q = Iop - Pop
            
            for k, w in enumerate(wlist):

                if w != 0.0:
                    L_temp = 1.0j * w * spre(tr_op) + L
                else:
                    # At zero frequency some solvers fail for small systems.
                    # Adding a small finite frequency of order 1e-15 helps
                    # prevent the solvers from throwing an exception.
                    L_temp = 1.0j * 1e-15 * spre(tr_op) + L

                if not settings.has_mkl:
                    A = L_temp.data.tocsc()
                else:
                    A = L_temp.data.tocsr()
                    A.sort_indices()

                rhoss_vec = mat2vec(rhoss.full()).ravel()

                for j, Jj in enumerate(J_ops):
                    Qj = Q.dot(Jj.data.dot(rhoss_vec))
                    try:
                        if settings.has_mkl:
                            X_rho_vec_j = mkl_spsolve(A, Qj)
                        else:
                            X_rho_vec_j = sp.linalg.splu(
                                A, permc_spec='COLAMD').solve(Qj)
                    except Exception:
                        X_rho_vec_j = sp.linalg.lsqr(A, Qj)[0]
                    for i, Ji in enumerate(J_ops):
                        Qi = Q.dot(Ji.data.dot(rhoss_vec))
                        try:
                            if settings.has_mkl:
                                X_rho_vec_i = mkl_spsolve(A, Qi)
                            else:
                                X_rho_vec_i = sp.linalg.splu(
                                    A, permc_spec='COLAMD').solve(Qi)
                        except Exception:
                            X_rho_vec_i = sp.linalg.lsqr(A, Qi)[0]
                        if i == j:
                            I[i] = expect_rho_vec(Ji.data, rhoss_vec, 1)
                            S[j, i, k] = I[i]

                        S[j, i, k] -= (expect_rho_vec(Jj.data * Q,
                                                      X_rho_vec_i, 1)
                                       + expect_rho_vec(Ji.data * Q,
                                                        X_rho_vec_j, 1))

        else:
            rhoss_vec = mat2vec(rhoss.full()).ravel()
            for k, w in enumerate(wlist):

                R = pseudo_inverse(L, rhoss=rhoss, w=w, sparse=sparse,
                                   method=method)

                for i, Ji in enumerate(J_ops):
                    for j, Jj in enumerate(J_ops):
                        if i == j:
                            I[i] = expect_rho_vec(Ji.data, rhoss_vec, 1)
                            S[i, j, k] = I[i]
                        S[i, j, k] -= expect_rho_vec((Ji * R * Jj
                                                      + Jj * R * Ji).data,
                                                     rhoss_vec, 1)
    return I, S
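
A minimal, hedged usage sketch for countstat_current_noise (assumes QuTiP 4.x; the system and rates are illustrative, not from the QuTiP sources): a two-level system filled incoherently through one channel and emptied through another, with the "drain" collapse operator monitored as the current channel.

import numpy as np
from qutip import sigmam, liouvillian, countstat_current_noise

sm = sigmam()
pump = np.sqrt(0.5) * sm.dag()   # incoherent filling of the excited state
drain = np.sqrt(1.0) * sm        # monitored current channel
L = liouvillian(0 * sm.dag() * sm, [pump, drain])

I, S = countstat_current_noise(L, [drain], sparse=False)
print(I[0], S[0, 0, 0])          # steady-state current and zero-frequency noise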

Example 36

Project: PYPOWER Source File: t_loadcase.py
def t_loadcase(quiet=False):
    """Test that C{loadcase} works with an object as well as case file.

    @author: Ray Zimmerman (PSERC Cornell)
    """
    t_begin(240, quiet)

    ## compare result of loading from M-file to result of using data matrices
    tdir = dirname(__file__)
    casefile = join(tdir, 't_case9_opf')
    matfile  = join(tdir, 't_mat9_opf')
    pfcasefile = join(tdir, 't_case9_pf')
    pfmatfile  = join(tdir, 't_mat9_pf')
    casefilev2 = join(tdir, 't_case9_opfv2')
    matfilev2  = join(tdir, 't_mat9_opfv2')
    pfcasefilev2 = join(tdir, 't_case9_pfv2')
    pfmatfilev2  = join(tdir, 't_mat9_pfv2')

    ## read version 1 OPF data matrices
    baseMVA, bus, gen, branch, areas, gencost = t_case9_opf()
    ## save as .mat file
    savemat(matfile + '.mat', {'baseMVA': baseMVA, 'bus': bus, 'gen': gen,
            'branch': branch, 'areas': areas, 'gencost': gencost}, oned_as='row')

    ## read version 2 OPF data matrices
    ppc = t_case9_opfv2()
    ## save as .mat file
    savemat(matfilev2 + '.mat', {'ppc': ppc}, oned_as='column')

    ## prepare expected matrices for v1 load
    ## (missing gen cap curve & branch ang diff lims)
    tmp1 = (ppc['baseMVA'], ppc['bus'].copy(), ppc['gen'].copy(), ppc['branch'].copy(),
        ppc['areas'].copy(), ppc['gencost'].copy())
    tmp2 = (ppc['baseMVA'], ppc['bus'].copy(), ppc['gen'].copy(), ppc['branch'].copy(),
        ppc['areas'].copy(), ppc['gencost'].copy())
    ## remove capability curves, angle difference limits
    tmp1[2][1:3, [PC1, PC2, QC1MIN, QC1MAX, QC2MIN, QC2MAX]] = zeros((2,6))
    tmp1[3][0, ANGMAX] = 360
    tmp1[3][8, ANGMIN] = -360

    baseMVA, bus, gen, branch, areas, gencost = tmp1

    ##-----  load OPF data into individual matrices  -----
    t = 'loadcase(opf_PY_file_v1) without .py extension : '
    baseMVA1, bus1, gen1, branch1, areas1, gencost1 = \
            loadcase(casefile, False)
    t_is(baseMVA1,  baseMVA,    12, [t, 'baseMVA'])
    t_is(bus1,      bus,        12, [t, 'bus'])
    t_is(gen1,      gen,        12, [t, 'gen'])
    t_is(branch1,   branch,     12, [t, 'branch'])
    t_is(areas1,    areas,      12, [t, 'areas'])
    t_is(gencost1,  gencost,    12, [t, 'gencost'])

    t = 'loadcase(opf_PY_file_v1) with .py extension : '
    baseMVA1, bus1, gen1, branch1, areas1, gencost1 = \
            loadcase(casefile + '.py', False)
    t_is(baseMVA1,  baseMVA,    12, [t, 'baseMVA'])
    t_is(bus1,      bus,        12, [t, 'bus'])
    t_is(gen1,      gen,        12, [t, 'gen'])
    t_is(branch1,   branch,     12, [t, 'branch'])
    t_is(areas1,    areas,      12, [t, 'areas'])
    t_is(gencost1,  gencost,    12, [t, 'gencost'])

    t = 'loadcase(opf_MAT_file_v1) without .mat extension : '
    baseMVA1, bus1, gen1, branch1, areas1, gencost1 = \
            loadcase(matfile, False)
    t_is(baseMVA1,  baseMVA,    12, [t, 'baseMVA'])
    t_is(bus1,      bus,        12, [t, 'bus'])
    t_is(gen1,      gen,        12, [t, 'gen'])
    t_is(branch1,   branch,     12, [t, 'branch'])
    t_is(areas1,    areas,      12, [t, 'areas'])
    t_is(gencost1,  gencost,    12, [t, 'gencost'])

    t = 'loadcase(opf_MAT_file_v1) with .mat extension : '
    baseMVA1, bus1, gen1, branch1, areas1, gencost1 = \
            loadcase(matfile + '.mat', False)
    t_is(baseMVA1,  baseMVA,    12, [t, 'baseMVA'])
    t_is(bus1,      bus,        12, [t, 'bus'])
    t_is(gen1,      gen,        12, [t, 'gen'])
    t_is(branch1,   branch,     12, [t, 'branch'])
    t_is(areas1,    areas,      12, [t, 'areas'])
    t_is(gencost1,  gencost,    12, [t, 'gencost'])

    ## prepare expected matrices for v2 load
    baseMVA, bus, gen, branch, areas, gencost = tmp2

    t = 'loadcase(opf_PY_file_v2) without .py extension : '
    baseMVA1, bus1, gen1, branch1, areas1, gencost1 = \
            loadcase(casefilev2, False)
    t_is(baseMVA1,  baseMVA,    12, [t, 'baseMVA'])
    t_is(bus1,      bus,        12, [t, 'bus'])
    t_is(gen1,      gen,        12, [t, 'gen'])
    t_is(branch1,   branch,     12, [t, 'branch'])
    t_is(areas1,    areas,      12, [t, 'areas'])
    t_is(gencost1,  gencost,    12, [t, 'gencost'])

    t = 'loadcase(opf_PY_file_v2) with .py extension : '
    baseMVA1, bus1, gen1, branch1, areas1, gencost1 = \
            loadcase(casefilev2 + '.py', False)
    t_is(baseMVA1,  baseMVA,    12, [t, 'baseMVA'])
    t_is(bus1,      bus,        12, [t, 'bus'])
    t_is(gen1,      gen,        12, [t, 'gen'])
    t_is(branch1,   branch,     12, [t, 'branch'])
    t_is(areas1,    areas,      12, [t, 'areas'])
    t_is(gencost1,  gencost,    12, [t, 'gencost'])

    t = 'loadcase(opf_MAT_file_v2) without .mat extension : '
    baseMVA1, bus1, gen1, branch1, areas1, gencost1 = \
            loadcase(matfilev2, False)
    t_is(baseMVA1,  baseMVA,    12, [t, 'baseMVA'])
    t_is(bus1,      bus,        12, [t, 'bus'])
    t_is(gen1,      gen,        12, [t, 'gen'])
    t_is(branch1,   branch,     12, [t, 'branch'])
    t_is(areas1,    areas,      12, [t, 'areas'])
    t_is(gencost1,  gencost,    12, [t, 'gencost'])

    t = 'loadcase(opf_MAT_file_v2) with .mat extension : '
    baseMVA1, bus1, gen1, branch1, areas1, gencost1 = \
            loadcase(matfilev2 + '.mat', False)
    t_is(baseMVA1,  baseMVA,    12, [t, 'baseMVA'])
    t_is(bus1,      bus,        12, [t, 'bus'])
    t_is(gen1,      gen,        12, [t, 'gen'])
    t_is(branch1,   branch,     12, [t, 'branch'])
    t_is(areas1,    areas,      12, [t, 'areas'])
    t_is(gencost1,  gencost,    12, [t, 'gencost'])

    ## prepare expected matrices for v1 load
    baseMVA, bus, gen, branch, areas, gencost = tmp1

    t = 'loadcase(opf_struct_v1) (no version): '
    baseMVA1, bus1, gen1, branch1, areas1, gencost1 = t_case9_opf()
    c = {}
    c['baseMVA']   = baseMVA1
    c['bus']       = bus1.copy()
    c['gen']       = gen1.copy()
    c['branch']    = branch1.copy()
    c['areas']     = areas1.copy()
    c['gencost']   = gencost1.copy()
    baseMVA2, bus2, gen2, branch2, areas2, gencost2 = loadcase(c, False)
    t_is(baseMVA2,  baseMVA,    12, [t, 'baseMVA'])
    t_is(bus2,      bus,        12, [t, 'bus'])
    t_is(gen2,      gen,        12, [t, 'gen'])
    t_is(branch2,   branch,     12, [t, 'branch'])
    t_is(areas2,    areas,      12, [t, 'areas'])
    t_is(gencost2,  gencost,    12, [t, 'gencost'])

    t = 'loadcase(opf_struct_v1) (version=\'1\'): '
    c['version']   = '1'
    baseMVA2, bus2, gen2, branch2, areas2, gencost2 = loadcase(c, False)
    t_is(baseMVA2,  baseMVA,    12, [t, 'baseMVA'])
    t_is(bus2,      bus,        12, [t, 'bus'])
    t_is(gen2,      gen,        12, [t, 'gen'])
    t_is(branch2,   branch,     12, [t, 'branch'])
    t_is(areas2,    areas,      12, [t, 'areas'])
    t_is(gencost2,  gencost,    12, [t, 'gencost'])

    ## prepare expected matrices for v2 load
    baseMVA, bus, gen, branch, areas, gencost = tmp2

    t = 'loadcase(opf_struct_v2) (no version): '
    c = {}
    c['baseMVA']   = baseMVA
    c['bus']       = bus.copy()
    c['gen']       = gen.copy()
    c['branch']    = branch.copy()
    c['areas']     = areas.copy()
    c['gencost']   = gencost.copy()
    baseMVA2, bus2, gen2, branch2, areas2, gencost2 = loadcase(c, False)
    t_is(baseMVA2,  baseMVA,    12, [t, 'baseMVA'])
    t_is(bus2,      bus,        12, [t, 'bus'])
    t_is(gen2,      gen,        12, [t, 'gen'])
    t_is(branch2,   branch,     12, [t, 'branch'])
    t_is(areas2,    areas,      12, [t, 'areas'])
    t_is(gencost2,  gencost,    12, [t, 'gencost'])

    t = 'loadcase(opf_struct_v2) (version=\'2\'): '
    c = {}
    c['baseMVA']   = baseMVA
    c['bus']       = bus.copy()
    c['gen']       = gen.copy()
    c['branch']    = branch.copy()
    c['areas']     = areas.copy()
    c['gencost']   = gencost.copy()
    c['version']   = '2'
    baseMVA2, bus2, gen2, branch2, areas2, gencost2 = loadcase(c, False)
    t_is(baseMVA2,  baseMVA,    12, [t, 'baseMVA'])
    t_is(bus2,      bus,        12, [t, 'bus'])
    t_is(gen2,      gen,        12, [t, 'gen'])
    t_is(branch2,   branch,     12, [t, 'branch'])
    t_is(areas2,    areas,      12, [t, 'areas'])
    t_is(gencost2,  gencost,    12, [t, 'gencost'])

    ##-----  load OPF data into struct  -----
    ## prepare expected matrices for v1 load
    baseMVA, bus, gen, branch, areas, gencost = tmp1

    t = 'ppc = loadcase(opf_PY_file_v1) without .py extension : '
    ppc1 = loadcase(casefile)
    t_is(ppc1['baseMVA'],  baseMVA,    12, [t, 'baseMVA'])
    t_is(ppc1['bus'],      bus,        12, [t, 'bus'])
    t_is(ppc1['gen'],      gen,        12, [t, 'gen'])
    t_is(ppc1['branch'],   branch,     12, [t, 'branch'])
    t_is(ppc1['areas'],    areas,      12, [t, 'areas'])
    t_is(ppc1['gencost'],  gencost,    12, [t, 'gencost'])

    t = 'ppc = loadcase(opf_PY_file_v1) with .py extension : '
    ppc1 = loadcase(casefile + '.py')
    t_is(ppc1['baseMVA'],  baseMVA,    12, [t, 'baseMVA'])
    t_is(ppc1['bus'],      bus,        12, [t, 'bus'])
    t_is(ppc1['gen'],      gen,        12, [t, 'gen'])
    t_is(ppc1['branch'],   branch,     12, [t, 'branch'])
    t_is(ppc1['areas'],    areas,      12, [t, 'areas'])
    t_is(ppc1['gencost'],  gencost,    12, [t, 'gencost'])

    t = 'ppc = loadcase(opf_MAT_file_v1) without .mat extension : '
    ppc1 = loadcase(matfile)
    t_is(ppc1['baseMVA'],  baseMVA,    12, [t, 'baseMVA'])
    t_is(ppc1['bus'],      bus,        12, [t, 'bus'])
    t_is(ppc1['gen'],      gen,        12, [t, 'gen'])
    t_is(ppc1['branch'],   branch,     12, [t, 'branch'])
    t_is(ppc1['areas'],    areas,      12, [t, 'areas'])
    t_is(ppc1['gencost'],  gencost,    12, [t, 'gencost'])

    t = 'ppc = loadcase(opf_MAT_file_v1) with .mat extension : '
    ppc1 = loadcase(matfile + '.mat')
    t_is(ppc1['baseMVA'],  baseMVA,    12, [t, 'baseMVA'])
    t_is(ppc1['bus'],      bus,        12, [t, 'bus'])
    t_is(ppc1['gen'],      gen,        12, [t, 'gen'])
    t_is(ppc1['branch'],   branch,     12, [t, 'branch'])
    t_is(ppc1['areas'],    areas,      12, [t, 'areas'])
    t_is(ppc1['gencost'],  gencost,    12, [t, 'gencost'])

    ## prepare expected matrices for v2 load
    baseMVA, bus, gen, branch, areas, gencost = tmp2

    t = 'ppc = loadcase(opf_PY_file_v2) without .py extension : '
    ppc1 = loadcase(casefilev2)
    t_is(ppc1['baseMVA'],  baseMVA,    12, [t, 'baseMVA'])
    t_is(ppc1['bus'],      bus,        12, [t, 'bus'])
    t_is(ppc1['gen'],      gen,        12, [t, 'gen'])
    t_is(ppc1['branch'],   branch,     12, [t, 'branch'])
    t_is(ppc1['areas'],    areas,      12, [t, 'areas'])
    t_is(ppc1['gencost'],  gencost,    12, [t, 'gencost'])

    t = 'ppc = loadcase(opf_PY_file_v2) with .py extension : '
    ppc1 = loadcase(casefilev2 + '.py')
    t_is(ppc1['baseMVA'],  baseMVA,    12, [t, 'baseMVA'])
    t_is(ppc1['bus'],      bus,        12, [t, 'bus'])
    t_is(ppc1['gen'],      gen,        12, [t, 'gen'])
    t_is(ppc1['branch'],   branch,     12, [t, 'branch'])
    t_is(ppc1['areas'],    areas,      12, [t, 'areas'])
    t_is(ppc1['gencost'],  gencost,    12, [t, 'gencost'])

    t = 'ppc = loadcase(opf_MAT_file_v2) without .mat extension : '
    ppc1 = loadcase(matfilev2)
    t_is(ppc1['baseMVA'],  baseMVA,    12, [t, 'baseMVA'])
    t_is(ppc1['bus'],      bus,        12, [t, 'bus'])
    t_is(ppc1['gen'],      gen,        12, [t, 'gen'])
    t_is(ppc1['branch'],   branch,     12, [t, 'branch'])
    t_is(ppc1['areas'],    areas,      12, [t, 'areas'])
    t_is(ppc1['gencost'],  gencost,    12, [t, 'gencost'])

    t = 'ppc = loadcase(opf_MAT_file_v2) with .mat extension : '
    ppc1 = loadcase(matfilev2 + '.mat')
    t_is(ppc1['baseMVA'],  baseMVA,    12, [t, 'baseMVA'])
    t_is(ppc1['bus'],      bus,        12, [t, 'bus'])
    t_is(ppc1['gen'],      gen,        12, [t, 'gen'])
    t_is(ppc1['branch'],   branch,     12, [t, 'branch'])
    t_is(ppc1['areas'],    areas,      12, [t, 'areas'])
    t_is(ppc1['gencost'],  gencost,    12, [t, 'gencost'])

    ## prepare expected matrices for v1 load
    baseMVA, bus, gen, branch, areas, gencost = tmp1

    t = 'ppc = loadcase(opf_struct_v1) (no version): '
    baseMVA1, bus1, gen1, branch1, areas1, gencost1 = t_case9_opf()
    c = {}
    c['baseMVA']   = baseMVA1
    c['bus']       = bus1.copy()
    c['gen']       = gen1.copy()
    c['branch']    = branch1.copy()
    c['areas']     = areas1.copy()
    c['gencost']   = gencost1.copy()
    ppc2 = loadcase(c)
    t_is(ppc2['baseMVA'],  baseMVA,    12, [t, 'baseMVA'])
    t_is(ppc2['bus'],      bus,        12, [t, 'bus'])
    t_is(ppc2['gen'],      gen,        12, [t, 'gen'])
    t_is(ppc2['branch'],   branch,     12, [t, 'branch'])
    t_is(ppc2['areas'],    areas,      12, [t, 'areas'])
    t_is(ppc2['gencost'],  gencost,    12, [t, 'gencost'])

    t = 'ppc = loadcase(opf_struct_v1) (version=\'1\'): '
    c['version']   = '1'
    ppc2 = loadcase(c)
    t_is(ppc2['baseMVA'],  baseMVA,    12, [t, 'baseMVA'])
    t_is(ppc2['bus'],      bus,        12, [t, 'bus'])
    t_is(ppc2['gen'],      gen,        12, [t, 'gen'])
    t_is(ppc2['branch'],   branch,     12, [t, 'branch'])
    t_is(ppc2['areas'],    areas,      12, [t, 'areas'])
    t_is(ppc2['gencost'],  gencost,    12, [t, 'gencost'])

    ## prepare expected matrices for v2 load
    baseMVA, bus, gen, branch, areas, gencost = tmp2

    t = 'ppc = loadcase(opf_struct_v2) (no version): '
    c = {}
    c['baseMVA']   = baseMVA
    c['bus']       = bus.copy()
    c['gen']       = gen.copy()
    c['branch']    = branch.copy()
    c['areas']     = areas.copy()
    c['gencost']   = gencost.copy()
    ppc2 = loadcase(c)
    t_is(ppc2['baseMVA'],  baseMVA,    12, [t, 'baseMVA'])
    t_is(ppc2['bus'],      bus,        12, [t, 'bus'])
    t_is(ppc2['gen'],      gen,        12, [t, 'gen'])
    t_is(ppc2['branch'],   branch,     12, [t, 'branch'])
    t_is(ppc2['areas'],    areas,      12, [t, 'areas'])
    t_is(ppc2['gencost'],  gencost,    12, [t, 'gencost'])
    t_ok(ppc2['version'] == '2', [t, 'version'])

    t = 'ppc = loadcase(opf_struct_v2) (version=\'2\'): '
    c = {}
    c['baseMVA']   = baseMVA
    c['bus']       = bus.copy()
    c['gen']       = gen.copy()
    c['branch']    = branch.copy()
    c['areas']     = areas.copy()
    c['gencost']   = gencost.copy()
    c['version']   = '2'
    ppc2 = loadcase(c)
    t_is(ppc2['baseMVA'],  baseMVA,    12, [t, 'baseMVA'])
    t_is(ppc2['bus'],      bus,        12, [t, 'bus'])
    t_is(ppc2['gen'],      gen,        12, [t, 'gen'])
    t_is(ppc2['branch'],   branch,     12, [t, 'branch'])
    t_is(ppc2['areas'],    areas,      12, [t, 'areas'])
    t_is(ppc2['gencost'],  gencost,    12, [t, 'gencost'])

    ## read version 1 PF data matrices
    baseMVA, bus, gen, branch = t_case9_pf()
    savemat(pfmatfile + '.mat',
        {'baseMVA': baseMVA, 'bus': bus, 'gen': gen, 'branch': branch},
        oned_as='column')

    ## read version 2 PF data matrices
    ppc = t_case9_pfv2()
    tmp = (ppc['baseMVA'], ppc['bus'].copy(),
           ppc['gen'].copy(), ppc['branch'].copy())
    baseMVA, bus, gen, branch = tmp
    ## save as .mat file
    savemat(pfmatfilev2 + '.mat', {'ppc': ppc}, oned_as='column')

    ##-----  load PF data into individual matrices  -----
    t = 'loadcase(pf_PY_file_v1) without .py extension : '
    baseMVA1, bus1, gen1, branch1 = \
            loadcase(pfcasefile, False, False, False)
    t_is(baseMVA1,  baseMVA,    12, [t, 'baseMVA'])
    t_is(bus1,      bus,        12, [t, 'bus'])
    t_is(gen1,      gen,        12, [t, 'gen'])
    t_is(branch1,   branch,     12, [t, 'branch'])

    t = 'loadcase(pf_PY_file_v1) with .py extension : '
    baseMVA1, bus1, gen1, branch1 = \
            loadcase(pfcasefile + '.py', False, False, False)
    t_is(baseMVA1,  baseMVA,    12, [t, 'baseMVA'])
    t_is(bus1,      bus,        12, [t, 'bus'])
    t_is(gen1,      gen,        12, [t, 'gen'])
    t_is(branch1,   branch,     12, [t, 'branch'])

    t = 'loadcase(pf_MAT_file_v1) without .mat extension : '
    baseMVA1, bus1, gen1, branch1 = \
            loadcase(pfmatfile, False, False, False)
    t_is(baseMVA1,  baseMVA,    12, [t, 'baseMVA'])
    t_is(bus1,      bus,        12, [t, 'bus'])
    t_is(gen1,      gen,        12, [t, 'gen'])
    t_is(branch1,   branch,     12, [t, 'branch'])

    t = 'loadcase(pf_MAT_file_v1) with .mat extension : '
    baseMVA1, bus1, gen1, branch1 = \
            loadcase(pfmatfile + '.mat', False, False, False)
    t_is(baseMVA1,  baseMVA,    12, [t, 'baseMVA'])
    t_is(bus1,      bus,        12, [t, 'bus'])
    t_is(gen1,      gen,        12, [t, 'gen'])
    t_is(branch1,   branch,     12, [t, 'branch'])

    t = 'loadcase(pf_PY_file_v2) without .py extension : '
    baseMVA1, bus1, gen1, branch1 = \
            loadcase(pfcasefilev2, False, False, False)
    t_is(baseMVA1,  baseMVA,    12, [t, 'baseMVA'])
    t_is(bus1,      bus,        12, [t, 'bus'])
    t_is(gen1,      gen,        12, [t, 'gen'])
    t_is(branch1,   branch,     12, [t, 'branch'])

    t = 'loadcase(pf_PY_file_v2) with .py extension : '
    baseMVA1, bus1, gen1, branch1 = \
            loadcase(pfcasefilev2 + '.py', False, False, False)
    t_is(baseMVA1,  baseMVA,    12, [t, 'baseMVA'])
    t_is(bus1,      bus,        12, [t, 'bus'])
    t_is(gen1,      gen,        12, [t, 'gen'])
    t_is(branch1,   branch,     12, [t, 'branch'])

    t = 'loadcase(pf_MAT_file_v2) without .mat extension : '
    baseMVA1, bus1, gen1, branch1 = \
            loadcase(pfmatfilev2, False, False, False)
    t_is(baseMVA1,  baseMVA,    12, [t, 'baseMVA'])
    t_is(bus1,      bus,        12, [t, 'bus'])
    t_is(gen1,      gen,        12, [t, 'gen'])
    t_is(branch1,   branch,     12, [t, 'branch'])

    t = 'loadcase(pf_MAT_file_v2) with .mat extension : '
    baseMVA1, bus1, gen1, branch1 = \
            loadcase(pfmatfilev2 + '.mat', False, False, False)
    t_is(baseMVA1,  baseMVA,    12, [t, 'baseMVA'])
    t_is(bus1,      bus,        12, [t, 'bus'])
    t_is(gen1,      gen,        12, [t, 'gen'])
    t_is(branch1,   branch,     12, [t, 'branch'])

    t = 'loadcase(pf_struct_v1) (no version): '
    baseMVA1, bus1, gen1, branch1 = t_case9_pf()
    c = {}
    c['baseMVA']   = baseMVA1
    c['bus']       = bus1.copy()
    c['gen']       = gen1.copy()
    c['branch']    = branch1.copy()
    baseMVA2, bus2, gen2, branch2 = loadcase(c, False, False, False)
    t_is(baseMVA2,  baseMVA,    12, [t, 'baseMVA'])
    t_is(bus2,      bus,        12, [t, 'bus'])
    t_is(gen2,      gen,        12, [t, 'gen'])
    t_is(branch2,   branch,     12, [t, 'branch'])

    t = 'loadcase(pf_struct_v1) (version=\'1\'): '
    c['version']   = '1'
    baseMVA2, bus2, gen2, branch2 = loadcase(c, False, False, False)
    t_is(baseMVA2,  baseMVA,    12, [t, 'baseMVA'])
    t_is(bus2,      bus,        12, [t, 'bus'])
    t_is(gen2,      gen,        12, [t, 'gen'])
    t_is(branch2,   branch,     12, [t, 'branch'])

    t = 'loadcase(pf_struct_v2) : '
    c = {}
    c['baseMVA']   = baseMVA
    c['bus']       = bus.copy()
    c['gen']       = gen.copy()
    c['branch']    = branch.copy()
    c['version']   = '2'
    baseMVA2, bus2, gen2, branch2 = loadcase(c, False, False, False)
    t_is(baseMVA2,  baseMVA,    12, [t, 'baseMVA'])
    t_is(bus2,      bus,        12, [t, 'bus'])
    t_is(gen2,      gen,        12, [t, 'gen'])
    t_is(branch2,   branch,     12, [t, 'branch'])

    ##-----  load PF data into struct  -----
    t = 'ppc = loadcase(pf_PY_file_v1) without .py extension : '
    ppc1 = loadcase(pfcasefile)
    t_is(ppc1['baseMVA'],  baseMVA,    12, [t, 'baseMVA'])
    t_is(ppc1['bus'],      bus,        12, [t, 'bus'])
    t_is(ppc1['gen'],      gen,        12, [t, 'gen'])
    t_is(ppc1['branch'],   branch,     12, [t, 'branch'])

    t = 'ppc = loadcase(pf_PY_file_v1) with .py extension : '
    ppc1 = loadcase(pfcasefile + '.py')
    t_is(ppc1['baseMVA'],  baseMVA,    12, [t, 'baseMVA'])
    t_is(ppc1['bus'],      bus,        12, [t, 'bus'])
    t_is(ppc1['gen'],      gen,        12, [t, 'gen'])
    t_is(ppc1['branch'],   branch,     12, [t, 'branch'])

    t = 'ppc = loadcase(pf_MAT_file_v1) without .mat extension : '
    ppc1 = loadcase(pfmatfile)
    t_is(ppc1['baseMVA'],  baseMVA,    12, [t, 'baseMVA'])
    t_is(ppc1['bus'],      bus,        12, [t, 'bus'])
    t_is(ppc1['gen'],      gen,        12, [t, 'gen'])
    t_is(ppc1['branch'],   branch,     12, [t, 'branch'])

    t = 'ppc = loadcase(pf_MAT_file_v1) with .mat extension : '
    ppc1 = loadcase(pfmatfile + '.mat')
    t_is(ppc1['baseMVA'],  baseMVA,    12, [t, 'baseMVA'])
    t_is(ppc1['bus'],      bus,        12, [t, 'bus'])
    t_is(ppc1['gen'],      gen,        12, [t, 'gen'])
    t_is(ppc1['branch'],   branch,     12, [t, 'branch'])

    t = 'ppc = loadcase(pf_PY_file_v2) without .py extension : '
    ppc1 = loadcase(pfcasefilev2)
    t_is(ppc1['baseMVA'],  baseMVA,    12, [t, 'baseMVA'])
    t_is(ppc1['bus'],      bus,        12, [t, 'bus'])
    t_is(ppc1['gen'],      gen,        12, [t, 'gen'])
    t_is(ppc1['branch'],   branch,     12, [t, 'branch'])

    t = 'ppc = loadcase(pf_PY_file_v2) with .py extension : '
    ppc1 = loadcase(pfcasefilev2 + '.py')
    t_is(ppc1['baseMVA'],  baseMVA,    12, [t, 'baseMVA'])
    t_is(ppc1['bus'],      bus,        12, [t, 'bus'])
    t_is(ppc1['gen'],      gen,        12, [t, 'gen'])
    t_is(ppc1['branch'],   branch,     12, [t, 'branch'])

    t = 'ppc = loadcase(pf_MAT_file_v2) without .mat extension : '
    ppc1 = loadcase(pfmatfilev2)
    t_is(ppc1['baseMVA'],  baseMVA,    12, [t, 'baseMVA'])
    t_is(ppc1['bus'],      bus,        12, [t, 'bus'])
    t_is(ppc1['gen'],      gen,        12, [t, 'gen'])
    t_is(ppc1['branch'],   branch,     12, [t, 'branch'])

    t = 'ppc = loadcase(pf_MAT_file_v2) with .mat extension : '
    ppc1 = loadcase(pfmatfilev2 + '.mat')
    t_is(ppc1['baseMVA'],  baseMVA,    12, [t, 'baseMVA'])
    t_is(ppc1['bus'],      bus,        12, [t, 'bus'])
    t_is(ppc1['gen'],      gen,        12, [t, 'gen'])
    t_is(ppc1['branch'],   branch,     12, [t, 'branch'])

    t = 'ppc = loadcase(pf_struct_v1) (no version): '
    baseMVA1, bus1, gen1, branch1 = t_case9_pf()
    c = {}
    c['baseMVA']   = baseMVA1
    c['bus']       = bus1.copy()
    c['gen']       = gen1.copy()
    c['branch']    = branch1.copy()
    ppc2 = loadcase(c)
    t_is(ppc2['baseMVA'],  baseMVA,    12, [t, 'baseMVA'])
    t_is(ppc2['bus'],      bus,        12, [t, 'bus'])
    t_is(ppc2['gen'],      gen,        12, [t, 'gen'])
    t_is(ppc2['branch'],   branch,     12, [t, 'branch'])

    t = 'ppc = loadcase(pf_struct_v1) (version=\'1\'): '
    c['version']   = '1'
    ppc2 = loadcase(c)
    t_is(ppc2['baseMVA'],  baseMVA,    12, [t, 'baseMVA'])
    t_is(ppc2['bus'],      bus,        12, [t, 'bus'])
    t_is(ppc2['gen'],      gen,        12, [t, 'gen'])
    t_is(ppc2['branch'],   branch,     12, [t, 'branch'])

    t = 'ppc = loadcase(pf_struct_v2) : '
    c = {}
    c['baseMVA']   = baseMVA
    c['bus']       = bus.copy()
    c['gen']       = gen.copy()
    c['branch']    = branch.copy()
    c['version']   = '2'
    ppc2 = loadcase(c)
    t_is(ppc2['baseMVA'],  baseMVA,    12, [t, 'baseMVA'])
    t_is(ppc2['bus'],      bus,        12, [t, 'bus'])
    t_is(ppc2['gen'],      gen,        12, [t, 'gen'])
    t_is(ppc2['branch'],   branch,     12, [t, 'branch'])

    ## cleanup
    os.remove(matfile + '.mat')
    os.remove(pfmatfile + '.mat')
    os.remove(matfilev2 + '.mat')
    os.remove(pfmatfilev2 + '.mat')

    t = 'runpf(my_PY_file)'
    ppopt = ppoption(VERBOSE=0, OUT_ALL=0)
    results3, success = runpf(pfcasefile, ppopt)
    baseMVA3, bus3, gen3, branch3 = results3['baseMVA'], results3['bus'], \
            results3['gen'], results3['branch']
    t_ok( success, t )

    t = 'runpf(my_object)'
    results4, success = runpf(c, ppopt)
    baseMVA4, bus4, gen4, branch4 = results4['baseMVA'], results4['bus'], \
            results4['gen'], results4['branch']
    t_ok( success, t )

    t = 'runpf result comparison : '
    t_is(baseMVA3,  baseMVA4,   12, [t, 'baseMVA'])
    t_is(bus3,      bus4,       12, [t, 'bus'])
    t_is(gen3,      gen4,       12, [t, 'gen'])
    t_is(branch3,   branch4,    12, [t, 'branch'])

    t = 'runpf(modified_struct)'
    c['gen'][2, 1] = c['gen'][2, 1] + 1            ## increase gen 3 output by 1
    results5, success = runpf(c, ppopt)
    gen5 = results5['gen']
    t_is(gen5[0, 1], gen4[0, 1] - 1, 1, t)   ## slack bus output should decrease by 1

    t_end()
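
t_loadcase exercises loadcase's two calling conventions, returning either individual matrices or a single ppc dict, from a Python case file, a .mat file, or an in-memory dict. A condensed, hedged sketch of the dict round-trip (assumes PYPOWER is installed; case9 is one of its bundled cases):

from pypower.api import case9, loadcase, ppoption, runpf

ppc = loadcase(case9())                 # normalize an in-memory case dict
ppopt = ppoption(VERBOSE=0, OUT_ALL=0)  # silence the solver output
results, success = runpf(ppc, ppopt)
print(success, results['bus'].shape)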

Example 37

Project: PYPOWER Source File: t_off2case.py
def t_off2case(quiet=False):
    """Tests for code in C{off2case}.

    @author: Ray Zimmerman (PSERC Cornell)
    """
    n_tests = 35

    t_begin(n_tests, quiet)

    ## generator data
    #    bus Pg Qg Qmax Qmin Vg mBase status Pmax Pmin Pc1 Pc2 Qc1min Qc1max Qc2min Qc2max ramp_agc ramp_10 ramp_30 ramp_q apf
    gen0 = array([
        [1,   10,   0,  60, -15, 1, 100, 1, 60, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [2,   10,   0,  60, -15, 1, 100, 1, 60, 12, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [7,  -30, -15,   0, -15, 1, 100, 1, 0, -30, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [13,  10,   0,  60, -15, 1, 100, 1, 60, 12, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [30, -30, 7.5, 7.5,   0, 1, 100, 1, 0, -30, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
    ], float)
    ## generator cost data
    #    1    startup    shutdown    n    x1    y1    ...    xn    yn
    #    2    startup    shutdown    n    c(n-1)    ...    c0
    gencost0 = array([
        [1, 0,   0, 4,   0, 0,  12, 240,   36, 1200, 60, 2400],
        [1, 100, 0, 4,   0, 0,  12, 240,   36, 1200, 60, 2400],
        [1, 0,   0, 4, -30, 0, -20, 1000, -10, 2000,  0, 3000],
        [1, 0,   0, 4,   0, 0,  12, 240,   36, 1200, 60, 2400],
        [1, 0,  50, 4, -30, 0, -20, 1000, -10, 2000,  0, 3000]
    ], float)
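
    # The expected gencost matrices built in the checks below start at the
    # NCOST column of each row: the breakpoint count n, followed by the n
    # (x, y) pairs of the piecewise-linear cost, zero-padded (via
    # c_[..., zeros((rows, pad))]) to the fixed row width of gencost0.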

    try:
        from pypower.extras.smartmarket import off2case
    except ImportError:
        t_skip(n_tests, 'smartmarket code not available')
        return

    t = 'isload()'
    t_is(isload(gen0), array([0, 0, 1, 0, 1], bool), 8, t)

    G = find( ~isload(gen0) )
    L = find(  isload(gen0) )
    nGL = len(G) + len(L)

    t = 'P offers only'
    offers = {'P': {}}
    offers['P']['qty'] = array([[25], [26], [27]], float)
    offers['P']['prc'] = array([[10], [50], [100]], float)
    gen, gencost = off2case(gen0, gencost0, offers)

    gen1 = gen0.copy()
    gen1[G, PMAX] = offers['P']['qty'].flatten()
    gen1[L, GEN_STATUS] = 0
    t_is( gen, gen1, 8, [t, ' - gen'] )

    gencost1 = gencost0.copy()
    gencost1[ix_(G, range(NCOST, NCOST + 9))] = c_[array([
        [2, 0, 0, 25,  250],
        [2, 0, 0, 26, 1300],
        [2, 0, 0, 27, 2700],
    ]), zeros((3, 4))]

    t_is( gencost, gencost1, 8, [t, ' - gencost'] )

    offers['P']['qty'] = array([[25], [26], [0], [27],  [0]], float)
    offers['P']['prc'] = array([[10], [50], [0], [100], [0]], float)
    gen, gencost = off2case(gen0, gencost0, offers)
    t_is( gen, gen1, 8, [t, ' (all rows in offer) - gen'] )
    t_is( gencost, gencost1, 8, [t, ' (all rows in offer) - gencost'] )

    t = 'P offers only (GEN_STATUS=0 for 0 qty offer)'
    offers['P']['qty'] = array([ [0], [26],  [27]], float)
    offers['P']['prc'] = array([[10], [50], [100]], float)
    gen, gencost = off2case(gen0, gencost0, offers)

    gen1 = gen0.copy()
    gen1[G[1:3], PMAX] = offers['P']['qty'].flatten()[1:3]
    gen1[G[0], GEN_STATUS] = 0
    gen1[L, GEN_STATUS] = 0
    t_is( gen, gen1, 8, [t, ' - gen'] )

    gencost1 = gencost0.copy()
    gencost1[ix_(G[1:3], range(NCOST, NCOST + 9))] = c_[array([
        [2, 0, 0, 26, 1300],
        [2, 0, 0, 27, 2700]
    ]), zeros((2, 4))]

    t_is( gencost, gencost1, 8, [t, ' - gencost'] )

    t = 'P offers, lim[\'P\'][\'max_offer\']'
    offers['P']['qty'] = array([[25], [26], [27]], float)
    offers['P']['prc'] = array([[10], [50], [100]], float)
    lim = {'P': {'max_offer': 75}}
    gen, gencost = off2case(gen0, gencost0, offers, lim=lim)

    gen1 = gen0.copy()
    gen1[G[:2], PMAX] = offers['P']['qty'].flatten()[:2]
    gen1[r_[G[2], L], GEN_STATUS] = 0
    t_is( gen, gen1, 8, [t, ' - gen'] )

    gencost1 = gencost0.copy()
    gencost1[ix_(G[:2], range(NCOST, NCOST + 9))] = c_[array([
        [2, 0, 0, 25,  250],
        [2, 0, 0, 26, 1300]
    ]), zeros((2, 4))]
    t_is( gencost, gencost1, 8, [t, ' - gencost'] )

    t = 'P offers & P bids'
    bids = {'P': {'qty': array([ [20], [28]], float),
                  'prc': array([[100], [10]], float)}}
    gen, gencost = off2case(gen0, gencost0, offers, bids)

    gen1 = gen0.copy()
    gen1[G, PMAX] = offers['P']['qty'].flatten()
    gen1[ix_(L, [PMIN, QMIN, QMAX])] = array([
        [-20, -10, 0],
        [-28,   0, 7]
    ])
    t_is( gen, gen1, 8, [t, ' - gen'] )

    gencost1 = gencost0[:, :8].copy()
    gencost1[ix_(G, range(NCOST, NCOST + 4))] = array([
        [2, 0, 0, 25,  250],
        [2, 0, 0, 26, 1300],
        [2, 0, 0, 27, 2700]
    ])
    gencost1[ix_(L, range(NCOST, NCOST + 4))] = array([
        [2, -20, -2000, 0, 0],
        [2, -28,  -280, 0, 0]
    ])
    t_is( gencost, gencost1, 8, [t, ' - gencost'] )

    t = 'P offers & P bids (all rows in bid)'
    bids['P']['qty'] = array([[0], [0],  [20], [0], [28]], float)
    bids['P']['prc'] = array([[0], [0], [100], [0], [10]], float)
    gen, gencost = off2case(gen0, gencost0, offers, bids)

    t_is( gen, gen1, 8, [t, ' - gen'] )
    t_is( gencost, gencost1, 8, [t, ' - gencost'] )

    t = 'P offers & P bids (GEN_STATUS=0 for 0 qty bid)'
    bids['P']['qty'] = array([  [0], [28]], float)
    bids['P']['prc'] = array([[100], [10]], float)
    gen, gencost = off2case(gen0, gencost0, offers, bids)

    gen1 = gen0.copy()
    gen1[G, PMAX] = offers['P']['qty'].flatten()
    gen1[L[0], GEN_STATUS] = 0
    gen1[L[1], [PMIN, QMIN, QMAX]] = array([-28, 0, 7])
    t_is( gen, gen1, 8, [t, ' - gen'] )

    gencost1 = gencost0.copy()
    gencost1[ix_(G, range(NCOST, NCOST + 9))] = c_[array([
        [2, 0, 0, 25, 250],
        [2, 0, 0, 26, 1300],
        [2, 0, 0, 27, 2700]
    ]), zeros((3, 4))]
    gencost1[L[1], NCOST:NCOST + 9] = c_[array([
        [2, -28, -280, 0, 0]
    ]), zeros((1, 4))].flatten()
    t_is( gencost, gencost1, 8, [t, ' - gencost'] )

    t = 'P offers & P bids (1 gen with both)'
    gen2 = gen0.copy()
    gen2[1, PMIN] = -5
    bids['P']['qty'] = array([[0],  [3],  [20], [0], [28]], float)
    bids['P']['prc'] = array([[0], [50], [100], [0], [10]], float)
    gen, gencost = off2case(gen2, gencost0, offers, bids)

    gen1 = gen2.copy()
    gen1[G, PMAX] = offers['P']['qty'].flatten()
    gen1[1, PMIN] = -sum( bids['P']['qty'][1, :] )
    gen1[ix_(L, [PMIN, QMIN, QMAX])] = array([
        [-20, -10, 0],
        [-28,   0, 7]
    ])
    t_is( gen, gen1, 8, [t, ' - gen'] )

    gencost1 = gencost0[:, :10].copy()
    gencost1[ix_(G, range(NCOST, NCOST + 7))] = array([
        [2,  0,    0, 25,  250,  0,    0],
        [3, -3, -150,  0,    0, 26, 1300],
        [2,  0,    0, 27, 2700,  0,    0]
    ])
    gencost1[ix_(L, range(NCOST, NCOST + 7))] = c_[array([
        [2, -20, -2000, 0, 0],
        [2, -28,  -280, 0, 0]
    ]), zeros((2, 2))]
    t_is( gencost, gencost1, 8, [t, ' - gencost'] )

    t = 'P offers & P bids, lim[\'P\'][\'max_offer\']/[\'min_bid\']'
    bids['P']['qty'] = array([[20],  [28]], float)
    bids['P']['prc'] = array([[100], [10]], float)
    lim['P']['min_bid'] = 50.0
    gen, gencost = off2case(gen0, gencost0, offers, bids, lim)

    gen1 = gen0.copy()
    gen1[G[:2], PMAX] = offers['P']['qty'][:2, :].flatten()
    gen1[r_[G[2], L[1]], GEN_STATUS] = 0
    gen1[L[0], [PMIN, QMIN, QMAX]] = array([-20, -10, 0])
    t_is( gen, gen1, 8, [t, ' - gen'] )

    gencost1 = gencost0.copy()
    gencost1[ix_(G[:2], range(NCOST, NCOST + 9))] = c_[array([
        [2, 0, 0, 25,  250],
        [2, 0, 0, 26, 1300]
    ]), zeros((2, 4))]
    gencost1[L[0], NCOST:NCOST + 9] = array([2, -20, -2000, 0, 0, 0, 0, 0, 0])
    t_is( gencost, gencost1, 8, [t, ' - gencost'] )

    t = 'P offers & P bids, lim[\'P\'][\'max_offer\']/[\'min_bid\'], multi-block'
    offers['P']['qty'] = array([[10,  40], [20, 30], [25, 25]], float)
    offers['P']['prc'] = array([[10, 100], [25, 65], [50, 90]], float)
    bids['P']['qty'] = array([[ 20, 10], [12, 18]], float)
    bids['P']['prc'] = array([[100, 60], [70, 10]], float)
    gen, gencost = off2case(gen0, gencost0, offers, bids, lim)

    gen1 = gen0.copy()
    gen1[G, PMAX] = array([10, 50, 25])
    gen1[ix_(L, [PMIN, QMIN, QMAX])] = array([
        [-30, -15, 0],
        [-12,   0, 3]
    ])
    t_is( gen, gen1, 8, [t, ' - gen'] )

    gencost1 = gencost0[:, :10].copy()
    gencost1[ix_(G, range(NCOST, NCOST + 7))] = array([
        [2, 0, 0, 10,  100, 0,     0],
        [3, 0, 0, 20,  500, 50, 2450],
        [2, 0, 0, 25, 1250, 0,     0]
    ])
    gencost1[ix_(L, range(NCOST, NCOST + 7))] = array([
        [3, -30, -2600, -20, -2000, 0, 0],
        [2, -12,  -840,   0,     0, 0, 0]
    ])
    t_is( gencost, gencost1, 8, [t, ' - gencost'] )

    ##-----  reactive  -----
    ## generator cost data
    #    1    startup    shutdown    n    x1    y1    ...    xn    yn
    #    2    startup    shutdown    n    c(n-1)    ...    c0
    gencost0 = array([
        [1,   0,  0, 4,   0,    0,  12,  240,  36, 1200, 60, 2400],
        [1, 100,  0, 4,   0,    0,  12,  240,  36, 1200, 60, 2400],
        [1,   0,  0, 4, -30,    0, -20, 1000, -10, 2000,  0, 3000],
        [1,   0,  0, 4,   0,    0,  12,  240,  36, 1200, 60, 2400],
        [1,   0, 50, 4, -30,    0, -20, 1000, -10, 2000,  0, 3000],
        [1,   0,  0, 4, -15, -150,   0,    0,  30,  150, 60,  450],
        [1, 100,  0, 2,   0,    0,   0,    0,   0,    0,  0,    0],
        [1,   0,  0, 3, -20,  -15, -10,  -10,   0,    0,  0,    0],
        [1,   0,  0, 3,   0,    0,  40,   80,  60,  180,  0,    0],
        [1,   0, 50, 2,   0,    0,   0,    0,   0,    0,  0,    0]
    ], float)

    t = 'PQ offers only'
    offers['P']['qty'] = array([[25], [26],  [27]], float)
    offers['P']['prc'] = array([[10], [50], [100]], float)
    offers['Q'] = {}
    offers['Q']['qty'] = array([[10], [20],  [30]], float)
    offers['Q']['prc'] = array([[10],  [5],   [1]], float)
    gen, gencost = off2case(gen0, gencost0, offers)

    gen1 = gen0.copy()
    gen1[G, PMAX] = offers['P']['qty'].flatten()
    gen1[G, QMAX] = offers['Q']['qty'].flatten()
    gen1[G, QMIN] = 0
    gen1[L, GEN_STATUS] = 0
    t_is( gen, gen1, 8, [t, ' - gen'] )

    gencost1 = gencost0.copy()
    gencost1[ix_(G, range(NCOST, NCOST + 9))] = c_[array([
        [2, 0, 0, 25,  250],
        [2, 0, 0, 26, 1300],
        [2, 0, 0, 27, 2700]
    ]), zeros((3, 4))]
    gencost1[ix_(G + nGL, range(NCOST, NCOST + 9))] = c_[array([
        [2, 0, 0, 10, 100],
        [2, 0, 0, 20, 100],
        [2, 0, 0, 30,  30]
    ]), zeros((3, 4))]

    t_is( gencost, gencost1, 8, [t, ' - gencost'] )

    t = 'PQ offers & PQ bids, lim.P/Q.max_offer/min_bid, multi-block'
    offers['P']['qty'] = array([[10,  40], [20, 30], [25, 25]], float)
    offers['P']['prc'] = array([[10, 100], [25, 65], [50, 90]], float)
    bids['P']['qty'] = array([[ 20, 10], [12, 18]], float)
    bids['P']['prc'] = array([[100, 60], [70, 10]], float)
    offers['Q']['qty'] = array([[ 5,  5], [10, 10], [15, 15]], float)
    offers['Q']['prc'] = array([[10, 20], [ 5, 60], [ 1, 10]], float)
    bids['Q'] = {'qty': array([ 15, 10, 15,  15,  0], float),
                 'prc': array([-10,  0,  5, -20, 10], float)}
    lim['Q'] = {'max_offer': 50.0, 'min_bid': -15.0}
    gen, gencost = off2case(gen0, gencost0, offers, bids, lim)

    gen1 = gen0.copy()
    gen1[:, [GEN_STATUS, PMIN, PMAX, QMIN, QMAX]] = array([
        [1,  10, 10, -15,  10],
        [1,  12, 50, -10,  10],
        [1, -10,  0,  -5,   0],
        [1,  12, 25,   0,  30],
        [0, -30,  0,   0, 7.5]
    ])
    t_is( gen, gen1, 8, [t, ' - gen'] )

    gencost1 = gencost0[:, :12].copy()
    gencost1[:, NCOST:NCOST + 9] = array([
        [2,   0,     0,  10,   100,   0,    0,  0,    0],
        [3,   0,     0,  20,   500,  50, 2450,  0,    0],
        [3, -30, -2600, -20, -2000,   0,    0,  0,    0],
        [2,   0,     0,  25,  1250,   0,    0,  0,    0],
        [4, -30,     0, -20,  1000, -10, 2000,  0, 3000],
        [4, -15,   150,   0,     0,   5,   50, 10,  150],
        [3, -10,     0,   0,     0,  10,   50,  0,    0],
        [2, -15,   -75,   0,     0,   0,    0,  0,    0],
        [3,   0,     0,  15,    15,  30,  165,  0,    0],
        [2,   0,     0,   0,     0,   0,    0,  0,    0]
    ])
    t_is( gencost, gencost1, 8, [t, ' - gencost'] )

    t = 'PQ offers & PQ bids, for gen, no P, no shutdown'
    gen2 = gen0.copy()
    gen2[0, PMIN] = 0
    offers['P']['qty'] = array([[0, 40], [20, 30], [25, 25]], float)
    gen, gencost = off2case(gen2, gencost0, offers, bids, lim)

    gen1[0, [PMIN, PMAX, QMIN, QMAX]] = array([0, 0, -15, 10])
    t_is( gen, gen1, 8, [t, ' - gen'] )

    gencost1[0, NCOST:NCOST + 9] = gencost0[0, NCOST:NCOST + 9]
    t_is( gencost, gencost1, 8, [t, ' - gencost'] )

    t = 'PQ offers & PQ bids, for gen, no Q, no shutdown'
    offers['P']['qty'] = array([[10, 40], [20, 30], [25, 25]], float)
    offers['Q']['qty'] = array([[ 5,  5], [ 0, 10], [15, 15]], float)
    bids['Q']['qty'] = array([15, 0, 15, 15, 0], float)
    gen, gencost = off2case(gen0, gencost0, offers, bids, lim)

    gen1[0, [PMIN, PMAX, QMIN, QMAX]] = array([10, 10, -15, 10])    ## restore original
    gen1[1, [PMIN, PMAX, QMIN, QMAX]] = array([12, 50,   0,  0])
    t_is( gen, gen1, 8, [t, ' - gen'] )

    gencost1[ix_([0, 1, 6], range(NCOST, NCOST + 9))] = array([
        [2, 0, 0, 10, 100,  0,    0, 0, 0],
        [3, 0, 0, 20, 500, 50, 2450, 0, 0],
        [2, 0, 0,  0,   0,  0,    0, 0, 0]
    ])
    t_is( gencost, gencost1, 8, [t, ' - gencost'] )

    t = 'PQ offers & PQ bids, lim.P/Q.max_offer/min_bid, multi-block'
    offers['P']['qty'] = array([[10,  40], [20, 30], [25, 25]], float)
    offers['P']['prc'] = array([[10, 100], [25, 65], [50, 90]], float)
    bids['P']['qty'] = array([[10,   0], [12, 18]], float)
    bids['P']['prc'] = array([[100, 60], [70, 10]], float)
    offers['Q']['qty'] = array([[5, 5], [10, 10], [15, 15]], float)
    offers['Q']['prc'] = array([[10, 20], [5, 60], [1, 10]], float)
    bids['Q']['qty'] = array([15, 10, 10, 15, 0], float)
    bids['Q']['prc'] = array([-10, 0, 5, -20, 10], float)
    lim['Q']['max_offer'] = 50.0
    lim['Q']['min_bid'] = -15.0
    gen, gencost = off2case(gen0, gencost0, offers, bids, lim)

    gen1 = gen0.copy()
    gen1[:, [GEN_STATUS, PMIN, PMAX, QMIN, QMAX]] = array([
        [1,  10, 10, -15, 10],
        [1,  12, 50, -10, 10],
        [1, -10,  0,  -5,  0],
        [1,  12, 25,   0, 30],
        [0, -30,  0,   0,  7.5]
    ])
    t_is( gen, gen1, 8, [t, ' - gen'] )

    gencost1 = gencost0[:, :12].copy()
    gencost1[:, NCOST:NCOST + 9] = array([
        [2,   0,     0,  10,  100,   0,    0,  0,    0],
        [3,   0,     0,  20,  500,  50, 2450,  0,    0],
        [2, -10, -1000,   0,    0,   0,    0,  0,    0],
        [2,   0,     0,  25, 1250,   0,    0,  0,    0],
        [4, -30,     0, -20, 1000, -10, 2000,  0, 3000],
        [4, -15,   150,   0,    0,   5,   50, 10,  150],
        [3, -10,     0,   0,    0,  10,   50,  0,    0],
        [2, -10,   -50,   0,    0,   0,    0,  0,    0],
        [3,   0,     0,  15,   15,  30,  165,  0,    0],
        [2,   0,     0,   0,    0,   0,    0,  0,    0]
    ])
    t_is( gencost, gencost1, 8, [t, ' - gencost'] )

    t = 'PQ offers & PQ bids, zero Q load w/P bid, shutdown bugfix'
    gen1 = gen0.copy()
    gen1[4, [QG, QMIN, QMAX]] = 0
    gen, gencost = off2case(gen1, gencost0, offers, bids, lim)

    gen1[:, [PMIN, PMAX, QMIN, QMAX]] = array([
        [ 10, 10, -15, 10],
        [ 12, 50, -10, 10],
        [-10,  0,  -5,  0],
        [ 12, 25,   0, 30],
        [-12,  0,   0,  0]
    ])
    t_is( gen, gen1, 8, [t, ' - gen'] )

    gencost1 = gencost0[:, :12].copy()
    gencost1[:, NCOST:NCOST + 9] = array([
        [2,   0,     0, 10,  100,  0,    0,  0,   0],
        [3,   0,     0, 20,  500, 50, 2450,  0,   0],
        [2, -10, -1000,  0,    0,  0,    0,  0,   0],
        [2,   0,     0, 25, 1250,  0,    0,  0,   0],
        [2, -12,  -840,  0,    0,  0,    0,  0,   0],
        [4, -15,   150,  0,    0,  5,   50, 10, 150],
        [3, -10,     0,  0,    0, 10,   50,  0,   0],
        [2, -10,   -50,  0,    0,  0,    0,  0,   0],
        [3,   0,     0, 15,   15, 30,  165,  0,   0],
        [2,   0,     0,  0,    0,  0,    0,  0,   0]
    ])
    t_is( gencost, gencost1, 8, [t, ' - gencost'] )

    t = 'PQ offers & PQ bids, non-zero Q load w/no P bid, shutdown bugfix'
    offers['P']['qty'] = array([[10,  40], [20, 30], [25, 25]], float)
    offers['P']['prc'] = array([[10, 100], [25, 65], [50, 90]], float)
    bids['P']['qty'] = array([[0, 10], [12, 18]], float)
    bids['P']['prc'] = array([[100, 40], [70, 10]], float)
    offers['Q']['qty'] = array([[ 5,  5], [10, 10], [15, 15]], float)
    offers['Q']['prc'] = array([[10, 20], [ 5, 60], [ 1, 10]], float)
    bids['Q']['qty'] = array([ 15, 10, 15,  15,  0], float)
    bids['Q']['prc'] = array([-10,  0,  5, -20, 10], float)
    lim['Q']['max_offer'] = 50.0
    lim['Q']['min_bid'] = -15.0
    gen, gencost = off2case(gen0, gencost0, offers, bids, lim)

    gen1 = gen0.copy()
    gen1[:, [GEN_STATUS, PMIN, PMAX, QMIN, QMAX]] = array([
        [1,  10, 10, -15, 10],
        [1,  12, 50, -10, 10],
        [0, -30,  0, -15,  0],
        [1,  12, 25,   0, 30],
        [0, -30,  0,   0, 7.5]
    ])
    t_is( gen, gen1, 8, [t, ' - gen'] )

    gencost1 = gencost0[:, :12].copy()
    gencost1[:, NCOST:NCOST + 9] = array([
        [2,   0,   0,  10,  100,   0,    0,  0,    0],
        [3,   0,   0,  20,  500,  50, 2450,  0,    0],
        [4, -30,   0, -20, 1000, -10, 2000,  0, 3000],
        [2,   0,   0,  25, 1250,   0,    0,  0,    0],
        [4, -30,   0, -20, 1000, -10, 2000,  0, 3000],
        [4, -15, 150,   0,    0,   5,   50, 10,  150],
        [3, -10,   0,   0,    0,  10,   50,  0,    0],
        [3, -20, -15, -10,  -10,   0,    0,  0,    0],
        [3,   0,   0,  15,   15,  30,  165,  0,    0],
        [2,   0,   0,   0,    0,   0,    0,  0,    0]
    ])
    t_is( gencost, gencost1, 8, [t, ' - gencost'] )

    t_end()
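
A recurring idiom in this test is padding a short piecewise-linear cost block with zeros so each row fills the fixed-width gencost columns. A minimal standalone sketch of the same idiom (the nine-column width is an assumption for illustration):

from numpy import array, zeros, c_

costs = array([[2, 0, 0, 25,  250],
               [2, 0, 0, 26, 1300]], float)
# zero-fill the unused trailing cost columns so every row has the same width
padded = c_[costs, zeros((costs.shape[0], 9 - costs.shape[1]))]
# padded.shape == (2, 9)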

Example 38

Project: cortex Source File: rbm_ni.py
Function: train
def train(
    out_path=None, name='', model_to_load=None, save_images=True, test_every=None,
    show_every=None, dim_h=None, preprocessing=None,
    learning_args=None,
    inference_args=None,
    dataset_args=None):

    # ========================================================================
    if preprocessing is None: preprocessing = []
    if learning_args is None: learning_args = dict()
    if inference_args is None: inference_args = dict()
    if dataset_args is None: raise ValueError('Dataset args must be provided')

    learning_args = init_learning_args(**learning_args)
    inference_args = init_inference_args(**inference_args)

    print('Dataset args: %s' % pprint.pformat(dataset_args))
    print('Learning args: %s' % pprint.pformat(learning_args))
    print('Inference args: %s' % pprint.pformat(inference_args))

    # ========================================================================
    print_section('Setting up data')
    batch_size = learning_args.pop('batch_size')
    valid_batch_size = learning_args.pop('valid_batch_size')
    dataset = dataset_args['dataset']
    dataset_class = resolve_dataset(dataset)

    train, valid, test, idx = load_data_split(
        dataset_class,
        train_batch_size=batch_size,
        valid_batch_size=valid_batch_size,
        **dataset_args)
    dataset_args['idx'] = idx

    # ========================================================================
    print_section('Setting model and variables')
    dim_in = train.dims[train.name]

    X = T.matrix('x', dtype=floatX)
    X.tag.test_value = np.zeros((batch_size, dim_in), dtype=X.dtype)
    trng = get_trng()

    preproc = Preprocessor(preprocessing)
    X_i = preproc(X, data_iter=train)
    inps = [X]

    # ========================================================================
    print_section('Loading model and forming graph')

    def create_model():
        model = RBM(dim_in, dim_h, v_dist=train.distributions[train.name],
                    h_dist='binomial', mean_image=train.mean_image)
        models = OrderedDict()
        models[model.name] = model
        return models

    models = set_model(create_model, model_to_load, unpack)
    model = models['rbm']
    tparams = model.set_tparams()
    print_profile(tparams)

    # ==========================================================================
    print_section('Getting cost')

    persistent = inference_args.pop('persistent')
    if persistent:
        H_p = theano.shared(
            np.zeros((inference_args['n_chains'], model.h_dist.dim)).astype(floatX),
            name='h_p')
    else:
        H_p = None
    results, samples, updates, constants = model(
        X_i, h_p=H_p, **inference_args)

    updates = theano.OrderedUpdates()
    if persistent:
        updates += theano.OrderedUpdates([(H_p, samples['hs'][-1])])

    cost = results['cost']
    extra_outs = [results['free_energy']]
    extra_outs_keys = ['cost', 'free_energy']

    l1_decay = learning_args.pop('l1_decay')
    l2_decay = learning_args.pop('l2_decay')

    assert not(l1_decay and l2_decay)
    if l1_decay:
        l1_cost = model.l1_decay(l1_decay)
        results.update(l1_cost=l1_cost)
        cost += l1_cost
    if l2_decay:
        l2_cost = model.l2_decay(l2_decay)
        results.update(l2_cost=l2_cost)
        cost += l2_cost

    # ==========================================================================
    print_section('Test functions')
    f_test_keys = list(results.keys())
    f_test = theano.function([X], list(results.values()))

    try:
        _, z_updates = model.update_partition_function(K=1000)
        f_update_partition = theano.function([], [], updates=z_updates)
    except NotImplementedError:
        f_update_partition = None

    H0 = model.trng.binomial(size=(10, model.h_dist.dim), dtype=floatX)
    s_outs, s_updates = model.sample(H0, n_steps=100)
    f_chain = theano.function(
        [], model.v_dist.get_center(s_outs['pvs']), updates=s_updates)

    # ========================================================================
    print_section('Setting final tparams and save function')
    excludes = learning_args.pop('excludes')
    tparams, all_params = set_params(tparams, updates, excludes=excludes)

    def save(tparams, outfile):
        d = dict((k, v.get_value()) for k, v in all_params.items())
        d.update(
            dim_in=dim_in,
            dim_h=dim_h,
            dataset_args=dataset_args
        )
        np.savez(outfile, **d)

    def save_images():
        w = model.W.get_value().T
        train.save_images(w, path.join(out_path, 'weights.png'))

    # ========================================================================
    print_section('Getting gradients and building optimizer.')
    f_grad_shared, f_grad_updates, learning_args = set_optimizer(
        [X], cost, tparams, constants, updates, extra_outs, **learning_args)

    # ========================================================================
    print_section('Actually running (main loop)')
    monitor = SimpleMonitor()

    main_loop(
        train, valid, tparams,
        f_grad_shared, f_grad_updates, f_test, f_test_keys,
        f_extra=f_update_partition,
        test_every=test_every,
        show_every=show_every,
        save=save,
        save_images=save_images,
        monitor=monitor,
        out_path=out_path,
        name=name,
        extra_outs_keys=extra_outs_keys,
        **learning_args)
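
np.zeros appears twice in this training script: once to give the symbolic input X a test value of shape (batch_size, dim_in), and once to initialize the persistent Gibbs chain H_p to all-zero hidden states. A minimal NumPy-only sketch of the persistent-state idiom (the shape values are assumptions for illustration):

import numpy as np

n_chains, dim_h = 10, 200
# all-zero starting state for the persistent chain; each sampling step overwrites it
h_p = np.zeros((n_chains, dim_h), dtype='float32')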

Example 39

Project: PYPOWER Source File: t_opf_ipopt.py
def t_opf_ipopt(quiet=False):
    """Tests for IPOPT-based AC optimal power flow.

    @author: Ray Zimmerman (PSERC Cornell)
    """
    num_tests = 101

    t_begin(num_tests, quiet)

    tdir = dirname(__file__)
    casefile = join(tdir, 't_case9_opf')
    verbose = 0  # not quiet

    t0 = 'IPOPT : '
    ppopt = ppoption(OPF_VIOLATION=1e-6, PDIPM_GRADTOL=1e-8,
                     PDIPM_COMPTOL=1e-8, PDIPM_COSTTOL=1e-9)
    ppopt = ppoption(ppopt, OUT_ALL=0, VERBOSE=verbose, OPF_ALG=580)

    ## set up indices
    ib_data     = r_[arange(BUS_AREA + 1), arange(BASE_KV, VMIN + 1)]
    ib_voltage  = arange(VM, VA + 1)
    ib_lam      = arange(LAM_P, LAM_Q + 1)
    ib_mu       = arange(MU_VMAX, MU_VMIN + 1)
    ig_data     = r_[[GEN_BUS, QMAX, QMIN], arange(MBASE, APF + 1)]
    ig_disp     = array([PG, QG, VG])
    ig_mu       = arange(MU_PMAX, MU_QMIN + 1)
    ibr_data    = arange(ANGMAX + 1)
    ibr_flow    = arange(PF, QT + 1)
    ibr_mu      = array([MU_SF, MU_ST])
    ibr_angmu   = array([MU_ANGMIN, MU_ANGMAX])

    ## get solved AC power flow case from MAT-file
    soln9_opf = loadmat(join(tdir, 'soln9_opf.mat'), struct_as_record=True)
    ## defines bus_soln, gen_soln, branch_soln, f_soln
    bus_soln = soln9_opf['bus_soln']
    gen_soln = soln9_opf['gen_soln']
    branch_soln = soln9_opf['branch_soln']
    f_soln = soln9_opf['f_soln'][0]

    ## run OPF
    t = t0
    r = runopf(casefile, ppopt)
    bus, gen, branch, f, success = \
            r['bus'], r['gen'], r['branch'], r['f'], r['success']
    t_ok(success, [t, 'success'])
    t_is(f, f_soln, 3, [t, 'f'])
    t_is(   bus[:, ib_data   ],    bus_soln[:, ib_data   ], 10, [t, 'bus data'])
    t_is(   bus[:, ib_voltage],    bus_soln[:, ib_voltage],  3, [t, 'bus voltage'])
    t_is(   bus[:, ib_lam    ],    bus_soln[:, ib_lam    ],  3, [t, 'bus lambda'])
    t_is(   bus[:, ib_mu     ],    bus_soln[:, ib_mu     ],  2, [t, 'bus mu'])
    t_is(   gen[:, ig_data   ],    gen_soln[:, ig_data   ], 10, [t, 'gen data'])
    t_is(   gen[:, ig_disp   ],    gen_soln[:, ig_disp   ],  3, [t, 'gen dispatch'])
    t_is(   gen[:, ig_mu     ],    gen_soln[:, ig_mu     ],  3, [t, 'gen mu'])
    t_is(branch[:, ibr_data  ], branch_soln[:, ibr_data  ], 10, [t, 'branch data'])
    t_is(branch[:, ibr_flow  ], branch_soln[:, ibr_flow  ],  3, [t, 'branch flow'])
    t_is(branch[:, ibr_mu    ], branch_soln[:, ibr_mu    ],  2, [t, 'branch mu'])

    ## run with automatic conversion of single-block pwl to linear costs
    t = ''.join([t0, '(single-block PWL) : '])
    ppc = loadcase(casefile)
    ppc['gencost'][2, NCOST] = 2
    r = runopf(ppc, ppopt)
    bus, gen, branch, f, success = \
            r['bus'], r['gen'], r['branch'], r['f'], r['success']
    t_ok(success, [t, 'success'])
    t_is(f, f_soln, 3, [t, 'f'])
    t_is(   bus[:, ib_data   ],    bus_soln[:, ib_data   ], 10, [t, 'bus data'])
    t_is(   bus[:, ib_voltage],    bus_soln[:, ib_voltage],  3, [t, 'bus voltage'])
    t_is(   bus[:, ib_lam    ],    bus_soln[:, ib_lam    ],  3, [t, 'bus lambda'])
    t_is(   bus[:, ib_mu     ],    bus_soln[:, ib_mu     ],  2, [t, 'bus mu'])
    t_is(   gen[:, ig_data   ],    gen_soln[:, ig_data   ], 10, [t, 'gen data'])
    t_is(   gen[:, ig_disp   ],    gen_soln[:, ig_disp   ],  3, [t, 'gen dispatch'])
    t_is(   gen[:, ig_mu     ],    gen_soln[:, ig_mu     ],  3, [t, 'gen mu'])
    t_is(branch[:, ibr_data  ], branch_soln[:, ibr_data  ], 10, [t, 'branch data'])
    t_is(branch[:, ibr_flow  ], branch_soln[:, ibr_flow  ],  3, [t, 'branch flow'])
    t_is(branch[:, ibr_mu    ], branch_soln[:, ibr_mu    ],  2, [t, 'branch mu'])
    xr = r_[r['var']['val']['Va'], r['var']['val']['Vm'], r['var']['val']['Pg'],
            r['var']['val']['Qg'], 0, r['var']['val']['y']]
    t_is(r['x'], xr, 8, [t, 'check on raw x returned from OPF'])

    ## get solved AC power flow case from MAT-file
    soln9_opf_Plim = loadmat(join(tdir, 'soln9_opf_Plim.mat'), struct_as_record=True)
    ## defines bus_soln, gen_soln, branch_soln, f_soln
    bus_soln = soln9_opf_Plim['bus_soln']
    gen_soln = soln9_opf_Plim['gen_soln']
    branch_soln = soln9_opf_Plim['branch_soln']
    f_soln = soln9_opf_Plim['f_soln'][0]

    ## run OPF with active power line limits
    t = ''.join([t0, '(P line lim) : '])
    ppopt1 = ppoption(ppopt, OPF_FLOW_LIM=1)
    r = runopf(casefile, ppopt1)
    bus, gen, branch, f, success = \
            r['bus'], r['gen'], r['branch'], r['f'], r['success']
    t_ok(success, [t, 'success'])
    t_is(f, f_soln, 3, [t, 'f'])
    t_is(   bus[:, ib_data   ],    bus_soln[:, ib_data   ], 10, [t, 'bus data'])
    t_is(   bus[:, ib_voltage],    bus_soln[:, ib_voltage],  3, [t, 'bus voltage'])
    t_is(   bus[:, ib_lam    ],    bus_soln[:, ib_lam    ],  3, [t, 'bus lambda'])
    t_is(   bus[:, ib_mu     ],    bus_soln[:, ib_mu     ],  2, [t, 'bus mu'])
    t_is(   gen[:, ig_data   ],    gen_soln[:, ig_data   ], 10, [t, 'gen data'])
    t_is(   gen[:, ig_disp   ],    gen_soln[:, ig_disp   ],  3, [t, 'gen dispatch'])
    t_is(   gen[:, ig_mu     ],    gen_soln[:, ig_mu     ],  3, [t, 'gen mu'])
    t_is(branch[:, ibr_data  ], branch_soln[:, ibr_data  ], 10, [t, 'branch data'])
    t_is(branch[:, ibr_flow  ], branch_soln[:, ibr_flow  ],  3, [t, 'branch flow'])
    t_is(branch[:, ibr_mu    ], branch_soln[:, ibr_mu    ],  2, [t, 'branch mu'])

    ##-----  test OPF with quadratic gen costs moved to generalized costs  -----
    ppc = loadcase(casefile)
    ppc['gencost'] = array([
        [2,   1500, 0,   3,   0.11,    5,   0],
        [2,   2000, 0,   3,   0.085,   1.2, 0],
        [2,   3000, 0,   3,   0.1225,  1,   0]
    ])
    r = runopf(ppc, ppopt)
    bus_soln, gen_soln, branch_soln, f_soln, success = \
            r['bus'], r['gen'], r['branch'], r['f'], r['success']
    branch_soln = branch_soln[:, :MU_ST + 1]

    A = None
    l = array([])
    u = array([])
    nb = ppc['bus'].shape[0]      # number of buses
    ng = ppc['gen'].shape[0]      # number of gens
    thbas = 0;        thend = thbas + nb
    vbas = thend;     vend = vbas + nb
    pgbas = vend;     pgend = pgbas + ng
#    qgbas = pgend;   qgend = qgbas + ng
    nxyz = 2 * nb + 2 * ng
    N = sparse((ppc['baseMVA'] * ones(ng), (arange(ng), arange(pgbas, pgend))), (ng, nxyz))
    fparm = ones((ng, 1)) * array([[1, 0, 0, 1]])
    ix = argsort(ppc['gen'][:, 0])
    H = 2 * spdiags(ppc['gencost'][ix, 4], 0, ng, ng, 'csr')
    Cw = ppc['gencost'][ix, 5]
    ppc['gencost'][:, 4:7] = 0

    ## run OPF with quadratic gen costs moved to generalized costs
    t = ''.join([t0, 'w/quadratic generalized gen cost : '])
    r = opf(ppc, A, l, u, ppopt, N, fparm, H, Cw)
    f, bus, gen, branch, success = \
            r['f'], r['bus'], r['gen'], r['branch'], r['success']
    t_ok(success, [t, 'success'])
    t_is(f, f_soln, 3, [t, 'f'])
    t_is(   bus[:, ib_data   ],    bus_soln[:, ib_data   ], 10, [t, 'bus data'])
    t_is(   bus[:, ib_voltage],    bus_soln[:, ib_voltage],  3, [t, 'bus voltage'])
    t_is(   bus[:, ib_lam    ],    bus_soln[:, ib_lam    ],  3, [t, 'bus lambda'])
    t_is(   bus[:, ib_mu     ],    bus_soln[:, ib_mu     ],  2, [t, 'bus mu'])
    t_is(   gen[:, ig_data   ],    gen_soln[:, ig_data   ], 10, [t, 'gen data'])
    t_is(   gen[:, ig_disp   ],    gen_soln[:, ig_disp   ],  3, [t, 'gen dispatch'])
    t_is(   gen[:, ig_mu     ],    gen_soln[:, ig_mu     ],  3, [t, 'gen mu'])
    t_is(branch[:, ibr_data  ], branch_soln[:, ibr_data  ], 10, [t, 'branch data'])
    t_is(branch[:, ibr_flow  ], branch_soln[:, ibr_flow  ],  3, [t, 'branch flow'])
    t_is(branch[:, ibr_mu    ], branch_soln[:, ibr_mu    ],  2, [t, 'branch mu'])
    t_is(r['cost']['usr'], f, 12, [t, 'user cost'])

    ##-----  run OPF with extra linear user constraints & costs  -----
    ## single new z variable constrained to be greater than or equal to
    ## deviation from 1 pu voltage at bus 1, linear cost on this z
    ## get solved AC power flow case from MAT-file
    soln9_opf_extras1 = loadmat(join(tdir, 'soln9_opf_extras1.mat'), struct_as_record=True)
    ## defines bus_soln, gen_soln, branch_soln, f_soln
    bus_soln = soln9_opf_extras1['bus_soln']
    gen_soln = soln9_opf_extras1['gen_soln']
    branch_soln = soln9_opf_extras1['branch_soln']
    f_soln = soln9_opf_extras1['f_soln'][0]

    row = [0, 0, 1, 1]
    col = [9, 24, 9, 24]
    A = sparse(([-1, 1, 1, 1], (row, col)), (2, 25))
    u = array([Inf, Inf])
    l = array([-1, 1])

    N = sparse(([1], ([0], [24])), (1, 25))    ## new z variable only
    fparm = array([[1, 0, 0, 1]])              ## w = r = z
    H = sparse((1, 1))                ## no quadratic term
    Cw = array([100.0])

    t = ''.join([t0, 'w/extra constraints & costs 1 : '])
    r = opf(casefile, A, l, u, ppopt, N, fparm, H, Cw)
    f, bus, gen, branch, success = \
            r['f'], r['bus'], r['gen'], r['branch'], r['success']
    t_ok(success, [t, 'success'])
    t_is(f, f_soln, 3, [t, 'f'])
    t_is(   bus[:, ib_data   ],    bus_soln[:, ib_data   ], 10, [t, 'bus data'])
    t_is(   bus[:, ib_voltage],    bus_soln[:, ib_voltage],  3, [t, 'bus voltage'])
    t_is(   bus[:, ib_lam    ],    bus_soln[:, ib_lam    ],  3, [t, 'bus lambda'])
    t_is(   bus[:, ib_mu     ],    bus_soln[:, ib_mu     ],  2, [t, 'bus mu'])
    t_is(   gen[:, ig_data   ],    gen_soln[:, ig_data   ], 10, [t, 'gen data'])
    t_is(   gen[:, ig_disp   ],    gen_soln[:, ig_disp   ],  3, [t, 'gen dispatch'])
    t_is(   gen[:, ig_mu     ],    gen_soln[:, ig_mu     ],  3, [t, 'gen mu'])
    t_is(branch[:, ibr_data  ], branch_soln[:, ibr_data  ], 10, [t, 'branch data'])
    t_is(branch[:, ibr_flow  ], branch_soln[:, ibr_flow  ],  3, [t, 'branch flow'])
    t_is(branch[:, ibr_mu    ], branch_soln[:, ibr_mu    ],  2, [t, 'branch mu'])
    t_is(r['var']['val']['z'], 0.025419, 6, [t, 'user variable'])
    t_is(r['cost']['usr'], 2.5419, 4, [t, 'user cost'])

    ##-----  test OPF with capability curves  -----
    ppc = loadcase(join(tdir, 't_case9_opfv2'))
    ## remove angle diff limits
    ppc['branch'][0, ANGMAX] =  360
    ppc['branch'][8, ANGMIN] = -360

    ## get solved AC power flow case from MAT-file
    soln9_opf_PQcap = loadmat(join(tdir, 'soln9_opf_PQcap.mat'), struct_as_record=True)
    ## defines bus_soln, gen_soln, branch_soln, f_soln
    bus_soln = soln9_opf_PQcap['bus_soln']
    gen_soln = soln9_opf_PQcap['gen_soln']
    branch_soln = soln9_opf_PQcap['branch_soln']
    f_soln = soln9_opf_PQcap['f_soln'][0]

    ## run OPF with capability curves
    t = ''.join([t0, 'w/capability curves : '])
    r = runopf(ppc, ppopt)
    bus, gen, branch, f, success = \
            r['bus'], r['gen'], r['branch'], r['f'], r['success']
    t_ok(success, [t, 'success'])
    t_is(f, f_soln, 3, [t, 'f'])
    t_is(   bus[:, ib_data   ],    bus_soln[:, ib_data   ], 10, [t, 'bus data'])
    t_is(   bus[:, ib_voltage],    bus_soln[:, ib_voltage],  3, [t, 'bus voltage'])
    t_is(   bus[:, ib_lam    ],    bus_soln[:, ib_lam    ],  3, [t, 'bus lambda'])
    t_is(   bus[:, ib_mu     ],    bus_soln[:, ib_mu     ],  2, [t, 'bus mu'])
    t_is(   gen[:, ig_data   ],    gen_soln[:, ig_data   ], 10, [t, 'gen data'])
    t_is(   gen[:, ig_disp   ],    gen_soln[:, ig_disp   ],  3, [t, 'gen dispatch'])
    t_is(   gen[:, ig_mu     ],    gen_soln[:, ig_mu     ],  3, [t, 'gen mu'])
    t_is(branch[:, ibr_data  ], branch_soln[:, ibr_data  ], 10, [t, 'branch data'])
    t_is(branch[:, ibr_flow  ], branch_soln[:, ibr_flow  ],  3, [t, 'branch flow'])
    t_is(branch[:, ibr_mu    ], branch_soln[:, ibr_mu    ],  2, [t, 'branch mu'])

    ##-----  test OPF with angle difference limits  -----
    ppc = loadcase(join(tdir, 't_case9_opfv2'))
    ## remove capability curves
    ppc['gen'][ix_(arange(1, 3),
                   [PC1, PC2, QC1MIN, QC1MAX, QC2MIN, QC2MAX])] = zeros((2, 6))

    ## get solved AC power flow case from MAT-file
    soln9_opf_ang = loadmat(join(tdir, 'soln9_opf_ang.mat'), struct_as_record=True)
    ## defines bus_soln, gen_soln, branch_soln, f_soln
    bus_soln = soln9_opf_ang['bus_soln']
    gen_soln = soln9_opf_ang['gen_soln']
    branch_soln = soln9_opf_ang['branch_soln']
    f_soln = soln9_opf_ang['f_soln'][0]

    ## run OPF with angle difference limits
    t = ''.join([t0, 'w/angle difference limits : '])
    r = runopf(ppc, ppopt)
    bus, gen, branch, f, success = \
            r['bus'], r['gen'], r['branch'], r['f'], r['success']
    t_ok(success, [t, 'success'])
    t_is(f, f_soln, 3, [t, 'f'])
    t_is(   bus[:, ib_data   ],    bus_soln[:, ib_data   ], 10, [t, 'bus data'])
    t_is(   bus[:, ib_voltage],    bus_soln[:, ib_voltage],  3, [t, 'bus voltage'])
    t_is(   bus[:, ib_lam    ],    bus_soln[:, ib_lam    ],  3, [t, 'bus lambda'])
    t_is(   bus[:, ib_mu     ],    bus_soln[:, ib_mu     ],  1, [t, 'bus mu'])
    t_is(   gen[:, ig_data   ],    gen_soln[:, ig_data   ], 10, [t, 'gen data'])
    t_is(   gen[:, ig_disp   ],    gen_soln[:, ig_disp   ],  3, [t, 'gen dispatch'])
    t_is(   gen[:, ig_mu     ],    gen_soln[:, ig_mu     ],  3, [t, 'gen mu'])
    t_is(branch[:, ibr_data  ], branch_soln[:, ibr_data  ], 10, [t, 'branch data'])
    t_is(branch[:, ibr_flow  ], branch_soln[:, ibr_flow  ],  3, [t, 'branch flow'])
    t_is(branch[:, ibr_mu    ], branch_soln[:, ibr_mu    ],  2, [t, 'branch mu'])
    t_is(branch[:, ibr_angmu ], branch_soln[:, ibr_angmu ],  2, [t, 'branch angle mu'])

    ##-----  test OPF with ignored angle difference limits  -----
    ## get solved AC power flow case from MAT-file
    soln9_opf = loadmat(join(tdir, 'soln9_opf.mat'), struct_as_record=True)
    ## defines bus_soln, gen_soln, branch_soln, f_soln
    bus_soln = soln9_opf['bus_soln']
    gen_soln = soln9_opf['gen_soln']
    branch_soln = soln9_opf['branch_soln']
    f_soln = soln9_opf['f_soln'][0]

    ## run OPF with ignored angle difference limits
    t = ''.join([t0, 'w/ignored angle difference limits : '])
    ppopt1 = ppoption(ppopt, OPF_IGNORE_ANG_LIM=1)
    r = runopf(ppc, ppopt1)
    bus, gen, branch, f, success = \
            r['bus'], r['gen'], r['branch'], r['f'], r['success']
    ## ang limits are not in this solution data, so let's remove them
    branch[0, ANGMAX] =  360
    branch[8, ANGMIN] = -360
    t_ok(success, [t, 'success'])
    t_is(f, f_soln, 3, [t, 'f'])
    t_is(   bus[:, ib_data   ],    bus_soln[:, ib_data   ], 10, [t, 'bus data'])
    t_is(   bus[:, ib_voltage],    bus_soln[:, ib_voltage],  3, [t, 'bus voltage'])
    t_is(   bus[:, ib_lam    ],    bus_soln[:, ib_lam    ],  3, [t, 'bus lambda'])
    t_is(   bus[:, ib_mu     ],    bus_soln[:, ib_mu     ],  2, [t, 'bus mu'])
    t_is(   gen[:, ig_data   ],    gen_soln[:, ig_data   ], 10, [t, 'gen data'])
    t_is(   gen[:, ig_disp   ],    gen_soln[:, ig_disp   ],  3, [t, 'gen dispatch'])
    t_is(   gen[:, ig_mu     ],    gen_soln[:, ig_mu     ],  3, [t, 'gen mu'])
    t_is(branch[:, ibr_data  ], branch_soln[:, ibr_data  ], 10, [t, 'branch data'])
    t_is(branch[:, ibr_flow  ], branch_soln[:, ibr_flow  ],  3, [t, 'branch flow'])
    t_is(branch[:, ibr_mu    ], branch_soln[:, ibr_mu    ],  2, [t, 'branch mu'])

    t_end()
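
The zeros((2, 6)) assignment in this test blanks the six capability-curve columns for generators 1 and 2 in one step; ix_ builds the row/column cross product so the zero block lands on exactly those cells. A standalone sketch of the idiom (the matrix size and column positions are assumptions for illustration):

import numpy as np

gen = np.ones((5, 21))                      # stand-in for a gen matrix
rows = np.arange(1, 3)                      # generators 1 and 2
cols = [10, 11, 12, 13, 14, 15]             # stand-ins for PC1..QC2MAX
gen[np.ix_(rows, cols)] = np.zeros((2, 6))  # clear the capability-curve block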

Example 40

Project: pyspeckit Source File: model.py
Function: _make_parinfo
    def _make_parinfo(self, params=None, parnames=None, parvalues=None,
                      parlimits=None, parlimited=None, parfixed=None,
                      parerror=None, partied=None, fitunits=None,
                      parsteps=None, npeaks=1, parinfo=None, names=None,
                      values=None, limits=None, limited=None, fixed=None,
                      error=None, tied=None, steps=None, negamp=None,
                      limitedmin=None, limitedmax=None, minpars=None,
                      maxpars=None, vheight=False, debug=False, **kwargs):
        """
        Generate a `ParinfoList` that matches the inputs

        This code is complicated - it can take inputs in a variety of different
        forms with different priority.  It will return a `ParinfoList` (and
        therefore must have values within parameter ranges)

        """

        # for backwards compatibility - partied = tied, etc.
        locals_dict = locals()
        for varname in str.split("parnames,parvalues,parsteps,parlimits,parlimited,parfixed,parerror,partied",","):
            shortvarname = varname.replace("par","")
            if locals_dict.get(shortvarname) is not None and locals_dict.get(varname) is not None:
                raise ValueError("Cannot specify both {0} and {1}".format(varname, shortvarname))

        input_pardict = {k: locals_dict.get(k)
                         for k in str.split("parnames,parvalues,parsteps,parlimits,parlimited,parfixed,parerror,partied",",")}
        _tip = {'par'+k: locals_dict.get(k)
                for k in str.split("names,values,steps,limits,limited,fixed,error,tied",",")
                if locals_dict.get(k)
               }
        input_pardict.update(_tip)

        if params is not None and parvalues is not None:
            raise ValueError("parvalues and params both specified; they're redundant so that's not allowed.")
        elif params is not None and parvalues is None:
            input_pardict['parvalues'] = params
        log.debug("Parvalues = {0}, npeaks = {1}".format(input_pardict['parvalues'], npeaks))

        # this is used too many damned times to keep referencing a dict.
        parnames = input_pardict['parnames']
        parlimited = input_pardict['parlimited']
        parlimits = input_pardict['parlimits']
        parvalues = input_pardict['parvalues']

        if parnames is not None:
            self.parnames = parnames
        elif parnames is None and hasattr(self,'parnames') and self.parnames is not None:
            parnames = self.parnames
        elif self.default_parinfo is not None and parnames is None:
            parnames = [p['parname'] for p in self.default_parinfo]

        input_pardict['parnames'] = parnames

        assert input_pardict['parnames'] is not None

        if limitedmin is not None:
            if limitedmax is not None:
                parlimited = list(zip(limitedmin,limitedmax))
            else:
                parlimited = list(zip(limitedmin,(False,)*len(parnames)))
        elif limitedmax is not None:
            parlimited = list(zip((False,)*len(parnames),limitedmax))
        elif self.default_parinfo is not None and parlimited is None:
            parlimited = [p['limited'] for p in self.default_parinfo]

        input_pardict['parlimited'] = parlimited

        if minpars is not None:
            if maxpars is not None:
                parlimits = list(zip(minpars,maxpars))
            else:
                parlimits = list(zip(minpars,(False,)*len(parnames)))
        elif maxpars is not None:
            parlimits = list(zip((False,)*len(parnames),maxpars))
        elif limits is not None:
            parlimits = limits
        elif self.default_parinfo is not None and parlimits is None:
            parlimits = [p['limits'] for p in self.default_parinfo]

        input_pardict['parlimits'] = parlimits

        self.npeaks = int(npeaks)

        # the height / parvalue popping needs to be done before the temp_pardict is set in order to make sure
        # that the height guess isn't assigned to the amplitude
        self.vheight = vheight
        if ((vheight and len(self.parinfo) == self.default_npars and
             len(parvalues) == self.default_npars + 1)):
            # if the right number of parameters are passed, the first is the height
            self.parinfo = [{'n':0, 'value':parvalues.pop(0), 'limits':(0,0),
                             'limited': (False,False), 'fixed':False, 'parname':'HEIGHT',
                             'error': 0, 'tied':""}]
        elif vheight and len(self.parinfo) == self.default_npars and len(parvalues) == self.default_npars:
            # if you're one par short, guess zero
            self.parinfo = [ {'n':0, 'value': 0, 'limits':(0,0),
                'limited': (False,False), 'fixed':False, 'parname':'HEIGHT',
                'error': 0, 'tied':"" } ]
        elif vheight and len(self.parinfo) == self.default_npars+1 and len(parvalues) == self.default_npars+1:
            # the right numbers are passed *AND* there is already a height param
            self.parinfo = [ {'n':0, 'value':parvalues.pop(0), 'limits':(0,0),
                'limited': (False,False), 'fixed':False, 'parname':'HEIGHT',
                'error': 0, 'tied':"" } ]
            #heightparnum = (i for i,s in self.parinfo if 'HEIGHT' in s['parname'])
            #for hpn in heightparnum:
            #    self.parinfo[hpn]['value'] = parvalues[0]
        elif vheight:
            raise ValueError('VHEIGHT is specified but a case was found that did not allow it to be included.')
        else:
            self.parinfo = []

        log.debug("After VHEIGHT parse len(parinfo): %i   vheight: %s" % (len(self.parinfo), vheight))


        # this is a clever way to turn the parameter lists into a dict of lists
        # clever = hard to read
        temp_pardict = OrderedDict([(varname, np.zeros(self.npars*self.npeaks,
                                                       dtype='bool'))
                                    if input_pardict.get(varname) is None else
                                    (varname, list(input_pardict.get(varname)))
            for varname in str.split("parnames,parvalues,parsteps,parlimits,parlimited,parfixed,parerror,partied",",")])
        temp_pardict['parlimits'] = parlimits if parlimits is not None else [(0,0)] * (self.npars*self.npeaks)
        temp_pardict['parlimited'] = parlimited if parlimited is not None else [(False,False)] * (self.npars*self.npeaks)
        for k,v in temp_pardict.items():
            if (self.npars*self.npeaks) / len(v) > 1:
                n_components = ((self.npars*self.npeaks) / len(v))
                if n_components != int(n_components):
                    raise ValueError("The number of parameter values is not a "
                                     "multiple of the number of allowed "
                                     "parameters.")
                temp_pardict[k] = list(v) * int(n_components)

        # generate the parinfo dict
        # note that 'tied' must be a blank string (i.e. ""), not False, if it is not set
        # parlimited, parfixed, and parlimits are all two-element items (tuples or lists)
        self.parinfo += [ {'n':ii+self.npars*jj+vheight,
            'value':float(temp_pardict['parvalues'][ii+self.npars*jj]),
            'step':temp_pardict['parsteps'][ii+self.npars*jj],
            'limits':temp_pardict['parlimits'][ii+self.npars*jj],
            'limited':temp_pardict['parlimited'][ii+self.npars*jj],
            'fixed':temp_pardict['parfixed'][ii+self.npars*jj],
            'parname':temp_pardict['parnames'][ii].upper()+"%0i" % int(jj),
            'error':float(temp_pardict['parerror'][ii+self.npars*jj]),
            'tied':temp_pardict['partied'][ii+self.npars*jj] if temp_pardict['partied'][ii+self.npars*jj] else ""} 
            for jj in range(self.npeaks)
            for ii in range(self.npars) ] # order matters!

        log.debug("After Generation step len(parinfo): %i   vheight: %s "
                  "parinfo: %s" % (len(self.parinfo), vheight, self.parinfo))

        if debug > 1:  # drop into the debugger at extra-verbose debug levels
            import pdb; pdb.set_trace()

        # special keyword to specify emission/absorption lines
        if negamp is not None:
            if negamp:
                for p in self.parinfo:
                    if 'AMP' in p['parname']:
                        p['limited'] = (p['limited'][0], True)
                        p['limits']  = (p['limits'][0],  0)
            else:
                for p in self.parinfo:
                    if 'AMP' in p['parname']:
                        p['limited'] = (True, p['limited'][1])
                        p['limits']  = (0, p['limits'][1])

        # This is effectively an override of all that junk above (3/11/2012)
        # Much of it is probably unnecessary, but it was easier to do this than
        # rewrite the above
        self.parinfo = ParinfoList([Parinfo(p) for p in self.parinfo])

        # New feature: scaleability
        for par in self.parinfo:
            if par.parname.lower().strip('0123456789') in ('amplitude','amp'):
                par.scaleable = True

        log.debug("Parinfo has been set: {0}".format(self.parinfo))
        log.debug("kwargs {0} were passed.".format(kwargs))

        assert self.parinfo != []

        return self.parinfo, kwargs
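
Within _make_parinfo, np.zeros(self.npars*self.npeaks, dtype='bool') supplies an all-False default for any per-parameter list (parfixed, parlimited, ...) the caller leaves unset. A sketch of that defaulting idiom outside the class (the names are illustrative):

import numpy as np

npars, npeaks = 3, 2
user_fixed = None                       # caller did not fix any parameter
parfixed = (np.zeros(npars * npeaks, dtype=bool)
            if user_fixed is None else list(user_fixed))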

Example 41

Project: PYPOWER Source File: t_opf_pips.py
def t_opf_pips(quiet=False):
    """Tests for PIPS-based AC optimal power flow.

    @author: Ray Zimmerman (PSERC Cornell)
    """
    num_tests = 101

    t_begin(num_tests, quiet)

    tdir = dirname(__file__)
    casefile = join(tdir, 't_case9_opf')
    verbose = 0  # not quiet

    t0 = 'PIPS : '
    ppopt = ppoption(OPF_VIOLATION=1e-6, PDIPM_GRADTOL=1e-8,
                     PDIPM_COMPTOL=1e-8, PDIPM_COSTTOL=1e-9)
    ppopt = ppoption(ppopt, OUT_ALL=0, VERBOSE=verbose, OPF_ALG=560)

    ## set up indices
    ib_data     = r_[arange(BUS_AREA + 1), arange(BASE_KV, VMIN + 1)]
    ib_voltage  = arange(VM, VA + 1)
    ib_lam      = arange(LAM_P, LAM_Q + 1)
    ib_mu       = arange(MU_VMAX, MU_VMIN + 1)
    ig_data     = r_[[GEN_BUS, QMAX, QMIN], arange(MBASE, APF + 1)]
    ig_disp     = array([PG, QG, VG])
    ig_mu       = arange(MU_PMAX, MU_QMIN + 1)
    ibr_data    = arange(ANGMAX + 1)
    ibr_flow    = arange(PF, QT + 1)
    ibr_mu      = array([MU_SF, MU_ST])
    ibr_angmu   = array([MU_ANGMIN, MU_ANGMAX])

    ## get solved AC power flow case from MAT-file
    soln9_opf = loadmat(join(tdir, 'soln9_opf.mat'), struct_as_record=True)
    ## defines bus_soln, gen_soln, branch_soln, f_soln
    bus_soln = soln9_opf['bus_soln']
    gen_soln = soln9_opf['gen_soln']
    branch_soln = soln9_opf['branch_soln']
    f_soln = soln9_opf['f_soln'][0]

    ## run OPF
    t = t0
    r = runopf(casefile, ppopt)
    bus, gen, branch, f, success = \
            r['bus'], r['gen'], r['branch'], r['f'], r['success']
    t_ok(success, [t, 'success'])
    t_is(f, f_soln, 3, [t, 'f'])
    t_is(   bus[:, ib_data   ],    bus_soln[:, ib_data   ], 10, [t, 'bus data'])
    t_is(   bus[:, ib_voltage],    bus_soln[:, ib_voltage],  3, [t, 'bus voltage'])
    t_is(   bus[:, ib_lam    ],    bus_soln[:, ib_lam    ],  3, [t, 'bus lambda'])
    t_is(   bus[:, ib_mu     ],    bus_soln[:, ib_mu     ],  2, [t, 'bus mu'])
    t_is(   gen[:, ig_data   ],    gen_soln[:, ig_data   ], 10, [t, 'gen data'])
    t_is(   gen[:, ig_disp   ],    gen_soln[:, ig_disp   ],  3, [t, 'gen dispatch'])
    t_is(   gen[:, ig_mu     ],    gen_soln[:, ig_mu     ],  3, [t, 'gen mu'])
    t_is(branch[:, ibr_data  ], branch_soln[:, ibr_data  ], 10, [t, 'branch data'])
    t_is(branch[:, ibr_flow  ], branch_soln[:, ibr_flow  ],  3, [t, 'branch flow'])
    t_is(branch[:, ibr_mu    ], branch_soln[:, ibr_mu    ],  2, [t, 'branch mu'])

    ## run with automatic conversion of single-block pwl to linear costs
    t = ''.join([t0, '(single-block PWL) : '])
    ppc = loadcase(casefile)
    ppc['gencost'][2, NCOST] = 2
    r = runopf(ppc, ppopt)
    bus, gen, branch, f, success = \
            r['bus'], r['gen'], r['branch'], r['f'], r['success']
    t_ok(success, [t, 'success'])
    t_is(f, f_soln, 3, [t, 'f'])
    t_is(   bus[:, ib_data   ],    bus_soln[:, ib_data   ], 10, [t, 'bus data'])
    t_is(   bus[:, ib_voltage],    bus_soln[:, ib_voltage],  3, [t, 'bus voltage'])
    t_is(   bus[:, ib_lam    ],    bus_soln[:, ib_lam    ],  3, [t, 'bus lambda'])
    t_is(   bus[:, ib_mu     ],    bus_soln[:, ib_mu     ],  2, [t, 'bus mu'])
    t_is(   gen[:, ig_data   ],    gen_soln[:, ig_data   ], 10, [t, 'gen data'])
    t_is(   gen[:, ig_disp   ],    gen_soln[:, ig_disp   ],  3, [t, 'gen dispatch'])
    t_is(   gen[:, ig_mu     ],    gen_soln[:, ig_mu     ],  3, [t, 'gen mu'])
    t_is(branch[:, ibr_data  ], branch_soln[:, ibr_data  ], 10, [t, 'branch data'])
    t_is(branch[:, ibr_flow  ], branch_soln[:, ibr_flow  ],  3, [t, 'branch flow'])
    t_is(branch[:, ibr_mu    ], branch_soln[:, ibr_mu    ],  2, [t, 'branch mu'])
    xr = r_[r['var']['val']['Va'], r['var']['val']['Vm'], r['var']['val']['Pg'],
            r['var']['val']['Qg'], 0, r['var']['val']['y']]
    t_is(r['x'], xr, 8, [t, 'check on raw x returned from OPF'])

    ## get solved AC power flow case from MAT-file
    soln9_opf_Plim = loadmat(join(tdir, 'soln9_opf_Plim.mat'), struct_as_record=True)
    ## defines bus_soln, gen_soln, branch_soln, f_soln
    bus_soln = soln9_opf_Plim['bus_soln']
    gen_soln = soln9_opf_Plim['gen_soln']
    branch_soln = soln9_opf_Plim['branch_soln']
    f_soln = soln9_opf_Plim['f_soln'][0]

    ## run OPF with active power line limits
    t = ''.join([t0, '(P line lim) : '])
    ppopt1 = ppoption(ppopt, OPF_FLOW_LIM=1)
    r = runopf(casefile, ppopt1)
    bus, gen, branch, f, success = \
            r['bus'], r['gen'], r['branch'], r['f'], r['success']
    t_ok(success, [t, 'success'])
    t_is(f, f_soln, 3, [t, 'f'])
    t_is(   bus[:, ib_data   ],    bus_soln[:, ib_data   ], 10, [t, 'bus data'])
    t_is(   bus[:, ib_voltage],    bus_soln[:, ib_voltage],  3, [t, 'bus voltage'])
    t_is(   bus[:, ib_lam    ],    bus_soln[:, ib_lam    ],  3, [t, 'bus lambda'])
    t_is(   bus[:, ib_mu     ],    bus_soln[:, ib_mu     ],  2, [t, 'bus mu'])
    t_is(   gen[:, ig_data   ],    gen_soln[:, ig_data   ], 10, [t, 'gen data'])
    t_is(   gen[:, ig_disp   ],    gen_soln[:, ig_disp   ],  3, [t, 'gen dispatch'])
    t_is(   gen[:, ig_mu     ],    gen_soln[:, ig_mu     ],  3, [t, 'gen mu'])
    t_is(branch[:, ibr_data  ], branch_soln[:, ibr_data  ], 10, [t, 'branch data'])
    t_is(branch[:, ibr_flow  ], branch_soln[:, ibr_flow  ],  3, [t, 'branch flow'])
    t_is(branch[:, ibr_mu    ], branch_soln[:, ibr_mu    ],  2, [t, 'branch mu'])

    ##-----  test OPF with quadratic gen costs moved to generalized costs  -----
    ppc = loadcase(casefile)
    ppc['gencost'] = array([
        [2,   1500, 0,   3,   0.11,    5,   0],
        [2,   2000, 0,   3,   0.085,   1.2, 0],
        [2,   3000, 0,   3,   0.1225,  1,   0]
    ])
    r = runopf(ppc, ppopt)
    bus_soln, gen_soln, branch_soln, f_soln, success = \
            r['bus'], r['gen'], r['branch'], r['f'], r['success']
    branch_soln = branch_soln[:, :MU_ST + 1]

    A = None
    l = array([])
    u = array([])
    nb = ppc['bus'].shape[0]      # number of buses
    ng = ppc['gen'].shape[0]      # number of gens
    thbas = 0;        thend = thbas + nb
    vbas = thend;     vend = vbas + nb
    pgbas = vend;     pgend = pgbas + ng
#    qgbas = pgend;   qgend = qgbas + ng
    nxyz = 2 * nb + 2 * ng
    N = sparse((ppc['baseMVA'] * ones(ng), (arange(ng), arange(pgbas, pgend))), (ng, nxyz))
    fparm = ones((ng, 1)) * array([[1, 0, 0, 1]])
    ix = argsort(ppc['gen'][:, 0])
    H = 2 * spdiags(ppc['gencost'][ix, 4], 0, ng, ng, 'csr')
    Cw = ppc['gencost'][ix, 5]
    ppc['gencost'][:, 4:7] = 0

    ## run OPF with quadratic gen costs moved to generalized costs
    t = ''.join([t0, 'w/quadratic generalized gen cost : '])
    r = opf(ppc, A, l, u, ppopt, N, fparm, H, Cw)
    f, bus, gen, branch, success = \
            r['f'], r['bus'], r['gen'], r['branch'], r['success']
    t_ok(success, [t, 'success'])
    t_is(f, f_soln, 3, [t, 'f'])
    t_is(   bus[:, ib_data   ],    bus_soln[:, ib_data   ], 10, [t, 'bus data'])
    t_is(   bus[:, ib_voltage],    bus_soln[:, ib_voltage],  3, [t, 'bus voltage'])
    t_is(   bus[:, ib_lam    ],    bus_soln[:, ib_lam    ],  3, [t, 'bus lambda'])
    t_is(   bus[:, ib_mu     ],    bus_soln[:, ib_mu     ],  2, [t, 'bus mu'])
    t_is(   gen[:, ig_data   ],    gen_soln[:, ig_data   ], 10, [t, 'gen data'])
    t_is(   gen[:, ig_disp   ],    gen_soln[:, ig_disp   ],  3, [t, 'gen dispatch'])
    t_is(   gen[:, ig_mu     ],    gen_soln[:, ig_mu     ],  3, [t, 'gen mu'])
    t_is(branch[:, ibr_data  ], branch_soln[:, ibr_data  ], 10, [t, 'branch data'])
    t_is(branch[:, ibr_flow  ], branch_soln[:, ibr_flow  ],  3, [t, 'branch flow'])
    t_is(branch[:, ibr_mu    ], branch_soln[:, ibr_mu    ],  2, [t, 'branch mu'])
    t_is(r['cost']['usr'], f, 12, [t, 'user cost'])

    ##-----  run OPF with extra linear user constraints & costs  -----
    ## single new z variable constrained to be greater than or equal to
    ## deviation from 1 pu voltage at bus 1, linear cost on this z
    ## get solved AC power flow case from MAT-file
    soln9_opf_extras1 = loadmat(join(tdir, 'soln9_opf_extras1.mat'), struct_as_record=True)
    ## defines bus_soln, gen_soln, branch_soln, f_soln
    bus_soln = soln9_opf_extras1['bus_soln']
    gen_soln = soln9_opf_extras1['gen_soln']
    branch_soln = soln9_opf_extras1['branch_soln']
    f_soln = soln9_opf_extras1['f_soln'][0]

    row = [0, 0, 1, 1]
    col = [9, 24, 9, 24]
    A = sparse(([-1, 1, 1, 1], (row, col)), (2, 25))
    u = array([Inf, Inf])
    l = array([-1, 1])

    N = sparse(([1], ([0], [24])), (1, 25))    ## new z variable only
    fparm = array([[1, 0, 0, 1]])              ## w = r = z
    H = sparse((1, 1))                ## no quadratic term
    Cw = array([100.0])

    t = ''.join([t0, 'w/extra constraints & costs 1 : '])
    r = opf(casefile, A, l, u, ppopt, N, fparm, H, Cw)
    f, bus, gen, branch, success = \
            r['f'], r['bus'], r['gen'], r['branch'], r['success']
    t_ok(success, [t, 'success'])
    t_is(f, f_soln, 3, [t, 'f'])
    t_is(   bus[:, ib_data   ],    bus_soln[:, ib_data   ], 10, [t, 'bus data'])
    t_is(   bus[:, ib_voltage],    bus_soln[:, ib_voltage],  3, [t, 'bus voltage'])
    t_is(   bus[:, ib_lam    ],    bus_soln[:, ib_lam    ],  3, [t, 'bus lambda'])
    t_is(   bus[:, ib_mu     ],    bus_soln[:, ib_mu     ],  2, [t, 'bus mu'])
    t_is(   gen[:, ig_data   ],    gen_soln[:, ig_data   ], 10, [t, 'gen data'])
    t_is(   gen[:, ig_disp   ],    gen_soln[:, ig_disp   ],  3, [t, 'gen dispatch'])
    t_is(   gen[:, ig_mu     ],    gen_soln[:, ig_mu     ],  3, [t, 'gen mu'])
    t_is(branch[:, ibr_data  ], branch_soln[:, ibr_data  ], 10, [t, 'branch data'])
    t_is(branch[:, ibr_flow  ], branch_soln[:, ibr_flow  ],  3, [t, 'branch flow'])
    t_is(branch[:, ibr_mu    ], branch_soln[:, ibr_mu    ],  2, [t, 'branch mu'])
    t_is(r['var']['val']['z'], 0.025419, 6, [t, 'user variable'])
    t_is(r['cost']['usr'], 2.5419, 4, [t, 'user cost'])

    ##-----  test OPF with capability curves  -----
    ppc = loadcase(join(tdir, 't_case9_opfv2'))
    ## remove angle diff limits
    ppc['branch'][0, ANGMAX] =  360
    ppc['branch'][8, ANGMIN] = -360

    ## get solved AC power flow case from MAT-file
    soln9_opf_PQcap = loadmat(join(tdir, 'soln9_opf_PQcap.mat'), struct_as_record=True)
    ## defines bus_soln, gen_soln, branch_soln, f_soln
    bus_soln = soln9_opf_PQcap['bus_soln']
    gen_soln = soln9_opf_PQcap['gen_soln']
    branch_soln = soln9_opf_PQcap['branch_soln']
    f_soln = soln9_opf_PQcap['f_soln'][0]

    ## run OPF with capability curves
    t = ''.join([t0, 'w/capability curves : '])
    r = runopf(ppc, ppopt)
    bus, gen, branch, f, success = \
            r['bus'], r['gen'], r['branch'], r['f'], r['success']
    t_ok(success, [t, 'success'])
    t_is(f, f_soln, 3, [t, 'f'])
    t_is(   bus[:, ib_data   ],    bus_soln[:, ib_data   ], 10, [t, 'bus data'])
    t_is(   bus[:, ib_voltage],    bus_soln[:, ib_voltage],  3, [t, 'bus voltage'])
    t_is(   bus[:, ib_lam    ],    bus_soln[:, ib_lam    ],  3, [t, 'bus lambda'])
    t_is(   bus[:, ib_mu     ],    bus_soln[:, ib_mu     ],  2, [t, 'bus mu'])
    t_is(   gen[:, ig_data   ],    gen_soln[:, ig_data   ], 10, [t, 'gen data'])
    t_is(   gen[:, ig_disp   ],    gen_soln[:, ig_disp   ],  3, [t, 'gen dispatch'])
    t_is(   gen[:, ig_mu     ],    gen_soln[:, ig_mu     ],  3, [t, 'gen mu'])
    t_is(branch[:, ibr_data  ], branch_soln[:, ibr_data  ], 10, [t, 'branch data'])
    t_is(branch[:, ibr_flow  ], branch_soln[:, ibr_flow  ],  3, [t, 'branch flow'])
    t_is(branch[:, ibr_mu    ], branch_soln[:, ibr_mu    ],  2, [t, 'branch mu'])

    ##-----  test OPF with angle difference limits  -----
    ppc = loadcase(join(tdir, 't_case9_opfv2'))
    ## remove capability curves
    ppc['gen'][ix_(arange(1, 3),
                   [PC1, PC2, QC1MIN, QC1MAX, QC2MIN, QC2MAX])] = zeros((2, 6))

    ## get solved AC power flow case from MAT-file
    soln9_opf_ang = loadmat(join(tdir, 'soln9_opf_ang.mat'), struct_as_record=True)
    ## defines bus_soln, gen_soln, branch_soln, f_soln
    bus_soln = soln9_opf_ang['bus_soln']
    gen_soln = soln9_opf_ang['gen_soln']
    branch_soln = soln9_opf_ang['branch_soln']
    f_soln = soln9_opf_ang['f_soln'][0]

    ## run OPF with angle difference limits
    t = ''.join([t0, 'w/angle difference limits : '])
    r = runopf(ppc, ppopt)
    bus, gen, branch, f, success = \
            r['bus'], r['gen'], r['branch'], r['f'], r['success']
    t_ok(success, [t, 'success'])
    t_is(f, f_soln, 3, [t, 'f'])
    t_is(   bus[:, ib_data   ],    bus_soln[:, ib_data   ], 10, [t, 'bus data'])
    t_is(   bus[:, ib_voltage],    bus_soln[:, ib_voltage],  3, [t, 'bus voltage'])
    t_is(   bus[:, ib_lam    ],    bus_soln[:, ib_lam    ],  3, [t, 'bus lambda'])
    t_is(   bus[:, ib_mu     ],    bus_soln[:, ib_mu     ],  1, [t, 'bus mu'])
    t_is(   gen[:, ig_data   ],    gen_soln[:, ig_data   ], 10, [t, 'gen data'])
    t_is(   gen[:, ig_disp   ],    gen_soln[:, ig_disp   ],  3, [t, 'gen dispatch'])
    t_is(   gen[:, ig_mu     ],    gen_soln[:, ig_mu     ],  3, [t, 'gen mu'])
    t_is(branch[:, ibr_data  ], branch_soln[:, ibr_data  ], 10, [t, 'branch data'])
    t_is(branch[:, ibr_flow  ], branch_soln[:, ibr_flow  ],  3, [t, 'branch flow'])
    t_is(branch[:, ibr_mu    ], branch_soln[:, ibr_mu    ],  2, [t, 'branch mu'])
    t_is(branch[:, ibr_angmu ], branch_soln[:, ibr_angmu ],  2, [t, 'branch angle mu'])

    ##-----  test OPF with ignored angle difference limits  -----
    ## get solved AC power flow case from MAT-file
    soln9_opf = loadmat(join(tdir, 'soln9_opf.mat'), struct_as_record=True)
    ## defines bus_soln, gen_soln, branch_soln, f_soln
    bus_soln = soln9_opf['bus_soln']
    gen_soln = soln9_opf['gen_soln']
    branch_soln = soln9_opf['branch_soln']
    f_soln = soln9_opf['f_soln'][0]

    ## run OPF with ignored angle difference limits
    t = ''.join([t0, 'w/ignored angle difference limits : '])
    ppopt1 = ppoption(ppopt, OPF_IGNORE_ANG_LIM=1)
    r = runopf(ppc, ppopt1)
    bus, gen, branch, f, success = \
            r['bus'], r['gen'], r['branch'], r['f'], r['success']
    ## ang limits are not in this solution data, so let's remove them
    branch[0, ANGMAX] =  360
    branch[8, ANGMIN] = -360
    t_ok(success, [t, 'success'])
    t_is(f, f_soln, 3, [t, 'f'])
    t_is(   bus[:, ib_data   ],    bus_soln[:, ib_data   ], 10, [t, 'bus data'])
    t_is(   bus[:, ib_voltage],    bus_soln[:, ib_voltage],  3, [t, 'bus voltage'])
    t_is(   bus[:, ib_lam    ],    bus_soln[:, ib_lam    ],  3, [t, 'bus lambda'])
    t_is(   bus[:, ib_mu     ],    bus_soln[:, ib_mu     ],  2, [t, 'bus mu'])
    t_is(   gen[:, ig_data   ],    gen_soln[:, ig_data   ], 10, [t, 'gen data'])
    t_is(   gen[:, ig_disp   ],    gen_soln[:, ig_disp   ],  3, [t, 'gen dispatch'])
    t_is(   gen[:, ig_mu     ],    gen_soln[:, ig_mu     ],  3, [t, 'gen mu'])
    t_is(branch[:, ibr_data  ], branch_soln[:, ibr_data  ], 10, [t, 'branch data'])
    t_is(branch[:, ibr_flow  ], branch_soln[:, ibr_flow  ],  3, [t, 'branch flow'])
    t_is(branch[:, ibr_mu    ], branch_soln[:, ibr_mu    ],  2, [t, 'branch mu'])

    t_end()
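
Examples 39, 41, and 42 all build fparm by broadcasting a column of ones against a single row, replicating [1, 0, 0, 1] once per generator. An equivalent np.tile construction, shown only to make the broadcast explicit:

import numpy as np

ng = 3
fparm = np.ones((ng, 1)) * np.array([[1, 0, 0, 1]])    # broadcast, as in the tests
assert np.array_equal(fparm, np.tile([1.0, 0, 0, 1], (ng, 1)))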

Example 42

Project: PYPOWER Source File: t_opf_pips_sc.py
def t_opf_pips_sc(quiet=False):
    """Tests for step-controlled PIPS-based AC optimal power flow.

    @author: Ray Zimmerman (PSERC Cornell)
    """
    num_tests = 101

    t_begin(num_tests, quiet)

    tdir = dirname(__file__)
    casefile = join(tdir, 't_case9_opf')
    verbose = 0  # not quiet

    t0 = 'PIPS-sc : '
    ppopt = ppoption(OPF_VIOLATION=1e-6, PDIPM_GRADTOL=1e-8,
                     PDIPM_COMPTOL=1e-8, PDIPM_COSTTOL=1e-9)
    ppopt = ppoption(ppopt, OUT_ALL=0, VERBOSE=verbose, OPF_ALG=565)

    ## set up indices
    ib_data     = r_[arange(BUS_AREA + 1), arange(BASE_KV, VMIN + 1)]
    ib_voltage  = arange(VM, VA + 1)
    ib_lam      = arange(LAM_P, LAM_Q + 1)
    ib_mu       = arange(MU_VMAX, MU_VMIN + 1)
    ig_data     = r_[[GEN_BUS, QMAX, QMIN], arange(MBASE, APF + 1)]
    ig_disp     = array([PG, QG, VG])
    ig_mu       = arange(MU_PMAX, MU_QMIN + 1)
    ibr_data    = arange(ANGMAX + 1)
    ibr_flow    = arange(PF, QT + 1)
    ibr_mu      = array([MU_SF, MU_ST])
    ibr_angmu   = array([MU_ANGMIN, MU_ANGMAX])

    ## get solved AC power flow case from MAT-file
    soln9_opf = loadmat(join(tdir, 'soln9_opf.mat'), struct_as_record=True)
    ## defines bus_soln, gen_soln, branch_soln, f_soln
    bus_soln = soln9_opf['bus_soln']
    gen_soln = soln9_opf['gen_soln']
    branch_soln = soln9_opf['branch_soln']
    f_soln = soln9_opf['f_soln'][0]

    ## run OPF
    t = t0
    r = runopf(casefile, ppopt)
    bus, gen, branch, f, success = \
            r['bus'], r['gen'], r['branch'], r['f'], r['success']
    t_ok(success, [t, 'success'])
    t_is(f, f_soln, 3, [t, 'f'])
    t_is(   bus[:, ib_data   ],    bus_soln[:, ib_data   ], 10, [t, 'bus data'])
    t_is(   bus[:, ib_voltage],    bus_soln[:, ib_voltage],  3, [t, 'bus voltage'])
    t_is(   bus[:, ib_lam    ],    bus_soln[:, ib_lam    ],  3, [t, 'bus lambda'])
    t_is(   bus[:, ib_mu     ],    bus_soln[:, ib_mu     ],  2, [t, 'bus mu'])
    t_is(   gen[:, ig_data   ],    gen_soln[:, ig_data   ], 10, [t, 'gen data'])
    t_is(   gen[:, ig_disp   ],    gen_soln[:, ig_disp   ],  3, [t, 'gen dispatch'])
    t_is(   gen[:, ig_mu     ],    gen_soln[:, ig_mu     ],  3, [t, 'gen mu'])
    t_is(branch[:, ibr_data  ], branch_soln[:, ibr_data  ], 10, [t, 'branch data'])
    t_is(branch[:, ibr_flow  ], branch_soln[:, ibr_flow  ],  3, [t, 'branch flow'])
    t_is(branch[:, ibr_mu    ], branch_soln[:, ibr_mu    ],  2, [t, 'branch mu'])

    ## run with automatic conversion of single-block pwl to linear costs
    t = ''.join([t0, '(single-block PWL) : '])
    ppc = loadcase(casefile)
    ppc['gencost'][2, NCOST] = 2
    r = runopf(ppc, ppopt)
    bus, gen, branch, f, success = \
            r['bus'], r['gen'], r['branch'], r['f'], r['success']
    t_ok(success, [t, 'success'])
    t_is(f, f_soln, 3, [t, 'f'])
    t_is(   bus[:, ib_data   ],    bus_soln[:, ib_data   ], 10, [t, 'bus data'])
    t_is(   bus[:, ib_voltage],    bus_soln[:, ib_voltage],  3, [t, 'bus voltage'])
    t_is(   bus[:, ib_lam    ],    bus_soln[:, ib_lam    ],  3, [t, 'bus lambda'])
    t_is(   bus[:, ib_mu     ],    bus_soln[:, ib_mu     ],  2, [t, 'bus mu'])
    t_is(   gen[:, ig_data   ],    gen_soln[:, ig_data   ], 10, [t, 'gen data'])
    t_is(   gen[:, ig_disp   ],    gen_soln[:, ig_disp   ],  3, [t, 'gen dispatch'])
    t_is(   gen[:, ig_mu     ],    gen_soln[:, ig_mu     ],  3, [t, 'gen mu'])
    t_is(branch[:, ibr_data  ], branch_soln[:, ibr_data  ], 10, [t, 'branch data'])
    t_is(branch[:, ibr_flow  ], branch_soln[:, ibr_flow  ],  3, [t, 'branch flow'])
    t_is(branch[:, ibr_mu    ], branch_soln[:, ibr_mu    ],  2, [t, 'branch mu'])
    xr = r_[r['var']['val']['Va'], r['var']['val']['Vm'], r['var']['val']['Pg'],
            r['var']['val']['Qg'], 0, r['var']['val']['y']]
    t_is(r['x'], xr, 8, [t, 'check on raw x returned from OPF'])

    ## get solved AC power flow case from MAT-file
    soln9_opf_Plim = loadmat(join(tdir, 'soln9_opf_Plim.mat'), struct_as_record=True)
    ## defines bus_soln, gen_soln, branch_soln, f_soln
    bus_soln = soln9_opf_Plim['bus_soln']
    gen_soln = soln9_opf_Plim['gen_soln']
    branch_soln = soln9_opf_Plim['branch_soln']
    f_soln = soln9_opf_Plim['f_soln'][0]

    ## run OPF with active power line limits
    t = ''.join([t0, '(P line lim) : '])
    ppopt1 = ppoption(ppopt, OPF_FLOW_LIM=1)
    r = runopf(casefile, ppopt1)
    bus, gen, branch, f, success = \
            r['bus'], r['gen'], r['branch'], r['f'], r['success']
    t_ok(success, [t, 'success'])
    t_is(f, f_soln, 3, [t, 'f'])
    t_is(   bus[:, ib_data   ],    bus_soln[:, ib_data   ], 10, [t, 'bus data'])
    t_is(   bus[:, ib_voltage],    bus_soln[:, ib_voltage],  3, [t, 'bus voltage'])
    t_is(   bus[:, ib_lam    ],    bus_soln[:, ib_lam    ],  3, [t, 'bus lambda'])
    t_is(   bus[:, ib_mu     ],    bus_soln[:, ib_mu     ],  2, [t, 'bus mu'])
    t_is(   gen[:, ig_data   ],    gen_soln[:, ig_data   ], 10, [t, 'gen data'])
    t_is(   gen[:, ig_disp   ],    gen_soln[:, ig_disp   ],  3, [t, 'gen dispatch'])
    t_is(   gen[:, ig_mu     ],    gen_soln[:, ig_mu     ],  3, [t, 'gen mu'])
    t_is(branch[:, ibr_data  ], branch_soln[:, ibr_data  ], 10, [t, 'branch data'])
    t_is(branch[:, ibr_flow  ], branch_soln[:, ibr_flow  ],  3, [t, 'branch flow'])
    t_is(branch[:, ibr_mu    ], branch_soln[:, ibr_mu    ],  2, [t, 'branch mu'])

    ##-----  test OPF with quadratic gen costs moved to generalized costs  -----
    ppc = loadcase(casefile)
    ppc['gencost'] = array([
        [2,   1500, 0,   3,   0.11,    5,   0],
        [2,   2000, 0,   3,   0.085,   1.2, 0],
        [2,   3000, 0,   3,   0.1225,  1,   0]
    ])
    r = runopf(ppc, ppopt)
    bus_soln, gen_soln, branch_soln, f_soln, success = \
            r['bus'], r['gen'], r['branch'], r['f'], r['success']
    branch_soln = branch_soln[:, :MU_ST + 1]

    A = None
    l = array([])
    u = array([])
    nb = ppc['bus'].shape[0]      # number of buses
    ng = ppc['gen'].shape[0]      # number of gens
    thbas = 0;        thend = thbas + nb
    vbas = thend;     vend = vbas + nb
    pgbas = vend;     pgend = pgbas + ng
#    qgbas = pgend;   qgend = qgbas + ng
    nxyz = 2 * nb + 2 * ng
    N = sparse((ppc['baseMVA'] * ones(ng), (arange(ng), arange(pgbas, pgend))), (ng, nxyz))
    fparm = ones((ng, 1)) * array([[1, 0, 0, 1]])
    ix = argsort(ppc['gen'][:, 0])
    H = 2 * spdiags(ppc['gencost'][ix, 4], 0, ng, ng, 'csr')
    Cw = ppc['gencost'][ix, 5]
    ppc['gencost'][:, 4:7] = 0

    ## run OPF with quadratic gen costs moved to generalized costs
    t = ''.join([t0, 'w/quadratic generalized gen cost : '])
    r = opf(ppc, A, l, u, ppopt, N, fparm, H, Cw)
    f, bus, gen, branch, success = \
            r['f'], r['bus'], r['gen'], r['branch'], r['success']
    t_ok(success, [t, 'success'])
    t_is(f, f_soln, 3, [t, 'f'])
    t_is(   bus[:, ib_data   ],    bus_soln[:, ib_data   ], 10, [t, 'bus data'])
    t_is(   bus[:, ib_voltage],    bus_soln[:, ib_voltage],  3, [t, 'bus voltage'])
    t_is(   bus[:, ib_lam    ],    bus_soln[:, ib_lam    ],  3, [t, 'bus lambda'])
    t_is(   bus[:, ib_mu     ],    bus_soln[:, ib_mu     ],  2, [t, 'bus mu'])
    t_is(   gen[:, ig_data   ],    gen_soln[:, ig_data   ], 10, [t, 'gen data'])
    t_is(   gen[:, ig_disp   ],    gen_soln[:, ig_disp   ],  3, [t, 'gen dispatch'])
    t_is(   gen[:, ig_mu     ],    gen_soln[:, ig_mu     ],  3, [t, 'gen mu'])
    t_is(branch[:, ibr_data  ], branch_soln[:, ibr_data  ], 10, [t, 'branch data'])
    t_is(branch[:, ibr_flow  ], branch_soln[:, ibr_flow  ],  3, [t, 'branch flow'])
    t_is(branch[:, ibr_mu    ], branch_soln[:, ibr_mu    ],  2, [t, 'branch mu'])
    t_is(r['cost']['usr'], f, 12, [t, 'user cost'])

    ##-----  run OPF with extra linear user constraints & costs  -----
    ## single new z variable constrained to be greater than or equal to
    ## deviation from 1 pu voltage at bus 1, linear cost on this z
    ## get solved AC power flow case from MAT-file
    soln9_opf_extras1 = loadmat(join(tdir, 'soln9_opf_extras1.mat'), struct_as_record=True)
    ## defines bus_soln, gen_soln, branch_soln, f_soln
    bus_soln = soln9_opf_extras1['bus_soln']
    gen_soln = soln9_opf_extras1['gen_soln']
    branch_soln = soln9_opf_extras1['branch_soln']
    f_soln = soln9_opf_extras1['f_soln'][0]

    row = [0, 0, 1, 1]
    col = [9, 24, 9, 24]
    A = sparse(([-1, 1, 1, 1], (row, col)), (2, 25))
    u = array([Inf, Inf])
    l = array([-1, 1])

    N = sparse(([1], ([0], [24])), (1, 25))    ## new z variable only
    fparm = array([[1, 0, 0, 1]])              ## w = r = z
    H = sparse((1, 1))                ## no quadratic term
    Cw = array([100.0])

    t = ''.join([t0, 'w/extra constraints & costs 1 : '])
    r = opf(casefile, A, l, u, ppopt, N, fparm, H, Cw)
    f, bus, gen, branch, success = \
            r['f'], r['bus'], r['gen'], r['branch'], r['success']
    t_ok(success, [t, 'success'])
    t_is(f, f_soln, 3, [t, 'f'])
    t_is(   bus[:, ib_data   ],    bus_soln[:, ib_data   ], 10, [t, 'bus data'])
    t_is(   bus[:, ib_voltage],    bus_soln[:, ib_voltage],  3, [t, 'bus voltage'])
    t_is(   bus[:, ib_lam    ],    bus_soln[:, ib_lam    ],  3, [t, 'bus lambda'])
    t_is(   bus[:, ib_mu     ],    bus_soln[:, ib_mu     ],  2, [t, 'bus mu'])
    t_is(   gen[:, ig_data   ],    gen_soln[:, ig_data   ], 10, [t, 'gen data'])
    t_is(   gen[:, ig_disp   ],    gen_soln[:, ig_disp   ],  3, [t, 'gen dispatch'])
    t_is(   gen[:, ig_mu     ],    gen_soln[:, ig_mu     ],  3, [t, 'gen mu'])
    t_is(branch[:, ibr_data  ], branch_soln[:, ibr_data  ], 10, [t, 'branch data'])
    t_is(branch[:, ibr_flow  ], branch_soln[:, ibr_flow  ],  3, [t, 'branch flow'])
    t_is(branch[:, ibr_mu    ], branch_soln[:, ibr_mu    ],  2, [t, 'branch mu'])
    t_is(r['var']['val']['z'], 0.025419, 6, [t, 'user variable'])
    t_is(r['cost']['usr'], 2.5419, 4, [t, 'user cost'])

    ##-----  test OPF with capability curves  -----
    ppc = loadcase(join(tdir, 't_case9_opfv2'))
    ## remove angle diff limits
    ppc['branch'][0, ANGMAX] = 360
    ppc['branch'][8, ANGMIN] = -360

    ## get solved AC power flow case from MAT-file
    soln9_opf_PQcap = loadmat(join(tdir, 'soln9_opf_PQcap.mat'), struct_as_record=True)
    ## defines bus_soln, gen_soln, branch_soln, f_soln
    bus_soln = soln9_opf_PQcap['bus_soln']
    gen_soln = soln9_opf_PQcap['gen_soln']
    branch_soln = soln9_opf_PQcap['branch_soln']
    f_soln = soln9_opf_PQcap['f_soln'][0]

    ## run OPF with capability curves
    t = ''.join([t0, 'w/capability curves : '])
    r = runopf(ppc, ppopt)
    f, bus, gen, branch, success = \
            r['f'], r['bus'], r['gen'], r['branch'], r['success']
    t_ok(success, [t, 'success'])
    t_is(f, f_soln, 3, [t, 'f'])
    t_is(   bus[:, ib_data   ],    bus_soln[:, ib_data   ], 10, [t, 'bus data'])
    t_is(   bus[:, ib_voltage],    bus_soln[:, ib_voltage],  3, [t, 'bus voltage'])
    t_is(   bus[:, ib_lam    ],    bus_soln[:, ib_lam    ],  3, [t, 'bus lambda'])
    t_is(   bus[:, ib_mu     ],    bus_soln[:, ib_mu     ],  2, [t, 'bus mu'])
    t_is(   gen[:, ig_data   ],    gen_soln[:, ig_data   ], 10, [t, 'gen data'])
    t_is(   gen[:, ig_disp   ],    gen_soln[:, ig_disp   ],  3, [t, 'gen dispatch'])
    t_is(   gen[:, ig_mu     ],    gen_soln[:, ig_mu     ],  3, [t, 'gen mu'])
    t_is(branch[:, ibr_data  ], branch_soln[:, ibr_data  ], 10, [t, 'branch data'])
    t_is(branch[:, ibr_flow  ], branch_soln[:, ibr_flow  ],  3, [t, 'branch flow'])
    t_is(branch[:, ibr_mu    ], branch_soln[:, ibr_mu    ],  2, [t, 'branch mu'])

    ##-----  test OPF with angle difference limits  -----
    ppc = loadcase(join(tdir, 't_case9_opfv2'))
    ## remove capability curves
    ppc['gen'][ix_(arange(1, 3),
                   [PC1, PC2, QC1MIN, QC1MAX, QC2MIN, QC2MAX])] = zeros((2, 6))

    ## get solved AC power flow case from MAT-file
    soln9_opf_ang = loadmat(join(tdir, 'soln9_opf_ang.mat'), struct_as_record=True)
    ## defines bus_soln, gen_soln, branch_soln, f_soln
    bus_soln = soln9_opf_ang['bus_soln']
    gen_soln = soln9_opf_ang['gen_soln']
    branch_soln = soln9_opf_ang['branch_soln']
    f_soln = soln9_opf_ang['f_soln'][0]

    ## run OPF with angle difference limits
    t = ''.join([t0, 'w/angle difference limits : '])
    r = runopf(ppc, ppopt)
    f, bus, gen, branch, success = \
            r['f'], r['bus'], r['gen'], r['branch'], r['success']
    t_ok(success, [t, 'success'])
    t_is(f, f_soln, 3, [t, 'f'])
    t_is(   bus[:, ib_data   ],    bus_soln[:, ib_data   ], 10, [t, 'bus data'])
    t_is(   bus[:, ib_voltage],    bus_soln[:, ib_voltage],  3, [t, 'bus voltage'])
    t_is(   bus[:, ib_lam    ],    bus_soln[:, ib_lam    ],  3, [t, 'bus lambda'])
    t_is(   bus[:, ib_mu     ],    bus_soln[:, ib_mu     ],  1, [t, 'bus mu'])
    t_is(   gen[:, ig_data   ],    gen_soln[:, ig_data   ], 10, [t, 'gen data'])
    t_is(   gen[:, ig_disp   ],    gen_soln[:, ig_disp   ],  3, [t, 'gen dispatch'])
    t_is(   gen[:, ig_mu     ],    gen_soln[:, ig_mu     ],  3, [t, 'gen mu'])
    t_is(branch[:, ibr_data  ], branch_soln[:, ibr_data  ], 10, [t, 'branch data'])
    t_is(branch[:, ibr_flow  ], branch_soln[:, ibr_flow  ],  3, [t, 'branch flow'])
    t_is(branch[:, ibr_mu    ], branch_soln[:, ibr_mu    ],  2, [t, 'branch mu'])
    t_is(branch[:, ibr_angmu ], branch_soln[:, ibr_angmu ],  2, [t, 'branch angle mu'])

    ##-----  test OPF with ignored angle difference limits  -----
    ## get solved AC power flow case from MAT-file
    soln9_opf = loadmat(join(tdir, 'soln9_opf.mat'), struct_as_record=True)
    ## defines bus_soln, gen_soln, branch_soln, f_soln
    bus_soln = soln9_opf['bus_soln']
    gen_soln = soln9_opf['gen_soln']
    branch_soln = soln9_opf['branch_soln']
    f_soln = soln9_opf['f_soln'][0]

    ## run OPF with ignored angle difference limits
    t = ''.join([t0, 'w/ignored angle difference limits : '])
    ppopt1 = ppoption(ppopt, OPF_IGNORE_ANG_LIM=1)
    r = runopf(ppc, ppopt1)
    bus, gen, branch, f, success = \
            r['bus'], r['gen'], r['branch'], r['f'], r['success']
    ## ang limits are not in this solution data, so let's remove them
    branch[0, ANGMAX] =  360
    branch[8, ANGMIN] = -360
    t_ok(success, [t, 'success'])
    t_is(f, f_soln, 3, [t, 'f'])
    t_is(   bus[:, ib_data   ],    bus_soln[:, ib_data   ], 10, [t, 'bus data'])
    t_is(   bus[:, ib_voltage],    bus_soln[:, ib_voltage],  3, [t, 'bus voltage'])
    t_is(   bus[:, ib_lam    ],    bus_soln[:, ib_lam    ],  3, [t, 'bus lambda'])
    t_is(   bus[:, ib_mu     ],    bus_soln[:, ib_mu     ],  2, [t, 'bus mu'])
    t_is(   gen[:, ig_data   ],    gen_soln[:, ig_data   ], 10, [t, 'gen data'])
    t_is(   gen[:, ig_disp   ],    gen_soln[:, ig_disp   ],  3, [t, 'gen dispatch'])
    t_is(   gen[:, ig_mu     ],    gen_soln[:, ig_mu     ],  3, [t, 'gen mu'])
    t_is(branch[:, ibr_data  ], branch_soln[:, ibr_data  ], 10, [t, 'branch data'])
    t_is(branch[:, ibr_flow  ], branch_soln[:, ibr_flow  ],  3, [t, 'branch flow'])
    t_is(branch[:, ibr_mu    ], branch_soln[:, ibr_mu    ],  2, [t, 'branch mu'])

    t_end()
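
For readers decoding the generalized cost block in the test above (N, fparm, H, Cw): with the identity fparm rows [1, 0, 0, 1], the user cost reduces to f_u = 0.5 * w' H w + Cw' w with w = N x, which reproduces the quadratic polynomial costs the test zeroes out of ppc['gencost']. A minimal standalone sketch with hypothetical two-generator numbers (not part of PYPOWER's test suite):

import numpy as np
from scipy.sparse import csr_matrix

def user_cost(x, N, H, Cw):
    # f_u = 0.5 * w' H w + Cw' w, with w = N x (identity fparm)
    w = N @ x
    return 0.5 * w @ (H @ w) + Cw @ w

baseMVA = 100.0
N = csr_matrix(baseMVA * np.eye(2))           # w = Pg in MW
H = csr_matrix(2.0 * np.diag([0.11, 0.085]))  # 2 * quadratic coefficients
Cw = np.array([5.0, 1.2])                     # linear coefficients
print(user_cost(np.array([0.5, 0.8]), N, H, Cw))
# 1165.0 == 0.11*50**2 + 5*50 + 0.085*80**2 + 1.2*80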

Example 43

Project: PYPOWER Source File: t_scale_load.py
def t_scale_load(quiet=False):
    """Tests for code in C{scale_load}.

    @author: Ray Zimmerman (PSERC Cornell)
    """
    n_tests = 275

    t_begin(n_tests, quiet)

    ppc = loadcase(join(dirname(__file__), 't_auction_case'))
    ppc['gen'][7, GEN_BUS] = 2    ## multiple dispatchable loads per area, same bus as gen
    ppc['gen'][7, [QG, QMIN, QMAX]] = array([3, 0, 3])
    ## put this load before the gens in the matrix

    ppc['gen'] = vstack([ppc['gen'][7, :], ppc['gen'][:7, :], ppc['gen'][8, :]])
    ld = find(isload(ppc['gen']))
    a = [None] * 3
    lda = [None] * 3
    for k in range(3):
        a[k] = find(ppc['bus'][:, BUS_AREA] == k + 1)  ## buses in area k
        tmp = find( in1d(ppc['gen'][ld, GEN_BUS] - 1, a[k]) )
        lda[k] = ld[tmp]                       ## disp loads in area k

    area = [None] * 3
    for k in range(3):
        area[k] = {'fixed': {}, 'disp': {}, 'both': {}}
        area[k]['fixed']['p'] = sum(ppc['bus'][a[k], PD])
        area[k]['fixed']['q'] = sum(ppc['bus'][a[k], QD])
        area[k]['disp']['p'] = -sum(ppc['gen'][lda[k], PMIN])
        area[k]['disp']['qmin'] = -sum(ppc['gen'][lda[k], QMIN])
        area[k]['disp']['qmax'] = -sum(ppc['gen'][lda[k], QMAX])
        area[k]['disp']['q'] = area[k]['disp']['qmin'] + area[k]['disp']['qmax']
        area[k]['both']['p'] = area[k]['fixed']['p'] + area[k]['disp']['p']
        area[k]['both']['q'] = area[k]['fixed']['q'] + area[k]['disp']['q']

    total = {'fixed': {}, 'disp': {}, 'both': {}}
    total['fixed']['p'] = sum(ppc['bus'][:, PD])
    total['fixed']['q'] = sum(ppc['bus'][:, QD])
    total['disp']['p'] = -sum(ppc['gen'][ld, PMIN])
    total['disp']['qmin'] = -sum(ppc['gen'][ld, QMIN])
    total['disp']['qmax'] = -sum(ppc['gen'][ld, QMAX])
    total['disp']['q'] = total['disp']['qmin'] + total['disp']['qmax']
    total['both']['p'] = total['fixed']['p'] + total['disp']['p']
    total['both']['q'] = total['fixed']['q'] + total['disp']['q']

    ##-----  single load zone, one scale factor  -----
    load = array([2])
    t = 'all fixed loads (PQ) * 2 : '
    bus, _ = scale_load(load, ppc['bus'])
    t_is(sum(bus[:, PD]), load * total['fixed']['p'], 8, [t, 'total fixed P'])
    t_is(sum(bus[:, QD]), load * total['fixed']['q'], 8, [t, 'total fixed Q'])
    opt = {'which': 'FIXED'}

    bus, gen = scale_load(load, ppc['bus'], ppc['gen'], None, opt)

    t_is(sum(bus[:, PD]), load * total['fixed']['p'], 8, [t, 'total fixed P'])
    t_is(sum(bus[:, QD]), load * total['fixed']['q'], 8, [t, 'total fixed Q'])
    t_is(-sum(gen[ld, PMIN]), total['disp']['p'], 8, [t, 'total disp P'])
    t_is(-sum(gen[ld, QMIN]), total['disp']['qmin'], 8, [t, 'total disp Qmin'])
    t_is(-sum(gen[ld, QMAX]), total['disp']['qmax'], 8, [t, 'total disp Qmax'])

    t = 'all fixed loads (P) * 2 : '
    opt = {'pq': 'P'}
    bus, _ = scale_load(load, ppc['bus'], None, None, opt)
    t_is(sum(bus[:, PD]), load * total['fixed']['p'], 8, [t, 'total fixed P'])
    t_is(sum(bus[:, QD]), total['fixed']['q'], 8, [t, 'total fixed Q'])
    opt = {'pq': 'P', 'which': 'FIXED'}
    bus, gen = scale_load(load, ppc['bus'], ppc['gen'], None, opt)
    t_is(sum(bus[:, PD]), load * total['fixed']['p'], 8, [t, 'total fixed P'])
    t_is(sum(bus[:, QD]), total['fixed']['q'], 8, [t, 'total fixed Q'])
    t_is(-sum(gen[ld, PMIN]), total['disp']['p'], 8, [t, 'total disp P'])
    t_is(-sum(gen[ld, QMIN]), total['disp']['qmin'], 8, [t, 'total disp Qmin'])
    t_is(-sum(gen[ld, QMAX]), total['disp']['qmax'], 8, [t, 'total disp Qmax'])

    t = 'all loads (PQ) * 2 : '
    bus, gen = scale_load(load, ppc['bus'], ppc['gen'])
    t_is(sum(bus[:, PD]), load * total['fixed']['p'], 8, [t, 'total fixed P'])
    t_is(sum(bus[:, QD]), load * total['fixed']['q'], 8, [t, 'total fixed Q'])
    t_is(-sum(gen[ld, PMIN]), load * total['disp']['p'], 8, [t, 'total disp P'])
    t_is(-sum(gen[ld, QMIN]), load * total['disp']['qmin'], 8, [t, 'total disp Qmin'])
    t_is(-sum(gen[ld, QMAX]), load * total['disp']['qmax'], 8, [t, 'total disp Qmax'])

    t = 'all loads (P) * 2 : '
    opt = {'pq': 'P'}
    bus, gen = scale_load(load, ppc['bus'], ppc['gen'], None, opt)
    t_is(sum(bus[:, PD]), load * total['fixed']['p'], 8, [t, 'total fixed P'])
    t_is(sum(bus[:, QD]), total['fixed']['q'], 8, [t, 'total fixed Q'])
    t_is(-sum(gen[ld, PMIN]), load * total['disp']['p'], 8, [t, 'total disp P'])
    t_is(-sum(gen[ld, QMIN]), total['disp']['qmin'], 8, [t, 'total disp Qmin'])
    t_is(-sum(gen[ld, QMAX]), total['disp']['qmax'], 8, [t, 'total disp Qmax'])

    t = 'all disp loads (PQ) * 2 : '
    opt = {'which': 'DISPATCHABLE'}
    bus, gen = scale_load(load, ppc['bus'], ppc['gen'], None, opt)
    t_is(sum(bus[:, PD]), total['fixed']['p'], 8, [t, 'total fixed P'])
    t_is(sum(bus[:, QD]), total['fixed']['q'], 8, [t, 'total fixed Q'])
    t_is(-sum(gen[ld, PMIN]), load * total['disp']['p'], 8, [t, 'total disp P'])
    t_is(-sum(gen[ld, QMIN]), load * total['disp']['qmin'], 8, [t, 'total disp Qmin'])
    t_is(-sum(gen[ld, QMAX]), load * total['disp']['qmax'], 8, [t, 'total disp Qmax'])

    t = 'all disp loads (P) * 2 : '
    opt = {'pq': 'P', 'which': 'DISPATCHABLE'}
    bus, gen = scale_load(load, ppc['bus'], ppc['gen'], None, opt)
    t_is(sum(bus[:, PD]), total['fixed']['p'], 8, [t, 'total fixed P'])
    t_is(sum(bus[:, QD]), total['fixed']['q'], 8, [t, 'total fixed Q'])
    t_is(-sum(gen[ld, PMIN]), load * total['disp']['p'], 8, [t, 'total disp P'])
    t_is(-sum(gen[ld, QMIN]), total['disp']['qmin'], 8, [t, 'total disp Qmin'])
    t_is(-sum(gen[ld, QMAX]), total['disp']['qmax'], 8, [t, 'total disp Qmax'])

    ##-----  single load zone, one scale quantity  -----
    load = array([200.0])
    t = 'all fixed loads (PQ) => total = 200 : '
    opt = {'scale': 'QUANTITY'}
    bus, _ = scale_load(load, ppc['bus'], None, None, opt)
    t_is(sum(bus[:, PD]), load, 8, [t, 'total fixed P'])
    t_is(sum(bus[:, QD]), load / total['fixed']['p'] * total['fixed']['q'], 8, [t, 'total fixed Q'])
    opt = {'scale': 'QUANTITY', 'which': 'FIXED'}
    bus, gen = scale_load(load, ppc['bus'], ppc['gen'], None, opt)
    t_is(sum(bus[:, PD]), load - total['disp']['p'], 8, [t, 'total fixed P'])
    t_is(sum(bus[:, QD]), (load - total['disp']['p'])/total['fixed']['p']*total['fixed']['q'], 8, [t, 'total fixed Q'])
    t_is(-sum(gen[ld, PMIN]), total['disp']['p'], 8, [t, 'total disp P'])
    t_is(-sum(gen[ld, QMIN]), total['disp']['qmin'], 8, [t, 'total disp Qmin'])
    t_is(-sum(gen[ld, QMAX]), total['disp']['qmax'], 8, [t, 'total disp Qmax'])

    t = 'all fixed loads (P) => total = 200 : '
    opt = {'scale': 'QUANTITY', 'pq': 'P'}
    bus, _ = scale_load(load, ppc['bus'], None, None, opt)
    t_is(sum(bus[:, PD]), load, 8, [t, 'total fixed P'])
    t_is(sum(bus[:, QD]), total['fixed']['q'], 8, [t, 'total fixed Q'])
    opt = {'scale': 'QUANTITY', 'pq': 'P', 'which': 'FIXED'}
    bus, gen = scale_load(load, ppc['bus'], ppc['gen'], None, opt)
    t_is(sum(bus[:, PD]), load - total['disp']['p'], 8, [t, 'total fixed P'])
    t_is(sum(bus[:, QD]), total['fixed']['q'], 8, [t, 'total fixed Q'])
    t_is(-sum(gen[ld, PMIN]), total['disp']['p'], 8, [t, 'total disp P'])
    t_is(-sum(gen[ld, QMIN]), total['disp']['qmin'], 8, [t, 'total disp Qmin'])
    t_is(-sum(gen[ld, QMAX]), total['disp']['qmax'], 8, [t, 'total disp Qmax'])

    t = 'all loads (PQ) => total = 200 : '
    opt = {'scale': 'QUANTITY'}
    bus, gen = scale_load(load, ppc['bus'], ppc['gen'], None, opt)
    t_is(sum(bus[:, PD]), load / total['both']['p']*total['fixed']['p'], 8, [t, 'total fixed P'])
    t_is(sum(bus[:, QD]), load / total['both']['p']*total['fixed']['q'], 8, [t, 'total fixed Q'])
    t_is(-sum(gen[ld, PMIN]), load / total['both']['p']*total['disp']['p'], 8, [t, 'total disp P'])
    t_is(-sum(gen[ld, QMIN]), load / total['both']['p']*total['disp']['qmin'], 8, [t, 'total disp Qmin'])
    t_is(-sum(gen[ld, QMAX]), load / total['both']['p']*total['disp']['qmax'], 8, [t, 'total disp Qmax'])

    t = 'all loads (P) => total = 200 : '
    opt = {'scale': 'QUANTITY', 'pq': 'P'}
    bus, gen = scale_load(load, ppc['bus'], ppc['gen'], None, opt)
    t_is(sum(bus[:, PD]), load / total['both']['p']*total['fixed']['p'], 8, [t, 'total fixed P'])
    t_is(sum(bus[:, QD]), total['fixed']['q'], 8, [t, 'total fixed Q'])
    t_is(-sum(gen[ld, PMIN]), load / total['both']['p']*total['disp']['p'], 8, [t, 'total disp P'])
    t_is(-sum(gen[ld, QMIN]), total['disp']['qmin'], 8, [t, 'total disp Qmin'])
    t_is(-sum(gen[ld, QMAX]), total['disp']['qmax'], 8, [t, 'total disp Qmax'])

    t = 'all disp loads (PQ) => total = 200 : '
    opt = {'scale': 'QUANTITY', 'which': 'DISPATCHABLE'}
    bus, gen = scale_load(load, ppc['bus'], ppc['gen'], None, opt)
    t_is(sum(bus[:, PD]), total['fixed']['p'], 8, [t, 'total fixed P'])
    t_is(sum(bus[:, QD]), total['fixed']['q'], 8, [t, 'total fixed Q'])
    t_is(-sum(gen[ld, PMIN]), load - total['fixed']['p'], 8, [t, 'total disp P'])
    t_is(-sum(gen[ld, QMIN]), (load - total['fixed']['p'])/total['disp']['p']*total['disp']['qmin'], 8, [t, 'total disp Qmin'])
    t_is(-sum(gen[ld, QMAX]), (load - total['fixed']['p'])/total['disp']['p']*total['disp']['qmax'], 8, [t, 'total disp Qmax'])

    t = 'all disp loads (P) => total = 200 : '
    opt = {'scale': 'QUANTITY', 'pq': 'P', 'which': 'DISPATCHABLE'}
    bus, gen = scale_load(load, ppc['bus'], ppc['gen'], None, opt)
    t_is(sum(bus[:, PD]), total['fixed']['p'], 8, [t, 'total fixed P'])
    t_is(sum(bus[:, QD]), total['fixed']['q'], 8, [t, 'total fixed Q'])
    t_is(-sum(gen[ld, PMIN]), load - total['fixed']['p'], 8, [t, 'total disp P'])
    t_is(-sum(gen[ld, QMIN]), total['disp']['qmin'], 8, [t, 'total disp Qmin'])
    t_is(-sum(gen[ld, QMAX]), total['disp']['qmax'], 8, [t, 'total disp Qmax'])

    ##-----  3 zones, area scale factors  -----
    t = 'area fixed loads (PQ) * [3 2 1] : '
    load = array([3, 2, 1])
    bus, _ = scale_load(load, ppc['bus'])
    for k in range(len(load)):
        t_is(sum(bus[a[k], PD]), load[k] * area[k]['fixed']['p'], 8, '%s area %d fixed P' % (t, k))
        t_is(sum(bus[a[k], QD]), load[k] * area[k]['fixed']['q'], 8, '%s area %d fixed Q' % (t, k))

    opt = {'which': 'FIXED'}
    bus, gen = scale_load(load, ppc['bus'], ppc['gen'], None, opt)
    for k in range(len(load)):
        t_is(sum(bus[a[k], PD]), load[k] * area[k]['fixed']['p'], 8, '%s area %d fixed P' % (t, k))
        t_is(sum(bus[a[k], QD]), load[k] * area[k]['fixed']['q'], 8, '%s area %d fixed Q' % (t, k))
        t_is(-sum(gen[lda[k], PMIN]), area[k]['disp']['p'], 8, '%s area %d disp P' % (t, k))
        t_is(-sum(gen[lda[k], QMIN]), area[k]['disp']['qmin'], 8, '%s area %d disp Qmin' % (t, k))
        t_is(-sum(gen[lda[k], QMAX]), area[k]['disp']['qmax'], 8, '%s area %d disp Qmax' % (t, k))

    t = 'area fixed loads (P) * [3 2 1] : '
    load = array([3, 2, 1])
    opt = {'pq': 'P'}
    bus, _ = scale_load(load, ppc['bus'], None, None, opt)
    for k in range(len(load)):
        t_is(sum(bus[a[k], PD]), load[k] * area[k]['fixed']['p'], 8, '%s area %d fixed P' % (t, k))
        t_is(sum(bus[a[k], QD]), area[k]['fixed']['q'], 8, '%s area %d fixed Q' % (t, k))

    opt = {'pq': 'P', 'which': 'FIXED'}
    bus, gen = scale_load(load, ppc['bus'], ppc['gen'], None, opt)
    for k in range(len(load)):
        t_is(sum(bus[a[k], PD]), load[k] * area[k]['fixed']['p'], 8, '%s area %d fixed P' % (t, k))
        t_is(sum(bus[a[k], QD]), area[k]['fixed']['q'], 8, '%s area %d fixed Q' % (t, k))
        t_is(-sum(gen[lda[k], PMIN]), area[k]['disp']['p'], 8, '%s area %d disp P' % (t, k))
        t_is(-sum(gen[lda[k], QMIN]), area[k]['disp']['qmin'], 8, '%s area %d disp Qmin' % (t, k))
        t_is(-sum(gen[lda[k], QMAX]), area[k]['disp']['qmax'], 8, '%s area %d disp Qmax' % (t, k))

    t = 'all area loads (PQ) * [3 2 1] : '
    bus, gen = scale_load(load, ppc['bus'], ppc['gen'])
    for k in range(len(load)):
        t_is(sum(bus[a[k], PD]), load[k] * area[k]['fixed']['p'], 8, '%s area %d fixed P' % (t, k))
        t_is(sum(bus[a[k], QD]), load[k] * area[k]['fixed']['q'], 8, '%s area %d fixed Q' % (t, k))
        t_is(-sum(gen[lda[k], PMIN]), load[k] * area[k]['disp']['p'], 8, '%s area %d disp P' % (t, k))
        t_is(-sum(gen[lda[k], QMIN]), load[k] * area[k]['disp']['qmin'], 8, '%s area %d disp Qmin' % (t, k))
        t_is(-sum(gen[lda[k], QMAX]), load[k] * area[k]['disp']['qmax'], 8, '%s area %d disp Qmax' % (t, k))


    t = 'all area loads (P) * [3 2 1] : '
    opt = {'pq': 'P'}
    bus, gen = scale_load(load, ppc['bus'], ppc['gen'], None, opt)
    for k in range(len(load)):
        t_is(sum(bus[a[k], PD]), load[k] * area[k]['fixed']['p'], 8, '%s area %d fixed P' % (t, k))
        t_is(sum(bus[a[k], QD]), area[k]['fixed']['q'], 8, '%s area %d fixed Q' % (t, k))
        t_is(-sum(gen[lda[k], PMIN]), load[k] * area[k]['disp']['p'], 8, '%s area %d disp P' % (t, k))
        t_is(-sum(gen[lda[k], QMIN]), area[k]['disp']['qmin'], 8, '%s area %d disp Qmin' % (t, k))
        t_is(-sum(gen[lda[k], QMAX]), area[k]['disp']['qmax'], 8, '%s area %d disp Qmax' % (t, k))

    t = 'area disp loads (PQ) * [3 2 1] : '
    opt = {'which': 'DISPATCHABLE'}
    bus, gen = scale_load(load, ppc['bus'], ppc['gen'], None, opt)
    for k in range(len(load)):
        t_is(sum(bus[a[k], PD]), area[k]['fixed']['p'], 8, '%s area %d fixed P' % (t, k))
        t_is(sum(bus[a[k], QD]), area[k]['fixed']['q'], 8, '%s area %d fixed Q' % (t, k))
        t_is(-sum(gen[lda[k], PMIN]), load[k] * area[k]['disp']['p'], 8, '%s area %d disp P' % (t, k))
        t_is(-sum(gen[lda[k], QMIN]), load[k] * area[k]['disp']['qmin'], 8, '%s area %d disp Qmin' % (t, k))
        t_is(-sum(gen[lda[k], QMAX]), load[k] * area[k]['disp']['qmax'], 8, '%s area %d disp Qmax' % (t, k))

    t = 'area disp loads (P) * [3 2 1] : '
    opt = {'pq': 'P', 'which': 'DISPATCHABLE'}
    bus, gen = scale_load(load, ppc['bus'], ppc['gen'], None, opt)
    for k in range(len(load)):
        t_is(sum(bus[a[k], PD]), area[k]['fixed']['p'], 8, '%s area %d fixed P' % (t, k))
        t_is(sum(bus[a[k], QD]), area[k]['fixed']['q'], 8, '%s area %d fixed Q' % (t, k))
        t_is(-sum(gen[lda[k], PMIN]), load[k] * area[k]['disp']['p'], 8, '%s area %d disp P' % (t, k))
        t_is(-sum(gen[lda[k], QMIN]), area[k]['disp']['qmin'], 8, '%s area %d disp Qmin' % (t, k))
        t_is(-sum(gen[lda[k], QMAX]), area[k]['disp']['qmax'], 8, '%s area %d disp Qmax' % (t, k))

    ##-----  3 zones, area scale quantities  -----
    t = 'area fixed loads (PQ) => total = [100 80 60] : '
    load = array([100, 80, 60], float)
    opt = {'scale': 'QUANTITY'}
    bus, _ = scale_load(load, ppc['bus'], None, None, opt)
    for k in range(len(load)):
        t_is(sum(bus[a[k], PD]), load[k], 8, '%s area %d fixed P' % (t, k))
        t_is(sum(bus[a[k], QD]), load[k] / area[k]['fixed']['p'] * area[k]['fixed']['q'], 8, '%s area %d fixed Q' % (t, k))

    opt = {'scale': 'QUANTITY', 'which': 'FIXED'}
    bus, gen = scale_load(load, ppc['bus'], ppc['gen'], None, opt)
    for k in range(len(load)):
        t_is(sum(bus[a[k], PD]), load[k] - area[k]['disp']['p'], 8, '%s area %d fixed P' % (t, k))
        t_is(sum(bus[a[k], QD]), (load[k] - area[k]['disp']['p']) / area[k]['fixed']['p'] * area[k]['fixed']['q'], 8, '%s area %d fixed Q' % (t, k))
        t_is(-sum(gen[lda[k], PMIN]), area[k]['disp']['p'], 8, '%s area %d disp P' % (t, k))
        t_is(-sum(gen[lda[k], QMIN]), area[k]['disp']['qmin'], 8, '%s area %d disp Qmin' % (t, k))
        t_is(-sum(gen[lda[k], QMAX]), area[k]['disp']['qmax'], 8, '%s area %d disp Qmax' % (t, k))

    t = 'area fixed loads (P) => total = [100 80 60] : '
    load = array([100, 80, 60], float)
    opt = {'scale': 'QUANTITY', 'pq': 'P'}
    bus, _ = scale_load(load, ppc['bus'], None, None, opt)
    for k in range(len(load)):
        t_is(sum(bus[a[k], PD]), load[k], 8, '%s area %d fixed P' % (t, k))
        t_is(sum(bus[a[k], QD]), area[k]['fixed']['q'], 8, '%s area %d fixed Q' % (t, k))

    opt = {'scale': 'QUANTITY', 'pq': 'P', 'which': 'FIXED'}
    bus, gen = scale_load(load, ppc['bus'], ppc['gen'], None, opt)
    for k in range(len(load)):
        t_is(sum(bus[a[k], PD]), load[k]-area[k]['disp']['p'], 8, '%s area %d fixed P' % (t, k))
        t_is(sum(bus[a[k], QD]), area[k]['fixed']['q'], 8, '%s area %d fixed Q' % (t, k))
        t_is(-sum(gen[lda[k], PMIN]), area[k]['disp']['p'], 8, '%s area %d disp P' % (t, k))
        t_is(-sum(gen[lda[k], QMIN]), area[k]['disp']['qmin'], 8, '%s area %d disp Qmin' % (t, k))
        t_is(-sum(gen[lda[k], QMAX]), area[k]['disp']['qmax'], 8, '%s area %d disp Qmax' % (t, k))

    t = 'all area loads (PQ) => total = [100 80 60] : '
    opt = {'scale': 'QUANTITY'}
    bus, gen = scale_load(load, ppc['bus'], ppc['gen'], None, opt)
    for k in range(len(load)):
        t_is(sum(bus[a[k], PD]), load[k] / area[k]['both']['p'] * area[k]['fixed']['p'], 8, '%s area %d fixed P' % (t, k))
        t_is(sum(bus[a[k], QD]), load[k] / area[k]['both']['p'] * area[k]['fixed']['q'], 8, '%s area %d fixed Q' % (t, k))
        t_is(-sum(gen[lda[k], PMIN]), load[k] / area[k]['both']['p'] * area[k]['disp']['p'], 8, '%s area %d disp P' % (t, k))
        t_is(-sum(gen[lda[k], QMIN]), load[k] / area[k]['both']['p'] * area[k]['disp']['qmin'], 8, '%s area %d disp Qmin' % (t, k))
        t_is(-sum(gen[lda[k], QMAX]), load[k] / area[k]['both']['p'] * area[k]['disp']['qmax'], 8, '%s area %d disp Qmax' % (t, k))

    t = 'all area loads (P) => total = [100 80 60] : '
    opt = {'scale': 'QUANTITY', 'pq': 'P'}
    bus, gen = scale_load(load, ppc['bus'], ppc['gen'], None, opt)
    for k in range(len(load)):
        t_is(sum(bus[a[k], PD]), load[k] / area[k]['both']['p'] * area[k]['fixed']['p'], 8, '%s area %d fixed P' % (t, k))
        t_is(sum(bus[a[k], QD]), area[k]['fixed']['q'], 8, '%s area %d fixed Q' % (t, k))
        t_is(-sum(gen[lda[k], PMIN]), load[k] / area[k]['both']['p'] * area[k]['disp']['p'], 8, '%s area %d disp P' % (t, k))
        t_is(-sum(gen[lda[k], QMIN]), area[k]['disp']['qmin'], 8, '%s area %d disp Qmin' % (t, k))
        t_is(-sum(gen[lda[k], QMAX]), area[k]['disp']['qmax'], 8, '%s area %d disp Qmax' % (t, k))

    t = 'area disp loads (PQ) => total = [100 80 60] : throws expected exception'
    load = array([100, 80, 60], float)
    opt = {'scale': 'QUANTITY', 'which': 'DISPATCHABLE'}
    err = 0
    try:
        bus, gen = scale_load(load, ppc['bus'], ppc['gen'], None, opt)
    except ScalingError as e:
        expected = 'scale_load: impossible to make zone 2 load equal 80 by scaling non-existent dispatchable load'
        err = expected not in str(e)
    t_ok(err, t)

    t = 'area disp loads (PQ) => total = [100 74.3941 60] : '
    load = array([100, area[1]['fixed']['p'], 60], float)
    opt = {'scale': 'QUANTITY', 'which': 'DISPATCHABLE'}
    bus, gen = scale_load(load, ppc['bus'], ppc['gen'], None, opt)
    for k in range(len(load)):
        t_is(sum(bus[a[k], PD]), area[k]['fixed']['p'], 8, '%s area %d fixed P' % (t, k))
        t_is(sum(bus[a[k], QD]), area[k]['fixed']['q'], 8, '%s area %d fixed Q' % (t, k))
        t_is(-sum(gen[lda[k], PMIN]), load[k]-area[k]['fixed']['p'], 8, '%s area %d disp P' % (t, k))
        if k == 1:
            t_is(-sum(gen[lda[k], QMIN]), area[k]['disp']['qmin'], 8, '%s area %d disp Qmin' % (t, k))
            t_is(-sum(gen[lda[k], QMAX]), area[k]['disp']['qmax'], 8, '%s area %d disp Qmax' % (t, k))
        else:
            t_is(-sum(gen[lda[k], QMIN]), (load[k] - area[k]['fixed']['p']) / area[k]['disp']['p'] * area[k]['disp']['qmin'], 8, '%s area %d disp Qmin' % (t, k))
            t_is(-sum(gen[lda[k], QMAX]), (load[k] - area[k]['fixed']['p']) / area[k]['disp']['p'] * area[k]['disp']['qmax'], 8, '%s area %d disp Qmax' % (t, k))

    t = 'area disp loads (P) => total = [100 74.3941 60] : '
    opt = {'scale': 'QUANTITY', 'pq': 'P', 'which': 'DISPATCHABLE'}
    bus, gen = scale_load(load, ppc['bus'], ppc['gen'], None, opt)
    for k in range(len(load)):
        t_is(sum(bus[a[k], PD]), area[k]['fixed']['p'], 8, '%s area %d fixed P' % (t, k))
        t_is(sum(bus[a[k], QD]), area[k]['fixed']['q'], 8, '%s area %d fixed Q' % (t, k))
        t_is(-sum(gen[lda[k], PMIN]), load[k]-area[k]['fixed']['p'], 8, '%s area %d disp P' % (t, k))
        t_is(-sum(gen[lda[k], QMIN]), area[k]['disp']['qmin'], 8, '%s area %d disp Qmin' % (t, k))
        t_is(-sum(gen[lda[k], QMAX]), area[k]['disp']['qmax'], 8, '%s area %d disp Qmax' % (t, k))

    ##-----  explicit single load zone  -----
    t = 'explicit single load zone'
    load_zone = zeros(ppc['bus'].shape[0])
    load_zone[[2, 3]] = 1
    load = array([2.0])
    bus, gen = scale_load(load, ppc['bus'], ppc['gen'], load_zone)
    Pd = ppc['bus'][:, PD].copy()    ## copy, so the check below does not mutate ppc['bus']
    Pd[[2, 3]] = load * Pd[[2, 3]]
    t_is( bus[:, PD], Pd, 8, t)

    ##-----  explicit multiple load zone  -----
    t = 'explicit multiple load zone'
    load_zone = zeros(ppc['bus'].shape[0])
    load_zone[[2, 3]] = 1
    load_zone[[6, 7]] = 2
    load = array([2, 0.5])
    bus, gen = scale_load(load, ppc['bus'], ppc['gen'], load_zone)
    Pd = ppc['bus'][:, PD].copy()
    Pd[[2, 3]] = load[0] * Pd[[2, 3]]
    Pd[[6, 7]] = load[1] * Pd[[6, 7]]
    t_is( bus[:, PD], Pd, 8, t)

    t_end()
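
The assertions above all reduce to simple column arithmetic on the bus matrix. A minimal sketch of what the default behaviour ('FACTOR' scaling of fixed loads, both P and Q) checks, assuming PD and QD are columns 2 and 3 as in pypower.idx_bus (the bus rows here are hypothetical):

import numpy as np

PD, QD = 2, 3                             # column indices, as in pypower.idx_bus
bus = np.array([[1.0, 1.0, 90.0, 30.0],   # hypothetical [BUS_I, BUS_TYPE, PD, QD]
                [2.0, 1.0, 100.0, 35.0]])
factor = 2.0
scaled = bus.copy()
scaled[:, [PD, QD]] *= factor             # what 'all fixed loads (PQ) * 2' asserts
assert np.isclose(scaled[:, PD].sum(), factor * bus[:, PD].sum())
assert np.isclose(scaled[:, QD].sum(), factor * bus[:, QD].sum())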

Example 44

Project: filterpy Source File: UKF.py
    def __init__(self, dim_x, dim_z, dt, hx, fx, points,
                 sqrt_fn=None, x_mean_fn=None, z_mean_fn=None,
                 residual_x=None,
                 residual_z=None):
        r""" Create a Kalman filter. You are responsible for setting the
        various state variables to reasonable values; the defaults below will
        not give you a functional filter.

        Parameters
        ----------

        dim_x : int
            Number of state variables for the filter. For example, if
            you are tracking the position and velocity of an object in two
            dimensions, dim_x would be 4.


        dim_z : int
            Number of measurement inputs. For example, if the sensor
            provides you with position in (x,y), dim_z would be 2.

        dt : float
            Time between steps in seconds.

        hx : function(x)
            Measurement function. Converts state vector x into a measurement
            vector of shape (dim_z).

        fx : function(x,dt)
            Function that returns the state x transformed by the
            state transition function. dt is the time step in seconds.

        points : class
            Class which computes the sigma points and weights for a UKF
            algorithm. You can vary the UKF implementation by changing this
            class. For example, MerweScaledSigmaPoints implements the alpha,
            beta, kappa parameterization of Van der Merwe, and
            JulierSigmaPoints implements Julier's original kappa
            parameterization. See either of those for the required
            signature of this class if you want to implement your own.

        sqrt_fn : callable(ndarray), default = scipy.linalg.cholesky
            Defines how we compute the square root of a matrix, which has
            no unique answer. Cholesky is the default choice due to its
            speed. Typically your alternative choice will be
            scipy.linalg.sqrtm. Different choices affect how the sigma points
            are arranged relative to the eigenvectors of the covariance matrix.
            Usually this will not matter to you; if so, the default cholesky()
            yields maximal performance. As of van der Merwe's 2004
            dissertation [6] this was not a well researched area, so I have
            no advice to give you.

            If your method returns a triangular matrix it must be upper
            triangular. Do not use numpy.linalg.cholesky - for historical
            reasons it returns a lower triangular matrix. The SciPy version
            does the right thing.

        x_mean_fn : callable  (sigma_points, weights), optional
            Function that computes the mean of the provided sigma points
            and weights. Use this if your state variable contains nonlinear
            values such as angles which cannot be summed.

            .. code-block:: Python

                def state_mean(sigmas, Wm):
                    x = np.zeros(3)
                    sum_sin, sum_cos = 0., 0.

                    for i in range(len(sigmas)):
                        s = sigmas[i]
                        x[0] += s[0] * Wm[i]
                        x[1] += s[1] * Wm[i]
                        sum_sin += sin(s[2])*Wm[i]
                        sum_cos += cos(s[2])*Wm[i]
                    x[2] = atan2(sum_sin, sum_cos)
                    return x

        z_mean_fn : callable  (sigma_points, weights), optional
            Same as x_mean_fn, except it is called for sigma points which
            form the measurements after being passed through hx().

        residual_x : callable (x, y), optional
        residual_z : callable (x, y), optional
            Function that computes the residual (difference) between x and y.
            You will have to supply this if your state variable cannot support
            subtraction, such as angles (359-1 degrees is 2, not 358). x and y
            are state vectors, not scalars. One is for the state variable,
            the other is for the measurement state.

            .. code-block:: Python

                def residual(a, b):
                    y = a[0] - b[0]
                    if y > np.pi:
                        y -= 2*np.pi
                    if y < -np.pi:
                        y += 2*np.pi
                    return y


        References
        ----------

        .. [3] S. Julier, J. Uhlmann, and H. Durrant-Whyte. "A new method for
               the nonlinear transformation of means and covariances in filters
               and estimators," IEEE Transactions on Automatic Control, 45(3),
               pp. 477-482 (March 2000).

        .. [4] E. A. Wan and R. Van der Merwe, “The Unscented Kalman filter for
               Nonlinear Estimation,” in Proc. Symp. Adaptive Syst. Signal
               Process., Commun. Contr., Lake Louise, AB, Canada, Oct. 2000.

               https://www.seas.harvard.edu/courses/cs281/papers/unscented.pdf

        .. [5] Wan, Merwe, "The Unscented Kalman Filter," chapter in *Kalman
               Filtering and Neural Networks*, John Wiley & Sons, Inc., 2001.

        .. [6] R. Van der Merwe "Sigma-Point Kalman Filters for Probabilistic
               Inference in Dynamic State-Space Models" (Doctoral dissertation)
        """

        self.Q = eye(dim_x)
        self.R = eye(dim_z)
        self.x = zeros(dim_x)
        self.P = eye(dim_x)
        self._dim_x = dim_x
        self._dim_z = dim_z
        self.points_fn = points
        self._dt = dt
        self._num_sigmas = points.num_sigmas()
        self.hx = hx
        self.fx = fx
        self.x_mean = x_mean_fn
        self.z_mean = z_mean_fn
        self.log_likelihood = 0.0

        if sqrt_fn is None:
            self.msqrt = cholesky
        else:
            self.msqrt = sqrt_fn

        # weights for the means and covariances.
        self.Wm, self.Wc = self.points_fn.weights()

        if residual_x is None:
            self.residual_x = np.subtract
        else:
            self.residual_x = residual_x

        if residual_z is None:
            self.residual_z = np.subtract
        else:
            self.residual_z = residual_z

        # sigma points transformed through f(x) and h(x)
        # variables for efficiency so we don't recreate every update

        self.sigmas_f = zeros((self._num_sigmas, self._dim_x))
        self.sigmas_h = zeros((self._num_sigmas, self._dim_z))
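
A minimal construction sketch for the constructor above, hedged to the filterpy snapshot shown here (the call matches the parameters documented in the docstring; the fx/hx definitions below are illustrative stand-ins):

import numpy as np
from filterpy.kalman import UnscentedKalmanFilter, MerweScaledSigmaPoints

def fx(x, dt):
    # constant-velocity transition for state [position, velocity]
    return np.array([x[0] + dt * x[1], x[1]])

def hx(x):
    # we measure position only
    return np.array([x[0]])

points = MerweScaledSigmaPoints(n=2, alpha=1e-3, beta=2.0, kappa=0.0)
ukf = UnscentedKalmanFilter(dim_x=2, dim_z=1, dt=0.1,
                            hx=hx, fx=fx, points=points)
ukf.x = np.array([0.0, 1.0])   # you must set a sensible initial state...
ukf.P *= 0.2                   # ...and covariance, per the docstring
ukf.predict()
ukf.update(np.array([0.12]))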

Example 45

Project: scikit-image Source File: _daisy.py
def daisy(img, step=4, radius=15, rings=3, histograms=8, orientations=8,
          normalization='l1', sigmas=None, ring_radii=None, visualize=False):
    '''Extract DAISY feature descriptors densely for the given image.

    DAISY is a feature descriptor similar to SIFT formulated in a way that
    allows for fast dense extraction. Typically, this is practical for
    bag-of-features image representations.

    The implementation follows Tola et al. [1]_ but deviates on the following
    points:

      * Histogram bin contributions are smoothed with a circular Gaussian
        window over the tonal range (the angular range).
      * The sigma values of the spatial Gaussian smoothing in this code do not
        match the sigma values in the original code by Tola et al. [2]_. In
        their code, spatial smoothing is applied to both the input image and
        the center histogram. However, this smoothing is not documented in [1]_
        and, therefore, it is omitted.

    Parameters
    ----------
    img : (M, N) array
        Input image (greyscale).
    step : int, optional
        Distance between descriptor sampling points.
    radius : int, optional
        Radius (in pixels) of the outermost ring.
    rings : int, optional
        Number of rings.
    histograms  : int, optional
        Number of histograms sampled per ring.
    orientations : int, optional
        Number of orientations (bins) per histogram.
    normalization : [ 'l1' | 'l2' | 'daisy' | 'off' ], optional
        How to normalize the descriptors

          * 'l1': L1-normalization of each descriptor.
          * 'l2': L2-normalization of each descriptor.
          * 'daisy': L2-normalization of individual histograms.
          * 'off': Disable normalization.

    sigmas : 1D array of float, optional
        Standard deviation of spatial Gaussian smoothing for the center
        histogram and for each ring of histograms. The array of sigmas should
        be sorted from the center outwards, i.e. the first sigma value defines
        the spatial smoothing of the center histogram and the last sigma value
        defines the spatial smoothing of the outermost ring. Specifying sigmas
        overrides the following parameter.

            ``rings = len(sigmas) - 1``

    ring_radii : 1D array of int, optional
        Radius (in pixels) for each ring. Specifying ring_radii overrides the
        following two parameters.

            ``rings = len(ring_radii)``
            ``radius = ring_radii[-1]``

        If both sigmas and ring_radii are given, they must satisfy the
        following predicate since no radius is needed for the center
        histogram.

            ``len(ring_radii) == len(sigmas) - 1``

    visualize : bool, optional
        Generate a visualization of the DAISY descriptors

    Returns
    -------
    descs : array
        Grid of DAISY descriptors for the given image as an array of
        dimensionality (P, Q, R), where

            ``P = ceil((M - radius*2) / step)``
            ``Q = ceil((N - radius*2) / step)``
            ``R = (rings * histograms + 1) * orientations``

    descs_img : (M, N, 3) array (only if visualize==True)
        Visualization of the DAISY descriptors.

    References
    ----------
    .. [1] Tola et al. "Daisy: An efficient dense descriptor applied to wide-
           baseline stereo." Pattern Analysis and Machine Intelligence, IEEE
           Transactions on 32.5 (2010): 815-830.
    .. [2] http://cvlab.epfl.ch/software/daisy
    '''

    assert_nD(img, 2, 'img')

    img = img_as_float(img)

    # Validate parameters.
    if sigmas is not None and ring_radii is not None \
            and len(sigmas) - 1 != len(ring_radii):
        raise ValueError('`len(sigmas)-1 != len(ring_radii)`')
    if ring_radii is not None:
        rings = len(ring_radii)
        radius = ring_radii[-1]
    if sigmas is not None:
        rings = len(sigmas) - 1
    if sigmas is None:
        sigmas = [radius * (i + 1) / float(2 * rings) for i in range(rings)]
    if ring_radii is None:
        ring_radii = [radius * (i + 1) / float(rings) for i in range(rings)]
    if normalization not in ['l1', 'l2', 'daisy', 'off']:
        raise ValueError('Invalid normalization method.')

    # Compute image derivatives.
    dx = np.zeros(img.shape)
    dy = np.zeros(img.shape)
    dx[:, :-1] = np.diff(img, n=1, axis=1)
    dy[:-1, :] = np.diff(img, n=1, axis=0)

    # Compute gradient orientation and magnitude and their contribution
    # to the histograms.
    grad_mag = sqrt(dx ** 2 + dy ** 2)
    grad_ori = arctan2(dy, dx)
    orientation_kappa = orientations / pi
    orientation_angles = [2 * o * pi / orientations - pi
                          for o in range(orientations)]
    hist = np.empty((orientations,) + img.shape, dtype=float)
    for i, o in enumerate(orientation_angles):
        # Weigh bin contribution by the circular normal distribution
        hist[i, :, :] = exp(orientation_kappa * cos(grad_ori - o))
        # Weigh bin contribution by the gradient magnitude
        hist[i, :, :] = np.multiply(hist[i, :, :], grad_mag)

    # Smooth orientation histograms for the center and all rings.
    sigmas = [sigmas[0]] + sigmas
    hist_smooth = np.empty((rings + 1,) + hist.shape, dtype=float)
    for i in range(rings + 1):
        for j in range(orientations):
            hist_smooth[i, j, :, :] = gaussian_filter(hist[j, :, :],
                                                      sigma=sigmas[i])

    # Assemble descriptor grid.
    theta = [2 * pi * j / histograms for j in range(histograms)]
    desc_dims = (rings * histograms + 1) * orientations
    descs = np.empty((desc_dims, img.shape[0] - 2 * radius,
                      img.shape[1] - 2 * radius))
    descs[:orientations, :, :] = hist_smooth[0, :, radius:-radius,
                                             radius:-radius]
    idx = orientations
    for i in range(rings):
        for j in range(histograms):
            y_min = radius + int(round(ring_radii[i] * sin(theta[j])))
            y_max = descs.shape[1] + y_min
            x_min = radius + int(round(ring_radii[i] * cos(theta[j])))
            x_max = descs.shape[2] + x_min
            descs[idx:idx + orientations, :, :] = hist_smooth[i + 1, :,
                                                              y_min:y_max,
                                                              x_min:x_max]
            idx += orientations
    descs = descs[:, ::step, ::step]
    descs = descs.swapaxes(0, 1).swapaxes(1, 2)

    # Normalize descriptors.
    if normalization != 'off':
        descs += 1e-10
        if normalization == 'l1':
            descs /= np.sum(descs, axis=2)[:, :, np.newaxis]
        elif normalization == 'l2':
            descs /= sqrt(np.sum(descs ** 2, axis=2))[:, :, np.newaxis]
        elif normalization == 'daisy':
            for i in range(0, desc_dims, orientations):
                norms = sqrt(np.sum(descs[:, :, i:i + orientations] ** 2,
                                    axis=2))
                descs[:, :, i:i + orientations] /= norms[:, :, np.newaxis]

    if visualize:
        descs_img = gray2rgb(img)
        for i in range(descs.shape[0]):
            for j in range(descs.shape[1]):
                # Draw center histogram sigma
                color = [1, 0, 0]
                desc_y = i * step + radius
                desc_x = j * step + radius
                rows, cols, val = draw.circle_perimeter_aa(desc_y, desc_x, int(sigmas[0]))
                draw.set_color(descs_img, (rows, cols), color, alpha=val)
                max_bin = np.max(descs[i, j, :])
                for o_num, o in enumerate(orientation_angles):
                    # Draw center histogram bins
                    bin_size = descs[i, j, o_num] / max_bin
                    dy = sigmas[0] * bin_size * sin(o)
                    dx = sigmas[0] * bin_size * cos(o)
                    rows, cols, val = draw.line_aa(desc_y, desc_x, int(desc_y + dy),
                                                   int(desc_x + dx))
                    draw.set_color(descs_img, (rows, cols), color, alpha=val)
                for r_num, r in enumerate(ring_radii):
                    color_offset = float(1 + r_num) / rings
                    color = (1 - color_offset, 1, color_offset)
                    for t_num, t in enumerate(theta):
                        # Draw ring histogram sigmas
                        hist_y = desc_y + int(round(r * sin(t)))
                        hist_x = desc_x + int(round(r * cos(t)))
                        rows, cols, val = draw.circle_perimeter_aa(hist_y, hist_x,
                                                                   int(sigmas[r_num + 1]))
                        draw.set_color(descs_img, (rows, cols), color, alpha=val)
                        for o_num, o in enumerate(orientation_angles):
                            # Draw histogram bins
                            bin_size = descs[i, j, orientations + r_num *
                                             histograms * orientations +
                                             t_num * orientations + o_num]
                            bin_size /= max_bin
                            dy = sigmas[r_num + 1] * bin_size * sin(o)
                            dx = sigmas[r_num + 1] * bin_size * cos(o)
                            rows, cols, val = draw.line_aa(hist_y, hist_x,
                                                           int(hist_y + dy),
                                                           int(hist_x + dx))
                            draw.set_color(descs_img, (rows, cols), color, alpha=val)
        return descs, descs_img
    else:
        return descs
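
A minimal usage sketch, following the scikit-image gallery example; the commented shape assumes the 512x512 camera() test image:

from skimage.data import camera
from skimage.feature import daisy

descs, descs_img = daisy(camera(), step=180, radius=58, rings=2,
                         histograms=6, orientations=8, visualize=True)
print(descs.shape)   # (3, 3, 104): R = (2 * 6 + 1) * 8 = 104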

Example 46

Project: scikit-learn Source File: nmf.py
Function: non_negative_factorization
def non_negative_factorization(X, W=None, H=None, n_components=None,
                               init='random', update_H=True, solver='cd',
                               tol=1e-4, max_iter=200, alpha=0., l1_ratio=0.,
                               regularization=None, random_state=None,
                               verbose=0, shuffle=False, nls_max_iter=2000,
                               sparseness=None, beta=1, eta=0.1):
    """Compute Non-negative Matrix Factorization (NMF)

    Find two non-negative matrices (W, H) whose product approximates the non-
    negative matrix X. This factorization can be used for example for
    dimensionality reduction, source separation or topic extraction.

    The objective function is::

        0.5 * ||X - WH||_Fro^2
        + alpha * l1_ratio * ||vec(W)||_1
        + alpha * l1_ratio * ||vec(H)||_1
        + 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
        + 0.5 * alpha * (1 - l1_ratio) * ||H||_Fro^2

    Where::

        ||A||_Fro^2 = \sum_{i,j} A_{ij}^2 (Frobenius norm)
        ||vec(A)||_1 = \sum_{i,j} abs(A_{ij}) (Elementwise L1 norm)

    The objective function is minimized with an alternating minimization of W
    and H. If H is given and update_H=False, it solves for W only.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Constant matrix.

    W : array-like, shape (n_samples, n_components)
        If init='custom', it is used as initial guess for the solution.

    H : array-like, shape (n_components, n_features)
        If init='custom', it is used as initial guess for the solution.
        If update_H=False, it is used as a constant, to solve for W only.

    n_components : integer
        Number of components, if n_components is not set all features
        are kept.

    init :  None | 'random' | 'nndsvd' | 'nndsvda' | 'nndsvdar' | 'custom'
        Method used to initialize the procedure.
        Default: 'nndsvd' if n_components < n_features, otherwise random.
        Valid options:

        - 'random': non-negative random matrices, scaled with:
            sqrt(X.mean() / n_components)

        - 'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
            initialization (better for sparseness)

        - 'nndsvda': NNDSVD with zeros filled with the average of X
            (better when sparsity is not desired)

        - 'nndsvdar': NNDSVD with zeros filled with small random values
            (generally faster, less accurate alternative to NNDSVDa
            for when sparsity is not desired)

        - 'custom': use custom matrices W and H

    update_H : boolean, default: True
        If True, both W and H will be estimated from initial guesses.
        If False, only W will be estimated.

    solver : 'pg' | 'cd'
        Numerical solver to use:
        'pg' is a (deprecated) Projected Gradient solver.
        'cd' is a Coordinate Descent solver.

    tol : float, default: 1e-4
        Tolerance of the stopping condition.

    max_iter : integer, default: 200
        Maximum number of iterations before timing out.

    alpha : double, default: 0.
        Constant that multiplies the regularization terms.

    l1_ratio : double, default: 0.
        The regularization mixing parameter, with 0 <= l1_ratio <= 1.
        For l1_ratio = 0 the penalty is an elementwise L2 penalty
        (aka Frobenius Norm).
        For l1_ratio = 1 it is an elementwise L1 penalty.
        For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.

    regularization : 'both' | 'components' | 'transformation' | None
        Select whether the regularization affects the components (H), the
        transformation (W), both or none of them.

    random_state : integer seed, RandomState instance, or None (default)
        Random number generator seed control.

    verbose : integer, default: 0
        The verbosity level.

    shuffle : boolean, default: False
        If true, randomize the order of coordinates in the CD solver.

    nls_max_iter : integer, default: 2000
        Number of iterations in NLS subproblem.
        Used only in the deprecated 'pg' solver.

    sparseness : 'data' | 'components' | None, default: None
        Where to enforce sparsity in the model.
        Used only in the deprecated 'pg' solver.

    beta : double, default: 1
        Degree of sparseness, if sparseness is not None. Larger values mean
        more sparseness. Used only in the deprecated 'pg' solver.

    eta : double, default: 0.1
        Degree of correctness to maintain, if sparseness is not None. Smaller
        values mean larger error. Used only in the deprecated 'pg' solver.

    Returns
    -------
    W : array-like, shape (n_samples, n_components)
        Solution to the non-negative least squares problem.

    H : array-like, shape (n_components, n_features)
        Solution to the non-negative least squares problem.

    n_iter : int
        Actual number of iterations.

    References
    ----------
    C.-J. Lin. Projected gradient methods for non-negative matrix
    factorization. Neural Computation, 19(2007), 2756-2779.
    http://www.csie.ntu.edu.tw/~cjlin/nmf/

    Cichocki, Andrzej, and P. H. A. N. Anh-Huy. "Fast local algorithms for
    large scale nonnegative matrix and tensor factorizations."
    IEICE transactions on fundamentals of electronics, communications and
    computer sciences 92.3: 708-721, 2009.
    """

    X = check_array(X, accept_sparse=('csr', 'csc'))
    check_non_negative(X, "NMF (input X)")
    _check_string_param(sparseness, solver)

    n_samples, n_features = X.shape
    if n_components is None:
        n_components = n_features

    if not isinstance(n_components, INTEGER_TYPES) or n_components <= 0:
        raise ValueError("Number of components must be a positive integer;"
                         " got (n_components=%r)" % n_components)
    if not isinstance(max_iter, INTEGER_TYPES) or max_iter < 0:
        raise ValueError("Maximum number of iterations must be a positive integer;"
                         " got (max_iter=%r)" % max_iter)
    if not isinstance(tol, numbers.Number) or tol < 0:
        raise ValueError("Tolerance for stopping criteria must be "
                         "positive; got (tol=%r)" % tol)

    # check W and H, or initialize them
    if init == 'custom' and update_H:
        _check_init(H, (n_components, n_features), "NMF (input H)")
        _check_init(W, (n_samples, n_components), "NMF (input W)")
    elif not update_H:
        _check_init(H, (n_components, n_features), "NMF (input H)")
        W = np.zeros((n_samples, n_components))
    else:
        W, H = _initialize_nmf(X, n_components, init=init,
                               random_state=random_state)

    if solver == 'pg':
        warnings.warn("'pg' solver will be removed in release 0.19."
                      " Use 'cd' solver instead.", DeprecationWarning)
        if update_H:  # fit_transform
            W, H, n_iter = _fit_projected_gradient(X, W, H, tol,
                                                   max_iter,
                                                   nls_max_iter,
                                                   alpha, l1_ratio,
                                                   sparseness,
                                                   beta, eta)
        else:  # transform
            W, H, n_iter = _update_projected_gradient_w(X, W, H,
                                                        tol, nls_max_iter,
                                                        alpha, l1_ratio,
                                                        sparseness, beta,
                                                        eta)
    elif solver == 'cd':
        W, H, n_iter = _fit_coordinate_descent(X, W, H, tol,
                                               max_iter,
                                               alpha, l1_ratio,
                                               regularization,
                                               update_H=update_H,
                                               verbose=verbose,
                                               shuffle=shuffle,
                                               random_state=random_state)
    else:
        raise ValueError("Invalid solver parameter '%s'." % solver)

    if n_iter == max_iter:
        warnings.warn("Maximum number of iteration %d reached. Increase it to"
                      " improve convergence." % max_iter, ConvergenceWarning)

    return W, H, n_iter
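
A minimal usage sketch, assuming a scikit-learn release matching the snapshot above (where non_negative_factorization is importable from sklearn.decomposition):

import numpy as np
from sklearn.decomposition import non_negative_factorization

X = np.abs(np.random.RandomState(0).randn(6, 5))   # small non-negative matrix
W, H, n_iter = non_negative_factorization(X, n_components=2, init='random',
                                          random_state=0)
print(W.shape, H.shape, n_iter)   # (6, 2) (2, 5) and the iterations actually used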

Example 47

Project: RoBO Source File: multi_task_bayesian_optimization.py
    def run(self, num_iterations=10, X=None, Y=None, C=None):
        """
        Runs the main Bayesian optimization loop

        Parameters
        ----------
        num_iterations : int, optional
            Specifies the number of iterations.
        X : (N, D) numpy array, optional
            Initial points where BO starts from.
        Y : (N, D) numpy array, optional
            The function values of the initial points. Make sure the number of
            points is the same.
        C : (N, D) numpy array, optional
            The costs of the initial points. Make sure the number of
            points is the same.

        Returns
        -------
        incumbent : (1, D) numpy array
            The estimated optimum that was found after the specified number of
            iterations.
        """
        self.time_start = time.time()

        if X is None and Y is None and C is None:
            self.time_func_eval = np.zeros([self.init_points])
            self.time_overhead = np.zeros([self.init_points])
            self.X = np.zeros([1, self.task.n_dims])
            self.Y = np.zeros([1, 1])
            self.C = np.zeros([1, 1])

            init = self.initial_design(self.task.X_lower,
                                       self.task.X_upper,
                                       self.init_points)

            # Evaluate only on cheaper task
            init[:, -1] = 0

            for i, x in enumerate(init):
                x = x[np.newaxis, :]

                logger.info("Evaluate: %s" % x)

                start_time = time.time()
                y, c = self.task.evaluate(x)

                # Transform cost to log scale
                c = np.log(c)

                if i == 0:
                    self.X[i] = x[0, :]
                    self.Y[i] = y[0, :]
                    self.C[i] = c[0, :]
                    self.time_func_eval[i] = time.time() - start_time
                    self.time_overhead[i] = 0.0
                else:
                    self.X = np.append(self.X, x, axis=0)
                    self.Y = np.append(self.Y, y, axis=0)
                    self.C = np.append(self.C, c, axis=0)

                    time_feval = np.array([time.time() - start_time])
                    self.time_func_eval = np.append(self.time_func_eval,
                                                    time_feval, axis=0)
                    self.time_overhead = np.append(self.time_overhead,
                                                   np.array([0]), axis=0)
                logger.info("Configuration achieved a"
                            "performance of %f and %f costs in %f seconds" %
                            (self.Y[i], self.C[i], self.time_func_eval[i]))

                # Use best point seen so far as incumbent
                best_idx = np.argmin(self.Y)
                # Copy because we are going to change the system size to smax
                self.incumbent = np.copy(self.X[best_idx])
                self.incumbent_value = self.Y[best_idx]
                self.runtime.append(time.time() - self.time_start)

                self.incumbent[-1] = 1

                self.incumbent = self.incumbent[np.newaxis, :]
                self.incumbent_value = self.incumbent_value[np.newaxis, :]

                self.incumbents.append(self.incumbent)
                self.incumbent_values.append(self.incumbent_value)

                if self.save_dir is not None and (i) % self.num_save == 0:
                    self.save_iteration(i, costs=self.C[-1],
                                        hyperparameters=None,
                                        acquisition_value=0)

        else:
            self.X = X
            self.Y = Y
            self.C = C
            self.time_func_eval = np.zeros([self.X.shape[0]])
            self.time_overhead = np.zeros([self.X.shape[0]])

        for it in range(self.init_points, num_iterations):
            logger.info("Start iteration %d ... ", it)
            # Choose a new configuration
            start_time = time.time()
            if it % self.train_intervall == 0:
                do_optimize = True
            else:
                do_optimize = False
            new_x = self.choose_next(self.X, self.Y, self.C, do_optimize)

            # Estimate the current incumbent from the posterior
            # over the configuration space
            start_time_inc = time.time()
            startpoints = init_random_uniform(self.task.X_lower,
                                              self.task.X_upper,
                                              self.n_restarts)
            self.incumbent, self.incumbent_value = \
                self.estimator.estimate_incumbent(startpoints)

            self.incumbents.append(self.incumbent)
            self.incumbent_values.append(self.incumbent_value)

            logger.info("New incumbent %s found in %f seconds",
                        str(self.incumbent), time.time() - start_time_inc)

            # Compute the time we needed to pick a new point
            time_overhead = time.time() - start_time
            self.time_overhead = np.append(self.time_overhead,
                                           np.array([time_overhead]))
            logger.info("Optimization overhead was "
                            "%f seconds" % (self.time_overhead[-1]))

            # Evaluate the configuration
            logger.info("Evaluate candidate %s" % (str(new_x)))
            start_time = time.time()
            new_y, new_cost = self.task.evaluate(new_x)
            time_func_eval = time.time() - start_time

            # We model the log costs
            new_cost = np.log(new_cost)

            self.time_func_eval = np.append(self.time_func_eval,
                                            np.array([time_func_eval]))

            logger.info("Configuration achieved a performance "
                    "of %f in %s seconds" % (new_y[0, 0], new_cost[0]))

            # Add the new observations to the data
            self.X = np.append(self.X, new_x, axis=0)
            self.Y = np.append(self.Y, new_y, axis=0)
            self.C = np.append(self.C, new_cost, axis=0)
            self.runtime.append(time.time() - self.time_start)

            if self.save_dir is not None and (it) % self.num_save == 0:
                hypers = self.model.hypers

                self.save_iteration(it, costs=self.C[-1],
                                    hyperparameters=hypers,
                                    acquisition_value=self.acquisition_func(new_x))

        logger.info("Return %s as incuembent" % (str(self.incuembent)))
        return self.incuembent
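
The bookkeeping pattern above (preallocate a placeholder row with np.zeros, overwrite it on the first iteration, then grow with np.append) can be isolated in a short sketch; the shapes and data below are made up for illustration:

import numpy as np

n_dims = 3
X = np.zeros([1, n_dims])            # placeholder first row, as in the loop above
for i, x in enumerate(np.random.RandomState(0).rand(4, n_dims)):
    x = x[np.newaxis, :]
    if i == 0:
        X[i] = x[0, :]               # overwrite the preallocated row
    else:
        X = np.append(X, x, axis=0)  # each append copies the whole array
print(X.shape)                       # (4, 3)

Note that np.append copies the whole array on every call, so when the final number of points is known in advance it is cheaper to allocate np.zeros([n, n_dims]) once and assign rows.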

Example 48

Project: pystruct Source File: n_slack_ssvm.py
    def fit(self, X, Y, constraints=None, warm_start=None, initialize=True):
        """Learn parameters using cutting plane method.

        Parameters
        ----------
        X : iterable
            Training instances. Contains the structured input objects.
            No requirement on the particular form of entries of X is made.

        Y : iterable
            Training labels. Contains the structured labels for inputs in X.
            Needs to have the same length as X.

        constraints : iterable
            Known constraints for warm-starts. List of same length as X.
            Each entry is itself a list of constraints for a given instance x.
            Each constraint is of the form [y_hat, delta_joint_feature, loss], where
            y_hat is a labeling, ``delta_joint_feature = joint_feature(x, y) - joint_feature(x, y_hat)``
            and loss is the loss for predicting y_hat instead of the true label
            y.

        initialize : boolean, default=True
            Whether to initialize the model for the data.
            Leave this True unless you really know what you are doing.
        """
        if self.verbose:
            print("Training n-slack dual structural SVM")
        cvxopt.solvers.options['show_progress'] = self.verbose > 3
        if initialize:
            self.model.initialize(X, Y)
        self.w = np.zeros(self.model.size_joint_feature)
        n_samples = len(X)
        stopping_criterion = False
        if constraints is None:
            # fresh start
            constraints = [[] for i in range(n_samples)]
            self.last_active = [[] for i in range(n_samples)]
            self.objective_curve_ = []
            self.primal_objective_curve_ = []
            self.timestamps_ = [time()]
        else:
            # warm start
            objective = self._solve_n_slack_qp(constraints, n_samples)
        try:
            # catch ctrl+c to stop training
            # we have to update at least once after going through the dataset
            for iteration in range(self.max_iter):
                # main loop
                self.timestamps_.append(time() - self.timestamps_[0])
                if self.verbose > 0:
                    print("iteration %d" % iteration)
                if self.verbose > 2:
                    print(self)
                new_constraints = 0
                # generate slices through dataset from batch_size
                if self.batch_size < 1 and not self.batch_size == -1:
                    raise ValueError("batch_size should be integer >= 1 or -1,"
                                     "got %s." % str(self.batch_size))
                batch_size = (self.batch_size if self.batch_size != -1 else
                              len(X))
                n_batches = int(np.ceil(float(len(X)) / batch_size))
                slices = gen_even_slices(n_samples, n_batches)
                indices = np.arange(n_samples)
                slack_sum = 0
                for batch in slices:
                    new_constraints_batch = 0
                    verbose = max(0, self.verbose - 3)
                    X_b = X[batch]
                    Y_b = Y[batch]
                    indices_b = indices[batch]
                    candidate_constraints = Parallel(
                        n_jobs=self.n_jobs, verbose=verbose)(
                            delayed(find_constraint)(self.model, x, y, self.w)
                            for x, y in zip(X_b, Y_b))

                    # for each batch, gather new constraints
                    for i, x, y, constraint in zip(indices_b, X_b, Y_b,
                                                   candidate_constraints):
                        # loop over samples in batch
                        y_hat, delta_joint_feature, slack, loss = constraint
                        slack_sum += slack

                        if self.verbose > 3:
                            print("current slack: %f" % slack)

                        if not loss > 0:
                            # can have y != y_hat but loss = 0 in latent svm.
                            # we need this here as djoint_feature is then != 0
                            continue

                        if self._check_bad_constraint(y_hat, slack,
                                                      constraints[i]):
                            continue

                        constraints[i].append([y_hat, delta_joint_feature, loss])
                        new_constraints_batch += 1

                    # after processing the slice, solve the qp
                    if new_constraints_batch:
                        objective = self._solve_n_slack_qp(constraints,
                                                           n_samples)
                        new_constraints += new_constraints_batch

                self.objective_curve_.append(objective)
                self._compute_training_loss(X, Y, iteration)

                primal_objective = (self.C
                                    * slack_sum
                                    + np.sum(self.w ** 2) / 2)
                self.primal_objective_curve_.append(primal_objective)

                if self.verbose > 0:
                    print("new constraints: %d, "
                          "cutting plane objective: %f primal objective: %f" %
                          (new_constraints, objective, primal_objective))

                if new_constraints == 0:
                    if self.verbose:
                        print("no additional constraints")
                    stopping_criterion = True

                if (iteration > 1 and self.objective_curve_[-1]
                        - self.objective_curve_[-2] < self.tol):
                    if self.verbose:
                        print("objective converged.")
                    stopping_criterion = True

                if stopping_criterion:
                    if (self.switch_to is not None and
                            self.model.inference_method != self.switch_to):
                        if self.verbose:
                            print("Switching to %s inference" %
                                  str(self.switch_to))
                        self.model.inference_method_ = \
                            self.model.inference_method
                        self.model.inference_method = self.switch_to
                        stopping_criterion = False
                        continue
                    else:
                        break

                if self.verbose > 5:
                    print(self.w)

                if self.logger is not None:
                    self.logger(self, iteration)
        except KeyboardInterrupt:
            pass

        self.constraints_ = constraints
        if self.verbose and self.n_jobs == 1:
            print("calls to inference: %d" % self.model.inference_calls)

        if self.verbose:
            print("Computing final objective.")
        self.timestamps_.append(time() - self.timestamps_[0])
        self.primal_objective_curve_.append(self._objective(X, Y))
        self.objective_curve_.append(objective)
        if self.logger is not None:
            self.logger(self, 'final')
        return self
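
A quick sketch of the batching helper used above, sklearn.utils.gen_even_slices, which splits a range of n_samples indices into nearly even contiguous slices; the sizes below are arbitrary:

import numpy as np
from sklearn.utils import gen_even_slices

n_samples, batch_size = 10, 4
n_batches = int(np.ceil(n_samples / batch_size))  # 3 batches
indices = np.arange(n_samples)
for batch in gen_even_slices(n_samples, n_batches):
    print(indices[batch])  # [0 1 2 3], then [4 5 6], then [7 8 9]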

Example 49

Project: pylon Source File: ipopf.py
    def solve(self):
        """ Solves AC optimal power flow.
        """
        case = self.om.case
        base_mva = case.base_mva
        # TODO: Explain this value.
        self.opt["cost_mult"] = 1e-4

        # Unpack the OPF model.
        bs, ln, gn, _ = self._unpack_model(self.om)
        # Compute problem dimensions.
        ipol, _, nb, nl, _, ny, nxyz = self._dimension_data(bs, ln, gn)

        # Compute problem dimensions.
        ng = len(gn)
#        gpol = [g for g in gn if g.pcost_model == POLYNOMIAL]
        # Indexes of constrained lines.
        il = array([i for i,l in enumerate(ln) if 0.0 < l.rate_a < 1e10])
        nl2 = len(il)

        # Linear constraints (l <= A*x <= u).
        A, l, u = self.om.linear_constraints()
#        AA, bb = self._linear_constraints(self.om)

        _, xmin, xmax = self._var_bounds()

        # Select an interior initial point for interior point solver.
        x0 = self._initial_interior_point(bs, gn, xmin, xmax, ny)

        # Build admittance matrices.
        Ybus, Yf, Yt = case.Y

        # Optimisation variables.
        Va = self.om.get_var("Va")
        Vm = self.om.get_var("Vm")
        Pg = self.om.get_var("Pg")
        Qg = self.om.get_var("Qg")

        # Adds a constraint on the reference bus angles.
#        xmin, xmax = self._ref_bus_angle_constraint(bs, Va, xmin, xmax)

        def f_fcn(x, user_data=None):
            """ Evaluates the objective function.
            """
            p_gen = x[Pg.i1:Pg.iN + 1] # Active generation in p.u.
            q_gen = x[Qg.i1:Qg.iN + 1] # Reactive generation in p.u.

            # Polynomial cost of P and Q.
            xx = r_[p_gen, q_gen] * base_mva
            if len(ipol) > 0:
                f = sum([g.total_cost(xx[i]) for i,g in enumerate(gn)])
            else:
                f = 0

            # Piecewise linear cost of P and Q.
            if ny:
                y = self.om.get_var("y")
                ccost = csr_matrix((ones(ny),
                    (range(y.i1, y.iN + 1), zeros(ny))), shape=(nxyz, 1)).T
                f = f + ccost * x
            else:
                ccost = zeros((1, nxyz))
                # TODO: Generalised cost term.

            return f


        def df_fcn(x, usr_data=None):
            """ Calculates gradient of the objective function.
            """
            p_gen = x[Pg.i1:Pg.iN + 1] # Active generation in p.u.
            q_gen = x[Qg.i1:Qg.iN + 1] # Reactive generation in p.u.

            xx = r_[p_gen, q_gen] * base_mva

            if ny > 0:
                y = self.om.get_var("y")
                iy = range(y.i1, y.iN + 1)
                ccost = \
                    csr_matrix((ones(ny), (iy, zeros(ny))), shape=(nxyz, 1)).T
            else:
                ccost = zeros((1, nxyz))
                # TODO: Generalised cost term.

            iPg = range(Pg.i1, Pg.iN + 1)
            iQg = range(Qg.i1, Qg.iN + 1)

            # Polynomial cost of P and Q.
            df_dPgQg = zeros((2 * ng, 1))        # w.r.t p.u. Pg and Qg
#            df_dPgQg[ipol] = matrix([g.poly_cost(xx[i], 1) for g in gpol])
#            for i, g in enumerate(gn):
#                der = polyder(list(g.p_cost))
#                df_dPgQg[i] = polyval(der, xx[i]) * base_mva
            for i in ipol:
                df_dPgQg[i] = \
                    base_mva * polyval(polyder(list(gn[i].p_cost)), xx[i])

            df = zeros((nxyz, 1))
            df[iPg] = df_dPgQg[:ng]
            df[iQg] = df_dPgQg[ng:ng + ng]

            # Piecewise linear cost of P and Q.
            df = df + ccost.T
            # TODO: Generalised cost term.

            return asarray(df).flatten()


        def g_fcn(x, usr_data=None):
            """ Evaluates the non-linear constraint values.
            """
            Pgen = x[Pg.i1:Pg.iN + 1] # Active generation in p.u.
            Qgen = x[Qg.i1:Qg.iN + 1] # Reactive generation in p.u.

            for i, g in enumerate(gn):
                g.p = Pgen[i] * base_mva # active generation in MW
                g.q = Qgen[i] * base_mva # reactive generation in MVAr

            # Rebuild the net complex bus power injection vector in p.u.
            Sbus = case.getSbus(bs)

            Vang = x[Va.i1:Va.iN + 1]
            Vmag = x[Vm.i1:Vm.iN + 1]
            V = Vmag * exp(1j * Vang)

            # Evaluate the power flow equations.
            mis = V * conj(Ybus * V) - Sbus

            # Equality constraints (power flow).
            g = r_[mis.real,  # active power mismatch for all buses
                   mis.imag]  # reactive power mismatch for all buses

            # Inequality constraints (branch flow limits).
            # (line constraint is actually on square of limit)
            flow_max = array([(l.rate_a / base_mva)**2 for l in ln])
            # FIXME: There must be a more elegant method for this.
            for i, v in enumerate(flow_max):
                if v == 0.0:
                    flow_max[i] = Inf

            if self.flow_lim == IFLOW:
                If = Yf * V
                It = Yt * V
                # Branch current limits.
                h = r_[(If * conj(If)) - flow_max,
                       (It * conj(It)) - flow_max]
            else:
                i_fbus = [e.from_bus._i for e in ln]
                i_tbus = [e.to_bus._i for e in ln]
                # Complex power injected at "from" bus (p.u.).
                Sf = V[i_fbus] * conj(Yf * V)
                # Complex power injected at "to" bus (p.u.).
                St = V[i_tbus] * conj(Yt * V)
                if self.flow_lim == PFLOW: # active power limit, P (Pan Wei)
                    # Branch real power limits.
                    h = r_[Sf.real**2 - flow_max,
                           St.real**2 - flow_max]
                elif self.flow_lim == SFLOW: # apparent power limit, |S|
                    # Branch apparent power limits.
                    h = r_[(Sf * conj(Sf)) - flow_max,
                           (St * conj(St)) - flow_max].real
                else:
                    raise ValueError

            return r_[g, h]


        def dg_fcn(x, flag, usr_data=None):
            """ Calculates the Jacobian matrix. It takes two arguments, the
                first is the variable x and the second is a Boolean flag. If
                the flag is true, the function returns a tuple of arrays
                (row, col) to indicate the sparse structure of the Jacobian
                matrix. If the flag is false the function returns the values
                of the Jacobian matrix with length nnzj.
            """
            iVa = range(Va.i1, Va.iN + 1)
            iVm = range(Vm.i1, Vm.iN + 1)
            iPg = range(Pg.i1, Pg.iN + 1)
            iQg = range(Qg.i1, Qg.iN + 1)
            iVaVmPgQg = r_[iVa, iVm, iPg, iQg].T

            Vang = x[Va.i1:Va.iN + 1]
            Vmag = x[Vm.i1:Vm.iN + 1]
            V = Vmag * exp(1j * Vang)

            # Compute partials of injected bus powers.
            dSbus_dVm, dSbus_dVa = case.dSbus_dV(Ybus, V)

            i_gbus = [gen.bus._i for gen in gn]
            neg_Cg = csr_matrix((-ones(ng), (i_gbus, range(ng))), (nb, ng))

            # Transposed Jacobian of the power balance equality constraints.
            dg = lil_matrix((nxyz, 2 * nb))

            blank = csr_matrix((nb, ng))
            dg[iVaVmPgQg, :] = vstack([
                hstack([dSbus_dVa.real, dSbus_dVm.real, neg_Cg, blank]),
                hstack([dSbus_dVa.imag, dSbus_dVm.imag, blank, neg_Cg])
            ], "csr").T

            # Compute partials of flows w.r.t V.
            if self.flow_lim == IFLOW:
                dFf_dVa, dFf_dVm, dFt_dVa, dFt_dVm, Ff, Ft = \
                    case.dIbr_dV(Yf, Yt, V)
            else:
                dFf_dVa, dFf_dVm, dFt_dVa, dFt_dVm, Ff, Ft = \
                    case.dSbr_dV(Yf, Yt, V, bs, ln)
            if self.flow_lim == PFLOW:
                dFf_dVa = dFf_dVa.real
                dFf_dVm = dFf_dVm.real
                dFt_dVa = dFt_dVa.real
                dFt_dVm = dFt_dVm.real
                Ff = Ff.real
                Ft = Ft.real

            # Squared magnitude of flow (complex power, current or real power).
            df_dVa, df_dVm, dt_dVa, dt_dVm = \
                case.dAbr_dV(dFf_dVa, dFf_dVm, dFt_dVa, dFt_dVm, Ff, Ft)

            # Construct Jacobian of inequality constraints (branch limits) and
            # transpose it.
            dh = lil_matrix((nxyz, 2 * nl))
            dh[r_[iVa, iVm].T, :] = vstack([hstack([df_dVa, df_dVm]),
                                            hstack([dt_dVa, dt_dVm])], "csr").T

            J = vstack([dg, dh, A]).tocoo()

            if flag:
                return (J.row, J.col)
            else:
                return J.data


        def h_fcn(x, lagrange, obj_factor, flag, usr_data=None):
            """ Evaluates the Hessian of the Lagrangian.
            """
            neqnln = 2 * nb
            niqnln = 2 * len(il) # no. of lines with constraints

            Pgen = x[Pg.i1:Pg.iN + 1] # Active generation in p.u.
            Qgen = x[Qg.i1:Qg.iN + 1] # Reactive generation in p.u.

            for i, g in enumerate(gn):
                g.p = Pgen[i] * base_mva # active generation in MW
                g.q = Qgen[i] * base_mva # reactive generation in MVAr

            Vang = x[Va.i1:Va.iN + 1]
            Vmag = x[Vm.i1:Vm.iN + 1]
            V = Vmag * exp(1j * Vang)
            nxtra = nxyz - 2 * nb

            #------------------------------------------------------------------
            #  Evaluate d2f.
            #------------------------------------------------------------------

            d2f_dPg2 = lil_matrix((ng, 1)) # w.r.t p.u. Pg
            d2f_dQg2 = lil_matrix((ng, 1)) # w.r.t p.u. Qg]

            for i in ipol:
                d2f_dPg2[i, 0] = polyval(polyder(list(gn[i].p_cost), 2),
                                         Pg.v0[i] * base_mva) * base_mva**2
#            for i in ipol:
#                d2f_dQg2[i] = polyval(polyder(list(gn[i].p_cost), 2),
#                                      Qg.v0[i] * base_mva) * base_mva**2

            i = r_[range(Pg.i1, Pg.iN + 1), range(Qg.i1, Qg.iN + 1)]

            d2f = csr_matrix((vstack([d2f_dPg2, d2f_dQg2]).toarray().flatten(),
                              (i, i)), shape=(nxyz, nxyz))
            # TODO: Generalised cost model.
            d2f = d2f * self.opt["cost_mult"]

            #------------------------------------------------------------------
            #  Evaluate Hessian of power balance constraints.
            #------------------------------------------------------------------

            eqnonlin = lagrange[:neqnln]
#            nlam = len(lagrange["eqnonlin"]) / 2
            nlam = len(eqnonlin) // 2
            lamP = eqnonlin[:nlam]
            lamQ = eqnonlin[nlam:nlam + nlam]
            Gpaa, Gpav, Gpva, Gpvv = case.d2Sbus_dV2(Ybus, V, lamP)
            Gqaa, Gqav, Gqva, Gqvv = case.d2Sbus_dV2(Ybus, V, lamQ)

            d2G = vstack([
                hstack([
                    vstack([hstack([Gpaa, Gpav]),
                            hstack([Gpva, Gpvv])]).real +
                    vstack([hstack([Gqaa, Gqav]),
                            hstack([Gqva, Gqvv])]).imag,
                    csr_matrix((2 * nb, nxtra))]),
                hstack([
                    csr_matrix((nxtra, 2 * nb)),
                    csr_matrix((nxtra, nxtra))
                ])
            ], "csr")

            #------------------------------------------------------------------
            #  Evaluate Hessian of flow constraints.
            #------------------------------------------------------------------

            ineqnonlin = lagrange[neqnln:neqnln + niqnln]
            nmu = len(ineqnonlin) // 2
            muF = ineqnonlin[:nmu]
            muT = ineqnonlin[nmu:nmu + nmu]
            if self.flow_lim == "I":
                dIf_dVa, dIf_dVm, dIt_dVa, dIt_dVm, If, It = \
                    case.dIbr_dV(Yf, Yt, V)
                Hfaa, Hfav, Hfva, Hfvv = \
                    case.d2AIbr_dV2(dIf_dVa, dIf_dVm, If, Yf, V, muF)
                Htaa, Htav, Htva, Htvv = \
                    case.d2AIbr_dV2(dIt_dVa, dIt_dVm, It, Yt, V, muT)
            else:
                f = [e.from_bus._i for e in ln]
                t = [e.to_bus._i for e in ln]
                # Line-bus connection matrices.
                Cf = csr_matrix((ones(nl), (range(nl), f)), (nl, nb))
                Ct = csr_matrix((ones(nl), (range(nl), t)), (nl, nb))
                dSf_dVa, dSf_dVm, dSt_dVa, dSt_dVm, Sf, St = \
                    case.dSbr_dV(Yf, Yt, V)
                if self.flow_lim == PFLOW:
                    Hfaa, Hfav, Hfva, Hfvv = \
                        case.d2ASbr_dV2(dSf_dVa.real, dSf_dVm.real,
                                        Sf.real, Cf, Yf, V, muF)
                    Htaa, Htav, Htva, Htvv = \
                        case.d2ASbr_dV2(dSt_dVa.real, dSt_dVm.real,
                                        St.real, Ct, Yt, V, muT)
                elif self.flow_lim == SFLOW:
                    Hfaa, Hfav, Hfva, Hfvv = \
                        case.d2ASbr_dV2(dSf_dVa, dSf_dVm, Sf, Cf, Yf, V, muF)
                    Htaa, Htav, Htva, Htvv = \
                        case.d2ASbr_dV2(dSt_dVa, dSt_dVm, St, Ct, Yt, V, muT)
                else:
                    raise ValueError

            d2H = vstack([
                hstack([
                    vstack([hstack([Hfaa, Hfav]),
                            hstack([Hfva, Hfvv])]) +
                    vstack([hstack([Htaa, Htav]),
                            hstack([Htva, Htvv])]),
                    csr_matrix((2 * nb, nxtra))
                ]),
                hstack([
                    csr_matrix((nxtra, 2 * nb)),
                    csr_matrix((nxtra, nxtra))
                ])
            ], "csr")

            H = d2f + d2G + d2H

            if flag:
                return (H.row, H.col)
            else:
                return H.data

        n = len(x0) # the number of variables
        gl = r_[zeros(2 * nb), -Inf * ones(2 * nl2), l]
        gu = r_[zeros(2 * nb),       zeros(2 * nl2), u]
        m = len(gl) # the number of constraints
        nnzj = 0 # the number of nonzeros in Jacobian matrix
        nnzh = 0 # the number of non-zeros in Hessian matrix

        nlp = pyipopt.create(n, xmin, xmax, m, gl, gu, nnzj, nnzh,
                             f_fcn, df_fcn, g_fcn, dg_fcn, h_fcn)

#        x, zl, zu, obj = nlp.solve(x0)
        success = nlp.solve(x0)
        nlp.close()

        print "Success:", success
        print "Solution of the primal variables, x"
#        print x
        print "Solution of the bound multipliers, z_L and z_U"
#        print zl, zu
        print "Objective value"

Example 50

Project: scikit-learn Source File: _scipy_sparse_lsqr_backport.py
Function: lsqr
def lsqr(A, b, damp=0.0, atol=1e-8, btol=1e-8, conlim=1e8,
         iter_lim=None, show=False, calc_var=False):
    """Find the least-squares solution to a large, sparse, linear system
    of equations.

    The function solves ``Ax = b``  or  ``min ||b - Ax||^2`` or
    ``min ||Ax - b||^2 + d^2 ||x||^2``.

    The matrix A may be square or rectangular (over-determined or
    under-determined), and may have any rank.

    ::

      1. Unsymmetric equations --    solve  A*x = b

      2. Linear least squares  --    solve  A*x = b
                                     in the least-squares sense

      3. Damped least squares  --    solve  (   A    )*x = ( b )
                                            ( damp*I )     ( 0 )
                                     in the least-squares sense

    Parameters
    ----------
    A : {sparse matrix, ndarray, LinearOperator}
        Representation of an m-by-n matrix.  It is required that
        the linear operator can produce ``Ax`` and ``A^T x``.
    b : (m,) ndarray
        Right-hand side vector ``b``.
    damp : float
        Damping coefficient.
    atol, btol : float, default 1.0e-8
        Stopping tolerances. If both are 1.0e-9 (say), the final
        residual norm should be accurate to about 9 digits.  (The
        final x will usually have fewer correct digits, depending on
        cond(A) and the size of damp.)
    conlim : float
        Another stopping tolerance.  lsqr terminates if an estimate of
        ``cond(A)`` exceeds `conlim`.  For compatible systems ``Ax =
        b``, `conlim` could be as large as 1.0e+12 (say).  For
        least-squares problems, conlim should be less than 1.0e+8.
        Maximum precision can be obtained by setting ``atol = btol =
        conlim = zero``, but the number of iterations may then be
        excessive.
    iter_lim : int
        Explicit limitation on number of iterations (for safety).
    show : bool
        Display an iteration log.
    calc_var : bool
        Whether to estimate diagonals of ``(A'A + damp^2*I)^{-1}``.

    Returns
    -------
    x : ndarray of float
        The final solution.
    istop : int
        Gives the reason for termination.
        1 means x is an approximate solution to Ax = b.
        2 means x approximately solves the least-squares problem.
    itn : int
        Iteration number upon termination.
    r1norm : float
        ``norm(r)``, where ``r = b - Ax``.
    r2norm : float
        ``sqrt( norm(r)^2  +  damp^2 * norm(x)^2 )``.  Equal to `r1norm` if
        ``damp == 0``.
    anorm : float
        Estimate of Frobenius norm of ``Abar = [[A]; [damp*I]]``.
    acond : float
        Estimate of ``cond(Abar)``.
    arnorm : float
        Estimate of ``norm(A'*r - damp^2*x)``.
    xnorm : float
        ``norm(x)``
    var : ndarray of float
        If ``calc_var`` is True, estimates all diagonals of
        ``(A'A)^{-1}`` (if ``damp == 0``) or more generally ``(A'A +
        damp^2*I)^{-1}``.  This is well defined if A has full column
        rank or ``damp > 0``.  (Not sure what var means if ``rank(A)
        < n`` and ``damp = 0.``)

    Notes
    -----
    LSQR uses an iterative method to approximate the solution.  The
    number of iterations required to reach a certain accuracy depends
    strongly on the scaling of the problem.  Poor scaling of the rows
    or columns of A should therefore be avoided where possible.

    For example, in problem 1 the solution is unaltered by
    row-scaling.  If a row of A is very small or large compared to
    the other rows of A, the corresponding row of ( A  b ) should be
    scaled up or down.

    In problems 1 and 2, the solution x is easily recovered
    following column-scaling.  Unless better information is known,
    the nonzero columns of A should be scaled so that they all have
    the same Euclidean norm (e.g., 1.0).

    In problem 3, there is no freedom to re-scale if damp is
    nonzero.  However, the value of damp should be assigned only
    after attention has been paid to the scaling of A.

    The parameter damp is intended to help regularize
    ill-conditioned systems, by preventing the true solution from
    being very large.  Another aid to regularization is provided by
    the parameter acond, which may be used to terminate iterations
    before the computed solution becomes very large.

    If some initial estimate ``x0`` is known and if ``damp == 0``,
    one could proceed as follows:

      1. Compute a residual vector ``r0 = b - A*x0``.
      2. Use LSQR to solve the system  ``A*dx = r0``.
      3. Add the correction dx to obtain a final solution ``x = x0 + dx``.

    This requires that ``x0`` be available before and after the call
    to LSQR.  To judge the benefits, suppose LSQR takes k1 iterations
    to solve A*x = b and k2 iterations to solve A*dx = r0.
    If x0 is "good", norm(r0) will be smaller than norm(b).
    If the same stopping tolerances atol and btol are used for each
    system, k1 and k2 will be similar, but the final solution x0 + dx
    should be more accurate.  The only way to reduce the total work
    is to use a larger stopping tolerance for the second system.
    If some value btol is suitable for A*x = b, the larger value
    btol*norm(b)/norm(r0)  should be suitable for A*dx = r0.

    Preconditioning is another way to reduce the number of iterations.
    If it is possible to solve a related system ``M*x = b``
    efficiently, where M approximates A in some helpful way (e.g. M -
    A has low rank or its elements are small relative to those of A),
    LSQR may converge more rapidly on the system ``A*M(inverse)*z =
    b``, after which x can be recovered by solving M*x = z.

    If A is symmetric, LSQR should not be used!

    Alternatives are the symmetric conjugate-gradient method (cg)
    and/or SYMMLQ.  SYMMLQ is an implementation of symmetric cg that
    applies to any symmetric A and will converge more rapidly than
    LSQR.  If A is positive definite, there are other implementations
    of symmetric cg that require slightly less work per iteration than
    SYMMLQ (but will take the same number of iterations).

    References
    ----------
    .. [1] C. C. Paige and M. A. Saunders (1982a).
           "LSQR: An algorithm for sparse linear equations and
           sparse least squares", ACM TOMS 8(1), 43-71.
    .. [2] C. C. Paige and M. A. Saunders (1982b).
           "Algorithm 583.  LSQR: Sparse linear equations and least
           squares problems", ACM TOMS 8(2), 195-209.
    .. [3] M. A. Saunders (1995).  "Solution of sparse rectangular
           systems using LSQR and CRAIG", BIT 35, 588-604.

    """
    A = aslinearoperator(A)
    if len(b.shape) > 1:
        b = b.squeeze()

    m, n = A.shape
    if iter_lim is None:
        iter_lim = 2 * n
    var = np.zeros(n)

    msg = ('The exact solution is  x = 0                              ',
           'Ax - b is small enough, given atol, btol                  ',
           'The least-squares solution is good enough, given atol     ',
           'The estimate of cond(Abar) has exceeded conlim            ',
           'Ax - b is small enough for this machine                   ',
           'The least-squares solution is good enough for this machine',
           'Cond(Abar) seems to be too large for this machine         ',
           'The iteration limit has been reached                      ')

    if show:
        print(' ')
        print('LSQR            Least-squares solution of  Ax = b')
        str1 = 'The matrix A has %8g rows  and %8g cols' % (m, n)
        str2 = 'damp = %20.14e   calc_var = %8g' % (damp, calc_var)
        str3 = 'atol = %8.2e                 conlim = %8.2e' % (atol, conlim)
        str4 = 'btol = %8.2e               iter_lim = %8g' % (btol, iter_lim)
        print(str1)
        print(str2)
        print(str3)
        print(str4)

    itn = 0
    istop = 0
    nstop = 0
    ctol = 0
    if conlim > 0:
        ctol = 1/conlim
    anorm = 0
    acond = 0
    dampsq = damp**2
    ddnorm = 0
    res2 = 0
    xnorm = 0
    xxnorm = 0
    z = 0
    cs2 = -1
    sn2 = 0

    """
    Set up the first vectors u and v for the bidiagonalization.
    These satisfy  beta*u = b,  alfa*v = A'u.
    """
    __xm = np.zeros(m)  # a vector for temporary holding
    __xn = np.zeros(n)  # a vector for temporary holding
    v = np.zeros(n)
    u = b
    x = np.zeros(n)
    alfa = 0
    beta = np.linalg.norm(u)
    w = np.zeros(n)

    if beta > 0:
        u = (1/beta) * u
        v = A.rmatvec(u)
        alfa = np.linalg.norm(v)

    if alfa > 0:
        v = (1/alfa) * v
        w = v.copy()

    rhobar = alfa
    phibar = beta
    bnorm = beta
    rnorm = beta
    r1norm = rnorm
    r2norm = rnorm

    # Reverse the order here from the original matlab code because
    # there was an error on return when arnorm==0
    arnorm = alfa * beta
    if arnorm == 0:
        print(msg[0])
        return x, istop, itn, r1norm, r2norm, anorm, acond, arnorm, xnorm, var

    head1 = '   Itn      x[0]       r1norm     r2norm '
    head2 = ' Compatible    LS      Norm A   Cond A'

    if show:
        print(' ')
        print(head1, head2)
        test1 = 1
        test2 = alfa / beta
        str1 = '%6g %12.5e' % (itn, x[0])
        str2 = ' %10.3e %10.3e' % (r1norm, r2norm)
        str3 = '  %8.1e %8.1e' % (test1, test2)
        print(str1, str2, str3)

    # Main iteration loop.
    while itn < iter_lim:
        itn = itn + 1
        """
        %     Perform the next step of the bidiagonalization to obtain the
        %     next  beta, u, alfa, v.  These satisfy the relations
        %                beta*u  =  A*v   -  alfa*u,
        %                alfa*v  =  A'*u  -  beta*v.
        """
        u = A.matvec(v) - alfa * u
        beta = np.linalg.norm(u)

        if beta > 0:
            u = (1/beta) * u
            anorm = sqrt(anorm**2 + alfa**2 + beta**2 + damp**2)
            v = A.rmatvec(u) - beta * v
            alfa = np.linalg.norm(v)
            if alfa > 0:
                v = (1 / alfa) * v

        # Use a plane rotation to eliminate the damping parameter.
        # This alters the diagonal (rhobar) of the lower-bidiagonal matrix.
        rhobar1 = sqrt(rhobar**2 + damp**2)
        cs1 = rhobar / rhobar1
        sn1 = damp / rhobar1
        psi = sn1 * phibar
        phibar = cs1 * phibar

        # Use a plane rotation to eliminate the subdiagonal element (beta)
        # of the lower-bidiagonal matrix, giving an upper-bidiagonal matrix.
        cs, sn, rho = _sym_ortho(rhobar1, beta)

        theta = sn * alfa
        rhobar = -cs * alfa
        phi = cs * phibar
        phibar = sn * phibar
        tau = sn * phi

        # Update x and w.
        t1 = phi / rho
        t2 = -theta / rho
        dk = (1 / rho) * w

        x = x + t1 * w
        w = v + t2 * w
        ddnorm = ddnorm + np.linalg.norm(dk)**2

        if calc_var:
            var = var + dk**2

        # Use a plane rotation on the right to eliminate the
        # super-diagonal element (theta) of the upper-bidiagonal matrix.
        # Then use the result to estimate norm(x).
        delta = sn2 * rho
        gambar = -cs2 * rho
        rhs = phi - delta * z
        zbar = rhs / gambar
        xnorm = sqrt(xxnorm + zbar**2)
        gamma = sqrt(gambar**2 + theta**2)
        cs2 = gambar / gamma
        sn2 = theta / gamma
        z = rhs / gamma
        xxnorm = xxnorm + z**2

        # Test for convergence.
        # First, estimate the condition of the matrix  Abar,
        # and the norms of  rbar  and  Abar'rbar.
        acond = anorm * sqrt(ddnorm)
        res1 = phibar**2
        res2 = res2 + psi**2
        rnorm = sqrt(res1 + res2)
        arnorm = alfa * abs(tau)

        # Distinguish between
        #    r1norm = ||b - Ax|| and
        #    r2norm = rnorm in current code
        #           = sqrt(r1norm^2 + damp^2*||x||^2).
        #    Estimate r1norm from
        #    r1norm = sqrt(r2norm^2 - damp^2*||x||^2).
        # Although there is cancellation, it might be accurate enough.
        r1sq = rnorm**2 - dampsq * xxnorm
        r1norm = sqrt(abs(r1sq))
        if r1sq < 0:
            r1norm = -r1norm
        r2norm = rnorm

        # Now use these norms to estimate certain other quantities,
        # some of which will be small near a solution.
        test1 = rnorm / bnorm
        test2 = arnorm / (anorm * rnorm + eps)
        test3 = 1 / (acond + eps)
        t1 = test1 / (1 + anorm * xnorm / bnorm)
        rtol = btol + atol * anorm * xnorm / bnorm

        # The following tests guard against extremely small values of
        # atol, btol  or  ctol.  (The user may have set any or all of
        # the parameters  atol, btol, conlim  to 0.)
        # The effect is equivalent to the normal tests using
        # atol = eps,  btol = eps,  conlim = 1/eps.
        if itn >= iter_lim:
            istop = 7
        if 1 + test3 <= 1:
            istop = 6
        if 1 + test2 <= 1:
            istop = 5
        if 1 + t1 <= 1:
            istop = 4

        # Allow for tolerances set by the user.
        if test3 <= ctol:
            istop = 3
        if test2 <= atol:
            istop = 2
        if test1 <= rtol:
            istop = 1

        # See if it is time to print something.
        prnt = False
        if n <= 40:
            prnt = True
        if itn <= 10:
            prnt = True
        if itn >= iter_lim-10:
            prnt = True
        # if itn%10 == 0: prnt = True
        if test3 <= 2*ctol:
            prnt = True
        if test2 <= 10*atol:
            prnt = True
        if test1 <= 10*rtol:
            prnt = True
        if istop != 0:
            prnt = True

        if prnt:
            if show:
                str1 = '%6g %12.5e' % (itn, x[0])
                str2 = ' %10.3e %10.3e' % (r1norm, r2norm)
                str3 = '  %8.1e %8.1e' % (test1, test2)
                str4 = ' %8.1e %8.1e' % (anorm, acond)
                print(str1, str2, str3, str4)

        if istop != 0:
            break

    # End of iteration loop.
    # Print the stopping condition.
    if show:
        print(' ')
        print('LSQR finished')
        print(msg[istop])
        print(' ')
        str1 = 'istop =%8g   r1norm =%8.1e' % (istop, r1norm)
        str2 = 'anorm =%8.1e   arnorm =%8.1e' % (anorm, arnorm)
        str3 = 'itn   =%8g   r2norm =%8.1e' % (itn, r2norm)
        str4 = 'acond =%8.1e   xnorm  =%8.1e' % (acond, xnorm)
        print(str1 + '   ' + str2)
        print(str3 + '   ' + str4)
        print(' ')

    return x, istop, itn, r1norm, r2norm, anorm, acond, arnorm, xnorm, var
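
The warm-start recipe from the Notes section (compute r0 = b - A*x0, solve A*dx = r0, then add the correction) maps directly onto the public scipy.sparse.linalg.lsqr. A minimal sketch with synthetic data; the rough estimate x0 below is an arbitrary choice for illustration:

import numpy as np
from scipy.sparse.linalg import lsqr

rng = np.random.RandomState(0)
A = rng.randn(20, 5)
b = A @ rng.randn(5) + 0.01 * rng.randn(20)

x0 = np.linalg.lstsq(A[:10], b[:10], rcond=None)[0]  # any rough estimate
r0 = b - A @ x0              # 1. residual of the initial estimate
dx = lsqr(A, r0)[0]          # 2. solve A*dx = r0
x = x0 + dx                  # 3. corrected solution
print(np.linalg.norm(b - A @ x))

lsqr returns the ten-value tuple documented above, so [0] extracts the solution vector.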