numpy.pi

Here are examples of the Python API numpy.pi taken from open-source projects. By voting up, you can indicate which examples are most useful and appropriate.

200 Examples 7

Example 1

Project: sherpa
Source File: __init__.py
View license
    def __init__(self, name='sersic2d'):
        """Set up the parameters of a 2-D Sersic model component and
        register them with ArithmeticModel in a fixed order."""
        # NOTE(review): positional Parameter arguments are presumed to be
        # (default, min, max, hard_min, hard_max[, units]) -- confirm
        # against the sherpa Parameter signature.
        # Scale radius, kept non-negative.
        self.r0 = Parameter(name, 'r0', 10, 0, hard_min=0)
        # Model center position.
        self.xpos = Parameter(name, 'xpos', 0)
        self.ypos = Parameter(name, 'ypos', 0)
        # Ellipticity, soft range [0, 0.999], hard upper limit 0.9999.
        self.ellip = Parameter(name, 'ellip', 0, 0, 0.999, 0, 0.9999)
        # Position angle in radians; soft range +/-2*pi, hard range
        # [-2*pi, 4*pi].
        self.theta = Parameter(name, 'theta', 0, -2*numpy.pi, 2*numpy.pi, -2*numpy.pi,
                               4*numpy.pi, 'radians')
        # Amplitude of the profile.
        self.ampl = Parameter(name, 'ampl', 1)
        # Sersic index; frozen by default so fits do not vary it.
        self.n = Parameter(name, 'n', 1, .1, 10, 0.01, 100, frozen=True)
        ArithmeticModel.__init__(self, name,
                                 (self.r0, self.xpos, self.ypos, self.ellip,
                                  self.theta, self.ampl, self.n))
        # Presumably resets ArithmeticModel's result-caching state --
        # TODO confirm against the base class.
        self.cache = 0

Example 2

Project: pyeq2
Source File: Trigonometric.py
View license
    def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
        """Evaluate the model amplitude * sin(u)^2 / u with
        u = pi * (x - center) / width at the cached X values.

        inCoeffs holds (amplitude, center, width); on any evaluation
        failure a huge penalty vector is returned so the fitter rejects
        these coefficients.
        """
        x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once

        amplitude = inCoeffs[0]
        center = inCoeffs[1]
        width = inCoeffs[2]

        try:
            # Hoist the shared phase term: the original recomputed
            # numpy.pi * (x_in - center) / width three times.
            u = numpy.pi * (x_in - center) / width
            temp = amplitude * numpy.sin(u) * numpy.sin(u) / u
            return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
        except Exception:
            # Deliberate best-effort fallback, kept from the original --
            # but catch Exception rather than a bare except so
            # KeyboardInterrupt/SystemExit still propagate.
            return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300

Example 3

Project: pyeq3
Source File: Trigonometric.py
View license
    def CalculateModelPredictions(self, inCoeffs, inDataCacheDictionary):
        """Evaluate amplitude * sin(u)^2 / u with u = pi*(x - center)/width
        over the cached X values.

        inCoeffs holds (amplitude, center, width).  Any evaluation failure
        yields a vector of 1.0E300 so the optimizer discards the point.
        """
        x_in = inDataCacheDictionary['X'] # only need to perform this dictionary look-up once

        amplitude = inCoeffs[0]
        center = inCoeffs[1]
        width = inCoeffs[2]

        try:
            # Compute the common phase term once instead of three times.
            u = numpy.pi * (x_in - center) / width
            temp = amplitude * numpy.sin(u) * numpy.sin(u) / u
            return self.extendedVersionHandler.GetAdditionalModelPredictions(temp, inCoeffs, inDataCacheDictionary, self)
        except Exception:
            # Intentional best-effort fallback; Exception (not a bare
            # except) so process-control exceptions propagate.
            return numpy.ones(len(inDataCacheDictionary['DependentData'])) * 1.0E300

Example 4

Project: scikit-rf
Source File: touchstone.py
View license
    def get_sparameter_data(self, format='ri'):
        """
        get the data of the s-parameter with the given format.
        supported formats are:
          orig:  unmodified s-parameter data
          ri:    data in real/imaginary
          ma:    data in magnitude and angle (degree)
          db:    data in log magnitude and angle (degree)
        Returns a dict mapping parameter name -> numpy.array column.
        """
        ret = {}
        if format == 'orig':
            values = self.sparameters
        else:
            values = self.sparameters.copy()
            # use frequency in hz unit
            values[:,0] = values[:,0]*self.frequency_mult
            if (self.format == 'db') and (format == 'ma'):
                values[:,1::2] = 10**(values[:,1::2]/20.0)
            elif (self.format == 'db') and (format == 'ri'):
                # BUG FIX: parenthesize the exponent -- the original
                # computed (10**values)/20 instead of 10**(values/20),
                # as the db->ma branch above does correctly.
                v_complex = (10**(values[:,1::2]/20.0)
                             * numpy.exp(1j*numpy.pi/180 * values[:,2::2]))
                values[:,1::2] = numpy.real(v_complex)
                values[:,2::2] = numpy.imag(v_complex)
            elif (self.format == 'ma') and (format == 'db'):
                values[:,1::2] = 20*numpy.log10(values[:,1::2])
            elif (self.format == 'ma') and (format == 'ri'):
                v_complex = (values[:,1::2] * numpy.exp(1j*numpy.pi/180 * values[:,2::2]))
                values[:,1::2] = numpy.real(v_complex)
                values[:,2::2] = numpy.imag(v_complex)
            elif (self.format == 'ri') and (format == 'ma'):
                # BUG FIX: do not wrap the complex number in
                # numpy.absolute() -- that discards the phase, so the
                # angle column came out as 0 for every point.
                v_complex = values[:,1::2] + 1j* self.sparameters[:,2::2]
                values[:,1::2] = numpy.absolute(v_complex)
                values[:,2::2] = numpy.angle(v_complex)*(180/numpy.pi)
            elif (self.format == 'ri') and (format == 'db'):
                # BUG FIX: same spurious numpy.absolute() as above.
                v_complex = values[:,1::2] + 1j* self.sparameters[:,2::2]
                values[:,1::2] = 20*numpy.log10(numpy.absolute(v_complex))
                values[:,2::2] = numpy.angle(v_complex)*(180/numpy.pi)

        for i,n in enumerate(self.get_sparameter_names(format=format)):
            ret[n] = values[:,i]
        return ret

Example 5

Project: clusterpy
Source File: mrpolygons.py
View license
def mrpolygon(alp,sigma,mu=10,X_0=10,dt=0.001,nPoints=30):
    """Creates a mean reverting polygon (MR-Polygon)

    :param alp: mean reverting speed to be used in the stochastic process 
    :type alp: float
    :param sigma: noise gain of the stochastic process 
    :type sigma: float
    :param mu: mean value of the mean reverting process 
    :type mu: float
    :param X_0: initial value of the mean reverting process
    :type X_0: Float
    :param dt: delta time of the mean reverting process
    :type dt: Float
    :param nPoints: number of points in which the MR-Polygon is sampled
    :type nPoints: integer

    :rtype: List of 6 elements with the stochastic process, the original and the sampled polygon
    :return: List
    """
    # State of the mean-reverting stochastic process.
    X1 = [X_0]
    X1e = [X_0]   # NOTE(review): X1e is never updated or returned below.
    t = dt
    # Polar representation of the polygon being built: angles `a`, radii `r`.
    a = [0]
    r = [X_0]
    # Sub-sampled polygon (filled at the end).
    sr = []
    sa = []
    times = [0]
    dim = []      # NOTE(review): `dim` is unused in this function.
    times = [0]   # NOTE(review): duplicate initialization of `times`.
    lengthPolar = 0
    lengthCarte = 0
    # Walk the process until the accumulated polar angle completes one
    # full turn (2*pi); `t` tracks the current angle a[-1].
    while t < 2*numpy.pi:
        # Euler-Maruyama step of dX = alp*(mu - X)*dt + sigma*dW
        # (numpy.random.normal: unseeded -- output is stochastic).
        bt = numpy.random.normal()
        X1.append(X1[-1] + alp*(mu - X1[-1])*dt + sigma*numpy.sqrt(dt)*bt)
        # Cartesian length of the step just taken.
        l = (dt**2 + (X1[-2]-X1[-1])**2)**0.5
        alpha = numpy.arccos((2*r[-1]**2 - dt**2)/float(2*r[-1]**2)) # phi_1
        beta = (numpy.pi - alpha)/float(2)
        #beta = numpy.arcsin(r[-1]*numpy.sin(alpha)/float(dt)) # phi_2
        if X1[-1] >= X1[-2]: #phi_3
            betap = numpy.pi - beta
        else:
            betap = beta
        psi = numpy.arcsin(dt*numpy.sin(betap)/float(l)) #phi_4
        phi = numpy.pi - betap - psi #phi_5
        # Radial increment implied by the step, via the law of sines.
        rtp = l*numpy.sin(phi)/float(numpy.sin(betap))
        # Close the polygon when the next angle would pass 2*pi: snap back
        # to the starting radius.
        if alpha + a[-1] >= 2*numpy.pi:
            r.append(r[0])
            a.append(numpy.pi*2)
            distance = abs(r[0] - r[-1])
            lengthCarte += distance
            lengthPolar += distance
        else:
            if X1[-1] >= X1[-2]:
                sign = "+"   # NOTE(review): `sign` is assigned but never read.
                r.append(r[-1] + rtp)
            else:
                sign = "-"
                # Radii are clamped below at 1 so the polygon never
                # collapses through the origin.
                if r[-1] - rtp <= 1:
                    r.append(1)
                else:
                    r.append(r[-1] - rtp)
            a.append(a[-1] + alpha)
            # Chord length between consecutive polar points (law of cosines).
            distance = (r[-2]**2 + r[-1]**2 - 2*r[-2]*r[-1]*numpy.cos(alpha))**0.5
            lengthCarte += l
            lengthPolar += distance
        times.append(t)
        t = a[-1]
    # Sub-sample the polygon down to roughly nPoints vertices.
    realPoints = len(a)
    step = int(realPoints/(nPoints-1))
    # NOTE(review): if realPoints < nPoints-1 then step == 0 and the
    # modulo below raises ZeroDivisionError -- confirm the intended
    # relationship between dt and nPoints.
    for cont in range(realPoints):
        if cont%step == 0:
            sr.append(r[cont])
            sa.append(a[cont])
    # Always keep the closing vertex in the sampled polygon.
    if sa[-1] != a[-1]:
        sa.append(a[-1])
        sr.append(r[-1])
    return a, r, sa, sr, times, X1

Example 6

Project: snakemakelib
Source File: scrnaseq.py
View license
def scrnaseq_alignment_qc_plots(rseqc_read_distribution=None, rseqc_gene_coverage=None,
                      star_results=None):
    """Make alignment QC plots for scrnaseq workflow

    Args:
      rseqc_read_distribution (str): RSeQC read distribution results csv file
      rseqc_gene_coverage (str): RSeQC gene coverage results csv file
      star_results (str): star alignment results csv file

    Returns:
      dict: dictionary with keys 'fig' pointing to a (:py:class:`~bokeh.models.GridPlot`) Bokeh GridPlot object and key 'table' pointing to a (:py:class:`~bokeh.widgets.DataTable`) DataTable

    """
    # Assemble one frame indexed by sample: STAR stats joined with the
    # RSeQC read-distribution table (pivoted wide) and gene coverage.
    df_star = pd.read_csv(star_results, index_col="Sample")
    df_rseqc_rd = pd.read_csv(rseqc_read_distribution, index_col="Sample").reset_index().pivot_table(columns=["Group"], values=["Tag_count"], index=["Sample"])
    # Flatten the (values, group) MultiIndex columns to "Tag_count_<Group>".
    df_rseqc_rd.columns = ["_".join(x) if isinstance(x, tuple) else x for x in df_rseqc_rd.columns]
    df_rseqc_gc = pd.read_csv(rseqc_gene_coverage, index_col="Sample")
    df_all = df_star.join(df_rseqc_rd)
    df_all = df_all.join(df_rseqc_gc['three_prime_map'])
    source = ColumnDataSource(df_all)
    columns = [
        TableColumn(field="Sample", title="Sample"),
        TableColumn(field="Number_of_input_reads",
                    title="Number of input reads"),
        TableColumn(field="Uniquely_mapped_reads_PCT",
                    title="Uniquely mapped reads (%)"),
        TableColumn(field="Mismatch_rate_per_base__PCT",
                    title="Mismatch rate per base (%)"),
        TableColumn(field="Insertion_rate_per_base",
                    title="Insertion rate per base (%)"),
        TableColumn(field="Deletion_rate_per_base",
                    title="Deletion rate per base (%)"),
        TableColumn(field="PCT_of_reads_unmapped",
                    title="Unmapped reads (%)"),
    ]
    table = DataTable(source=source, columns=columns,
                      editable=False, width=1000)
    TOOLS = "pan,wheel_zoom,box_zoom,box_select,lasso_select,resize,reset,save,hover"
    # Shared keyword arguments for every subplot / axis.
    kwfig = {'plot_width': 400, 'plot_height': 400,
             'title_text_font_size': "12pt"}
    kwxaxis = {'axis_label': 'Sample',
               'major_label_orientation': np.pi/3}
    kwyaxis = {'axis_label_text_font_size': '10pt',
               'major_label_orientation': np.pi/3}

    # Input reads
    p1 = figure(title="Number of input reads",
                x_range=list(df_all.index), tools=TOOLS,
                y_axis_type="log", **kwfig)
    dotplot(p1, "Sample", "Number_of_input_reads", source=source)
    xaxis(p1, **kwxaxis)
    yaxis(p1, axis_label="Reads", **kwyaxis)
    tooltips(p1, HoverTool, [('Sample', '@Sample'),
                             ('Reads', '@Number_of_input_reads')])

    # Uniquely mapping
    p2 = figure(title="Uniquely mapping reads",
                x_range=p1.x_range,
                y_range=[0, 100],
                tools=TOOLS,
                **kwfig)
    dotplot(p2, "Sample", "Uniquely_mapped_reads_PCT", source=source)
    xaxis(p2, **kwxaxis)
    yaxis(p2, axis_label="Percent", **kwyaxis)
    tooltips(p2, HoverTool, [('Sample', '@Sample'),
                             ('Pct_mapped', '@Uniquely_mapped_reads_PCT')])

    # Unmapped
    p3 = figure(title="Unmapped reads",
                x_range=p1.x_range,
                y_range=[0, 100],
                tools=TOOLS,
                **kwfig)
    dotplot(p3, "Sample", "PCT_of_reads_unmapped", source=source)
    xaxis(p3, **kwxaxis)
    yaxis(p3, axis_label="Percent", **kwyaxis)
    tooltips(p3, HoverTool, [('Sample', '@Sample'),
                             ('Pct_unmapped', '@PCT_of_reads_unmapped')])

    # Mismatch/indel rate
    p4 = figure(title="Mismatch and indel rates",
                x_range=p1.x_range,
                tools=TOOLS,
                **kwfig)
    dotplot(p4, "Sample", "Mismatch_rate_per_base__PCT", source=source, legend="Mismatch")
    dotplot(p4, "Sample", "Insertion_rate_per_base", source=source, legend="Insertion", color="red")
    dotplot(p4, "Sample", "Deletion_rate_per_base", source=source, legend="Deletion", color="green")
    xaxis(p4, **kwxaxis)
    yaxis(p4, axis_label="Percent", **kwyaxis)
    # BUG FIX: the tooltip field was '@samples', which is not a column of
    # the data source -- every other plot uses '@Sample'.
    tooltips(p4, HoverTool, [('Sample', '@Sample'),
                             ('Mismatch rate per base',
                              '@Mismatch_rate_per_base__PCT'),
                             ('Insertion rate per base',
                              '@Insertion_rate_per_base'),
                             ('Deletion rate per base',
                              '@Deletion_rate_per_base'), ])
    select_tool = p4.select(dict(type=BoxSelectTool))
    select_tool.dimensions = ['width']

    # Mismatch/indel sum
    p5 = figure(title="Mismatch/indel sum",
                x_range=p1.x_range,
                tools=TOOLS,
                **kwfig)
    dotplot(p5, "Sample", "mismatch_sum", source=source)
    xaxis(p5, **kwxaxis)
    yaxis(p5, axis_label="Percent", **kwyaxis)
    tooltips(p5, HoverTool, [('Sample', '@Sample'),
                             ('Mismatch/indel rate per base',
                              '@mismatch_sum'), ])
    select_tool = p5.select(dict(type=BoxSelectTool))
    select_tool.dimensions = ['width']

    # Fraction of tags mapping to exons
    p6 = figure(title="Tags mapping to exons",
                x_range=p1.x_range,
                tools=TOOLS,
                **kwfig)
    dotplot(p6, "Sample", "Tag_count_ExonMap", source=source)
    xaxis(p6, **kwxaxis)
    yaxis(p6, axis_label="Percent", **kwyaxis)
    tooltips(p6, HoverTool, [('Sample', '@Sample'),
                             ('ExonMap', '@Tag_count_ExonMap'), ])

    # Fraction reads mapping to 10% right-most end
    p7 = figure(title="Reads mapping to 3' end",
                x_range=p1.x_range,
                tools=TOOLS,
                **kwfig)
    dotplot(p7, "Sample", "three_prime_map", source=source)
    xaxis(p7, **kwxaxis)
    yaxis(p7, axis_label="Percent", **kwyaxis)
    tooltips(p7, HoverTool, [('Sample', '@Sample'),
                             ("3' map", '@three_prime_map'), ])

    return {'fig': gridplot([[p1, p2, p3], [p4, p5, p6], [p7, None, None]]),
            'table': table}

Example 7

Project: snakemakelib
Source File: scrnaseq.py
View license
def scrnaseq_pca_plots(pca_results_file=None, metadata=None, pcacomp=(1,2), pcaobjfile=None):
    """Make PCA QC plots for scrnaseq workflow

    Args:
      pca_results_file (str): pca results file
      metadata (str): metadata file name
      pcacomp (int): tuple of ints corresponding to components to draw
      pcaobjfile (str): file name containing pickled pca object

    Returns:
      dict: dictionary with keys 'fig' pointing to a (:py:class:`~bokeh.models.GridPlot`) Bokeh GridPlot object and key 'table' pointing to a (:py:class:`~bokeh.widgets.DataTable`) DataTable

    """
    # BUG FIX: md must exist even when no metadata file is given --
    # the `md is not None` checks below otherwise raise NameError.
    md = None
    if metadata is not None:
        md = pd.read_csv(metadata, index_col=0)
    # NOTE(review): pcaobj is referenced unconditionally further down
    # (explained_variance_ratio_), so pcaobjfile is effectively required.
    pcaobj = None
    if pcaobjfile is not None:
        with open(pcaobjfile, 'rb') as fh:
            pcaobj = pickle.load(fh)
    df_pca = pd.read_csv(pca_results_file, index_col="sample")
    # Default color plus x/y initialized to the first two components.
    df_pca['color'] = ['red'] * df_pca.shape[0]
    df_pca['x'] = df_pca['0']
    df_pca['y'] = df_pca['1']

    source = ColumnDataSource(df_pca)
    TOOLS = "pan,wheel_zoom,box_zoom,box_select,resize,reset,save,hover"

    # Add radio button group
    cmap = colorbrewer(datalen = df_pca.shape[0])
    # Browser-side recoloring by the selected metadata column
    # (ColorBrewer "Paired" palettes inlined in the JS).
    callback_rbg = CustomJS(args=dict(source=source), code="""
        var data = source.get('data');
        var active = cb_obj.get('active')
        var label = cb_obj.get('label')[active]
    var Paired = {
    3	: [ "#A6CEE3","#1F78B4","#B2DF8A"],
    4	: [ "#A6CEE3","#1F78B4","#B2DF8A","#33A02C"],
    5	: [ "#A6CEE3","#1F78B4","#B2DF8A","#33A02C","#FB9A99"],
    6	: [ "#A6CEE3","#1F78B4","#B2DF8A","#33A02C","#FB9A99","#E31A1C"],
    7	: [ "#A6CEE3","#1F78B4","#B2DF8A","#33A02C","#FB9A99","#E31A1C","#FDBF6F"],
    8	: [ "#A6CEE3","#1F78B4","#B2DF8A","#33A02C","#FB9A99","#E31A1C","#FDBF6F","#FF7F00"] ,
    9	: [ "#A6CEE3","#1F78B4","#B2DF8A","#33A02C","#FB9A99","#E31A1C","#FDBF6F","#FF7F00","#CAB2D6"],
    10	: [ "#A6CEE3","#1F78B4","#B2DF8A","#33A02C","#FB9A99","#E31A1C","#FDBF6F","#FF7F00","#CAB2D6","#6A3D9A"],
    11	: [ "#A6CEE3","#1F78B4","#B2DF8A","#33A02C","#FB9A99","#E31A1C","#FDBF6F","#FF7F00","#CAB2D6","#6A3D9A","#FFFF99"],
    12	: [ "#A6CEE3","#1F78B4","#B2DF8A","#33A02C","#FB9A99","#E31A1C","#FDBF6F","#FF7F00","#CAB2D6","#6A3D9A","#FFFF99", "#B15928"]};
        var colormap = {};

        var j = 0;
        for (i = 0; i < data['sample'].length; i++) {
            if (data[label][i] in colormap) {
            } else {
                colormap[data[label][i]] = j;
                j++;
            }
        }
        var nfac = Object.keys(colormap).length;
        if (nfac > 12) {
            nfac = 12;
        } 
        if (nfac < 3) {
           nfac = 3;
        }
        var colors = Paired[nfac];
        for (i = 0; i < data[label].length; i++) {
              data['color'][i] = colors[colormap[data[label][i]]]
        }
        source.trigger('change');
    """)
    callback  = CustomJS(args=dict(source=source), code="""
        var data = source.get('data');
        var active = cb_obj.get('active');
        var label = cb_obj.get('label');
    var Paired = {
    3	: [ "#A6CEE3","#1F78B4","#B2DF8A"],
    4	: [ "#A6CEE3","#1F78B4","#B2DF8A","#33A02C"],
    5	: [ "#A6CEE3","#1F78B4","#B2DF8A","#33A02C","#FB9A99"],
    6	: [ "#A6CEE3","#1F78B4","#B2DF8A","#33A02C","#FB9A99","#E31A1C"],
    7	: [ "#A6CEE3","#1F78B4","#B2DF8A","#33A02C","#FB9A99","#E31A1C","#FDBF6F"],
    8	: [ "#A6CEE3","#1F78B4","#B2DF8A","#33A02C","#FB9A99","#E31A1C","#FDBF6F","#FF7F00"] ,
    9	: [ "#A6CEE3","#1F78B4","#B2DF8A","#33A02C","#FB9A99","#E31A1C","#FDBF6F","#FF7F00","#CAB2D6"],
    10	: [ "#A6CEE3","#1F78B4","#B2DF8A","#33A02C","#FB9A99","#E31A1C","#FDBF6F","#FF7F00","#CAB2D6","#6A3D9A"],
    11	: [ "#A6CEE3","#1F78B4","#B2DF8A","#33A02C","#FB9A99","#E31A1C","#FDBF6F","#FF7F00","#CAB2D6","#6A3D9A","#FFFF99"],
    12	: [ "#A6CEE3","#1F78B4","#B2DF8A","#33A02C","#FB9A99","#E31A1C","#FDBF6F","#FF7F00","#CAB2D6","#6A3D9A","#FFFF99", "#B15928"]};
        var colormap = {};
    if (!active) {
        var j = 0;
        for (i = 0; i < data['sample'].length; i++) {
            if (data[label][i] in colormap) {
            } else {
                colormap[data[label][i]] = j;
                j++;
            }
        }
        var nfac = Object.keys(colormap).length;
        if (nfac > 12) {
            nfac = 12;
        } 
        if (nfac < 3) {
           nfac = 3;
        }
        var colors = Paired[nfac];
        for (i = 0; i < data[label].length; i++) {
              data['color'][i] = colors[colormap[data[label][i]]]
        }
        source.trigger('change');
    }
    """)
    if md is not None:
        # Waiting for callbacks to be implemented upstream in bokeh
        # rbg = RadioButtonGroup(labels=list(md.columns),
        #                        callback=callback)
        toggle_buttons = [Toggle(label=x, callback=callback) for x in list(md.columns)]
    else:
        toggle_buttons = []
        # rbg = RadioButtonGroup()
    # PC components
    xcallback = CustomJS(args=dict(source=source), code="""
        var data = source.get('data');
        var active = cb_obj.get('active')
        var value = cb_obj.get('value')
        x = data['x']
        for (i = 0; i < x.length; i++) {
              x[i] = data[value][i]
              data['sample'][i] = value
              data['FileID'][i] = active
        }

        source.trigger('change');
    """)
    ycallback = CustomJS(args=dict(source=source), code="""
        var data = source.get('data');
        var value = cb_obj.get('value')
        y = data['y']
        for (i = 0; i < y.length; i++) {
             y[i] = data[value][i]
        }
        source.trigger('change');
    """)

    # Component columns are named "0", "1", ... in the results file;
    # present them to the user 1-based.  (Raw string for the regex to
    # avoid an invalid-escape warning.)
    pca_components = sorted([int(x) + 1 for x in source.column_names if re.match(r"\d+", x)])
    menulist = [(str(x), str(x)) for x in pca_components]
    component_x = Dropdown(label = "PCA component x", menu = menulist, default_value="1",
                           callback=xcallback)
    component_y = Dropdown(label = "PCA component y", menu = menulist, default_value="2",
                           callback=ycallback)

    # Make the pca plot
    kwfig = {'plot_width': 400, 'plot_height': 400,
             'title_text_font_size': "12pt"}


    p1 = figure(title="Principal component analysis",
                tools=TOOLS, **kwfig)

    points(p1, 'x', 'y', source=source, color='color', size=10,
           alpha=.7)
    # Axis labels carry the explained-variance percentage per component.
    kwxaxis = {'axis_label': "Component {} ({:.2f}%)".format(
        pcacomp[0], 100.0 * pcaobj.explained_variance_ratio_[pcacomp[0] - 1]),
               'axis_label_text_font_size': '10pt',
               'major_label_orientation': np.pi/3}
    kwyaxis = {'axis_label': "Component {} ({:.2f}%)".format(
        pcacomp[1], 100.0 * pcaobj.explained_variance_ratio_[pcacomp[1] - 1]),
               'axis_label_text_font_size': '10pt',
               'major_label_orientation': np.pi/3}
    xaxis(p1, **kwxaxis)
    yaxis(p1, **kwyaxis)
    tooltiplist = [("sample", "@sample")] if "sample" in source.column_names else []
    if md is not None:
        tooltiplist = tooltiplist + [(str(x), "@{}".format(x)) for x
                                     in md.columns]
    tooltips(p1, HoverTool, tooltiplist)

    # Detected genes, FPKM and TPM
    p2 = figure(title="Number of detected genes",
                x_range=list(df_pca.index), tools=TOOLS,
                **kwfig)
    kwxaxis.update({'axis_label': "Sample"})
    kwyaxis.update({'axis_label': "Detected genes"})
    dotplot(p2, "sample", "FPKM", source=source)
    xaxis(p2, **kwxaxis)
    yaxis(p2, **kwyaxis)
    tooltips(p2, HoverTool, [('sample', '@sample'),
                             ('# genes (FPKM)', '@FPKM')])
    return {'fig':vform(*(toggle_buttons + [gridplot([[p1, p2]])]))}

Example 8

Project: PythonQwt
Source File: scale_draw.py
View license
    def minLabelDist(self, font):
        """
        Determine the minimum distance between two labels, that is necessary
        that the texts don't overlap.

        :param QFont font: Font
        :return: The maximum width of a label
        
        .. seealso::
        
            :py:meth:`getBorderDistHint()`
        """
        # No label component drawn -> no spacing needed.
        if not self.hasComponent(QwtAbstractScaleDraw.Labels):
            return 0
        
        ticks = self.scaleDiv().ticks(QwtScaleDiv.MajorTick)
        if not ticks:
            return 0
        
        fm = QFontMetrics(font)
        vertical = self.orientation() == Qt.Vertical
        
        # Walk tick pairs, comparing each label rect with its predecessor.
        bRect1 = QRectF()
        bRect2 = self.labelRect(font, ticks[0])
        if vertical:
            # For vertical scales, transpose the rect so the overlap test
            # below always works along one axis.
            bRect2.setRect(-bRect2.bottom(), 0.,
                           bRect2.height(), bRect2.width())
        
        maxDist = 0.
        
        for tick in ticks:
            bRect1 = bRect2
            bRect2 = self.labelRect(font, tick)
            if vertical:
                bRect2.setRect(-bRect2.bottom(), 0.,
                               bRect2.height(), bRect2.width())
            
            # Distance required so neighbouring labels do not touch:
            # the font leading plus the extents of both rects that reach
            # towards each other.
            dist = fm.leading()
            if bRect1.right() > 0:
                dist += bRect1.right()
            if bRect2.left() < 0:
                dist += -bRect2.left()
            
            if dist > maxDist:
                maxDist = dist
            
        # Account for label rotation.  NOTE(review): qwtRadians presumably
        # converts degrees to radians -- confirm.
        angle = qwtRadians(self.labelRotation())
        if vertical:
            angle += np.pi/2
        
        sinA = np.sin(angle)
        # sin(angle) ~ 0: effectively unrotated labels, keep plain distance.
        if qFuzzyCompare(sinA+1., 1.):
            return np.ceil(maxDist)
        
        fmHeight = fm.ascent()-2
        
        # For rotated labels, the needed distance is the font height
        # projected along the scale direction (cot(angle) * height).
        labelDist = fmHeight/np.sin(angle)*np.cos(angle)
        if labelDist < 0:
            labelDist = -labelDist
        
        # Clamp: never more than the unrotated distance, never less than
        # the font height itself.
        if labelDist > maxDist:
            labelDist = maxDist
        
        if labelDist < fmHeight:
            labelDist = fmHeight
        
        return np.ceil(labelDist)

Example 9

Project: PythonQwt
Source File: CurveDemo2.py
View license
    def __init__(self, *args):
        """Build the demo frame: four styled curves with their coordinate
        maps, initial sine/cosine data, and a 250 ms animation timer."""
        QFrame.__init__(self, *args)

        # Raised box frame around the drawing area.
        self.setFrameStyle(QFrame.Box | QFrame.Raised)
        self.setLineWidth(2)
        self.setMidLineWidth(3)

        # Dark blue background.
        p = QPalette()
        p.setColor(self.backgroundRole(), QColor(30, 30, 50))
        self.setPalette(p)
        # make curves and maps
        # Each entry is (curve, x scale map, y scale map); the maps take
        # paint-device coordinates [0, 100] to the curve's data range.
        self.tuples = []
        # curve 1: lines with X-cross symbols; y spans one period [0, 2*pi].
        curve = QwtPlotCurve()
        curve.setPen(QPen(QColor(150, 150, 200), 2))
        curve.setStyle(QwtPlotCurve.Lines)
        curve.setSymbol(QwtSymbol(QwtSymbol.XCross,
                                      QBrush(),
                                      QPen(Qt.yellow, 2),
                                      QSize(7, 7)))
        self.tuples.append((curve,
                            QwtScaleMap(0, 100, -1.5, 1.5),
                            QwtScaleMap(0, 100, 0.0, 2*np.pi)))
        # curve 2: dash-dot-dot sticks with ellipse symbols; x spans [0, 2*pi].
        curve = QwtPlotCurve()
        curve.setPen(QPen(QColor(200, 150, 50),
                                1,
                                Qt.DashDotDotLine))
        curve.setStyle(QwtPlotCurve.Sticks)
        curve.setSymbol(QwtSymbol(QwtSymbol.Ellipse,
                                      QBrush(Qt.blue),
                                      QPen(Qt.yellow),
                                      QSize(5, 5)))
        self.tuples.append((curve,
                            QwtScaleMap(0, 100, 0.0, 2*np.pi),
                            QwtScaleMap(0, 100, -3.0, 1.1)))
        # curve 3: plain green lines.
        curve = QwtPlotCurve()
        curve.setPen(QPen(QColor(100, 200, 150)))
        curve.setStyle(QwtPlotCurve.Lines)
        self.tuples.append((curve,
                            QwtScaleMap(0, 100, -1.1, 3.0),
                            QwtScaleMap(0, 100, -1.1, 3.0)))
        # curve 4: plain red lines.
        curve = QwtPlotCurve()
        curve.setPen(QPen(Qt.red))
        curve.setStyle(QwtPlotCurve.Lines)
        self.tuples.append((curve,
                            QwtScaleMap(0, 100, -5.0, 1.1),
                            QwtScaleMap(0, 100, -1.1, 5.0)))
        # data
        # Angles over one period; USize is presumably a module-level
        # constant giving the number of samples -- confirm.
        self.phase = 0.0
        self.base = np.arange(0.0, 2.01*np.pi, 2*np.pi/(USize-1))
        self.uval = np.cos(self.base)
        self.vval = np.sin(self.base)
        # Halve every other sample for a jagged look.
        self.uval[1::2] *= 0.5
        self.vval[1::2] *= 0.5
        self.newValues()
        # start timer
        # 250 ms animation tick.
        self.tid = self.startTimer(250)

Example 10

Project: pyunicorn
Source File: grid.py
View license
    def node_number(self, lat_node, lon_node):
        """
        Return the index of the node closest to given geographical coordinates.

        **Example:**

        >>> Grid.SmallTestGrid().node_number(lat_node=14., lon_node=9.)
        3

        :type lat_node: number (float)
        :arg lat_node: The latitude coordinate.

        :type lon_node: number (float)
        :arg lon_node: The longitude coordinate.

        :rtype: number (int)
        :return: the closest node's index.
        """
        #  Trigonometric form of the query point (degrees -> radians).
        sin_lat_query = np.sin(lat_node * np.pi / 180)
        cos_lat_query = np.cos(lat_node * np.pi / 180)
        sin_lon_query = np.sin(lon_node * np.pi / 180)
        cos_lon_query = np.cos(lon_node * np.pi / 180)

        #  Cosine of the angular distance from the query point to every
        #  node (spherical law of cosines), using the grid's cached
        #  per-node trigonometric sequences.
        cos_angdist = (self.sin_lat() * sin_lat_query
                       + self.cos_lat() * cos_lat_query
                       * (self.sin_lon() * sin_lon_query
                          + self.cos_lon() * cos_lon_query))

        #  Clamp rounding errors back into the domain of arccos.
        cos_angdist = np.clip(cos_angdist, -1., 1.)

        #  The closest node minimizes the angular distance.
        return np.arccos(cos_angdist).argmin()

Example 11

Project: OpenPNM
Source File: throat_offset_vertices.py
View license
def distance_transform(network, geometry, offset, **kwargs):
    r"""
    Use the Voronoi vertices and perform image analysis to obtain throat properties

    Each throat facet is rotated into the x-y plane (when not already
    axis-aligned), rasterised into a binary image, eroded by ``offset``
    via a Euclidean distance transform, and the surviving region is
    measured: area, perimeter, centroid, incentre, inradius and
    equivalent diameter.  The vertices of the eroded facet are returned;
    when ``kwargs['set_dependent']`` is True the measured properties are
    also written back onto ``geometry``.

    NOTE(review): ``network`` is not referenced in this function body --
    presumably kept for API consistency with sibling pore-scale models.
    ``sp``, ``tr`` and ``logger`` come from the enclosing module scope.
    """

    import math
    import numpy as np
    from skimage.morphology import convex_hull_image
    from skimage.measure import regionprops
    from scipy import ndimage

    # Per-throat result arrays (3 columns for the coordinate vectors).
    Nt = geometry.num_throats()
    area = sp.zeros(Nt)
    perimeter = sp.zeros(Nt)
    centroid = sp.zeros([Nt, 3])
    incentre = sp.zeros([Nt, 3])
    inradius = sp.zeros(Nt)
    equiv_diameter = sp.zeros(Nt)
    eroded_verts = sp.ndarray(Nt, dtype=object)

    # Raster resolution: the largest facet span is scaled to `res` pixels.
    res = 200
    vertices = geometry['throat.vertices']
    normals = geometry['throat.normal']
    z_axis = [0, 0, 1]

    for i in range(Nt):
        logger.info("Processing throat " + str(i+1)+" of "+str(Nt))
        # For boundaries some facets will already be aligned with the axis - if this
        # is the case a rotation is unnecessary and could also cause problems
        angle = tr.angle_between_vectors(normals[i], z_axis)
        if angle == 0.0 or angle == np.pi:
            # We are already aligned
            rotate_facet = False
            facet = vertices[i]
        else:
            rotate_facet = True
            # Rotate the facet so its normal points along z.
            M = tr.rotation_matrix(tr.angle_between_vectors(normals[i], z_axis),
                                   tr.vector_product(normals[i], z_axis))
            facet = np.dot(vertices[i], M[:3, :3].T)
        x = facet[:, 0]
        y = facet[:, 1]
        z = facet[:, 2]
        # Get points in 2d for image analysis
        pts = np.column_stack((x, y))
        # Translate points so min sits at the origin
        translation = [pts[:, 0].min(), pts[:, 1].min()]
        pts -= translation
        # Decimal order of magnitude of the data, used later to round z.
        # NOTE(review): np.int was removed in NumPy 1.24 -- this call (and
        # the ones below) need plain int on modern NumPy.
        order = np.int(math.ceil(-np.log10(np.max(pts))))
        # Normalise and scale the points so that largest span equals the resolution
        # to save on memory and create clear image"
        max_factor = np.max([pts[:, 0].max(), pts[:, 1].max()])
        f = res/max_factor
        # Scale the offset and define a circular structuring element with radius
        r = f*offset
        # Only proceed if r is less than half the span of the image"
        if r <= res/2:
            pts *= f
            minp1 = pts[:, 0].min()
            minp2 = pts[:, 1].min()
            maxp1 = pts[:, 0].max()
            maxp2 = pts[:, 1].max()
            # Binary image with one pixel set per facet vertex.
            img = np.zeros([np.int(math.ceil(maxp1-minp1)+1),
                            np.int(math.ceil(maxp2-minp2)+1)])
            int_pts = np.around(pts, 0).astype(int)
            for pt in int_pts:
                img[pt[0]][pt[1]] = 1
            # Pad with zeros all the way around the edges
            img_pad = np.zeros([np.shape(img)[0] + 2, np.shape(img)[1] + 2])
            img_pad[1:np.shape(img)[0]+1, 1:np.shape(img)[1]+1] = img

            # All points should lie on this plane but could be some rounding errors
            # so use the order parameter
            z_plane = sp.unique(np.around(z, order+2))
            if len(z_plane) > 1:
                logger.error('Rotation for image analysis failed')
                temp_arr = np.ones(1)
                temp_arr.fill(np.mean(z_plane))
                z_plane = temp_arr
            # NOTE(review): the quoted string below is a no-op statement
            # used as a comment in the original source; left unchanged.
            "Fill in the convex hull polygon"
            convhullimg = convex_hull_image(img_pad)
            # Perform a Distance Transform and black out points less than r to create
            # binary erosion. This is faster than performing an erosion and dt can
            # also be used later to find incircle"
            eroded = ndimage.distance_transform_edt(convhullimg)
            eroded[eroded <= r] = 0
            eroded[eroded > r] = 1
            # If we are left with less than 3 non-zero points then the throat is
            # fully occluded
            if np.sum(eroded) >= 3:
                # Do some image analysis to extract the key properties
                regions = regionprops(eroded[1:np.shape(img)[0]+1,
                                             1:np.shape(img)[1]+1].astype(int))
                # Change this to cope with genuine multi-region throats
                if len(regions) == 1:
                    for props in regions:
                        x0, y0 = props.centroid
                        equiv_diameter[i] = props.equivalent_diameter
                        area[i] = props.area
                        perimeter[i] = props.perimeter
                        coords = props.coords
                    # Undo the translation, scaling and truncation on the centroid
                    centroid2d = [x0, y0]/f
                    centroid2d += (translation)
                    centroid3d = np.concatenate((centroid2d, z_plane))
                    # Distance transform the eroded facet to find the incentre and
                    # inradius
                    dt = ndimage.distance_transform_edt(eroded)
                    # The incentre is the pixel farthest from the boundary.
                    inx0, iny0 = \
                        np.asarray(np.unravel_index(dt.argmax(), dt.shape)) \
                          .astype(float)
                    incentre2d = [inx0, iny0]
                    # Undo the translation, scaling and truncation on the incentre
                    incentre2d /= f
                    incentre2d += (translation)
                    incentre3d = np.concatenate((incentre2d, z_plane))
                    # The offset vertices will be those in the coords that are
                    # closest to the originals"
                    offset_verts = []
                    for pt in int_pts:
                        vert = np.argmin(np.sum(np.square(coords-pt), axis=1))
                        if vert not in offset_verts:
                            offset_verts.append(vert)
                    # If we are left with less than 3 different vertices then the
                    # throat is fully occluded as we can't make a shape with
                    # non-zero area
                    if len(offset_verts) >= 3:
                        offset_coords = coords[offset_verts].astype(float)
                        # Undo the translation, scaling and truncation on the
                        # offset_verts
                        offset_coords /= f
                        offset_coords_3d = \
                            np.vstack((offset_coords[:, 0]+translation[0],
                                       offset_coords[:, 1]+translation[1],
                                       np.ones(len(offset_verts))*z_plane)).T

                        # Get matrix to un-rotate the co-ordinates back to the
                        # original orientation if we rotated in the first place
                        if rotate_facet:
                            MI = tr.inverse_matrix(M)
                            # Unrotate the offset coordinates
                            incentre[i] = np.dot(incentre3d, MI[:3, :3].T)
                            centroid[i] = np.dot(centroid3d, MI[:3, :3].T)
                            eroded_verts[i] = np.dot(offset_coords_3d, MI[:3, :3].T)

                        else:
                            incentre[i] = incentre3d
                            centroid[i] = centroid3d
                            eroded_verts[i] = offset_coords_3d

                        inradius[i] = dt.max()
                        # Undo scaling on other parameters
                        area[i] /= f*f
                        perimeter[i] /= f
                        equiv_diameter[i] /= f
                        inradius[i] /= f
                    else:
                        area[i] = 0
                        perimeter[i] = 0
                        equiv_diameter[i] = 0

    # Optionally write the derived properties back onto the geometry.
    if kwargs['set_dependent'] is True:
        geometry['throat.area'] = area
        geometry['throat.perimeter'] = perimeter
        geometry['throat.centroid'] = centroid
        geometry['throat.diameter'] = equiv_diameter
        geometry['throat.indiameter'] = inradius*2
        geometry['throat.incentre'] = incentre

    return eroded_verts

Example 12

Project: HPOlib
Source File: plotBranin.py
View license
def plot_contour(trial_list, name_list, save="", title=""):
    """Plot optimizer samples on top of a filled contour of the Branin function.

    Parameters
    ----------
    trial_list : list of dict
        One entry per optimizer; each has a "trials" list whose items carry
        a "params" dict with keys "x"/"y" (or "-x"/"-y") as quoted strings.
    name_list : list
        Display names for the optimizers (indexed as name_list[opt][0]).
    save : str, optional
        If non-empty, path to save the figure to; otherwise show it.
    title : str, optional
        Figure title.
    """
    # Branin is defined on -5 <= x <= 10, 0 <= y <= 15 and has three global
    # optima: (-pi, 12.275), (pi, 2.275), (9.42478, 2.475), value 0.397887.
    markers = itertools.cycle(['o', 's', '^', 'x'])
    colors = itertools.cycle(['b', 'g', 'r', 'k'])
    size = 5

    # Get handles
    ratio = 5
    gs = gridSpec.GridSpec(ratio, 1)
    fig = matplotlib.pyplot.figure(1, dpi=100)
    fig.suptitle(title)
    ax = matplotlib.pyplot.subplot(gs[0:ratio, :])
    ax.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.5)
    xopt = [-np.pi, np.pi, 9.42478]
    yopt = [12.275, 2.275, 2.475]

    # Evaluate Branin on a regular grid.
    step = 0.1
    xi = np.arange(-5, 10 + step, step)
    yi = np.arange(-0, 15 + step, step)

    # BUG FIX: the array is indexed z[j, i] with j over yi and i over xi, so
    # the shape must be (len(yi), len(xi)).  The original (len(xi), len(yi))
    # only worked because both axes happen to have the same length here.
    z = np.zeros((len(yi), len(xi)))
    for i in range(len(xi)):
        for j in range(len(yi)):
            z[j, i] = HPOlib.benchmark_functions.branin(x=xi[i], y=yi[j])
    xi, yi = np.meshgrid(xi, yi)
    cax = ax.contourf(xi, yi, z, 50, cmap=matplotlib.cm.gray)
    fig.colorbar(cax)

    # Plot the optima after the contour so they stay on top.
    matplotlib.pyplot.scatter(xopt, yopt, marker="o", facecolor='w',
                              edgecolor='w', s=20 * size, label="Optimum")

    # Overlay each optimizer's samples: early samples small, later ones larger.
    for opt in range(len(name_list)):
        # %-formatting keeps this valid on both Python 2 and Python 3
        # (the original used a Python-2-only print statement).
        print("%s has %d samples" % (name_list[opt],
                                     len(trial_list[opt]['trials'])))
        # next(it) instead of it.next(): the .next() method is Python-2 only.
        m = next(markers)
        c = next(colors)
        x = np.zeros(len(trial_list[opt]["trials"]))
        y = np.zeros(len(trial_list[opt]["trials"]))
        for i in range(len(x)):
            params = trial_list[opt]["trials"][i]["params"]
            # Parameter names may or may not carry a leading dash.
            if '-x' in params:
                x[i] = float(params["-x"].strip("'"))
                y[i] = float(params["-y"].strip("'"))
            else:
                x[i] = float(params["x"].strip("'"))
                y[i] = float(params["y"].strip("'"))
        matplotlib.pyplot.scatter(x[0:10], y[0:10], marker=m,
                                  s=size, facecolors=c, linewidth=0.1)
        matplotlib.pyplot.scatter(x[10:-10], y[10:-10], marker=m,
                                  linewidth=0.1, s=4 * size, facecolors=c)
        # BUG FIX: x[-10:-1] silently dropped the very last sample.
        matplotlib.pyplot.scatter(x[-10:], y[-10:], marker=m,
                                  linewidth=0.1, s=6 * size, facecolors=c,
                                  label=name_list[opt][0])

    # Axis limits/labels are loop-invariant; set them once, outside the loop.
    matplotlib.pyplot.xlim([-5, 10])
    matplotlib.pyplot.ylim([-0, 15])
    matplotlib.pyplot.xlabel("X")
    matplotlib.pyplot.ylabel("Y")

    # Describe the plot
    matplotlib.pyplot.title(title)
    leg = matplotlib.pyplot.legend(loc="best", fancybox=True)
    leg.get_frame().set_alpha(0.5)

    if save != "":
        matplotlib.pyplot.subplots_adjust(top=0.85)
        matplotlib.pyplot.savefig(save, dpi=600, facecolor='w', edgecolor='w',
                                  orientation='portrait', papertype=None, format=None,
                                  transparent=False, bbox_inches="tight", pad_inches=0.1)
    else:
        matplotlib.pyplot.show()

Example 13

Project: pymc3
Source File: advi_minibatch.py
View license
def _elbo_t(logp, uw_g, uw_l, inarray_g, inarray_l, n_mcsamples, random_seed):
    """Return expression of approximate ELBO based on Monte Carlo sampling.

    Builds a Theano expression estimating the evidence lower bound for a
    mean-field Gaussian posterior.  ``uw_g``/``uw_l`` pack the global/local
    variational parameters as a single vector: first half means (u), second
    half log-std-devs (w).  Either may be None if absent.  ``n_mcsamples``
    Monte Carlo draws are used to estimate E[logp].
    """
    if random_seed is None:
        r = MRG_RandomStreams(gen_random_state())
    else:
        r = MRG_RandomStreams(seed=random_seed)

    # Constant part of the Gaussian entropy per dimension: 1 + log(2*pi).
    normal_const = floatX(1 + np.log(2.0 * np.pi))

    elbo = 0

    # Sampling local variational parameters
    if uw_l is not None:
        # Split the packed vector into means u_l and log-stds w_l.
        l_l = (uw_l.size / 2).astype('int64')
        l_l_ = (uw_l.size / 2).astype(floatX_str)
        u_l = uw_l[:l_l]
        w_l = uw_l[l_l:]
        # Reparameterization trick: z = eps * sigma + mu.
        ns_l = r.normal(size=(n_mcsamples, inarray_l.tag.test_value.shape[0]))
        zs_l = ns_l * tt.exp(w_l) + u_l
        # Entropy term of the approximate posterior (closed form).
        elbo += tt.sum(w_l) + 0.5 * l_l_ * normal_const
    else:
        zs_l = None

    # Sampling global variational parameters
    if uw_g is not None:
        l_g = (uw_g.size / 2).astype('int64')
        l_g_ = (uw_g.size / 2).astype(floatX_str)
        u_g = uw_g[:l_g]
        w_g = uw_g[l_g:]
        ns_g = r.normal(size=(n_mcsamples, inarray_g.tag.test_value.shape[0]))
        zs_g = ns_g * tt.exp(w_g) + u_g
        elbo += tt.sum(w_g) + 0.5 * l_g_ * normal_const
    else:
        zs_g = None

    # Build a per-sample log-probability function by substituting the drawn
    # z values into the model logp graph; the three branches cover which of
    # global/local parameter sets are present.
    if (zs_l is not None) and (zs_g is not None):
        def logp_(z_g, z_l):
            return theano.clone(
                logp, OrderedDict({inarray_g: z_g, inarray_l: z_l}),
                strict=False
            )
        sequences = [zs_g, zs_l]

    elif zs_l is not None:
        def logp_(z_l):
            return theano.clone(
                logp, OrderedDict({inarray_l: z_l}),
                strict=False
            )
        sequences = [zs_l]

    else:
        def logp_(z_g):
            return theano.clone(
                logp, OrderedDict({inarray_g: z_g}),
                strict=False
            )
        sequences = [zs_g]

    # Monte Carlo estimate of E_q[log p]: average logp over the drawn samples.
    logps, _ = theano.scan(fn=logp_, outputs_info=None, sequences=sequences)
    elbo += tt.mean(logps)

    return elbo

Example 14

Project: python-oceans
Source File: sw_extras.py
View license
def soundspeed(S, T, D, equation='mackenzie'):
    """Compute the speed of sound in seawater (m/s).

    Parameters
    ----------
    S : array_like
        Salinity (ppt).
    T : array_like
        Temperature (deg C).
    D : array_like
        Depth (m) for 'mackenzie'; pressure (dbar) for the other equations.
    equation : {'mackenzie', 'del_grosso', 'chen'}, optional
        Which empirical formula to use:

        * 'mackenzie' -- Mackenzie, K.V. "Nine-term Equation for Sound
          Speed in the Oceans", J. Acoust. Soc. Am. 70 (1981), 807-812.
        * 'del_grosso' -- Del Grosso, "A New Equation for the speed of
          sound in Natural Waters", J. Acoust. Soc. Am. 56#4 (1974).
        * 'chen' -- Chen and Millero, "The Sound Speed in Seawater",
          J. Acoust. Soc. Am. 62 (1977), 1129-1135.

    Returns
    -------
    ssp : array_like
        Sound speed in m/s.

    Raises
    ------
    TypeError
        If `equation` is not one of the recognized names.

    Notes
    -----
    RP (WHOI) 3/dec/91.  Added state equation ss.
    """
    if equation == 'mackenzie':
        # Nine-term Mackenzie (1981) polynomial; coefficients named after
        # the term they multiply to avoid shadowing the S/T/D arguments.
        k_const = 1.44896e3
        k_t = 4.591e0
        k_t2 = -5.304e-2
        k_t3 = 2.374e-4
        k_s = 1.340e0
        k_d = 1.630e-2
        k_d2 = 1.675e-7
        k_ts = -1.025e-2
        k_td3 = -7.139e-13
        ssp = (k_const + k_t * T + k_t2 * T * T + k_t3 * T * T * T +
               k_s * (S-35.0) + k_d * D + k_d2 * D * D +
               k_ts * T * (S-35.0) + k_td3 * T * D * D * D)
    elif equation == 'del_grosso':
        # Del Grosso uses pressure in kg/cm^2.  Convert from dbar by
        # dividing by gravity "g"; following the UNESCO algorithms
        # (ANON 1970, BULLETIN GEODESIQUE) with latitude fixed at 45 deg.
        sin45 = np.sin(45 * np.pi/180)
        grav = 9.780318 * (1.0 + (5.2788E-3 + 2.36E-5 * sin45) * sin45) + 1.092E-6 * D
        P = D / grav
        # Polynomial terms transcribed from VSOUND.f.
        C000 = 1402.392
        DCT = (0.501109398873e1 - (0.550946843172e-1 - 0.221535969240e-3 * T) *
               T) * T
        DCS = (0.132952290781e1 + 0.128955756844e-3 * S) * S
        DCP = (0.156059257041e0 + (0.244998688441e-4 - 0.883392332513e-8 * P) *
               P) * P
        DCSTP = ((-0.127562783426e-1 * T * S + 0.635191613389e-2 * T * P +
                  0.265484716608e-7 * T * T * P * P - 0.159349479045e-5 * T *
                  P * P + 0.522116437235e-9 * T * P * P * P -
                  0.438031096213e-6 * T * T * T * P) - 0.161674495909e-8 * S *
                 S * P * P + 0.968403156410e-4 * T * T * S +
                 0.485639620015e-5 * T * S * S * P - 0.340597039004e-3 * T *
                 S * P)
        ssp = C000 + DCT + DCS + DCP + DCSTP
    elif equation == 'chen':
        # Chen & Millero, copied directly from the UNESCO algorithms.
        # CHECKVALUE: SVEL=1731.995 M/S, S=40 (IPSS-78),T=40 DEG C,P=10000 DBAR
        P0 = D
        # Scale pressure from dbar to bars.
        P = P0 / 10.
        SR = np.sqrt(np.abs(S))
        # S**2 term (note: D is reused here as a local coefficient).
        D = 1.727E-3 - 7.9836E-6 * P
        # S**3/2 term.
        B1 = 7.3637E-5 + 1.7945E-7 * T
        B0 = -1.922E-2 - 4.42E-5 * T
        B = B0 + B1 * P
        # S**1 term.
        A3 = (-3.389E-13 * T + 6.649E-12) * T + 1.100E-10
        A2 = ((7.988E-12 * T - 1.6002E-10) * T + 9.1041E-9) * T - 3.9064E-7
        A1 = ((((-2.0122E-10 * T + 1.0507E-8) * T - 6.4885E-8) * T -
               1.2580E-5) * T + 9.4742E-5)
        A0 = ((((-3.21E-8 * T + 2.006E-6) * T + 7.164E-5) * T - 1.262E-2) *
              T + 1.389)
        A = ((A3 * P + A2) * P + A1) * P + A0
        # S**0 term.
        C3 = (-2.3643E-12 * T + 3.8504E-10) * T - 9.7729E-9
        C2 = (((1.0405E-12 * T - 2.5335E-10) * T + 2.5974E-8) * T -
              1.7107E-6) * T + 3.1260E-5
        C1 = (((-6.1185E-10 * T + 1.3621E-7) * T - 8.1788E-6) * T +
              6.8982E-4) * T + 0.153563
        C0 = ((((3.1464E-9 * T - 1.47800E-6) * T + 3.3420E-4) * T -
               5.80852E-2) * T + 5.03711) * T + 1402.388
        C = ((C3 * P + C2) * P + C1) * P + C0
        # Assemble the sound speed.
        ssp = C + (A + B * SR + D * S) * S
    else:
        raise TypeError('Unrecognizable equation specified: {}'.format(equation))
    return ssp

Example 15

Project: python-control
Source File: margins.py
View license
def stability_margins(sysdata, returnall=False, epsw=1e-8):
    """Calculate stability margins and associated crossover frequencies.

    Parameters
    ----------
    sysdata: LTI system or (mag, phase, omega) sequence
        sys : LTI system
            Linear SISO system
        mag, phase, omega : sequence of array_like
            Arrays of magnitudes (absolute values, not dB), phases (degrees),
            and corresponding frequencies.  Crossover frequencies returned are
            in the same units as those in `omega` (e.g., rad/sec or Hz).
    returnall: bool, optional
        If true, return all margins found. If false (default), return only the
        minimum stability margins.  For frequency data or FRD systems, only one
        margin is found and returned.
    epsw: float, optional
        Frequencies below this value (default 1e-8) are considered static gain,
        and not returned as margin.

    Returns
    -------
    gm: float or array_like
        Gain margin
    pm: float or array_like
        Phase margin
    sm: float or array_like
        Stability margin, the minimum distance from the Nyquist plot to -1
    wg: float or array_like
        Gain margin crossover frequency (where phase crosses -180 degrees)
    wp: float or array_like
        Phase margin crossover frequency (where gain crosses 0 dB)
    ws: float or array_like
        Stability margin frequency (where Nyquist plot is closest to -1)
    """

    # Normalize the input into either an FRD (frequency data) or a transfer
    # function; the two cases are handled by different algorithms below.
    try:
        if isinstance(sysdata, frdata.FRD):
            sys = frdata.FRD(sysdata, smooth=True)
        elif isinstance(sysdata, xferfcn.TransferFunction):
            sys = sysdata
        elif getattr(sysdata, '__iter__', False) and len(sysdata) == 3:
            mag, phase, omega = sysdata
            sys = frdata.FRD(mag * np.exp(1j * phase * np.pi/180), 
                             omega, smooth=True)
        else:
            sys = xferfcn._convertToTransferFunction(sysdata)
    except Exception as e:
        print (e)
        raise ValueError("Margin sysdata must be either a linear system or "
                         "a 3-sequence of mag, phase, omega.")

    # calculate gain of system
    if isinstance(sys, xferfcn.TransferFunction):
        # Polynomial (exact) path: find crossings as roots of polynomials
        # in omega rather than by numerical search.

        # check for siso
        if not issiso(sys):
            raise ValueError("Can only do margins for SISO system")

        # real and imaginary part polynomials in omega:
        rnum, inum = _polyimsplit(sys.num[0][0])
        rden, iden = _polyimsplit(sys.den[0][0])

        # test (imaginary part of tf) == 0, for phase crossover/gain margins
        test_w_180 = np.polyadd(np.polymul(inum, rden), np.polymul(rnum, -iden))
        w_180 = np.roots(test_w_180)
        #print ('1:w_180', w_180)

        # first remove imaginary and negative frequencies, epsw removes the
        # "0" frequency for type-2 systems
        w_180 = np.real(w_180[(np.imag(w_180) == 0) * (w_180 >= epsw)])
        #print ('2:w_180', w_180)

        # evaluate response at remaining frequencies, to test for phase 180 vs 0
        resp_w_180 = np.real(np.polyval(sys.num[0][0], 1.j*w_180) /
                             np.polyval(sys.den[0][0], 1.j*w_180))
        #print ('resp_w_180', resp_w_180)                     

        # only keep frequencies where the negative real axis is crossed
        w_180 = w_180[np.real(resp_w_180) < 0.0]

        # and sort
        w_180.sort()
        #print ('3:w_180', w_180)

        # test magnitude is 1 for gain crossover/phase margins
        test_wc = np.polysub(np.polyadd(_polysqr(rnum), _polysqr(inum)),
                             np.polyadd(_polysqr(rden), _polysqr(iden)))
        wc = np.roots(test_wc)
        wc = np.real(wc[(np.imag(wc) == 0) * (wc > epsw)])
        wc.sort()

        # stability margin was a bitch to elaborate, relies on magnitude to
        # point -1, then take the derivative. Second derivative needs to be >0
        # to have a minimum
        test_wstabd = np.polyadd(_polysqr(rden), _polysqr(iden))
        test_wstabn = np.polyadd(_polysqr(np.polyadd(rnum,rden)),
                                 _polysqr(np.polyadd(inum,iden)))
        test_wstab = np.polysub(
            np.polymul(np.polyder(test_wstabn),test_wstabd),
            np.polymul(np.polyder(test_wstabd),test_wstabn))

        # find the solutions, for positive omega, and only real ones
        wstab = np.roots(test_wstab)
        #print('wstabr', wstab)
        wstab = np.real(wstab[(np.imag(wstab) == 0) * 
                        (np.real(wstab) >= 0)])
        #print('wstab', wstab)

        # and find the value of the 2nd derivative there, needs to be positive
        wstabplus = np.polyval(np.polyder(test_wstab), wstab)
        #print('wstabplus', wstabplus)
        wstab = np.real(wstab[(np.imag(wstab) == 0) * (wstab > epsw) *
                              (wstabplus > 0.)])
        #print('wstab', wstab)
        wstab.sort()

    else:
        # Frequency-data path: bracket crossings on the sampled omega grid,
        # then refine each with a scalar root/minimum search.
        # a bit coarse, have the interpolated frd evaluated again
        def mod(w):
            """to give the function to calculate |G(jw)| = 1"""
            return np.abs(sys.evalfr(w)[0][0]) - 1

        def arg(w):
            """function to calculate the phase angle at -180 deg"""
            return np.angle(-sys.evalfr(w)[0][0])

        def dstab(w):
            """function to calculate the distance from -1 point"""
            return np.abs(sys.evalfr(w)[0][0] + 1.)

        # Find all crossings, note that this depends on omega having
        # a correct range
        widx = np.where(np.diff(np.sign(mod(sys.omega))))[0]
        wc = np.array(
            [ sp.optimize.brentq(mod, sys.omega[i], sys.omega[i+1])
              for i in widx if i+1 < len(sys.omega)])
        
        # find the phase crossings ang(H(jw) == -180
        widx = np.where(np.diff(np.sign(arg(sys.omega))))[0]
        #print('widx (180)', widx, sys.omega[widx])
        #print('x', sys.evalfr(sys.omega[widx])[0][0])
        widx = widx[np.real(sys.evalfr(sys.omega[widx])[0][0]) <= 0]
        #print('widx (180,2)', widx)
        w_180 = np.array(
            [ sp.optimize.brentq(arg, sys.omega[i], sys.omega[i+1])
              for i in widx if i+1 < len(sys.omega) ])
        #print('x', sys.evalfr(w_180)[0][0])
        #print('w_180', w_180)

        # find all stab margins?
        widx = np.where(np.diff(np.sign(np.diff(dstab(sys.omega)))))[0]
        #print('widx', widx)
        #print('wstabx', sys.omega[widx])
        wstab = np.array([ sp.optimize.minimize_scalar(
                  dstab, bracket=(sys.omega[i], sys.omega[i+1])).x
              for i in widx if i+1 < len(sys.omega) and
              np.diff(np.diff(dstab(sys.omega[i-1:i+2])))[0] > 0 ])
        #print('wstabf0', wstab)
        wstab = wstab[(wstab >= sys.omega[0]) * 
                      (wstab <= sys.omega[-1])]
        #print ('wstabf', wstab)
        

    # margins, as iterables, converted frdata and xferfcn calculations to
    # vector for this
    GM = 1/np.abs(sys.evalfr(w_180)[0][0])
    SM = np.abs(sys.evalfr(wstab)[0][0]+1)
    PM = np.angle(sys.evalfr(wc)[0][0], deg=True) + 180
    
    if returnall:
        return GM, PM, SM, w_180, wc, wstab
    else:
        # For each margin, return the minimum and its crossover frequency;
        # the `(shape or None) and ...` idiom yields None for empty arrays.
        return (
            (GM.shape[0] or None) and np.amin(GM),
            (PM.shape[0] or None) and np.amin(PM),
            (SM.shape[0] or None) and np.amin(SM),
            (w_180.shape[0] or None) and w_180[GM==np.amin(GM)][0],
            (wc.shape[0] or None) and wc[PM==np.amin(PM)][0],
            (wstab.shape[0] or None) and wstab[SM==np.amin(SM)][0])

Example 16

Project: folium
Source File: image_overlay.py
View license
def mercator_transform(data, lat_bounds, origin='upper', height_out=None):
    """Reproject an image from (longitude, latitude) into Web Mercator.

    Parameters
    ----------
    data: numpy array or equivalent list-like object.
        Must be NxM (mono), NxMx3 (RGB) or NxMx4 (RGBA)

    lat_bounds : length 2 tuple
        Minimal and maximal value of the latitude of the image.
        Bounds must be between -85.051128779806589 and 85.051128779806589
        otherwise they will be clipped to that values.

    origin : ['upper' | 'lower'], optional, default 'upper'
        Place the [0,0] index of the array in the upper left or lower left
        corner of the axes.

    height_out : int, default None
        The expected height of the output.
        If None, the height of the input is used.

    See https://en.wikipedia.org/wiki/Web_Mercator for more details.
    """
    import numpy as np

    def mercator(x):
        # Latitude (degrees) -> Web-Mercator y coordinate (degrees).
        return np.arcsinh(np.tan(x*np.pi/180.))*180./np.pi

    img = np.atleast_3d(data).copy()
    height, width, nblayers = img.shape

    # Clip latitude bounds to the Web-Mercator validity range.
    lat_min = max(lat_bounds[0], -85.051128779806589)
    lat_max = min(lat_bounds[1], 85.051128779806589)
    if height_out is None:
        height_out = height

    # Work with row 0 at the bottom; flip now, flip back at the end.
    if origin == 'upper':
        img = img[::-1, :, :]

    # Pixel-center latitudes of the source rows, and the latitudes that
    # correspond to evenly spaced rows in Mercator space for the output.
    src_lats = (lat_min + np.linspace(0.5/height, 1.-0.5/height, height) *
                (lat_max-lat_min))
    dst_lats_merc = (mercator(lat_min) +
                     np.linspace(0.5/height_out, 1.-0.5/height_out, height_out) *
                     (mercator(lat_max)-mercator(lat_min)))

    # Resample every column of every layer onto the Mercator row grid.
    out = np.zeros((height_out, width, nblayers))
    for col in range(width):
        for layer in range(nblayers):
            out[:, col, layer] = np.interp(dst_lats_merc, mercator(src_lats),
                                           img[:, col, layer])

    if origin == 'upper':
        out = out[::-1, :, :]
    return out

Example 17

Project: pyorbital
Source File: orbital.py
View license
    def __init__(self, orbit_elements):
        """Initialize the SGP4/SDP4 propagator constants from orbital elements.

        Validates the element ranges, recovers the original mean motion and
        semi-major axis, selects the propagation mode (near-earth vs
        deep-space), and precomputes the drag/secular coefficients used
        during propagation.  Relies on module-level geophysical constants
        (CK2, CK4, XKE, XJ3, AE, XKMPER, XMNPDA, QOMS2T, KS, A3OVK2, ...).
        """
        self.mode = None

        perigee = orbit_elements.perigee
        self.eo = orbit_elements.excentricity
        self.xincl = orbit_elements.inclination
        self.xno = orbit_elements.original_mean_motion
        # NOTE(review): k_2, k_4, k_e and A30 below are assigned but never
        # used in this method (A3OVK2 is used instead of A30).
        k_2 = CK2
        k_4 = CK4
        k_e = XKE
        self.bstar = orbit_elements.bstar
        self.omegao = orbit_elements.arg_perigee
        self.xmo = orbit_elements.mean_anomaly
        self.xnodeo = orbit_elements.right_ascension
        self.t_0 = orbit_elements.epoch
        self.xn_0 = orbit_elements.mean_motion
        A30 = -XJ3 * AE**3

        # Range checks: eccentricity, mean motion and inclination must be
        # physically sensible before any further computation.
        if not(0 < self.eo < ECC_LIMIT_HIGH):
            raise OrbitalError('Eccentricity out of range: %e' % self.eo)
        elif not((0.0035 * 2 * np.pi / XMNPDA) < self.xn_0 < (18 * 2 * np.pi / XMNPDA)):
            raise OrbitalError('Mean motion out of range: %e' % self.xn_0)
        elif not(0 < self.xincl < np.pi):
            raise OrbitalError('Inclination out of range: %e' % self.xincl)

        # NOTE(review): unreachable — the check above already guarantees
        # self.eo > 0, so the zero-eccentricity mode is never selected here.
        if self.eo < 0:
            self.mode = self.SGDP4_ZERO_ECC
            return

        # Frequently used powers of cos(inclination).
        self.cosIO = np.cos(self.xincl)
        self.sinIO = np.sin(self.xincl)
        theta2 = self.cosIO**2
        theta4 = theta2 ** 2
        self.x3thm1 = 3.0 * theta2 - 1.0
        self.x1mth2 = 1.0 - theta2
        self.x7thm1 = 7.0 * theta2 - 1.0

        # Recover original semi-major axis and mean motion ("un-Kozai"),
        # correcting for the J2 perturbation.
        a1 = (XKE / self.xn_0) ** (2. / 3)
        betao2 = 1.0 - self.eo**2
        betao = np.sqrt(betao2)
        temp0 = 1.5 * CK2 * self.x3thm1 / (betao * betao2)
        del1 = temp0 / (a1**2)
        a0 = a1 * \
            (1.0 - del1 * (1.0 / 3.0 + del1 * (1.0 + del1 * 134.0 / 81.0)))
        del0 = temp0 / (a0**2)
        self.xnodp = self.xn_0 / (1.0 + del0)
        self.aodp = (a0 / (1.0 - del0))
        self.perigee = (self.aodp * (1.0 - self.eo) - AE) * XKMPER
        self.apogee = (self.aodp * (1.0 + self.eo) - AE) * XKMPER
        self.period = (2 * np.pi * 1440.0 / XMNPDA) / self.xnodp

        # Select the propagation model based on orbital period and perigee.
        if self.period >= 225:
            # Deep-Space model
            self.mode = SGDP4_DEEP_NORM
        elif self.perigee < 220:
            # Near-space, simplified equations
            self.mode = SGDP4_NEAR_SIMP
        else:
            # Near-space, normal equations
            self.mode = SGDP4_NEAR_NORM

        # For very low perigees, adjust the s4/qoms24 drag parameters.
        if self.perigee < 156:
            s4 = self.perigee - 78
            if s4 < 20:
                s4 = 20

            qoms24 = ((120 - s4) * (AE / XKMPER))**4
            s4 = (s4 / XKMPER + AE)
        else:
            s4 = KS
            qoms24 = QOMS2T

        # Drag coefficients (c1..c5) of the SGP4 model.
        pinvsq = 1.0 / (self.aodp**2 * betao2**2)
        tsi = 1.0 / (self.aodp - s4)
        self.eta = self.aodp * self.eo * tsi
        etasq = self.eta**2
        eeta = self.eo * self.eta
        psisq = np.abs(1.0 - etasq)
        coef = qoms24 * tsi**4
        coef_1 = coef / psisq**3.5

        self.c2 = (coef_1 * self.xnodp * (self.aodp *
                                          (1.0 + 1.5 * etasq + eeta * (4.0 + etasq)) +
                                          (0.75 * CK2) * tsi / psisq * self.x3thm1 *
                                          (8.0 + 3.0 * etasq * (8.0 + etasq))))

        self.c1 = self.bstar * self.c2

        self.c4 = (2.0 * self.xnodp * coef_1 * self.aodp * betao2 * (self.eta *
                                                                     (2.0 + 0.5 * etasq) + self.eo * (0.5 + 2.0 *
                                                                                                      etasq) - (2.0 * CK2) * tsi / (self.aodp * psisq) * (-3.0 *
                                                                                                                                                          self.x3thm1 * (1.0 - 2.0 * eeta + etasq *
                                                                                                                                                                         (1.5 - 0.5 * eeta)) + 0.75 * self.x1mth2 * (2.0 *
                                                                                                                                                                                                                     etasq - eeta * (1.0 + etasq)) * np.cos(2.0 * self.omegao))))

        self.c5, self.c3, self.omgcof = 0.0, 0.0, 0.0

        if self.mode == SGDP4_NEAR_NORM:
            self.c5 = (2.0 * coef_1 * self.aodp * betao2 *
                       (1.0 + 2.75 * (etasq + eeta) + eeta * etasq))
            if self.eo > ECC_ALL:
                self.c3 = coef * tsi * A3OVK2 * \
                    self.xnodp * AE * self.sinIO / self.eo
            self.omgcof = self.bstar * self.c3 * np.cos(self.omegao)

        # Secular rates of mean anomaly, argument of perigee and node.
        temp1 = 3.0 * CK2 * pinvsq * self.xnodp
        temp2 = temp1 * CK2 * pinvsq
        temp3 = 1.25 * CK4 * pinvsq**2 * self.xnodp

        self.xmdot = (self.xnodp + (0.5 * temp1 * betao * self.x3thm1 + 0.0625 *
                                    temp2 * betao * (13.0 - 78.0 * theta2 +
                                                     137.0 * theta4)))

        x1m5th = 1.0 - 5.0 * theta2

        self.omgdot = (-0.5 * temp1 * x1m5th + 0.0625 * temp2 *
                       (7.0 - 114.0 * theta2 + 395.0 * theta4) +
                       temp3 * (3.0 - 36.0 * theta2 + 49.0 * theta4))

        xhdot1 = -temp1 * self.cosIO
        self.xnodot = (xhdot1 + (0.5 * temp2 * (4.0 - 19.0 * theta2) +
                                 2.0 * temp3 * (3.0 - 7.0 * theta2)) * self.cosIO)

        if self.eo > ECC_ALL:
            self.xmcof = (-(2. / 3) * AE) * coef * self.bstar / eeta
        else:
            self.xmcof = 0.0

        self.xnodcf = 3.5 * betao2 * xhdot1 * self.c1
        self.t2cof = 1.5 * self.c1

        # Check for possible divide-by-zero for X/(1+cos(xincl)) when
        # calculating xlcof */
        temp0 = 1.0 + self.cosIO
        if np.abs(temp0) < EPS_COS:
            temp0 = np.sign(temp0) * EPS_COS

        self.xlcof = 0.125 * A3OVK2 * self.sinIO * \
            (3.0 + 5.0 * self.cosIO) / temp0

        self.aycof = 0.25 * A3OVK2 * self.sinIO

        self.cosXMO = np.cos(self.xmo)
        self.sinXMO = np.sin(self.xmo)
        self.delmo = (1.0 + self.eta * self.cosXMO)**3

        # Higher-order drag terms, only needed for the near-earth model.
        if self.mode == SGDP4_NEAR_NORM:
            c1sq = self.c1**2
            self.d2 = 4.0 * self.aodp * tsi * c1sq
            temp0 = self.d2 * tsi * self.c1 / 3.0
            self.d3 = (17.0 * self.aodp + s4) * temp0
            self.d4 = 0.5 * temp0 * self.aodp * tsi * \
                (221.0 * self.aodp + 31.0 * s4) * self.c1
            self.t3cof = self.d2 + 2.0 * c1sq
            self.t4cof = 0.25 * \
                (3.0 * self.d3 + self.c1 * (12.0 * self.d2 + 10.0 * c1sq))
            self.t5cof = (0.2 * (3.0 * self.d4 + 12.0 * self.c1 * self.d3 + 6.0 * self.d2**2 +
                                 15.0 * c1sq * (2.0 * self.d2 + c1sq)))

        elif self.mode == SGDP4_DEEP_NORM:
            raise NotImplementedError('Deep space calculations not supported')
Example 18

Project: qutip
Source File: grape.py
View license
def grape_unitary(U, H0, H_ops, R, times, eps=None, u_start=None,
                  u_limits=None, interp_kind='linear', use_interp=False,
                  alpha=None, beta=None, phase_sensitive=True,
                  progress_bar=BaseProgressBar()):
    """
    Calculate control pulses for the Hamiltonian operators in H_ops so that the
    unitary U is realized.

    Experimental: Work in progress.

    Parameters
    ----------
    U : Qobj
        Target unitary evolution operator.

    H0 : Qobj
        Static Hamiltonian (that cannot be tuned by the control fields).

    H_ops: list of Qobj
        A list of operators that can be tuned in the Hamiltonian via the
        control fields.

    R : int
        Number of GRAPE iterations.

    times : array / list
        Array of time coordinates for control pulse evaluation.

    eps : float, optional
        GRAPE step size.  Defaults to ``0.1 * 2*pi / times[-1]``.

    u_start : array, optional
        Optional array with initial control pulse values.

    u_limits : list of two floats, optional
        Lower and upper bound to clip the control amplitudes to
        (experimental).

    interp_kind : str
        Interpolation kind passed to ``scipy.interpolate.interp1d`` when
        `use_interp` is True.

    use_interp : bool
        If True, evaluate the controls through an interpolating function
        rather than piecewise-constant samples.

    alpha : float, optional
        If given, weight of a penalty term on high control power.

    beta : float, optional
        If given, weight of a penalty term on late control signals
        (experimental).

    phase_sensitive : bool
        Whether the overlap used for the gradient is phase sensitive.

    progress_bar : BaseProgressBar
        Progress-bar instance updated once per GRAPE iteration.

    Returns
    -------
        Instance of GRAPEResult, which contains the control pulses calculated
        with GRAPE, a time-dependent Hamiltonian that is defined by the
        control pulses, as well as the resulting propagator.
    """

    if eps is None:
        # Default step size scaled to the total pulse duration.
        eps = 0.1 * (2 * np.pi) / (times[-1])

    M = len(times)
    J = len(H_ops)

    # u[r, j, m]: control amplitude of operator j at time m in iteration r.
    u = np.zeros((R, J, M))

    if u_limits and len(u_limits) != 2:
        raise ValueError("u_limits must be a list with two values")

    if u_limits:
        warnings.warn("Caution: Using experimental feature u_limits")

    if u_limits and u_start:
        # make sure that no values in u0 violates the u_limits conditions
        u_start = np.array(u_start)
        u_start[u_start < u_limits[0]] = u_limits[0]
        u_start[u_start > u_limits[1]] = u_limits[1]

    if u_start is not None:
        for idx, u0 in enumerate(u_start):
            u[0, idx, :] = u0

    if beta:
        # BUG FIX: warning message previously misspelled "Caution".
        warnings.warn("Caution: Using experimental feature time-penalty")

    # Assumes a uniform time grid; loop-invariant, so computed once here
    # (previously recomputed in every GRAPE iteration).
    dt = times[1] - times[0]

    progress_bar.start(R)
    for r in range(R - 1):
        progress_bar.update(r)

        # Build the short-time propagators for the current controls.
        if use_interp:
            ip_funcs = [interp1d(times, u[r, j, :], kind=interp_kind,
                                 bounds_error=False, fill_value=u[r, j, -1])
                        for j in range(J)]

            def _H_t(t, args=None):
                return H0 + sum([float(ip_funcs[j](t)) * H_ops[j]
                                 for j in range(J)])

            U_list = [(-1j * _H_t(times[idx]) * dt).expm()
                      for idx in range(M-1)]

        else:
            def _H_idx(idx):
                return H0 + sum([u[r, j, idx] * H_ops[j] for j in range(J)])

            U_list = [(-1j * _H_idx(idx) * dt).expm() for idx in range(M-1)]

        # Forward (U_f) and backward (U_b) cumulative propagators.
        U_f_list = []
        U_b_list = []

        U_f = 1
        U_b = 1
        for n in range(M - 1):

            U_f = U_list[n] * U_f
            U_f_list.append(U_f)

            U_b_list.insert(0, U_b)
            U_b = U_list[M - 2 - n].dag() * U_b

        # Gradient ascent update of each control at each time slice.
        for j in range(J):
            for m in range(M-1):
                P = U_b_list[m] * U
                Q = 1j * dt * H_ops[j] * U_f_list[m]

                if phase_sensitive:
                    du = - _overlap(P, Q)
                else:
                    du = - 2 * _overlap(P, Q) * _overlap(U_f_list[m], P)

                if alpha:
                    # penalty term for high power control signals u
                    du += -2 * alpha * u[r, j, m] * dt

                if beta:
                    # penalty term for late control signals u
                    du += -2 * beta * m * u[r, j, m] * dt

                u[r + 1, j, m] = u[r, j, m] + eps * du.real

                # Clip updated amplitudes to the experimental limits.
                if u_limits:
                    if u[r + 1, j, m] < u_limits[0]:
                        u[r + 1, j, m] = u_limits[0]
                    elif u[r + 1, j, m] > u_limits[1]:
                        u[r + 1, j, m] = u_limits[1]

            u[r + 1, j, -1] = u[r + 1, j, -2]

    # Package the final controls as a QuTiP time-dependent Hamiltonian spec.
    if use_interp:
        ip_funcs = [interp1d(times, u[R - 1, j, :], kind=interp_kind,
                             bounds_error=False, fill_value=u[R - 1, j, -1])
                    for j in range(J)]

        H_td_func = [H0] + [[H_ops[j], lambda t, args, j=j: ip_funcs[j](t)]
                            for j in range(J)]
    else:
        H_td_func = [H0] + [[H_ops[j], u[-1, j, :]] for j in range(J)]

    progress_bar.finished()

    # return U_f_list[-1], H_td_func, u
    return GRAPEResult(u=u, U_f=U_f_list[-1], H_t=H_td_func)

Example 19

Project: qutip
Source File: grape.py
View license
def cy_grape_unitary(U, H0, H_ops, R, times, eps=None, u_start=None,
                     u_limits=None, interp_kind='linear', use_interp=False,
                     alpha=None, beta=None, phase_sensitive=True,
                     progress_bar=BaseProgressBar()):
    """
    Calculate control pulses for the Hamiltonian operators in H_ops so that
    the unitary U is realized, using a Cython-accelerated inner loop
    (``cy_grape_inner``).

    Experimental: Work in progress.

    Parameters
    ----------
    U : Qobj
        Target unitary evolution operator.

    H0 : Qobj
        Static Hamiltonian (that cannot be tuned by the control fields).

    H_ops : list of Qobj
        A list of operators that can be tuned in the Hamiltonian via the
        control fields.

    R : int
        Number of GRAPE iterations.

    times : array / list
        Array of time coordinates for control pulse evaluation.

    eps : float, optional
        GRAPE gradient step size. Defaults to ``0.1 * 2 * pi / T`` where
        ``T`` is the total pulse duration.

    u_start : array, optional
        Optional array with initial control pulse values.

    u_limits : list of two floats, optional
        Lower and upper bounds imposed on the control amplitudes
        (experimental).

    interp_kind : str
        Interpolation type passed to ``scipy.interpolate.interp1d``; only
        used when ``use_interp`` is True.

    use_interp : bool
        If True, interpolate the piecewise-constant controls in time when
        constructing the time-slice propagators.

    alpha : float, optional
        Penalty coefficient discouraging high-power control signals.

    beta : float, optional
        Penalty coefficient discouraging late control signals
        (experimental).

    phase_sensitive : bool
        Whether the fidelity measure is sensitive to the global phase.

    progress_bar : BaseProgressBar
        Progress bar instance used to report iteration progress.

    Returns
    -------
        Instance of GRAPEResult, which contains the control pulses calculated
        with GRAPE, a time-dependent Hamiltonian that is defined by the
        control pulses, as well as the resulting propagator.
    """

    if eps is None:
        # default gradient step size scaled to the total evolution time
        eps = 0.1 * (2 * np.pi) / (times[-1])

    M = len(times)
    J = len(H_ops)

    # control amplitudes: iteration x control operator x time slot
    u = np.zeros((R, J, M))

    H_ops_data = [H_op.data for H_op in H_ops]

    if u_limits and len(u_limits) != 2:
        raise ValueError("u_limits must be a list with two values")

    if u_limits:
        warnings.warn("Caution: Using experimental feature u_limits")

    if u_limits and u_start:
        # make sure that no values in u0 violates the u_limits conditions
        u_start = np.array(u_start)
        u_start[u_start < u_limits[0]] = u_limits[0]
        u_start[u_start > u_limits[1]] = u_limits[1]

    # unpack the limits into scalars for the Cython inner loop
    if u_limits:
        use_u_limits = 1
        u_min = u_limits[0]
        u_max = u_limits[1]
    else:
        use_u_limits = 0
        u_min = 0.0
        u_max = 0.0

    if u_start is not None:
        for idx, u0 in enumerate(u_start):
            u[0, idx, :] = u0

    if beta:
        warnings.warn("Caution: Using experimental feature time-penalty")

    # the Cython kernel expects plain floats, not None
    alpha_val = alpha if alpha else 0.0
    beta_val = beta if beta else 0.0

    progress_bar.start(R)
    for r in range(R - 1):
        progress_bar.update(r)

        dt = times[1] - times[0]

        # build the propagator for each time slice from the current controls
        if use_interp:
            ip_funcs = [interp1d(times, u[r, j, :], kind=interp_kind,
                                 bounds_error=False, fill_value=u[r, j, -1])
                        for j in range(J)]

            def _H_t(t, args=None):
                return H0 + sum([float(ip_funcs[j](t)) * H_ops[j]
                                 for j in range(J)])

            U_list = [(-1j * _H_t(times[idx]) * dt).expm().data
                      for idx in range(M-1)]

        else:
            def _H_idx(idx):
                return H0 + sum([u[r, j, idx] * H_ops[j] for j in range(J)])

            U_list = [(-1j * _H_idx(idx) * dt).expm().data
                      for idx in range(M-1)]

        # forward and backward cumulative propagators for the gradient
        U_f_list = []
        U_b_list = []

        U_f = 1
        U_b = sp.eye(*(U.shape))
        for n in range(M - 1):

            U_f = U_list[n] * U_f
            U_f_list.append(U_f)

            U_b_list.insert(0, U_b)
            U_b = U_list[M - 2 - n].T.conj().tocsr() * U_b

        # gradient ascent step over all controls and time slots (Cython)
        cy_grape_inner(U.data, u, r, J, M, U_b_list, U_f_list, H_ops_data,
                       dt, eps, alpha_val, beta_val, phase_sensitive,
                       use_u_limits, u_min, u_max)

    # package the final controls as a qutip time-dependent Hamiltonian
    if use_interp:
        ip_funcs = [interp1d(times, u[R - 1, j, :], kind=interp_kind,
                             bounds_error=False, fill_value=u[R - 1, j, -1])
                    for j in range(J)]

        H_td_func = [H0] + [[H_ops[j], lambda t, args, j=j: ip_funcs[j](t)]
                            for j in range(J)]
    else:
        H_td_func = [H0] + [[H_ops[j], u[-1, j, :]] for j in range(J)]

    progress_bar.finished()

    return GRAPEResult(u=u, U_f=Qobj(U_f_list[-1], dims=U.dims),
                       H_t=H_td_func)

Example 20

Project: qutip
Source File: grape.py
View license
def grape_unitary_adaptive(U, H0, H_ops, R, times, eps=None, u_start=None,
                           u_limits=None, interp_kind='linear',
                           use_interp=False, alpha=None, beta=None,
                           phase_sensitive=False, overlap_terminate=1.0,
                           progress_bar=BaseProgressBar()):
    """
    Calculate control pulses for the Hamiltonian operators in H_ops so that
    the unitary U is realized, using an adaptive GRAPE step size.

    Each iteration tries three candidate step sizes (eps/2, eps, 2*eps),
    keeps the one giving the best overlap with the target, and rescales the
    step-size vector toward the winner.

    Experimental: Work in progress.

    Parameters
    ----------
    U : Qobj
        Target unitary evolution operator.

    H0 : Qobj
        Static Hamiltonian (that cannot be tuned by the control fields).

    H_ops : list of Qobj
        A list of operators that can be tuned in the Hamiltonian via the
        control fields.

    R : int
        Number of GRAPE iterations.

    times : array / list
        Array of time coordinates for control pulse evaluation.

    u_start : array, optional
        Optional array with initial control pulse values.

    overlap_terminate : float
        Terminate early once the overlap with the target exceeds this value
        (ignored when left at 1.0).

    Returns
    -------
        Instance of GRAPEResult, which contains the control pulses calculated
        with GRAPE, a time-dependent Hamiltonian that is defined by the
        control pulses, as well as the resulting propagator. The result also
        carries per-iteration logs of the step size (``eps``) and overlap
        (``overlap``).
    """

    if eps is None:
        # default gradient step size scaled to the total evolution time
        eps = 0.1 * (2 * np.pi) / (times[-1])

    # candidate step sizes tried every iteration: half, current, double
    eps_vec = np.array([eps / 2, eps, 2 * eps])
    eps_log = np.zeros(R)
    overlap_log = np.zeros(R)

    best_k = 0
    _k_overlap = np.array([0.0, 0.0, 0.0])

    M = len(times)
    J = len(H_ops)
    K = len(eps_vec)
    Uf = [None for _ in range(K)]

    # controls: iteration x control op x time slot x candidate step size
    u = np.zeros((R, J, M, K))

    if u_limits and len(u_limits) != 2:
        raise ValueError("u_limits must be a list with two values")

    if u_limits:
        warnings.warn("Caution: Using experimental feature u_limits")

    if u_limits and u_start:
        # make sure that no values in u0 violates the u_limits conditions
        u_start = np.array(u_start)
        u_start[u_start < u_limits[0]] = u_limits[0]
        u_start[u_start > u_limits[1]] = u_limits[1]

    if u_start is not None:
        for idx, u0 in enumerate(u_start):
            for k in range(K):
                u[0, idx, :, k] = u0

    if beta:
        warnings.warn("Caution: Using experimental feature time-penalty")

    if phase_sensitive:
        _fidelity_function = lambda x: x
    else:
        _fidelity_function = lambda x: abs(x) ** 2

    best_k = 1
    _r = 0
    _prev_overlap = 0

    progress_bar.start(R)
    for r in range(R - 1):
        progress_bar.update(r)

        _r = r
        eps_log[r] = eps_vec[best_k]

        logger.debug("eps_vec: {}".format(eps_vec))

        _t0 = time.time()

        dt = times[1] - times[0]

        # propagators for each time slice using the current best controls
        if use_interp:
            ip_funcs = [interp1d(times, u[r, j, :, best_k], kind=interp_kind,
                                 bounds_error=False,
                                 fill_value=u[r, j, -1, best_k])
                        for j in range(J)]

            def _H_t(t, args=None):
                return H0 + sum([float(ip_funcs[j](t)) * H_ops[j]
                                 for j in range(J)])

            U_list = [(-1j * _H_t(times[idx]) * dt).expm()
                      for idx in range(M-1)]

        else:
            def _H_idx(idx):
                return H0 + sum([u[r, j, idx, best_k] * H_ops[j]
                                 for j in range(J)])

            U_list = [(-1j * _H_idx(idx) * dt).expm() for idx in range(M-1)]

        logger.debug("Time 1: %fs" % (time.time() - _t0))
        _t0 = time.time()

        # forward and backward cumulative propagators for the gradient
        U_f_list = []
        U_b_list = []

        U_f = 1
        U_b = 1
        for m in range(M - 1):

            U_f = U_list[m] * U_f
            U_f_list.append(U_f)

            U_b_list.insert(0, U_b)
            U_b = U_list[M - 2 - m].dag() * U_b

        logger.debug("Time 2: %fs" % (time.time() - _t0))
        _t0 = time.time()

        for j in range(J):
            for m in range(M-1):
                P = U_b_list[m] * U
                Q = 1j * dt * H_ops[j] * U_f_list[m]

                if phase_sensitive:
                    du = - cy_overlap(P.data, Q.data)
                else:
                    du = (- 2 * cy_overlap(P.data, Q.data) *
                          cy_overlap(U_f_list[m].data, P.data))

                if alpha:
                    # penalty term for high power control signals u
                    du += -2 * alpha * u[r, j, m, best_k] * dt

                if beta:
                    # penalty term for late control signals u
                    # (fixed: previously indexed with the stale loop variable
                    # `k` -- `k ** 2 * u[r, j, k]` -- instead of the time
                    # slot `m`, matching grape_unitary's penalty term)
                    du += -2 * beta * m * u[r, j, m, best_k] * dt

                # apply the same gradient with each candidate step size
                for k, eps_val in enumerate(eps_vec):
                    u[r + 1, j, m, k] = u[r, j, m, k] + eps_val * du.real

                    if u_limits:
                        if u[r + 1, j, m, k] < u_limits[0]:
                            u[r + 1, j, m, k] = u_limits[0]
                        elif u[r + 1, j, m, k] > u_limits[1]:
                            u[r + 1, j, m, k] = u_limits[1]

            u[r + 1, j, -1, :] = u[r + 1, j, -2, :]

        logger.debug("Time 3: %fs" % (time.time() - _t0))
        _t0 = time.time()

        # evaluate the overlap achieved by each candidate step size
        for k, eps_val in enumerate(eps_vec):

            def _H_idx(idx):
                return H0 + sum([u[r + 1, j, idx, k] * H_ops[j]
                                 for j in range(J)])

            U_list = [(-1j * _H_idx(idx) * dt).expm() for idx in range(M-1)]

            Uf[k] = gate_sequence_product(U_list)
            _k_overlap[k] = _fidelity_function(cy_overlap(Uf[k].data,
                                                          U.data)).real

        best_k = np.argmax(_k_overlap)
        # fixed: logger.debug takes %-style format args, not print-style
        # positional values (the old call raised on formatting)
        logger.debug("k_overlap: %s, %s", _k_overlap, best_k)

        if _prev_overlap > _k_overlap[best_k]:
            logger.debug("Regression, stepping back with smaller eps.")

            u[r + 1, :, :, :] = u[r, :, :, :]
            eps_vec /= 2
        else:

            # rescale the candidate step sizes toward the winner
            if best_k == 0:
                eps_vec /= 2

            elif best_k == 2:
                eps_vec *= 2

            _prev_overlap = _k_overlap[best_k]

        overlap_log[r] = _k_overlap[best_k]

        if overlap_terminate < 1.0:
            if _k_overlap[best_k] > overlap_terminate:
                logger.info("Reached target fidelity, terminating.")
                break

        logger.debug("Time 4: %fs" % (time.time() - _t0))
        _t0 = time.time()

    # package the final controls as a qutip time-dependent Hamiltonian
    if use_interp:
        # fixed: fill_value previously used the 3-D index u[R - 1, j, -1],
        # which yields a length-K array on this 4-D u and ignored the
        # early-termination iteration _r; use the scalar last sample of the
        # winning candidate instead, consistent with the interp1d data.
        ip_funcs = [interp1d(times, u[_r, j, :, best_k], kind=interp_kind,
                             bounds_error=False,
                             fill_value=u[_r, j, -1, best_k])
                    for j in range(J)]

        H_td_func = [H0] + [[H_ops[j], lambda t, args, j=j: ip_funcs[j](t)]
                            for j in range(J)]
    else:
        H_td_func = [H0] + [[H_ops[j], u[_r, j, :, best_k]] for j in range(J)]

    progress_bar.finished()

    result = GRAPEResult(u=u[:_r, :, :, best_k], U_f=Uf[best_k],
                         H_t=H_td_func)

    result.eps = eps_log
    result.overlap = overlap_log

    return result

Example 21

Project: qutip
Source File: circuit.py
View license
    def resolve_gates(self, basis=["CNOT", "RX", "RY", "RZ"]):
        """
        Unitary matrix calculator for N qubits returning the individual
        steps as unitary matrices operating from left to right in the specified
        basis.

        Parameters
        ----------
        basis : list.
            Basis of the resolved circuit.

        Returns
        -------
        qc : QubitCircuit
            Returns QubitCircuit of resolved gates for the qubit circuit in the
            desired basis.
        
        """
        qc_temp = QubitCircuit(self.N, self.reverse_states)
        temp_resolved = []

        basis_1q = []
        basis_2q = None

        basis_1q_valid = ["RX", "RY", "RZ"]
        basis_2q_valid = ["CNOT", "CSIGN", "ISWAP", "SQRTSWAP", "SQRTISWAP"]

        if isinstance(basis, list):
            for gate in basis:
                if gate not in (basis_1q_valid + basis_2q_valid):
                    raise ValueError("%s is not a valid basis gate" % gate)

                if gate in basis_2q_valid:
                    if basis_2q is not None:
                        raise ValueError("At most one two-qubit gate allowed")
                    basis_2q = gate

                else:
                    basis_1q.append(gate)

            if len(basis_1q) == 1:
                raise ValueError("Not sufficient single-qubit gates in basis")
            elif len(basis_1q) == 0:
                basis_1q = ["RX", "RY", "RZ"]

        else:
            basis_1q = ["RX", "RY", "RZ"]
            if basis in basis_2q_valid:
                basis_2q = basis
            else:
                raise ValueError("%s is not a valid two-qubit basis gate"
                                 % basis)

        for gate in self.gates:
            if gate.name == "RX":
                temp_resolved.append(gate)
            elif gate.name == "RY":
                temp_resolved.append(gate)
            elif gate.name == "RZ":
                temp_resolved.append(gate)
            elif gate.name == "SQRTNOT":
                temp_resolved.append(Gate("GLOBALPHASE", None, None,
                                          arg_value=np.pi / 4,
                                          arg_label=r"\pi/4"))
                temp_resolved.append(Gate("RX", gate.targets, None,
                                          arg_value=np.pi / 2,
                                          arg_label=r"\pi/2"))
            elif gate.name == "SNOT":
                temp_resolved.append(Gate("GLOBALPHASE", None, None,
                                          arg_value=np.pi / 2,
                                          arg_label=r"\pi/2"))
                temp_resolved.append(Gate("RX", gate.targets, None,
                                          arg_value=np.pi, arg_label=r"\pi"))
                temp_resolved.append(Gate("RY", gate.targets, None,
                                          arg_value=np.pi / 2,
                                          arg_label=r"\pi/2"))
            elif gate.name == "PHASEGATE":
                temp_resolved.append(Gate("GLOBALPHASE", None, None,
                                          arg_value=gate.arg_value / 2,
                                          arg_label=gate.arg_label))
                temp_resolved.append(Gate("RZ", gate.targets, None,
                                          gate.arg_value, gate.arg_label))
            elif gate.name == "CPHASE":
                raise NotImplementedError("Cannot be resolved in this basis")
            elif gate.name == "CNOT":
                temp_resolved.append(gate)
            elif gate.name == "CSIGN" and basis_2q is not "CSIGN":
                temp_resolved.append(Gate("RY", gate.targets, None,
                                          arg_value=np.pi / 2,
                                          arg_label=r"\pi/2"))
                temp_resolved.append(Gate("RX", gate.targets, None,
                                          arg_value=np.pi, arg_label=r"\pi"))
                temp_resolved.append(Gate("CNOT", gate.targets, gate.controls))
                temp_resolved.append(Gate("RY", gate.targets, None,
                                          arg_value=np.pi / 2,
                                          arg_label=r"\pi/2"))
                temp_resolved.append(Gate("RX", gate.targets, None,
                                          arg_value=np.pi, arg_label=r"\pi"))
                temp_resolved.append(Gate("GLOBALPHASE", None, None,
                                          arg_value=np.pi, arg_label=r"\pi"))
            elif gate.name == "BERKELEY":
                raise NotImplementedError("Cannot be resolved in this basis")
            elif gate.name == "SWAPalpha":
                raise NotImplementedError("Cannot be resolved in this basis")
            elif gate.name == "SWAP" and basis_2q is not "ISWAP":
                temp_resolved.append(Gate("CNOT", gate.targets[0],
                                          gate.targets[1]))
                temp_resolved.append(Gate("CNOT", gate.targets[1],
                                          gate.targets[0]))
                temp_resolved.append(Gate("CNOT", gate.targets[0],
                                          gate.targets[1]))
            elif gate.name == "ISWAP" and basis_2q is not "ISWAP":
                temp_resolved.append(Gate("CNOT", gate.targets[0],
                                          gate.targets[1]))
                temp_resolved.append(Gate("CNOT", gate.targets[1],
                                          gate.targets[0]))
                temp_resolved.append(Gate("CNOT", gate.targets[0],
                                          gate.targets[1]))
                temp_resolved.append(Gate("RZ", gate.targets[0], None,
                                          arg_value=np.pi / 2,
                                          arg_label=r"\pi/2"))
                temp_resolved.append(Gate("RZ", gate.targets[1], None,
                                          arg_value=np.pi / 2,
                                          arg_label=r"\pi/2"))
                temp_resolved.append(Gate("RY", gate.targets[0], None,
                                          arg_value=np.pi / 2,
                                          arg_label=r"\pi/2"))
                temp_resolved.append(Gate("RX", gate.targets, None,
                                          arg_value=np.pi, arg_label=r"\pi"))
                temp_resolved.append(Gate("CNOT", gate.targets[0],
                                          gate.targets[1]))
                temp_resolved.append(Gate("RY", gate.targets[0], None,
                                          arg_value=np.pi / 2,
                                          arg_label=r"\pi/2"))
                temp_resolved.append(Gate("RX", gate.targets, None,
                                          arg_value=np.pi, arg_label=r"\pi"))
                temp_resolved.append(Gate("GLOBALPHASE", None, None,
                                          arg_value=np.pi, arg_label=r"\pi"))
                temp_resolved.append(Gate("GLOBALPHASE", None, None,
                                          arg_value=np.pi / 2,
                                          arg_label=r"\pi/2"))
            elif gate.name == "SQRTSWAP" and basis_2q not in ["SQRTSWAP",
                                                              "ISWAP"]:
                raise NotImplementedError("Cannot be resolved in this basis")
            elif gate.name == "SQRTISWAP" and basis_2q not in ["SQRTISWAP",
                                                               "ISWAP"]:
                raise NotImplementedError("Cannot be resolved in this basis")
            elif gate.name == "FREDKIN":
                temp_resolved.append(Gate("CNOT", gate.targets[0],
                                          gate.targets[1]))
                temp_resolved.append(Gate("CNOT", gate.targets[0],
                                          gate.controls))
                temp_resolved.append(Gate("RZ", gate.controls, None,
                                          arg_value=np.pi / 8,
                                          arg_label=r"\pi/8"))
                temp_resolved.append(Gate("RZ", [gate.targets[0]], None,
                                          arg_value=-np.pi / 8,
                                          arg_label=r"-\pi/8"))
                temp_resolved.append(Gate("CNOT", gate.targets[0],
                                          gate.controls))
                temp_resolved.append(Gate("GLOBALPHASE", None, None,
                                          arg_value=np.pi / 2,
                                          arg_label=r"\pi/2"))
                temp_resolved.append(Gate("RY", gate.targets[1], None,
                                          arg_value=np.pi / 2,
                                          arg_label=r"\pi/2"))
                temp_resolved.append(Gate("RY", gate.targets, None,
                                          arg_value=-np.pi / 2,
                                          arg_label=r"-\pi/2"))
                temp_resolved.append(Gate("RZ", gate.targets, None,
                                          arg_value=np.pi, arg_label=r"\pi"))
                temp_resolved.append(Gate("RY", gate.targets, None,
                                          arg_value=np.pi / 2,
                                          arg_label=r"\pi/2"))
                temp_resolved.append(Gate("RZ", gate.targets[0], None,
                                          arg_value=np.pi / 8,
                                          arg_label=r"\pi/8"))
                temp_resolved.append(Gate("RZ", gate.targets[1], None,
                                          arg_value=np.pi / 8,
                                          arg_label=r"\pi/8"))
                temp_resolved.append(Gate("CNOT", gate.targets[1],
                                          gate.controls))
                temp_resolved.append(Gate("RZ", gate.targets[1], None,
                                          arg_value=-np.pi / 8,
                                          arg_label=r"-\pi/8"))
                temp_resolved.append(Gate("CNOT", gate.targets[1],
                                          gate.targets[0]))
                temp_resolved.append(Gate("RZ", gate.targets[1], None,
                                          arg_value=np.pi / 8,
                                          arg_label=r"\pi/8"))
                temp_resolved.append(Gate("CNOT", gate.targets[1],
                                          gate.controls))
                temp_resolved.append(Gate("RZ", gate.targets[1], None,
                                          arg_value=-np.pi / 8,
                                          arg_label=r"-\pi/8"))
                temp_resolved.append(Gate("CNOT", gate.targets[1],
                                          gate.targets[0]))
                temp_resolved.append(Gate("GLOBALPHASE", None, None,
                                          arg_value=np.pi / 2,
                                          arg_label=r"\pi/2"))
                temp_resolved.append(Gate("RY", gate.targets[1], None,
                                          arg_value=np.pi / 2,
                                          arg_label=r"\pi/2"))
                temp_resolved.append(Gate("RY", gate.targets, None,
                                          arg_value=-np.pi / 2,
                                          arg_label=r"-\pi/2"))
                temp_resolved.append(Gate("RZ", gate.targets, None,
                                          arg_value=np.pi, arg_label=r"\pi"))
                temp_resolved.append(Gate("RY", gate.targets, None,
                                          arg_value=np.pi / 2,
                                          arg_label=r"\pi/2"))
                temp_resolved.append(Gate("CNOT", gate.targets[0],
                                          gate.targets[1]))

            elif gate.name == "TOFFOLI":
                temp_resolved.append(Gate("GLOBALPHASE", None, None,
                                          arg_value=1 * np.pi / 8,
                                          arg_label=r"\pi/8"))
                temp_resolved.append(Gate("RZ", gate.controls[1], None,
                                          arg_value=np.pi/2,
                                          arg_label=r"\pi/2"))
                temp_resolved.append(Gate("RZ", gate.controls[0], None,
                                          arg_value=np.pi / 4,
                                          arg_label=r"\pi/4"))
                temp_resolved.append(Gate("CNOT", gate.controls[1],
                                          gate.controls[0]))
                temp_resolved.append(Gate("RZ", gate.controls[1], None,
                                          arg_value=-np.pi / 4,
                                          arg_label=r"-\pi/4"))
                temp_resolved.append(Gate("CNOT", gate.controls[1],
                                          gate.controls[0]))
                temp_resolved.append(Gate("GLOBALPHASE", None, None,
                                          arg_value=np.pi / 2,
                                          arg_label=r"\pi/2"))
                temp_resolved.append(Gate("RY", gate.targets, None,
                                          arg_value=np.pi / 2,
                                          arg_label=r"\pi/2"))
                temp_resolved.append(Gate("RX", gate.targets, None,
                                          arg_value=np.pi, arg_label=r"\pi"))
                temp_resolved.append(Gate("RZ", gate.controls[1], None,
                                          arg_value=-np.pi / 4,
                                          arg_label=r"-\pi/4"))
                temp_resolved.append(Gate("RZ", gate.targets, None,
                                          arg_value=np.pi / 4,
                                          arg_label=r"\pi/4"))
                temp_resolved.append(Gate("CNOT", gate.targets,
                                          gate.controls[0]))
                temp_resolved.append(Gate("RZ", gate.targets, None,
                                          arg_value=-np.pi / 4,
                                          arg_label=r"-\pi/4"))
                temp_resolved.append(Gate("CNOT", gate.targets,
                                          gate.controls[1]))
                temp_resolved.append(Gate("RZ", gate.targets, None,
                                          arg_value=np.pi / 4,
                                          arg_label=r"\pi/4"))
                temp_resolved.append(Gate("CNOT", gate.targets,
                                          gate.controls[0]))
                temp_resolved.append(Gate("RZ", gate.targets, None,
                                          arg_value=-np.pi / 4,
                                          arg_label=r"-\pi/4"))
                temp_resolved.append(Gate("CNOT", gate.targets,
                                          gate.controls[1]))
                temp_resolved.append(Gate("GLOBALPHASE", None, None,
                                          arg_value=np.pi / 2,
                                          arg_label=r"\pi/2"))
                temp_resolved.append(Gate("RY", gate.targets, None,
                                          arg_value=np.pi / 2,
                                          arg_label=r"\pi/2"))
                temp_resolved.append(Gate("RX", gate.targets, None,
                                          arg_value=np.pi, arg_label=r"\pi"))

            elif gate.name == "GLOBALPHASE":
                temp_resolved.append(Gate(gate.name, gate.targets,
                                          gate.controls,
                                          gate.arg_value, gate.arg_label))
            else:
                temp_resolved.append(gate)

        if basis_2q == "CSIGN":
            for gate in temp_resolved:
                if gate.name == "CNOT":
                    qc_temp.gates.append(Gate("RY", gate.targets, None,
                                              arg_value=-np.pi / 2,
                                              arg_label=r"-\pi/2"))
                    qc_temp.gates.append(Gate("CSIGN", gate.targets,
                                              gate.controls))
                    qc_temp.gates.append(Gate("RY", gate.targets, None,
                                              arg_value=np.pi / 2,
                                              arg_label=r"\pi/2"))
                else:
                    qc_temp.gates.append(gate)
        elif basis_2q == "ISWAP":
            for gate in temp_resolved:
                if gate.name == "CNOT":
                    qc_temp.gates.append(Gate("GLOBALPHASE", None, None,
                                              arg_value=np.pi / 4,
                                              arg_label=r"\pi/4"))
                    qc_temp.gates.append(Gate("ISWAP", [gate.controls[0],
                                                        gate.targets[0]],
                                              None))
                    qc_temp.gates.append(Gate("RZ", gate.targets, None,
                                              arg_value=-np.pi / 2,
                                              arg_label=r"-\pi/2"))
                    qc_temp.gates.append(Gate("RY", gate.controls, None,
                                              arg_value=-np.pi / 2,
                                              arg_label=r"-\pi/2"))
                    qc_temp.gates.append(Gate("RZ", gate.controls, None,
                                              arg_value=np.pi / 2,
                                              arg_label=r"\pi/2"))
                    qc_temp.gates.append(Gate("ISWAP", [gate.controls[0],
                                                        gate.targets[0]],
                                              None))
                    qc_temp.gates.append(Gate("RY", gate.targets, None,
                                              arg_value=-np.pi / 2,
                                              arg_label=r"-\pi/2"))
                    qc_temp.gates.append(Gate("RZ", gate.targets, None,
                                              arg_value=np.pi / 2,
                                              arg_label=r"\pi/2"))
                elif gate.name == "SWAP":
                    qc_temp.gates.append(Gate("GLOBALPHASE", None, None,
                                              arg_value=np.pi / 4,
                                              arg_label=r"\pi/4"))
                    qc_temp.gates.append(Gate("ISWAP", gate.targets, None))
                    qc_temp.gates.append(Gate("RX", gate.targets[0], None,
                                              arg_value=-np.pi / 2,
                                              arg_label=r"-\pi/2"))
                    qc_temp.gates.append(Gate("ISWAP", gate.targets, None))
                    qc_temp.gates.append(Gate("RX", gate.targets[1], None,
                                              arg_value=-np.pi / 2,
                                              arg_label=r"-\pi/2"))
                    qc_temp.gates.append(Gate("ISWAP", [gate.targets[1],
                                                        gate.targets[0]],
                                              None))
                    qc_temp.gates.append(Gate("RX", gate.targets[0], None,
                                              arg_value=-np.pi / 2,
                                              arg_label=r"-\pi/2"))
                else:
                    qc_temp.gates.append(gate)
        elif basis_2q == "SQRTSWAP":
            for gate in temp_resolved:
                if gate.name == "CNOT":
                    qc_temp.gates.append(Gate("RY", gate.targets, None,
                                              arg_value=np.pi / 2,
                                              arg_label=r"\pi/2"))
                    qc_temp.gates.append(Gate("SQRTSWAP", [gate.controls[0],
                                                           gate.targets[0]],
                                              None))
                    qc_temp.gates.append(Gate("RZ", gate.controls, None,
                                              arg_value=np.pi,
                                              arg_label=r"\pi"))
                    qc_temp.gates.append(Gate("SQRTSWAP", [gate.controls[0],
                                                           gate.targets[0]],
                                              None))
                    qc_temp.gates.append(Gate("RZ", gate.targets, None,
                                              arg_value=-np.pi / 2,
                                              arg_label=r"-\pi/2"))
                    qc_temp.gates.append(Gate("RY", gate.targets, None,
                                              arg_value=-np.pi / 2,
                                              arg_label=r"-\pi/2"))
                    qc_temp.gates.append(Gate("RZ", gate.controls, None,
                                              arg_value=-np.pi / 2,
                                              arg_label=r"-\pi/2"))
                else:
                    qc_temp.gates.append(gate)
        elif basis_2q == "SQRTISWAP":
            for gate in temp_resolved:
                if gate.name == "CNOT":
                    qc_temp.gates.append(Gate("RY", gate.controls, None,
                                              arg_value=-np.pi / 2,
                                              arg_label=r"-\pi/2"))
                    qc_temp.gates.append(Gate("RX", gate.controls, None,
                                              arg_value=np.pi / 2,
                                              arg_label=r"\pi/2"))
                    qc_temp.gates.append(Gate("RX", gate.targets, None,
                                              arg_value=-np.pi / 2,
                                              arg_label=r"-\pi/2"))
                    qc_temp.gates.append(Gate("SQRTISWAP", [gate.controls[0],
                                                            gate.targets[0]],
                                              None))
                    qc_temp.gates.append(Gate("RX", gate.controls, None,
                                              arg_value=np.pi,
                                              arg_label=r"\pi"))
                    qc_temp.gates.append(Gate("SQRTISWAP", [gate.controls[0],
                                                            gate.targets[0]],
                                              None))
                    qc_temp.gates.append(Gate("RY", gate.controls, None,
                                              arg_value=np.pi / 2,
                                              arg_label=r"\pi/2"))
                    qc_temp.gates.append(Gate("GLOBALPHASE", None, None,
                                              arg_value=np.pi / 4,
                                              arg_label=r"\pi/4"))
                    qc_temp.gates.append(Gate("RZ", gate.controls, None,
                                              arg_value=np.pi,
                                              arg_label=r"\pi"))
                    qc_temp.gates.append(Gate("GLOBALPHASE", None, None,
                                              arg_value=3 * np.pi / 2,
                                              arg_label=r"3\pi/2"))
                else:
                    qc_temp.gates.append(gate)
        else:
            qc_temp.gates = temp_resolved

        if len(basis_1q) == 2:
            temp_resolved = qc_temp.gates
            qc_temp.gates = []
            for gate in temp_resolved:
                if gate.name == "RX" and "RX" not in basis_1q:
                    qc_temp.gates.append(Gate("RY", gate.targets, None,
                                              arg_value=-np.pi / 2,
                                              arg_label=r"-\pi/2"))
                    qc_temp.gates.append(Gate("RZ", gate.targets, None,
                                              gate.arg_value, gate.arg_label))
                    qc_temp.gates.append(Gate("RY", gate.targets, None,
                                              arg_value=np.pi / 2,
                                              arg_label=r"\pi/2"))
                elif gate.name == "RY" and "RY" not in basis_1q:
                    qc_temp.gates.append(Gate("RZ", gate.targets, None,
                                              arg_value=-np.pi / 2,
                                              arg_label=r"-\pi/2"))
                    qc_temp.gates.append(Gate("RX", gate.targets, None,
                                              gate.arg_value, gate.arg_label))
                    qc_temp.gates.append(Gate("RZ", gate.targets, None,
                                              arg_value=np.pi / 2,
                                              arg_label=r"\pi/2"))
                elif gate.name == "RZ" and "RZ" not in basis_1q:
                    qc_temp.gates.append(Gate("RX", gate.targets, None,
                                              arg_value=-np.pi / 2,
                                              arg_label=r"-\pi/2"))
                    qc_temp.gates.append(Gate("RY", gate.targets, None,
                                              gate.arg_value, gate.arg_label))
                    qc_temp.gates.append(Gate("RX", gate.targets, None,
                                              arg_value=np.pi / 2,
                                              arg_label=r"\pi/2"))
                else:
                    qc_temp.gates.append(gate)

        return qc_temp

Example 22

Project: qutip
Source File: cqed.py
View license
    def __init__(self, N, correct_global_phase=True, Nres=None, deltamax=None,
                 epsmax=None, w0=None, wq=None, eps=None, delta=None, g=None):
        """
        Dispersive cavity-QED processor: N qubits coupled to one resonator.

        Parameters
        ----------
        N: Integer
            The number of qubits in the system.

        correct_global_phase: Boolean
            Passed through to the base class.

        Nres: Integer
            The number of energy levels in the resonator.

        deltamax: Integer/List
            The sigma-x coefficient for each of the qubits in the system.

        epsmax: Integer/List
            The sigma-z coefficient for each of the qubits in the system.

        w0: Integer
            The base frequency of the resonator.

        wq: Integer/List
            The frequency of the qubits.

        eps: Integer/List
            The epsilon for each of the qubits in the system.

        delta: Integer/List
            The delta for each of the qubits in the system.

        g: Integer/List
            The interaction strength for each of the qubit with the resonator.
        """

        super(DispersivecQED, self).__init__(N, correct_global_phase)

        # user definable
        if Nres is None:
            self.Nres = 10
        else:
            self.Nres = Nres

        # Each coefficient below accepts a scalar (broadcast to all N qubits)
        # or a list (one entry per qubit).  Scalars are multiplied by 2*pi
        # (interpreted as frequencies); explicit lists are used as-is.
        if deltamax is None:
            self.sx_coeff = np.array([1.0 * 2 * np.pi] * N)
        elif not isinstance(deltamax, list):
            self.sx_coeff = np.array([deltamax * 2 * np.pi] * N)
        else:
            self.sx_coeff = np.array(deltamax)

        if epsmax is None:
            self.sz_coeff = np.array([9.5 * 2 * np.pi] * N)
        elif not isinstance(epsmax, list):
            self.sz_coeff = np.array([epsmax * 2 * np.pi] * N)
        else:
            self.sz_coeff = np.array(epsmax)

        if w0 is None:
            self.w0 = 10 * 2 * np.pi
        else:
            self.w0 = w0

        if eps is None:
            self.eps = np.array([9.5 * 2 * np.pi] * N)
        elif not isinstance(eps, list):
            self.eps = np.array([eps * 2 * np.pi] * N)
        else:
            self.eps = np.array(eps)

        if delta is None:
            self.delta = np.array([0.0 * 2 * np.pi] * N)
        elif not isinstance(delta, list):
            self.delta = np.array([delta * 2 * np.pi] * N)
        else:
            self.delta = np.array(delta)

        if g is None:
            self.g = np.array([0.01 * 2 * np.pi] * N)
        elif not isinstance(g, list):
            self.g = np.array([g * 2 * np.pi] * N)
        else:
            self.g = np.array(g)

        # NOTE(review): self.wq assigned from the wq argument here is
        # unconditionally overwritten by the computed value below, so the
        # wq argument is effectively ignored -- confirm intended behavior.
        if wq is not None:
            if not isinstance(wq, list):
                self.wq = np.array([wq] * N)
            else:
                self.wq = np.array(wq)

        # NOTE(review): eps and delta were already assigned above; this
        # branch re-assigns them and, unlike the earlier branch, the scalar
        # cases here omit the 2*pi factor -- verify which convention is
        # intended.
        if wq is None:
            if eps is None:
                self.eps = np.array([9.5 * 2 * np.pi] * N)
            elif not isinstance(eps, list):
                self.eps = np.array([eps] * N)
            else:
                self.eps = np.array(eps)

            if delta is None:
                self.delta = np.array([0.0 * 2 * np.pi] * N)
            elif not isinstance(delta, list):
                self.delta = np.array([delta] * N)
            else:
                self.delta = np.array(delta)

        # computed
        # Qubit frequencies follow from eps/delta; Delta is the
        # qubit-resonator detuning per qubit.
        self.wq = np.sqrt(self.eps ** 2 + self.delta ** 2)
        self.Delta = self.wq - self.w0

        # rwa/dispersive regime tests
        if any(self.g / (self.w0 - self.wq) > 0.05):
            warnings.warn("Not in the dispersive regime")

        if any((self.w0 - self.wq) / (self.w0 + self.wq) > 0.05):
            warnings.warn(
                "The rotating-wave approximation might not be valid.")

        # Single-qubit sigma-x / sigma-z operators embedded in the full
        # resonator (x) qubits tensor-product space.
        self.sx_ops = [tensor([identity(self.Nres)] +
                              [sigmax() if m == n else identity(2)
                               for n in range(N)])
                       for m in range(N)]
        self.sz_ops = [tensor([identity(self.Nres)] +
                              [sigmaz() if m == n else identity(2)
                               for n in range(N)])
                       for m in range(N)]

        # Resonator annihilation operator on the full space.
        self.a = tensor([destroy(self.Nres)] + [identity(2) for n in range(N)])

        # Jaynes-Cummings-style coupling a.dag()*sm + a*sm.dag() for each
        # qubit n.
        self.cavityqubit_ops = []
        for n in range(N):
            sm = tensor([identity(self.Nres)] +
                        [destroy(2) if m == n else identity(2)
                         for m in range(N)])
            self.cavityqubit_ops.append(self.a.dag() * sm + self.a * sm.dag())

        # Resonator-vacuum component used when mapping states back to the
        # qubit register (presumably -- confirm against usage elsewhere).
        self.psi_proj = tensor([basis(self.Nres, 0)] +
                               [identity(2) for n in range(N)])

Example 23

Project: qutip
Source File: rcsolve.py
View license
def rcsolve(Hsys, psi0, tlist, e_ops, Q, wc, alpha, N, w_th, sparse=False,
            options=None):
    """
    Function to solve for an open quantum system using the
    reaction coordinate (RC) model.

    Parameters
    ----------
    Hsys: Qobj
        The system hamiltonian.
    psi0: Qobj
        Initial state of the system.
    tlist: List.
        Time over which system evolves.
    e_ops: list of :class:`qutip.Qobj` / callback function single
        Single operator or list of operators for which to evaluate
        expectation values.
    Q: Qobj
        The coupling between system and bath.
    wc: Float
        Cutoff frequency.
    alpha: Float
        Coupling strength.
    N: Integer
        Number of cavity fock states.
    w_th: Float
        Temperature.
    sparse: Boolean
        Optional argument to call the sparse eigenstates solver if needed.
    options : :class:`qutip.Options`
        With options for the solver.

    Returns
    -------
    output: Result
        System evolution.
    """
    if options is None:
        options = Options()

    # Warn when the temperature is small compared to the system's first
    # excitation gap, where the RC truncation may be inaccurate.
    dot_energy, dot_state = Hsys.eigenstates(sparse=sparse)
    deltaE = dot_energy[1] - dot_energy[0]
    if (w_th < deltaE/2):
        warnings.warn("Given w_th might not provide accurate results")
    # Reaction-coordinate parameters derived from the bath spectral density.
    gamma = deltaE / (2 * np.pi * wc)
    wa = 2 * np.pi * gamma * wc  # reaction coordinate frequency
    g = np.sqrt(np.pi * wa * alpha / 2.0)  # reaction coordinate coupling
    nb = (1 / (np.exp(wa/w_th) - 1))  # thermal occupation of the RC mode

    # Reaction coordinate hamiltonian/operators

    dimensions = dims(Q)
    a = tensor(destroy(N), qeye(dimensions[1]))
    Nmax = N * dimensions[1][0]  # dimension of the combined RC + system space
    Q_exp = tensor(qeye(N), Q)
    Hsys_exp = tensor(qeye(N), Hsys)
    e_ops_exp = [tensor(qeye(N), kk) for kk in e_ops]

    xa = a.dag() + a  # RC position operator

    # decoupled Hamiltonian
    H0 = wa * a.dag() * a + Hsys_exp
    # interaction
    H1 = (g * (a.dag() + a) * Q_exp)
    H = H0 + H1
    PsipreEta = 0
    PsipreX = 0

    # Build the dissipator operators in the eigenbasis of the full
    # Hamiltonian, weighting each transition by its matrix element and
    # Bohr frequency.
    all_energy, all_state = H.eigenstates(sparse=sparse)
    for j in range(Nmax):
        for k in range(Nmax):
            A = xa.matrix_element(all_state[j].dag(), all_state[k])
            delE = (all_energy[j] - all_energy[k])
            if abs(A) > 0.0:
                if abs(delE) > 0.0:
                    X = (0.5 * np.pi * gamma*(all_energy[j] - all_energy[k])
                         * (np.cosh((all_energy[j] - all_energy[k]) /
                            (2 * w_th))
                         / (np.sinh((all_energy[j] - all_energy[k]) /
                            (2 * w_th)))) * A)
                    eta = (0.5 * np.pi * gamma *
                           (all_energy[j] - all_energy[k]) * A)
                    PsipreX = PsipreX + X * all_state[j] * all_state[k].dag()
                    PsipreEta = PsipreEta + (eta * all_state[j]
                                             * all_state[k].dag())
                else:
                    # Zero-frequency limit of the coth factor above.
                    X = 0.5 * np.pi * gamma * A * 2 * w_th
                    PsipreX = PsipreX + X * all_state[j] * all_state[k].dag()

    # Assemble the dissipator superoperator.
    A = a + a.dag()
    L = ((-spre(A * PsipreX)) + (sprepost(A, PsipreX))
         + (sprepost(PsipreX, A)) + (-spost(PsipreX * A))
         + (spre(A * PsipreEta)) + (sprepost(A, PsipreEta))
         + (-sprepost(PsipreEta, A)) + (-spost(PsipreEta * A)))

    # Setup the operators and the Hamiltonian and the master equation
    # and solve for time steps in tlist.
    # Initial state: RC mode thermalised at occupation nb, system in psi0.
    rho0 = (tensor(thermal_dm(N, nb), psi0))
    output = mesolve(H, rho0, tlist, [L], e_ops_exp, options=options)

    return output

Example 24

Project: rlpy
Source File: test_FiniteTrackCartPole.py
View license
def test_physicality():
    """
    Check the coordinate convention (theta = 0 is vertical up):
        1) gravity accelerates the pendulum away from vertical, and
           conserves energy when no force is applied
        2) cart forces rotate the pendulum in the expected direction
    """
    LEFT_FORCE = 0
    NO_FORCE = 1
    RIGHT_FORCE = 2
    domain = FiniteCartPoleBalanceModern()
    domain.force_noise_max = 0  # make the applied force deterministic

    domain.int_type = 'rk4'

    def _simulate(start, action, increasing, conserve_energy):
        # Step 5 times, asserting monotone drift of [theta, theta_dot]
        # and (optionally) conservation of total energy at each step.
        domain.state = start
        prev = start
        for _ in np.arange(5):
            domain.step(action)
            if increasing:
                assert np.all(domain.state[0:2] > prev[0:2])
            else:
                assert np.all(domain.state[0:2] < prev[0:2])
            if conserve_energy:
                # no energy should enter or leave the system
                assert np.abs(_cartPoleEnergy(domain, prev) -
                              _cartPoleEnergy(domain, domain.state)) < 0.01
            prev = domain.state

    # Pendulum slightly right of vertical: gravity pulls it further right.
    _simulate(np.array([10.0 * np.pi / 180.0, 0.0, 0.0, 0.0]),
              NO_FORCE, True, True)

    # Pendulum slightly left of vertical: gravity pulls it further left.
    _simulate(np.array([-10.0 * np.pi / 180.0, 0.0, 0.0, 0.0]),
              NO_FORCE, False, True)

    # From vertical, a leftward force on the cart yields positive rotation.
    _simulate(np.array([0.0, 0.0, 0.0, 0.0]), LEFT_FORCE, True, False)

    # From vertical, a rightward force on the cart yields negative rotation.
    _simulate(np.array([0.0, 0.0, 0.0, 0.0]), RIGHT_FORCE, False, False)

Example 25

Project: galry
Source File: mesh_visual.py
View license
    def initialize(self, camera_angle=None, camera_ratio=None, autocolor=None,
        camera_zrange=None, position=None, color=None, normal=None, index=None,
        vertex_shader=None):
        """Set up the 3D mesh template.
        
        Arguments:
          * camera_angle: the view angle of the camera, in radians
            (defaults to pi/4).
          * camera_ratio: the W/H ratio of the camera (defaults to 4/3).
          * camera_zrange: a pair of z clipping values for the camera
            projection (defaults to (.5, 10.)).
        
        Template fields are:
          * `position`: an attribute with the positions as 3D vertices,
          * `normal`: an attribute with the normals as 3D vectors,
          * `color`: an attribute with the color of each vertex, as 4D vertices.
          * `projection`: an uniform with the 4x4 projection matrix, returned by
            `projection_matrix()`.
          * `camera`: an uniform with the 4x4 camera matrix, returned by
            `camera_matrix()`.
          * `transform`: an uniform with the 4x4 transform matrix (identity
            by default).
          * `light_direction`: the direction of the diffuse light as a
            3-vector.
          * `ambient_light`: the amount of ambient light (white color).
        """
        # A scalar `autocolor` overrides the explicit per-vertex colors.
        if autocolor is not None:
            color = get_color(autocolor)
        
        # Fill in default camera parameters where none were supplied.
        camera_angle = np.pi / 4 if camera_angle is None else camera_angle
        camera_ratio = 4. / 3. if camera_ratio is None else camera_ratio
        camera_zrange = (.5, 10.) if camera_zrange is None else camera_zrange
        
        self.size = position.shape[0]
        if self.primitive_type is None:
            self.primitive_type = 'TRIANGLES'
        
        # Per-vertex attributes.
        self.add_attribute("position", vartype="float", ndim=3, data=position)
        self.add_attribute("normal", vartype="float", ndim=3, data=normal)
        self.add_attribute("color", vartype="float", ndim=4, data=color)
        if index is not None:
            self.add_index("index", data=index)
        
        # Color interpolated from the vertex to the fragment shader.
        self.add_varying("varying_color", vartype="float", ndim=4)
        
        # Default transformation matrices: a perspective projection, a
        # camera placed 4 units back on the z axis looking at the origin,
        # and an identity model transform.
        proj_mat = projection_matrix(camera_angle, camera_ratio,
                                     *camera_zrange)
        cam_mat = camera_matrix(np.array([0., 0., -4.]),  # position
                                np.zeros(3),              # look at
                                np.array([0., 1., 0.]))   # top
        model_mat = np.eye(4)
        
        # Matrix uniforms.
        self.add_uniform("projection", ndim=(4, 4), size=None, data=proj_mat)
        self.add_uniform("camera", ndim=(4, 4), size=None, data=cam_mat)
        self.add_uniform("transform", ndim=(4, 4), size=None, data=model_mat)
        
        # Diffuse light direction and ambient light intensity.
        light_dir = normalize(np.array([0., 0., -1.]))
        self.add_uniform("light_direction", size=None, ndim=3, data=light_dir)
        self.add_uniform("ambient_light", size=None, ndim=1, data=.25)
        
        # Default vertex shader: apply the transformation matrices and
        # compute basic diffuse + ambient lighting per vertex.
        if not vertex_shader:
            vertex_shader = """
            // convert the position from 3D to 4D.
            gl_Position = vec4(position, 1.0);
            // compute the amount of light
            float light = dot(light_direction, normalize(mat3(camera) * mat3(transform) * normal));
            light = clamp(light, 0, 1);
            // add the ambient term
            light = clamp(ambient_light + light, 0, 1);
            // compute the final color
            varying_color = color * light;
            // keep the transparency
            varying_color.w = color.w;
            """
        self.add_vertex_main(vertex_shader)
        
        # Basic fragment shader: pass the interpolated color through.
        self.add_fragment_main("""
            out_color = varying_color;
        """)

Example 26

Project: pyquante2
Source File: functionals.py
View license
def cpbe_point(rhoa, rhob, gama, gamb, gamab, tol=1e-10):
    """Compute the PBE correlation energy and potentials at one grid point.

    Parameters
    ----------
    rhoa, rhob : float
        Spin-up / spin-down electron densities.
    gama, gamb, gamab : float
        Density-gradient invariants: |grad rhoa|^2, |grad rhob|^2 and
        grad rhoa . grad rhob, respectively.
    tol : float, optional
        Total densities at or below this threshold return all zeros.

    Returns
    -------
    (ec, vca, vcb, vcgama, vcgamab, vcgamb)
        Correlation energy density and its derivatives with respect to
        the spin densities.  The derivatives with respect to the gradient
        invariants (vcgama, vcgamab, vcgamb) are not implemented yet and
        are always returned as 0.
    """
    rho = rhoa + rhob
    ec = vca = vcb = vcgama = vcgamb = vcgamab = 0
    # PBE correlation constants.
    gam = 0.031091
    bet = 0.066725
    if rho > tol:
        Rs = np.power(3. / (4. * np.pi * rho), 1. / 3.)  # Wigner-Seitz radius
        Zeta = (rhoa - rhob) / rho                       # spin polarization
        Kf = np.power(3 * np.pi * np.pi * rho, 1. / 3.)  # Fermi wave vector
        Ks = np.sqrt(4 * Kf / np.pi)
        Phi = 0.5 * (np.power(1 + Zeta, 2. / 3.) + np.power(1 - Zeta, 2. / 3.))
        Phi3 = Phi * Phi * Phi
        gradrho = np.sqrt(gama + gamb + 2. * gamab)
        T = gradrho / (2 * Phi * Ks * rho)
        T2 = T * T
        T4 = T2 * T2

        # Local (LSD) correlation energy and potentials.
        eps, vc0a, vc0b = cpbe_lsd(rhoa, rhob)

        # Gradient correction H of the PBE correlation functional.
        expo = (np.exp(-eps / (gam * Phi3)) - 1.)
        A = bet / gam / expo
        N = T2 + A * T4
        D = 1. + A * T2 + A * A * T4
        H = gam * Phi3 * np.log(1. + (bet / gam) * N / D)
        ec = rho * (eps + H)

        # Derivative stuff
        dZ_drhoa = (1. - Zeta) / rho
        dZ_drhob = -(1. + Zeta) / rho

        dPhi_dZ = (np.power(1. + Zeta, -1. / 3.) / 3.
                   - np.power(1. - Zeta, -1. / 3.) / 3.)
        dPhi_drhoa = dPhi_dZ * dZ_drhoa
        dPhi_drhob = dPhi_dZ * dZ_drhob

        dKs_drho = Ks / (6 * rho)

        dT_dPhi = -T / Phi
        dT_dKs = -T / Ks
        dT_drhoa = -T / rho + dT_dPhi * dPhi_drhoa + dT_dKs * dKs_drho
        dT_drhob = -T / rho + dT_dPhi * dPhi_drhob + dT_dKs * dKs_drho

        dA_dPhi = (-A / expo * np.exp(-eps / (gam * Phi3))
                   * (3 * eps / (gam * Phi3 * Phi)))
        dA_deps = (-A / expo * np.exp(-eps / (gam * Phi3))
                   * (-1 / (gam * Phi3)))
        deps_drhoa = (vc0a - eps) / rho
        deps_drhob = (vc0b - eps) / rho
        dA_drhoa = dA_dPhi * dPhi_drhoa + dA_deps * deps_drhoa
        # Bug fix: the rhob derivative previously reused deps_drhoa.
        dA_drhob = dA_dPhi * dPhi_drhob + dA_deps * deps_drhob

        dN_dT = 2 * T + 4 * A * T2 * T
        dD_dT = 2 * A * T + 4 * A * A * T * T2
        dN_dA = T4
        dD_dA = T2 + 2 * A * T4

        dH_dPhi = 3 * H / Phi
        dH_dT = (bet * Phi3 / (1. + bet / gam * N / D)
                 * (D * dN_dT - N * dD_dT) / D / D)

        dH_dA = (bet * Phi3 / (1. + bet / gam * N / D)
                 * (D * dN_dA - N * dD_dA) / D / D)

        dH_drhoa = dH_dPhi * dPhi_drhoa + dH_dT * dT_drhoa + dH_dA * dA_drhoa
        dH_drhob = dH_dPhi * dPhi_drhob + dH_dT * dT_drhob + dH_dA * dA_drhob

        vca = vc0a + H + rho * dH_drhoa
        vcb = vc0b + H + rho * dH_drhob
    # Haven't done the dE_dgamma derivatives yet
    return ec, vca, vcb, vcgama, vcgamab, vcgamb

Example 27

Project: scikit-image
Source File: corner.py
View license
def shape_index(image, sigma=1, mode='constant', cval=0):
    """Compute the shape index of an image.

    The shape index, as defined by Koenderink & van Doorn [1]_, is a
    single-valued measure of local curvature, treating the image as a 3D
    surface whose height is the intensity.

    It is derived from the eigenvalues of the Hessian, ranges from -1 to 1
    (and is undefined, i.e. NaN, in *flat* regions), and maps onto local
    surface shapes as follows:

    .. table:: Ranges of the shape index and corresponding shapes.

      ===================  =============
      Interval (s in ...)  Shape
      ===================  =============
      [  -1, -7/8)         Spherical cup
      [-7/8, -5/8)         Through
      [-5/8, -3/8)         Rut
      [-3/8, -1/8)         Saddle rut
      [-1/8, +1/8)         Saddle
      [+1/8, +3/8)         Saddle ridge
      [+3/8, +5/8)         Ridge
      [+5/8, +7/8)         Dome
      [+7/8,   +1]         Spherical cap
      ===================  =============

    Parameters
    ----------
    image : ndarray
        Input image.
    sigma : float, optional
        Standard deviation of the Gaussian kernel used to smooth the input
        before the Hessian eigenvalue calculation.
    mode : {'constant', 'reflect', 'wrap', 'nearest', 'mirror'}, optional
        How values outside the image borders are handled.
    cval : float, optional
        Fill value used together with mode 'constant'.

    Returns
    -------
    s : ndarray
        Shape index

    References
    ----------
    .. [1] Koenderink, J. J. & van Doorn, A. J.,
           "Surface shape and curvature scales",
           Image and Vision Computing, 1992, 10, 557-564.
           DOI:10.1016/0262-8856(92)90076-F

    Examples
    --------
    >>> from skimage.feature import shape_index
    >>> square = np.zeros((5, 5))
    >>> square[2, 2] = 4
    >>> s = shape_index(square, sigma=0.1)
    >>> s
    array([[ nan,  nan, -0.5,  nan,  nan],
           [ nan, -0. ,  nan, -0. ,  nan],
           [-0.5,  nan, -1. ,  nan, -0.5],
           [ nan, -0. ,  nan, -0. ,  nan],
           [ nan,  nan, -0.5,  nan,  nan]])
    """
    # Hessian components in row/column ('rc') order.
    hessian_elems = hessian_matrix(image, sigma=sigma, mode=mode, cval=cval,
                                   order='rc')
    eig1, eig2 = hessian_matrix_eigvals(*hessian_elems)

    # arctan of the ratio (sum / difference) of the eigenvalues, scaled to
    # map onto the interval [-1, 1].
    return (2.0 / np.pi) * np.arctan((eig2 + eig1) / (eig2 - eig1))

Example 28

View license
def _upsampled_dft(data, upsampled_region_size,
                   upsample_factor=1, axis_offsets=None):
    """
    Upsampled DFT by matrix multiplication.

    This code is intended to provide the same result as if the following
    operations were performed:
        - Embed the array "data" in an array that is ``upsample_factor`` times
          larger in each dimension.  ifftshift to bring the center of the
          image to (1,1).
        - Take the FFT of the larger array.
        - Extract an ``[upsampled_region_size]`` region of the result, starting
          with the ``[axis_offsets+1]`` element.

    It achieves this result by computing the DFT in the output array without
    the need to zeropad. Much faster and memory efficient than the zero-padded
    FFT approach if ``upsampled_region_size`` is much smaller than
    ``data.size * upsample_factor``.

    Parameters
    ----------
    data : 2D ndarray
        The input data array (DFT of original data) to upsample.
    upsampled_region_size : integer or tuple of integers, optional
        The size of the region to be sampled.  If one integer is provided, it
        is duplicated up to the dimensionality of ``data``.
    upsample_factor : integer, optional
        The upsampling factor.  Defaults to 1.
    axis_offsets : tuple of integers, optional
        The offsets of the region to be sampled.  Defaults to None (uses
        image center)

    Returns
    -------
    output : 2D ndarray
            The upsampled DFT of the specified region.
    """
    # if people pass in an integer, expand it to a list of equal-sized sections
    if not hasattr(upsampled_region_size, "__iter__"):
        upsampled_region_size = [upsampled_region_size, ] * data.ndim
    else:
        if len(upsampled_region_size) != data.ndim:
            raise ValueError("shape of upsampled region sizes must be equal "
                             "to input data's number of dimensions.")

    if axis_offsets is None:
        axis_offsets = [0, ] * data.ndim
    else:
        if len(axis_offsets) != data.ndim:
            raise ValueError("number of axis offsets must be equal to input "
                             "data's number of dimensions.")

    col_kernel = np.exp(
        (-1j * 2 * np.pi / (data.shape[1] * upsample_factor)) *
        (np.fft.ifftshift(np.arange(data.shape[1]))[:, None] -
         np.floor(data.shape[1] / 2)).dot(
             np.arange(upsampled_region_size[1])[None, :] - axis_offsets[1])
    )
    row_kernel = np.exp(
        (-1j * 2 * np.pi / (data.shape[0] * upsample_factor)) *
        (np.arange(upsampled_region_size[0])[:, None] - axis_offsets[0]).dot(
            np.fft.ifftshift(np.arange(data.shape[0]))[None, :] -
            np.floor(data.shape[0] / 2))
    )

    return row_kernel.dot(data).dot(col_kernel)

Example 29

Project: scikit-learn
Source File: graph_lasso_.py
View license
def graph_lasso(emp_cov, alpha, cov_init=None, mode='cd', tol=1e-4,
                enet_tol=1e-4, max_iter=100, verbose=False,
                return_costs=False, eps=np.finfo(np.float64).eps,
                return_n_iter=False):
    """l1-penalized covariance estimator

    Read more in the :ref:`User Guide <sparse_inverse_covariance>`.

    Parameters
    ----------
    emp_cov : 2D ndarray, shape (n_features, n_features)
        Empirical covariance from which to compute the covariance estimate.

    alpha : positive float
        The regularization parameter: the higher alpha, the more
        regularization, the sparser the inverse covariance.

    cov_init : 2D array (n_features, n_features), optional
        The initial guess for the covariance.

    mode : {'cd', 'lars'}
        The Lasso solver to use: coordinate descent or LARS. Use LARS for
        very sparse underlying graphs, where p > n. Elsewhere prefer cd
        which is more numerically stable.

    tol : positive float, optional
        The tolerance to declare convergence: if the dual gap goes below
        this value, iterations are stopped.

    enet_tol : positive float, optional
        The tolerance for the elastic net solver used to calculate the descent
        direction. This parameter controls the accuracy of the search direction
        for a given column update, not of the overall parameter estimate. Only
        used for mode='cd'.

    max_iter : integer, optional
        The maximum number of iterations.

    verbose : boolean, optional
        If verbose is True, the objective function and dual gap are
        printed at each iteration.

    return_costs : boolean, optional
        If return_costs is True, the objective function and dual gap
        at each iteration are returned.

    eps : float, optional
        The machine-precision regularization in the computation of the
        Cholesky diagonal factors. Increase this for very ill-conditioned
        systems.

    return_n_iter : bool, optional
        Whether or not to return the number of iterations.

    Returns
    -------
    covariance : 2D ndarray, shape (n_features, n_features)
        The estimated covariance matrix.

    precision : 2D ndarray, shape (n_features, n_features)
        The estimated (sparse) precision matrix.

    costs : list of (objective, dual_gap) pairs
        The list of values of the objective function and the dual gap at
        each iteration. Returned only if return_costs is True.

    n_iter : int
        Number of iterations. Returned only if `return_n_iter` is set to True.

    See Also
    --------
    GraphLasso, GraphLassoCV

    Notes
    -----
    The algorithm employed to solve this problem is the GLasso algorithm,
    from the Friedman 2008 Biostatistics paper. It is the same algorithm
    as in the R `glasso` package.

    One possible difference with the `glasso` R package is that the
    diagonal coefficients are not penalized.

    """
    _, n_features = emp_cov.shape
    # alpha == 0 reduces to the unpenalized maximum-likelihood estimate:
    # the covariance is the empirical covariance and the precision is
    # simply its inverse, so no iterations are needed.
    if alpha == 0:
        if return_costs:
            precision_ = linalg.inv(emp_cov)
            cost = - 2. * log_likelihood(emp_cov, precision_)
            cost += n_features * np.log(2 * np.pi)
            d_gap = np.sum(emp_cov * precision_) - n_features
            if return_n_iter:
                return emp_cov, precision_, (cost, d_gap), 0
            else:
                return emp_cov, precision_, (cost, d_gap)
        else:
            if return_n_iter:
                return emp_cov, linalg.inv(emp_cov), 0
            else:
                return emp_cov, linalg.inv(emp_cov)
    if cov_init is None:
        covariance_ = emp_cov.copy()
    else:
        covariance_ = cov_init.copy()
    # As a trivial regularization (Tikhonov like), we scale down the
    # off-diagonal coefficients of our starting point: This is needed, as
    # in the cross-validation the cov_init can easily be
    # ill-conditioned, and the CV loop blows. Beside, this takes
    # conservative stand-point on the initial conditions, and it tends to
    # make the convergence go faster.
    covariance_ *= 0.95
    # flat[::n_features + 1] is a strided view of the matrix diagonal:
    # restore the unscaled diagonal, since it is not penalized.
    diagonal = emp_cov.flat[::n_features + 1]
    covariance_.flat[::n_features + 1] = diagonal
    precision_ = pinvh(covariance_)

    indices = np.arange(n_features)
    costs = list()
    # The different l1 regression solver have different numerical errors
    if mode == 'cd':
        errors = dict(over='raise', invalid='ignore')
    else:
        errors = dict(invalid='raise')
    try:
        # be robust to the max_iter=0 edge case, see:
        # https://github.com/scikit-learn/scikit-learn/issues/4134
        d_gap = np.inf
        for i in range(max_iter):
            # Block coordinate descent: update one row/column of the
            # covariance and precision matrices at a time.
            for idx in range(n_features):
                # Covariance with row and column `idx` removed.
                sub_covariance = np.ascontiguousarray(
                    covariance_[indices != idx].T[indices != idx])
                row = emp_cov[idx, indices != idx]
                with np.errstate(**errors):
                    if mode == 'cd':
                        # Use coordinate descent
                        # Warm start from the current precision estimate.
                        coefs = -(precision_[indices != idx, idx]
                                  / (precision_[idx, idx] + 1000 * eps))
                        coefs, _, _, _ = cd_fast.enet_coordinate_descent_gram(
                            coefs, alpha, 0, sub_covariance, row, row,
                            max_iter, enet_tol, check_random_state(None), False)
                    else:
                        # Use LARS
                        _, _, coefs = lars_path(
                            sub_covariance, row, Xy=row, Gram=sub_covariance,
                            alpha_min=alpha / (n_features - 1), copy_Gram=True,
                            method='lars', return_path=False)
                # Update the precision matrix
                precision_[idx, idx] = (
                    1. / (covariance_[idx, idx]
                          - np.dot(covariance_[indices != idx, idx], coefs)))
                precision_[indices != idx, idx] = (- precision_[idx, idx]
                                                   * coefs)
                precision_[idx, indices != idx] = (- precision_[idx, idx]
                                                   * coefs)
                coefs = np.dot(sub_covariance, coefs)
                covariance_[idx, indices != idx] = coefs
                covariance_[indices != idx, idx] = coefs
            # Convergence is declared on the primal-dual gap.
            d_gap = _dual_gap(emp_cov, precision_, alpha)
            cost = _objective(emp_cov, precision_, alpha)
            if verbose:
                print(
                    '[graph_lasso] Iteration % 3i, cost % 3.2e, dual gap %.3e'
                    % (i, cost, d_gap))
            if return_costs:
                costs.append((cost, d_gap))
            if np.abs(d_gap) < tol:
                break
            if not np.isfinite(cost) and i > 0:
                raise FloatingPointError('Non SPD result: the system is '
                                         'too ill-conditioned for this solver')
        else:
            # for/else: reached only when the loop exhausted max_iter
            # without hitting the `break` above.
            warnings.warn('graph_lasso: did not converge after %i iteration:'
                          ' dual gap: %.3e' % (max_iter, d_gap),
                          ConvergenceWarning)
    except FloatingPointError as e:
        e.args = (e.args[0]
                  + '. The system is too ill-conditioned for this solver',)
        raise e

    if return_costs:
        if return_n_iter:
            return covariance_, precision_, costs, i + 1
        else:
            return covariance_, precision_, costs
    else:
        if return_n_iter:
            return covariance_, precision_, i + 1
        else:
            return covariance_, precision_

Example 30

Project: scikit-learn
Source File: pca.py
View license
def _assess_dimension_(spectrum, rank, n_samples, n_features):
    """Compute the likelihood of a rank ``rank`` dataset

    The dataset is assumed to be embedded in gaussian noise of shape(n,
    dimf) having spectrum ``spectrum``.

    Parameters
    ----------
    spectrum: array of shape (n)
        Data spectrum.
    rank: int
        Tested rank value.
    n_samples: int
        Number of samples.
    n_features: int
        Number of features.

    Returns
    -------
    ll: float,
        The log-likelihood

    Notes
    -----
    This implements the method of `Thomas P. Minka:
    Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`
    """
    if rank > len(spectrum):
        raise ValueError("The tested rank cannot exceed the rank of the"
                         " dataset")

    pu = -rank * log(2.)
    for i in range(rank):
        pu += (gammaln((n_features - i) / 2.) -
               log(np.pi) * (n_features - i) / 2.)

    pl = np.sum(np.log(spectrum[:rank]))
    pl = -pl * n_samples / 2.

    if rank == n_features:
        pv = 0
        v = 1
    else:
        v = np.sum(spectrum[rank:]) / (n_features - rank)
        pv = -np.log(v) * n_samples * (n_features - rank) / 2.

    m = n_features * rank - rank * (rank + 1.) / 2.
    pp = log(2. * np.pi) * (m + rank + 1.) / 2.

    pa = 0.
    spectrum_ = spectrum.copy()
    spectrum_[rank:n_features] = v
    for i in range(rank):
        for j in range(i + 1, len(spectrum)):
            pa += log((spectrum[i] - spectrum[j]) *
                      (1. / spectrum_[j] - 1. / spectrum_[i])) + log(n_samples)

    ll = pu + pl + pv + pp - pa / 2. - rank * log(n_samples) / 2.

    return ll

Example 31

Project: scipy
Source File: special_matrices.py
View license
def dft(n, scale=None):
    """
    Discrete Fourier transform matrix.

    Create the matrix that computes the discrete Fourier transform of a
    sequence [1]_.  The n-th primitive root of unity used to generate the
    matrix is exp(-2*pi*i/n), where i = sqrt(-1).

    Parameters
    ----------
    n : int
        Size the matrix to create.
    scale : str, optional
        Must be None, 'sqrtn', or 'n'.
        If `scale` is 'sqrtn', the matrix is divided by `sqrt(n)`.
        If `scale` is 'n', the matrix is divided by `n`.
        If `scale` is None (the default), the matrix is not normalized, and the
        return value is simply the Vandermonde matrix of the roots of unity.

    Returns
    -------
    m : (n, n) ndarray
        The DFT matrix.

    Notes
    -----
    When `scale` is None, multiplying a vector by the matrix returned by
    `dft` is mathematically equivalent to (but much less efficient than)
    the calculation performed by `scipy.fftpack.fft`.

    .. versionadded:: 0.14.0

    References
    ----------
    .. [1] "DFT matrix", http://en.wikipedia.org/wiki/DFT_matrix

    Examples
    --------
    >>> from scipy.linalg import dft
    >>> np.set_printoptions(precision=5, suppress=True)
    >>> x = np.array([1, 2, 3, 0, 3, 2, 1, 0])
    >>> m = dft(8)
    >>> m.dot(x)   # Compute the DFT of x
    array([ 12.+0.j,  -2.-2.j,   0.-4.j,  -2.+2.j,   4.+0.j,  -2.-2.j,
            -0.+4.j,  -2.+2.j])

    Verify that ``m.dot(x)`` is the same as ``fft(x)``.

    >>> from scipy.fftpack import fft
    >>> fft(x)     # Same result as m.dot(x)
    array([ 12.+0.j,  -2.-2.j,   0.-4.j,  -2.+2.j,   4.+0.j,  -2.-2.j,
             0.+4.j,  -2.+2.j])
    """
    # Reject anything but the three documented scaling modes.
    if scale is not None and scale not in ('sqrtn', 'n'):
        raise ValueError("scale must be None, 'sqrtn', or 'n'; "
                         "%r is not valid." % (scale,))

    indices = np.arange(n)
    # Column vector of the n-th roots of unity: exp(-2*pi*i*k/n).
    roots = np.exp(-2j * np.pi * indices / n).reshape(-1, 1)
    # Vandermonde matrix of the roots: entry (j, k) is root_j ** k.
    mat = roots ** indices
    if scale == 'sqrtn':
        mat /= math.sqrt(n)
    elif scale == 'n':
        mat /= n
    return mat

Example 32

Project: scipy
Source File: fir_filter_design.py
View license
def kaiserord(ripple, width):
    """
    Design a Kaiser window to limit ripple and width of transition region.

    Parameters
    ----------
    ripple : float
        Positive number specifying maximum ripple in passband (dB) and minimum
        ripple in stopband.
    width : float
        Width of transition region (normalized so that 1 corresponds to pi
        radians / sample).

    Returns
    -------
    numtaps : int
        The length of the kaiser window.
    beta : float
        The beta parameter for the kaiser window.

    Raises
    ------
    ValueError
        If ``abs(ripple)`` is below 8 dB, where Kaiser's empirical formula
        is not valid.

    See Also
    --------
    kaiser_beta, kaiser_atten

    Notes
    -----
    There are several ways to obtain the Kaiser window:

    - ``signal.kaiser(numtaps, beta, sym=True)``
    - ``signal.get_window(beta, numtaps)``
    - ``signal.get_window(('kaiser', beta), numtaps)``

    The empirical equations discovered by Kaiser are used.

    References
    ----------
    Oppenheim, Schafer, "Discrete-Time Signal Processing", p.475-476.

    """
    A = abs(ripple)  # in case somebody is confused as to what's meant
    if A < 8:
        # Formula for N is not valid in this range.
        # (typo fix: "attentuation" -> "attenuation")
        raise ValueError("Requested maximum ripple attenuation %f is too "
                         "small for the Kaiser formula." % A)
    beta = kaiser_beta(A)

    # Kaiser's formula (as given in Oppenheim and Schafer) is for the filter
    # order, so we have to add 1 to get the number of taps.
    numtaps = (A - 7.95) / 2.285 / (np.pi * width) + 1

    return int(ceil(numtaps)), beta

Example 33

Project: iris
Source File: cartography.py
View license
def area_weights(cube, normalize=False):
    """
    Returns an array of area weights, with the same dimensions as the cube.

    This is a 2D lat/lon area weights array, repeated over the non lat/lon
    dimensions.

    Args:

    * cube (:class:`iris.cube.Cube`):
        The cube to calculate area weights for.

    Kwargs:

    * normalize (False/True):
        If False, weights are grid cell areas. If True, weights are grid
        cell areas divided by the total grid area.

    The cube must have coordinates 'latitude' and 'longitude' with bounds.

    Area weights are calculated for each lat/lon cell as:

        .. math::

            r^2 cos(lat_0) (lon_1 - lon_0) - r^2 cos(lat_1) (lon_1 - lon_0)

    Currently, only supports a spherical datum.
    Uses earth radius from the cube, if present and spherical.
    Defaults to iris.analysis.cartography.DEFAULT_SPHERICAL_EARTH_RADIUS.

    """
    # Get the radius of the earth
    # Prefer a radius taken from the cube's own coordinate system; warn and
    # approximate when the datum is an ellipsoid, and fall back to the
    # module default when no usable coordinate system is attached.
    cs = cube.coord_system("CoordSystem")
    if isinstance(cs, iris.coord_systems.GeogCS):
        if cs.inverse_flattening != 0.0:
            warnings.warn("Assuming spherical earth from ellipsoid.")
        radius_of_earth = cs.semi_major_axis
    elif (isinstance(cs, iris.coord_systems.RotatedGeogCS) and
            (cs.ellipsoid is not None)):
        if cs.ellipsoid.inverse_flattening != 0.0:
            warnings.warn("Assuming spherical earth from ellipsoid.")
        radius_of_earth = cs.ellipsoid.semi_major_axis
    else:
        warnings.warn("Using DEFAULT_SPHERICAL_EARTH_RADIUS.")
        radius_of_earth = DEFAULT_SPHERICAL_EARTH_RADIUS

    # Get the lon and lat coords and axes
    try:
        lat, lon = _get_lat_lon_coords(cube)
    except IndexError:
        raise ValueError('Cannot get latitude/longitude '
                         'coordinates from cube {!r}.'.format(cube.name()))

    # Only 1-D (or scalar) lat/lon coordinates are supported.
    if lat.ndim > 1:
        raise iris.exceptions.CoordinateMultiDimError(lat)
    if lon.ndim > 1:
        raise iris.exceptions.CoordinateMultiDimError(lon)

    # coord_dims gives a sequence of data dimensions; it is empty for a
    # scalar coordinate, in which case None is recorded here.
    lat_dim = cube.coord_dims(lat)
    lat_dim = lat_dim[0] if lat_dim else None

    lon_dim = cube.coord_dims(lon)
    lon_dim = lon_dim[0] if lon_dim else None

    if not (lat.has_bounds() and lon.has_bounds()):
        msg = ("Coordinates {!r} and {!r} must have bounds to determine "
               "the area weights.".format(lat.name(), lon.name()))
        raise ValueError(msg)

    # Convert from degrees to radians
    # Work on copies so the caller's cube coordinates are left untouched.
    lat = lat.copy()
    lon = lon.copy()

    for coord in (lat, lon):
        if coord.units in (cf_units.Unit('degrees'),
                           cf_units.Unit('radians')):
            coord.convert_units('radians')
        else:
            msg = ("Units of degrees or radians required, coordinate "
                   "{!r} has units: {!r}".format(coord.name(),
                                                 coord.units.name))
            raise ValueError(msg)

    # Create 2D weights from bounds.
    # Use the geographical area as the weight for each cell
    # Convert latitudes to co-latitude. I.e from -90 --> +90  to  0 --> pi
    # NOTE(review): _quadrant_area presumably expects co-latitude bounds,
    # hence the +pi/2 shift here -- confirm against its definition.
    ll_weights = _quadrant_area(lat.bounds + np.pi / 2.,
                                lon.bounds, radius_of_earth)

    # Normalize the weights if necessary.
    if normalize:
        ll_weights /= ll_weights.sum()

    # Now we create an array of weights for each cell. This process will
    # handle adding the required extra dimensions and also take care of
    # the order of dimensions.
    # Dimensions the cube lacks (scalar lat/lon, dim is None) are dropped
    # from the weight shape before broadcasting back to the cube's shape.
    broadcast_dims = [x for x in (lat_dim, lon_dim) if x is not None]
    wshape = []
    for idim, dim in zip((0, 1), (lat_dim, lon_dim)):
        if dim is not None:
            wshape.append(ll_weights.shape[idim])
    ll_weights = ll_weights.reshape(wshape)
    broad_weights = iris.util.broadcast_to_shape(ll_weights,
                                                 cube.shape,
                                                 broadcast_dims)

    return broad_weights

Example 34

Project: scot
Source File: infomax_.py
View license
def infomax(data, weights=None, l_rate=None, block=None, w_change=1e-12,
            anneal_deg=60., anneal_step=0.9, extended=False, n_subgauss=1,
            kurt_size=6000, ext_blocks=1, max_iter=200,
            random_state=None, verbose=None):
    """Run the (extended) Infomax ICA decomposition on raw data

    based on the publications of Bell & Sejnowski 1995 (Infomax)
    and Lee, Girolami & Sejnowski, 1999 (extended Infomax)

    Parameters
    ----------
    data : np.ndarray, shape (n_samples, n_features)
        The data to unmix.
    weights : np.ndarray, shape (n_features, n_features), optional
        The initialized unmixing matrix. Defaults to None. If None, the
        identity matrix is used.
    l_rate : float
        This quantity indicates the relative size of the change in weights.
        Note. Smaller learning rates will slow down the procedure.
        Defaults to 0.01 / log(n_features ** 2.0)
    block : int
        The block size of randomly chosen data segment.
        Defaults to floor(sqrt(n_samples / 3.0))
    w_change : float
        The change at which to stop iteration. Defaults to 1e-12.
    anneal_deg : float
        The angle at which (in degree) the learning rate will be reduced.
        Defaults to 60.0
    anneal_step : float
        The factor by which the learning rate will be reduced once
        ``anneal_deg`` is exceeded:
            l_rate *= anneal_step
        Defaults to 0.9
    extended : bool
        Whether to use the extended Infomax algorithm or not. Defaults to
        False.
    n_subgauss : int
        The number of subgaussian components. Only considered for extended
        Infomax.
    kurt_size : int
        The window size for kurtosis estimation. Only considered for extended
        Infomax.
    ext_blocks : int
        The number of blocks after which to recompute Kurtosis.
        Only considered for extended Infomax.
    max_iter : int
        The maximum number of iterations. Defaults to 200.
    random_state : None | int | np.random.RandomState
        Seed or random state used for shuffling the data and for drawing
        the kurtosis-estimation samples.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    unmixing_matrix : np.ndarray of float, shape (n_features, n_features)
        The linear unmixing operator.

    Raises
    ------
    ValueError
        If the weights repeatedly blow up and the learning rate cannot be
        lowered any further.
    """
    rng = check_random_state(random_state)

    # define some default parameters
    max_weight = 1e8          # individual-weight magnitude triggering restart
    restart_fac = 0.9         # learning-rate reduction factor on restart
    min_l_rate = 1e-10        # below this learning rate, give up
    blowup = 1e4              # weight-change magnitude triggering annealing
    blowup_fac = 0.5
    n_small_angle = 20        # stop after this many consecutive small angles
    degconst = 180.0 / np.pi  # radians -> degrees

    # for extended Infomax
    extmomentum = 0.5
    signsbias = 0.02
    signcount_threshold = 25
    signcount_step = 2
    if ext_blocks > 0:  # allow not to recompute kurtosis
        n_subgauss = 1  # but initialize n_subgauss to 1 if you recompute

    # check data shape
    n_samples, n_features = data.shape
    n_features_square = n_features ** 2

    # check input parameter
    # heuristic default - may need adjustment for
    # large or tiny data sets
    if l_rate is None:
        l_rate = 0.01 / math.log(n_features ** 2.0)

    if block is None:
        block = int(math.floor(math.sqrt(n_samples / 3.0)))

    # BUGFIX: the conditional expression must be parenthesized so it only
    # selects the inserted word; previously the whole statement parsed as
    # ``(msg % ' Extended ') if extended else ' '`` and the non-extended
    # case logged just a single space.
    logger.info('computing%sInfomax ICA' % (' Extended ' if extended is True
                                            else ' '))

    # collect parameter
    nblock = n_samples // block
    lastt = (nblock - 1) * block + 1

    # initialize training
    if weights is None:
        # initialize weights as identity matrix
        weights = np.identity(n_features, dtype=np.float64)

    BI = block * np.identity(n_features, dtype=np.float64)
    bias = np.zeros((n_features, 1), dtype=np.float64)
    onesrow = np.ones((1, block), dtype=np.float64)
    startweights = weights.copy()
    oldweights = startweights.copy()
    step = 0
    count_small_angle = 0
    wts_blowup = False
    blockno = 0
    signcount = 0

    # for extended Infomax
    if extended is True:
        signs = np.identity(n_features)
        # NOTE(review): the next line is a no-op (a bare subscript
        # expression).  It presumably was meant to mark the first
        # ``n_subgauss`` diagonal entries as subgaussian (e.g. ``= -1``) --
        # confirm against the reference implementation before changing it;
        # behavior is preserved here.
        signs.flat[slice(0, n_features * n_subgauss, n_features)]
        kurt_size = min(kurt_size, n_samples)
        old_kurt = np.zeros(n_features, dtype=np.float64)
        oldsigns = np.zeros((n_features, n_features))

    # trainings loop
    olddelta, oldchange = 1., 0.
    while step < max_iter:

        # shuffle data at each step
        permute = list(range(n_samples))
        rng.shuffle(permute)

        # ICA training block
        # loop across block samples
        for t in range(0, lastt, block):
            u = np.dot(data[permute[t:t + block], :], weights)
            u += np.dot(bias, onesrow).T

            if extended is True:
                # extended ICA update
                y = np.tanh(u)
                weights += l_rate * np.dot(weights,
                                           BI - np.dot(np.dot(u.T, y), signs) -
                                           np.dot(u.T, u))
                bias += l_rate * np.reshape(np.sum(y, axis=0,
                                            dtype=np.float64) * -2.0,
                                            (n_features, 1))

            else:
                # logistic ICA weights update
                y = 1.0 / (1.0 + np.exp(-u))
                weights += l_rate * np.dot(weights,
                                           BI + np.dot(u.T, (1.0 - 2.0 * y)))
                bias += l_rate * np.reshape(np.sum((1.0 - 2.0 * y), axis=0,
                                            dtype=np.float64), (n_features, 1))

            # check change limit
            max_weight_val = np.max(np.abs(weights))
            if max_weight_val > max_weight:
                wts_blowup = True

            blockno += 1
            if wts_blowup:
                break

            # ICA kurtosis estimation
            if extended is True:

                n = np.fix(blockno / ext_blocks)

                if np.abs(n) * ext_blocks == blockno:
                    if kurt_size < n_samples:
                        rp = np.floor(rng.uniform(0, 1, kurt_size) *
                                      (n_samples - 1))
                        tpartact = np.dot(data[rp.astype(int), :], weights).T
                    else:
                        tpartact = np.dot(data, weights).T

                    # estimate kurtosis
                    kurt = kurtosis(tpartact, axis=1, fisher=True)

                    if extmomentum != 0:
                        kurt = (extmomentum * old_kurt +
                                (1.0 - extmomentum) * kurt)
                        old_kurt = kurt

                    # estimate weighted signs
                    signs.flat[::n_features + 1] = ((kurt + signsbias) /
                                                    np.abs(kurt + signsbias))

                    ndiff = ((signs.flat[::n_features + 1] -
                              oldsigns.flat[::n_features + 1]) != 0).sum()
                    if ndiff == 0:
                        signcount += 1
                    else:
                        signcount = 0
                    oldsigns = signs

                    if signcount >= signcount_threshold:
                        ext_blocks = np.fix(ext_blocks * signcount_step)
                        signcount = 0

        # here we continue after the for
        # loop over the ICA training blocks
        # if weights in bounds:
        if not wts_blowup:
            oldwtchange = weights - oldweights
            step += 1
            angledelta = 0.0
            delta = oldwtchange.reshape(1, n_features_square)
            change = np.sum(delta * delta, dtype=np.float64)
            if step > 1:
                angledelta = math.acos(np.sum(delta * olddelta) /
                                       math.sqrt(change * oldchange))
                angledelta *= degconst

            # anneal learning rate
            oldweights = weights.copy()
            if angledelta > anneal_deg:
                l_rate *= anneal_step    # anneal learning rate
                # accumulate angledelta until anneal_deg reached l_rates
                olddelta = delta
                oldchange = change
                count_small_angle = 0  # reset count when angle delta is large
            else:
                if step == 1:  # on first step only
                    olddelta = delta  # initialize
                    oldchange = change
                count_small_angle += 1
                if count_small_angle > n_small_angle:
                    max_iter = step

            # apply stopping rule
            if step > 2 and change < w_change:
                step = max_iter
            elif change > blowup:
                l_rate *= blowup_fac

        # restart if weights blow up
        # (for lowering l_rate)
        else:
            step = 0  # start again
            wts_blowup = 0  # re-initialize variables
            blockno = 1
            l_rate *= restart_fac  # with lower learning rate
            weights = startweights.copy()
            oldweights = startweights.copy()
            olddelta = np.zeros((1, n_features_square), dtype=np.float64)
            bias = np.zeros((n_features, 1), dtype=np.float64)

            # for extended Infomax
            if extended:
                signs = np.identity(n_features)
                # NOTE(review): same no-op as above -- preserved.
                signs.flat[slice(0, n_features * n_subgauss, n_features)]
                oldsigns = np.zeros((n_features, n_features))

            if l_rate > min_l_rate:
                if verbose:
                    logger.info('... lowering learning rate to %g'
                                '\n... re-starting...' % l_rate)
            else:
                raise ValueError('Error in Infomax ICA: unmixing_matrix matrix'
                                 'might not be invertible!')

    # prepare return values
    return weights.T

Example 35

Project: sfepy
Source File: probes.py
View license
    def get_points(self, refine_flag=None):
        """
        Get the probe points.

        Returns
        -------
        pars : array_like
           The independent coordinate of the probe.
        points : array_like
           The probe points, parametrized by pars.
        """
        # Refined probes already carry their parametrization and points.
        if self.is_refined:
            return self.pars, self.points

        if refine_flag is None:
            # Uniform angles on the circle, without the duplicated 2*pi.
            angles = nm.linspace(0.0, 2.0 * nm.pi, self.n_point + 1)[:-1]
        else:
            # Refine cyclically and update the point count accordingly.
            angles = Probe.refine_pars(self.pars, refine_flag,
                                       cyclic_val=2.0 * nm.pi)
            self.n_point = angles.shape[0]

        self.pars = angles

        # Points in the xy plane, centred at the origin.
        xs = self.radius * nm.cos(angles[:, None])
        ys = self.radius * nm.sin(angles[:, None])

        if len(self.centre) == 3:
            zs = nm.zeros((self.n_point, 1), dtype=nm.float64)
            points = nm.c_[xs, ys, zs]

            # Rotate the circle so its normal matches self.normal.
            z_dir = nm.array([0.0, 0.0, 1.0], dtype=nm.float64)
            axis = nm.cross(z_dir, self.normal)
            angle = nm.arccos(nm.dot(z_dir, self.normal))

            if nla.norm(axis) < 0.1:
                # The normal is already (close to) +z; no rotation needed.
                rot_mtx = nm.eye(3, dtype=nm.float64)
            else:
                rot_mtx = make_axis_rotation_matrix(axis, angle)

            points = nm.dot(points, rot_mtx)
        else:
            points = nm.c_[xs, ys]

        # Shift to the requested centre.
        points += self.centre

        self.points = points

        return angles, points

Example 36

Project: sfs-python
Source File: array.py
View license
def rounded_edge(Nxy, Nr, dx, center=[0, 0, 0], orientation=[1, 0, 0]):
    """Array along the xy-axis with rounded edge at the origin.

    Parameters
    ----------
    Nxy : int
        Number of secondary sources along x- and y-axis.
    Nr : int
        Number of secondary sources in rounded edge.  Radius of edge is
        adjusted to equidistant sampling along entire array.
    dx : float
        Spacing between secondary sources.
    center : (3,) array_like, optional
        Position of edge.
    orientation : (3,) array_like, optional
        Normal vector of array.  Default orientation is along xy-axis.

    Returns
    -------
    `ArrayData`
        Positions, orientations and weights of secondary sources.

    Examples
    --------
    .. plot::
        :context: close-figs

        x0, n0, a0 = sfs.array.rounded_edge(8, 5, 0.2)
        sfs.plot.loudspeaker_2d(x0, n0, a0)
        plt.axis('equal')

    """
    # Radius chosen so the arc is sampled with the same spacing dx.
    Nr += 1
    R = 2 / np.pi * Nr * dx

    # Straight segment along the y-axis.
    x00, n00, a00 = linear(Nxy, dx, center=[0, Nxy // 2 * dx + dx / 2 + R, 0])
    positions = np.flipud(x00)
    directions = n00
    weights = a00

    # Quarter-circle segment around the edge.
    arc_pos = np.zeros((Nr, 3))
    arc_dir = np.zeros((Nr, 3))
    arc_weights = np.zeros(Nr)
    for idx in range(0, Nr):
        alpha = np.pi / 2 * idx / Nr
        arc_pos[idx, 0] = R * (1 - np.cos(alpha))
        arc_pos[idx, 1] = R * (1 - np.sin(alpha))
        arc_dir[idx, 0] = np.cos(alpha)
        arc_dir[idx, 1] = np.sin(alpha)
        arc_weights[idx] = dx
    positions = np.concatenate((positions, arc_pos))
    directions = np.concatenate((directions, arc_dir))
    weights = np.concatenate((weights, arc_weights))

    # Straight segment along the x-axis.
    x00, n00, a00 = linear(Nxy, dx, center=[Nxy // 2 * dx - dx / 2 + R, 0, 0],
                           orientation=[0, 1, 0])
    positions = np.concatenate((positions, np.flipud(x00)))
    directions = np.concatenate((directions, n00))
    weights = np.concatenate((weights, a00))

    # Rotate the assembled array to the requested orientation ...
    positions, directions = _rotate_array(positions, directions,
                                          [1, 0, 0], orientation)
    # ... and shift it to the desired position.
    positions += center
    return ArrayData(positions, directions, weights)

Example 37

Project: sfs-python
Source File: source.py
View license
def point_modal(omega, x0, n0, grid, L, N=None, deltan=0, c=None):
    """Point source in a rectangular room using a modal room model.

    Parameters
    ----------
    omega : float
        Frequency of source.
    x0 : (3,) array_like
        Position of source.
    n0 : (3,) array_like
        Normal vector (direction) of source (only required for
        compatibility).
    grid : triple of array_like
        The grid that is used for the sound field calculations.
        See `sfs.util.xyz_grid()`.
    L : (3,) array_like
        Dimensions of the rectangular room.
    N : (3,) array_like or int, optional
        Combination of modal orders in the three spatial dimensions to
        calculate the sound field for, or maximum order for all
        dimensions.  If not given, the maximum modal order is
        approximately determined and the sound field is computed up to
        this maximum order.
    deltan : float, optional
        Absorption coefficient of the walls.
    c : float, optional
        Speed of sound.

    Returns
    -------
    numpy.ndarray
        Sound pressure at positions given by *grid*.

    """
    k = util.wavenumber(omega, c)
    x0 = util.asarray_1d(x0)
    x, y, z = util.as_xyz_components(grid)

    if N is None:
        # Estimate the maximum relevant modal order per dimension from
        # the wavenumber and the room size.
        mm, nn, ll = (range(int(np.ceil(L[dim] / np.pi * k)))
                      for dim in range(3))
    elif np.isscalar(N):
        # Same maximum order in every dimension.
        mm = nn = ll = range(N)
    else:
        # A single (m, n, l) mode combination.
        mm, nn, ll = [N[0]], [N[1]], [N[2]]

    # Per dimension: (damped modal wavenumber squared, mode shape factor).
    modes_x = [((kx + 1j * deltan)**2, np.cos(kx * x) * np.cos(kx * x0[0]))
               for kx in (m * np.pi / L[0] for m in mm)]
    modes_y = [((ky + 1j * deltan)**2, np.cos(ky * y) * np.cos(ky * x0[1]))
               for ky in (n * np.pi / L[1] for n in nn)]
    modes_z = [((kz + 1j * deltan)**2, np.cos(kz * z) * np.cos(kz * x0[2]))
               for kz in (l * np.pi / L[2] for l in ll)]

    ksquared = k**2
    p = 0
    # Superpose all mode combinations.
    for (km0, p0), (km1, p1), (km2, p2) in itertools.product(modes_x,
                                                             modes_y,
                                                             modes_z):
        p = p + 8 / (ksquared - (km0 + km1 + km2)) * p0 * p1 * p2
    return p

Example 38

Project: sfs-python
Source File: source.py
View license
def point_modal_velocity(omega, x0, n0, grid, L, N=None, deltan=0, c=None):
    """Velocity of point source in a rectangular room using a modal room model.

    Parameters
    ----------
    omega : float
        Frequency of source.
    x0 : (3,) array_like
        Position of source.
    n0 : (3,) array_like
        Normal vector (direction) of source (only required for
        compatibility).
    grid : triple of array_like
        The grid that is used for the sound field calculations.
        See `sfs.util.xyz_grid()`.
    L : (3,) array_like
        Dimensions of the rectangular room.
    N : (3,) array_like or int, optional
        Combination of modal orders in the three spatial dimensions to
        calculate the sound field for, or maximum order for all
        dimensions.  If not given, the maximum modal order is
        approximately determined and the sound field is computed up to
        this maximum order.
    deltan : float, optional
        Absorption coefficient of the walls.
    c : float, optional
        Speed of sound.

    Returns
    -------
    `XyzComponents`
        Particle velocity at positions given by *grid*.

    """
    k = util.wavenumber(omega, c)
    x0 = util.asarray_1d(x0)
    x, y, z = util.as_xyz_components(grid)

    if N is None:
        # Estimate the maximum relevant modal order per dimension from
        # the wavenumber and the room size.
        mm, nn, ll = (range(int(np.ceil(L[dim] / np.pi * k)))
                      for dim in range(3))
    elif np.isscalar(N):
        # Same maximum order in every dimension.
        mm = nn = ll = range(N)
    else:
        # A single (m, n, l) mode combination.
        mm, nn, ll = [N[0]], [N[1]], [N[2]]

    # Per dimension: (damped modal wavenumber squared, mode shape factor);
    # sin() in the field coordinate for the velocity components.
    modes_x = [((kx + 1j * deltan)**2, np.sin(kx * x) * np.cos(kx * x0[0]))
               for kx in (m * np.pi / L[0] for m in mm)]
    modes_y = [((ky + 1j * deltan)**2, np.sin(ky * y) * np.cos(ky * x0[1]))
               for ky in (n * np.pi / L[1] for n in nn)]
    modes_z = [((kz + 1j * deltan)**2, np.sin(kz * z) * np.cos(kz * x0[2]))
               for kz in (l * np.pi / L[2] for l in ll)]

    ksquared = k**2
    vx = vy = vz = 0 + 0j
    # Superpose all mode combinations, component-wise.
    for (km0, p0), (km1, p1), (km2, p2) in itertools.product(modes_x,
                                                             modes_y,
                                                             modes_z):
        scaling = -8 * 1j / (ksquared - (km0 + km1 + km2))
        vx = vx + scaling * p0
        vy = vy + scaling * p1
        vz = vz + scaling * p2
    return util.XyzComponents([vx, vy, vz])

Example 39

Project: GPy
Source File: non_gaussian.py
View license
def student_t_approx(optimize=True, plot=True):
    """
    Example of regressing with a Student-t likelihood using Laplace
    approximation.

    Fits four GP models to the same noisy 1-d sinusoid: Gaussian and
    Student-t likelihoods, each on the clean data and on a copy with a
    block of outliers injected.

    NOTE(review): ``debug`` is hard-coded to True below, so the
    function optimizes ``m4`` only and returns early; the ``optimize``
    and ``plot`` branches are currently unreachable.  Confirm whether
    that is intentional before relying on the flags.

    Parameters
    ----------
    optimize : bool
        If True, optimize all four models (unreachable while debug=True).
    plot : bool
        If True, plot all four fits (unreachable while debug=True).
    """
    real_std = 0.1
    # Start with a known function: a sine wave plus Gaussian noise.
    X = np.linspace(0.0, np.pi*2, 100)[:, None]
    Y = np.sin(X) + np.random.randn(*X.shape)*real_std
    Y = Y/Y.max()
    Yc = Y.copy()

    # Dense grid of the noise-free function, for plotting.
    X_full = np.linspace(0.0, np.pi*2, 500)[:, None]
    Y_full = np.sin(X_full)
    Y_full = Y_full/Y_full.max()

    # Slightly noisy data: inject a block of outliers.
    Yc[75:80] += 1

    #Very noisy data
    #Yc[10] += 100
    #Yc[25] += 10
    #Yc[23] += 10
    #Yc[26] += 1000
    #Yc[24] += 10
    #Yc = Yc/Yc.max()

    #Add student t random noise to datapoints
    deg_free = 1
    print("Real noise: ", real_std)
    initial_var_guess = 0.5
    edited_real_sd = initial_var_guess

    # Kernel object: one independent RBF+White kernel per model.
    kernel1 = GPy.kern.RBF(X.shape[1]) + GPy.kern.White(X.shape[1])
    kernel2 = GPy.kern.RBF(X.shape[1]) + GPy.kern.White(X.shape[1])
    kernel3 = GPy.kern.RBF(X.shape[1]) + GPy.kern.White(X.shape[1])
    kernel4 = GPy.kern.RBF(X.shape[1]) + GPy.kern.White(X.shape[1])

    #Gaussian GP model on clean data
    m1 = GPy.models.GPRegression(X, Y.copy(), kernel=kernel1)
    # Pin the white-noise term and randomize the remaining parameters.
    m1['.*white'].constrain_fixed(1e-5)
    m1.randomize()

    #Gaussian GP model on corrupt data
    m2 = GPy.models.GPRegression(X, Yc.copy(), kernel=kernel2)
    m2['.*white'].constrain_fixed(1e-5)
    m2.randomize()

    #Student t GP model on clean data
    t_distribution = GPy.likelihoods.StudentT(deg_free=deg_free, sigma2=edited_real_sd)
    laplace_inf = GPy.inference.latent_function_inference.Laplace()
    m3 = GPy.core.GP(X, Y.copy(), kernel3, likelihood=t_distribution, inference_method=laplace_inf)
    m3['.*t_scale2'].constrain_bounded(1e-6, 10.)
    m3['.*white'].constrain_fixed(1e-5)
    m3.randomize()

    #Student t GP model on corrupt data
    t_distribution = GPy.likelihoods.StudentT(deg_free=deg_free, sigma2=edited_real_sd)
    laplace_inf = GPy.inference.latent_function_inference.Laplace()
    m4 = GPy.core.GP(X, Yc.copy(), kernel4, likelihood=t_distribution, inference_method=laplace_inf)
    m4['.*t_scale2'].constrain_bounded(1e-6, 10.)
    m4['.*white'].constrain_fixed(1e-5)
    m4.randomize()
    print(m4)
    debug=True
    if debug:
        # Debug path: optimize only the corrupt Student-t model, plot
        # its Laplace mode f_hat against the data, and return early.
        m4.optimize(messages=1)
        from matplotlib import pyplot as pb
        pb.plot(m4.X, m4.inference_method.f_hat)
        pb.plot(m4.X, m4.Y, 'rx')
        m4.plot()
        print(m4)
        return m4

    if optimize:
        optimizer='scg'
        print("Clean Gaussian")
        m1.optimize(optimizer, messages=1)
        print("Corrupt Gaussian")
        m2.optimize(optimizer, messages=1)
        print("Clean student t")
        m3.optimize(optimizer, messages=1)
        print("Corrupt student t")
        m4.optimize(optimizer, messages=1)

    if plot:
        plt.figure(1)
        plt.suptitle('Gaussian likelihood')
        ax = plt.subplot(211)
        m1.plot(ax=ax)
        plt.plot(X_full, Y_full)
        plt.ylim(-1.5, 1.5)
        plt.title('Gaussian clean')

        ax = plt.subplot(212)
        m2.plot(ax=ax)
        plt.plot(X_full, Y_full)
        plt.ylim(-1.5, 1.5)
        plt.title('Gaussian corrupt')

        plt.figure(2)
        plt.suptitle('Student-t likelihood')
        ax = plt.subplot(211)
        m3.plot(ax=ax)
        plt.plot(X_full, Y_full)
        plt.ylim(-1.5, 1.5)
        plt.title('Student-t rasm clean')

        ax = plt.subplot(212)
        m4.plot(ax=ax)
        plt.plot(X_full, Y_full)
        plt.ylim(-1.5, 1.5)
        plt.title('Student-t rasm corrupt')

    return m1, m2, m3, m4

Example 40

Project: GPy
Source File: sde_standard_periodic.py
View license
    def sde(self):
        """
        Return the state-space (SDE) representation of the standard
        periodic covariance, using a truncated series of N+1 harmonics.

        ! Note: one must constrain the lengthscale not to drop below
        0.25.  Below this the Bessel functions of the first kind grow
        very large.

        ! Note: one must also keep the wavelength from getting very
        low, because the gradients w.r.t. the wavelength then become
        unstable.  This might depend on the data; for a test example
        with 300 data points the lower limit is 0.15.

        Returns
        -------
        tuple
            ``(F, L, Qc, H, P_inf, P0, dF, dQc, dP_inf, dP0)`` -- the
            state matrices plus their derivatives, stacked along the
            last axis in parameter order (variance, wavelengths,
            lengthscales).
        """
        # Params to use: (in that order)
        #self.variance
        #self.wavelengths
        #self.lengthscales
        N = 7 # approximation order (number of harmonics kept)


        w0 = 2*np.pi/self.wavelengths # fundamental angular frequency
        lengthscales = 2*self.lengthscales         

        # Series coefficients q2 and their lengthscale derivatives.
        [q2,dq2l] = seriescoeff(N,lengthscales,self.variance)        
        # lengthscale is multiplied by 2 because of slightly different
        # formula for periodic covariance function.
        # For the same reason:

        dq2l = 2*dq2l

        if np.any( np.isfinite(q2) == False):
            raise ValueError("SDE periodic covariance error 1")

        if np.any( np.isfinite(dq2l) == False):
            raise ValueError("SDE periodic covariance error 2")

        # Block-diagonal feedback matrix: one 2x2 rotation at frequency
        # j*w0 per harmonic j = 0..N.
        F    = np.kron(np.diag(range(0,N+1)),np.array( ((0, -w0), (w0, 0)) ) )
        L    = np.eye(2*(N+1))
        Qc   = np.zeros((2*(N+1), 2*(N+1)))
        P_inf = np.kron(np.diag(q2),np.eye(2))
        H    = np.kron(np.ones((1,N+1)),np.array((1,0)) )
        P0 = P_inf.copy()

        # Derivatives w.r.t. the three kernel parameters.
        dF = np.empty((F.shape[0], F.shape[1], 3))
        dQc = np.empty((Qc.shape[0], Qc.shape[1], 3))
        dP_inf = np.empty((P_inf.shape[0], P_inf.shape[1], 3))         

        # Derivatives wrt self.variance
        dF[:,:,0] = np.zeros(F.shape)
        dQc[:,:,0] = np.zeros(Qc.shape)
        dP_inf[:,:,0] = P_inf / self.variance

        # Derivatives wrt self.wavelengths
        dF[:,:,1] = np.kron(np.diag(range(0,N+1)),np.array( ((0,  w0), (-w0, 0)) ) / self.wavelengths );
        dQc[:,:,1] = np.zeros(Qc.shape)
        dP_inf[:,:,1] = np.zeros(P_inf.shape)      

        # Derivatives wrt self.lengthscales        
        dF[:,:,2] = np.zeros(F.shape)
        dQc[:,:,2] = np.zeros(Qc.shape)
        dP_inf[:,:,2] = np.kron(np.diag(dq2l),np.eye(2))
        dP0 = dP_inf.copy()

        return (F, L, Qc, H, P_inf, P0, dF, dQc, dP_inf, dP0)

Example 41

Project: GPy
Source File: likelihood_tests.py
View license
    def test_laplace_log_likelihood(self):
        """Check that Laplace inference with a Gaussian likelihood
        matches exact Gaussian inference: same marginal likelihood,
        same predictions, and passing gradient checks, on a noisy 1-d
        sinusoid.
        """
        debug = False
        real_std = 0.1
        initial_var_guess = 0.5

        # Generate a noisy sine wave as the common data set.
        X = np.linspace(0.0, np.pi*2, 100)[:, None]
        Y = np.sin(X) + np.random.randn(*X.shape)*real_std
        Y = Y/Y.max()
        #Yc = Y.copy()
        #Yc[75:80] += 1
        kernel1 = GPy.kern.RBF(X.shape[1]) + GPy.kern.White(X.shape[1])
        #FIXME: Make sure you can copy kernels when params is fixed
        #kernel2 = kernel1.copy()
        kernel2 = GPy.kern.RBF(X.shape[1]) + GPy.kern.White(X.shape[1])

        # Reference model: exact Gaussian inference.
        gauss_distr1 = GPy.likelihoods.Gaussian(variance=initial_var_guess)
        exact_inf = GPy.inference.latent_function_inference.ExactGaussianInference()
        m1 = GPy.core.GP(X, Y.copy(), kernel=kernel1, likelihood=gauss_distr1, inference_method=exact_inf)
        m1['.*white'].constrain_fixed(1e-6)
        m1['.*Gaussian_noise.variance'].constrain_bounded(1e-4, 10)
        m1.randomize()

        # Model under test: Laplace inference with the same likelihood.
        gauss_distr2 = GPy.likelihoods.Gaussian(variance=initial_var_guess)
        laplace_inf = GPy.inference.latent_function_inference.Laplace()
        m2 = GPy.core.GP(X, Y.copy(), kernel=kernel2, likelihood=gauss_distr2, inference_method=laplace_inf)
        m2['.*white'].constrain_fixed(1e-6)
        m2['.*Gaussian_noise.variance'].constrain_bounded(1e-4, 10)
        m2.randomize()

        if debug:
            print(m1)
            print(m2)

        optimizer = 'scg'
        print("Gaussian")
        m1.optimize(optimizer, messages=debug, ipython_notebook=False)
        print("Laplace Gaussian")
        m2.optimize(optimizer, messages=debug, ipython_notebook=False)
        if debug:
            print(m1)
            print(m2)

        # Copy the optimized hyperparameters so both models are
        # evaluated at the same point.
        m2[:] = m1[:]

        #Predict for training points to get posterior mean and variance
        post_mean, post_var = m1.predict(X)
        post_mean_approx, post_var_approx, = m2.predict(X)

        if debug:
            from matplotlib import pyplot as pb
            pb.figure(5)
            pb.title('posterior means')
            pb.scatter(X, post_mean, c='g')
            pb.scatter(X, post_mean_approx, c='r', marker='x')

            pb.figure(6)
            pb.title('plot_f')
            m1.plot_f(fignum=6)
            m2.plot_f(fignum=6)
            fig, axes = pb.subplots(2, 1)
            fig.suptitle('Covariance matricies')
            a1 = pb.subplot(121)
            a1.matshow(m1.likelihood.covariance_matrix)
            a2 = pb.subplot(122)
            a2.matshow(m2.likelihood.covariance_matrix)

            pb.figure(8)
            pb.scatter(X, m1.likelihood.Y, c='g')
            pb.scatter(X, m2.likelihood.Y, c='r', marker='x')

        #Check Y's are the same
        np.testing.assert_almost_equal(m1.Y, m2.Y, decimal=5)
        #Check marginals are the same
        np.testing.assert_almost_equal(m1.log_likelihood(), m2.log_likelihood(), decimal=2)
        #Check marginals are the same with random
        m1.randomize()
        m2[:] = m1[:]

        np.testing.assert_almost_equal(m1.log_likelihood(), m2.log_likelihood(), decimal=2)

        #Check they are checkgradding
        #m1.checkgrad(verbose=1)
        #m2.checkgrad(verbose=1)
        self.assertTrue(m1.checkgrad(verbose=True))
        self.assertTrue(m2.checkgrad(verbose=True))

Example 42

Project: simpeg
Source File: test_boundaryPoisson.py
View license
    def getError(self):
        """Return the inf-norm error of the cell-centered Poisson solve
        with Neumann boundary conditions against the analytic solution
        phi = sin(pi x) sin(pi y).

        ``self.myTest`` selects the quantity compared: 'j' (flux from
        the analytic potential), 'q' (source term), 'xc' (potential
        solved with minres) or 'xcJ' (flux recomputed from the solved
        potential).

        NOTE(review): for any other value of ``self.myTest``, ``err``
        is never assigned and the final ``return err`` raises
        UnboundLocalError -- confirm callers only use the four values
        above.
        """
        # Analytic test functions: potential, flux components, source.
        phi = lambda x: np.sin(np.pi*x[:,0])*np.sin(np.pi*x[:,1])
        j_funX = lambda x: np.pi*np.cos(np.pi*x[:,0])*np.sin(np.pi*x[:,1])
        j_funY = lambda x: np.pi*np.sin(np.pi*x[:,0])*np.cos(np.pi*x[:,1])
        q_fun = lambda x: -2*(np.pi**2)*phi(x)

        xc_ana = phi(self.M.gridCC)
        q_ana = q_fun(self.M.gridCC)
        jX_ana = j_funX(self.M.gridFx)
        jY_ana = j_funY(self.M.gridFy)
        j_ana = np.r_[jX_ana,jY_ana]

        #TODO: Check where our boundary conditions are CCx or Nx

        # Boundary indices for cells and faces on each of the 4 sides.
        cxm,cxp,cym,cyp = self.M.cellBoundaryInd
        fxm,fxp,fym,fyp = self.M.faceBoundaryInd

        gBFx = self.M.gridFx[(fxm|fxp),:]
        gBFy = self.M.gridFy[(fym|fyp),:]

        gBCx = self.M.gridCC[(cxm|cxp),:]
        gBCy = self.M.gridCC[(cym|cyp),:]

        # Analytic boundary values of potential and flux.
        phi_bc = phi(np.r_[gBFx,gBFy])
        j_bc = np.r_[j_funX(gBFx), j_funY(gBFy)]

        # P = sp.csr_matrix(([-1,1],([0,self.M.nF-1],[0,1])), shape=(self.M.nF, 2))

        P, Pin, Pout = self.M.getBCProjWF('neumann')

        # Discrete operators: face inner product, volumes, gradient,
        # divergence.
        Mc = self.M.getFaceInnerProduct()
        McI = Utils.sdInv(self.M.getFaceInnerProduct())
        V = Utils.sdiag(self.M.vol)
        G = -Pin.T*Pin*self.M.faceDiv.T * V
        D = self.M.faceDiv
        j = McI*(G*xc_ana + P*phi_bc)
        q = V*D*Pin.T*Pin*j + V*D*Pout.T*j_bc

        # Rearrange if we know q to solve for x
        A = V*D*Pin.T*Pin*McI*G
        rhs = V*q_ana - V*D*Pin.T*Pin*McI*P*phi_bc - V*D*Pout.T*j_bc

        if self.myTest == 'j':
            err = np.linalg.norm((Pin*j-Pin*j_ana), np.inf)
        elif self.myTest == 'q':
            err = np.linalg.norm((q-V*q_ana), np.inf)
        elif self.myTest == 'xc':
            #TODO: fix the null space
            xc, info = sp.linalg.minres(A, rhs, tol = 1e-6)
            err = np.linalg.norm((xc-xc_ana), np.inf)
            if info > 0:
                print('Solve does not work well')
                print('ACCURACY', np.linalg.norm(Utils.mkvc(A*xc) - rhs))
        elif self.myTest == 'xcJ':
            #TODO: fix the null space
            xc, info = sp.linalg.minres(A, rhs, tol = 1e-6)
            j = McI*(G*xc + P*phi_bc)
            err = np.linalg.norm((Pin*j-Pin*j_ana), np.inf)
            if info > 0:
                print('Solve does not work well')
                print('ACCURACY', np.linalg.norm(Utils.mkvc(A*xc) - rhs))
        return err

Example 43

View license
    def getError(self):
        """Return the inf-norm error of the 2-D cell-centered Poisson
        solve with mixed (Robin) boundary conditions.

        Analytic solution: phi = cos(pi x) cos(pi y), flux
        j = -grad(phi), source q = 2 pi^2 phi.  The mixed boundary data
        gamma = alpha*phi + beta*dphi/dn is built from the analytic
        solution on all four boundaries with alpha = beta = 1.

        Returns
        -------
        float
            ``||xc - xc_ana||_inf`` when ``self.myTest == 'xc'``.

        Raises
        ------
        NotImplementedError
            For any other value of ``self.myTest``.
        """
        # Analytic test functions.
        def phi_fun(x):
            return np.cos(np.pi*x[:, 0])*np.cos(np.pi*x[:, 1])

        def j_funX(x):
            return +np.pi*np.sin(np.pi*x[:, 0])*np.cos(np.pi*x[:, 1])

        def j_funY(x):
            return +np.pi*np.cos(np.pi*x[:, 0])*np.sin(np.pi*x[:, 1])

        def phideriv_funX(x):
            return -j_funX(x)

        def phideriv_funY(x):
            return -j_funY(x)

        def q_fun(x):
            return +2*(np.pi**2)*phi_fun(x)

        xc_ana = phi_fun(self.M.gridCC)
        q_ana = q_fun(self.M.gridCC)
        jX_ana = j_funX(self.M.gridFx)
        jY_ana = j_funY(self.M.gridFy)
        j_ana = np.r_[jX_ana, jY_ana]

        # Get boundary locations
        fxm, fxp, fym, fyp = self.M.faceBoundaryInd
        gBFxm = self.M.gridFx[fxm, :]
        gBFxp = self.M.gridFx[fxp, :]
        gBFym = self.M.gridFy[fym, :]
        gBFyp = self.M.gridFy[fyp, :]

        # Setup Mixed B.C (alpha, beta, gamma)
        alpha_xm = np.ones_like(gBFxm[:, 0])
        alpha_xp = np.ones_like(gBFxp[:, 0])
        beta_xm = np.ones_like(gBFxm[:, 0])
        beta_xp = np.ones_like(gBFxp[:, 0])
        alpha_ym = np.ones_like(gBFym[:, 1])
        alpha_yp = np.ones_like(gBFyp[:, 1])
        beta_ym = np.ones_like(gBFym[:, 1])
        beta_yp = np.ones_like(gBFyp[:, 1])

        phi_bc_xm, phi_bc_xp = phi_fun(gBFxm), phi_fun(gBFxp)
        phi_bc_ym, phi_bc_yp = phi_fun(gBFym), phi_fun(gBFyp)

        phiderivX_bc_xm = phideriv_funX(gBFxm)
        phiderivX_bc_xp = phideriv_funX(gBFxp)
        phiderivY_bc_ym = phideriv_funY(gBFym)
        phiderivY_bc_yp = phideriv_funY(gBFyp)

        def gamma_fun(alpha, beta, phi, phi_deriv):
            # Robin data: gamma = alpha*phi + beta*dphi/dn.
            return alpha*phi + beta*phi_deriv

        gamma_xm = gamma_fun(alpha_xm, beta_xm, phi_bc_xm, phiderivX_bc_xm)
        gamma_xp = gamma_fun(alpha_xp, beta_xp, phi_bc_xp, phiderivX_bc_xp)
        gamma_ym = gamma_fun(alpha_ym, beta_ym, phi_bc_ym, phiderivY_bc_ym)
        gamma_yp = gamma_fun(alpha_yp, beta_yp, phi_bc_yp, phiderivY_bc_yp)

        alpha = [alpha_xm, alpha_xp, alpha_ym, alpha_yp]
        beta = [beta_xm, beta_xp, beta_ym, beta_yp]
        gamma = [gamma_xm, gamma_xp, gamma_ym, gamma_yp]

        x_BC, y_BC = getxBCyBC_CC(self.M, alpha, beta, gamma)

        # Discretize: A * xc = rhs with the BC folded into G and rhs.
        sigma = np.ones(self.M.nC)
        Mfrho = self.M.getFaceInnerProduct(1./sigma)
        MfrhoI = self.M.getFaceInnerProduct(1./sigma, invMat=True)
        V = Utils.sdiag(self.M.vol)
        Div = V*self.M.faceDiv
        P_BC, B = self.M.getBCProjWF_simple()
        q = q_fun(self.M.gridCC)
        M = B*self.M.aveCC2F
        G = Div.T - P_BC*Utils.sdiag(y_BC)*M
        rhs = V*q + Div*MfrhoI*P_BC*x_BC
        A = Div*MfrhoI*G

        if self.myTest == 'xc':
            Ainv = Solver(A)
            xc = Ainv*rhs
            err = np.linalg.norm((xc-xc_ana), np.inf)
        else:
            # BUG FIX: the original evaluated ``NotImplementedError`` as
            # a bare expression (a no-op) and then returned the unbound
            # local ``err``; raise explicitly instead.
            raise NotImplementedError(
                "myTest == {!r} is not supported".format(self.myTest))
        return err

Example 44

View license
    def getError(self):
        """Return the inf-norm error of the 3-D cell-centered Poisson
        solve with mixed (Robin) boundary conditions.

        Analytic solution: phi = cos(pi x) cos(pi y) cos(pi z), flux
        j = -grad(phi), source q = 3 pi^2 phi.  The mixed boundary data
        gamma = alpha*phi + beta*dphi/dn is built from the analytic
        solution on all six faces with alpha = beta = 1.

        Returns
        -------
        float
            ``||xc - xc_ana||_inf`` when ``self.myTest == 'xc'``.

        Raises
        ------
        NotImplementedError
            For any other value of ``self.myTest``.
        """
        # Analytic test functions.
        def phi_fun(x):
            return (np.cos(np.pi*x[:, 0])*np.cos(np.pi*x[:, 1]) *
                    np.cos(np.pi*x[:, 2]))

        def j_funX(x):
            return (np.pi*np.sin(np.pi*x[:, 0])*np.cos(np.pi*x[:, 1]) *
                    np.cos(np.pi*x[:, 2]))

        def j_funY(x):
            return (np.pi*np.cos(np.pi*x[:, 0])*np.sin(np.pi*x[:, 1]) *
                    np.cos(np.pi*x[:, 2]))

        def j_funZ(x):
            return (np.pi*np.cos(np.pi*x[:, 0])*np.cos(np.pi*x[:, 1]) *
                    np.sin(np.pi*x[:, 2]))

        def phideriv_funX(x): return -j_funX(x)

        def phideriv_funY(x): return -j_funY(x)

        def phideriv_funZ(x): return -j_funZ(x)

        def q_fun(x): return 3*(np.pi**2)*phi_fun(x)

        xc_ana = phi_fun(self.M.gridCC)
        q_ana = q_fun(self.M.gridCC)
        jX_ana = j_funX(self.M.gridFx)
        jY_ana = j_funY(self.M.gridFy)
        jZ_ana = j_funZ(self.M.gridFz)
        # BUG FIX: the original stacked jY_ana twice and never
        # evaluated the z-component of the analytic flux.
        j_ana = np.r_[jX_ana, jY_ana, jZ_ana]

        # Get boundary locations
        fxm, fxp, fym, fyp, fzm, fzp = self.M.faceBoundaryInd
        gBFxm = self.M.gridFx[fxm, :]
        gBFxp = self.M.gridFx[fxp, :]
        gBFym = self.M.gridFy[fym, :]
        gBFyp = self.M.gridFy[fyp, :]
        gBFzm = self.M.gridFz[fzm, :]
        gBFzp = self.M.gridFz[fzp, :]

        # Setup Mixed B.C (alpha, beta, gamma)
        alpha_xm = np.ones_like(gBFxm[:, 0])
        alpha_xp = np.ones_like(gBFxp[:, 0])
        beta_xm = np.ones_like(gBFxm[:, 0])
        beta_xp = np.ones_like(gBFxp[:, 0])
        alpha_ym = np.ones_like(gBFym[:, 1])
        alpha_yp = np.ones_like(gBFyp[:, 1])
        beta_ym = np.ones_like(gBFym[:, 1])
        beta_yp = np.ones_like(gBFyp[:, 1])
        alpha_zm = np.ones_like(gBFzm[:, 2])
        alpha_zp = np.ones_like(gBFzp[:, 2])
        beta_zm = np.ones_like(gBFzm[:, 2])
        beta_zp = np.ones_like(gBFzp[:, 2])

        phi_bc_xm, phi_bc_xp = phi_fun(gBFxm), phi_fun(gBFxp)
        phi_bc_ym, phi_bc_yp = phi_fun(gBFym), phi_fun(gBFyp)
        phi_bc_zm, phi_bc_zp = phi_fun(gBFzm), phi_fun(gBFzp)

        phiderivX_bc_xm = phideriv_funX(gBFxm)
        phiderivX_bc_xp = phideriv_funX(gBFxp)
        phiderivY_bc_ym = phideriv_funY(gBFym)
        phiderivY_bc_yp = phideriv_funY(gBFyp)
        # Renamed from the original's misleading phiderivY_* labels:
        # these are z-derivatives on the z-faces.
        phiderivZ_bc_zm = phideriv_funZ(gBFzm)
        phiderivZ_bc_zp = phideriv_funZ(gBFzp)

        def gamma_fun(alpha, beta, phi, phi_deriv):
            # Robin data: gamma = alpha*phi + beta*dphi/dn.
            return alpha*phi + beta*phi_deriv

        gamma_xm = gamma_fun(alpha_xm, beta_xm, phi_bc_xm, phiderivX_bc_xm)
        gamma_xp = gamma_fun(alpha_xp, beta_xp, phi_bc_xp, phiderivX_bc_xp)
        gamma_ym = gamma_fun(alpha_ym, beta_ym, phi_bc_ym, phiderivY_bc_ym)
        gamma_yp = gamma_fun(alpha_yp, beta_yp, phi_bc_yp, phiderivY_bc_yp)
        gamma_zm = gamma_fun(alpha_zm, beta_zm, phi_bc_zm, phiderivZ_bc_zm)
        gamma_zp = gamma_fun(alpha_zp, beta_zp, phi_bc_zp, phiderivZ_bc_zp)

        alpha = [alpha_xm, alpha_xp, alpha_ym, alpha_yp, alpha_zm, alpha_zp]
        beta = [beta_xm, beta_xp, beta_ym, beta_yp, beta_zm, beta_zp]
        gamma = [gamma_xm, gamma_xp, gamma_ym, gamma_yp, gamma_zm, gamma_zp]

        x_BC, y_BC = getxBCyBC_CC(self.M, alpha, beta, gamma)

        # Discretize: A * xc = rhs with the BC folded into G and rhs.
        sigma = np.ones(self.M.nC)
        Mfrho = self.M.getFaceInnerProduct(1./sigma)
        MfrhoI = self.M.getFaceInnerProduct(1./sigma, invMat=True)
        V = Utils.sdiag(self.M.vol)
        Div = V*self.M.faceDiv
        P_BC, B = self.M.getBCProjWF_simple()
        q = q_fun(self.M.gridCC)
        M = B*self.M.aveCC2F
        G = Div.T - P_BC*Utils.sdiag(y_BC)*M
        rhs = V*q + Div*MfrhoI*P_BC*x_BC
        A = Div*MfrhoI*G

        if self.myTest == 'xc':
            # TODO: fix the null space
            Ainv = Solver(A)
            xc = Ainv*rhs
            err = np.linalg.norm((xc-xc_ana), np.inf)
        else:
            # BUG FIX: the original evaluated ``NotImplementedError`` as
            # a bare expression (a no-op) and then returned the unbound
            # local ``err``; raise explicitly instead.
            raise NotImplementedError(
                "myTest == {!r} is not supported".format(self.myTest))
        return err

Example 45

Project: pyhawkes
Source File: hippocampus.py
View license
def plot_results(result):
    """Plot hippocampus place-field figures: true place-field
    locations, the inferred weighted adjacency matrix, and the
    inferred connection probabilities; each figure is saved as a PDF
    in ``results_dir``.

    Parameters
    ----------
    result : tuple
        ``(lls, plls, Weffs, Ps, Ls)`` from the inference run; only
        ``Weffs`` and ``Ps`` (posterior samples) are used, averaged
        over the second half of the samples.

    NOTE(review): relies on module-level globals (``node_perm``,
    ``pfs``, ``pfs_th``, ``pf_size``, ``colors``, ``K``,
    ``N_samples``, ``results_dir`` and the plotting helpers) -- confirm
    they are in scope.
    """
    lls, plls, Weffs, Ps, Ls = result

    ### Colored locations
    wheel_cmap = gradient_cmap([colors[0], colors[3], colors[2], colors[1], colors[0]])
    fig = create_figure(figsize=(1.8, 1.8))
    # ax = create_axis_at_location(fig, .1, .1, 1.5, 1.5, box=False)
    ax = create_axis_at_location(fig, .6, .4, 1.1, 1.1)

    for i,k in enumerate(node_perm):
        # Hue encodes the place-field angle; size encodes field size.
        color = wheel_cmap((np.pi+pfs_th[k])/(2*np.pi))
        # alpha = pfs_rad[k] / 47
        alpha = 0.7
        ax.add_patch(Circle((pfs[k,0], pfs[k,1]),
                            radius=3+4*pf_size[k],
                            color=color, ec="none",
                            alpha=alpha)
                            )

    plt.title("True place fields")
    # ax.text(0, 45, "True Place Fields",
    #         horizontalalignment="center",
    #         fontdict=dict(size=9))
    plt.xlim(-45,45)
    plt.xticks([-40, -20, 0, 20, 40])
    plt.xlabel("$x$ [cm]")
    plt.ylim(-45,45)
    plt.yticks([-40, -20, 0, 20, 40])
    plt.ylabel("$y$ [cm]")
    plt.savefig(os.path.join(results_dir, "hipp_colored_locations.pdf"))


    # Plot the inferred weighted adjacency matrix
    fig = create_figure(figsize=(1.8, 1.8))
    ax = create_axis_at_location(fig, .4, .4, 1.1, 1.1)

    # Posterior mean over the second half of the samples (burn-in
    # discarded), permuted into place-field order.
    Weff = np.array(Weffs[N_samples//2:]).mean(0)
    Weff = Weff[np.ix_(node_perm, node_perm)]
    # BUG FIX: np.bool was deprecated and removed from NumPy; the
    # builtin bool is the documented replacement.
    lim = Weff[(1-np.eye(K)).astype(bool)].max()
    im = ax.imshow(np.kron(Weff, np.ones((20,20))),
                   interpolation="none", cmap="Greys", vmax=lim)
    ax.set_xticks([])
    ax.set_yticks([])

    # Color strips along the axes encode each node's place-field angle.
    node_values = ((np.pi+pfs_th[node_perm])/(2*np.pi))[:,None] *np.ones((K,2))
    yax = create_axis_at_location(fig, .2, .4, .3, 1.1)
    remove_plot_labels(yax)
    yax.imshow(node_values, interpolation="none",
               cmap=wheel_cmap)
    yax.set_xticks([])
    yax.set_yticks([])
    yax.set_ylabel("pre")

    xax = create_axis_at_location(fig, .4, .2, 1.1, .3)
    remove_plot_labels(xax)
    xax.imshow(node_values.T, interpolation="none",
               cmap=wheel_cmap)
    xax.set_xticks([])
    xax.set_yticks([])
    xax.set_xlabel("post")

    cbax = create_axis_at_location(fig, 1.55, .4, .04, 1.1)
    # BUG FIX: the original assigned the label list to
    # ``cbax.set_ticklabels`` (a no-op attribute assignment); call the
    # colorbar's set_ticklabels method instead.
    cb = plt.colorbar(im, cax=cbax, ticks=[0, .1, .2,  .3])
    cbax.tick_params(labelsize=8, pad=1)
    cb.set_ticklabels(["0", ".1", ".2",  ".3"])

    ax.set_title("Inferred Weights")
    plt.savefig(os.path.join(results_dir, "hipp_W.pdf"))

    # # Plot the inferred connection probability
    # plt.figure()
    # plt.imshow(P, interpolation="none", cmap="Greys", vmin=0)
    # plt.colorbar()

    # Plot the inferred connection-probability matrix
    fig = create_figure(figsize=(1.8, 1.8))
    ax = create_axis_at_location(fig, .4, .4, 1.1, 1.1)

    P = np.array(Ps[N_samples//2:]).mean(0)
    P = P[np.ix_(node_perm, node_perm)]
    im = ax.imshow(np.kron(P, np.ones((20,20))),
                   interpolation="none", cmap="Greys", vmin=0, vmax=1)
    ax.set_xticks([])
    ax.set_yticks([])

    node_values = ((np.pi+pfs_th[node_perm])/(2*np.pi))[:,None] *np.ones((K,2))
    yax = create_axis_at_location(fig, .2, .4, .3, 1.1)
    remove_plot_labels(yax)
    yax.imshow(node_values, interpolation="none",
               cmap=wheel_cmap)
    yax.set_xticks([])
    yax.set_yticks([])
    yax.set_ylabel("pre")

    xax = create_axis_at_location(fig, .4, .2, 1.1, .3)
    remove_plot_labels(xax)
    xax.imshow(node_values.T, interpolation="none",
               cmap=wheel_cmap)
    xax.set_xticks([])
    xax.set_yticks([])
    xax.set_xlabel("post")

    cbax = create_axis_at_location(fig, 1.55, .4, .04, 1.1)
    # BUG FIX: same no-op ``set_ticklabels`` attribute assignment as
    # above; call the colorbar method.
    cb = plt.colorbar(im, cax=cbax, ticks=[0, .5, 1])
    cbax.tick_params(labelsize=8, pad=1)
    cb.set_ticklabels(["0.0", "0.5",  "1.0"])

    ax.set_title("Inferred Probability")
    plt.savefig(os.path.join(results_dir, "hipp_P.pdf"))


    plt.show()

Example 46

Project: bayespy
Source File: lssm_sd.py
View license
def simulate_data(N):
    """Generate a length-``N`` time series with switching dynamics.

    Two latent regimes drive a 2-d state: regime 0 rotates the state
    (an oscillation), regime 1 keeps only its first component (a
    random walk).  The observation is the first state component plus
    strong Gaussian noise.

    Returns
    -------
    (Y, F) : tuple of ndarray
        ``Y`` has shape (1, N) and holds the noisy observations;
        ``F`` has shape (N,) and holds the noise-free signal.
    """
    # Regime 0: rotation by a small angle per step; regime 1: project
    # onto the first state component.
    angle = 0.02 * 2*np.pi
    rotation = [[np.cos(angle), -np.sin(angle)],
                [np.sin(angle),  np.cos(angle)]]
    collapse = [[1.0, 0.0],
                [0.0, 0.0]]
    dynamics = [rotation, collapse]
    emission = [[1.0, 0.0]]

    # Regime transitions: stay with probability `stay`, otherwise
    # switch to the other regime.
    stay = 0.993
    switch = (1-stay)/(2-1)
    trans = stay*np.identity(2) + switch*(np.ones((2,2))-np.identity(2))

    states = np.zeros((N, 2))
    regimes = np.zeros(N)
    observations = np.zeros(N)
    signal = np.zeros(N)

    regime = np.random.randint(2)
    state = np.random.randn(2)
    regimes[0] = regime
    states[0, :] = state
    for step in range(1, N):
        # Propagate the state under the current regime, then observe,
        # then (possibly) switch regime for the next step.
        state = np.dot(dynamics[regime], state) + np.random.randn(2)
        clean = np.dot(emission, state)
        noisy = clean + 5*np.random.randn()
        regime = np.random.choice(2, p=trans[regime])

        regimes[step] = regime
        states[step, :] = state
        observations[step] = noisy
        signal[step] = clean

    return (observations[None, :], signal)

Example 47

Project: trackpy
Source File: static.py
View license
def area_3d_bounded(dist, pos, box):
    """For each point in ``pos``, return the surface area of the
    sphere of radius ``dist`` around it that lies inside the
    rectangular ``box``.

    Starts from the full sphere surface and applies inclusion-
    exclusion with the six box faces: subtract the cap cut off by each
    face the sphere crosses; add back edge regions where two adjacent
    caps overlap (h1**2 + h2**2 < R**2, double-subtracted); subtract
    corner regions where three caps overlap (h1**2 + h2**2 + h3**2 <
    R**2, double-added).  Areas that vanish numerically are set to
    NaN.
    """
    # Full sphere surface as the starting point.
    area = 4*np.pi*dist**2

    # Distance from every point to each of the six faces, ordered
    # (x-, x+, y-, y+, z-, z+).
    face_dist = np.array([pos[:, 0] - box[0, 0], box[0, 1] - pos[:, 0],
                          pos[:, 1] - box[1, 0], box[1, 1] - pos[:, 1],
                          pos[:, 2] - box[2, 0], box[2, 1] - pos[:, 2]])

    # Subtract the spherical cap cut off by each crossed face.
    for fd in face_dist:
        capped = _protect_mask(fd < dist)
        if capped is None:
            continue
        area[capped] -= sphere_cap_area(fd[capped], dist[capped])

    # Two adjacent caps overlap in an edge region that was subtracted
    # twice; add it back once.
    adjacent_pairs = [[0, 2], [0, 3], [0, 4], [0, 5],
                      [1, 2], [1, 3], [1, 4], [1, 5],
                      [2, 4], [2, 5], [3, 4], [3, 5]]
    for i, j in adjacent_pairs:
        doubled = _protect_mask(face_dist[i]**2 + face_dist[j]**2 < dist**2)
        if doubled is None:
            continue
        area[doubled] += sphere_edge_area(face_dist[i, doubled],
                                          face_dist[j, doubled],
                                          dist[doubled])

    # Three adjacent caps overlap in a corner region that has now been
    # added back twice; subtract it once.
    corner_triples = [[0, 2, 4], [0, 2, 5], [0, 3, 4], [0, 3, 5],
                      [1, 2, 4], [1, 2, 5], [1, 3, 4], [1, 3, 5]]
    for i, j, k in corner_triples:
        tripled = _protect_mask(face_dist[i]**2 + face_dist[j]**2 +
                                face_dist[k]**2 < dist**2)
        if tripled is None:
            continue
        area[tripled] -= sphere_corner_area(face_dist[i, tripled],
                                            face_dist[j, tripled],
                                            face_dist[k, tripled],
                                            dist[tripled])

    # Numerically vanished areas are flagged as NaN.
    area[area < 10**-7 * dist**2] = np.nan

    return area

Example 48

Project: qgisSpaceSyntaxToolkit
Source File: layout.py
View license
def shell_layout(G, nlist=None, dim=2, scale=1., center=None):
    """Position nodes in concentric circles.

    Parameters
    ----------
    G : NetworkX graph or list of nodes

    nlist : list of lists, optional
       Node lists, one per shell.  Defaults to a single shell holding
       every node of *G*.

    dim : int
       Dimension of layout; only ``dim=2`` is supported.

    scale : float (default 1)
        Radius of the largest shell.

    center : array-like, optional
       Coordinate to translate the whole layout to (default origin).

    Returns
    -------
    dict
       Mapping node -> position (length-2 numpy array).

    Notes
    -----
    Works only in two dimensions and makes no attempt to minimize
    edge crossings.
    """
    import numpy as np

    if len(G) == 0:
        return {}

    if nlist is None:
        # Everything goes on one shell.
        nlist = [list(G)]

    n_shells = len(nlist)
    if len(nlist[0]) == 1:
        # A lone node in the innermost shell sits at the center.
        radius = 0.0
        n_shells -= 1
    else:
        radius = 1.0

    # Radial spacing between consecutive shells.
    gap = (scale / n_shells) if n_shells else scale
    radius *= gap

    positions = {}
    full_turn = 2.0*np.pi
    for shell in nlist:
        # Spread the shell's nodes evenly around its circle.
        angles = np.arange(0, full_turn, full_turn/len(shell))
        ring = np.column_stack([np.cos(angles), np.sin(angles)]) * radius
        positions.update(zip(shell, ring))
        radius += gap

    if center is not None:
        offset = np.asarray(center)
        for node, point in positions.items():
            positions[node] = point + offset

    return positions

Example 49

View license
def solve(fk):
    """Solve the 1-D Helmholtz-type problem for the Shen-basis
    coefficients, using the module-level solver selection.

    Parameters
    ----------
    fk : array_like
        Right-hand-side coefficients; only ``fk[1:-2]`` is used.

    Returns
    -------
    ndarray
        Solution coefficients ``uk_hat``.

    NOTE(review): depends on module-level ``ST``, ``N``, ``kx``,
    ``solver``, ``SFTc``, ``diags`` and ``la`` -- confirm they are in
    scope.  Under Python 3, ``M = (N-4)/2`` below is a float, which
    would break the ``np.zeros((2, M+1))`` shapes -- presumably this
    targets Python 2 integer division; verify.
    """
    k = ST.wavenumbers(N)

    if solver == "sparse":
        # Set up a naive solver based on spsolve
        aij = [2*np.pi*(k[1:]+1)/(k[1:]+2)]
        for i in range(2, N-3, 2):
            aij.append(4*np.pi*(k[1:-i]+1)/(k[1:-i]+2)**2)    
        A = diags(aij, range(0, N-3, 2))

        # Mass-matrix diagonals; ck corrects the last coefficient for
        # the Gauss-Chebyshev quadrature.
        ck = np.ones(N-3)
        if ST.quad == "GC": ck[-1] = 2
        bij = np.pi/2*np.ones(N-3)*(1+ck*(k[1:]/(k[1:]+2))**4)/k[1:]**2
        bio = -np.pi/2*np.ones(N-5)*(k[1:-2]/(k[1:-2]+2))**2/(k[1:-2]+2)**2
        biu = -np.pi/2*np.ones(N-5)*((k[3:]-2)/(k[3:]))**2/(k[3:]-2)**2
        B = diags([biu, bij, bio], range(-2, 3, 2)) 

        uk_hat = la.spsolve(A+kx**2*B, fk[1:-2])
        uk_hat /= k[1:]**2

    elif solver == "lu":        
        # Solve by splitting the system into odds and even coefficients,
        # because these are decoupled. Then use tailored LU factorization
        # and solve.

        uk_hat = np.zeros(N-3)

        M = (N-4)/2
        u0 = np.zeros((2, M+1))   # Diagonal entries of U
        u1 = np.zeros((2, M))     # Diagonal+1 entries of U
        u2 = np.zeros((2, M-1))   # Diagonal+2 entries of U
        L  = np.zeros((2, M))     # The single nonzero row of L 
        SFTc.LU_Helmholtz_1D(N, 1, ST.quad=="GC", kx, u0, u1, u2, L) # do LU factorization
        SFTc.Solve_Helmholtz_1D(N, 1, fk[1:-2], uk_hat, u0, u1, u2, L) # Solve

        #kx2 = np.meshgrid(np.arange(10), np.arange(10), indexing="ij")
        #alfa = np.sqrt(kx2[0]*kx2[0]+kx2[1]*kx2[1])
        #u0 = np.zeros((2, M+1, 10, 10))   # Diagonal entries of U
        #u1 = np.zeros((2, M, 10, 10))     # Diagonal+1 entries of U
        #u2 = np.zeros((2, M-1, 10, 10))   # Diagonal+2 entries of U
        #L  = np.zeros((2, M, 10, 10))     # The single nonzero row of L 

        #uk = np.zeros((N-3, 10, 10))
        #fk = np.zeros((N-3, 10, 10))
        #for i in range(10):
            #for j in range(10):
                #fk[:, i, j] = f_hat
                
        #SFTc.LU_Helmholtz_3D(N, 1, ST.quad=="GC", alfa, u0, u1, u2, L)
        #SFTc.Solve_Helmholtz_3D(N, 1, fk, uk, u0, u1, u2, L)        

        #uk = np.zeros((N-3, 10, 10), dtype=np.complex)
        #fk = np.zeros((N-3, 10, 10), dtype=np.complex)
        #for i in range(10):
            #for j in range(10):
                #fk[:, i, j].real = f_hat
                #fk[:, i, j].imag = f_hat
                
        #SFTc.Solve_Helmholtz_3D_complex(N, 1, fk, uk, u0, u1, u2, L)     
        
        #assert np.allclose(uk[:, 1, 1].real, uk_hat)
        #assert np.allclose(uk[:, 1, 1].imag, uk_hat)

            
    return uk_hat

Example 50

Project: spectralDNS
Source File: test_shentransforms.py
View license
def test_CDDmat(SD):
    """Check the Shen derivative matrix ``CDDmat`` against the exact
    derivative of u = (1 - x^2) sin(6 pi x), first in 1-D and then on
    a broadcast (M, 4, 4) complex array.

    NOTE(review): uses ``np.float`` / ``np.complex`` aliases, which
    were removed in NumPy 1.24+ -- this example targets an older
    NumPy; verify before running.  ``x`` and ``sin`` here are
    module-level sympy objects -- confirm they are in scope.
    """
    M = 256
    u = (1-x**2)*sin(np.pi*6*x)
    dudx = u.diff(x, 1)
    points, weights = SD.points_and_weights(M)
    # NOTE(review): these exact sympy-evaluated values are immediately
    # overwritten below; only ``uj`` survives through the transforms.
    dudx_j = np.array([dudx.subs(x, h) for h in points], dtype=np.float)
    uj = np.array([u.subs(x, h) for h in points], dtype=np.float)
    
    # Round-trip uj through the Shen transform, then compute the
    # derivative with the fast Chebyshev routine.
    dudx_j = np.zeros(M)
    u_hat = np.zeros(M)
    u_hat = SD.fst(uj, u_hat)
    uj = SD.ifst(u_hat, uj)
    u_hat = SD.fst(uj, u_hat)
    
    uc_hat = np.zeros(M)
    uc_hat = SD.fct(uj, uc_hat)
    du_hat = np.zeros(M)
    dudx_j = SD.fastChebDerivative(uj, dudx_j)
    
    Cm = CDDmat(np.arange(M).astype(np.float))
    TDMASolver = TDMA(SD.quad, False)
    
    # Matrix-vector product should equal the Shen scalar product of the
    # derivative.
    cs = Cm.matvec(u_hat)
    
    # Should equal (but not exact so use extra resolution)
    cs2 = np.zeros(M)
    cs2 = SD.fastShenScalar(dudx_j, cs2)
    
    assert np.allclose(cs, cs2)
    
    # Solving the tridiagonal mass system recovers the derivative.
    cs = TDMASolver(cs)
    du = np.zeros(M)
    du = SD.ifst(cs, du)

    assert np.linalg.norm(du-dudx_j)/M < 1e-10
    
    # Multidimensional version: same check broadcast over a complex
    # (M, 4, 4) array.
    u3_hat = u_hat.repeat(4*4).reshape((M, 4, 4)) + 1j*u_hat.repeat(4*4).reshape((M, 4, 4))    
    cs = Cm.matvec(u3_hat)
    cs2 = np.zeros((M, 4, 4), dtype=np.complex)
    du3 = dudx_j.repeat(4*4).reshape((M, 4, 4)) + 1j*dudx_j.repeat(4*4).reshape((M, 4, 4))
    cs2 = SD.fastShenScalar(du3, cs2)
    
    assert np.allclose(cs, cs2, 1e-10)
    
    cs = TDMASolver(cs)
    d3 = np.zeros((M, 4, 4), dtype=np.complex)
    d3 = SD.ifst(cs, d3)

    #from IPython import embed; embed()
    assert np.linalg.norm(du3-d3)/(M*16) < 1e-10