bokeh.plotting.output_notebook

Here are the examples of the Python API bokeh.plotting.output_notebook taken from open source projects. By voting up you can indicate which examples are most useful and appropriate.

6 Examples
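
A minimal sketch of typical usage (not taken from the projects below): call output_notebook() once per notebook, after which show() renders Bokeh figures inline instead of writing an HTML file.

    from bokeh.plotting import figure, output_notebook, show

    output_notebook()  # direct subsequent show() calls to inline notebook output

    p = figure(title='example', plot_width=300, plot_height=300)
    p.line([1, 2, 3], [4, 6, 5])  # a simple line glyph, purely illustrative
    show(p)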

3 Source : plotting.py
with MIT License
from andyljones

    def __init__(self, run=-1, rule='60s', **kwargs):
        self.run = run
        self.readers = registry.StatsReaders(run, **kwargs)
        self.groups = {}
        self.plotters = {}
        self.handle = None
        self.rule = rule

        bop.output_notebook(hide_banner=True)
        self.refresh()

    def refresh_groups(self):

0 Source : all_activities.py
with GNU Affero General Public License v3.0
from andrewcooke

def all_activities(start, finish):

    f'''
    # All Activities: {start.split()[0]} - {finish.split()[0]}
    '''

    '''
    $contents
    '''

    '''
    ## Build Maps
    
    Loop over activities, retrieve data, and construct maps. 
    '''

    s = session('-v2')
    maps = [map_thumbnail(100, 120, data)
            for data in (Statistics(s, activity_journal=aj).
                             by_name(ActivityReader, N.SPHERICAL_MERCATOR_X, N.SPHERICAL_MERCATOR_Y).
                             by_name(ActivityCalculator, N.ACTIVE_DISTANCE, N.ACTIVE_TIME).df
                         for aj in s.query(ActivityJournal).
                             filter(ActivityJournal.start >= local_date_to_time(start),
                                    ActivityJournal.start <= local_date_to_time(finish)).
                             order_by(ActivityJournal.start.desc()).all())
            if len(data[N.SPHERICAL_MERCATOR_X].dropna()) > 10]
    print(f'Found {len(maps)} activities')

    '''
    ## Display Maps
    '''

    output_notebook()
    show(htile(maps, 8))

0 Source : all_group_activities.py
with GNU Affero General Public License v3.0
from andrewcooke

def all_group_activities(start, finish, activity_group):

    f'''
    # All Activities for {activity_group}: {start.split()[0]} - {finish.split()[0]}
    '''

    '''
    $contents
    '''

    '''
    ## Build Maps
    
    Loop over activities, retrieve data, and construct maps. 
    '''

    s = session('-v2')
    maps = [map_thumbnail(100, 120, data)
            for data in (Statistics(s, activity_journal=aj).
                             by_name(ActivityReader, N.SPHERICAL_MERCATOR_X, N.SPHERICAL_MERCATOR_Y).
                             by_name(ActivityCalculator, N.ACTIVE_DISTANCE, N.ACTIVE_TIME).df
                         for aj in s.query(ActivityJournal).
                             filter(ActivityJournal.start >= local_date_to_time(start),
                                    ActivityJournal.start < local_date_to_time(finish),
                                    ActivityJournal.activity_group_id == ActivityGroup.from_name(s, activity_group)).
                             order_by(ActivityJournal.start.desc()).all())
            if len(data[N.SPHERICAL_MERCATOR_X].dropna()) > 10]
    print(f'Found {len(maps)} activities')

    '''
    ## Display Maps
    '''

    output_notebook()
    show(htile(maps, 8))

0 Source : fit_power_parameters.py
with GNU Affero General Public License v3.0
from andrewcooke

def fit_power_parameters(bookmark, large):

    f'''
    # Fit Power Parameters to {bookmark}

    This notebook allows you to calculate power parameters (CdA - air resistance, Crr - rolling resistance)
    from bookmarked activities.

    Beforehand you should have generated the bookmark by running

        > python -m ch2.data.coasting

    or similar to identify sections of activities with little pedalling.
    See that module for more information.

    The `large` parameter means that each bookmark is taken in its entirety.
    The alternative is that they are divided into small sub-samples reflecting the data sample rate.
    '''

    '''
    $contents
    '''

    '''
    ## Load Data
    
    Open a connection to the database and load the data we require.
    '''
    s = session('-v 5')
    large = strtobool(large)
    route = Statistics(s, bookmarks=bookmarks(s, bookmark)). \
        by_name(ActivityReader, N.LATITUDE, N.LONGITUDE, N.SPHERICAL_MERCATOR_X, N.SPHERICAL_MERCATOR_Y,
                N.DISTANCE, N.ELEVATION, N.SPEED, N.CADENCE)
    route.sort_index(inplace=True)  # bookmarks are not sorted by time
    if large:
        route, max_gap = bookend(route), None
    else:
        max_gap = 10
    route = add_differentials(route, max_gap=max_gap)
    if large:
        route = route.iloc[1::2]
    route.describe()

    '''
    ## Add Energy Calculations
    '''
    weight = 64+12  # weight of rider + bike / kg  todo - extract weight from db
    route = add_energy_budget(route, weight)
    route = add_cda_estimate(route)
    route = add_crr_estimate(route, weight)
    route.describe()

    '''
    ## Plot Constraints
    
    The calculations above added an energy budget for each "step" in the data.
    These included values for CdA and Crr that would "explain" the decrease in energy 
    (taking each alone - so the CdA is that required for all energy lost to air resistance, 
    the Crr is that required for all energy lost to rolling resistance).
    
    But we know that both CdA and Crr could be important.
    So what we want is a linear combination of the two.
    For example, maybe the energy loss is 90% due to CdA and 10% due to Crr.
    All these possible linear combinations lie on a line that joins 100% CdA and 0% Crr with 0% CdA and 100% Crr.
    
    So the plot below shows all possible combinations of CdA and Crr.
    And what we are looking for is the most common value.
    So we want to know where the plot is darkest / the lines are most dense. 
    '''
    output_notebook()
    f = figure(plot_width=500, plot_height=400)
    clean = route.loc[route[N.DELTA_ENERGY] < 0].dropna()
    cs = pd.DataFrame({N.CDA: [(0, cda) for cda in clean[N.CDA]],
                       N.CRR: [(crr, 0) for crr in clean[N.CRR]]})
    f.multi_line(xs=N.CDA, ys=N.CRR, source=cs, line_alpha=0.1, line_color='black')
    f.xaxis.axis_label = 'CdA'
    f.yaxis.axis_label = 'Crr'
    show(f)

    '''
    ## CdA Only
    
    If we ignore Crr then we can estimate CdA by looking at the relative number of constraints on CdA
    where Crr is zero.
    
    We do this by fitting to binned data.
    The peak in the fit(s) gives the value of CdA if Crr is unimportant.
    '''
    bins = np.linspace(0, 1.5, 30)
    width = bins[1] - bins[0]
    counts = clean[N.CDA].groupby(pd.cut(clean[N.CDA], bins)).size()
    print(counts.describe())

    cda = pd.DataFrame({N.CDA: 0.5 * (bins[:-1] + bins[1:]), 'n': counts.values})
    f = figure(plot_width=900, plot_height=300)
    f.quad(top=counts, left=bins[:-1]+0.1*width, right=bins[1:]-0.1*width, bottom=0)
    for order in range(2, 20, 2):
        coeff = sp.polyfit(cda[N.CDA], cda['n'], order)
        p = sp.poly1d(coeff)
        print(order, fmin(lambda x: -p(x), 0.6, disp=0)[0])
        f.line(x=cda[N.CDA], y=p(cda[N.CDA]), line_color='orange')
    show(f)

    '''
    ## Sample Constraints
    
    If we want to include Crr then we need to find a way to measure the "peak" in the messy line plot above.
    To do this we convert to a collection of points and then fit a 2D density function.
    
    Conversion to points is done by selecting points at random on each line.
    (You might think that shorter lines should generate fewer points.
    The way I see it, each line is an observation that constrains CdA and Crr.
    Each observation has equal weight, so each line generates a point.)
    
    Using random points avoids any systematic patterns from uniform sampling
    and allows re-runs to give some idea of the noise.
    '''

    def sample():
        clean.loc[:, 'random'] = np.random.random(size=len(clean))
        clean.loc[:, 'x'] = clean[N.CDA] * clean['random']
        clean.loc[:, 'y'] = clean[N.CRR] * (1 - clean['random'])
        return clean.loc[:, ['x', 'y']]

    s = pd.concat([sample() for _ in range(100 if large else 10)])
    print(s.describe())

    f = figure(plot_width=600, plot_height=600)
    f.scatter(x='x', y='y', source=s)
    show(f)

    '''
    ## Smooth, Find Maximum
    
    We generate and plot a Gaussian kernel density estimate.
    
    See https://towardsdatascience.com/simple-example-of-2d-density-plots-in-python-83b83b934f67
    
    You may want to play around with bandwidth by supplying a second argument to gaussian_kde. 
    See https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.gaussian_kde.html
    '''
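
    # As noted above, a second argument to gaussian_kde sets the bandwidth; these
    # alternatives are illustrative assumptions, not part of the original code:
    #     sp.stats.gaussian_kde(s.transpose(), 0.3)           # scalar factor
    #     sp.stats.gaussian_kde(s.transpose(), 'silverman')   # named rule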

    kernel = sp.stats.gaussian_kde(s.transpose())

    xmin, xmax = 0, 1
    ymin, ymax = 0, 0.02
    xx, yy = np.mgrid[xmin:xmax:100j, ymin:ymax:100j]
    xy = np.vstack([xx.ravel(), yy.ravel()])
    smooth = np.reshape(kernel(xy), xx.shape)

    fig = plt.figure(figsize=(8,8))
    ax = fig.gca()
    ax.set_xlim(xmin, xmax)
    ax.set_ylim(ymin, ymax)
    ax.contourf(xx, yy, smooth, cmap='coolwarm')
    cset = ax.contour(xx, yy, smooth, colors='k')
    ax.clabel(cset, inline=1, fontsize=10)
    ax.set_xlabel('CdA')
    ax.set_ylabel('Crr')
    plt.title('2D Gaussian Kernel density estimation')

    '''
    For my data this shows (roughly):
    
    * large=True: Crr ~ 0.005-0.006 and CdA ~ 0.40-0.45
    
    * large=False: Crr ~ 0.01 and CdA ~ 0.35-0.40
    
    which supports the idea that Crr isn't well-constrained by the data.
    
    I chose large=True values since I suspect the larger intervals make the elevation values more accurate.
    
    Since I am loading data with a command like
    
        > ch2 activities -D kit=cotic -- ~/archive/fit/**/*.fit
    
    I define this constant:
    
        > ch2 constants add --single Power.cotic \
            --description 'Bike namedtuple values to calculate power for this kit' \
            --validate ch2.stats.calculate.power.Bike
        > ch2 constants set Power.cotic '{"cda": 0.42, "crr": 0.0055, "weight": 12}'
        
    With that, the standard configuration should calculate power estimates.
    '''

0 Source : some_activities.py
with GNU Affero General Public License v3.0
from andrewcooke

def some_activities(constraint):

    f'''
    # Some Activities: {constraint}

    This displays thumbnails of routes that match the query over statistics.  For example,

        Active Distance > 40 & Active Distance < 60

    will show all activities with a distance between 40 and 60 km.
    '''

    '''
    $contents
    '''

    '''
    ## Build Maps
    
    Loop over activities, retrieve data, and construct maps. 
    '''

    s = session('-v2')
    maps = [map_thumbnail(100, 120, data)
            for data in (activity_statistics(s, SPHERICAL_MERCATOR_X, SPHERICAL_MERCATOR_Y,
                                             ACTIVE_DISTANCE, TOTAL_CLIMB,
                                             activity_journal=aj)
                         for aj in constrained_sources(s, constraint))
            if len(data[SPHERICAL_MERCATOR_X].dropna()) > 10]
    print(f'Found {len(maps)} activities')

    '''
    ## Display Maps
    '''

    output_notebook()
    show(htile(maps, 8))

0 Source : ui.py
with Apache License 2.0
from seeq12

def startSupervised(app, buttons, xsignal, ysignal, buttonClusterSupervised):

    for button in buttons:
        button.close()
    
    #bokeh configuration
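    # INLINE is assumed to be bokeh.resources.INLINE, so BokehJS is bundled with the notebook output rather than loaded from a CDN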
    output_notebook(INLINE)

    x, y = xsignal.value, ysignal.value
    if x == y:
        print('Must select different signals for X and Y! Please restart.')
        return

    #get the samples
    query_str = ""
    for sig in [x, y]:
        query_str += "Name == '{}' or ".format(sig)
    query = query_str[:-4] #delete "or" from the end
    
    to_pull = app.signals.query(query)

    datadf = seeqInterface.get_signals_samples(
            to_pull, 
            display_range = app.display_range,
            grid = app.grid, quiet = app.quiet
        )

    datadf.dropna(inplace = True)

    #modify the grid to use only signals in visual clustering:
    grid = seeqInterface.get_minumum_maximum_interpolation_for_signals_df(to_pull, app.display_range)
    app.grid = grid


    X = datadf[x]
    Y = datadf[y]

    #check if we have any constant signals. This is not allowed because clustering works on variability:
    if len(set(X.values)) == 1:
        print('Cannot use constant signals in clustering. Signal "{}" is constant'.format(x))
        return 
    if len(set(Y.values)) == 1:
        print('Cannot use constant signals in clustering. Signal "{}" is constant'.format(y))
        return

    #randomly down sample
    indices = np.random.choice(len(X), 1000)
    Xnew = X[indices]
    Ynew = Y[indices]

    global datasource

    s1 = ColumnDataSource(data=dict(x=Xnew, y=Ynew))
    p1 = figure(plot_width=400, plot_height=400, tools="lasso_select", title="Select Cluster")
    p1.circle('x', 'y', source=s1, alpha=0.1)

    X = datadf[x]
    Y = datadf[y]

    bins = 50

    H, xe, ye = np.histogram2d(X, Y, bins=bins)


    # produce an image of the 2d histogram
                                             ### centering here
    under_hist = p1.image(image=[H.T], x=xe[0]-((xe[1]-xe[0])/2), y=ye[0]-((ye[1]-ye[0])/2), dw=xe[-1] - xe[0], dh=ye[-1] - ye[0], 
                          palette=Blues[9][::-1]) 
    #the 9 is because the palette is a dict of lists keyed by the number of colors
    under_hist.level = 'underlay'

    #build histogram grid:
    xcoords = [(xe[i] + xe[i-1])/2 for i in range(1, len(xe))]
    ycoords = [(ye[i] + ye[i-1])/2 for i in range(1, len(ye))]
    
    hist_grid_points = np.array(list(itertools.product(xcoords, ycoords))) # one grid point for each 2D bin of the histogram

    s1 = ColumnDataSource(data=dict(x=hist_grid_points[:,0], y=hist_grid_points[:,1]))
    p1.circle('x', 'y', source=s1, selection_color='green', alpha=0.0, selection_alpha = 0.5, nonselection_alpha=0.0)
    
    s2 = ColumnDataSource(data=dict(x=[], y=[]))


    #the following will write a global variable to the ipython kernel

    out = s1.selected.js_on_change('indices', CustomJS(args=dict(s1=s1, s2=s2), code="""          
            var inds = cb_obj.indices;
            var d1 = s1.data;
            var d2 = s2.data;
            d2['x'] = []
            d2['y'] = []
            for (var i = 0; i < inds.length; i++) {
                d2['x'].push(d1['x'][inds[i]])
                d2['y'].push(d1['y'][inds[i]])
            }
            s2.change.emit()
            var command = "__builtins__.indexofselection =" + inds;

            var kernel = IPython.notebook.kernel;
            kernel.execute(command);
        """)
    )
    layout = row(p1)
    show(layout)
    display(VBox([buttonClusterSupervised]))
    datasource = s1

    return datadf, hist_grid_points
    
def clusterSupervised(app, buttons, xsignal, ysignal, clusterExtent, datadf, indexofselection, hist_grid_points, basename, timeOfRun, closeButton):